language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | pydata__xarray | xarray/tests/test_datatree_mapping.py | {
"start": 260,
"end": 7941
} | class ____:
def test_no_trees_passed(self):
with pytest.raises(TypeError, match="must pass at least one tree object"):
map_over_datasets(lambda x: x, "dt")
def test_not_isomorphic(self, create_test_datatree):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
dt2["set1/set2/extra"] = xr.DataTree(name="extra")
with pytest.raises(
TreeIsomorphismError,
match=re.escape(
r"children at node 'set1/set2' do not match: [] vs ['extra']"
),
):
map_over_datasets(lambda x, y: None, dt1, dt2)
def test_no_trees_returned(self, create_test_datatree):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
expected = xr.DataTree.from_dict(dict.fromkeys(dt1.to_dict()))
actual = map_over_datasets(lambda x, y: None, dt1, dt2)
assert_equal(expected, actual)
def test_single_tree_arg(self, create_test_datatree):
dt = create_test_datatree()
expected = create_test_datatree(modify=lambda x: 10.0 * x)
result_tree = map_over_datasets(lambda x: 10 * x, dt)
assert_equal(result_tree, expected)
def test_single_tree_arg_plus_arg(self, create_test_datatree):
dt = create_test_datatree()
expected = create_test_datatree(modify=lambda ds: (10.0 * ds))
result_tree = map_over_datasets(lambda x, y: x * y, dt, 10.0)
assert_equal(result_tree, expected)
result_tree = map_over_datasets(lambda x, y: x * y, 10.0, dt)
assert_equal(result_tree, expected)
def test_single_tree_arg_plus_kwarg(self, create_test_datatree):
dt = create_test_datatree()
expected = create_test_datatree(modify=lambda ds: (10.0 * ds))
def multiply_by_kwarg(ds, **kwargs):
ds = ds * kwargs.pop("multiplier")
return ds
result_tree = map_over_datasets(
multiply_by_kwarg, dt, kwargs=dict(multiplier=10.0)
)
assert_equal(result_tree, expected)
def test_multiple_tree_args(self, create_test_datatree):
dt1 = create_test_datatree()
dt2 = create_test_datatree()
expected = create_test_datatree(modify=lambda ds: 2.0 * ds)
result = map_over_datasets(lambda x, y: x + y, dt1, dt2)
assert_equal(result, expected)
def test_return_multiple_trees(self, create_test_datatree):
dt = create_test_datatree()
dt_min, dt_max = map_over_datasets(lambda x: (x.min(), x.max()), dt)
expected_min = create_test_datatree(modify=lambda ds: ds.min())
assert_equal(dt_min, expected_min)
expected_max = create_test_datatree(modify=lambda ds: ds.max())
assert_equal(dt_max, expected_max)
def test_return_wrong_type(self, simple_datatree):
dt1 = simple_datatree
with pytest.raises(
TypeError,
match=re.escape(
"the result of calling func on the node at position '.' is not a "
"Dataset or None or a tuple of such types"
),
):
map_over_datasets(lambda x: "string", dt1) # type: ignore[arg-type,return-value]
def test_return_tuple_of_wrong_types(self, simple_datatree):
dt1 = simple_datatree
with pytest.raises(
TypeError,
match=re.escape(
"the result of calling func on the node at position '.' is not a "
"Dataset or None or a tuple of such types"
),
):
map_over_datasets(lambda x: (x, "string"), dt1) # type: ignore[arg-type,return-value]
def test_return_inconsistent_number_of_results(self, simple_datatree):
dt1 = simple_datatree
with pytest.raises(
TypeError,
match=re.escape(
r"Calling func on the nodes at position set1 returns a tuple "
"of 0 datasets, whereas calling func on the nodes at position "
". instead returns a tuple of 2 datasets."
),
):
# Datasets in simple_datatree have different numbers of dims
map_over_datasets(lambda ds: tuple((None,) * len(ds.dims)), dt1)
def test_wrong_number_of_arguments_for_func(self, simple_datatree):
dt = simple_datatree
with pytest.raises(
TypeError, match="takes 1 positional argument but 2 were given"
):
map_over_datasets(lambda x: 10 * x, dt, dt)
def test_map_single_dataset_against_whole_tree(self, create_test_datatree):
dt = create_test_datatree()
def nodewise_merge(node_ds, fixed_ds):
return xr.merge([node_ds, fixed_ds])
other_ds = xr.Dataset({"z": ("z", [0])})
expected = create_test_datatree(modify=lambda ds: xr.merge([ds, other_ds]))
result_tree = map_over_datasets(nodewise_merge, dt, other_ds)
assert_equal(result_tree, expected)
@pytest.mark.xfail
def test_trees_with_different_node_names(self):
# TODO test this after I've got good tests for renaming nodes
raise NotImplementedError
def test_tree_method(self, create_test_datatree):
dt = create_test_datatree()
def multiply(ds, times):
return times * ds
expected = create_test_datatree(modify=lambda ds: 10.0 * ds)
result_tree = dt.map_over_datasets(multiply, 10.0)
assert_equal(result_tree, expected)
def test_tree_method_with_kwarg(self, create_test_datatree):
dt = create_test_datatree()
def multiply(ds, **kwargs):
return kwargs.pop("times") * ds
expected = create_test_datatree(modify=lambda ds: 10.0 * ds)
result_tree = dt.map_over_datasets(multiply, kwargs=dict(times=10.0))
assert_equal(result_tree, expected)
def test_discard_ancestry(self, create_test_datatree):
# Check for datatree GH issue https://github.com/xarray-contrib/datatree/issues/48
dt = create_test_datatree()
subtree = dt["set1"]
expected = create_test_datatree(modify=lambda ds: 10.0 * ds)["set1"]
result_tree = map_over_datasets(lambda x: 10.0 * x, subtree)
assert_equal(result_tree, expected)
def test_keep_attrs_on_empty_nodes(self, create_test_datatree):
# GH278
dt = create_test_datatree()
dt["set1/set2"].attrs["foo"] = "bar"
def empty_func(ds):
return ds
result = dt.map_over_datasets(empty_func)
assert result["set1/set2"].attrs == dt["set1/set2"].attrs
def test_error_contains_path_of_offending_node(self, create_test_datatree):
dt = create_test_datatree()
dt["set1"]["bad_var"] = 0
print(dt)
def fail_on_specific_node(ds):
if "bad_var" in ds:
raise ValueError("Failed because 'bar_var' present in dataset")
with pytest.raises(
ValueError,
match=re.escape(
r"Raised whilst mapping function over node(s) with path 'set1'"
),
):
dt.map_over_datasets(fail_on_specific_node)
def test_inherited_coordinates_with_index(self):
root = xr.Dataset(coords={"x": [1, 2]})
child = xr.Dataset({"foo": ("x", [0, 1])}) # no coordinates
tree = xr.DataTree.from_dict({"/": root, "/child": child})
actual = tree.map_over_datasets(lambda ds: ds) # identity
assert isinstance(actual, xr.DataTree)
assert_identical(tree, actual)
actual_child = actual.children["child"].to_dataset(inherit=False)
assert_identical(actual_child, child)
| TestMapOverSubTree |
python | geekcomputers__Python | Flappy Bird - created with tkinter/Bird.py | {
"start": 141,
"end": 7845
} | class ____(Thread):
"""
Classe para criar um pássaro
"""
__tag = "Bird"
__isAlive = None
__going_up = False
__going_down = 0
__times_skipped = 0
__running = False
decends = 0.00390625
climbsUp = 0.0911458333
def __init__(
self,
background,
gameover_function,
*screen_geometry,
fp="bird.png",
event="<Up>",
descend_speed=5,
):
# Verifica se "background" é uma instância de Background e se o "gamerover_method" é chamável
if not isinstance(background, Background):
raise TypeError(
"The background argument must be an instance of Background."
)
if not callable(gameover_function):
raise TypeError("The gameover_method argument must be a callable object.")
# Instância os parâmetros
self.__canvas = background
self.image_path = fp
self.__descend_speed = descend_speed
self.gameover_method = gameover_function
# Recebe a largura e altura do background
self.__width = screen_geometry[0]
self.__height = screen_geometry[1]
# Define a decida e subida do pássaro com base na altura do background
self.decends *= self.__height
self.decends = int(self.decends + 0.5)
self.climbsUp *= self.__height
self.climbsUp = int(self.climbsUp + 0.5)
# Invoca o método construtor de Thread
Thread.__init__(self)
# Calcula o tamanho do pássaro com base na largura e altura da janela
self.width = (self.__width // 100) * 6
self.height = (self.__height // 100) * 11
# Carrega e cria a imagem do pássaro no background
self.__canvas.bird_image = self.getPhotoImage(
image_path=self.image_path,
width=self.width,
height=self.height,
closeAfter=True,
)[0]
self.__birdID = self.__canvas.create_image(
self.__width // 2,
self.__height // 2,
image=self.__canvas.bird_image,
tag=self.__tag,
)
# Define evento para fazer o pássaro subir
self.__canvas.focus_force()
self.__canvas.bind(event, self.jumps)
self.__isAlive = True
def birdIsAlive(self):
"""
Método para verificar se o pássaro está vivo
"""
return self.__isAlive
def checkCollision(self):
"""
Método para verificar se o pássaro ultrapassou a borda da janela ou colidiu com algo
"""
# Recebe a posição do pássaro no background
position = list(self.__canvas.bbox(self.__tag))
# Se o pássaro tiver ultrapassado a borda de baixo do background, ele será declarado morto
if position[3] >= self.__height + 20:
self.__isAlive = False
# Se o pássaro tiver ultrapassado a borda de cima do background, ele será declarado morto
if position[1] <= -20:
self.__isAlive = False
# Dá uma margem de erro ao pássaro de X pixels
position[0] += int(25 / 78 * self.width)
position[1] += int(25 / 77 * self.height)
position[2] -= int(20 / 78 * self.width)
position[3] -= int(10 / 77 * self.width)
# Define os objetos a serem ignorados em colisões
ignored_collisions = self.__canvas.getBackgroundID()
ignored_collisions.append(self.__birdID)
# Verifica possíveis colisões com o pássaro
possible_collisions = list(self.__canvas.find_overlapping(*position))
# Remove das possíveis colisões os objetos ignorados
for _id in ignored_collisions:
try:
possible_collisions.remove(_id)
except BaseException:
continue
# Se houver alguma colisão o pássaro morre
if len(possible_collisions) >= 1:
self.__isAlive = False
return not self.__isAlive
def getTag(self):
"""
Método para retornar a tag do pássaro
"""
return self.__tag
@staticmethod
def getPhotoImage(
image=None, image_path=None, width=None, height=None, closeAfter=False
):
"""
Retorna um objeto da classe PIL.ImageTk.PhotoImage de uma imagem e as imagens criadas de PIL.Image
(photoImage, new, original)
@param image: Instância de PIL.Image.open
@param image_path: Diretório da imagem
@param width: Largura da imagem
@param height: Altura da imagem
@param closeAfter: Se True, a imagem será fechada após ser criado um PhotoImage da mesma
"""
if not image:
if not image_path:
return
# Abre a imagem utilizando o caminho dela
image = openImage(image_path)
# Será redimesionada a imagem somente se existir um width ou height
if not width:
width = image.width
if not height:
height = image.height
# Cria uma nova imagem já redimensionada
newImage = image.resize([width, height])
# Cria um photoImage
photoImage = PhotoImage(newImage)
# Se closeAfter for True, ele fecha as imagens
if closeAfter:
# Fecha a imagem nova
newImage.close()
newImage = None
# Fecha a imagem original
image.close()
image = None
# Retorna o PhotoImage da imagem,a nova imagem que foi utilizada e a imagem original
return photoImage, newImage, image
def jumps(self, event=None):
"""
Método para fazer o pássaro pular
"""
# Verifica se o pássaro saiu da área do background
self.checkCollision()
# Se o pássaro estiver morto, esse método não pode ser executado
if not self.__isAlive or not self.__running:
self.__going_up = False
return
# Declara que o pássaro está subindo
self.__going_up = True
self.__going_down = 0
# Move o pássaro enquanto o limite de subida por animação não tiver excedido
if self.__times_skipped < self.climbsUp:
# Move o pássaro para cima
self.__canvas.move(self.__tag, 0, -1)
self.__times_skipped += 1
# Executa o método novamente
self.__canvas.after(3, self.jumps)
else:
# Declara que o pássaro não está mais subindo
self.__going_up = False
self.__times_skipped = 0
def kill(self):
"""
Método para matar o pássaro
"""
self.__isAlive = False
def run(self):
"""
#Método para iniciar a animação do passáro caindo
"""
self.__running = True
# Verifica se o pássaro saiu da área do background
self.checkCollision()
# Enquanto o pássaro não tiver chegado em sua velocidade máxima, a velocidade aumentará em 0.05
if self.__going_down < self.decends:
self.__going_down += 0.05
# Executa a animação de descida somente se o pássaro estiver vivo
if self.__isAlive:
# Executa a animação de descida somente se o pássaro não estiver subindo
if not self.__going_up:
# Move o pássaro para baixo
self.__canvas.move(self.__tag, 0, self.__going_down)
# Executa novamente o método
self.__canvas.after(self.__descend_speed, self.run)
# Se o pássaro estiver morto, será executado um método de fim de jogo
else:
self.__running = False
self.gameover_method()
| Bird |
python | openai__openai-python | src/openai/types/conversations/text_content.py | {
"start": 188,
"end": 259
} | class ____(BaseModel):
text: str
type: Literal["text"]
| TextContent |
python | langchain-ai__langchain | libs/partners/anthropic/langchain_anthropic/_client_utils.py | {
"start": 395,
"end": 680
} | class ____(anthropic.DefaultHttpxClient):
"""Borrowed from anthropic._base_client."""
def __del__(self) -> None:
if self.is_closed:
return
try:
self.close()
except Exception: # noqa: S110
pass
| _SyncHttpxClientWrapper |
python | Netflix__metaflow | metaflow/decorators.py | {
"start": 3618,
"end": 8129
} | class ____(object):
"""
Base class for all decorators.
"""
name = "NONAME"
defaults = {}
# `allow_multiple` allows setting many decorators of the same type to a step/flow.
allow_multiple = False
def __init__(self, attributes=None, statically_defined=False, inserted_by=None):
self.attributes = self.defaults.copy()
self.statically_defined = statically_defined
self.inserted_by = inserted_by
self._user_defined_attributes = set()
self._ran_init = False
if attributes:
for k, v in attributes.items():
if k in self.defaults or k.startswith(UNPACK_KEY):
self.attributes[k] = v
if not k.startswith(UNPACK_KEY):
self._user_defined_attributes.add(k)
else:
raise InvalidDecoratorAttribute(self.name, k, self.defaults)
def init(self):
"""
Initializes the decorator. In general, any operation you would do in __init__
should be done here.
"""
pass
def external_init(self):
# In some cases (specifically when using remove_decorator), we may need to call
# init multiple times. Short-circuit re-evaluating.
if self._ran_init:
return
# Note that by design, later values override previous ones.
self.attributes, new_user_attributes = unpack_delayed_evaluator(self.attributes)
self._user_defined_attributes.update(new_user_attributes)
self.attributes = resolve_delayed_evaluator(self.attributes, to_dict=True)
if "init" in self.__class__.__dict__:
self.init()
self._ran_init = True
@classmethod
def extract_args_kwargs_from_decorator_spec(cls, deco_spec):
if len(deco_spec) == 0:
return [], {}
attrs = {}
# TODO: Do we really want to allow spaces in the names of attributes?!?
for a in re.split(r""",(?=[\s\w]+=)""", deco_spec):
name, val = a.split("=", 1)
try:
val_parsed = json.loads(val.strip().replace('\\"', '"'))
except json.JSONDecodeError:
# In this case, we try to convert to either an int or a float or
# leave as is. Prefer ints if possible.
try:
val_parsed = int(val.strip())
except ValueError:
try:
val_parsed = float(val.strip())
except ValueError:
val_parsed = val.strip()
attrs[name.strip()] = val_parsed
return [], attrs
@classmethod
def parse_decorator_spec(cls, deco_spec):
if len(deco_spec) == 0:
return cls()
_, kwargs = cls.extract_args_kwargs_from_decorator_spec(deco_spec)
return cls(attributes=kwargs)
def make_decorator_spec(self):
# Make sure all attributes are evaluated
self.external_init()
attrs = {k: v for k, v in self.attributes.items() if v is not None}
if attrs:
attr_list = []
# We dump simple types directly as string to get around the nightmare quote
# escaping but for more complex types (typically dictionaries or lists),
# we dump using JSON.
for k, v in attrs.items():
if isinstance(v, (int, float, str)):
attr_list.append("%s=%s" % (k, str(v)))
else:
attr_list.append("%s=%s" % (k, json.dumps(v).replace('"', '\\"')))
attrstr = ",".join(attr_list)
return "%s:%s" % (self.name, attrstr)
else:
return self.name
def get_args_kwargs(self) -> Tuple[List[Any], Dict[str, Any]]:
"""
Get the arguments and keyword arguments of the decorator.
Returns
-------
Tuple[List[Any], Dict[str, Any]]
A tuple containing a list of arguments and a dictionary of keyword arguments.
"""
return [], dict(self.attributes)
def __str__(self):
mode = "static" if self.statically_defined else "dynamic"
if self.inserted_by:
mode += " (inserted by %s)" % " from ".join(self.inserted_by)
attrs = " ".join("%s=%s" % x for x in self.attributes.items())
if attrs:
attrs = " " + attrs
fmt = "%s<%s%s>" % (self.name, mode, attrs)
return fmt
| Decorator |
python | bottlepy__bottle | bottle.py | {
"start": 156450,
"end": 157131
} | class ____(BaseTemplate):
def prepare(self, **options):
from Cheetah.Template import Template
self.context = threading.local()
self.context.vars = {}
options['searchList'] = [self.context.vars]
if self.source:
self.tpl = Template(source=self.source, **options)
else:
self.tpl = Template(file=self.filename, **options)
def render(self, *args, **kwargs):
for dictarg in args:
kwargs.update(dictarg)
self.context.vars.update(self.defaults)
self.context.vars.update(kwargs)
out = str(self.tpl)
self.context.vars.clear()
return out
| CheetahTemplate |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-google/llama_index/vector_stores/google/genai_extension.py | {
"start": 2214,
"end": 2759
} | class ____:
name: str
display_name: Optional[str]
create_time: Optional[timestamp_pb2.Timestamp]
update_time: Optional[timestamp_pb2.Timestamp]
@property
def corpus_id(self) -> str:
name = EntityName.from_str(self.name)
return name.corpus_id
@classmethod
def from_corpus(cls, c: genai.Corpus) -> "Corpus":
return cls(
name=c.name,
display_name=c.display_name,
create_time=c.create_time,
update_time=c.update_time,
)
@dataclass
| Corpus |
python | wandb__wandb | wandb/sdk/artifacts/_generated/artifact_version_files.py | {
"start": 654,
"end": 796
} | class ____(GQLResult):
files: Optional[ArtifactVersionFilesProjectArtifactTypeArtifactFiles]
| ArtifactVersionFilesProjectArtifactTypeArtifact |
python | pandas-dev__pandas | pandas/tests/frame/methods/test_select_dtypes.py | {
"start": 280,
"end": 534
} | class ____(ExtensionDtype):
type = int
def __init__(self, numeric) -> None:
self._numeric = numeric
@property
def name(self):
return "Dummy"
@property
def _is_numeric(self):
return self._numeric
| DummyDtype |
python | keras-team__keras | keras/src/layers/regularization/dropout.py | {
"start": 157,
"end": 4697
} | class ____(Layer):
"""Applies dropout to the input.
The `Dropout` layer randomly sets input units to 0 with a frequency of
`rate` at each step during training time, which helps prevent overfitting.
Inputs not set to 0 are scaled up by `1 / (1 - rate)` such that the sum over
all inputs is unchanged.
Note that the `Dropout` layer only applies when `training` is set to `True`
in `call()`, such that no values are dropped during inference.
When using `model.fit`, `training` will be appropriately set to `True`
automatically. In other contexts, you can set the argument explicitly
to `True` when calling the layer.
(This is in contrast to setting `trainable=False` for a `Dropout` layer.
`trainable` does not affect the layer's behavior, as `Dropout` does
not have any variables/weights that can be frozen during training.)
Args:
rate: Float between 0 and 1. Fraction of the input units to drop.
noise_shape: 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
For instance, if your inputs have shape
`(batch_size, timesteps, features)` and
you want the dropout mask to be the same for all timesteps,
you can use `noise_shape=(batch_size, 1, features)`.
seed: A Python integer to use as random seed.
Call arguments:
inputs: Input tensor (of any rank).
training: Python boolean indicating whether the layer should behave in
training mode (adding dropout) or in inference mode (doing nothing).
"""
def __init__(self, rate, noise_shape=None, seed=None, **kwargs):
super().__init__(**kwargs)
if not 0 <= rate <= 1:
raise ValueError(
f"Invalid value received for argument "
"`rate`. Expected a float value between 0 and 1. "
f"Received: rate={rate}"
)
self.rate = rate
self.seed = seed
self.noise_shape = self._validate_noise_shape(noise_shape)
if rate > 0:
self.seed_generator = backend.random.SeedGenerator(seed)
self.supports_masking = True
self._build_at_init()
def _validate_noise_shape(self, noise_shape):
if noise_shape is None:
return None
if isinstance(noise_shape, str):
raise ValueError(
f"Invalid value received for argument `noise_shape`. "
f"Expected a tuple or list of integers. "
f"Received: noise_shape={noise_shape}"
)
if not isinstance(noise_shape, tuple):
try:
noise_shape = tuple(noise_shape)
except TypeError:
raise ValueError(
f"Invalid value received for argument `noise_shape`. "
f"Expected an iterable of integers "
f"(e.g., a tuple or list). "
f"Received: noise_shape={noise_shape}"
)
for i, dim in enumerate(noise_shape):
if dim is not None:
if not isinstance(dim, int):
raise ValueError(
f"Invalid value received for argument `noise_shape`. "
f"Expected all elements to be integers or None. "
f"Received element at index {i}: {dim} "
f"(type: {type(dim).__name__})"
)
if dim <= 0:
raise ValueError(
f"Invalid value received for argument `noise_shape`. "
f"Expected all dimensions to be positive integers "
f"or None. "
f"Received negative or zero value at index {i}: {dim}"
)
return noise_shape
def call(self, inputs, training=False):
if training and self.rate > 0:
return backend.random.dropout(
inputs,
self.rate,
noise_shape=self.noise_shape,
seed=self.seed_generator,
)
return inputs
def compute_output_shape(self, input_shape):
return input_shape
def get_config(self):
base_config = super().get_config()
config = {
"rate": self.rate,
"seed": self.seed,
"noise_shape": self.noise_shape,
}
return {**base_config, **config}
| Dropout |
python | getsentry__sentry | src/sentry/api/permissions.py | {
"start": 1126,
"end": 1296
} | class ____(BasePermission):
def has_permission(self, request: Request, view: object) -> bool:
return getattr(request, "relay", None) is not None
| RelayPermission |
python | pypa__setuptools | setuptools/tests/test_editable_install.py | {
"start": 6115,
"end": 8873
} | class ____:
# legacy => pkg_resources.declare_namespace(...) + setup(namespace_packages=...)
def test_nspkg_file_is_unique(self, tmp_path, monkeypatch):
deprecation = pytest.warns(
SetuptoolsDeprecationWarning, match=".*namespace_packages parameter.*"
)
installation_dir = tmp_path / ".installation_dir"
installation_dir.mkdir()
examples = (
"myns.pkgA",
"myns.pkgB",
"myns.n.pkgA",
"myns.n.pkgB",
)
for name in examples:
pkg = namespaces.build_namespace_package(tmp_path, name, version="42")
with deprecation, monkeypatch.context() as ctx:
ctx.chdir(pkg)
dist = run_setup("setup.py", stop_after="config")
cmd = editable_wheel(dist)
cmd.finalize_options()
editable_name = cmd.get_finalized_command("dist_info").name
cmd._install_namespaces(installation_dir, editable_name)
files = list(installation_dir.glob("*-nspkg.pth"))
assert len(files) == len(examples)
@pytest.mark.parametrize(
"impl",
(
"pkg_resources",
# "pkgutil", => does not work
),
)
@pytest.mark.parametrize("ns", ("myns.n",))
def test_namespace_package_importable(
self, venv, tmp_path, ns, impl, editable_opts
):
"""
Installing two packages sharing the same namespace, one installed
naturally using pip or `--single-version-externally-managed`
and the other installed in editable mode should leave the namespace
intact and both packages reachable by import.
(Ported from test_develop).
"""
build_system = """\
[build-system]
requires = ["setuptools"]
build-backend = "setuptools.build_meta"
"""
pkg_A = namespaces.build_namespace_package(tmp_path, f"{ns}.pkgA", impl=impl)
pkg_B = namespaces.build_namespace_package(tmp_path, f"{ns}.pkgB", impl=impl)
(pkg_A / "pyproject.toml").write_text(build_system, encoding="utf-8")
(pkg_B / "pyproject.toml").write_text(build_system, encoding="utf-8")
# use pip to install to the target directory
opts = editable_opts[:]
opts.append("--no-build-isolation") # force current version of setuptools
venv.run(["python", "-m", "pip", "install", str(pkg_A), *opts])
venv.run(["python", "-m", "pip", "install", "-e", str(pkg_B), *opts])
venv.run(["python", "-c", f"import {ns}.pkgA; import {ns}.pkgB"])
# additionally ensure that pkg_resources import works
venv.run(["python", "-c", "import pkg_resources"])
| TestLegacyNamespaces |
python | pennersr__django-allauth | allauth/headless/internal/restkit/response.py | {
"start": 1267,
"end": 1969
} | class ____(APIResponse):
def __init__(
self, request, exception=None, input=None, status=HTTPStatus.BAD_REQUEST
):
errors = []
if exception is not None:
error_datas = ErrorList(exception.error_list).get_json_data()
errors.extend(error_datas)
if input is not None:
for field, error_list in input.errors.items():
error_datas = error_list.get_json_data()
for error_data in error_datas:
if field != "__all__":
error_data["param"] = field
errors.extend(error_datas)
super().__init__(request, status=status, errors=errors)
| ErrorResponse |
python | dagster-io__dagster | python_modules/libraries/dagster-databricks/dagster_databricks/databricks.py | {
"start": 843,
"end": 987
} | class ____(Enum):
OAUTH_M2M = "oauth-m2m"
PAT = "pat"
AZURE_CLIENT_SECRET = "azure-client-secret"
DEFAULT = "default"
| AuthTypeEnum |
python | has2k1__plotnine | plotnine/themes/themeable.py | {
"start": 16114,
"end": 16818
} | class ____(themeable):
"""
x axis label
Parameters
----------
theme_element : element_text
"""
_omit = ["margin"]
def apply_figure(self, figure: Figure, targets: ThemeTargets):
super().apply_figure(figure, targets)
if text := targets.axis_title_x:
props = self.properties
# ha can be a float and is handled by the layout manager
with suppress(KeyError):
del props["ha"]
text.set(**props)
def blank_figure(self, figure: Figure, targets: ThemeTargets):
super().blank_figure(figure, targets)
if text := targets.axis_title_x:
text.set_visible(False)
| axis_title_x |
python | django__django | tests/contenttypes_tests/test_checks.py | {
"start": 5502,
"end": 10128
} | class ____(SimpleTestCase):
def test_valid_generic_relationship(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Bookmark(models.Model):
tags = GenericRelation("TaggedItem")
self.assertEqual(Bookmark.tags.field.check(), [])
def test_valid_generic_relationship_with_explicit_fields(self):
class TaggedItem(models.Model):
custom_content_type = models.ForeignKey(ContentType, models.CASCADE)
custom_object_id = models.PositiveIntegerField()
content_object = GenericForeignKey(
"custom_content_type", "custom_object_id"
)
class Bookmark(models.Model):
tags = GenericRelation(
"TaggedItem",
content_type_field="custom_content_type",
object_id_field="custom_object_id",
)
self.assertEqual(Bookmark.tags.field.check(), [])
def test_pointing_to_missing_model(self):
class Model(models.Model):
rel = GenericRelation("MissingModel")
self.assertEqual(
Model.rel.field.check(),
[
checks.Error(
"Field defines a relation with model 'MissingModel', "
"which is either not installed, or is abstract.",
obj=Model.rel.field,
id="fields.E300",
)
],
)
def test_valid_self_referential_generic_relationship(self):
class Model(models.Model):
rel = GenericRelation("Model")
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
self.assertEqual(Model.rel.field.check(), [])
def test_missing_generic_foreign_key(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
class Bookmark(models.Model):
tags = GenericRelation("TaggedItem")
self.assertEqual(
Bookmark.tags.field.check(),
[
checks.Error(
"The GenericRelation defines a relation with the model "
"'contenttypes_tests.TaggedItem', but that model does not have a "
"GenericForeignKey.",
obj=Bookmark.tags.field,
id="contenttypes.E004",
)
],
)
@override_settings(TEST_SWAPPED_MODEL="contenttypes_tests.Replacement")
def test_pointing_to_swapped_model(self):
class Replacement(models.Model):
pass
class SwappedModel(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class Meta:
swappable = "TEST_SWAPPED_MODEL"
class Model(models.Model):
rel = GenericRelation("SwappedModel")
self.assertEqual(
Model.rel.field.check(),
[
checks.Error(
"Field defines a relation with the model "
"'contenttypes_tests.SwappedModel', "
"which has been swapped out.",
hint=(
"Update the relation to point at 'settings.TEST_SWAPPED_MODEL'."
),
obj=Model.rel.field,
id="fields.E301",
)
],
)
def test_field_name_ending_with_underscore(self):
class TaggedItem(models.Model):
content_type = models.ForeignKey(ContentType, models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey()
class InvalidBookmark(models.Model):
tags_ = GenericRelation("TaggedItem")
self.assertEqual(
InvalidBookmark.tags_.field.check(),
[
checks.Error(
"Field names must not end with an underscore.",
obj=InvalidBookmark.tags_.field,
id="fields.E001",
)
],
)
@isolate_apps("contenttypes_tests", attr_name="apps")
| GenericRelationTests |
python | pytorch__pytorch | test/inductor/test_custom_partitioner_fn.py | {
"start": 923,
"end": 2700
} | class ____(TestCase):
def test_custom_partitioner_fn(self):
"""
For function f(a, b), with the partitioner in the compile_fx stack,
the addition `a+b` (equivalently `buf0`) is saved for backward.
With the custom partitioner function, we indicate that
`a` and `b` (equivalently `primals_1` and `primals_2`) do not take
additional memory and thus, they are saved for backward.
"""
# initialization
@torch.compile
def f(a, b):
return (a + b).cos().cos()
a = torch.randn((2, 2), requires_grad=True, device=GPU_TYPE)
b = torch.randn((2, 2), requires_grad=True, device=GPU_TYPE)
# CASE 1 -- default
# addition `a + b` (i.e, `buf0`) is saved for backward.
code_og = run_fw_bw_and_get_code(lambda: f(a, b))
fwd_code_og = code_og[1][0]
FileCheck().check("return (buf1, buf0, )").run(fwd_code_og)
# CASE 2 -- custom partitioner function
# `a` and `b` (i.e., `primals_1` and `primals_2`) are saved for backward.
custom_partitioner_fn = MyCustomPartitionerFn()
self.assertFalse(custom_partitioner_fn.called)
self.assertIsNotNone(custom_partitioner_fn.uuid())
with torch._inductor.config.patch(custom_partitioner_fn=custom_partitioner_fn):
code_cp = run_fw_bw_and_get_code(lambda: f(a, b))
fwd_code_cp = code_cp[1][0]
FileCheck().check("return (buf0, primals_1, primals_2, )").run(fwd_code_cp)
# make sure the custom partitioner function is indeed invoked
self.assertTrue(custom_partitioner_fn.called)
if __name__ == "__main__":
from torch._inductor.test_case import run_tests
if HAS_GPU:
run_tests()
| TestCustomPartitionerFn |
python | chroma-core__chroma | chromadb/api/types.py | {
"start": 17013,
"end": 17207
} | class ____(TypedDict):
embeddings: Embeddings
ids: Optional[IDs]
where: Optional[Where]
where_document: Optional[WhereDocument]
include: Include
n_results: int
| QueryRequest |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/plugins.py | {
"start": 2075,
"end": 2353
} | class ____(BaseModel):
"""Base serializer for UI Plugin responses."""
model_config = ConfigDict(extra="allow")
name: str
icon: str | None = None
icon_dark_mode: str | None = None
url_route: str | None = None
category: str | None = None
| BaseUIResponse |
python | numpy__numpy | numpy/matrixlib/tests/test_defmatrix.py | {
"start": 6411,
"end": 6906
} | class ____:
def test_basic(self):
A = np.arange(100).reshape(10, 10)
mA = matrix(A)
mB = mA.copy()
O = np.ones((10, 10), np.float64) * 0.1
mB = mB + O
assert_(mB.dtype.type == np.float64)
assert_(np.all(mA != mB))
assert_(np.all(mB == mA + 0.1))
mC = mA.copy()
O = np.ones((10, 10), np.complex128)
mC = mC * O
assert_(mC.dtype.type == np.complex128)
assert_(np.all(mA != mB))
| TestCasting |
python | ray-project__ray | rllib/policy/sample_batch.py | {
"start": 50914,
"end": 68721
} | class ____:
"""A batch of experiences from multiple agents in the environment.
Attributes:
policy_batches (Dict[PolicyID, SampleBatch]): Dict mapping policy IDs to
SampleBatches of experiences.
count: The number of env steps in this batch.
"""
@PublicAPI
def __init__(self, policy_batches: Dict[PolicyID, SampleBatch], env_steps: int):
"""Initialize a MultiAgentBatch instance.
Args:
policy_batches: Dict mapping policy IDs to SampleBatches of experiences.
env_steps: The number of environment steps in the environment
this batch contains. This will be less than the number of
transitions this batch contains across all policies in total.
"""
for v in policy_batches.values():
assert isinstance(v, SampleBatch)
self.policy_batches = policy_batches
# Called "count" for uniformity with SampleBatch.
# Prefer to access this via the `env_steps()` method when possible
# for clarity.
self.count = env_steps
@PublicAPI
def env_steps(self) -> int:
"""The number of env steps (there are >= 1 agent steps per env step).
Returns:
The number of environment steps contained in this batch.
"""
return self.count
@PublicAPI
def __len__(self) -> int:
"""Same as `self.env_steps()`."""
return self.count
@PublicAPI
def agent_steps(self) -> int:
"""The number of agent steps (there are >= 1 agent steps per env step).
Returns:
The number of agent steps total in this batch.
"""
ct = 0
for batch in self.policy_batches.values():
ct += batch.count
return ct
@PublicAPI
def timeslices(self, k: int) -> List["MultiAgentBatch"]:
"""Returns k-step batches holding data for each agent at those steps.
For examples, suppose we have agent1 observations [a1t1, a1t2, a1t3],
for agent2, [a2t1, a2t3], and for agent3, [a3t3] only.
Calling timeslices(1) would return three MultiAgentBatches containing
[a1t1, a2t1], [a1t2], and [a1t3, a2t3, a3t3].
Calling timeslices(2) would return two MultiAgentBatches containing
[a1t1, a1t2, a2t1], and [a1t3, a2t3, a3t3].
This method is used to implement "lockstep" replay mode. Note that this
method does not guarantee each batch contains only data from a single
unroll. Batches might contain data from multiple different envs.
"""
from ray.rllib.evaluation.sample_batch_builder import SampleBatchBuilder
# Build a sorted set of (eps_id, t, policy_id, data...)
steps = []
for policy_id, batch in self.policy_batches.items():
for row in batch.rows():
steps.append(
(
row[SampleBatch.EPS_ID],
row[SampleBatch.T],
row[SampleBatch.AGENT_INDEX],
policy_id,
row,
)
)
steps.sort()
finished_slices = []
cur_slice = collections.defaultdict(SampleBatchBuilder)
cur_slice_size = 0
def finish_slice():
nonlocal cur_slice_size
assert cur_slice_size > 0
batch = MultiAgentBatch(
{k: v.build_and_reset() for k, v in cur_slice.items()}, cur_slice_size
)
cur_slice_size = 0
cur_slice.clear()
finished_slices.append(batch)
# For each unique env timestep.
for _, group in itertools.groupby(steps, lambda x: x[:2]):
# Accumulate into the current slice.
for _, _, _, policy_id, row in group:
cur_slice[policy_id].add_values(**row)
cur_slice_size += 1
# Slice has reached target number of env steps.
if cur_slice_size >= k:
finish_slice()
assert cur_slice_size == 0
if cur_slice_size > 0:
finish_slice()
assert len(finished_slices) > 0, finished_slices
return finished_slices
@staticmethod
@PublicAPI
def wrap_as_needed(
policy_batches: Dict[PolicyID, SampleBatch], env_steps: int
) -> Union[SampleBatch, "MultiAgentBatch"]:
"""Returns SampleBatch or MultiAgentBatch, depending on given policies.
If policy_batches is empty (i.e. {}) it returns an empty MultiAgentBatch.
Args:
policy_batches: Mapping from policy ids to SampleBatch.
env_steps: Number of env steps in the batch.
Returns:
The single default policy's SampleBatch or a MultiAgentBatch
(more than one policy).
"""
if len(policy_batches) == 1 and DEFAULT_POLICY_ID in policy_batches:
return policy_batches[DEFAULT_POLICY_ID]
return MultiAgentBatch(policy_batches=policy_batches, env_steps=env_steps)
@staticmethod
@PublicAPI
@Deprecated(new="concat_samples() from rllib.policy.sample_batch", error=True)
def concat_samples(samples: List["MultiAgentBatch"]) -> "MultiAgentBatch":
return concat_samples_into_ma_batch(samples)
@PublicAPI
def copy(self) -> "MultiAgentBatch":
"""Deep-copies self into a new MultiAgentBatch.
Returns:
The copy of self with deep-copied data.
"""
return MultiAgentBatch(
{k: v.copy() for (k, v) in self.policy_batches.items()}, self.count
)
@ExperimentalAPI
def to_device(
self,
device,
framework="torch",
pin_memory: bool = False,
use_stream: bool = False,
stream: Optional[Union["torch.cuda.Stream", "torch.cuda.Stream"]] = None,
):
"""TODO: transfer batch to given device as framework tensor."""
if framework == "torch":
assert torch is not None
for pid, policy_batch in self.policy_batches.items():
self.policy_batches[pid] = policy_batch.to_device(
device,
framework=framework,
pin_memory=pin_memory,
use_stream=use_stream,
stream=stream,
)
else:
raise NotImplementedError
return self
@PublicAPI
def size_bytes(self) -> int:
"""
Returns:
The overall size in bytes of all policy batches (all columns).
"""
return sum(b.size_bytes() for b in self.policy_batches.values())
@DeveloperAPI
def compress(
self, bulk: bool = False, columns: Set[str] = frozenset(["obs", "new_obs"])
) -> None:
"""Compresses each policy batch (per column) in place.
Args:
bulk: Whether to compress across the batch dimension (0)
as well. If False will compress n separate list items, where n
is the batch size.
columns: Set of column names to compress.
"""
for batch in self.policy_batches.values():
batch.compress(bulk=bulk, columns=columns)
@DeveloperAPI
def decompress_if_needed(
self, columns: Set[str] = frozenset(["obs", "new_obs"])
) -> "MultiAgentBatch":
"""Decompresses each policy batch (per column), if already compressed.
Args:
columns: Set of column names to decompress.
Returns:
Self.
"""
for batch in self.policy_batches.values():
batch.decompress_if_needed(columns)
return self
@DeveloperAPI
def as_multi_agent(self) -> "MultiAgentBatch":
"""Simply returns `self` (already a MultiAgentBatch).
Returns:
This very instance of MultiAgentBatch.
"""
return self
def __getitem__(self, key: str) -> SampleBatch:
"""Returns the SampleBatch for the given policy id."""
return self.policy_batches[key]
def __str__(self):
return "MultiAgentBatch({}, env_steps={})".format(
str(self.policy_batches), self.count
)
def __repr__(self):
return "MultiAgentBatch({}, env_steps={})".format(
str(self.policy_batches), self.count
)
@PublicAPI
def concat_samples(samples: List[SampleBatchType]) -> SampleBatchType:
"""Concatenates a list of SampleBatches or MultiAgentBatches.
If all items in the list are or SampleBatch typ4, the output will be
a SampleBatch type. Otherwise, the output will be a MultiAgentBatch type.
If input is a mixture of SampleBatch and MultiAgentBatch types, it will treat
SampleBatch objects as MultiAgentBatch types with 'default_policy' key and
concatenate it with th rest of MultiAgentBatch objects.
Empty samples are simply ignored.
Args:
samples: List of SampleBatches or MultiAgentBatches to be
concatenated.
Returns:
A new (concatenated) SampleBatch or MultiAgentBatch.
.. testcode::
:skipif: True
import numpy as np
from ray.rllib.policy.sample_batch import SampleBatch
b1 = SampleBatch({"a": np.array([1, 2]),
"b": np.array([10, 11])})
b2 = SampleBatch({"a": np.array([3]),
"b": np.array([12])})
print(concat_samples([b1, b2]))
c1 = MultiAgentBatch({'default_policy': {
"a": np.array([1, 2]),
"b": np.array([10, 11])
}}, env_steps=2)
c2 = SampleBatch({"a": np.array([3]),
"b": np.array([12])})
print(concat_samples([b1, b2]))
.. testoutput::
{"a": np.array([1, 2, 3]), "b": np.array([10, 11, 12])}
MultiAgentBatch = {'default_policy': {"a": np.array([1, 2, 3]),
"b": np.array([10, 11, 12])}}
"""
if any(isinstance(s, MultiAgentBatch) for s in samples):
return concat_samples_into_ma_batch(samples)
# the output is a SampleBatch type
concatd_seq_lens = []
concatd_num_grad_updates = [0, 0.0] # [0]=count; [1]=weighted sum values
concated_samples = []
# Make sure these settings are consistent amongst all batches.
zero_padded = max_seq_len = time_major = None
for s in samples:
if s.count <= 0:
continue
if max_seq_len is None:
zero_padded = s.zero_padded
max_seq_len = s.max_seq_len
time_major = s.time_major
# Make sure these settings are consistent amongst all batches.
if s.zero_padded != zero_padded or s.time_major != time_major:
raise ValueError(
"All SampleBatches' `zero_padded` and `time_major` settings "
"must be consistent!"
)
if (
s.max_seq_len is None or max_seq_len is None
) and s.max_seq_len != max_seq_len:
raise ValueError(
"Samples must consistently either provide or omit `max_seq_len`!"
)
elif zero_padded and s.max_seq_len != max_seq_len:
raise ValueError(
"For `zero_padded` SampleBatches, the values of `max_seq_len` "
"must be consistent!"
)
if max_seq_len is not None:
max_seq_len = max(max_seq_len, s.max_seq_len)
if s.get(SampleBatch.SEQ_LENS) is not None:
concatd_seq_lens.extend(s[SampleBatch.SEQ_LENS])
if s.num_grad_updates is not None:
concatd_num_grad_updates[0] += s.count
concatd_num_grad_updates[1] += s.num_grad_updates * s.count
concated_samples.append(s)
# If we don't have any samples (0 or only empty SampleBatches),
# return an empty SampleBatch here.
if len(concated_samples) == 0:
return SampleBatch()
# Collect the concat'd data.
concatd_data = {}
for k in concated_samples[0].keys():
if k == SampleBatch.INFOS:
concatd_data[k] = _concat_values(
*[s[k] for s in concated_samples],
time_major=time_major,
)
else:
values_to_concat = [c[k] for c in concated_samples]
_concat_values_w_time = partial(_concat_values, time_major=time_major)
concatd_data[k] = tree.map_structure(
_concat_values_w_time, *values_to_concat
)
if concatd_seq_lens != [] and torch and torch.is_tensor(concatd_seq_lens[0]):
concatd_seq_lens = torch.Tensor(concatd_seq_lens)
elif concatd_seq_lens != [] and tf and tf.is_tensor(concatd_seq_lens[0]):
concatd_seq_lens = tf.convert_to_tensor(concatd_seq_lens)
# Return a new (concat'd) SampleBatch.
return SampleBatch(
concatd_data,
seq_lens=concatd_seq_lens,
_time_major=time_major,
_zero_padded=zero_padded,
_max_seq_len=max_seq_len,
# Compute weighted average of the num_grad_updates for the batches
# (assuming they all come from the same policy).
_num_grad_updates=(
concatd_num_grad_updates[1] / (concatd_num_grad_updates[0] or 1.0)
),
)
@PublicAPI
def concat_samples_into_ma_batch(samples: List[SampleBatchType]) -> "MultiAgentBatch":
"""Concatenates a list of SampleBatchTypes to a single MultiAgentBatch type.
This function, as opposed to concat_samples() forces the output to always be
MultiAgentBatch which is more generic than SampleBatch.
Args:
samples: List of SampleBatches or MultiAgentBatches to be
concatenated.
Returns:
A new (concatenated) MultiAgentBatch.
.. testcode::
:skipif: True
import numpy as np
from ray.rllib.policy.sample_batch import SampleBatch
b1 = MultiAgentBatch({'default_policy': {
"a": np.array([1, 2]),
"b": np.array([10, 11])
}}, env_steps=2)
b2 = SampleBatch({"a": np.array([3]),
"b": np.array([12])})
print(concat_samples([b1, b2]))
.. testoutput::
{'default_policy': {"a": np.array([1, 2, 3]),
"b": np.array([10, 11, 12])}}
"""
policy_batches = collections.defaultdict(list)
env_steps = 0
for s in samples:
# Some batches in `samples` may be SampleBatch.
if isinstance(s, SampleBatch):
# If empty SampleBatch: ok (just ignore).
if len(s) <= 0:
continue
else:
# if non-empty: just convert to MA-batch and move forward
s = s.as_multi_agent()
elif not isinstance(s, MultiAgentBatch):
# Otherwise: Error.
raise ValueError(
"`concat_samples_into_ma_batch` can only concat "
"SampleBatch|MultiAgentBatch objects, not {}!".format(type(s).__name__)
)
for key, batch in s.policy_batches.items():
policy_batches[key].append(batch)
env_steps += s.env_steps()
out = {}
for key, batches in policy_batches.items():
out[key] = concat_samples(batches)
return MultiAgentBatch(out, env_steps)
def _concat_values(*values, time_major=None) -> TensorType:
"""Concatenates a list of values.
Args:
values: The values to concatenate.
time_major: Whether to concatenate along the first axis
(time_major=False) or the second axis (time_major=True).
"""
if torch and torch.is_tensor(values[0]):
return torch.cat(values, dim=1 if time_major else 0)
elif isinstance(values[0], np.ndarray):
return np.concatenate(values, axis=1 if time_major else 0)
elif tf and tf.is_tensor(values[0]):
return tf.concat(values, axis=1 if time_major else 0)
elif isinstance(values[0], list):
concatenated_list = []
for sublist in values:
concatenated_list.extend(sublist)
return concatenated_list
else:
raise ValueError(
f"Unsupported type for concatenation: {type(values[0])} "
f"first element: {values[0]}"
)
@DeveloperAPI
def convert_ma_batch_to_sample_batch(batch: SampleBatchType) -> SampleBatch:
"""Converts a MultiAgentBatch to a SampleBatch if neccessary.
Args:
batch: The SampleBatchType to convert.
Returns:
batch: the converted SampleBatch
Raises:
ValueError if the MultiAgentBatch has more than one policy_id
or if the policy_id is not `DEFAULT_POLICY_ID`
"""
if isinstance(batch, MultiAgentBatch):
policy_keys = batch.policy_batches.keys()
if len(policy_keys) == 1 and DEFAULT_POLICY_ID in policy_keys:
batch = batch.policy_batches[DEFAULT_POLICY_ID]
else:
raise ValueError(
"RLlib tried to convert a multi agent-batch with data from more "
"than one policy to a single-agent batch. This is not supported and "
"may be due to a number of issues. Here are two possible ones:"
"1) Off-Policy Estimation is not implemented for "
"multi-agent batches. You can set `off_policy_estimation_methods: {}` "
"to resolve this."
"2) Loading multi-agent data for offline training is not implemented."
"Load single-agent data instead to resolve this."
)
return batch
| MultiAgentBatch |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/qual_names.py | {
"start": 1579,
"end": 6529
} | class ____(object):
"""Represents a qualified name."""
def __init__(self, base, attr=None, subscript=None):
if attr is not None and subscript is not None:
raise ValueError('A QN can only be either an attr or a subscript, not '
'both: attr={}, subscript={}.'.format(attr, subscript))
self._has_attr = False
self._has_subscript = False
if attr is not None:
if not isinstance(base, QN):
raise ValueError(
'for attribute QNs, base must be a QN; got instead "%s"' % base)
if not isinstance(attr, str):
raise ValueError('attr may only be a string; got instead "%s"' % attr)
self._parent = base
# TODO(mdan): Get rid of the tuple - it can only have 1 or 2 elements now.
self.qn = (base, attr)
self._has_attr = True
elif subscript is not None:
if not isinstance(base, QN):
raise ValueError('For subscript QNs, base must be a QN.')
self._parent = base
self.qn = (base, subscript)
self._has_subscript = True
else:
if not isinstance(base, (str, Literal)):
# TODO(mdan): Require Symbol instead of string.
raise ValueError(
'for simple QNs, base must be a string or a Literal object;'
' got instead "%s"' % type(base))
assert '.' not in base and '[' not in base and ']' not in base
self._parent = None
self.qn = (base,)
def is_symbol(self):
return isinstance(self.qn[0], str)
def is_simple(self):
return len(self.qn) <= 1
def is_composite(self):
return len(self.qn) > 1
def has_subscript(self):
return self._has_subscript
def has_attr(self):
return self._has_attr
@property
def attr(self):
if not self._has_attr:
raise ValueError('Cannot get attr of non-attribute "%s".' % self)
return self.qn[1]
@property
def parent(self):
if self._parent is None:
raise ValueError('Cannot get parent of simple name "%s".' % self.qn[0])
return self._parent
@property
def owner_set(self):
"""Returns all the symbols (simple or composite) that own this QN.
In other words, if this symbol was modified, the symbols in the owner set
may also be affected.
Examples:
'a.b[c.d]' has two owners, 'a' and 'a.b'
"""
owners = set()
if self.has_attr() or self.has_subscript():
owners.add(self.parent)
owners.update(self.parent.owner_set)
return owners
@property
def support_set(self):
"""Returns the set of simple symbols that this QN relies on.
This would be the smallest set of symbols necessary for the QN to
statically resolve (assuming properties and index ranges are verified
at runtime).
Examples:
'a.b' has only one support symbol, 'a'
'a[i]' has two support symbols, 'a' and 'i'
"""
# TODO(mdan): This might be the set of Name nodes in the AST. Track those?
roots = set()
if self.has_attr():
roots.update(self.parent.support_set)
elif self.has_subscript():
roots.update(self.parent.support_set)
roots.update(self.qn[1].support_set)
else:
roots.add(self)
return roots
def __hash__(self):
return hash(self.qn + (self._has_attr, self._has_subscript))
def __eq__(self, other):
return (isinstance(other, QN) and self.qn == other.qn and
self.has_subscript() == other.has_subscript() and
self.has_attr() == other.has_attr())
def __lt__(self, other):
return str(self) < str(other)
def __gt__(self, other):
return str(self) > str(other)
def __str__(self):
root = self.qn[0]
if self.has_subscript():
return '{}[{}]'.format(root, self.qn[1])
if self.has_attr():
return '.'.join(map(str, self.qn))
else:
return str(root)
def __repr__(self):
return str(self)
def ssf(self):
"""Simple symbol form."""
ssfs = [n.ssf() if isinstance(n, QN) else n for n in self.qn]
ssf_string = ''
for i in range(0, len(self.qn) - 1):
if self.has_subscript():
delimiter = '_sub_'
else:
delimiter = '_'
ssf_string += ssfs[i] + delimiter
return ssf_string + ssfs[-1]
def ast(self):
"""AST representation."""
# The caller must adjust the context appropriately.
if self.has_subscript():
return gast.Subscript(
value=self.parent.ast(),
slice=self.qn[-1].ast(),
ctx=CallerMustSetThis)
if self.has_attr():
return gast.Attribute(
value=self.parent.ast(), attr=self.qn[-1], ctx=CallerMustSetThis)
base = self.qn[0]
if isinstance(base, str):
return gast.Name(
base, ctx=CallerMustSetThis, annotation=None, type_comment=None)
elif isinstance(base, Literal):
return gast.Constant(base.value, kind=None)
else:
assert False, ('the constructor should prevent types other than '
'str and Literal')
| QN |
python | google__jax | tests/key_reuse_test.py | {
"start": 20975,
"end": 23861
} | class ____(jtu.JaxTestCase):
def assertEquivalent(self, a, b):
self.assertEqual(a, b)
self.assertEqual(hash(a), hash(b))
def assertNotEquivalent(self, a, b):
self.assertNotEqual(a, b)
self.assertNotEqual(hash(a), hash(b))
def test_source_sink_immutability(self):
mask = np.array([True, False])
orig_mask_writeable = mask.flags.writeable
sink = Sink(0, mask)
source = Source(0, mask)
self.assertFalse(sink.mask.flags.writeable)
self.assertFalse(source.mask.flags.writeable)
self.assertEqual(mask.flags.writeable, orig_mask_writeable)
with self.assertRaises(ValueError):
sink.idx = 1
with self.assertRaises(ValueError):
sink.mask = True
with self.assertRaises(ValueError):
source.idx = 1
with self.assertRaises(ValueError):
source.mask = True
def test_source_sink_forward_equivalence_semantics(self):
true_mask = np.array([True, True])
false_mask = np.array([False, False])
mixed_mask = np.array([True, False])
self.assertEquivalent(Source(0), Source(0, True))
self.assertEquivalent(Source(0, True), Source(0, true_mask))
self.assertEquivalent(Source(0, False), Source(0, false_mask))
self.assertEquivalent(Source(0, mixed_mask), Source(0, mixed_mask))
self.assertNotEquivalent(Source(0), Source(1))
self.assertNotEquivalent(Source(0), Source(0, False))
self.assertNotEquivalent(Source(0), Source(0, mixed_mask))
self.assertEquivalent(Sink(0), Sink(0, True))
self.assertEquivalent(Sink(0, True), Sink(0, true_mask))
self.assertEquivalent(Sink(0, False), Sink(0, false_mask))
self.assertEquivalent(Sink(0, mixed_mask), Sink(0, mixed_mask))
self.assertNotEquivalent(Sink(0), Sink(1))
self.assertNotEquivalent(Sink(0), Sink(0, False))
self.assertNotEquivalent(Sink(0), Sink(0, mixed_mask))
self.assertNotEquivalent(Source(0), Sink(0))
self.assertEquivalent(Forward(0, 1), Forward(0, 1))
self.assertNotEquivalent(Forward(0, 1), Forward(1, 0))
def test_signature_equality_semantics(self):
self.assertEquivalent(
KeyReuseSignature(Sink(0), Source(1), Forward(1, 0)),
KeyReuseSignature(Forward(1, 0), Source(1), Sink(0)))
self.assertEquivalent(
KeyReuseSignature(), KeyReuseSignature())
self.assertNotEquivalent(
KeyReuseSignature(Source(0)), KeyReuseSignature(Sink(0)))
def test_reprs(self):
self.assertEqual(repr(Sink(0)), "Sink(0)")
self.assertEqual(repr(Source(0)), "Source(0)")
self.assertEqual(repr(Forward(0, 1)), "Forward(0, 1)")
self.assertEqual(repr(KeyReuseSignature(Sink(1), Source(0))),
"KeyReuseSignature(Sink(1), Source(0))")
self.assertEqual(repr(KeyReuseSignature(Sink(1), Sink(0))),
"KeyReuseSignature(Sink(0), Sink(1))")
@jtu.with_config(jax_enable_checks=False)
| KeyReuseImplementationTest |
python | tensorflow__tensorflow | tensorflow/python/training/basic_session_run_hooks_test.py | {
"start": 37629,
"end": 46318
} | class ____(test.TestCase):
def setUp(self):
self.log_dir = tempfile.mkdtemp()
def tearDown(self):
shutil.rmtree(self.log_dir, ignore_errors=True)
@test.mock.patch.object(time, 'time')
def test_step_counter_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
training_util.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
hook = basic_session_run_hooks.StepCounterHook(
summary_writer=summary_writer, every_n_steps=10)
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
with test.mock.patch.object(tf_logging, 'warning') as mock_log:
for _ in range(30):
mock_time.return_value += 0.01
mon_sess.run(train_op)
# logging.warning should not be called.
self.assertIsNone(mock_log.call_args)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([11, 21], summary_writer.summaries.keys())
for step in [11, 21]:
summary_value = summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test.mock.patch.object(time, 'time')
def test_step_counter_every_n_secs(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
training_util.get_or_create_global_step()
train_op = training_util._increment_global_step(1)
summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
hook = basic_session_run_hooks.StepCounterHook(
summary_writer=summary_writer, every_n_steps=None, every_n_secs=0.1)
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
mock_time.return_value += 0.2
mon_sess.run(train_op)
mock_time.return_value += 0.2
mon_sess.run(train_op)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertTrue(summary_writer.summaries, 'No summaries were created.')
self.assertItemsEqual([2, 3], summary_writer.summaries.keys())
for summary in summary_writer.summaries.values():
summary_value = summary[0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
def test_global_step_name(self):
with ops.Graph().as_default() as g, session_lib.Session() as sess:
with variable_scope.variable_scope('bar'):
variable_scope.get_variable(
'foo',
initializer=0,
trainable=False,
collections=[
ops.GraphKeys.GLOBAL_STEP, ops.GraphKeys.GLOBAL_VARIABLES
])
train_op = training_util._increment_global_step(1)
summary_writer = fake_summary_writer.FakeSummaryWriter(self.log_dir, g)
hook = basic_session_run_hooks.StepCounterHook(
summary_writer=summary_writer, every_n_steps=1, every_n_secs=None)
hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op)
mon_sess.run(train_op)
hook.end(sess)
summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertTrue(summary_writer.summaries, 'No summaries were created.')
self.assertItemsEqual([2], summary_writer.summaries.keys())
summary_value = summary_writer.summaries[2][0].value[0]
self.assertEqual('bar/foo/sec', summary_value.tag)
def test_log_warning_if_global_step_not_increased(self):
with ops.Graph().as_default(), session_lib.Session() as sess:
training_util.get_or_create_global_step()
train_op = training_util._increment_global_step(0) # keep same.
self.evaluate(variables_lib.global_variables_initializer())
hook = basic_session_run_hooks.StepCounterHook(
every_n_steps=1, every_n_secs=None)
hook.begin()
mon_sess = monitored_session._HookedSession(sess, [hook])
mon_sess.run(train_op) # Run one step to record global step.
with test.mock.patch.object(tf_logging, 'log_first_n') as mock_log:
for _ in range(30):
mon_sess.run(train_op)
self.assertRegex(
str(mock_log.call_args), 'global step.*has not been increased')
hook.end(sess)
def _setup_steps_per_run_test(self,
every_n_steps,
steps_per_run,
graph,
sess):
training_util.get_or_create_global_step()
self.train_op = training_util._increment_global_step(steps_per_run)
self.summary_writer = fake_summary_writer.FakeSummaryWriter(
self.log_dir, graph)
self.hook = basic_session_run_hooks.StepCounterHook(
summary_writer=self.summary_writer, every_n_steps=every_n_steps)
self.hook._set_steps_per_run(steps_per_run)
self.hook.begin()
self.evaluate(variables_lib.global_variables_initializer())
self.mon_sess = monitored_session._HookedSession(sess, [self.hook])
@test.mock.patch.object(time, 'time')
def test_steps_per_run_less_than_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
self._setup_steps_per_run_test(10, 5, g, sess)
# Logs at 15, 25
for _ in range(5):
mock_time.return_value += 0.01
self.mon_sess.run(self.train_op)
self.hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([15, 25], self.summary_writer.summaries.keys())
for step in [15, 25]:
summary_value = self.summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test.mock.patch.object(time, 'time')
def test_steps_per_run_equal_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
self._setup_steps_per_run_test(5, 5, g, sess)
# Logs at 10, 15, 20, 25
for _ in range(5):
mock_time.return_value += 0.01
self.mon_sess.run(self.train_op)
self.hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([10, 15, 20, 25],
self.summary_writer.summaries.keys())
for step in [10, 15, 20, 25]:
summary_value = self.summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test.mock.patch.object(time, 'time')
def test_steps_per_run_greater_than_every_n_steps(self, mock_time):
mock_time.return_value = MOCK_START_TIME
with ops.Graph().as_default() as g, session_lib.Session() as sess:
self._setup_steps_per_run_test(5, 10, g, sess)
# Logs at 20, 30, 40, 50
for _ in range(5):
mock_time.return_value += 0.01
self.mon_sess.run(self.train_op)
self.hook.end(sess)
self.summary_writer.assert_summaries(
test_case=self,
expected_logdir=self.log_dir,
expected_graph=g,
expected_summaries={})
self.assertItemsEqual([20, 30, 40, 50],
self.summary_writer.summaries.keys())
for step in [20, 30, 40, 50]:
summary_value = self.summary_writer.summaries[step][0].value[0]
self.assertEqual('global_step/sec', summary_value.tag)
self.assertGreater(summary_value.simple_value, 0)
@test_util.run_deprecated_v1
| StepCounterHookTest |
python | pytorch__pytorch | test/dynamo/cpython/3_13/test_dict.py | {
"start": 700,
"end": 1728
} | class ____(importlib.abc.MetaPathFinder):
def find_spec(self, fullname, path, target=None):
# Check if the import is the problematic one
if fullname in redirect_imports:
try:
# Attempt to import the standalone module
name = fullname.removeprefix("test.")
r = importlib.import_module(name)
# Redirect the module in sys.modules
sys.modules[fullname] = r
# Return a module spec from the found module
return importlib.util.find_spec(name)
except ImportError:
return None
return None
# Add the custom finder to sys.meta_path
sys.meta_path.insert(0, RedirectImportFinder())
# ======= END DYNAMO PATCH =======
import collections
import collections.abc
import gc
import pickle
import random
import string
import sys
import unittest
import weakref
from test import support
from test.support import import_helper, get_c_recursion_limit
| RedirectImportFinder |
python | doocs__leetcode | solution/0700-0799/0753.Cracking the Safe/Solution.py | {
"start": 0,
"end": 451
} | class ____:
def crackSafe(self, n: int, k: int) -> str:
def dfs(u):
for x in range(k):
e = u * 10 + x
if e not in vis:
vis.add(e)
v = e % mod
dfs(v)
ans.append(str(x))
mod = 10 ** (n - 1)
vis = set()
ans = []
dfs(0)
ans.append("0" * (n - 1))
return "".join(ans)
| Solution |
python | huggingface__transformers | src/transformers/models/perceiver/modeling_perceiver.py | {
"start": 57909,
"end": 64315
} | class ____(PerceiverPreTrainedModel):
def __init__(self, config):
super().__init__(config)
fourier_position_encoding_kwargs_preprocessor = {
"num_bands": 64,
"max_resolution": config.train_size,
"sine_only": False,
"concat_pos": True,
}
fourier_position_encoding_kwargs_decoder = {
"concat_pos": True,
"max_resolution": config.train_size,
"num_bands": 64,
"sine_only": False,
}
image_preprocessor = PerceiverImagePreprocessor(
config,
prep_type="patches",
spatial_downsample=1,
conv_after_patching=True,
conv_after_patching_in_channels=54,
temporal_downsample=2,
position_encoding_type="fourier",
# position_encoding_kwargs
fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_preprocessor,
)
self.perceiver = PerceiverModel(
config,
input_preprocessor=image_preprocessor,
decoder=PerceiverOpticalFlowDecoder(
config,
num_channels=image_preprocessor.num_channels,
output_image_shape=config.train_size,
rescale_factor=100.0,
# decoder kwargs
use_query_residual=False,
output_num_channels=2,
# We query the decoder using the first frame features
# rather than a standard decoder position encoding.
position_encoding_type="fourier",
fourier_position_encoding_kwargs=fourier_position_encoding_kwargs_decoder,
),
)
# Initialize weights and apply final processing
self.post_init()
@auto_docstring
def forward(
self,
inputs: Optional[torch.Tensor] = None,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
labels: Optional[torch.Tensor] = None,
return_dict: Optional[bool] = None,
) -> Union[tuple, PerceiverClassifierOutput]:
r"""
inputs (`torch.FloatTensor`):
Inputs to the perceiver. Can be anything: images, text, audio, video, etc.
labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
Labels for computing the optical flow loss. Indices should be in `[0, ..., config.num_labels - 1]`.
Examples:
```python
>>> from transformers import PerceiverForOpticalFlow
>>> import torch
>>> model = PerceiverForOpticalFlow.from_pretrained("deepmind/optical-flow-perceiver")
>>> # in the Perceiver IO paper, the authors extract a 3 x 3 patch around each pixel,
>>> # leading to 3 x 3 x 3 = 27 values for each pixel (as each pixel also has 3 color channels)
>>> # patches have shape (batch_size, num_frames, num_channels, height, width)
>>> # the authors train on resolutions of 368 x 496
>>> patches = torch.randn(1, 2, 27, 368, 496)
>>> outputs = model(inputs=patches)
>>> logits = outputs.logits
>>> list(logits.shape)
[1, 368, 496, 2]
```"""
return_dict = return_dict if return_dict is not None else self.config.use_return_dict
loss = None
if labels is not None:
raise NotImplementedError("Optical flow training is not yet supported")
outputs = self.perceiver(
inputs=inputs,
attention_mask=attention_mask,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
)
logits = outputs.logits if return_dict else outputs[0]
if not return_dict:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return PerceiverClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
cross_attentions=outputs.cross_attentions,
)
@auto_docstring(
custom_intro="""
Example use of Perceiver for multimodal (video) autoencoding, for tasks such as Kinetics-700.
[`PerceiverForMultimodalAutoencoding`] uses [`~models.perceiver.modeling_perceiver.PerceiverMultimodalPreprocessor`] to
preprocess the 3 modalities: images, audio and class labels. This preprocessor uses modality-specific preprocessors to
preprocess every modality separately, after which they are concatenated. Trainable position embeddings are used to pad
each modality to the same number of channels to make concatenation along the time dimension possible. Next, one applies
the Perceiver encoder.
[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] is used to decode the latent representation of
[`PerceiverModel`]. This decoder uses each modality-specific decoder to construct queries. The decoder queries are
created based on the inputs after preprocessing. However, autoencoding an entire video in a single forward pass is
computationally infeasible, hence one only uses parts of the decoder queries to do cross-attention with the latent
representation. This is determined by the subsampled indices for each modality, which can be provided as additional
input to the forward pass of [`PerceiverForMultimodalAutoencoding`].
[`~models.perceiver.modeling_perceiver.PerceiverMultimodalDecoder`] also pads the decoder queries of the different
modalities to the same number of channels, in order to concatenate them along the time dimension. Next, cross-attention
is performed with the latent representation of [`PerceiverModel`].
Finally, [`~models.perceiver.modeling_perceiver.PerceiverMultiModalPostprocessor`] is used to turn this tensor into an
actual video. It first splits up the output into the different modalities, and then applies the respective
postprocessor for each modality.
Note that, by masking the classification label during evaluation (i.e. simply providing a tensor of zeros for the
"label" modality), this auto-encoding model becomes a Kinetics 700 video classifier.
"""
)
| PerceiverForOpticalFlow |
python | pallets__werkzeug | src/werkzeug/exceptions.py | {
"start": 14884,
"end": 15158
} | class ____(HTTPException):
"""*413* `Request Entity Too Large`
The status code one should return if the data submitted exceeded a given
limit.
"""
code = 413
description = "The data value transmitted exceeds the capacity limit."
| RequestEntityTooLarge |
python | dateutil__dateutil | tests/test_tz.py | {
"start": 6969,
"end": 16573
} | class ____(object):
""" Mix-in class for testing ambiguous times """
def gettz(self, tzname):
raise NotImplementedError
def _get_tzname(self, tzname):
return tzname
def _gettz_context(self, tzname):
return context_passthrough()
def testFoldPositiveUTCOffset(self):
# Test that we can resolve ambiguous times
tzname = self._get_tzname('Australia/Sydney')
with self._gettz_context(tzname):
SYD = self.gettz(tzname)
t0_u = datetime(2012, 3, 31, 15, 30, tzinfo=tz.UTC) # AEST
t1_u = datetime(2012, 3, 31, 16, 30, tzinfo=tz.UTC) # AEDT
t0_syd0 = t0_u.astimezone(SYD)
t1_syd1 = t1_u.astimezone(SYD)
self.assertEqual(t0_syd0.replace(tzinfo=None),
datetime(2012, 4, 1, 2, 30))
self.assertEqual(t1_syd1.replace(tzinfo=None),
datetime(2012, 4, 1, 2, 30))
self.assertEqual(t0_syd0.utcoffset(), timedelta(hours=11))
self.assertEqual(t1_syd1.utcoffset(), timedelta(hours=10))
def testGapPositiveUTCOffset(self):
# Test that we don't have a problem around gaps.
tzname = self._get_tzname('Australia/Sydney')
with self._gettz_context(tzname):
SYD = self.gettz(tzname)
t0_u = datetime(2012, 10, 6, 15, 30, tzinfo=tz.UTC) # AEST
t1_u = datetime(2012, 10, 6, 16, 30, tzinfo=tz.UTC) # AEDT
t0 = t0_u.astimezone(SYD)
t1 = t1_u.astimezone(SYD)
self.assertEqual(t0.replace(tzinfo=None),
datetime(2012, 10, 7, 1, 30))
self.assertEqual(t1.replace(tzinfo=None),
datetime(2012, 10, 7, 3, 30))
self.assertEqual(t0.utcoffset(), timedelta(hours=10))
self.assertEqual(t1.utcoffset(), timedelta(hours=11))
def testFoldNegativeUTCOffset(self):
# Test that we can resolve ambiguous times
tzname = self._get_tzname('America/Toronto')
with self._gettz_context(tzname):
TOR = self.gettz(tzname)
t0_u = datetime(2011, 11, 6, 5, 30, tzinfo=tz.UTC)
t1_u = datetime(2011, 11, 6, 6, 30, tzinfo=tz.UTC)
t0_tor = t0_u.astimezone(TOR)
t1_tor = t1_u.astimezone(TOR)
self.assertEqual(t0_tor.replace(tzinfo=None),
datetime(2011, 11, 6, 1, 30))
self.assertEqual(t1_tor.replace(tzinfo=None),
datetime(2011, 11, 6, 1, 30))
self.assertNotEqual(t0_tor.tzname(), t1_tor.tzname())
self.assertEqual(t0_tor.utcoffset(), timedelta(hours=-4.0))
self.assertEqual(t1_tor.utcoffset(), timedelta(hours=-5.0))
def testGapNegativeUTCOffset(self):
# Test that we don't have a problem around gaps.
tzname = self._get_tzname('America/Toronto')
with self._gettz_context(tzname):
TOR = self.gettz(tzname)
t0_u = datetime(2011, 3, 13, 6, 30, tzinfo=tz.UTC)
t1_u = datetime(2011, 3, 13, 7, 30, tzinfo=tz.UTC)
t0 = t0_u.astimezone(TOR)
t1 = t1_u.astimezone(TOR)
self.assertEqual(t0.replace(tzinfo=None),
datetime(2011, 3, 13, 1, 30))
self.assertEqual(t1.replace(tzinfo=None),
datetime(2011, 3, 13, 3, 30))
self.assertNotEqual(t0, t1)
self.assertEqual(t0.utcoffset(), timedelta(hours=-5.0))
self.assertEqual(t1.utcoffset(), timedelta(hours=-4.0))
def testFoldLondon(self):
tzname = self._get_tzname('Europe/London')
with self._gettz_context(tzname):
LON = self.gettz(tzname)
UTC = tz.UTC
t0_u = datetime(2013, 10, 27, 0, 30, tzinfo=UTC) # BST
t1_u = datetime(2013, 10, 27, 1, 30, tzinfo=UTC) # GMT
t0 = t0_u.astimezone(LON)
t1 = t1_u.astimezone(LON)
self.assertEqual(t0.replace(tzinfo=None),
datetime(2013, 10, 27, 1, 30))
self.assertEqual(t1.replace(tzinfo=None),
datetime(2013, 10, 27, 1, 30))
self.assertEqual(t0.utcoffset(), timedelta(hours=1))
self.assertEqual(t1.utcoffset(), timedelta(hours=0))
def testFoldIndependence(self):
tzname = self._get_tzname('America/New_York')
with self._gettz_context(tzname):
NYC = self.gettz(tzname)
UTC = tz.UTC
hour = timedelta(hours=1)
# Firmly 2015-11-01 0:30 EDT-4
pre_dst = datetime(2015, 11, 1, 0, 30, tzinfo=NYC)
# Ambiguous between 2015-11-01 1:30 EDT-4 and 2015-11-01 1:30 EST-5
in_dst = pre_dst + hour
in_dst_tzname_0 = in_dst.tzname() # Stash the tzname - EDT
# Doing the arithmetic in UTC creates a date that is unambiguously
# 2015-11-01 1:30 EDT-5
in_dst_via_utc = (pre_dst.astimezone(UTC) + 2*hour).astimezone(NYC)
# Make sure the dates are actually ambiguous
self.assertEqual(in_dst, in_dst_via_utc)
# Make sure we got the right folding behavior
self.assertNotEqual(in_dst_via_utc.tzname(), in_dst_tzname_0)
# Now check to make sure in_dst's tzname hasn't changed
self.assertEqual(in_dst_tzname_0, in_dst.tzname())
def testInZoneFoldEquality(self):
# Two datetimes in the same zone are considered to be equal if their
# wall times are equal, even if they have different absolute times.
tzname = self._get_tzname('America/New_York')
with self._gettz_context(tzname):
NYC = self.gettz(tzname)
UTC = tz.UTC
dt0 = datetime(2011, 11, 6, 1, 30, tzinfo=NYC)
dt1 = tz.enfold(dt0, fold=1)
# Make sure these actually represent different times
self.assertNotEqual(dt0.astimezone(UTC), dt1.astimezone(UTC))
# Test that they compare equal
self.assertEqual(dt0, dt1)
def _test_ambiguous_time(self, dt, tzid, ambiguous):
# This is a test to check that the individual is_ambiguous values
# on the _tzinfo subclasses work.
tzname = self._get_tzname(tzid)
with self._gettz_context(tzname):
tzi = self.gettz(tzname)
self.assertEqual(tz.datetime_ambiguous(dt, tz=tzi), ambiguous)
def testAmbiguousNegativeUTCOffset(self):
self._test_ambiguous_time(datetime(2015, 11, 1, 1, 30),
'America/New_York', True)
def testAmbiguousPositiveUTCOffset(self):
self._test_ambiguous_time(datetime(2012, 4, 1, 2, 30),
'Australia/Sydney', True)
def testUnambiguousNegativeUTCOffset(self):
self._test_ambiguous_time(datetime(2015, 11, 1, 2, 30),
'America/New_York', False)
def testUnambiguousPositiveUTCOffset(self):
self._test_ambiguous_time(datetime(2012, 4, 1, 3, 30),
'Australia/Sydney', False)
def testUnambiguousGapNegativeUTCOffset(self):
# Imaginary time
self._test_ambiguous_time(datetime(2011, 3, 13, 2, 30),
'America/New_York', False)
def testUnambiguousGapPositiveUTCOffset(self):
# Imaginary time
self._test_ambiguous_time(datetime(2012, 10, 7, 2, 30),
'Australia/Sydney', False)
def _test_imaginary_time(self, dt, tzid, exists):
tzname = self._get_tzname(tzid)
with self._gettz_context(tzname):
tzi = self.gettz(tzname)
self.assertEqual(tz.datetime_exists(dt, tz=tzi), exists)
def testImaginaryNegativeUTCOffset(self):
self._test_imaginary_time(datetime(2011, 3, 13, 2, 30),
'America/New_York', False)
def testNotImaginaryNegativeUTCOffset(self):
self._test_imaginary_time(datetime(2011, 3, 13, 1, 30),
'America/New_York', True)
def testImaginaryPositiveUTCOffset(self):
self._test_imaginary_time(datetime(2012, 10, 7, 2, 30),
'Australia/Sydney', False)
def testNotImaginaryPositiveUTCOffset(self):
self._test_imaginary_time(datetime(2012, 10, 7, 1, 30),
'Australia/Sydney', True)
def testNotImaginaryFoldNegativeUTCOffset(self):
self._test_imaginary_time(datetime(2015, 11, 1, 1, 30),
'America/New_York', True)
def testNotImaginaryFoldPositiveUTCOffset(self):
self._test_imaginary_time(datetime(2012, 4, 1, 3, 30),
'Australia/Sydney', True)
@unittest.skip("Known failure in Python 3.6.")
def testEqualAmbiguousComparison(self):
tzname = self._get_tzname('Australia/Sydney')
with self._gettz_context(tzname):
SYD0 = self.gettz(tzname)
SYD1 = self.gettz(tzname)
t0_u = datetime(2012, 3, 31, 14, 30, tzinfo=tz.UTC) # AEST
t0_syd0 = t0_u.astimezone(SYD0)
t0_syd1 = t0_u.astimezone(SYD1)
# This is considered an "inter-zone comparison" because it's an
# ambiguous datetime.
self.assertEqual(t0_syd0, t0_syd1)
| TzFoldMixin |
python | pennersr__django-allauth | allauth/socialaccount/migrations/0002_token_max_lengths.py | {
"start": 76,
"end": 1340
} | class ____(migrations.Migration):
dependencies = [
("socialaccount", "0001_initial"),
]
operations = [
migrations.AlterField(
model_name="socialaccount",
name="uid",
field=models.CharField(
max_length=getattr(settings, "SOCIALACCOUNT_UID_MAX_LENGTH", 191),
verbose_name="uid",
),
),
migrations.AlterField(
model_name="socialapp",
name="client_id",
field=models.CharField(
help_text="App ID, or consumer key",
max_length=191,
verbose_name="client id",
),
),
migrations.AlterField(
model_name="socialapp",
name="key",
field=models.CharField(
help_text="Key", max_length=191, verbose_name="key", blank=True
),
),
migrations.AlterField(
model_name="socialapp",
name="secret",
field=models.CharField(
help_text="API secret, client secret, or consumer secret",
max_length=191,
verbose_name="secret key",
blank=True,
),
),
]
| Migration |
python | huggingface__transformers | src/transformers/utils/auto_docstring.py | {
"start": 32887,
"end": 81618
} | class ____:
# fmt: off
base_model_prefix = r"""
A string indicating the attribute associated to the base model in derived classes of the same architecture adding modules on top of the base model.
"""
supports_gradient_checkpointing = r"""
Whether the model supports gradient checkpointing or not. Gradient checkpointing is a memory-saving technique that trades compute for memory, by storing only a subset of activations (checkpoints) and recomputing the activations that are not stored during the backward pass.
"""
_no_split_modules = r"""
Layers of modules that should not be split across devices should be added to `_no_split_modules`. This can be useful for modules that contains skip connections or other operations that are not compatible with splitting the module across devices. Setting this attribute will enable the use of `device_map="auto"` in the `from_pretrained` method.
"""
_skip_keys_device_placement = r"""
A list of keys to ignore when moving inputs or outputs between devices when using the `accelerate` library.
"""
_supports_flash_attn = r"""
Whether the model's attention implementation supports FlashAttention.
"""
_supports_sdpa = r"""
Whether the model's attention implementation supports SDPA (Scaled Dot Product Attention).
"""
_supports_flex_attn = r"""
Whether the model's attention implementation supports FlexAttention.
"""
_can_compile_fullgraph = r"""
Whether the model can `torch.compile` fullgraph without graph breaks. Models will auto-compile if this flag is set to `True`
in inference, if a compilable cache is used.
"""
_supports_attention_backend = r"""
Whether the model supports attention interface functions. This flag signal that the model can be used as an efficient backend in TGI and vLLM.
"""
_tied_weights_keys = r"""
A list of `state_dict` keys that are potentially tied to another key in the state_dict.
"""
# fmt: on
ARGS_TO_IGNORE = {"self", "kwargs", "args", "deprecated_arguments"}
def get_indent_level(func):
# Use this instead of `inspect.getsource(func)` as getsource can be very slow
return (len(func.__qualname__.split(".")) - 1) * 4
def equalize_indent(docstring, indent_level):
"""
Adjust the indentation of a docstring to match the specified indent level.
"""
# fully dedent the docstring
docstring = "\n".join([line.lstrip() for line in docstring.splitlines()])
return textwrap.indent(docstring, " " * indent_level)
def set_min_indent(docstring, indent_level):
"""
Adjust the indentation of a docstring to match the specified indent level.
"""
return textwrap.indent(textwrap.dedent(docstring), " " * indent_level)
def parse_shape(docstring):
shape_pattern = re.compile(r"(of shape\s*(?:`.*?`|\(.*?\)))")
match = shape_pattern.search(docstring)
if match:
return " " + match.group(1)
return None
def parse_default(docstring):
default_pattern = re.compile(r"(defaults to \s*[^)]*)")
match = default_pattern.search(docstring)
if match:
return " " + match.group(1)
return None
def parse_docstring(docstring, max_indent_level=0, return_intro=False):
"""
Parse the docstring to extract the Args section and return it as a dictionary.
The docstring is expected to be in the format:
Args:
arg1 (type):
Description of arg1.
arg2 (type):
Description of arg2.
# This function will also return the remaining part of the docstring after the Args section.
Returns:/Example:
...
"""
match = re.search(r"(?m)^([ \t]*)(?=Example|Return)", docstring)
if match:
remainder_docstring = docstring[match.start() :]
docstring = docstring[: match.start()]
else:
remainder_docstring = ""
args_pattern = re.compile(r"(?:Args:)(\n.*)?(\n)?$", re.DOTALL)
args_match = args_pattern.search(docstring)
# still try to find args description in the docstring, if args are not preceded by "Args:"
docstring_intro = None
if args_match:
docstring_intro = docstring[: args_match.start()]
if docstring_intro.split("\n")[-1].strip() == '"""':
docstring_intro = "\n".join(docstring_intro.split("\n")[:-1])
if docstring_intro.split("\n")[0].strip() == 'r"""' or docstring_intro.split("\n")[0].strip() == '"""':
docstring_intro = "\n".join(docstring_intro.split("\n")[1:])
if docstring_intro.strip() == "":
docstring_intro = None
args_section = args_match.group(1).lstrip("\n") if args_match else docstring
if args_section.split("\n")[-1].strip() == '"""':
args_section = "\n".join(args_section.split("\n")[:-1])
if args_section.split("\n")[0].strip() == 'r"""' or args_section.split("\n")[0].strip() == '"""':
args_section = "\n".join(args_section.split("\n")[1:])
args_section = set_min_indent(args_section, 0)
params = {}
if args_section:
param_pattern = re.compile(
# |--- Group 1 ---|| Group 2 ||- Group 3 -||---------- Group 4 ----------|
rf"^\s{{0,{max_indent_level}}}(\w+)\s*\(\s*([^, \)]*)(\s*.*?)\s*\)\s*:\s*((?:(?!\n^\s{{0,{max_indent_level}}}\w+\s*\().)*)",
re.DOTALL | re.MULTILINE,
)
for match in param_pattern.finditer(args_section):
param_name = match.group(1)
param_type = match.group(2)
# param_type = match.group(2).replace("`", "")
additional_info = match.group(3)
optional = "optional" in additional_info
shape = parse_shape(additional_info)
default = parse_default(additional_info)
param_description = match.group(4).strip()
# set first line of param_description to 4 spaces:
param_description = re.sub(r"^", " " * 4, param_description, 1)
param_description = f"\n{param_description}"
params[param_name] = {
"type": param_type,
"description": param_description,
"optional": optional,
"shape": shape,
"default": default,
"additional_info": additional_info,
}
if params and remainder_docstring:
remainder_docstring = "\n" + remainder_docstring
remainder_docstring = set_min_indent(remainder_docstring, 0)
if return_intro:
return params, remainder_docstring, docstring_intro
return params, remainder_docstring
def contains_type(type_hint, target_type) -> tuple[bool, object | None]:
"""
Check if a "nested" type hint contains a specific target type,
return the first-level type containing the target_type if found.
"""
args = get_args(type_hint)
if args == ():
try:
return issubclass(type_hint, target_type), type_hint
except Exception:
return issubclass(type(type_hint), target_type), type_hint
found_type_tuple = [contains_type(arg, target_type)[0] for arg in args]
found_type = any(found_type_tuple)
if found_type:
type_hint = args[found_type_tuple.index(True)]
return found_type, type_hint
def get_model_name(obj):
"""
Get the model name from the file path of the object.
"""
path = inspect.getsourcefile(obj)
if path is None:
return None
if path.split(os.path.sep)[-3] != "models":
return None
file_name = path.split(os.path.sep)[-1]
for file_type in AUTODOC_FILES:
start = file_type.split("*")[0]
end = file_type.split("*")[-1] if "*" in file_type else ""
if file_name.startswith(start) and file_name.endswith(end):
model_name_lowercase = file_name[len(start) : -len(end)]
return model_name_lowercase
print(f"[ERROR] Something went wrong trying to find the model name in the path: {path}")
return "model"
def get_placeholders_dict(placeholders: list, model_name: str) -> dict:
"""
Get the dictionary of placeholders for the given model name.
"""
# import here to avoid circular import
from transformers.models import auto as auto_module
placeholders_dict = {}
for placeholder in placeholders:
# Infer placeholders from the model name and the auto modules
if placeholder in PLACEHOLDER_TO_AUTO_MODULE:
try:
place_holder_value = getattr(
getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE[placeholder][0]),
PLACEHOLDER_TO_AUTO_MODULE[placeholder][1],
).get(model_name, None)
except ImportError:
# In case a library is not installed, we don't want to fail the docstring generation
place_holder_value = None
if place_holder_value is not None:
if isinstance(place_holder_value, (list, tuple)):
place_holder_value = place_holder_value[0]
placeholders_dict[placeholder] = place_holder_value if place_holder_value is not None else placeholder
else:
placeholders_dict[placeholder] = placeholder
return placeholders_dict
def format_args_docstring(docstring, model_name):
"""
Replaces placeholders such as {image_processor_class} in the docstring with the actual values,
deducted from the model name and the auto modules.
"""
# first check if there are any placeholders in the docstring, if not return it as is
placeholders = set(re.findall(r"{(.*?)}", docstring))
if not placeholders:
return docstring
# get the placeholders dictionary for the given model name
placeholders_dict = get_placeholders_dict(placeholders, model_name)
# replace the placeholders in the docstring with the values from the placeholders_dict
for placeholder, value in placeholders_dict.items():
if placeholder is not None:
try:
docstring = docstring.replace(f"{{{placeholder}}}", value)
except Exception:
pass
return docstring
def get_args_doc_from_source(args_classes: object | list[object]) -> dict:
if isinstance(args_classes, (list, tuple)):
args_classes_dict = {}
for args_class in args_classes:
args_classes_dict.update(args_class.__dict__)
return args_classes_dict
return args_classes.__dict__
def get_checkpoint_from_config_class(config_class):
checkpoint = None
# source code of `config_class`
# config_source = inspect.getsource(config_class)
config_source = config_class.__doc__
checkpoints = _re_checkpoint.findall(config_source)
# Each `checkpoint` is a tuple of a checkpoint name and a checkpoint link.
# For example, `('google-bert/bert-base-uncased', 'https://huggingface.co/google-bert/bert-base-uncased')`
for ckpt_name, ckpt_link in checkpoints:
# allow the link to end with `/`
ckpt_link = ckpt_link.removesuffix("/")
# verify the checkpoint name corresponds to the checkpoint link
ckpt_link_from_name = f"https://huggingface.co/{ckpt_name}"
if ckpt_link == ckpt_link_from_name:
checkpoint = ckpt_name
break
return checkpoint
def add_intro_docstring(func, class_name, indent_level=0):
intro_docstring = ""
if func.__name__ == "forward":
intro_docstring = rf"""The [`{class_name}`] forward method, overrides the `__call__` special method.
<Tip>
Although the recipe for forward pass needs to be defined within this function, one should call the [`Module`]
instance afterwards instead of this since the former takes care of running the pre and post processing steps while
the latter silently ignores them.
</Tip>
"""
intro_docstring = equalize_indent(intro_docstring, indent_level + 4)
return intro_docstring
def _get_model_info(func, parent_class):
"""
Extract model information from a function or its parent class.
Args:
func (`function`): The function to extract information from
parent_class (`class`): Optional parent class of the function
"""
# import here to avoid circular import
from transformers.models import auto as auto_module
# Get model name from either parent class or function
if parent_class is not None:
model_name_lowercase = get_model_name(parent_class)
else:
model_name_lowercase = get_model_name(func)
# Normalize model name if needed
if model_name_lowercase and model_name_lowercase not in getattr(
getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE["config_class"][0]),
PLACEHOLDER_TO_AUTO_MODULE["config_class"][1],
):
model_name_lowercase = model_name_lowercase.replace("_", "-")
# Get class name from function's qualified name
class_name = func.__qualname__.split(".")[0]
# Get config class for the model
if model_name_lowercase is None:
config_class = None
else:
try:
config_class = getattr(
getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE["config_class"][0]),
PLACEHOLDER_TO_AUTO_MODULE["config_class"][1],
)[model_name_lowercase]
except KeyError:
if model_name_lowercase in HARDCODED_CONFIG_FOR_MODELS:
config_class = HARDCODED_CONFIG_FOR_MODELS[model_name_lowercase]
else:
config_class = "ModelConfig"
print(
f"[ERROR] Config not found for {model_name_lowercase}. You can manually add it to HARDCODED_CONFIG_FOR_MODELS in utils/auto_docstring.py"
)
return model_name_lowercase, class_name, config_class
def _process_parameter_type(param, param_name, func):
"""
Process and format a parameter's type annotation.
Args:
param (`inspect.Parameter`): The parameter from the function signature
param_name (`str`): The name of the parameter
func (`function`): The function the parameter belongs to
"""
optional = False
if param.annotation != inspect.Parameter.empty:
param_type = param.annotation
if "typing" in str(param_type):
param_type = "".join(str(param_type).split("typing.")).replace("transformers.", "~")
elif hasattr(param_type, "__module__"):
param_type = f"{param_type.__module__.replace('transformers.', '~').replace('builtins', '')}.{param.annotation.__name__}"
if param_type[0] == ".":
param_type = param_type[1:]
else:
if False:
print(
f"[ERROR] {param_type} for {param_name} of {func.__qualname__} in file {func.__code__.co_filename} has an invalid type"
)
if "ForwardRef" in param_type:
param_type = re.sub(r"ForwardRef\('([\w.]+)'\)", r"\1", param_type)
if "Optional" in param_type:
param_type = re.sub(r"Optional\[(.*?)\]", r"\1", param_type)
optional = True
else:
param_type = ""
return param_type, optional
def _get_parameter_info(param_name, documented_params, source_args_dict, param_type, optional):
"""
Get parameter documentation details from the appropriate source.
Tensor shape, optional status and description are taken from the custom docstring in priority if available.
Type is taken from the function signature first, then from the custom docstring if missing from the signature
Args:
param_name (`str`): Name of the parameter
documented_params (`dict`): Dictionary of documented parameters (manually specified in the docstring)
source_args_dict (`dict`): Default source args dictionary to use if not in documented_params
param_type (`str`): Current parameter type (may be updated)
optional (`bool`): Whether the parameter is optional (may be updated)
"""
description = None
shape = None
shape_string = ""
is_documented = True
additional_info = None
optional_string = r", *optional*" if optional else ""
if param_name in documented_params:
# Parameter is documented in the function's docstring
if (
param_type == ""
and documented_params[param_name].get("type", None) is not None
or documented_params[param_name]["additional_info"]
):
param_type = documented_params[param_name]["type"]
optional = documented_params[param_name]["optional"]
shape = documented_params[param_name]["shape"]
shape_string = shape if shape else ""
additional_info = documented_params[param_name]["additional_info"] or ""
description = f"{documented_params[param_name]['description']}\n"
elif param_name in source_args_dict:
# Parameter is documented in ModelArgs or ImageProcessorArgs
shape = source_args_dict[param_name]["shape"]
shape_string = " " + shape if shape else ""
description = source_args_dict[param_name]["description"]
additional_info = source_args_dict[param_name].get("additional_info", None)
if additional_info:
additional_info = shape_string + optional_string + ", " + additional_info
else:
# Parameter is not documented
is_documented = False
return param_type, optional_string, shape_string, additional_info, description, is_documented
def _process_regular_parameters(
sig, func, class_name, documented_params, indent_level, undocumented_parameters, source_args_dict, parent_class
):
"""
Process all regular parameters (not kwargs parameters) from the function signature.
Args:
sig (`inspect.Signature`): Function signature
func (`function`): Function the parameters belong to
class_name (`str`): Name of the class
documented_params (`dict`): Dictionary of parameters that are already documented
indent_level (`int`): Indentation level
undocumented_parameters (`list`): List to append undocumented parameters to
"""
docstring = ""
source_args_dict = (
get_args_doc_from_source([ModelArgs, ImageProcessorArgs]) if source_args_dict is None else source_args_dict
)
missing_args = {}
for param_name, param in sig.parameters.items():
# Skip parameters that should be ignored
if (
param_name in ARGS_TO_IGNORE
or param.kind == inspect.Parameter.VAR_POSITIONAL
or param.kind == inspect.Parameter.VAR_KEYWORD
):
continue
# Process parameter type and optional status
param_type, optional = _process_parameter_type(param, param_name, func)
# Check for default value
param_default = ""
if param.default != inspect._empty and param.default is not None:
param_default = f", defaults to `{str(param.default)}`"
param_type, optional_string, shape_string, additional_info, description, is_documented = _get_parameter_info(
param_name, documented_params, source_args_dict, param_type, optional
)
if is_documented:
if param_name == "config":
if param_type == "":
param_type = f"[`{class_name}`]"
else:
param_type = f"[`{param_type.split('.')[-1]}`]"
# elif param_type == "" and False: # TODO: Enforce typing for all parameters
# print(f"[ERROR] {param_name} for {func.__qualname__} in file {func.__code__.co_filename} has no type")
param_type = param_type if "`" in param_type else f"`{param_type}`"
# Format the parameter docstring
if additional_info:
param_docstring = f"{param_name} ({param_type}{additional_info}):{description}"
else:
param_docstring = (
f"{param_name} ({param_type}{shape_string}{optional_string}{param_default}):{description}"
)
docstring += set_min_indent(
param_docstring,
indent_level + 8,
)
else:
missing_args[param_name] = {
"type": param_type if param_type else "<fill_type>",
"optional": optional,
"shape": shape_string,
"description": description if description else "\n <fill_description>",
"default": param_default,
}
undocumented_parameters.append(
f"[ERROR] `{param_name}` is part of {func.__qualname__}'s signature, but not documented. Make sure to add it to the docstring of the function in {func.__code__.co_filename}."
)
return docstring, missing_args
def find_sig_line(lines, line_end):
parenthesis_count = 0
sig_line_end = line_end
found_sig = False
while not found_sig:
for char in lines[sig_line_end]:
if char == "(":
parenthesis_count += 1
elif char == ")":
parenthesis_count -= 1
if parenthesis_count == 0:
found_sig = True
break
sig_line_end += 1
return sig_line_end
def _process_kwargs_parameters(sig, func, parent_class, documented_kwargs, indent_level, undocumented_parameters):
    """
    Process **kwargs parameters if needed.

    When the function (or its parent class) is registered for kwargs unrolling, a typed
    ``**kwargs`` annotation (``Unpack[SomeKwargsTypedDict]``) is expanded into one
    documented entry per field of the annotated TypedDict.

    Args:
        sig (`inspect.Signature`): Function signature
        func (`function`): Function the parameters belong to
        parent_class (`class`): Parent class of the function
        documented_kwargs (`dict`): Dictionary of kwargs that are already documented
        indent_level (`int`): Indentation level
        undocumented_parameters (`list`): List to append undocumented parameters to

    Returns:
        `str`: Docstring fragment for the unrolled kwargs (empty string if there is
        nothing to unroll).
    """
    docstring = ""
    # Fallback descriptions for well-known kwargs come from the image-processor args catalog.
    source_args_dict = get_args_doc_from_source(ImageProcessorArgs)
    # Check if we need to add typed kwargs description to the docstring
    unroll_kwargs = func.__name__ in UNROLL_KWARGS_METHODS
    if not unroll_kwargs and parent_class is not None:
        # Check if the function has a parent class with unroll kwargs
        unroll_kwargs = any(
            unroll_kwargs_class in parent_class.__name__ for unroll_kwargs_class in UNROLL_KWARGS_CLASSES
        )
    if unroll_kwargs:
        # get all unpackable "kwargs" parameters
        kwargs_parameters = [
            kwargs_param
            for _, kwargs_param in sig.parameters.items()
            if kwargs_param.kind == inspect.Parameter.VAR_KEYWORD
        ]
        for kwarg_param in kwargs_parameters:
            # If kwargs not typed, skip
            if kwarg_param.annotation == inspect.Parameter.empty:
                continue
            # Extract documentation for kwargs
            # `annotation.__args__[0]` is the TypedDict inside `Unpack[...]`; its own
            # docstring documents the individual fields.
            kwargs_documentation = kwarg_param.annotation.__args__[0].__doc__
            if kwargs_documentation is not None:
                documented_kwargs = parse_docstring(kwargs_documentation)[0]
            # Process each kwarg parameter
            for param_name, param_type_annotation in kwarg_param.annotation.__args__[0].__annotations__.items():
                param_type = str(param_type_annotation)
                optional = False
                # Process parameter type: strip "typing." prefixes and shorten
                # "transformers." to "~" for doc cross-references.
                if "typing" in param_type:
                    param_type = "".join(param_type.split("typing.")).replace("transformers.", "~")
                else:
                    param_type = f"{param_type.replace('transformers.', '~').replace('builtins', '')}.{param_name}"
                if "ForwardRef" in param_type:
                    # Unwrap ForwardRef('X') down to the bare name X.
                    param_type = re.sub(r"ForwardRef\('([\w.]+)'\)", r"\1", param_type)
                if "Optional" in param_type:
                    param_type = re.sub(r"Optional\[(.*?)\]", r"\1", param_type)
                    optional = True
                # Check for default value: the class attribute of the same name, if any.
                param_default = ""
                if parent_class is not None:
                    param_default = str(getattr(parent_class, param_name, ""))
                    param_default = f", defaults to `{param_default}`" if param_default != "" else ""
                param_type, optional_string, shape_string, additional_info, description, is_documented = (
                    _get_parameter_info(param_name, documented_kwargs, source_args_dict, param_type, optional)
                )
                if is_documented:
                    # Check if type is missing
                    if param_type == "":
                        print(
                            f"[ERROR] {param_name} for {kwarg_param.annotation.__args__[0].__qualname__} in file {func.__code__.co_filename} has no type"
                        )
                    param_type = param_type if "`" in param_type else f"`{param_type}`"
                    # Format the parameter docstring
                    if additional_info:
                        docstring += set_min_indent(
                            f"{param_name} ({param_type}{additional_info}):{description}",
                            indent_level + 8,
                        )
                    else:
                        docstring += set_min_indent(
                            f"{param_name} ({param_type}{shape_string}{optional_string}{param_default}):{description}",
                            indent_level + 8,
                        )
                else:
                    undocumented_parameters.append(
                        f"[ERROR] `{param_name}` is part of {kwarg_param.annotation.__args__[0].__qualname__}, but not documented. Make sure to add it to the docstring of the function in {func.__code__.co_filename}."
                    )
    return docstring
def _process_parameters_section(
    func_documentation, sig, func, class_name, model_name_lowercase, parent_class, indent_level, source_args_dict
):
    """
    Assemble the ``Args:`` section of the generated docstring.

    Regular signature parameters are rendered first, followed by any unrolled typed
    ``**kwargs``. Parameters found in the signature but missing from the documentation
    are reported on stdout.

    Args:
        func_documentation (`str`): Existing function documentation (manually specified in the docstring)
        sig (`inspect.Signature`): Function signature
        func (`function`): Function the parameters belong to
        class_name (`str`): Name of the class the function belongs to
        model_name_lowercase (`str`): Lowercase model name
        parent_class (`class`): Parent class of the function (if any)
        indent_level (`int`): Indentation level

    Returns:
        `str`: The complete ``Args:`` section.
    """
    undocumented = []
    manually_documented = {}
    # Pull manually written parameter docs out of the existing docstring, if any.
    if func_documentation is not None:
        manually_documented, func_documentation = parse_docstring(func_documentation)

    # Regular parameters first; the missing-args report is intentionally discarded here.
    regular_docs, _missing_args = _process_regular_parameters(
        sig, func, class_name, manually_documented, indent_level, undocumented, source_args_dict, parent_class
    )
    # Then any typed **kwargs that must be unrolled into individual entries.
    kwargs_docs = _process_kwargs_parameters(sig, func, parent_class, {}, indent_level, undocumented)

    section = set_min_indent("Args:\n", indent_level + 4) + regular_docs + kwargs_docs

    # Report undocumented parameters
    if undocumented:
        print("\n".join(undocumented))
    return section
def _process_returns_section(func_documentation, sig, config_class, indent_level):
    """
    Build the ``Returns:`` section of the generated docstring.

    A manually written ``Return``/``Returns`` section in the existing documentation takes
    precedence; otherwise, a section is generated from the function's return annotation
    when available.

    Args:
        func_documentation (`str`): Existing function documentation (manually specified in the docstring)
        sig (`inspect.Signature`): Function signature
        config_class (`str`): Config class for the model
        indent_level (`int`): Indentation level

    Returns:
        `tuple[str, str]`: The formatted returns section (possibly empty) and the
        remaining manual documentation with the consumed part removed.
    """
    return_docstring = ""
    manual_start = None
    if func_documentation is not None:
        manual_start = re.search(r"(?m)^([ \t]*)(?=Return)", func_documentation)

    if manual_start is not None:
        # Manual section: it runs until the Example section if one exists, else to the end.
        example_start = re.search(r"(?m)^([ \t]*)(?=Example)", func_documentation)
        if example_start is None:
            return_docstring = func_documentation[manual_start.start() :]
            func_documentation = ""
        else:
            return_docstring = func_documentation[manual_start.start() : example_start.start()]
            func_documentation = func_documentation[example_start.start() :]
        return_docstring = set_min_indent(return_docstring, indent_level + 4)
    elif sig.return_annotation is not None and sig.return_annotation != inspect._empty:
        # Generated section: derive it from the return annotation when it involves a ModelOutput.
        add_intro, return_annotation = contains_type(sig.return_annotation, ModelOutput)
        generated = _prepare_output_docstrings(return_annotation, config_class, add_intro=add_intro)
        return_docstring = set_min_indent(generated.replace("typing.", ""), indent_level + 4)

    return return_docstring, func_documentation
def _process_example_section(
    func_documentation, func, parent_class, class_name, model_name_lowercase, config_class, checkpoint, indent_level
):
    """
    Process the example section of the docstring.

    Reuses any manually written ``Example`` section; otherwise generates a sample snippet
    from the model's task (inferred from the class-name suffix) or, failing that, from the
    pipeline mappings, using a checkpoint resolved from the model's config class.

    Args:
        func_documentation (`str`): Existing function documentation (manually specified in the docstring)
        func (`function`): Function being processed
        parent_class (`class`): Parent class of the function
        class_name (`str`): Name of the class
        model_name_lowercase (`str`): Lowercase model name
        config_class (`str`): Config class for the model
        checkpoint: Checkpoint to use in examples
        indent_level (`int`): Indentation level

    Returns:
        `str`: The formatted example section (empty if no example applies).
    """
    # Import here to avoid circular import
    from transformers.models import auto as auto_module

    example_docstring = ""
    # Use existing example section if available
    if func_documentation is not None and (match := re.search(r"(?m)^([ \t]*)(?=Example)", func_documentation)):
        example_docstring = func_documentation[match.start() :]
        example_docstring = "\n" + set_min_indent(example_docstring, indent_level + 4)
    # No examples for __init__ methods or if the class is not a model
    elif parent_class is None and model_name_lowercase is not None:
        # The task is inferred from the class-name suffix (e.g. "...ForCausalLM").
        task = rf"({'|'.join(PT_SAMPLE_DOCSTRINGS.keys())})"
        model_task = re.search(task, class_name)
        CONFIG_MAPPING = auto_module.configuration_auto.CONFIG_MAPPING
        # Get checkpoint example
        if (checkpoint_example := checkpoint) is None:
            try:
                checkpoint_example = get_checkpoint_from_config_class(CONFIG_MAPPING[model_name_lowercase])
            except KeyError:
                # For models with inconsistent lowercase model name
                if model_name_lowercase in HARDCODED_CONFIG_FOR_MODELS:
                    CONFIG_MAPPING_NAMES = auto_module.configuration_auto.CONFIG_MAPPING_NAMES
                    config_class_name = HARDCODED_CONFIG_FOR_MODELS[model_name_lowercase]
                    if config_class_name in CONFIG_MAPPING_NAMES.values():
                        # Reverse lookup: config class name -> auto-mapping model key.
                        model_name_for_auto_config = [
                            k for k, v in CONFIG_MAPPING_NAMES.items() if v == config_class_name
                        ][0]
                        if model_name_for_auto_config in CONFIG_MAPPING:
                            checkpoint_example = get_checkpoint_from_config_class(
                                CONFIG_MAPPING[model_name_for_auto_config]
                            )
        # Add example based on model task
        if model_task is not None:
            if checkpoint_example is not None:
                example_annotation = ""
                task = model_task.group()
                example_annotation = PT_SAMPLE_DOCSTRINGS[task].format(
                    model_class=class_name,
                    checkpoint=checkpoint_example,
                    expected_output="...",
                    expected_loss="...",
                    qa_target_start_index=14,
                    qa_target_end_index=15,
                    mask="<mask>",
                )
                example_docstring = set_min_indent(example_annotation, indent_level + 4)
            else:
                print(
                    f"[ERROR] No checkpoint found for {class_name}.{func.__name__}. Please add a `checkpoint` arg to `auto_docstring` or add one in {config_class}'s docstring"
                )
        else:
            # Check if the model is in a pipeline to get an example
            # NOTE(review): `checkpoint_example` may still be None on this path and would
            # be formatted as the literal "None" in the example — confirm this is intended.
            for name_model_list_for_task in MODELS_TO_PIPELINE:
                model_list_for_task = getattr(auto_module.modeling_auto, name_model_list_for_task)
                if class_name in model_list_for_task.values():
                    pipeline_name = MODELS_TO_PIPELINE[name_model_list_for_task]
                    example_annotation = PIPELINE_TASKS_TO_SAMPLE_DOCSTRINGS[pipeline_name].format(
                        model_class=class_name,
                        checkpoint=checkpoint_example,
                        expected_output="...",
                        expected_loss="...",
                        qa_target_start_index=14,
                        qa_target_end_index=15,
                    )
                    example_docstring = set_min_indent(example_annotation, indent_level + 4)
                    break
    return example_docstring
def auto_method_docstring(
    func, parent_class=None, custom_intro=None, custom_args=None, checkpoint=None, source_args_dict=None
):
    """
    Wrapper that automatically generates docstring.

    Assembles an intro, an ``Args:`` section (from the signature plus any manual
    documentation), a ``Returns:`` section and an ``Example`` section, formats the
    model-specific placeholders, and attaches the result to `func.__doc__`.

    Args:
        func (`function`): Function to document; its `__doc__` is overwritten in place.
        parent_class (`class`, *optional*): Class the function belongs to, if any.
        custom_intro (`str`, *optional*): Intro text replacing the default intro.
        custom_args (`str`, *optional*): Extra argument documentation, prepended to the
            function's existing docstring.
        checkpoint (`str`, *optional*): Checkpoint to use in generated examples.
        source_args_dict (`dict`, *optional*): Predefined argument descriptions to draw from.

    Returns:
        `function`: `func`, with its `__doc__` replaced by the generated docstring.
    """
    # Use inspect to retrieve the method's signature
    sig = inspect.signature(func)
    # Indent relative to the enclosing class when documenting a method.
    indent_level = get_indent_level(func) if not parent_class else get_indent_level(parent_class)
    # Get model information
    model_name_lowercase, class_name, config_class = _get_model_info(func, parent_class)

    func_documentation = func.__doc__
    if custom_args is not None and func_documentation is not None:
        func_documentation = "\n" + set_min_indent(custom_args.strip("\n"), 0) + "\n" + func_documentation
    elif custom_args is not None:
        func_documentation = "\n" + set_min_indent(custom_args.strip("\n"), 0)

    # Add intro to the docstring before args description if needed
    if custom_intro is not None:
        docstring = set_min_indent(custom_intro, indent_level + 4)
        # Ensure the intro is newline-terminated so the Args section starts on its own
        # line. Note: checking `docstring.strip().endswith("\n")` would be a tautology
        # (strip() removes trailing newlines), unconditionally appending an extra blank
        # line after intros that already end with one.
        if not docstring.endswith("\n"):
            docstring += "\n"
    else:
        docstring = add_intro_docstring(func, class_name=class_name, indent_level=indent_level)

    # Process Parameters section
    docstring += _process_parameters_section(
        func_documentation, sig, func, class_name, model_name_lowercase, parent_class, indent_level, source_args_dict
    )
    # Process Returns section (may consume part of the manual documentation)
    return_docstring, func_documentation = _process_returns_section(
        func_documentation, sig, config_class, indent_level
    )
    docstring += return_docstring
    # Process Example section
    example_docstring = _process_example_section(
        func_documentation,
        func,
        parent_class,
        class_name,
        model_name_lowercase,
        config_class,
        checkpoint,
        indent_level,
    )
    docstring += example_docstring
    # Format the docstring with the placeholders
    docstring = format_args_docstring(docstring, model_name_lowercase)
    # Assign the dynamically generated docstring to the wrapper function
    func.__doc__ = docstring
    return func
def auto_class_docstring(cls, custom_intro=None, custom_args=None, checkpoint=None):
    """
    Wrapper that automatically generates a docstring for classes based on their attributes and methods.

    Args:
        cls (`class`): Class to document; its `__doc__` is overwritten in place.
        custom_intro (`str`, *optional*): Intro text replacing the registered class intro.
        custom_args (`str`, *optional*): Extra argument documentation.
        checkpoint (`str`, *optional*): Checkpoint to use in generated examples.

    Returns:
        `class`: `cls`, with its `__doc__` replaced by the generated docstring.
    """
    # import here to avoid circular import
    from transformers.models import auto as auto_module

    is_dataclass = False
    docstring_init = ""
    docstring_args = ""
    if "PreTrainedModel" in (x.__name__ for x in cls.__mro__):
        # Models: document the class through its __init__ signature; the init docstring's
        # "Args:" header becomes "Parameters:" at class level.
        docstring_init = auto_method_docstring(
            cls.__init__, parent_class=cls, custom_args=custom_args, checkpoint=checkpoint
        ).__doc__.replace("Args:", "Parameters:")
    elif "ModelOutput" in (x.__name__ for x in cls.__mro__):
        # We have a data class
        is_dataclass = True
        doc_class = cls.__doc__
        # The class's own docstring serves as the manual args documentation by default.
        if custom_args is None and doc_class:
            custom_args = doc_class
        docstring_args = auto_method_docstring(
            cls.__init__,
            parent_class=cls,
            custom_args=custom_args,
            checkpoint=checkpoint,
            source_args_dict=get_args_doc_from_source(ModelOutputArgs),
        ).__doc__
    indent_level = get_indent_level(cls)
    model_name_lowercase = get_model_name(cls)
    model_name_title = " ".join([k.title() for k in model_name_lowercase.split("_")]) if model_name_lowercase else None

    # Some models are registered in the auto mapping under a dashed key; fall back to it.
    if model_name_lowercase and model_name_lowercase not in getattr(
        getattr(auto_module, PLACEHOLDER_TO_AUTO_MODULE["config_class"][0]),
        PLACEHOLDER_TO_AUTO_MODULE["config_class"][1],
    ):
        model_name_lowercase = model_name_lowercase.replace("_", "-")

    # Match the class-name suffix (e.g. "Model", "ForCausalLM") against the registered intros.
    name = re.findall(rf"({'|'.join(ClassDocstring.__dict__.keys())})$", cls.__name__)
    if name == [] and custom_intro is None and not is_dataclass:
        raise ValueError(
            f"`{cls.__name__}` is not registered in the auto doc. Here are the available classes: {ClassDocstring.__dict__.keys()}.\n"
            "Add a `custom_intro` to the decorator if you want to use `auto_docstring` on a class not registered in the auto doc."
        )
    if name != [] or custom_intro is not None or is_dataclass:
        name = name[0] if name else None
        if custom_intro is not None:
            pre_block = equalize_indent(custom_intro, indent_level)
            if not pre_block.endswith("\n"):
                pre_block += "\n"
        elif model_name_title is None or name is None:
            pre_block = ""
        else:
            pre_block = getattr(ClassDocstring, name).format(model_name=model_name_title)
        # Start building the docstring
        docstring = set_min_indent(f"{pre_block}", indent_level) if len(pre_block) else ""
        # Model subclasses (other than the base) also get the generic PreTrainedModel blurb.
        if name != "PreTrainedModel" and "PreTrainedModel" in (x.__name__ for x in cls.__mro__):
            docstring += set_min_indent(f"{ClassDocstring.PreTrainedModel}", indent_level)
        # Add the __init__ docstring
        if docstring_init:
            docstring += set_min_indent(f"\n{docstring_init}", indent_level)
        elif is_dataclass:
            # No init function, we have a data class
            docstring += docstring_args if docstring_args else "\nArgs:\n"
            source_args_dict = get_args_doc_from_source(ModelOutputArgs)
            doc_class = cls.__doc__ if cls.__doc__ else ""
            documented_kwargs = parse_docstring(doc_class)[0]
            # Document the dataclass fields from the class annotations.
            for param_name, param_type_annotation in cls.__annotations__.items():
                param_type = str(param_type_annotation)
                optional = False
                # Process parameter type: strip "typing." prefixes and shorten
                # "transformers." to "~" for doc cross-references.
                if "typing" in param_type:
                    param_type = "".join(param_type.split("typing.")).replace("transformers.", "~")
                else:
                    param_type = f"{param_type.replace('transformers.', '~').replace('builtins', '')}.{param_name}"
                if "ForwardRef" in param_type:
                    param_type = re.sub(r"ForwardRef\('([\w.]+)'\)", r"\1", param_type)
                if "Optional" in param_type:
                    param_type = re.sub(r"Optional\[(.*?)\]", r"\1", param_type)
                    optional = True
                # Check for default value: the class attribute of the same name, if any.
                param_default = ""
                param_default = str(getattr(cls, param_name, ""))
                param_default = f", defaults to `{param_default}`" if param_default != "" else ""
                param_type, optional_string, shape_string, additional_info, description, is_documented = (
                    _get_parameter_info(param_name, documented_kwargs, source_args_dict, param_type, optional)
                )
                if is_documented:
                    # Check if type is missing
                    if param_type == "":
                        # NOTE(review): classes have no `__code__` attribute, so this print
                        # would raise AttributeError if ever reached — confirm the intended
                        # attribute (e.g. `cls.__init__.__code__`).
                        print(
                            f"[ERROR] {param_name} for {cls.__qualname__} in file {cls.__code__.co_filename} has no type"
                        )
                    param_type = param_type if "`" in param_type else f"`{param_type}`"
                    # Format the parameter docstring
                    if additional_info:
                        docstring += set_min_indent(
                            f"{param_name} ({param_type}{additional_info}):{description}",
                            indent_level + 8,
                        )
                    else:
                        docstring += set_min_indent(
                            f"{param_name} ({param_type}{shape_string}{optional_string}{param_default}):{description}",
                            indent_level + 8,
                        )
        # TODO (Yoni): Add support for Attributes section in docs
    else:
        # Effectively unreachable: the only combination that makes the condition above
        # False is the one that already raised the ValueError.
        print(
            f"You used `@auto_class_docstring` decorator on `{cls.__name__}` but this class is not part of the AutoMappings. Remove the decorator"
        )
    # Assign the dynamically generated docstring to the wrapper class
    cls.__doc__ = docstring
    return cls
def auto_docstring(obj=None, *, custom_intro=None, custom_args=None, checkpoint=None):
    r"""
    Automatically generates comprehensive docstrings for model classes and methods in the Transformers library.

    This decorator reduces boilerplate by automatically including standard argument descriptions while allowing
    overrides to add new or custom arguments. It inspects function signatures, retrieves predefined docstrings
    for common arguments (like `input_ids`, `attention_mask`, etc.), and generates complete documentation
    including examples and return value descriptions.

    For complete documentation and examples, read this [guide](https://huggingface.co/docs/transformers/auto_docstring).

    Examples of usage:

    Basic usage (no parameters):

    ```python
    @auto_docstring
    class MyAwesomeModel(PreTrainedModel):
        def __init__(self, config, custom_parameter: int = 10):
            r'''
            custom_parameter (`int`, *optional*, defaults to 10):
                Description of the custom parameter for MyAwesomeModel.
            '''
            super().__init__(config)
            self.custom_parameter = custom_parameter
    ```

    Using `custom_intro` with a class:

    ```python
    @auto_docstring(
        custom_intro="This model implements a novel attention mechanism for improved performance."
    )
    class MySpecialModel(PreTrainedModel):
        def __init__(self, config, attention_type: str = "standard"):
            r'''
            attention_type (`str`, *optional*, defaults to "standard"):
                Type of attention mechanism to use.
            '''
            super().__init__(config)
    ```

    Using `custom_intro` with a method, and specify custom arguments and example directly in the docstring:

    ```python
    @auto_docstring(
        custom_intro="Performs forward pass with enhanced attention computation."
    )
    def forward(
        self,
        input_ids: Optional[torch.Tensor] = None,
        attention_mask: Optional[torch.Tensor] = None,
    ):
        r'''
        custom_parameter (`int`, *optional*, defaults to 10):
            Description of the custom parameter for MyAwesomeModel.

        Example:

        ```python
        >>> model = MyAwesomeModel(config)
        >>> model.forward(input_ids=torch.tensor([1, 2, 3]), attention_mask=torch.tensor([1, 1, 1]))
        ```
        '''
    ```

    Using `custom_args` to define reusable arguments:

    ```python
    VISION_ARGS = r'''
    pixel_values (`torch.FloatTensor`, *optional*):
        Pixel values of the input images.
    image_features (`torch.FloatTensor`, *optional*):
        Pre-computed image features for efficient processing.
    '''

    @auto_docstring(custom_args=VISION_ARGS)
    def encode_images(self, pixel_values=None, image_features=None):
        # ... method implementation
    ```

    Combining `custom_intro` and `custom_args`:

    ```python
    MULTIMODAL_ARGS = r'''
    vision_features (`torch.FloatTensor`, *optional*):
        Pre-extracted vision features from the vision encoder.
    fusion_strategy (`str`, *optional*, defaults to "concat"):
        Strategy for fusing text and vision modalities.
    '''

    @auto_docstring(
        custom_intro="Processes multimodal inputs combining text and vision.",
        custom_args=MULTIMODAL_ARGS
    )
    def forward(
        self,
        input_ids,
        attention_mask=None,
        vision_features=None,
        fusion_strategy="concat"
    ):
        # ... multimodal processing
    ```

    Using with ModelOutput classes:

    ```python
    @dataclass
    @auto_docstring(
        custom_intro="Custom model outputs with additional fields."
    )
    class MyModelOutput(ImageClassifierOutput):
        r'''
        loss (`torch.FloatTensor`, *optional*):
            The loss of the model.
        custom_field (`torch.FloatTensor` of shape `(batch_size, hidden_size)`, *optional*):
            A custom output field specific to this model.
        '''

        # Standard fields like hidden_states, logits, attentions etc. can be automatically documented
        # However, given that the loss docstring is often different per model, you should document it above
        loss: Optional[torch.FloatTensor] = None
        logits: Optional[torch.FloatTensor] = None
        hidden_states: Optional[tuple[torch.FloatTensor, ...]] = None
        attentions: Optional[tuple[torch.FloatTensor, ...]] = None
        custom_field: Optional[torch.FloatTensor] = None
    ```

    Args:
        custom_intro (`str`, *optional*):
            Custom introduction text to add to the docstring. This replaces the default
            introduction text generated by the decorator before the Args section. Use this to describe what
            makes your model or method special.
        custom_args (`str`, *optional*):
            Custom argument documentation in docstring format. This allows you to define
            argument descriptions once and reuse them across multiple methods. The format should follow the
            standard docstring convention: `arg_name (`type`, *optional*, defaults to `value`): Description.`
        checkpoint (`str`, *optional*):
            Checkpoint name to use in examples within the docstring. This is typically
            automatically inferred from the model configuration class, but can be overridden if needed for
            custom examples.

    Note:
        - Standard arguments (`input_ids`, `attention_mask`, `pixel_values`, etc.) are automatically documented
          from predefined descriptions and should not be redefined unless their behavior differs in your model.
        - New or custom arguments should be documented in the method's docstring using the `r''' '''` block
          or passed via the `custom_args` parameter.
        - For model classes, the decorator derives parameter descriptions from the `__init__` method's signature
          and docstring.
        - Return value documentation is automatically generated for methods that return ModelOutput subclasses.
    """

    def auto_docstring_decorator(obj):
        # Methods have a dotted qualname ("Class.method"); top-level classes and
        # functions do not, which is how we route to the right generator.
        # NOTE(review): a nested class also has a dotted qualname and would be routed
        # to `auto_method_docstring` — confirm this is intended.
        if len(obj.__qualname__.split(".")) > 1:
            return auto_method_docstring(
                obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint
            )
        else:
            return auto_class_docstring(obj, custom_args=custom_args, custom_intro=custom_intro, checkpoint=checkpoint)

    # Support both bare usage (`@auto_docstring`) and parameterized usage
    # (`@auto_docstring(custom_intro=...)`, which returns the decorator itself).
    if obj:
        return auto_docstring_decorator(obj)
    return auto_docstring_decorator
| ClassAttrs |
python | huggingface__transformers | src/transformers/models/ernie4_5/modeling_ernie4_5.py | {
"start": 12423,
"end": 13152
} | class ____(nn.Module):
def __init__(self, hidden_size, eps=1e-6):
"""
Ernie4_5RMSNorm is equivalent to T5LayerNorm
"""
super().__init__()
self.weight = nn.Parameter(torch.ones(hidden_size))
self.variance_epsilon = eps
def forward(self, hidden_states):
input_dtype = hidden_states.dtype
hidden_states = hidden_states.to(torch.float32)
variance = hidden_states.pow(2).mean(-1, keepdim=True)
hidden_states = hidden_states * torch.rsqrt(variance + self.variance_epsilon)
return self.weight * hidden_states.to(input_dtype)
def extra_repr(self):
return f"{tuple(self.weight.shape)}, eps={self.variance_epsilon}"
| Ernie4_5RMSNorm |
python | marshmallow-code__marshmallow | tests/test_decorators.py | {
"start": 7876,
"end": 8109
} | class ____(Schema):
foo = fields.Int()
@validates("foo")
def validate_foo(self, value, **kwargs):
if value != 42:
raise ValidationError("The answer to life the universe and everything.")
| ValidatesSchema |
python | py-pdf__pypdf | pypdf/filters.py | {
"start": 18440,
"end": 18939
} | class ____:
@staticmethod
def decode(
data: bytes,
decode_parms: Optional[DictionaryObject] = None,
**kwargs: Any,
) -> bytes:
"""
Decompresses data encoded using the wavelet-based JPEG 2000 standard,
reproducing the original image data.
Args:
data: text to decode.
decode_parms: this filter does not use parameters.
Returns:
decoded data.
"""
return data
@dataclass
| JPXDecode |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 89074,
"end": 89930
} | class ____(sgqlc.types.Enum):
"""The access level to a repository
Enumeration Choices:
* `ADMIN`: Can read, clone, and push to this repository. Can also
manage issues, pull requests, and repository settings, including
adding collaborators
* `MAINTAIN`: Can read, clone, and push to this repository. They
can also manage issues, pull requests, and some repository
settings
* `READ`: Can read and clone this repository. Can also open and
comment on issues and pull requests
* `TRIAGE`: Can read and clone this repository. Can also manage
issues and pull requests
* `WRITE`: Can read, clone, and push to this repository. Can also
manage issues and pull requests
"""
__schema__ = github_schema
__choices__ = ("ADMIN", "MAINTAIN", "READ", "TRIAGE", "WRITE")
| RepositoryPermission |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/orm/strategies.py | {
"start": 17560,
"end": 18317
} | class ____:
"""serializable loader object used by DeferredColumnLoader"""
def __init__(self, key: str, raiseload: bool = False):
self.key = key
self.raiseload = raiseload
def __call__(self, state, passive=attributes.PASSIVE_OFF):
key = self.key
localparent = state.manager.mapper
prop = localparent._props[key]
if self.raiseload:
strategy_key = (
("deferred", True),
("instrument", True),
("raiseload", True),
)
else:
strategy_key = (("deferred", True), ("instrument", True))
strategy = prop._get_strategy(strategy_key)
return strategy._load_for_state(state, passive)
| _LoadDeferredColumns |
python | sympy__sympy | sympy/assumptions/predicates/matrices.py | {
"start": 10477,
"end": 10972
} | class ____(Predicate):
"""
Normal matrix predicate.
A matrix is normal if it commutes with its conjugate transpose.
Examples
========
>>> from sympy import Q, ask, MatrixSymbol
>>> X = MatrixSymbol('X', 4, 4)
>>> ask(Q.normal(X), Q.unitary(X))
True
References
==========
.. [1] https://en.wikipedia.org/wiki/Normal_matrix
"""
name = "normal"
handler = Dispatcher("NormalHandler", doc="Predicate fore key 'normal'.")
| NormalPredicate |
python | pytorch__pytorch | test/functorch/test_control_flow.py | {
"start": 205446,
"end": 208803
} | class ____(TestCase):
def setUp(self):
torch._dynamo.reset()
super().setUp()
def _check_tracing(self, fn, args, allow_non_fake_inputs=False):
graphs = {}
eager_res = fn(*args)
for tracing_mode in ["symbolic", "real", "fake"]:
graph = make_fx(
fn,
tracing_mode=tracing_mode,
_allow_non_fake_inputs=allow_non_fake_inputs,
)(*args)
graphs[tracing_mode] = graph
self.assertEqual(graph(*args), eager_res)
return graphs
def _check_compile(self, fn, args, *, dynamic=False, backend="eager"):
eager_res = fn(*args)
compiled_fn = torch.compile(fn, backend=backend, dynamic=dynamic)
self.assertEqual(compiled_fn(*args), eager_res)
def _check_export(self, fn, args, *, strict=False, dynamic_shapes=None):
eg_out = fn(*args)
with torch._export.config.patch(use_new_tracer_experimental=True):
ep = torch.export.export(
fn, args, strict=strict, dynamic_shapes=dynamic_shapes
)
ep_out = ep.module()(*args)
self.assertEqual(eg_out, ep_out)
return ep
def test_cond_traced_not_nested(self):
def true_fn(x):
return x.sin()
def false_fn(x):
return x.cos()
def f(x, y):
return cond(y, true_fn, false_fn, [x])
x = torch.randn(4)
graph = make_fx(f)(x, torch.tensor(False))
result_true = graph.forward(x, torch.tensor(True))
result_false = graph.forward(x, torch.tensor(False))
self.assertFalse(torch.allclose(result_true, result_false))
self.assertEqual(result_true, torch.sin(x))
self.assertEqual(result_false, torch.cos(x))
graph = make_fx(f, tracing_mode="symbolic")(x, torch.tensor(False))
self.assertEqual(graph(x, torch.tensor(True)), f(x, torch.tensor(True)))
@skipIfTorchDynamo("Graph is not captured by backend if test with dynamo")
@skipIfCrossRef # Arg order changes with crossref
def test_cond_simple_with_linear_compile_check_graph(self):
def true_fn(x):
return x.sin()
def false_fn(x):
return x.cos()
x = torch.randn(4, requires_grad=True)
def f(pred, x):
result = cond(pred, true_fn, false_fn, (x,))
grad_out = torch.ones_like(result)
return torch.autograd.grad(result, (x,), grad_out)
backend = EagerAndRecordGraphs()
torch.compile(f, backend=backend)(torch.tensor(False), x)
self.assertEqual(len(backend.graphs), 2)
gm = backend.graphs[0]
self.assertExpectedInline(
gm.code.strip(),
"""\
def forward(self, L_pred_ : torch.Tensor, L_x_ : torch.Tensor):
l_pred_ = L_pred_
l_x_ = L_x_
cond_true_0 = self.cond_true_0
cond_false_0 = self.cond_false_0
cond = torch.ops.higher_order.cond(l_pred_, cond_true_0, cond_false_0, (l_x_,)); l_pred_ = cond_true_0 = cond_false_0 = l_x_ = None
result = cond[0]; cond = None
grad_out = torch.ones_like(result)
return (result, grad_out)""", # noqa: B950
)
self.assertExpectedInline(
normalize_gm(backend.graphs[1].print_readable(print_output=False)),
"""\
| TestControlFlowTraced |
python | realpython__materials | python-assignment-statements/user.py | {
"start": 0,
"end": 95
} | class ____:
def __init__(self, name, job):
self.name = name
self.job = job
| User |
python | patrick-kidger__equinox | equinox/nn/_normalisation.py | {
"start": 387,
"end": 5905
} | class ____(Module):
r"""
Computes a mean and standard deviation over the whole input array, and uses these
to normalise the whole array. Optionally applies an elementwise affine
transformation afterwards.
Given an input array $x$, this layer computes
$$\frac{x - \mathbb{E}[x]}{\sqrt{\text{Var}[x] + \varepsilon}} * \gamma + \beta$$
where $\gamma$, $\beta$ have the same shape as $x$ if `elementwise_affine=True`,
and $\gamma = 1$, $\beta = 0$ if `elementwise_affine=False`.
??? cite
[Layer Normalization](https://arxiv.org/abs/1607.06450)
```bibtex
@article{ba2016layer,
author={Jimmy Lei Ba, Jamie Ryan Kriso, Geoffrey E. Hinton},
title={Layer Normalization},
year={2016},
journal={arXiv:1607.06450},
}
```
!!! faq "FAQ"
If you need to normalise over only some input dimensions, then this can be
achieved by vmap'ing. For example the following will compute statistics over
every dimension *except* the first:
```python
layer = LayerNorm(...)
array = jax.vmap(layer)(array)
```
"""
shape: tuple[int, ...] = field(static=True)
eps: float = field(static=True)
use_weight: bool = field(static=True)
use_bias: bool = field(static=True)
weight: Float[Array, "*shape"] | None
bias: Float[Array, "*shape"] | None
def __init__(
self,
shape: int | Sequence[int],
eps: float = 1e-5,
use_weight: bool = True,
use_bias: bool = True,
dtype=None,
*,
elementwise_affine: bool | None = None,
):
"""**Arguments:**
- `shape`: Shape of the input.
- `eps`: Value added to denominator for numerical stability.
- `use_weight`: Whether the module has learnable affine weights.
- `use_bias`: Whether the module has learnable affine biases.
- `dtype`: The dtype to use for the weight and the bias in this layer if
`use_weight` or `use_bias` is set to `True`.
Defaults to either `jax.numpy.float32` or `jax.numpy.float64` depending
on whether JAX is in 64-bit mode.
- `elementwise_affine`: Deprecated alternative to `use_weight` and `use_bias`.
"""
if isinstance(shape, int):
shape = (shape,)
else:
shape = tuple(shape)
self.shape = shape
self.eps = eps
if elementwise_affine is not None:
use_weight = elementwise_affine
use_bias = elementwise_affine
warnings.warn(
"LayerNorm(elementwise_affine=...) is deprecated "
"in favour of LayerNorm(use_weight=...) and LayerNorm(use_bias=...)"
)
self.use_weight = use_weight
self.use_bias = use_bias
self.weight = jnp.ones(shape, dtype=dtype) if use_weight else None
self.bias = jnp.zeros(shape, dtype=dtype) if use_bias else None
@overload
def __call__(self, x: Array, *, key: PRNGKeyArray | None = None) -> Array: ...
@overload
def __call__(
self, x: Array, state: State, *, key: PRNGKeyArray | None = None
) -> tuple[Array, State]: ...
@named_scope("eqx.nn.LayerNorm")
def __call__(
self,
x: Float[Array, "*shape"],
state: State = sentinel,
*,
key: PRNGKeyArray | None = None,
) -> Array | tuple[Array, State]:
"""**Arguments:**
- `x`: A JAX array, with the same shape as the `shape` passed to `__init__`.
- `state`: Ignored; provided for interchangeability with the
[`equinox.nn.BatchNorm`][] API.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
The output is a JAX array of the same shape as `x`.
If `state` is passed, then a 2-tuple of `(output, state)` is returned. The state
is passed through unchanged. If `state` is not passed, then just the output is
returned.
"""
if x.shape != self.shape:
raise ValueError(
"`LayerNorm(shape)(x)` must satisfy the invariant `shape == x.shape`"
f"Received `shape={self.shape} and `x.shape={x.shape}`. You might need "
"to replace `layer_norm(x)` with `jax.vmap(layer_norm)(x)`.\n"
"\n"
"If this is a new error for you, it might be because this became "
"stricter in Equinox v0.11.0. Previously all that was required is that "
"`x.shape` ended with `shape`. However, this turned out to be a "
"frequent source of bugs, so we made the check stricter!"
)
orig_dtype = x.dtype
with jax.numpy_dtype_promotion("standard"):
dtype = jnp.result_type(x.dtype, jnp.float32)
x = x.astype(dtype)
mean = jnp.mean(x, keepdims=True)
variance = jnp.var(x, keepdims=True)
variance = jnp.maximum(0.0, variance)
inv = jax.lax.rsqrt(variance + self.eps)
out = (x - mean) * inv
if self.use_weight:
out = self.weight.astype(dtype) * out # pyright: ignore
if self.use_bias:
out = out + self.bias.astype(dtype) # pyright: ignore
if state is sentinel:
return out.astype(orig_dtype)
else:
return out.astype(orig_dtype), state
| LayerNorm |
python | PrefectHQ__prefect | src/prefect/events/actions.py | {
"start": 5745,
"end": 5875
} | class ____(WorkPoolAction):
"""Resumes a Work Pool"""
type: Literal["resume-work-pool"] = "resume-work-pool"
| ResumeWorkPool |
python | django__django | tests/prefetch_related/models.py | {
"start": 3709,
"end": 4639
} | class ____(models.Model):
tag = models.SlugField()
content_type = models.ForeignKey(
ContentType,
models.CASCADE,
related_name="taggeditem_set2",
)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey("content_type", "object_id")
created_by_ct = models.ForeignKey(
ContentType,
models.SET_NULL,
null=True,
related_name="taggeditem_set3",
)
created_by_fkey = models.PositiveIntegerField(null=True)
created_by = GenericForeignKey(
"created_by_ct",
"created_by_fkey",
)
favorite_ct = models.ForeignKey(
ContentType,
models.SET_NULL,
null=True,
related_name="taggeditem_set4",
)
favorite_fkey = models.CharField(max_length=64, null=True)
favorite = GenericForeignKey("favorite_ct", "favorite_fkey")
class Meta:
ordering = ["id"]
| TaggedItem |
python | huggingface__transformers | tests/models/colqwen2/test_modeling_colqwen2.py | {
"start": 7255,
"end": 10494
} | class ____(ModelTesterMixin, unittest.TestCase):
"""
Model tester for `ColQwen2ForRetrieval`.
"""
all_model_classes = (ColQwen2ForRetrieval,) if is_torch_available() else ()
test_resize_embeddings = True
def setUp(self):
self.model_tester = ColQwen2ForRetrievalModelTester(self)
self.config_tester = ConfigTester(self, config_class=ColQwen2Config, has_text_modality=False)
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
wte = model.get_input_embeddings()
inputs["inputs_embeds"] = wte(input_ids)
with torch.no_grad():
model(**inputs)
# overwrite inputs_embeds tests because we need to delete "pixel values" for LVLMs
# while some other models require pixel_values to be present
def test_inputs_embeds_matches_input_ids(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
input_ids = inputs["input_ids"]
del inputs["input_ids"]
del inputs["pixel_values"]
inputs_embeds = model.get_input_embeddings()(input_ids)
with torch.no_grad():
out_ids = model(input_ids=input_ids, **inputs)[0]
out_embeds = model(inputs_embeds=inputs_embeds, **inputs)[0]
self.assertTrue(torch.allclose(out_embeds, out_ids))
@slow
@require_vision
def test_colqwen2_forward_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
model.to(torch_device)
model.eval()
inputs = self._prepare_for_class(inputs_dict, model_class)
with torch.no_grad():
outputs = model(**inputs, return_dict=True)
self.assertIsInstance(outputs, ColQwen2ForRetrievalOutput)
@unittest.skip(reason="Some undefined behavior encountered with test versions of Qwen2-VL. Skip for now.")
def test_model_parallelism(self):
pass
@unittest.skip(reason="Pass because ColQwen2 requires `attention_mask is not None`")
def test_sdpa_can_dispatch_on_flash(self):
pass
@unittest.skip(reason="Pass because ColQwen2 requires `attention_mask is not None`")
@pytest.mark.torch_compile_test
def test_sdpa_can_compile_dynamic(self):
pass
@unittest.skip(reason="This architecture doesn't support weight tying/untying.")
def test_load_save_without_tied_weights(self):
pass
@require_torch
| ColQwen2ForRetrievalModelTest |
python | hyperopt__hyperopt | hyperopt/tests/integration/test_spark.py | {
"start": 2847,
"end": 23119
} | class ____(unittest.TestCase, BaseSparkContext):
@classmethod
def setUpClass(cls):
cls.setup_spark()
cls._sc.setLogLevel("OFF")
@classmethod
def tearDownClass(cls):
cls.teardown_spark()
def sparkSupportsJobCancelling(self):
return hasattr(self.sc.parallelize([1]), "collectWithJobGroup")
def check_run_status(
self, spark_trials, output, num_total, num_success, num_failure
):
self.assertEqual(
spark_trials.count_total_trials(),
num_total,
"Wrong number of total trial runs: Expected {e} but got {r}.".format(
e=num_total, r=spark_trials.count_total_trials()
),
)
self.assertEqual(
spark_trials.count_successful_trials(),
num_success,
"Wrong number of successful trial runs: Expected {e} but got {r}.".format(
e=num_success, r=spark_trials.count_successful_trials()
),
)
self.assertEqual(
spark_trials.count_failed_trials(),
num_failure,
"Wrong number of failed trial runs: Expected {e} but got {r}.".format(
e=num_failure, r=spark_trials.count_failed_trials()
),
)
log_output = output.getvalue().strip()
self.assertIn(
"Total Trials: " + str(num_total),
log_output,
"""Logging "Total Trials: {num}" missing from the log: {log}""".format(
num=str(num_total), log=log_output
),
)
self.assertIn(
str(num_success) + " succeeded",
log_output,
"""Logging "{num} succeeded " missing from the log: {log}""".format(
num=str(num_success), log=log_output
),
)
self.assertIn(
str(num_failure) + " failed",
log_output,
""" Logging "{num} failed " missing from the log: {log}""".format(
num=str(num_failure), log=log_output
),
)
def assert_task_succeeded(self, log_output, task):
self.assertIn(
f"trial {task} task thread exits normally",
log_output,
"""Debug info "trial {task} task thread exits normally" missing from log:
{log_output}""".format(
task=task, log_output=log_output
),
)
def assert_task_failed(self, log_output, task):
self.assertIn(
f"trial {task} task thread catches an exception",
log_output,
"""Debug info "trial {task} task thread catches an exception" missing from log:
{log_output}""".format(
task=task, log_output=log_output
),
)
def test_quadratic1_tpe(self):
# TODO: Speed this up or remove it since it is slow (1 minute on laptop)
spark_trials = SparkTrials(parallelism=4)
test_quadratic1_tpe(spark_trials)
def test_trial_run_info(self):
spark_trials = SparkTrials(parallelism=4)
with patch_logger("hyperopt-spark") as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", -5, 5),
algo=anneal.suggest,
max_evals=8,
return_argmin=False,
trials=spark_trials,
rstate=np.random.default_rng(94),
)
self.check_run_status(
spark_trials, output, num_total=8, num_success=6, num_failure=2
)
expected_result = {"loss": 1.0, "status": "ok"}
for trial in spark_trials._dynamic_trials:
if trial["state"] == base.JOB_STATE_DONE:
self.assertEqual(
trial["result"],
expected_result,
f"Wrong result has been saved: Expected {expected_result} but got {trial['result']}.",
)
elif trial["state"] == base.JOB_STATE_ERROR:
err_message = trial["misc"]["error"][1]
self.assertIn(
"RuntimeError",
err_message,
"Missing {e} in {r}.".format(e="RuntimeError", r=err_message),
)
self.assertIn(
"Traceback (most recent call last)",
err_message,
"Missing {e} in {r}.".format(e="Traceback", r=err_message),
)
num_success = spark_trials.count_by_state_unsynced(base.JOB_STATE_DONE)
self.assertEqual(
num_success,
6,
f"Wrong number of successful trial runs: Expected 6 but got {num_success}.",
)
num_failure = spark_trials.count_by_state_unsynced(base.JOB_STATE_ERROR)
self.assertEqual(
num_failure,
2,
f"Wrong number of failed trial runs: Expected 2 but got {num_failure}.",
)
def test_accepting_sparksession(self):
spark_trials = SparkTrials(
parallelism=2, spark_session=SparkSession.builder.getOrCreate()
)
fmin(
fn=lambda x: x + 1,
space=hp.uniform("x", 5, 8),
algo=anneal.suggest,
max_evals=2,
trials=spark_trials,
)
def test_parallelism_arg(self):
default_parallelism = 2
# Test requested_parallelism is None or negative values.
for requested_parallelism in [None, -1]:
with patch_logger("hyperopt-spark") as output:
parallelism = SparkTrials._decide_parallelism(
requested_parallelism=requested_parallelism,
spark_default_parallelism=default_parallelism,
)
self.assertEqual(
parallelism,
default_parallelism,
"Failed to set parallelism to be default parallelism ({p})"
" ({e})".format(p=parallelism, e=default_parallelism),
)
log_output = output.getvalue().strip()
self.assertIn(
"Because the requested parallelism was None or a non-positive value, "
"parallelism will be set to ({d})".format(d=default_parallelism),
log_output,
"""set to default parallelism missing from log: {log_output}""".format(
log_output=log_output
),
)
# Test requested_parallelism exceeds hard cap
with patch_logger("hyperopt-spark") as output:
parallelism = SparkTrials._decide_parallelism(
requested_parallelism=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED + 1,
spark_default_parallelism=default_parallelism,
)
self.assertEqual(
parallelism,
SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED,
"Failed to limit parallelism ({p}) to MAX_CONCURRENT_JOBS_ALLOWED ({e})".format(
p=parallelism, e=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
),
)
log_output = output.getvalue().strip()
self.assertIn(
"SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED ({c})".format(
c=SparkTrials.MAX_CONCURRENT_JOBS_ALLOWED
),
log_output,
"""MAX_CONCURRENT_JOBS_ALLOWED value missing from log: {log_output}""".format(
log_output=log_output
),
)
def test_all_successful_trials(self):
spark_trials = SparkTrials(parallelism=1)
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
)
log_output = output.getvalue().strip()
self.assertEqual(spark_trials.count_successful_trials(), 1)
self.assertIn(
"fmin thread exits normally",
log_output,
"""Debug info "fmin thread exits normally" missing from
log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_all_failed_trials(self):
spark_trials = SparkTrials(parallelism=1)
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", 5, 10),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
return_argmin=False,
)
log_output = output.getvalue().strip()
self.assertEqual(spark_trials.count_failed_trials(), 1)
self.assert_task_failed(log_output, 0)
spark_trials = SparkTrials(parallelism=4)
# Here return_argmin is True (by default) and an exception should be thrown
with self.assertRaisesRegex(Exception, "There are no evaluation tasks"):
fmin(
fn=fn_succeed_within_range,
space=hp.uniform("x", 5, 8),
algo=anneal.suggest,
max_evals=2,
trials=spark_trials,
)
def test_timeout_without_job_cancellation(self):
timeout = 4
spark_trials = SparkTrials(parallelism=1, timeout=timeout)
spark_trials._spark_supports_job_cancelling = False
def fn(x):
time.sleep(0.5)
return x
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=10,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertGreater(spark_trials.count_successful_trials(), 0)
self.assertGreater(spark_trials.count_cancelled_trials(), 0)
self.assertIn(
"fmin is cancelled, so new trials will not be launched",
log_output,
""" "fmin is cancelled, so new trials will not be launched" missing from log:
{log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"SparkTrials will block",
log_output,
""" "SparkTrials will block" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_timeout_without_job_cancellation_fmin_timeout(self):
timeout = 5
spark_trials = SparkTrials(parallelism=1)
spark_trials._spark_supports_job_cancelling = False
def fn(x):
time.sleep(1)
return x
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -1, 1),
algo=anneal.suggest,
max_evals=10,
timeout=timeout,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
rstate=np.random.default_rng(99),
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertGreater(spark_trials.count_successful_trials(), 0)
self.assertGreater(spark_trials.count_cancelled_trials(), 0)
self.assertIn(
"fmin is cancelled, so new trials will not be launched",
log_output,
""" "fmin is cancelled, so new trials will not be launched" missing from log:
{log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"SparkTrials will block",
log_output,
""" "SparkTrials will block" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_succeeded(log_output, 0)
def test_timeout_with_job_cancellation(self):
if not self.sparkSupportsJobCancelling():
print(
"Skipping timeout test since this Apache PySpark version does not "
"support cancelling jobs by job group ID."
)
return
timeout = 2
spark_trials = SparkTrials(parallelism=4, timeout=timeout)
def fn(x):
if x < 0:
time.sleep(timeout + 20)
raise Exception("Task should have been cancelled")
else:
time.sleep(1)
return x
# Test 1 cancelled trial. Examine logs.
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=fn,
space=hp.uniform("x", -2, 0),
algo=anneal.suggest,
max_evals=1,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=False,
rstate=np.random.default_rng(4),
)
log_output = output.getvalue().strip()
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
self.assertEqual(spark_trials.count_cancelled_trials(), 1)
self.assertIn(
"Cancelling all running jobs",
log_output,
""" "Cancelling all running jobs" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assertIn(
"trial task 0 cancelled",
log_output,
""" "trial task 0 cancelled" missing from log: {log_output}""".format(
log_output=log_output
),
)
self.assert_task_failed(log_output, 0)
# Test mix of successful and cancelled trials.
spark_trials = SparkTrials(parallelism=4, timeout=4)
fmin(
fn=fn,
space=hp.uniform("x", -0.25, 5),
algo=anneal.suggest,
max_evals=6,
trials=spark_trials,
max_queue_len=1,
show_progressbar=False,
return_argmin=True,
rstate=np.random.default_rng(4),
)
time.sleep(2)
self.assertTrue(spark_trials._fmin_cancelled)
self.assertEqual(spark_trials._fmin_cancelled_reason, "fmin run timeout")
# There are 2 finished trials, 1 cancelled running trial and 1 cancelled
# new trial. We do not need to check the new trial since it is not started yet.
self.assertGreaterEqual(
spark_trials.count_successful_trials(),
1,
"Expected at least 1 successful trial but found none.",
)
self.assertGreaterEqual(
spark_trials.count_cancelled_trials(),
1,
"Expected at least 1 cancelled trial but found none.",
)
def test_invalid_timeout(self):
with self.assertRaisesRegex(
Exception,
"timeout argument should be None or a positive value. Given value: -1",
):
SparkTrials(parallelism=4, timeout=-1)
with self.assertRaisesRegex(
Exception,
"timeout argument should be None or a positive value. Given value: True",
):
SparkTrials(parallelism=4, timeout=True)
def test_exception_when_spark_not_available(self):
import hyperopt
orig_have_spark = hyperopt.spark._have_spark
hyperopt.spark._have_spark = False
try:
with self.assertRaisesRegex(Exception, "cannot import pyspark"):
SparkTrials(parallelism=4)
finally:
hyperopt.spark._have_spark = orig_have_spark
def test_no_retry_for_long_tasks(self):
NUM_TRIALS = 2
output_dir = tempfile.mkdtemp()
def fn(_):
with open(os.path.join(output_dir, str(timeit.default_timer())), "w") as f:
f.write("1")
raise Exception("Failed!")
spark_trials = SparkTrials(parallelism=2)
try:
fmin(
fn=fn,
space=hp.uniform("x", 0, 1),
algo=anneal.suggest,
max_evals=NUM_TRIALS,
trials=spark_trials,
show_progressbar=False,
return_argmin=False,
)
except BaseException as e:
self.assertEqual(
"There are no evaluation tasks, cannot return argmin of task losses.",
str(e),
)
call_count = len(os.listdir(output_dir))
self.assertEqual(NUM_TRIALS, call_count)
def test_pin_thread_off(self):
if self._pin_mode_enabled:
raise unittest.SkipTest()
spark_trials = SparkTrials(parallelism=2)
self.assertFalse(spark_trials._spark_pinned_threads_enabled)
self.assertTrue(spark_trials._spark_supports_job_cancelling)
fmin(
fn=lambda x: x + 1,
space=hp.uniform("x", -1, 1),
algo=rand.suggest,
max_evals=5,
trials=spark_trials,
)
self.assertEqual(spark_trials.count_successful_trials(), 5)
def test_pin_thread_on(self):
if not self._pin_mode_enabled:
raise unittest.SkipTest()
spark_trials = SparkTrials(parallelism=2)
self.assertTrue(spark_trials._spark_pinned_threads_enabled)
self.assertTrue(spark_trials._spark_supports_job_cancelling)
fmin(
fn=lambda x: x + 1,
space=hp.uniform("x", -1, 1),
algo=rand.suggest,
max_evals=5,
trials=spark_trials,
)
self.assertEqual(spark_trials.count_successful_trials(), 5)
def test_early_stop(self):
# stop after at least 3 trials succeed
def early_stop_fn(trials):
num_ok_trials = 0
for trial in trials:
if trial["result"]["status"] == STATUS_OK:
num_ok_trials += 1
return num_ok_trials >= 3, []
spark_trials = SparkTrials(parallelism=2)
with patch_logger("hyperopt-spark", logging.DEBUG) as output:
fmin(
fn=lambda x: {"loss": x, "status": STATUS_OK, "other_info": "hello"},
space=hp.uniform("x", -1, 1),
algo=rand.suggest,
max_evals=10,
trials=spark_trials,
early_stop_fn=early_stop_fn,
)
log_output = output.getvalue().strip()
num_successful_trials = spark_trials.count_successful_trials()
self.assertGreaterEqual(num_successful_trials, 3)
self.assertLess(num_successful_trials, 10)
self.assertIn(
"fmin thread exits normally",
log_output,
f'Debug info "fmin thread exits normally" missing from log: {log_output}',
)
self.assertIn(
"fmin cancelled because of early stopping condition",
log_output,
f"Debug info for early stopping missing from log: {log_output}",
)
| FMinTestCase |
python | celery__celery | examples/stamping/visitors.py | {
"start": 649,
"end": 2410
} | class ____(StampingVisitor):
def on_signature(self, sig: Signature, **headers) -> dict:
logger.critical(f"Visitor: Sig '{sig}' is stamped with: on_signature")
return {
"on_signature": "FullVisitor.on_signature()",
}
def on_callback(self, sig, **headers) -> dict:
logger.critical(f"Visitor: Sig '{sig}' is stamped with: on_callback")
return {
"on_callback": "FullVisitor.on_callback()",
}
def on_errback(self, sig, **headers) -> dict:
logger.critical(f"Visitor: Sig '{sig}' is stamped with: on_errback")
return {
"on_errback": "FullVisitor.on_errback()",
}
def on_chain_start(self, sig: Signature, **headers) -> dict:
logger.critical(f"Visitor: Sig '{sig}' is stamped with: on_chain_start")
return {
"on_chain_start": "FullVisitor.on_chain_start()",
}
def on_group_start(self, sig: Signature, **headers) -> dict:
logger.critical(f"Visitor: Sig '{sig}' is stamped with: on_group_start")
return {
"on_group_start": "FullVisitor.on_group_start()",
}
def on_chord_header_start(self, sig: Signature, **headers) -> dict:
logger.critical(f"Visitor: Sig '{sig}' is stamped with: on_chord_header_start")
s = super().on_chord_header_start(sig, **headers)
s.update(
{
"on_chord_header_start": "FullVisitor.on_chord_header_start()",
}
)
return s
def on_chord_body(self, sig: Signature, **headers) -> dict:
logger.critical(f"Visitor: Sig '{sig}' is stamped with: on_chord_body")
return {
"on_chord_body": "FullVisitor.on_chord_body()",
}
| FullVisitor |
python | pytorch__pytorch | torch/_functorch/fx_minifier.py | {
"start": 4665,
"end": 17372
} | class ____:
graph: fx.Graph
inps: list[torch.Tensor]
def __post_init__(self):
ph_nodes = get_placeholders(self.graph)
assert len(ph_nodes) == len(self.inps)
def minifier(
fail_f: fx.GraphModule,
inps,
module_fails,
dump_state: Callable = dump_state,
*,
save_dir=None,
offload_to_disk=False,
skip_offload=False,
skip_sanity=False,
max_granularity=None,
):
"""
Minimizes a FX graph with given inputs, such that the resulting FX graph still returns True for module_fails.
Does 2 main strategies:
1. Truncates suffix: Removes some suffix from the graph and sets a new output.
2. Delta Debugging: Tries replacing half of the graph with inputs. If fails,
tries replacing quarter of the graph, etc.
>>> # xdoctest: +SKIP(failing)
>>> failing_function = fx.symbolic_trace(f)
>>> minimize(failing_function, [torch.randn(5)], lambda fx_g, inps: fx_g(*inps))
note: module_fails returns True if it fails.
"""
assert isinstance(inps, (tuple, list))
failing_graph = fail_f.graph
cur_size = len(failing_graph.nodes)
if max_granularity is not None and not is_power_of_two(max_granularity):
raise RuntimeError(f"max_granularity {max_granularity} not power of two")
num_queries = 0
def deepcopy_fx_graph(fx_graph):
return fx.GraphModule(fail_f, copy.deepcopy(fx_graph)).graph
def graph_fails(graph, inps):
nonlocal num_queries
graph = copy.deepcopy(graph)
num_queries += 1
mod = fx.GraphModule(fail_f, graph)
mod.graph.lint()
return module_fails(mod, inps)
writer = None
if offload_to_disk:
writer = ContentStoreWriter(save_dir)
ConcreteProp(fail_f, writer=writer, skip_offload=skip_offload).propagate(*inps)
if not skip_sanity and not graph_fails(failing_graph, inps):
raise RuntimeError("Input graph did not fail the tester")
print(f"Started off with {cur_size} nodes", file=sys.stderr)
def _register_strategy(strategy: Callable, name: str):
@wraps(strategy)
def new_func(old_state: ReproState, granularity=1):
print(file=sys.stderr)
print(
f"Strategy: {name} (G: {granularity}) "
f"({len(old_state.graph.nodes)} nodes, {len(old_state.inps)} inputs)",
file=sys.stderr,
)
new_state = strategy(
deepcopy_fx_graph(old_state.graph), list(old_state.inps), granularity
)
if new_state is not None:
new_nodes = len(new_state.graph.nodes)
old_nodes = len(old_state.graph.nodes)
new_inps = len(new_state.inps)
old_inps = len(old_state.inps)
new_outs = len(get_outputs(new_state.graph))
old_outs = len(get_outputs(old_state.graph))
progress_made = False
if new_nodes < old_nodes:
progress_made = True
print(
f"SUCCESS: Went from {old_nodes} to {new_nodes} nodes",
file=sys.stderr,
)
if new_inps > old_inps:
progress_made = True
print(
f"SUCCESS: Went from {old_inps} to {new_inps} inputs",
file=sys.stderr,
)
if new_outs < old_outs:
progress_made = True
print(
f"SUCCESS: Went from {old_outs} to {new_outs} outputs",
file=sys.stderr,
)
if not progress_made:
raise RuntimeError("Success raised but no progress made?")
if not graph_fails(new_state.graph, new_state.inps):
print(
"WARNING: Something went wrong, not applying this minification",
file=sys.stderr,
)
return None
return new_state
else:
print(f"FAIL: {name}", file=sys.stderr)
return None
return new_func
def register_strategy(name: str):
return partial(_register_strategy, name=name)
@register_strategy("Truncate suffix")
def remove_suffix(cur_graph, cur_inps, granularity):
tested = set()
new_graph = fx.Graph()
env = {}
for idx, node in enumerate(cur_graph.nodes):
new_node = new_graph.node_copy(node, lambda x: env[x])
if node.op not in ["placeholder", "output"]:
# If idx is divisible by (granularity * 2), it would have been checked already.
if (
idx % granularity == 0
and (idx % (granularity * 2) != 0)
and idx not in tested
):
output_node = new_graph.output((new_node,))
if len(new_graph.nodes) < len(cur_graph.nodes) and graph_fails(
new_graph, cur_inps
):
return ReproState(new_graph, cur_inps)
else:
tested.add(idx)
new_graph.erase_node(output_node)
env[node] = new_node
return None
@register_strategy("Remove outputs")
def remove_outputs(cur_graph, cur_inps, granularity):
granularity = max(1, granularity // 2)
for idx, node in enumerate(cur_graph.nodes):
node.idx = idx
if node.op == "output":
output = node
break
if isinstance(output.args[0], fx.Node):
return None
output_args = sorted(
output.args[0], key=lambda x: x.idx if isinstance(x, fx.Node) else int(1e9)
)
if len(output_args) == 1:
return None
for idx in range(0, len(output_args), granularity):
output.args = (output_args[:idx] + output_args[idx + granularity :],)
if graph_fails(cur_graph, cur_inps):
return ReproState(cur_graph, cur_inps)
return None
def remove_unused_inputs_unchecked(cur_state: ReproState):
cur_graph = cur_state.graph
cur_inps = cur_state.inps
ph_nodes = get_placeholders(cur_graph)
assert len(ph_nodes) == len(cur_inps)
new_inps = []
for idx in range(len(ph_nodes)):
if len(ph_nodes[idx].users) == 0:
cur_graph.erase_node(ph_nodes[idx])
else:
new_inps.append(cur_inps[idx])
if len(new_inps) < len(cur_inps):
return ReproState(cur_graph, new_inps)
return None
def remove_unused_inputs_checked(cur_state: ReproState):
new_state = remove_unused_inputs_unchecked(cur_state)
if new_state is not None and graph_fails(new_state.graph, new_state.inps):
return new_state
return None
def _remove_unused_wrapper(cur_graph, cur_inps, granularity):
return remove_unused_inputs_checked(ReproState(cur_graph, cur_inps))
remove_unused_inputs = register_strategy("Remove unused inputs")(
_remove_unused_wrapper
)
@register_strategy("Eliminate dead code")
def eliminate_dead_code(cur_graph, cur_inps, granularity):
if cur_graph.eliminate_dead_code() and graph_fails(cur_graph, cur_inps):
return ReproState(cur_graph, cur_inps)
return None
def _consolidate_placeholders(cur_graph, inps):
new_graph = fx.Graph()
env = {}
seen_non_placeholder = False
# Move all placeholders to the front; also, if any load_tensor
# is at the front, convert it into an input (because it can be live
# all the time)
for node in cur_graph.nodes:
if node.op == "placeholder":
new_node = new_graph.node_copy(node, lambda x: env[x])
env[node] = new_node
elif not seen_non_placeholder and is_load_tensor_node(node):
new_node = new_graph.placeholder(node.name)
env[node] = new_node
inps.append(
torch.ops.debugprims.load_tensor.default(*node.args, **node.kwargs)
)
else:
seen_non_placeholder = True
# Move everyone else
for node in cur_graph.nodes:
if node not in env:
new_node = new_graph.node_copy(node, lambda x: env[x])
env[node] = new_node
return new_graph
@register_strategy("Delta Debugging")
def delta_debugging(cur_graph: fx.Graph, cur_inps, granularity):
num_nodes = len(cur_graph.nodes)
for start_range in range(0, num_nodes, granularity):
is_removing = False
new_graph = deepcopy_fx_graph(cur_graph)
new_inps = cur_inps[:]
end_range = min(num_nodes, start_range + granularity)
for idx in range(start_range, end_range):
new_node = list(new_graph.nodes)[idx]
if _convert_node_to_placeholder(new_graph, new_node, new_inps):
is_removing = True
if not is_removing:
continue
new_graph.eliminate_dead_code()
new_graph = _consolidate_placeholders(new_graph, new_inps)
new_state = remove_unused_inputs_unchecked(ReproState(new_graph, new_inps))
if new_state is None:
new_state = ReproState(new_graph, new_inps)
if graph_fails(new_state.graph, new_state.inps):
return ReproState(new_state.graph, new_state.inps)
return None
@register_strategy("Consolidate Inputs")
def consolidate_inputs(cur_graph, cur_inps, granularity):
old_len = len(cur_inps)
cur_graph = _consolidate_placeholders(cur_graph, cur_inps)
if len(cur_inps) > old_len and graph_fails(cur_graph, cur_inps):
return ReproState(cur_graph, cur_inps)
return None
failing_state = ReproState(failing_graph, inps)
def try_granularity(failing_state, granularity, use_non_granular):
print(f"Trying granularity {granularity}", file=sys.stderr)
strategies = []
num_nodes = len(failing_state.graph.nodes)
num_outputs = len(get_outputs(failing_state.graph))
if num_outputs > num_nodes // 2:
strategies += [remove_outputs]
if use_non_granular:
strategies += [
eliminate_dead_code,
remove_unused_inputs,
consolidate_inputs,
]
strategies += [remove_suffix, delta_debugging]
for strategy in strategies:
new_state = strategy(failing_state, granularity)
if new_state is not None:
return new_state
return None
while True:
dump_state(fx.GraphModule(fail_f, failing_state.graph), failing_state.inps)
granularity = int(2 ** (math.floor(math.log2(len(failing_state.graph.nodes)))))
if max_granularity is not None:
granularity = min(max_granularity, granularity)
new_state = try_granularity(failing_state, granularity, use_non_granular=True)
if new_state is not None:
failing_state = new_state
continue
granularity //= 2
has_progress = False
while granularity >= 1:
new_state = try_granularity(
failing_state, granularity, use_non_granular=False
)
if new_state is not None:
failing_state = new_state
has_progress = True
break
granularity //= 2
if has_progress:
continue
new_state = remove_outputs(failing_state, 1)
if new_state is not None:
failing_state = new_state
continue
break
if not graph_fails(failing_state.graph, failing_state.inps):
raise RuntimeError("Uh oh, something went wrong :( Final graph is not failing")
print(f"Made {num_queries} queries", file=sys.stderr)
failing_fx = fx.GraphModule(fail_f, failing_state.graph)
# If XLA debugging environment is enabled, create minified HLO graph as well
if "XLA_HLO_DEBUG" in os.environ:
create_minified_hlo_graph(failing_fx, failing_state.inps)
dump_state(failing_fx, failing_state.inps)
print("Wrote minimal repro out to repro.py", file=sys.stderr)
return failing_fx, failing_state.inps
| ReproState |
python | realpython__materials | python-selenium/src/bandcamp/web/pages.py | {
"start": 265,
"end": 938
} | class ____(WebPage):
"""Model the relevant parts of the Bandcamp Discover page."""
def __init__(self, driver: WebDriver) -> None:
super().__init__(driver)
self._accept_cookie_consent()
self.discover_tracklist = TrackListElement(
self._driver.find_element(*DiscoverPageLocator.DISCOVER_RESULTS),
self._driver,
)
def _accept_cookie_consent(self) -> None:
"""Accept the necessary cookie consent."""
try:
self._driver.find_element(
*DiscoverPageLocator.COOKIE_ACCEPT_NECESSARY
).click()
except NoSuchElementException:
pass
| DiscoverPage |
python | numba__numba | numba/tests/test_dictobject.py | {
"start": 51898,
"end": 52208
} | class ____(TestCase):
def test_jitclass_as_value(self):
@njit
def foo(x):
d = Dict()
d[0] = x
d[1] = Bag(101)
return d
d = foo(Bag(a=100))
self.assertEqual(d[0].a, 100)
self.assertEqual(d[1].a, 101)
| TestDictWithJitclass |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 16173,
"end": 16555
} | class ____(_RerankerProvider):
reranker: Union[Rerankers, _EnumLikeStr] = Field(
default=Rerankers.TRANSFORMERS, frozen=True, exclude=True
)
RerankerJinaAIModel = Literal[
"jina-reranker-v2-base-multilingual",
"jina-reranker-v1-base-en",
"jina-reranker-v1-turbo-en",
"jina-reranker-v1-tiny-en",
"jina-colbert-v1-en",
]
| _RerankerTransformersConfig |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-tiktok-marketing/components.py | {
"start": 2881,
"end": 3703
} | class ____(MultipleAdvertiserIdsPerPartition):
"""
SingleAdvertiserIdPerPartition returns single slice for every advertiser_id in the parent stream
or takes value for advertiser_id from a config and skips reading slices.
path_in_config: List[List[str]]: path to value in the config in priority order.
partition_field: str: field to insert partition value.
"""
def stream_slices(self) -> Iterable[StreamSlice]:
partition_value_in_config = self.get_partition_value_from_config()
if partition_value_in_config:
yield StreamSlice(partition={self._partition_field: partition_value_in_config, "parent_slice": {}}, cursor_slice={})
else:
yield from super(MultipleAdvertiserIdsPerPartition, self).stream_slices()
@dataclass
| SingleAdvertiserIdPerPartition |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 97545,
"end": 98318
} | class ____(_PrintableStructure):
_fields_ = [("version", c_uint),
("id", c_uint),
("isP2pSupported", c_uint),
("sliceCount", c_uint),
("instanceCount", c_uint),
("multiprocessorCount", c_uint),
("copyEngineCount", c_uint),
("decoderCount", c_uint),
("encoderCount", c_uint),
("jpegCount", c_uint),
("ofaCount", c_uint),
("memorySizeMB", c_ulonglong),
("name", c_char * NVML_DEVICE_NAME_V2_BUFFER_SIZE)
]
def __init__(self):
super(c_nvmlGpuInstanceProfileInfo_v2_t, self).__init__(version=nvmlGpuInstanceProfileInfo_v2)
| c_nvmlGpuInstanceProfileInfo_v2_t |
python | pytorch__pytorch | torch/ao/quantization/observer.py | {
"start": 66204,
"end": 68154
} | class ____(Granularity):
"""
Represents per-token granularity in quantization.
This granularity type calculates a different set of quantization parameters
for each token, which is represented as the last dimension of the tensor.
For example, if the input tensor has shape [2, 3, 4], then there are 6 tokens
with 4 elements each, and we will calculate 6 sets of quantization parameters,
one for each token.
If the input tensor has only two dimensions, e.g. [8, 16], then this is
equivalent to `PerAxis(axis=0)`, which yields 8 sets of quantization parameters.
"""
def get_block_size(
input_shape: tuple[int, ...], granularity: Granularity
) -> tuple[int, ...]:
"""Get the block size based on the input shape and granularity type.
Args:
input_shape: The input tensor shape possibly more than 2 dimensions
granularity: The granularity type of the quantization
"""
if not isinstance(granularity, Granularity):
raise AssertionError(
"Please provide an instance of Granularity, not subclass of it"
)
if isinstance(granularity, PerTensor):
return input_shape
elif isinstance(granularity, PerAxis):
block_size = list(input_shape)
block_size[granularity.axis] = 1
return tuple(block_size)
elif isinstance(granularity, PerRow):
return (1,) * (len(input_shape) - 1) + (input_shape[-1],)
elif isinstance(granularity, PerGroup):
if len(input_shape) != 2:
raise AssertionError(
f"Expecting input shape dim to be 2 for per group quantization, gotinput shape: {input_shape}"
)
return (1, granularity.group_size)
elif isinstance(granularity, PerToken):
block_size = [1] * len(input_shape)
block_size[-1] = input_shape[-1]
return tuple(block_size)
raise ValueError(f"Unsupported Granularity: {granularity}")
| PerToken |
python | pyparsing__pyparsing | examples/statemachine/vending_machine.py | {
"start": 869,
"end": 2441
} | class ____(VendingMachineStateMixin):
def __init__(self):
self.initialize_state(Idle)
self._pressed = None
self._alpha_pressed = None
self._digit_pressed = None
def press_button(self, button):
if button in "ABCD":
self._pressed = button
self.press_alpha_button()
elif button in "1234":
self._pressed = button
self.press_digit_button()
else:
print("Did not recognize button {!r}".format(str(button)))
def press_alpha_button(self):
try:
super().press_alpha_button()
except VendingMachineState.InvalidTransitionException as ite:
print(ite)
else:
self._alpha_pressed = self._pressed
def press_digit_button(self):
try:
super().press_digit_button()
except VendingMachineState.InvalidTransitionException as ite:
print(ite)
else:
self._digit_pressed = self._pressed
self.dispense()
def dispense(self):
try:
super().dispense()
except VendingMachineState.InvalidTransitionException as ite:
print(ite)
else:
print("Dispensing at {}{}".format(self._alpha_pressed, self._digit_pressed))
self._alpha_pressed = self._digit_pressed = None
vm = VendingMachine()
for button in "1 A B 1".split():
print(">> pressing {!r}".format(button))
vm.press_button(button)
print("Vending machine is now in {} state".format(vm.state))
| VendingMachine |
python | jazzband__django-oauth-toolkit | oauth2_provider/views/device.py | {
"start": 6945,
"end": 7375
} | class ____(LoginRequiredMixin, DetailView):
"""
The view to display the status of a DeviceGrant.
"""
model = DeviceGrant
template_name = "oauth2_provider/device/device_grant_status.html"
def get_object(self):
client_id, user_code = self.kwargs.get("client_id"), self.kwargs.get("user_code")
return get_object_or_404(DeviceGrant, client_id=client_id, user_code=user_code)
| DeviceGrantStatusView |
python | yaml__pyyaml | examples/pygments-lexer/yaml.py | {
"start": 5186,
"end": 15616
} | class ____(ExtendedRegexLexer):
"""Lexer for the YAML language."""
name = 'YAML'
aliases = ['yaml']
filenames = ['*.yaml', '*.yml']
mimetypes = ['text/x-yaml']
tokens = {
# the root rules
'root': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text.Blank),
# line breaks
(r'\n+', Text.Break),
# a comment
(r'#[^\n]*', Comment.Single),
# the '%YAML' directive
(r'^%YAML(?=[ ]|$)', reset_indent(Name.Directive),
'yaml-directive'),
# the %TAG directive
(r'^%TAG(?=[ ]|$)', reset_indent(Name.Directive),
'tag-directive'),
# document start and document end indicators
(r'^(?:---|\.\.\.)(?=[ ]|$)',
reset_indent(Punctuation.Document), 'block-line'),
# indentation spaces
(r'[ ]*(?![ \t\n\r\f\v]|$)',
save_indent(Text.Indent, start=True),
('block-line', 'indentation')),
],
# trailing whitespaces after directives or a block scalar indicator
'ignored-line': [
# ignored whitespaces
(r'[ ]+(?=#|$)', Text.Blank),
# a comment
(r'#[^\n]*', Comment.Single),
# line break
(r'\n', Text.Break, '#pop:2'),
],
# the %YAML directive
'yaml-directive': [
# the version number
(r'([ ]+)([0-9]+\.[0-9]+)',
bygroups(Text.Blank, Literal.Version), 'ignored-line'),
],
# the %YAG directive
'tag-directive': [
# a tag handle and the corresponding prefix
(r'([ ]+)(!|![0-9A-Za-z_-]*!)'
r'([ ]+)(!|!?[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)',
bygroups(Text.Blank, Name.Type, Text.Blank, Name.Type),
'ignored-line'),
],
# block scalar indicators and indentation spaces
'indentation': [
# trailing whitespaces are ignored
(r'[ ]*$', something(Text.Blank), '#pop:2'),
# whitespaces preceding block collection indicators
(r'[ ]+(?=[?:-](?:[ ]|$))', save_indent(Text.Indent)),
# block collection indicators
(r'[?:-](?=[ ]|$)', set_indent(Punctuation.Indicator)),
# the beginning a block line
(r'[ ]*', save_indent(Text.Indent), '#pop'),
],
# an indented line in the block context
'block-line': [
# the line end
(r'[ ]*(?=#|$)', something(Text.Blank), '#pop'),
# whitespaces separating tokens
(r'[ ]+', Text.Blank),
# tags, anchors and aliases,
include('descriptors'),
# block collections and scalars
include('block-nodes'),
# flow collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`-]|[?:-][^ \t\n\r\f\v])',
something(Literal.Scalar.Plain),
'plain-scalar-in-block-context'),
],
# tags, anchors, aliases
'descriptors' : [
# a full-form tag
(r'!<[0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+>', Name.Type),
# a tag in the form '!', '!suffix' or '!handle!suffix'
(r'!(?:[0-9A-Za-z_-]+)?'
r'(?:![0-9A-Za-z;/?:@&=+$,_.!~*\'()\[\]%-]+)?', Name.Type),
# an anchor
(r'&[0-9A-Za-z_-]+', Name.Anchor),
# an alias
(r'\*[0-9A-Za-z_-]+', Name.Alias),
],
# block collections and scalars
'block-nodes': [
# implicit key
(r':(?=[ ]|$)', set_indent(Punctuation.Indicator, implicit=True)),
# literal and folded scalars
(r'[|>]', Punctuation.Indicator,
('block-scalar-content', 'block-scalar-header')),
],
# flow collections and quoted scalars
'flow-nodes': [
# a flow sequence
(r'\[', Punctuation.Indicator, 'flow-sequence'),
# a flow mapping
(r'\{', Punctuation.Indicator, 'flow-mapping'),
# a single-quoted scalar
(r'\'', Literal.Scalar.Flow.Quote, 'single-quoted-scalar'),
# a double-quoted scalar
(r'\"', Literal.Scalar.Flow.Quote, 'double-quoted-scalar'),
],
# the content of a flow collection
'flow-collection': [
# whitespaces
(r'[ ]+', Text.Blank),
# line breaks
(r'\n+', Text.Break),
# a comment
(r'#[^\n]*', Comment.Single),
# simple indicators
(r'[?:,]', Punctuation.Indicator),
# tags, anchors and aliases
include('descriptors'),
# nested collections and quoted scalars
include('flow-nodes'),
# a plain scalar
(r'(?=[^ \t\n\r\f\v?:,\[\]{}#&*!|>\'"%@`])',
something(Literal.Scalar.Plain),
'plain-scalar-in-flow-context'),
],
# a flow sequence indicated by '[' and ']'
'flow-sequence': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\]', Punctuation.Indicator, '#pop'),
],
# a flow mapping indicated by '{' and '}'
'flow-mapping': [
# include flow collection rules
include('flow-collection'),
# the closing indicator
(r'\}', Punctuation.Indicator, '#pop'),
],
# block scalar lines
'block-scalar-content': [
# line break
(r'\n', Text.Break),
# empty line
(r'^[ ]+$',
parse_block_scalar_empty_line(Text.Indent,
Literal.Scalar.Block)),
# indentation spaces (we may leave the state here)
(r'^[ ]*', parse_block_scalar_indent(Text.Indent)),
# line content
(r'[^\n\r\f\v]+', Literal.Scalar.Block),
],
# the content of a literal or folded scalar
'block-scalar-header': [
# indentation indicator followed by chomping flag
(r'([1-9])?[+-]?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
# chomping flag followed by indentation indicator
(r'[+-]?([1-9])?(?=[ ]|$)',
set_block_scalar_indent(Punctuation.Indicator),
'ignored-line'),
],
# ignored and regular whitespaces in quoted scalars
'quoted-scalar-whitespaces': [
# leading and trailing whitespaces are ignored
(r'^[ ]+|[ ]+$', Text.Blank),
# line breaks are ignored
(r'\n+', Text.Break),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Flow),
],
# single-quoted scalars
'single-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of the quote character
(r'\'\'', Literal.Scalar.Flow.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\']+', Literal.Scalar.Flow),
# the closing quote
(r'\'', Literal.Scalar.Flow.Quote, '#pop'),
],
# double-quoted scalars
'double-quoted-scalar': [
# include whitespace and line break rules
include('quoted-scalar-whitespaces'),
# escaping of special characters
(r'\\[0abt\tn\nvfre "\\N_LP]', Literal.Scalar.Flow.Escape),
# escape codes
(r'\\(?:x[0-9A-Fa-f]{2}|u[0-9A-Fa-f]{4}|U[0-9A-Fa-f]{8})',
Literal.Scalar.Flow.Escape),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v\"\\]+', Literal.Scalar.Flow),
# the closing quote
(r'"', Literal.Scalar.Flow.Quote, '#pop'),
],
# the beginning of a new line while scanning a plain scalar
'plain-scalar-in-block-context-new-line': [
# empty lines
(r'^[ ]+$', Text.Blank),
# line breaks
(r'\n+', Text.Break),
# document start and document end indicators
(r'^(?=---|\.\.\.)', something(Punctuation.Document), '#pop:3'),
# indentation spaces (we may leave the block line state here)
(r'^[ ]*', parse_plain_scalar_indent(Text.Indent), '#pop'),
],
# a plain scalar in the block context
'plain-scalar-in-block-context': [
# the scalar ends with the ':' indicator
(r'[ ]*(?=:[ ]|:$)', something(Text.Blank), '#pop'),
# the scalar ends with whitespaces followed by a comment
(r'[ ]+(?=#)', Text.Blank, '#pop'),
# trailing whitespaces are ignored
(r'[ ]+$', Text.Blank),
# line breaks are ignored
(r'\n+', Text.Break, 'plain-scalar-in-block-context-new-line'),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'(?::(?![ \t\n\r\f\v])|[^ \t\n\r\f\v:])+',
Literal.Scalar.Plain),
],
# a plain scalar is the flow context
'plain-scalar-in-flow-context': [
# the scalar ends with an indicator character
(r'[ ]*(?=[,:?\[\]{}])', something(Text.Blank), '#pop'),
# the scalar ends with a comment
(r'[ ]+(?=#)', Text.Blank, '#pop'),
# leading and trailing whitespaces are ignored
(r'^[ ]+|[ ]+$', Text.Blank),
# line breaks are ignored
(r'\n+', Text.Break),
# other whitespaces are a part of the value
(r'[ ]+', Literal.Scalar.Plain),
# regular non-whitespace characters
(r'[^ \t\n\r\f\v,:?\[\]{}]+', Literal.Scalar.Plain),
],
}
def get_tokens_unprocessed(self, text=None, context=None):
if context is None:
context = YAMLLexerContext(text, 0)
return super(YAMLLexer, self).get_tokens_unprocessed(text, context)
| YAMLLexer |
python | tensorflow__tensorflow | tensorflow/python/framework/type_spec_test.py | {
"start": 4138,
"end": 5336
} | class ____(type_spec.BatchableTypeSpec):
"""A TypeSpec for the TwoTensors value type."""
def __init__(self, x_spec, y_spec, color="red"):
self.x_spec = x_spec
self.y_spec = y_spec
self.color = color
value_type = property(lambda self: TwoComposites)
@property
def _component_specs(self):
return (self.x_spec, self.y_spec)
def _to_components(self, value):
return (value.x, value.y)
def _from_components(self, components):
x, y = components
return TwoComposites(x, y, self.color)
def _serialize(self):
return (self.x_spec, self.y_spec, self.color)
@classmethod
def from_value(cls, value):
return cls(type_spec.type_spec_from_value(value.x),
type_spec.type_spec_from_value(value.y),
value.color)
def _batch(self, batch_size):
return TwoCompositesSpec(
self.x_spec._batch(batch_size), self.y_spec._batch(batch_size),
self.color)
def _unbatch(self):
return TwoCompositesSpec(self.x_spec._unbatch(), self.y_spec._unbatch(),
self.color)
type_spec.register_type_spec_from_value_converter(
TwoComposites, TwoCompositesSpec.from_value)
| TwoCompositesSpec |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 17890,
"end": 19103
} | class ____(RequestHandler):
def get(self, *path_args):
# Type checks: web.py interfaces convert argument values to
# unicode strings (by default, but see also decode_argument).
# In httpserver.py (i.e. self.request.arguments), they're left
# as bytes. Keys are always native strings.
for key in self.request.arguments:
if type(key) is not str:
raise Exception("incorrect type for key: %r" % type(key))
for bvalue in self.request.arguments[key]:
if type(bvalue) is not bytes:
raise Exception("incorrect type for value: %r" % type(bvalue))
for svalue in self.get_arguments(key):
if type(svalue) is not unicode_type:
raise Exception("incorrect type for value: %r" % type(svalue))
for arg in path_args:
if type(arg) is not unicode_type:
raise Exception("incorrect type for path arg: %r" % type(arg))
self.write(
dict(
path=self.request.path,
path_args=path_args,
args=recursive_unicode(self.request.arguments),
)
)
| EchoHandler |
python | celery__celery | celery/backends/base.py | {
"start": 3210,
"end": 3448
} | class ____(dict):
def ignore(self, *a, **kw):
pass
__setitem__ = update = setdefault = ignore
def _is_request_ignore_result(request):
if request is None:
return False
return request.ignore_result
| _nulldict |
python | getsentry__sentry | src/sentry/users/web/accounts_form.py | {
"start": 462,
"end": 1535
} | class ____(forms.Form):
user = forms.CharField(
label=_("Account"),
max_length=128,
widget=forms.TextInput(attrs={"placeholder": _("username or email")}),
)
def clean_user(self) -> User | None:
value = (self.cleaned_data.get("user") or "").strip()
if not value:
return None
users = find_users(value, with_valid_password=False)
if not users:
return None
# If we find more than one user, we likely matched on email address.
# We silently bail here as we emailing the 'wrong' person isn't great.
# They will have to retry with their username which is guaranteed
# to be unique
if len(users) > 1:
return None
users = [u for u in users if not u.is_managed]
if not users:
raise forms.ValidationError(
_(
"The account you are trying to recover is managed and does not support password recovery."
)
)
return users[0]
| RecoverPasswordForm |
python | getsentry__sentry | tests/sentry/grouping/seer_similarity/test_get_seer_similar_issues.py | {
"start": 50364,
"end": 53696
} | class ____(TestCase):
@patch("sentry.grouping.ingest.seer.metrics.distribution")
@patch("sentry.grouping.ingest.seer.metrics.incr")
def test_simple(self, mock_incr: MagicMock, mock_distribution: MagicMock) -> None:
new_event, new_variants, new_grouphash, _ = create_new_event(self.project)
with patch(
"sentry.grouping.ingest.seer.get_similarity_data_from_seer",
return_value=[],
):
assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == (None, None)
assert_metrics_call(
mock_incr,
"get_seer_similar_issues",
"no_seer_matches",
{"is_hybrid": False, "training_mode": False},
)
assert_metrics_call(
mock_distribution,
"seer_results_returned",
"no_seer_matches",
{"is_hybrid": False, "training_mode": False},
value=0,
)
# Ensure we're not recording things we don't want to be. (The metrics we're checking
# should only be recorded for events or parent grouphashes with hybrid fingerprints.)
incr_metrics_recorded = {call.args[0] for call in mock_incr.mock_calls}
distribution_metrics_recorded = {call.args[0] for call in mock_distribution.mock_calls}
assert "grouping.similarity.hybrid_fingerprint_match_check" not in incr_metrics_recorded
assert (
"grouping.similarity.hybrid_fingerprint_results_checked"
not in distribution_metrics_recorded
)
@patch("sentry.grouping.ingest.seer.metrics.distribution")
@patch("sentry.grouping.ingest.seer.metrics.incr")
def test_hybrid_fingerprint(self, mock_incr: MagicMock, mock_distribution: MagicMock) -> None:
new_event, new_variants, new_grouphash, _ = create_new_event(
self.project,
fingerprint=["{{ default }}", "maisey"],
)
with patch(
"sentry.grouping.ingest.seer.get_similarity_data_from_seer",
return_value=[],
):
assert get_seer_similar_issues(new_event, new_grouphash, new_variants) == (None, None)
assert_metrics_call(
mock_incr,
"get_seer_similar_issues",
"no_seer_matches",
{"is_hybrid": True, "training_mode": False},
)
assert_metrics_call(
mock_distribution,
"seer_results_returned",
"no_seer_matches",
{"is_hybrid": True, "training_mode": False},
value=0,
)
# Ensure we're not recording things we don't want to be. (This metric should only be
# recorded when there are results to check.)
incr_metrics_recorded = {call.args[0] for call in mock_incr.mock_calls}
distribution_metrics_recorded = {call.args[0] for call in mock_distribution.mock_calls}
assert "grouping.similarity.hybrid_fingerprint_match_check" not in incr_metrics_recorded
assert (
"grouping.similarity.hybrid_fingerprint_results_checked"
not in distribution_metrics_recorded
)
| NoParentGroupFoundTest |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/filters/base.py | {
"start": 5541,
"end": 5858
} | class ____(Filter):
"""
Always enable feature.
"""
def __call__(self) -> bool:
return True
def __or__(self, other: Filter) -> Filter:
return self
def __and__(self, other: Filter) -> Filter:
return other
def __invert__(self) -> Never:
return Never()
| Always |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 3832,
"end": 4954
} | class ____(BaseParam[bool]):
"""Filter Dags by favorite status."""
def __init__(self, user_id: str, value: T | None = None, skip_none: bool = True) -> None:
super().__init__(skip_none=skip_none)
self.user_id = user_id
def to_orm(self, select_stmt: Select) -> Select:
if self.value is None and self.skip_none:
return select_stmt
if self.value:
select_stmt = select_stmt.join(DagFavorite, DagFavorite.dag_id == DagModel.dag_id).where(
DagFavorite.user_id == self.user_id
)
else:
select_stmt = select_stmt.where(
not_(
sql_select(DagFavorite)
.where(and_(DagFavorite.dag_id == DagModel.dag_id, DagFavorite.user_id == self.user_id))
.exists()
)
)
return select_stmt
@classmethod
def depends(cls, user: GetUserDep, is_favorite: bool | None = Query(None)) -> _FavoriteFilter:
instance = cls(user_id=str(user.get_id())).set_value(is_favorite)
return instance
| _FavoriteFilter |
python | tensorflow__tensorflow | tensorflow/python/framework/ops_test.py | {
"start": 80825,
"end": 85351
} | class ____(test_util.TensorFlowTestCase):
def test_get_collections(self):
g = ops.Graph()
self.assertSequenceEqual(g.collections, [])
g.add_to_collection("key", 12)
g.add_to_collection("key", 15)
self.assertSequenceEqual(g.collections, ["key"])
g.add_to_collection("other", "foo")
self.assertSequenceEqual(sorted(g.collections), ["key", "other"])
self.assertSequenceEqual(
sorted(g.get_all_collection_keys()), ["key", "other"])
def test_add_to_collection(self):
g = ops.Graph()
g.add_to_collection("key", 12)
g.add_to_collection("other", "foo")
g.add_to_collection("key", 34)
# Note that only blank1 is returned.
g.add_to_collection("blah", 27)
blank1 = ObjectWithName("prefix/foo")
g.add_to_collection("blah", blank1)
blank2 = ObjectWithName("junk/foo")
g.add_to_collection("blah", blank2)
self.assertEqual([12, 34], g.get_collection("key"))
self.assertEqual([], g.get_collection("nothing"))
self.assertEqual([27, blank1, blank2], g.get_collection("blah"))
self.assertEqual([blank1], g.get_collection("blah", "prefix"))
self.assertEqual([blank1], g.get_collection("blah", ".*x"))
# Make sure that get_collection() returns a first-level
# copy of the collection, while get_collection_ref() returns
# the original list.
other_collection_snapshot = g.get_collection("other")
other_collection_ref = g.get_collection_ref("other")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo"], other_collection_ref)
g.add_to_collection("other", "bar")
self.assertEqual(["foo"], other_collection_snapshot)
self.assertEqual(["foo", "bar"], other_collection_ref)
self.assertEqual(["foo", "bar"], g.get_collection("other"))
self.assertTrue(other_collection_ref is g.get_collection_ref("other"))
# Verify that getting an empty collection ref returns a modifiable list.
empty_coll_ref = g.get_collection_ref("empty")
self.assertEqual([], empty_coll_ref)
empty_coll = g.get_collection("empty")
self.assertEqual([], empty_coll)
self.assertFalse(empty_coll is empty_coll_ref)
empty_coll_ref2 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref2 is empty_coll_ref)
# Add to the collection.
empty_coll_ref.append("something")
self.assertEqual(["something"], empty_coll_ref)
self.assertEqual(["something"], empty_coll_ref2)
self.assertEqual([], empty_coll)
self.assertEqual(["something"], g.get_collection("empty"))
empty_coll_ref3 = g.get_collection_ref("empty")
self.assertTrue(empty_coll_ref3 is empty_coll_ref)
def test_add_to_collections_uniquify(self):
g = ops.Graph()
g.add_to_collections([1, 2, 1], "key")
# Make sure "key" is not added twice
self.assertEqual(["key"], g.get_collection(1))
def test_add_to_collections_from_list(self):
g = ops.Graph()
g.add_to_collections(["abc", "123"], "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_tuple(self):
g = ops.Graph()
g.add_to_collections(("abc", "123"), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_generator(self):
g = ops.Graph()
def generator():
yield "abc"
yield "123"
g.add_to_collections(generator(), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_set(self):
g = ops.Graph()
g.add_to_collections(set(["abc", "123"]), "key")
self.assertEqual(["key"], g.get_collection("abc"))
self.assertEqual(["key"], g.get_collection("123"))
def test_add_to_collections_from_string(self):
g = ops.Graph()
g.add_to_collections("abc", "key")
self.assertEqual(["key"], g.get_collection("abc"))
def test_default_graph(self):
with ops.Graph().as_default():
ops.add_to_collection("key", 90)
ops.add_to_collection("key", 100)
# Collections are ordered.
self.assertEqual([90, 100], ops.get_collection("key"))
ops.NotDifferentiable("FloatOutput")
@ops.RegisterGradient("CopyOp")
def _CopyGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
@ops.RegisterGradient("copy_override")
def _CopyOverrideGrad(op, x_grad): # pylint: disable=invalid-name
_ = op
return x_grad
| CollectionTest |
python | sqlalchemy__sqlalchemy | test/engine/test_processors.py | {
"start": 3687,
"end": 3994
} | class ____(_DateProcessorTest):
@classmethod
def setup_test_class(cls):
from sqlalchemy.engine import _processors_cy
from sqlalchemy.util.langhelpers import load_uncompiled_module
py_mod = load_uncompiled_module(_processors_cy)
cls.module = py_mod
| PyDateProcessorTest |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-db2/llama_index/vector_stores/db2/base.py | {
"start": 5591,
"end": 13763
} | class ____(BasePydanticVectorStore):
"""
`DB2LlamaVS` vector store.
To use, you should have both:
- the ``ibm_db`` python package installed
- a connection to db2 database with vector store feature (v12.1.2+)
"""
metadata_column: str = "metadata"
stores_text: bool = True
_client: Connection = PrivateAttr()
table_name: str
distance_strategy: DistanceStrategy
batch_size: Optional[int]
params: Optional[dict[str, Any]]
embed_dim: int
def __init__(
self,
_client: Connection,
table_name: str,
distance_strategy: DistanceStrategy = DistanceStrategy.EUCLIDEAN_DISTANCE,
batch_size: Optional[int] = 32,
embed_dim: int = 1536,
params: Optional[dict[str, Any]] = None,
):
try:
import ibm_db_dbi
except ImportError as e:
raise ImportError(
"Unable to import ibm_db_dbi, please install with "
"`pip install -U ibm_db`."
) from e
try:
"""Initialize with necessary components."""
super().__init__(
table_name=table_name,
distance_strategy=distance_strategy,
batch_size=batch_size,
embed_dim=embed_dim,
params=params,
)
# Assign _client to PrivateAttr after the Pydantic initialization
object.__setattr__(self, "_client", _client)
create_table(_client, table_name, embed_dim)
except ibm_db_dbi.DatabaseError as db_err:
logger.exception(f"Database error occurred while create table: {db_err}")
raise RuntimeError(
"Failed to create table due to a database error."
) from db_err
except ValueError as val_err:
logger.exception(f"Validation error: {val_err}")
raise RuntimeError(
"Failed to create table due to a validation error."
) from val_err
except Exception as ex:
logger.exception("An unexpected error occurred while creating the index.")
raise RuntimeError(
"Failed to create table due to an unexpected error."
) from ex
@property
def client(self) -> Any:
"""Get client."""
return self._client
@classmethod
def class_name(cls) -> str:
return "DB2LlamaVS"
def _append_meta_filter_condition(
self, where_str: Optional[str], exact_match_filter: list
) -> str:
filter_str = " AND ".join(
f"JSON_VALUE({self.metadata_column}, '$.{filter_item.key}') = '{filter_item.value}'"
for filter_item in exact_match_filter
)
if where_str is None:
where_str = filter_str
else:
where_str += " AND " + filter_str
return where_str
def _build_insert(self, values: List[BaseNode]) -> List[tuple]:
_data = []
for item in values:
item_values = tuple(
column["extract_func"](item) for column in column_config.values()
)
_data.append(item_values)
return _data
def _build_query(
self, distance_function: str, k: int, where_str: Optional[str] = None
) -> str:
where_clause = f"WHERE {where_str}" if where_str else ""
return f"""
SELECT id,
doc_id,
text,
SYSTOOLS.BSON2JSON(node_info),
SYSTOOLS.BSON2JSON(metadata),
vector_distance(embedding, VECTOR(?, {self.embed_dim}, FLOAT32), {distance_function}) AS distance
FROM {self.table_name}
{where_clause}
ORDER BY distance
FETCH FIRST {k} ROWS ONLY
"""
@_handle_exceptions
def add(self, nodes: list[BaseNode], **kwargs: Any) -> list[str]:
if not nodes:
return []
for result_batch in iter_batch(nodes, self.batch_size):
bind_values = self._build_insert(values=result_batch)
dml = f"""
INSERT INTO {self.table_name} ({", ".join(column_config.keys())})
VALUES (?, ?, VECTOR(?, {self.embed_dim}, FLOAT32), SYSTOOLS.JSON2BSON(?), SYSTOOLS.JSON2BSON(?), ?)
"""
cursor = self.client.cursor()
try:
# Use executemany to insert the batch
cursor.executemany(dml, bind_values)
cursor.execute("COMMIT")
finally:
cursor.close()
return [node.node_id for node in nodes]
@_handle_exceptions
def delete(self, ref_doc_id: str, **kwargs: Any) -> None:
ddl = f"DELETE FROM {self.table_name} WHERE doc_id = '{ref_doc_id}'"
cursor = self._client.cursor()
try:
cursor.execute(ddl)
cursor.execute("COMMIT")
finally:
cursor.close()
@_handle_exceptions
def drop(self) -> None:
drop_table(self._client, self.table_name)
@_handle_exceptions
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
distance_function = _get_distance_function(self.distance_strategy)
where_str = (
f"doc_id in {_stringify_list(query.doc_ids)}" if query.doc_ids else None
)
if query.filters is not None:
where_str = self._append_meta_filter_condition(
where_str, query.filters.filters
)
# build query sql
query_sql = self._build_query(
distance_function, query.similarity_top_k, where_str
)
embedding = f"{query.query_embedding}"
cursor = self._client.cursor()
try:
cursor.execute(query_sql, [embedding])
results = cursor.fetchall()
finally:
cursor.close()
similarities = []
ids = []
nodes = []
for result in results:
doc_id = result[1]
text = result[2] if result[2] is not None else ""
node_info = json.loads(result[3] if result[3] is not None else "{}")
metadata = json.loads(result[4] if result[4] is not None else "{}")
if query.node_ids:
if result[0] not in query.node_ids:
continue
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
try:
node = metadata_dict_to_node(metadata)
node.set_content(text)
except Exception:
# Note: deprecated legacy logic for backward compatibility
node = TextNode(
id_=result[0],
text=text,
metadata=metadata,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(node_id=doc_id)
},
)
nodes.append(node)
similarities.append(1.0 - math.exp(-result[5]))
ids.append(result[0])
return VectorStoreQueryResult(nodes=nodes, similarities=similarities, ids=ids)
@classmethod
@_handle_exceptions
def from_documents(
cls: Type[DB2LlamaVS],
docs: List[BaseNode],
table_name: str = "llama_index",
**kwargs: Any,
) -> DB2LlamaVS:
"""Return VectorStore initialized from texts and embeddings."""
_client = kwargs.get("client")
if _client is None:
raise ValueError("client parameter is required...")
params = kwargs.get("params")
distance_strategy = kwargs.get("distance_strategy")
drop_table(_client, table_name)
embed_dim = kwargs.get("embed_dim")
vss = cls(
_client=_client,
table_name=table_name,
params=params,
distance_strategy=distance_strategy,
embed_dim=embed_dim,
)
vss.add(nodes=docs)
return vss
| DB2LlamaVS |
python | tensorflow__tensorflow | tensorflow/python/distribute/cross_device_ops.py | {
"start": 42169,
"end": 57547
} | class ____(CrossDeviceOps):
"""All-reduce cross device ops using collective ops.
In the between-graph replicated training, it will still do all-reduces across
all workers and then put results on the right destinations.
"""
def __init__(self,
devices,
group_size,
options,
collective_keys=None,
canonicalize_devices=True):
"""Initializes the object.
Args:
devices: a list of device strings to run collectives on.
group_size: the global group size. For between-graph replicated training
it's the total number of devices across all workers.
options: a `tf.distribute.experimental.CommunicationOptions`.
collective_keys: an optional CollectiveKey object.
canonicalize_devices: Whether to canonicalize devices for workers or not.
"""
if group_size % len(devices) > 0:
raise ValueError("group_size must be divisible by the number of devices.")
self._group_size = group_size
self._options = options
self._collective_keys = (collective_keys or
cross_device_utils.CollectiveKeys())
# This lock guards all collective launches, i.e. calls to
# cross_device_utils.build_collectve_*.
#
# In a multi threaded eager program we need to ensure different groups of
# collectives don't interleave each other, otherwise there could be
# deadlocks. E.g. if two user threads both are launching collectives:
# user-thread-0 device0 device1
# user-thread-1 device0 device1
# In eager mode, we use one thread per device to launch collective ops, so
# the above launch sequences end up with the following queues:
# device-0 collective-0 collective-1
# device-1 collective-1 collective-0
# This deadlocks since neither collective is able to finish.
self._lock = threading.Lock()
if canonicalize_devices:
self._devices = tuple(device_util.canonicalize(d) for d in devices)
else:
self._devices = tuple(
device_util.canonicalize_without_job_and_task(d) for d in devices)
group_key = self._collective_keys.get_group_key(self._devices)
self._launchers = []
# Whether to only use NCCL for batched all-reduce when NCCL is requested.
# This is because of the lack of mechanism to order NCCL operations
# deterministically.
self._limited_nccl = False
for device in self._devices:
launcher = cross_device_utils.CollectiveReplicaLauncher(
group_key, group_size, self._collective_keys, device, options)
self._launchers.append(launcher)
if not launcher.can_order_nccl():
self._limited_nccl = True
super(CollectiveAllReduce, self).__init__()
self._canonicalize_devices = canonicalize_devices
@property
def _num_between_graph_workers(self):
# Currently we only support equal number of devices on each worker.
return self._group_size / len(self._devices)
def _all_reduce(self, reduce_op, value, replica_id, options):
"""Implements CrossDeviceOps.all_reduce."""
# TODO(b/122840926): reuse this method in _batch_all_reduce.
flat_values = nest.flatten(value)
# If NCCL launches can't be ordered (self._limited_nccl == True), we only
# use NCCL when batch_size > 1, hoping that there's only one batched
# all-reduce, which is the gradient aggregation in optimizer. For TF 2.x,
# NCCL launches are always ordered.
if (self._limited_nccl and options.implementation
== collective_util.CommunicationImplementation.NCCL and
len(flat_values) == 1):
options = options.merge(
collective_util.Options(
implementation=collective_util.CommunicationImplementation.RING))
launcher = self._launchers[replica_id]
dense_values, dense_indices, sparse_values, sparse_indices = (
cross_device_utils.split_by_sparsity(flat_values))
dense_results = []
sparse_results = []
if dense_values:
# Reverse the lists so that there's better chance that values follows
# the order in which they are calculated (e.g. when they're gradients), so
# as to overlap calculation with communication. However, this may not be
# optimal for cases like gradients of complicated non-sequential models.
#
# Note that we reverse the list before packing so that the first pack
# won't be too small, since it's more likely for first few packs to have
# long queuing time due to concurrent intense computation.
#
# TODO(b/147393503): explore solutions for optimal ordering.
dense_values.reverse()
packs = cross_device_utils.group_by_size(dense_values,
options.bytes_per_pack)
if not context.executing_eagerly() and replica_id == 0:
logging.info(
"Collective all_reduce tensors: %d all_reduces, num_devices = %d, "
"group_size = %d, implementation = %s, num_packs = %d",
len(dense_values), len(self._launchers), self._group_size,
options.implementation, len(packs))
dense_results = launcher.batch_all_reduce(packs, options)
if reduce_op == reduce_util.ReduceOp.MEAN:
for i, v in enumerate(dense_results):
with ops.device(self._devices[replica_id]):
dense_results[i] = v / self._group_size
dense_results.reverse()
if sparse_values:
if not context.executing_eagerly() and replica_id == 0:
logging.info(
"Collective all_reduce IndexedSlices: %d all_reduces, num_devices ="
"%d, group_size = %d, implementation = %s", len(sparse_values),
len(self._launchers), self._group_size, options.implementation)
for indexed_slice in sparse_values:
sparse_results.append(
launcher.all_reduce_indexed_slices(indexed_slice, options))
if reduce_op == reduce_util.ReduceOp.MEAN:
for i, v in enumerate(sparse_results):
with ops.device(self._devices[replica_id]):
sparse_results[i] = indexed_slices.IndexedSlices(
values=sparse_results[i].values / self._group_size,
indices=sparse_results[i].indices,
dense_shape=sparse_results[i].dense_shape)
flat_results = cross_device_utils.stitch_values(
((dense_results, dense_indices), (sparse_results, sparse_indices)))
return nest.pack_sequence_as(value, flat_results)
def _all_reduce_per_replica_values(self, reduce_op, per_replica_values,
options):
"""All reduce a list of per_replica_value."""
values_by_device = [[] for _ in self._devices]
num_devices = len(self._devices)
for per_replica in per_replica_values:
for i in range(num_devices):
values_by_device[i].append(per_replica.values[i])
if context.executing_eagerly():
def thread_fn(device_id):
with context.eager_mode():
return self._all_reduce(reduce_op, values_by_device[device_id],
device_id, options)
with self._lock:
pool = multiprocessing.pool.ThreadPool(len(self._devices))
outputs_by_device = pool.map(thread_fn, list(range(num_devices)))
pool.close()
else:
outputs_by_device = []
with self._lock:
for i in range(num_devices):
outputs_by_device.append(
self._all_reduce(reduce_op, values_by_device[i], i, options))
result = []
for values in zip(*outputs_by_device):
result.append(
distribute_utils.regroup(values, wrap_class=value_lib.Mirrored))
return result
def reduce_implementation(self, reduce_op, per_replica_value, destinations,
options):
values_util.mark_as_unsaveable()
all_reduced = self._all_reduce_per_replica_values(reduce_op,
[per_replica_value],
options)[0]
devices = get_devices_from(destinations, self._canonicalize_devices)
if _devices_match(per_replica_value, destinations,
self._canonicalize_devices):
return all_reduced
# Convert `all_reduced` to a `Mirrored` object, as a simple and uniform
# utility to access component for a particular device.
if not isinstance(all_reduced, value_lib.Mirrored):
all_reduced = value_lib.Mirrored([all_reduced])
# If we got this far, the destination devices do not match the all-reduce
# devices, so we must map from one to the other.
index = []
# We must add these control dependencies, otherwise we can get deadlock.
with ops.control_dependencies(all_reduced.values):
for d in devices:
with ops.device(d):
for v in all_reduced.values:
if v.device == d:
index.append(array_ops.identity(v))
break
else:
# TODO(josh11b): Once we add support for model parallelism, get the
# copy from the corresponding replica instead of the primary.
index.append(array_ops.identity(all_reduced._primary)) # pylint: disable=protected-access
return distribute_utils.regroup(index, wrap_class=value_lib.Mirrored)
def batch_reduce_implementation(self, reduce_op, value_destination_pairs,
options):
values_util.mark_as_unsaveable()
all_devices_match = _all_devices_match(value_destination_pairs,
self._canonicalize_devices)
if all_devices_match:
return self._all_reduce_per_replica_values(
reduce_op, [v[0] for v in value_destination_pairs], options)
else:
if not all_devices_match:
logging.log_first_n(
logging.WARN, "Efficient batch_reduce is not supported if "
"destinations are different.", 10)
return [
self.reduce_implementation(reduce_op, value, dest, options)
for value, dest in value_destination_pairs
]
def _gather_implementation(self, per_replica_value, destinations, axis,
options):
all_gathered = self._batch_all_gather([per_replica_value], axis, options)[0]
values_util.mark_as_unsaveable()
devices = get_devices_from(destinations, self._canonicalize_devices)
if _devices_match(per_replica_value, destinations,
self._canonicalize_devices):
return all_gathered
# Convert `all_gathered` to a `Mirrored` object, as a simple and uniform
# utility to access component for a particular device.
if not isinstance(all_gathered, value_lib.Mirrored):
all_gathered = value_lib.Mirrored([all_gathered])
# If we got this far, the destination devices do not match the all-gather
# devices, so we must map from one to the other.
index = []
# We must add these control dependencies, otherwise we can get deadlock.
with ops.control_dependencies(all_gathered.values):
for d in devices:
with ops.device(d):
for v in all_gathered.values:
if v.device == d:
index.append(array_ops.identity(v))
break
else:
index.append(array_ops.identity(all_gathered._primary)) # pylint: disable=protected-access
return distribute_utils.regroup(index, wrap_class=value_lib.Mirrored)
def _batch_all_gather(self, per_replica_values, axis, options):
"""all gather multiple per-replica-values."""
batch_size = len(per_replica_values)
# For now, we use NCCL only when batch_size > 1.
# TODO(b/132575814): switch to NCCL for all collectives when implementation
# is NCCL.
if (self._limited_nccl and options.implementation
== collective_util.CommunicationImplementation.NCCL and
batch_size == 1):
options = options.merge(
collective_util.Options(
implementation=collective_util.CommunicationImplementation.RING))
logging.log_first_n(
logging.INFO, "Collective batch_all_gather: %d all-gathers, "
"num_devices = %d, group_size = %d, implementation = %s, " %
(batch_size, len(
self._devices), self._group_size, options.implementation), 10)
def compute_gathered_values():
gathered_values = []
with self._lock, ops.name_scope("allgather"):
for per_replica in per_replica_values:
outputs = []
for i in range(len(self._devices)):
outputs.append(self._launchers[i].all_gather(
per_replica.values[i], axis, options))
gathered_values.append(outputs)
return gathered_values
if context.executing_eagerly():
gathered_values = def_function.function(compute_gathered_values)()
else:
gathered_values = compute_gathered_values()
mirrored = []
for value in gathered_values:
mirrored.append(
distribute_utils.regroup(value, wrap_class=value_lib.Mirrored))
return mirrored
def __deepcopy__(self, memo):
# distribute_coordinator deep-copies the strategy object, so
# CollectiveAllReduce needs to support deep copy as well.
collective_keys = copy.deepcopy(self._collective_keys, memo)
return CollectiveAllReduce(self._devices, self._group_size, self._options,
collective_keys, self._canonicalize_devices)
def select_cross_device_ops(devices, session_config=None):
"""Find the best `CrossDeviceOps` locally given a `tf.compat.v1.ConfigProto`.
Args:
devices: a list of devices passed to `tf.distribute.Strategy`.
session_config: a `tf.compat.v1.ConfigProto` or `None`. If `None`, it will
make decision based on all logical devices.
Returns:
A subclass of `CrossDeviceOps`.
"""
requested_devices = set(device_util.canonicalize(d) for d in devices)
if ops.executing_eagerly_outside_functions():
logical_gpus = context.context().list_logical_devices(device_type="GPU")
physical_gpus = context.context().list_physical_devices(device_type="GPU")
if len(logical_gpus) != len(physical_gpus):
logging.warning("NCCL is not supported when using virtual GPUs, falling"
"back to reduction to one device")
return ReductionToOneDevice()
machine_devices = context.context().list_logical_devices()
else:
machine_devices = device_lib.list_local_devices(
session_config=session_config)
using_devices = set()
for d in machine_devices:
if device_util.canonicalize(d.name) in requested_devices:
using_devices.add(d.name)
if len(using_devices) != len(requested_devices):
logging.warning(
"Some requested devices in `tf.distribute.Strategy` are not visible "
"to TensorFlow: %s", ",".join(list(requested_devices - using_devices)))
if any("gpu" not in d.lower() for d in requested_devices):
logging.warning("There are non-GPU devices in `tf.distribute.Strategy`, "
"not using nccl allreduce.")
return ReductionToOneDevice()
if kernels.get_registered_kernels_for_op("NcclAllReduce"):
return NcclAllReduce(num_packs=1)
else:
logging.warning("Nccl kernel is not found, not using nccl allreduce.")
return ReductionToOneDevice()
| CollectiveAllReduce |
python | cython__cython | Cython/Compiler/UtilNodes.py | {
"start": 338,
"end": 755
} | class ____:
# THIS IS DEPRECATED, USE LetRefNode instead
temp = None
needs_xdecref = False
def __init__(self, type, needs_cleanup=None):
self.type = type
if needs_cleanup is None:
self.needs_cleanup = type.is_pyobject
else:
self.needs_cleanup = needs_cleanup
def ref(self, pos):
return TempRefNode(pos, handle=self, type=self.type)
| TempHandle |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/sql/functions.py | {
"start": 59487,
"end": 59634
} | class ____(AnsiFunction[datetime.date]):
"""The CURRENT_DATE() SQL function."""
type = sqltypes.Date()
inherit_cache = True
| current_date |
python | encode__django-rest-framework | tests/test_fields.py | {
"start": 41980,
"end": 42508
} | class ____(FieldValues):
"""
Output values for `DecimalField` with `coerce_to_string=False`.
"""
valid_inputs = {}
invalid_inputs = {}
outputs = {
1.09: Decimal('1.1'),
0.04: Decimal('0.0'),
'1.09': Decimal('1.1'),
'0.04': Decimal('0.0'),
Decimal('1.09'): Decimal('1.1'),
Decimal('0.04'): Decimal('0.0'),
}
field = serializers.DecimalField(
max_digits=3, decimal_places=1,
coerce_to_string=False
)
| TestNoStringCoercionDecimalField |
python | huggingface__transformers | tests/models/auto/test_tokenization_auto.py | {
"start": 23327,
"end": 23502
} | class ____(transformers.PreTrainedTokenizer):
def get_vocab(self):
return {}
"""
nop_config_code = """
from transformers import PreTrainedConfig
| NopTokenizer |
python | pypa__warehouse | tests/conftest.py | {
"start": 4432,
"end": 9822
} | class ____:
def __init__(self):
self._services = defaultdict(lambda: defaultdict(dict))
def register_service(self, service_obj, iface=None, context=None, name=""):
self._services[iface][context][name] = service_obj
def find_service(self, iface=None, context=None, name=""):
return self._services[iface][context][name]
@pytest.fixture
def pyramid_services(
billing_service,
email_service,
metrics,
organization_service,
subscription_service,
token_service,
user_service,
project_service,
github_oidc_service,
activestate_oidc_service,
integrity_service,
macaroon_service,
helpdesk_service,
notification_service,
query_results_cache_service,
search_service,
domain_status_service,
ratelimit_service,
):
services = _Services()
# Register our global services.
services.register_service(billing_service, IBillingService, None, name="")
services.register_service(email_service, IEmailSender, None, name="")
services.register_service(metrics, IMetricsService, None, name="")
services.register_service(organization_service, IOrganizationService, None, name="")
services.register_service(subscription_service, ISubscriptionService, None, name="")
services.register_service(token_service, ITokenService, None, name="password")
services.register_service(token_service, ITokenService, None, name="email")
services.register_service(user_service, IUserService, None, name="")
services.register_service(project_service, IProjectService, None, name="")
services.register_service(
github_oidc_service, IOIDCPublisherService, None, name="github"
)
services.register_service(
activestate_oidc_service, IOIDCPublisherService, None, name="activestate"
)
services.register_service(integrity_service, IIntegrityService, None)
services.register_service(macaroon_service, IMacaroonService, None, name="")
services.register_service(helpdesk_service, IHelpDeskService, None)
services.register_service(notification_service, IAdminNotificationService)
services.register_service(query_results_cache_service, IQueryResultsCache)
services.register_service(search_service, ISearchService)
services.register_service(domain_status_service, IDomainStatusService)
services.register_service(ratelimit_service, IRateLimiter, name="email.add")
services.register_service(ratelimit_service, IRateLimiter, name="email.verify")
return services
@pytest.fixture
def pyramid_request(pyramid_services, jinja):
pyramid.testing.setUp()
dummy_request = pyramid.testing.DummyRequest()
dummy_request.find_service = pyramid_services.find_service
dummy_request.remote_addr = REMOTE_ADDR
dummy_request.remote_addr_hashed = REMOTE_ADDR_HASHED
dummy_request.authentication_method = pretend.stub()
dummy_request._unauthenticated_userid = None
dummy_request.user = None
dummy_request.oidc_publisher = None
dummy_request.metrics = dummy_request.find_service(IMetricsService)
dummy_request.registry.registerUtility(jinja, IJinja2Environment, name=".jinja2")
dummy_request._task_stub = pretend.stub(
delay=pretend.call_recorder(lambda *a, **kw: None)
)
dummy_request.task = pretend.call_recorder(
lambda *a, **kw: dummy_request._task_stub
)
dummy_request.log = pretend.stub(
bind=pretend.call_recorder(lambda *args, **kwargs: dummy_request.log),
info=pretend.call_recorder(lambda *args, **kwargs: None),
warning=pretend.call_recorder(lambda *args, **kwargs: None),
error=pretend.call_recorder(lambda *args, **kwargs: None),
)
def localize(message, **kwargs):
ts = TranslationString(message, **kwargs)
return ts.interpolate()
dummy_request._ = localize
yield dummy_request
pyramid.testing.tearDown()
@pytest.fixture
def pyramid_config(pyramid_request):
with pyramid.testing.testConfig(request=pyramid_request) as config:
yield config
@pytest.fixture
def pyramid_user(pyramid_request):
user = UserFactory.create()
EmailFactory.create(user=user, verified=True)
pyramid_request.user = user
return user
@pytest.fixture
def cli():
runner = click.testing.CliRunner()
with runner.isolated_filesystem():
yield runner
@pytest.fixture(scope="session")
def database(request, worker_id):
config = get_config(request)
pg_host = config.get("host")
pg_port = config.get("port") or os.environ.get("PGPORT", 5432)
pg_user = config.get("user")
pg_db = f"tests-{worker_id}"
pg_version = config.get("version", 16.1)
janitor = DatabaseJanitor(
user=pg_user,
host=pg_host,
port=pg_port,
dbname=pg_db,
version=pg_version,
)
# In case the database already exists, possibly due to an aborted test run,
# attempt to drop it before creating
try:
janitor.drop()
except InvalidCatalogName:
# We can safely ignore this exception as that means there was
# no leftover database
pass
# Create our Database.
janitor.init()
# Ensure our database gets deleted.
@request.addfinalizer
def drop_database():
janitor.drop()
return f"postgresql+psycopg://{pg_user}@{pg_host}:{pg_port}/{pg_db}"
| _Services |
python | pennersr__django-allauth | allauth/headless/mfa/views.py | {
"start": 4589,
"end": 6411
} | class ____(AuthenticatedAPIView):
input_class = {
"POST": AddWebAuthnInput,
"PUT": UpdateWebAuthnInput,
"DELETE": DeleteWebAuthnInput,
}
def handle(self, request, *args, **kwargs):
if request.method in ["GET", "POST"]:
err = _validate_can_add_authenticator(request)
if err:
return err
return super().handle(request, *args, **kwargs)
def get(self, request, *args, **kwargs):
passwordless = "passwordless" in request.GET
creation_options = webauthn_flows.begin_registration(
request, request.user, passwordless
)
return response.AddWebAuthnResponse(request, creation_options)
def get_input_kwargs(self):
return {"user": self.request.user}
def post(self, request, *args, **kwargs):
auth, rc_auth = webauthn_flows.add_authenticator(
request,
name=self.input.cleaned_data["name"],
credential=self.input.cleaned_data["credential"],
)
did_generate_recovery_codes = bool(rc_auth)
return response.AuthenticatorResponse(
request,
auth,
meta={"recovery_codes_generated": did_generate_recovery_codes},
)
def put(self, request, *args, **kwargs):
authenticator = self.input.cleaned_data["id"]
webauthn_flows.rename_authenticator(
request, authenticator, self.input.cleaned_data["name"]
)
return response.AuthenticatorResponse(request, authenticator)
def delete(self, request, *args, **kwargs):
authenticators = self.input.cleaned_data["authenticators"]
webauthn_flows.remove_authenticators(request, authenticators)
return response.AuthenticatorsDeletedResponse(request)
| ManageWebAuthnView |
python | kamyu104__LeetCode-Solutions | Python/maximum-and-sum-of-array.py | {
"start": 2656,
"end": 3438
} | class ____(object):
def maximumANDSum(self, nums, numSlots):
"""
:type nums: List[int]
:type numSlots: int
:rtype: int
"""
def count(x):
result = 0
while x:
result += x%3
x //= 3
return result
dp = [0]*(3**numSlots)
for mask in xrange(1, len(dp)):
i = count(mask)-1
x = nums[i] if i < len(nums) else 0
base = 1
for slot in xrange(1, numSlots+1):
if mask//base%3:
dp[mask] = max(dp[mask], (x&slot)+dp[mask-base])
base *= 3
return dp[-1]
# Time: O(n * 3^n)
# Space: O(3^n)
# memoization, top-down dp (easy to implement but slower)
| Solution3 |
python | langchain-ai__langchain | libs/core/langchain_core/exceptions.py | {
"start": 2348,
"end": 3340
} | class ____(Enum):
"""Error codes."""
INVALID_PROMPT_INPUT = "INVALID_PROMPT_INPUT"
INVALID_TOOL_RESULTS = "INVALID_TOOL_RESULTS" # Used in JS; not Py (yet)
MESSAGE_COERCION_FAILURE = "MESSAGE_COERCION_FAILURE"
MODEL_AUTHENTICATION = "MODEL_AUTHENTICATION" # Used in JS; not Py (yet)
MODEL_NOT_FOUND = "MODEL_NOT_FOUND" # Used in JS; not Py (yet)
MODEL_RATE_LIMIT = "MODEL_RATE_LIMIT" # Used in JS; not Py (yet)
OUTPUT_PARSING_FAILURE = "OUTPUT_PARSING_FAILURE"
def create_message(*, message: str, error_code: ErrorCode) -> str:
"""Create a message with a link to the LangChain troubleshooting guide.
Args:
message: The message to display.
error_code: The error code to display.
Returns:
The full message with the troubleshooting link.
"""
return (
f"{message}\n"
"For troubleshooting, visit: https://docs.langchain.com/oss/python/langchain"
f"/errors/{error_code.value} "
)
| ErrorCode |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/flake8_pie/PIE790.py | {
"start": 2005,
"end": 2177
} | class ____(Protocol[int]):
def func(self) -> str:
"""Docstring"""
...
def impl(self) -> str:
"""Docstring"""
return self.func()
| Repro |
python | viewflow__viewflow | tests/test_urls__base.py | {
"start": 1150,
"end": 1969
} | class ____(TestCase): # noqa: D101
def test_created_urls(self):
self.assertEqual("/", reverse("root:index"))
self.assertEqual("/test/", reverse("root:nested:index"))
self.assertEqual("/test/page/", reverse("root:nested:page"))
self.assertEqual("/nested2/", reverse("root:nested2:index"))
self.assertEqual("/nested2/page/", reverse("root:nested2:page"))
def test_urlconf_resolve(self):
self.assertEqual("/", urlconfig.reverse("index"))
self.assertEqual("/test/", urlconfig.nested_path.viewset.reverse("index"))
self.assertEqual("/test/page/", urlconfig.nested_path.viewset.reverse("page"))
def test_auto_redirect(self):
response = self.client.get(reverse("root:nested:index"))
self.assertRedirects(response, "/test/page/")
| Test |
python | huggingface__transformers | tests/models/mimi/test_modeling_mimi.py | {
"start": 1819,
"end": 5421
} | class ____:
def __init__(
self,
parent,
batch_size=5,
num_channels=1,
is_training=False,
intermediate_size=40,
hidden_size=32,
num_filters=8,
num_residual_layers=1,
upsampling_ratios=[8, 4],
codebook_size=64,
vector_quantization_hidden_dimension=64,
codebook_dim=64,
upsample_groups=32,
num_hidden_layers=2,
num_attention_heads=2,
num_key_value_heads=2,
sliding_window=4,
use_cache=False,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.is_training = is_training
self.intermediate_size = intermediate_size
self.hidden_size = hidden_size
self.num_filters = num_filters
self.num_residual_layers = num_residual_layers
self.upsampling_ratios = upsampling_ratios
self.codebook_size = codebook_size
self.vector_quantization_hidden_dimension = vector_quantization_hidden_dimension
self.codebook_dim = codebook_dim
self.upsample_groups = upsample_groups
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.num_key_value_heads = num_key_value_heads
self.sliding_window = sliding_window
self.use_cache = use_cache
def prepare_config_and_inputs(self, input_values_length=None):
input_values = floats_tensor(
[
self.batch_size,
self.num_channels,
self.intermediate_size if input_values_length is None else input_values_length,
],
scale=1.0,
)
config = self.get_config()
inputs_dict = {"input_values": input_values}
return config, inputs_dict
def prepare_config_and_inputs_for_common(self, input_values_length=None):
config, inputs_dict = self.prepare_config_and_inputs(input_values_length=input_values_length)
return config, inputs_dict
def prepare_config_and_inputs_for_model_class(self, model_class):
config, inputs_dict = self.prepare_config_and_inputs()
inputs_dict["audio_codes"] = ids_tensor([self.batch_size, 1, self.num_channels], self.codebook_size).type(
torch.int32
)
return config, inputs_dict
def get_config(self):
return MimiConfig(
audio_channels=self.num_channels,
chunk_in_sec=None,
hidden_size=self.hidden_size,
num_filters=self.num_filters,
num_residual_layers=self.num_residual_layers,
upsampling_ratios=self.upsampling_ratios,
codebook_size=self.codebook_size,
vector_quantization_hidden_dimension=self.vector_quantization_hidden_dimension,
upsample_groups=self.upsample_groups,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
num_key_value_heads=self.num_key_value_heads,
sliding_window=self.sliding_window,
codebook_dim=self.codebook_dim,
use_cache=self.use_cache,
)
def create_and_check_model_forward(self, config, inputs_dict):
model = MimiModel(config=config).to(torch_device).eval()
input_values = inputs_dict["input_values"]
result = model(input_values)
self.parent.assertEqual(
result.audio_values.shape, (self.batch_size, self.num_channels, self.intermediate_size)
)
@require_torch
| MimiModelTester |
python | redis__redis-py | redis/multidb/healthcheck.py | {
"start": 5591,
"end": 6128
} | class ____(HealthCheck):
"""
Health check based on PING command.
"""
def check_health(self, database) -> bool:
if isinstance(database.client, Redis):
return database.client.execute_command("PING")
else:
# For a cluster checks if all nodes are healthy.
all_nodes = database.client.get_nodes()
for node in all_nodes:
if not node.redis_connection.execute_command("PING"):
return False
return True
| PingHealthCheck |
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/extractors/base.py | {
"start": 2055,
"end": 3129
} | class ____(ABC, LoggingMixin):
"""
Abstract base extractor class.
This is used mostly to maintain support for custom extractors.
"""
_allowed_query_params: list[str] = []
def __init__(self, operator):
super().__init__()
self.operator = operator
@classmethod
@abstractmethod
def get_operator_classnames(cls) -> list[str]:
"""
Get a list of operators that extractor works for.
This is an abstract method that subclasses should implement. There are
operators that work very similarly and one extractor can cover.
"""
raise NotImplementedError()
@abstractmethod
def _execute_extraction(self) -> OperatorLineage | None: ...
def extract(self) -> OperatorLineage | None:
return self._execute_extraction()
def extract_on_complete(self, task_instance) -> OperatorLineage | None:
return self.extract()
def extract_on_failure(self, task_instance) -> OperatorLineage | None:
return self.extract_on_complete(task_instance)
| BaseExtractor |
python | pytorch__pytorch | test/onnx/torchlib/test_ops.py | {
"start": 3111,
"end": 10934
} | class ____(common_utils.TestCase):
@parameterized.parameterized.expand(
[
(info.op.name, info)
for info in ops_test_data.TESTED_TORCHLIB_OPS
if isinstance(info.op, onnxscript.OnnxFunction)
],
skip_on_empty=True,
)
def test_script_function_passes_checker(
self, _, torchlib_op_info: ops_test_data.TorchLibOpInfo
):
function_proto = torchlib_op_info.op.to_function_proto()
onnx.checker.check_function(function_proto) # type: ignore[attr-defined]
def run_test_output_match(
test_suite: unittest.TestCase,
device: str,
dtype: torch.dtype,
op: opinfo_core.OpInfo,
function_executor: Callable,
tested_op_mapping: dict[
str,
ops_test_data.TorchLibOpInfo,
],
):
"""Base test method for testing each opset, used by instantiate_device_type_tests.
Args:
test_suite: The test class instance.
device: The PyTorch device. instantiate_device_type_tests provides this.
dtype: The PyTorch dtype. instantiate_device_type_tests provides this.
op: The OpInfo instance. instantiate_device_type_tests provides this.
function_executor: The function executor. This is a function that takes
a function and its arguments and returns the output of the function.
tested_op_mapping: The mapping of op name to the tested op.
"""
samples = op.sample_inputs(
device,
dtype,
requires_grad=False,
)
torchlib_op_info = tested_op_mapping[op.name]
# Obtain the input_wrangler that manipulates the OpInfo inputs
# to match the aten operator signature
# An example is nn.functional.upsample_nearest2d, which has a different signature
# than the aten operator upsample_nearest2d
onnx_function = torchlib_op_info.op
input_wrangler = torchlib_op_info.input_wrangler
if (
not ops_test_common.dtype_op_schema_compatible(dtype, onnx_function.op_schema)
and dtype not in COMPLEX_TYPES
):
test_suite.skipTest(
f"dtype '{dtype}' is not supported by the op '{op.name}'. "
f"Type constraints: {onnx_function.op_schema.type_constraints}"
)
# Obtain the tolerance for the op
rtol, atol = torchlib_op_info.get_tolerance(dtype)
for i, cpu_sample in enumerate(samples):
inputs = (cpu_sample.input, *cpu_sample.args)
# Provide the repr to subtest because tensors are not serializable in parallel test runs
with test_suite.subTest(
sample_num=i,
inputs=repr(
[
f"Tensor<{inp.shape}, dtype={inp.dtype}>"
if isinstance(inp, torch.Tensor)
else inp
for inp in inputs
]
),
kwargs=repr(cpu_sample.kwargs),
):
try:
device_type = cpu_sample.args[0].device.type
except (AttributeError, IndexError):
device_type = "cpu"
test_behavior, reason = _should_skip_xfail_test_sample(
op.name, cpu_sample, dtype, device_type
)
with ops_test_common.normal_xfail_skip_test_behaviors(
test_behavior, reason
):
input_onnx = [
ops_test_common.convert_tensor_to_numpy(x) for x in inputs
]
kwargs_onnx = ops_test_common.convert_kwargs_for_onnx(cpu_sample.kwargs)
if input_wrangler:
input_onnx, kwargs_onnx = input_wrangler(input_onnx, kwargs_onnx)
torch_output = op(*inputs, **cpu_sample.kwargs)
if isinstance(torch_output, torch.Tensor) and torch.is_complex(
torch_output
):
torch_output = torch.view_as_real(torch_output.resolve_conj())
reference_torch_outputs, _ = pytree.tree_flatten(torch_output)
if (
op.name.startswith("split")
or op.name.startswith("chunk")
or op.name.startswith("unbind")
or op.name
in {
"atleast_1d_Sequence",
"atleast_2d_Sequence",
"atleast_3d_Sequence",
}
):
# Hack for handling split, chunk and unbind which relies on SplitToSequence op.
# Split returns a Sequence that should be treats as a single
# value. So we wrap it into a tuple.
# TODO(justinchuby): Find a more general solution
reference_torch_outputs = [reference_torch_outputs]
test_name = test_suite.id()
function_output, model_proto = function_executor(
test_name,
reference_torch_outputs,
opset_version=torchlib_op_info.opset_introduced,
)(onnx_function, input_onnx, kwargs_onnx)
# Finally we re-flatten everything
# TODO: add pytree structure comparison.
flattened_torch_outputs, _ = pytree.tree_flatten(torch_output)
flattened_function_outputs, _ = pytree.tree_flatten(function_output)
assert flattened_torch_outputs
assert len(flattened_torch_outputs) == len(flattened_function_outputs)
for j, (torch_output, function_output) in enumerate(
zip(flattened_torch_outputs, flattened_function_outputs)
):
actual = torch.tensor(function_output)
expected = (
torch_output
if isinstance(torch_output, torch.Tensor)
else torch.tensor(torch_output)
)
if (
op.name in ops_test_data.NONDETERMINISTIC_OPS
or j in ops_test_data.COMPARE_SHAPE_ONLY_OPS[op.name]
):
# Check shape and dtype only for ops that are known to be
# nondeterministic
test_suite.assertEqual(actual.shape, expected.shape)
test_suite.assertEqual(actual.dtype, expected.dtype)
continue
# Use torch.testing as opposed to np.testing to ensure dtypes and shapes match
try:
torch.testing.assert_close(
actual,
expected,
rtol=rtol,
atol=atol,
equal_nan=True,
check_device=False,
)
except AssertionError as e:
if (
os.environ.get("CREATE_REPRODUCTION_REPORT") == "1"
and test_behavior is None
):
error_reproduction.create_mismatch_report(
test_name,
i,
model_proto,
inputs,
cpu_sample.kwargs,
actual,
expected,
e,
__file__,
)
if len(flattened_torch_outputs) > 1:
raise AssertionError(f"Output {j} mismatch") from e
raise
| TestFunctionValidity |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/operator1.py | {
"start": 234,
"end": 1250
} | class ____:
def __ne__(self, Bar):
return self
def __lt__(self, Bar):
return "string"
def __gt__(self, Bar):
return "string"
def __ge__(self, Bar):
return "string"
def __le__(self, Bar):
return "string"
def needs_a_string(val: str):
pass
def needs_a_string_or_bool(val: bool | str):
pass
def test():
a = A()
needs_a_string(a == a)
# This should generate an error because there
# is no __ne__ operator defined, so a bool
# value will result.
needs_a_string(a != a)
if True:
a = B()
# At this point, a should be of type Union[Foo, Bar],
# so the == operator should return either a str or
# a bool.
needs_a_string_or_bool(a == a)
# This should generate an error.
needs_a_string(a == a)
# This should generate an error.
needs_a_string_or_bool(a != a)
b = B()
needs_a_string(b < b)
needs_a_string(b > b)
needs_a_string(b <= b)
needs_a_string(b >= b)
| B |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pygments/formatters/terminal256.py | {
"start": 1155,
"end": 3193
} | class ____:
def __init__(self, fg=None, bg=None, bold=False, underline=False, italic=False):
self.fg = fg
self.bg = bg
self.bold = bold
self.underline = underline
self.italic = italic
def escape(self, attrs):
if len(attrs):
return "\x1b[" + ";".join(attrs) + "m"
return ""
def color_string(self):
attrs = []
if self.fg is not None:
if self.fg in ansicolors:
esc = codes[self.fg.replace('ansi','')]
if ';01m' in esc:
self.bold = True
# extract fg color code.
attrs.append(esc[2:4])
else:
attrs.extend(("38", "5", "%i" % self.fg))
if self.bg is not None:
if self.bg in ansicolors:
esc = codes[self.bg.replace('ansi','')]
# extract fg color code, add 10 for bg.
attrs.append(str(int(esc[2:4])+10))
else:
attrs.extend(("48", "5", "%i" % self.bg))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
if self.italic:
attrs.append("03")
return self.escape(attrs)
def true_color_string(self):
attrs = []
if self.fg:
attrs.extend(("38", "2", str(self.fg[0]), str(self.fg[1]), str(self.fg[2])))
if self.bg:
attrs.extend(("48", "2", str(self.bg[0]), str(self.bg[1]), str(self.bg[2])))
if self.bold:
attrs.append("01")
if self.underline:
attrs.append("04")
if self.italic:
attrs.append("03")
return self.escape(attrs)
def reset_string(self):
attrs = []
if self.fg is not None:
attrs.append("39")
if self.bg is not None:
attrs.append("49")
if self.bold or self.underline or self.italic:
attrs.append("00")
return self.escape(attrs)
| EscapeSequence |
python | pennersr__django-allauth | allauth/socialaccount/providers/mediawiki/provider.py | {
"start": 821,
"end": 1992
} | class ____(OAuth2Provider):
id = "mediawiki"
name = "MediaWiki"
account_class = MediaWikiAccount
oauth2_adapter_class = MediaWikiOAuth2Adapter
@staticmethod
def _get_email(data: Mapping[str, Any]) -> Optional[str]:
if data.get("confirmed_email"):
return data.get("email")
return None
def extract_uid(self, data):
return str(data["sub"])
def extract_extra_data(self, data: Mapping[str, Any]) -> Dict[str, Any]:
return dict(
email=self._get_email(data),
realname=data.get("realname"),
username=data.get("username"),
)
def extract_common_fields(self, data):
return dict(
email=self._get_email(data),
username=data.get("username"),
name=data.get("realname"),
)
def extract_email_addresses(self, data: Mapping[str, Any]) -> List[EmailAddress]:
# A MediaWiki account may not have email address.
if addr := self._get_email(data):
return [EmailAddress(email=addr, verified=True, primary=True)]
return []
provider_classes = [MediaWikiProvider]
| MediaWikiProvider |
python | spack__spack | lib/spack/spack/hooks/drop_redundant_rpaths.py | {
"start": 2146,
"end": 3538
} | class ____(BaseDirectoryVisitor):
"""Visitor that collects all elf files that have an rpath"""
def __init__(self):
# Keep track of what hardlinked files we've already visited.
self.visited = set()
def visit_file(self, root, rel_path, depth):
filepath = os.path.join(root, rel_path)
s = os.lstat(filepath)
identifier = (s.st_ino, s.st_dev)
# We're hitting a hardlink or symlink of an excluded lib, no need to parse.
if s.st_nlink > 1:
if identifier in self.visited:
return
self.visited.add(identifier)
result = drop_redundant_rpaths(filepath)
if result is not None:
old, new = result
tty.debug(f"Patched rpath in {rel_path} from {old!r} to {new!r}")
def visit_symlinked_file(self, root, rel_path, depth):
pass
def before_visit_dir(self, root, rel_path, depth):
# Always enter dirs
return True
def before_visit_symlinked_dir(self, root, rel_path, depth):
# Never enter symlinked dirs
return False
def post_install(spec, explicit=None):
# Skip externals
if spec.external:
return
# Only enable on platforms using ELF.
if not spec.satisfies("platform=linux"):
return
visit_directory_tree(spec.prefix, ElfFilesWithRPathVisitor())
| ElfFilesWithRPathVisitor |
python | wandb__wandb | tests/system_tests/test_functional/metaflow/flow_decoboth.py | {
"start": 490,
"end": 1811
} | class ____(FlowSpec):
# Not obvious how to support metaflow.IncludeFile
seed = Parameter("seed", default=1337)
test_size = Parameter("test_size", default=0.2)
raw_data = Parameter(
"raw_data",
default=pathlib.Path(__file__).parent / "wine.csv",
help="path to the raw data",
)
@wandb_log(datasets=True, models=True)
@step
def start(self):
self.raw_df = pd.read_csv(self.raw_data)
self.next(self.split_data)
@wandb_log(datasets=True)
@step
def split_data(self):
X = self.raw_df.drop("Wine", axis=1) # noqa: N806
y = self.raw_df[["Wine"]]
self.X_train, self.X_test, self.y_train, self.y_test = train_test_split(
X, y, test_size=self.test_size, random_state=self.seed
)
self.next(self.train)
@step
def train(self):
self.clf = RandomForestClassifier(
n_estimators=2,
max_depth=2,
random_state=self.seed,
)
self.clf.fit(self.X_train, self.y_train)
self.next(self.end)
@step
def end(self):
self.preds = self.clf.predict(self.X_test)
self.accuracy = accuracy_score(self.y_test, self.preds)
if __name__ == "__main__":
wandb.setup()
WandbExampleFlowDecoBoth()
| WandbExampleFlowDecoBoth |
python | falconry__falcon | tests/asgi/test_middleware_asgi.py | {
"start": 256,
"end": 816
} | class ____:
async def process_response(self, req, resp, resource, req_succeeded):
pass
@pytest.mark.parametrize(
'middleware',
[
MiddlewareIncompatibleWithWSGI_A(),
MiddlewareIncompatibleWithWSGI_B(),
MiddlewareIncompatibleWithWSGI_C(),
(MiddlewareIncompatibleWithWSGI_C(), MiddlewareIncompatibleWithWSGI_A()),
],
)
def test_raise_on_incompatible(middleware):
api = falcon.App()
with pytest.raises(falcon.CompatibilityError):
api.add_middleware(middleware)
| MiddlewareIncompatibleWithWSGI_C |
python | psf__black | tests/data/cases/line_ranges_basic.py | {
"start": 2586,
"end": 3519
} | class ____( object): # Trailing comment with extra leading space.
#NOTE: The following indentation is incorrect:
@decor( 1 * 3 )
def my_func( arg):
pass
try: # Trailing comment with extra leading space.
for i in range(10): # Trailing comment with extra leading space.
while condition:
if something:
then_something( )
elif something_else:
then_something_else( )
except ValueError as e:
unformatted( )
finally:
unformatted( )
async def test_async_unformatted( ): # Trailing comment with extra leading space.
async for i in some_iter( unformatted ): # Trailing comment with extra leading space.
await asyncio.sleep( 1 )
async with some_context( unformatted ):
print( "unformatted" )
| MyClass |
python | huggingface__transformers | tests/models/plbart/test_tokenization_plbart.py | {
"start": 1204,
"end": 9129
} | class ____(TokenizerTesterMixin, unittest.TestCase):
from_pretrained_id = "uclanlp/plbart-base"
tokenizer_class = PLBartTokenizer
test_tokenizer_from_extractor = False
test_sentencepiece = True
@classmethod
def setUpClass(cls):
super().setUpClass()
tokenizer = PLBartTokenizer(SAMPLE_VOCAB, language_codes="base")
tokenizer.save_pretrained(cls.tmpdirname)
# Integration test data - expected outputs for the default input string
integration_expected_tokens = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁b', 'orn', '▁in', '▁92', '000,', '▁and', '▁this', '▁is', '▁f', 'als', 'é', '.', '▁', '生', '活', '的', '真', '谛', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁', 'ป', 'ี', '▁ir', 'd', '▁', 'ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_token_ids = [670, 96, 14, 242, 33438, 39172, 34, 880, 56, 6309, 55, 26431, 9478, 135, 143, 96, 33, 3875, 33537, 33455, 33438, 33859, 34721, 33590, 34984, 3, 33720, 9434, 4536, 9434, 4536, 4536, 0, 8039, 0, 656, 418, 987, 625, 595, 229, 3914, 5158, 33475, 4536, 33455, 1160, 4042, 33448, 135, 33438, 34693, 34205, 4042, 33448, 33438, 34410, 22256, 654, 395, 144, 1777] # fmt: skip
expected_tokens_from_ids = ['▁This', '▁is', '▁a', '▁test', '▁', '😊', '▁I', '▁was', '▁b', 'orn', '▁in', '▁92', '000,', '▁and', '▁this', '▁is', '▁f', 'als', 'é', '.', '▁', '生', '活', '的', '真', '<unk>', '是', '▁Hi', '▁Hello', '▁Hi', '▁Hello', '▁Hello', '<s>', '▁hi', '<s>', '▁there', '▁The', '▁following', '▁string', '▁should', '▁be', '▁properly', '▁encoded', ':', '▁Hello', '.', '▁But', '▁ir', 'd', '▁and', '▁', 'ป', 'ี', '▁ir', 'd', '▁', 'ด', '▁Hey', '▁how', '▁are', '▁you', '▁doing'] # fmt: skip
integration_expected_decoded_text = "This is a test 😊 I was born in 92000, and this is falsé. 生活的真<unk>是 Hi Hello Hi Hello Hello<s> hi<s> there The following string should be properly encoded: Hello. But ird and ปี ird ด Hey how are you doing"
def test_full_base_tokenizer(self):
tokenizer = PLBartTokenizer(vocab_file=SAMPLE_VOCAB, language_codes="base")
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
end = tokenizer.vocab_size
language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 4, end)]
self.assertListEqual(language_tokens, ["__java__", "__python__", "__en_XX__", "<mask>"])
code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
input_ids = tokenizer(code).input_ids
self.assertEqual(
tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
code,
)
def test_full_multi_tokenizer(self):
tokenizer = PLBartTokenizer(vocab_file=SAMPLE_VOCAB, language_codes="multi")
tokens = tokenizer.tokenize("This is a test")
self.assertListEqual(tokens, ["▁This", "▁is", "▁a", "▁t", "est"])
self.assertListEqual(
tokenizer.convert_tokens_to_ids(tokens),
[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]],
)
tokens = tokenizer.tokenize("I was born in 92000, and this is falsé.")
self.assertListEqual(
tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
],
)
ids = tokenizer.convert_tokens_to_ids(tokens)
self.assertListEqual(
ids,
[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
],
)
back_tokens = tokenizer.convert_ids_to_tokens(ids)
self.assertListEqual(
back_tokens,
[
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
],
)
end = tokenizer.vocab_size
language_tokens = [tokenizer.convert_ids_to_tokens(x) for x in range(end - 7, end)]
self.assertListEqual(
language_tokens, ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"]
)
code = "java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"
input_ids = tokenizer(code).input_ids
self.assertEqual(
tokenizer.decode(input_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False),
code,
)
@require_torch
@require_sentencepiece
@require_tokenizers
| PLBartTokenizationTest |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_types.py | {
"start": 51953,
"end": 52878
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__only_on__ = "mssql"
@testing.provide_metadata
@testing.combinations(
("as_boolean_null", Boolean, True, "CREATE TABLE tbl (boo BIT NULL)"),
("as_bit_null", BIT, True, "CREATE TABLE tbl (boo BIT NULL)"),
(
"as_boolean_not_null",
Boolean,
False,
"CREATE TABLE tbl (boo BIT NOT NULL)",
),
("as_bit_not_null", BIT, False, "CREATE TABLE tbl (boo BIT NOT NULL)"),
id_="iaaa",
argnames="col_type, is_nullable, ddl",
)
def test_boolean_as_bit(self, col_type, is_nullable, ddl):
tbl = Table(
"tbl", self.metadata, Column("boo", col_type, nullable=is_nullable)
)
self.assert_compile(
schema.CreateTable(tbl),
ddl,
)
assert isinstance(tbl.c.boo.type.as_generic(), Boolean)
| BooleanTest |
python | getsentry__sentry | src/sentry/workflow_engine/processors/delayed_workflow.py | {
"start": 2433,
"end": 3063
} | class ____(BaseModel):
event_id: str
occurrence_id: str | None = None
timestamp: datetime | None = None
class Config:
# Ignore unknown fields; we'd like to be able to add new fields easily.
extra = "ignore"
@validator("event_id")
def validate_event_id(cls, v: str) -> str:
if not v.strip():
raise ValueError("event_id is required")
return v
@validator("occurrence_id")
def validate_occurrence_id(cls, v: str | None) -> str | None:
if v is not None and not v.strip():
return None
return v
@dataclass(frozen=True)
| EventInstance |
python | pytorch__pytorch | test/onnx/model_defs/dcgan.py | {
"start": 1708,
"end": 2953
} | class ____(nn.Module):
def __init__(self, ngpu):
super().__init__()
self.ngpu = ngpu
self.main = nn.Sequential(
# input is (nc) x 64 x 64
nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf) x 32 x 32
nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 2),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*2) x 16 x 16
nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 4),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*4) x 8 x 8
nn.Conv2d(ndf * 4, ndf * 8, 4, 2, 1, bias=False),
nn.BatchNorm2d(ndf * 8),
nn.LeakyReLU(0.2, inplace=True),
# state size. (ndf*8) x 4 x 4
nn.Conv2d(ndf * 8, 1, 4, 1, 0, bias=False),
nn.Sigmoid(),
)
def forward(self, input):
if self.ngpu > 1 and isinstance(input.data, torch.cuda.FloatTensor):
output = nn.parallel.data_parallel(self.main, input, range(self.ngpu))
else:
output = self.main(input)
return output.view(-1, 1)
| _netD |
python | pytorch__pytorch | torch/distributed/fsdp/api.py | {
"start": 16298,
"end": 16365
} | class ____(StateDictConfig):
pass
@dataclass
| LocalStateDictConfig |
python | apache__airflow | shared/secrets_masker/tests/secrets_masker/test_secrets_masker.py | {
"start": 29610,
"end": 40143
} | class ____:
"""Test the merge functionality for restoring original values from redacted data."""
def setup_method(self):
self.masker = SecretsMasker()
configure_secrets_masker_for_test(self.masker)
@pytest.mark.parametrize(
("new_value", "old_value", "name", "expected"),
[
("***", "original_secret", "password", "original_secret"),
("new_secret", "original_secret", "password", "new_secret"),
("***", "original_value", "normal_field", "***"),
("new_value", "original_value", "normal_field", "new_value"),
("***", "original_value", None, "***"),
("new_value", "original_value", None, "new_value"),
],
)
def test_merge_simple_strings(self, new_value, old_value, name, expected):
result = self.masker.merge(new_value, old_value, name)
assert result == expected
@pytest.mark.parametrize(
("old_data", "new_data", "expected"),
[
(
{
"password": "original_password",
"api_key": "original_api_key",
"normal_field": "original_normal",
},
{
"password": "***",
"api_key": "new_api_key",
"normal_field": "new_normal",
},
{
"password": "original_password",
"api_key": "new_api_key",
"normal_field": "new_normal",
},
),
(
{
"config": {"password": "original_password", "host": "original_host"},
"credentials": {"api_key": "original_api_key", "username": "original_user"},
},
{
"config": {
"password": "***",
"host": "new_host",
},
"credentials": {
"api_key": "new_api_key",
"username": "new_user",
},
},
{
"config": {
"password": "original_password",
"host": "new_host",
},
"credentials": {
"api_key": "new_api_key",
"username": "new_user",
},
},
),
],
)
def test_merge_dictionaries(self, old_data, new_data, expected):
result = self.masker.merge(new_data, old_data)
assert result == expected
@pytest.mark.parametrize(
("old_data", "new_data", "name", "expected"),
[
# Lists
(
["original_item1", "original_item2", "original_item3"],
["new_item1", "new_item2"],
None,
["new_item1", "new_item2"],
),
(
["original_item1", "original_item2"],
["new_item1", "new_item2", "new_item3", "new_item4"],
None,
["new_item1", "new_item2", "new_item3", "new_item4"],
),
(
["secret1", "secret2", "secret3"],
["***", "new_secret2", "***"],
"password",
["secret1", "new_secret2", "secret3"],
),
(
["value1", "value2", "value3"],
["***", "new_value2", "***"],
"normal_list",
["***", "new_value2", "***"],
),
# Tuples
(
("original_item1", "original_item2", "original_item3"),
("new_item1", "new_item2"),
None,
("new_item1", "new_item2"),
),
(
("original_item1", "original_item2"),
("new_item1", "new_item2", "new_item3", "new_item4"),
None,
("new_item1", "new_item2", "new_item3", "new_item4"),
),
(
("secret1", "secret2", "secret3"),
("***", "new_secret2", "***"),
"password",
("secret1", "new_secret2", "secret3"),
),
(
("value1", "value2", "value3"),
("***", "new_value2", "***"),
"normal_tuple",
("***", "new_value2", "***"),
),
# Sets
(
{"original_item1", "original_item2", "original_item3"},
{"new_item1", "new_item2"},
None,
{"new_item1", "new_item2"},
),
(
{"original_item1", "original_item2"},
{"new_item1", "new_item2", "new_item3", "new_item4"},
None,
{"new_item1", "new_item2", "new_item3", "new_item4"},
),
(
{"secret1", "secret2", "secret3"},
{"***", "new_secret2", "***"},
"password",
{"***", "new_secret2", "***"},
),
(
{"value1", "value2", "value3"},
{"***", "new_value2", "***"},
"normal_tuple",
{"***", "new_value2", "***"},
),
],
)
def test_merge_collections(self, old_data, new_data, name, expected):
result = self.masker.merge(new_data, old_data, name)
assert result == expected
def test_merge_mismatched_types(self):
old_data = {"key": "value"}
new_data = "some_string" # Different type
# When types don't match, prefer the new item
expected = "some_string"
result = self.masker.merge(new_data, old_data)
assert result == expected
def test_merge_with_missing_keys(self):
old_data = {"password": "original_password", "old_only_key": "old_value", "common_key": "old_common"}
new_data = {
"password": "***",
"new_only_key": "new_value",
"common_key": "new_common",
}
expected = {
"password": "original_password",
"new_only_key": "new_value",
"common_key": "new_common",
}
result = self.masker.merge(new_data, old_data)
assert result == expected
def test_merge_complex_redacted_structures(self):
old_data = {
"some_config": {
"nested_password": "original_nested_password",
"passwords": ["item1", "item2"],
},
"normal_field": "normal_value",
}
new_data = {
"some_config": {"nested_password": "***", "passwords": ["***", "new_item2"]},
"normal_field": "new_normal_value",
}
result = self.masker.merge(new_data, old_data)
expected = {
"some_config": {
"nested_password": "original_nested_password",
"passwords": ["item1", "new_item2"],
},
"normal_field": "new_normal_value",
}
assert result == expected
def test_merge_partially_redacted_structures(self):
old_data = {
"config": {
"password": "original_password",
"host": "original_host",
"nested": {"api_key": "original_api_key", "timeout": 30},
}
}
new_data = {
"config": {
"password": "***",
"host": "new_host",
"nested": {
"api_key": "***",
"timeout": 60,
},
}
}
expected = {
"config": {
"password": "original_password",
"host": "new_host",
"nested": {
"api_key": "original_api_key",
"timeout": 60,
},
}
}
result = self.masker.merge(new_data, old_data)
assert result == expected
def test_merge_max_depth(self):
old_data = {"level1": {"level2": {"level3": {"password": "original_password"}}}}
new_data = {"level1": {"level2": {"level3": {"password": "***"}}}}
result = merge(new_data, old_data, max_depth=1)
assert result == new_data
result = self.masker.merge(new_data, old_data, max_depth=10)
assert result["level1"]["level2"]["level3"]["password"] == "original_password"
def test_merge_enum_values(self):
old_enum = MyEnum.testname
new_enum = MyEnum.testname2
result = self.masker.merge(new_enum, old_enum)
assert result == new_enum
assert isinstance(result, MyEnum)
def test_merge_round_trip(self):
# Original data with sensitive information
original_config = {
"database": {"host": "db.example.com", "password": "super_secret_password", "username": "admin"},
"api": {"api_key": "secret_api_key_12345", "endpoint": "https://api.example.com", "timeout": 30},
"app_name": "my_application",
}
# Step 1: Redact the original data
redacted_dict = self.masker.redact(original_config)
# Verify sensitive fields are redacted
assert redacted_dict["database"]["password"] == "***"
assert redacted_dict["api"]["api_key"] == "***"
assert redacted_dict["database"]["host"] == "db.example.com"
# Step 2: User modifies some fields
updated_dict = redacted_dict.copy()
updated_dict["database"]["host"] = "new-db.example.com"
updated_dict["api"]["timeout"] = 60
updated_dict["api"]["api_key"] = "new_api_key_67890"
# User left password as "***" (unchanged)
# Step 3: Merge to restore unchanged sensitive values
final_dict = self.masker.merge(updated_dict, original_config)
# Verify the results
assert final_dict["database"]["password"] == "super_secret_password" # Restored
assert final_dict["database"]["host"] == "new-db.example.com" # User modification kept
assert final_dict["api"]["api_key"] == "new_api_key_67890" # User modification kept
assert final_dict["api"]["timeout"] == 60 # User modification kept
assert final_dict["app_name"] == "my_application" # Unchanged
| TestSecretsMaskerMerge |
python | openai__openai-python | src/openai/resources/evals/evals.py | {
"start": 22998,
"end": 23721
} | class ____:
def __init__(self, evals: Evals) -> None:
self._evals = evals
self.create = _legacy_response.to_raw_response_wrapper(
evals.create,
)
self.retrieve = _legacy_response.to_raw_response_wrapper(
evals.retrieve,
)
self.update = _legacy_response.to_raw_response_wrapper(
evals.update,
)
self.list = _legacy_response.to_raw_response_wrapper(
evals.list,
)
self.delete = _legacy_response.to_raw_response_wrapper(
evals.delete,
)
@cached_property
def runs(self) -> RunsWithRawResponse:
return RunsWithRawResponse(self._evals.runs)
| EvalsWithRawResponse |
python | openai__openai-python | src/openai/types/webhooks/batch_completed_webhook_event.py | {
"start": 326,
"end": 770
} | class ____(BaseModel):
id: str
"""The unique ID of the event."""
created_at: int
"""The Unix timestamp (in seconds) of when the batch API request was completed."""
data: Data
"""Event data payload."""
type: Literal["batch.completed"]
"""The type of the event. Always `batch.completed`."""
object: Optional[Literal["event"]] = None
"""The object of the event. Always `event`."""
| BatchCompletedWebhookEvent |
python | django__django | tests/queries/test_iterator.py | {
"start": 195,
"end": 2229
} | class ____(TestCase):
itersize_index_in_mock_args = 3
@classmethod
def setUpTestData(cls):
Article.objects.create(name="Article 1", created=datetime.datetime.now())
Article.objects.create(name="Article 2", created=datetime.datetime.now())
def test_iterator_invalid_chunk_size(self):
for size in (0, -1):
with self.subTest(size=size):
with self.assertRaisesMessage(
ValueError, "Chunk size must be strictly positive."
):
Article.objects.iterator(chunk_size=size)
def test_default_iterator_chunk_size(self):
qs = Article.objects.iterator()
with mock.patch(
"django.db.models.sql.compiler.cursor_iter", side_effect=cursor_iter
) as cursor_iter_mock:
next(qs)
self.assertEqual(cursor_iter_mock.call_count, 1)
mock_args, _mock_kwargs = cursor_iter_mock.call_args
self.assertEqual(mock_args[self.itersize_index_in_mock_args], 2000)
def test_iterator_chunk_size(self):
batch_size = 3
qs = Article.objects.iterator(chunk_size=batch_size)
with mock.patch(
"django.db.models.sql.compiler.cursor_iter", side_effect=cursor_iter
) as cursor_iter_mock:
next(qs)
self.assertEqual(cursor_iter_mock.call_count, 1)
mock_args, _mock_kwargs = cursor_iter_mock.call_args
self.assertEqual(mock_args[self.itersize_index_in_mock_args], batch_size)
def test_no_chunked_reads(self):
"""
If the database backend doesn't support chunked reads, then the
result of SQLCompiler.execute_sql() is a list.
"""
qs = Article.objects.all()
compiler = qs.query.get_compiler(using=qs.db)
features = connections[qs.db].features
with mock.patch.object(features, "can_use_chunked_reads", False):
result = compiler.execute_sql(chunked_fetch=True)
self.assertIsInstance(result, list)
| QuerySetIteratorTests |
python | sphinx-doc__sphinx | sphinx/domains/c/__init__.py | {
"start": 12882,
"end": 13001
} | class ____(CObject):
object_type = 'function'
doc_field_types = _function_doc_field_types.copy()
| CFunctionObject |
python | pytest-dev__pytest-xdist | testing/acceptance_test.py | {
"start": 51545,
"end": 53994
} | class ____:
_test_content = """
class TestClassName%s(object):
@classmethod
def setup_class(cls):
FILE_LOCK.acquire()
@classmethod
def teardown_class(cls):
FILE_LOCK.release()
def test_a(self):
pass
def test_b(self):
pass
def test_c(self):
pass
"""
test_file1 = """
import filelock
FILE_LOCK = filelock.FileLock("test.lock")
""" + ((_test_content * 4) % ("A", "B", "C", "D"))
@pytest.mark.parametrize(
"scope", ["each", "load", "loadscope", "loadfile", "worksteal", "no"]
)
def test_single_file(self, pytester: pytest.Pytester, scope: str) -> None:
pytester.makepyfile(test_a=self.test_file1)
result = pytester.runpytest("-n2", "--dist=%s" % scope, "-v")
result.assert_outcomes(passed=(12 if scope != "each" else 12 * 2))
@pytest.mark.parametrize(
"scope", ["each", "load", "loadscope", "loadfile", "worksteal", "no"]
)
def test_multi_file(self, pytester: pytest.Pytester, scope: str) -> None:
pytester.makepyfile(
test_a=self.test_file1,
test_b=self.test_file1,
test_c=self.test_file1,
test_d=self.test_file1,
)
result = pytester.runpytest("-n2", "--dist=%s" % scope, "-v")
result.assert_outcomes(passed=(48 if scope != "each" else 48 * 2))
def parse_tests_and_workers_from_output(lines: list[str]) -> list[tuple[str, str, str]]:
result = []
for line in lines:
# example match: "[gw0] PASSED test_a.py::test[7]"
m = re.match(
r"""
\[(gw\d)\] # worker
\s*
(?:\[\s*\d+%\])? # progress indicator
\s(.*?) # status string ("PASSED")
\s(.*::.*) # nodeid
""",
line.strip(),
re.VERBOSE,
)
if m:
worker, status, nodeid = m.groups()
result.append((worker, status, nodeid))
return result
def get_workers_and_test_count_by_prefix(
prefix: str, lines: list[str], expected_status: str = "PASSED"
) -> dict[str, int]:
result: dict[str, int] = {}
for worker, status, nodeid in parse_tests_and_workers_from_output(lines):
if expected_status == status and nodeid.startswith(prefix):
result[worker] = result.get(worker, 0) + 1
return result
| TestLocking |
python | tensorflow__tensorflow | tensorflow/python/debug/cli/debugger_cli_common.py | {
"start": 32831,
"end": 36004
} | class ____:
"""Keeps command history and supports lookup."""
_HISTORY_FILE_NAME = ".tfdbg_history"
def __init__(self, limit=100, history_file_path=None):
"""CommandHistory constructor.
Args:
limit: Maximum number of the most recent commands that this instance
keeps track of, as an int.
history_file_path: (str) Manually specified path to history file. Used in
testing.
"""
self._commands = []
self._limit = limit
self._history_file_path = (
history_file_path or self._get_default_history_file_path())
self._load_history_from_file()
def _load_history_from_file(self):
if os.path.isfile(self._history_file_path):
try:
with open(self._history_file_path, "rt") as history_file:
commands = history_file.readlines()
self._commands = [command.strip() for command in commands
if command.strip()]
# Limit the size of the history file.
if len(self._commands) > self._limit:
self._commands = self._commands[-self._limit:]
with open(self._history_file_path, "wt") as history_file:
for command in self._commands:
history_file.write(command + "\n")
except IOError:
print("WARNING: writing history file failed.")
def _add_command_to_history_file(self, command):
try:
with open(self._history_file_path, "at") as history_file:
history_file.write(command + "\n")
except IOError:
pass
@classmethod
def _get_default_history_file_path(cls):
return os.path.join(os.path.expanduser("~"), cls._HISTORY_FILE_NAME)
def add_command(self, command):
"""Add a command to the command history.
Args:
command: The history command, as a str.
Raises:
TypeError: if command is not a str.
"""
if self._commands and command == self._commands[-1]:
# Ignore repeating commands in a row.
return
if not isinstance(command, str):
raise TypeError("Attempt to enter non-str entry to command history")
self._commands.append(command)
if len(self._commands) > self._limit:
self._commands = self._commands[-self._limit:]
self._add_command_to_history_file(command)
def most_recent_n(self, n):
"""Look up the n most recent commands.
Args:
n: Number of most recent commands to look up.
Returns:
A list of n most recent commands, or all available most recent commands,
if n exceeds size of the command history, in chronological order.
"""
return self._commands[-n:]
def lookup_prefix(self, prefix, n):
"""Look up the n most recent commands that starts with prefix.
Args:
prefix: The prefix to lookup.
n: Number of most recent commands to look up.
Returns:
A list of n most recent commands that have the specified prefix, or all
available most recent commands that have the prefix, if n exceeds the
number of history commands with the prefix.
"""
commands = [cmd for cmd in self._commands if cmd.startswith(prefix)]
return commands[-n:]
# TODO(cais): Lookup by regex.
| CommandHistory |
python | kamyu104__LeetCode-Solutions | Python/contain-virus.py | {
"start": 70,
"end": 2113
} | class ____(object):
def containVirus(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
directions = [(0, 1), (0, -1), (-1, 0), (1, 0)]
def dfs(grid, r, c, lookup, regions, frontiers, perimeters):
if (r, c) in lookup:
return
lookup.add((r, c))
regions[-1].add((r, c))
for d in directions:
nr, nc = r+d[0], c+d[1]
if not (0 <= nr < len(grid) and \
0 <= nc < len(grid[r])):
continue
if grid[nr][nc] == 1:
dfs(grid, nr, nc, lookup, regions, frontiers, perimeters)
elif grid[nr][nc] == 0:
frontiers[-1].add((nr, nc))
perimeters[-1] += 1
result = 0
while True:
lookup, regions, frontiers, perimeters = set(), [], [], []
for r, row in enumerate(grid):
for c, val in enumerate(row):
if val == 1 and (r, c) not in lookup:
regions.append(set())
frontiers.append(set())
perimeters.append(0)
dfs(grid, r, c, lookup, regions, frontiers, perimeters)
if not regions: break
triage_idx = frontiers.index(max(frontiers, key = len))
for i, region in enumerate(regions):
if i == triage_idx:
result += perimeters[i]
for r, c in region:
grid[r][c] = -1
continue
for r, c in region:
for d in directions:
nr, nc = r+d[0], c+d[1]
if not (0 <= nr < len(grid) and \
0 <= nc < len(grid[r])):
continue
if grid[nr][nc] == 0:
grid[nr][nc] = 1
return result
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.