language (stringclasses, 1 value) | repo (stringclasses, 346 values) | path (stringlengths, 6–201) | class_span (dict) | source (stringlengths, 21–2.38M) | target (stringlengths, 1–96)
|---|---|---|---|---|---|
python | fabric__fabric | integration/connection.py | {
"start": 327,
"end": 5684
} | class ____:
class ssh_connections:
def open_method_generates_real_connection(self):
c = Connection("localhost")
c.open()
assert c.client.get_transport().active is True
assert c.is_connected is True
return c
def close_method_closes_connection(self):
# Handy shortcut - open things up, then return Connection for us to
# close
c = self.open_method_generates_real_connection()
c.close()
assert c.client.get_transport() is None
assert c.is_connected is False
class run:
def simple_command_on_host(self):
"""
Run command on localhost
"""
result = Connection("localhost").run("echo foo", hide=True)
assert result.stdout == "foo\n"
assert result.exited == 0
assert result.ok is True
def simple_command_with_pty(self):
"""
Run command under PTY on localhost
"""
# Most Unix systems should have stty, which asplodes when not run
# under a pty, and prints useful info otherwise
result = Connection("localhost").run(
"stty size", hide=True, pty=True
)
found = result.stdout.strip().split()
cols, rows = pty_size()
assert tuple(map(int, found)) == (rows, cols)
# PTYs use \r\n, not \n, line separation
assert "\r\n" in result.stdout
assert result.pty is True
class shell:
@trap
def base_case(self):
result = Connection("localhost").shell(
# Some extra newlines to make sure it doesn't get split up by
# motd/prompt
in_stream=StringIO("\n\nexit\n")
)
assert result.command is None
assert "exit" in result.stdout
assert result.stderr == ""
assert result.exited == 0
assert result.pty is True
class local:
def wraps_invoke_run(self):
# NOTE: most of the interesting tests about this are in
# invoke.runners / invoke.integration.
cxn = Connection("localhost")
result = cxn.local("echo foo", hide=True)
assert result.stdout == "foo\n"
assert not cxn.is_connected # meh way of proving it didn't use SSH
def mixed_use_of_local_and_run(self):
"""
Run command truly locally, and over SSH via localhost
"""
cxn = Connection("localhost")
result = cxn.local("echo foo", hide=True)
assert result.stdout == "foo\n"
assert not cxn.is_connected # meh way of proving it didn't use SSH yet
result = cxn.run("echo foo", hide=True)
assert cxn.is_connected # NOW it's using SSH
assert result.stdout == "foo\n"
class sudo:
def setup(self):
# NOTE: assumes a user configured for passworded (NOT
# passwordless) sudo, whose password is 'mypass', is executing the
# test suite. I.e. our travis-ci setup.
config = Config(
{"sudo": {"password": "mypass"}, "run": {"hide": True}}
)
self.cxn = Connection("localhost", config=config)
def sudo_command(self):
"""
Run command via sudo on host localhost
"""
skip_outside_ci()
assert self.cxn.sudo("whoami").stdout.strip() == "root"
def mixed_sudo_and_normal_commands(self):
"""
Run command via sudo, and not via sudo, on localhost
"""
skip_outside_ci()
logname = os.environ["LOGNAME"]
assert self.cxn.run("whoami").stdout.strip() == logname
assert self.cxn.sudo("whoami").stdout.strip() == "root"
def large_remote_commands_finish_cleanly(self):
# Guards against e.g. cleanup finishing before actually reading all
# data from the remote end. Which is largely an issue in Invoke-level
# code but one that only really manifests when doing stuff over the
# network. Yay computers!
path = "/usr/share/dict/words"
cxn = Connection("localhost")
with open(path) as fd:
words = [x.strip() for x in fd.readlines()]
stdout = cxn.run("cat {}".format(path), hide=True).stdout
lines = [x.strip() for x in stdout.splitlines()]
# When bug present, # lines received is significantly fewer than the
# true count in the file (by thousands).
assert len(lines) == len(words)
class command_timeout:
def setup(self):
self.cxn = Connection("localhost")
def does_not_raise_exception_when_under_timeout(self):
assert self.cxn.run("sleep 1", timeout=3)
def raises_exception_when_over_timeout(self):
with raises(CommandTimedOut) as info:
start = time.time()
self.cxn.run("sleep 5", timeout=1)
elapsed = time.time() - start
assert info.value.timeout == 1
# Catch scenarios where we except but don't actually shut down
# early (w/ a bit of fudge time for overhead)
assert elapsed <= 2
| Connection_ |
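The fabric row above exercises Fabric's public `Connection` API end to end. A minimal sketch of the surface those tests cover (a sketch, assuming SSH to localhost is set up as the integration suite requires):

```python
from fabric import Connection

c = Connection("localhost")
result = c.run("echo foo", hide=True)   # opens the SSH session lazily
assert result.stdout == "foo\n" and result.exited == 0
c.local("echo foo", hide=True)          # runs in a local subprocess, no SSH
c.close()
assert not c.is_connected
```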
python | has2k1__plotnine | plotnine/scales/limits.py | {
"start": 4077,
"end": 5626
} | class ____:
"""
Set aesthetic limits
Parameters
----------
kwargs :
Aesthetic and the values of the limits.
e.g. `x=(40, 100)`
Notes
-----
If the 2nd value of `limits` is less than
the first, a reversed scale will be created.
"""
def __init__(self, **kwargs):
self._kwargs = kwargs
def __radd__(self, other):
"""
Add limits to ggplot object
"""
thismodule = sys.modules[__name__]
for ae, value in self._kwargs.items():
try:
klass = getattr(thismodule, f"{ae}lim")
except AttributeError as e:
msg = "Cannot change limits for '{}'"
raise PlotnineError(msg) from e
other += klass(value)
return other
def expand_limits(**kwargs):
"""
Expand the limits of any aesthetic using data
Parameters
----------
kwargs : dict | dataframe
Data to use in expanding the limits.
The keys should be aesthetic names
e.g. *x*, *y*, *colour*, ...
"""
def as_list(key):
with suppress(KeyError):
if isinstance(kwargs[key], (int, float, str)):
kwargs[key] = [kwargs[key]]
if isinstance(kwargs, dict):
as_list("x")
as_list("y")
data = pd.DataFrame(kwargs)
else:
data = kwargs
mapping = aes()
for ae in set(kwargs) & ALL_AESTHETICS:
mapping[ae] = ae
return geom_blank(data=data, mapping=mapping, inherit_aes=False)
| lims |
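The `lims` and `expand_limits` helpers above attach themselves to a plot through `__radd__` and `geom_blank`. A minimal usage sketch (assuming the standard top-level plotnine exports and a small DataFrame):

```python
import pandas as pd
from plotnine import ggplot, aes, geom_point, lims, expand_limits

df = pd.DataFrame({"x": [50, 60, 70], "y": [1, 2, 3]})

# lims(x=(40, 100)) dispatches to xlim(40, 100) inside __radd__;
# a reversed pair such as x=(100, 40) would create a reversed scale.
p = ggplot(df, aes("x", "y")) + geom_point() + lims(x=(40, 100))

# expand_limits routes the extra values through geom_blank so the
# y scale covers 0 without drawing anything there.
p = p + expand_limits(y=0)
```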
python | spack__spack | lib/spack/spack/test/installer.py | {
"start": 35714,
"end": 50435
} | class ____(Exception):
pass
_old_complete_task = None
def _install_fail_my_build_exception(installer, task, install_status, **kwargs):
if task.pkg.name == "pkg-a":
raise MyBuildException("mock internal package build error for pkg-a")
else:
_old_complete_task(installer, task, install_status)
def test_install_fail_single(install_mockery, mock_fetch, monkeypatch):
"""Test expected results for failure of single package."""
global _old_complete_task
installer = create_installer(["pkg-a"], {"fake": True})
# Raise a MyBuildException for pkg-a to trigger early termination
_old_complete_task = inst.PackageInstaller._complete_task
monkeypatch.setattr(inst.PackageInstaller, "_complete_task", _install_fail_my_build_exception)
with pytest.raises(MyBuildException, match="mock internal package build error for pkg-a"):
installer.install()
# ensure dependency of a is 'installed' and a is not
assert any(pkg_id.startswith("pkg-b-") for pkg_id in installer.installed)
assert not any(pkg_id.startswith("pkg-a-") for pkg_id in installer.installed)
def test_install_fail_multi(install_mockery, mock_fetch, monkeypatch):
"""Test expected results for failure of multiple packages."""
global _old_complete_task
installer = create_installer(["pkg-a", "pkg-c"], {"fake": True})
# Raise a MyBuildException for pkg-a to trigger early termination
_old_complete_task = inst.PackageInstaller._complete_task
monkeypatch.setattr(inst.PackageInstaller, "_complete_task", _install_fail_my_build_exception)
with pytest.raises(spack.error.InstallError, match="Installation request failed"):
installer.install()
# ensure the second spec installed but not the first
assert any(pkg_id.startswith("pkg-c-") for pkg_id in installer.installed)
assert not any(pkg_id.startswith("pkg-a-") for pkg_id in installer.installed)
def test_install_fail_fast_on_detect(install_mockery, monkeypatch, capsys):
"""Test fail_fast install when an install failure is detected."""
a = spack.concretize.concretize_one("parallel-package-a")
a_id = inst.package_id(a)
b_id = inst.package_id(a["parallel-package-b"])
c_id = inst.package_id(a["parallel-package-c"])
installer = create_installer([a], {"fail_fast": True})
# Make sure all packages are identified as failed
# This will prevent a and b from installing, which will cause the build of c to be skipped
# and the active processes to be killed.
monkeypatch.setattr(spack.database.FailureTracker, "has_failed", _true)
installer.max_active_tasks = 2
with pytest.raises(spack.error.InstallError, match="after first install failure"):
installer.install()
assert b_id in installer.failed, "Expected b to be marked as failed"
assert c_id in installer.failed, "Expected c to be marked as failed"
assert (
a_id not in installer.installed
), "Package a cannot install due to its dependencies failing"
# check that b's active process got killed when c failed
assert f"{b_id} failed to install" in capsys.readouterr().err
def _test_install_fail_fast_on_except_patch(installer, **kwargs):
"""Helper for test_install_fail_fast_on_except."""
# This is a module-scope function and not a local function because it
# needs to be pickleable.
raise RuntimeError("mock patch failure")
@pytest.mark.disable_clean_stage_check
def test_install_fail_fast_on_except(install_mockery, monkeypatch, capsys):
"""Test fail_fast install when an install failure results from an error."""
installer = create_installer(["pkg-a"], {"fail_fast": True})
# Raise a non-KeyboardInterrupt exception to trigger fast failure.
#
# This will prevent b from installing, which will cause the build of a
# to be skipped.
monkeypatch.setattr(
spack.package_base.PackageBase, "do_patch", _test_install_fail_fast_on_except_patch
)
with pytest.raises(spack.error.InstallError, match="mock patch failure"):
installer.install()
out = str(capsys.readouterr())
assert "Skipping build of pkg-a" in out
def test_install_lock_failures(install_mockery, monkeypatch, capfd):
"""Cover basic install lock failure handling in a single pass."""
# Note: this test relies on installing a package with no dependencies
def _requeued(installer, task, install_status):
tty.msg("requeued {0}".format(task.pkg.spec.name))
installer = create_installer(["pkg-c"], {})
# Ensure never acquire a lock
monkeypatch.setattr(inst.PackageInstaller, "_ensure_locked", _not_locked)
# Ensure we don't continually requeue the task
monkeypatch.setattr(inst.PackageInstaller, "_requeue_task", _requeued)
with pytest.raises(spack.error.InstallError, match="request failed"):
installer.install()
out = capfd.readouterr()[0]
expected = ["write locked", "read locked", "requeued"]
for exp, ln in zip(expected, out.split("\n")):
assert exp in ln
def test_install_lock_installed_requeue(install_mockery, monkeypatch, capfd):
"""Cover basic install handling for installed package."""
# Note: this test relies on installing a package with no dependencies
concrete_spec = spack.concretize.concretize_one("pkg-c")
pkg_id = inst.package_id(concrete_spec)
installer = create_installer([concrete_spec])
def _prep(installer, task):
installer.installed.add(pkg_id)
tty.msg(f"{pkg_id} is installed")
# also do not allow the package to be locked again
monkeypatch.setattr(inst.PackageInstaller, "_ensure_locked", _not_locked)
def _requeued(installer, task, install_status):
tty.msg(f"requeued {inst.package_id(task.pkg.spec)}")
# Flag the package as installed
monkeypatch.setattr(inst.PackageInstaller, "_prepare_for_install", _prep)
# Ensure we don't continually requeue the task
monkeypatch.setattr(inst.PackageInstaller, "_requeue_task", _requeued)
with pytest.raises(spack.error.InstallError, match="request failed"):
installer.install()
assert pkg_id not in installer.installed
expected = ["is installed", "read locked", "requeued"]
for exp, ln in zip(expected, capfd.readouterr().out.splitlines()):
assert exp in ln
def test_install_read_locked_requeue(install_mockery, monkeypatch, capfd):
"""Cover basic read lock handling for uninstalled package with requeue."""
# Note: this test relies on installing a package with no dependencies
orig_fn = inst.PackageInstaller._ensure_locked
def _read(installer, lock_type, pkg):
tty.msg("{0}->read locked {1}".format(lock_type, pkg.spec.name))
return orig_fn(installer, "read", pkg)
def _prep(installer, task):
tty.msg("preparing {0}".format(task.pkg.spec.name))
assert task.pkg.spec.name not in installer.installed
def _requeued(installer, task, install_status):
tty.msg("requeued {0}".format(task.pkg.spec.name))
# Force a read lock
monkeypatch.setattr(inst.PackageInstaller, "_ensure_locked", _read)
# Flag the package as installed
monkeypatch.setattr(inst.PackageInstaller, "_prepare_for_install", _prep)
# Ensure we don't continually requeue the task
monkeypatch.setattr(inst.PackageInstaller, "_requeue_task", _requeued)
installer = create_installer(["pkg-c"], {})
with pytest.raises(spack.error.InstallError, match="request failed"):
installer.install()
assert "b" not in installer.installed
out = capfd.readouterr()[0]
expected = ["write->read locked", "preparing", "requeued"]
for exp, ln in zip(expected, out.split("\n")):
assert exp in ln
def test_install_skip_patch(install_mockery, mock_fetch):
"""Test the path skip_patch install path."""
# Note: this test relies on installing a package with no dependencies
installer = create_installer(["pkg-c"], {"fake": False, "skip_patch": True})
installer.install()
assert inst.package_id(installer.build_requests[0].pkg.spec) in installer.installed
def test_install_implicit(install_mockery, mock_fetch):
"""Test the path skip_patch install path."""
spec_name = "trivial-install-test-package"
installer = create_installer([spec_name], {"fake": False})
pkg = installer.build_requests[0].pkg
assert not create_build_task(pkg, {"explicit": []}).explicit
assert create_build_task(pkg, {"explicit": [pkg.spec.dag_hash()]}).explicit
assert not create_build_task(pkg).explicit
# Install that wipes the prefix directory
def wipe_prefix(pkg, install_args):
shutil.rmtree(pkg.prefix, ignore_errors=True)
fs.mkdirp(pkg.prefix)
raise Exception("Some fatal install error")
def fail(*args, **kwargs):
assert False
def test_overwrite_install_backup_success(monkeypatch, temporary_store, config, mock_packages):
"""
When doing an overwrite install that fails, Spack should restore the backup
of the original prefix, and leave the original spec marked installed.
"""
# Get a build task. TODO: Refactor this to avoid calling internal methods.
# This task relies on installing something with no dependencies
installer = create_installer(["pkg-c"])
installer._init_queue()
task = installer._pop_task()
install_status = MockInstallStatus(1)
term_status = MockTermStatusLine(True)
# Make sure the install prefix exists with some trivial file
installed_file = os.path.join(task.pkg.prefix, "some_file")
fs.touchp(installed_file)
monkeypatch.setattr(inst, "build_process", wipe_prefix)
# Make sure the package is not marked uninstalled
monkeypatch.setattr(spack.store.STORE.db, "remove", fail)
# Make sure that the installer does an overwrite install
monkeypatch.setattr(task, "_install_action", inst.InstallAction.OVERWRITE)
# Installation should throw the installation exception, not the backup
# failure.
installer.start_task(task, install_status, term_status)
with pytest.raises(Exception, match="Some fatal install error"):
installer.complete_task(task, install_status)
# Check that the original file is back.
assert os.path.exists(installed_file)
# Install that removes the backup directory, which is at the same level as
# the prefix, starting with .backup
def remove_backup(pkg, install_args):
backup_glob = os.path.join(os.path.dirname(os.path.normpath(pkg.prefix)), ".backup*")
for backup in glob.iglob(backup_glob):
shutil.rmtree(backup)
raise Exception("Some fatal install error")
def test_overwrite_install_backup_failure(monkeypatch, temporary_store, config, mock_packages):
"""
When doing an overwrite install that fails, Spack should try to recover the
original prefix. If that fails, the spec is lost, and it should be removed
from the database.
"""
# Get a build task. TODO: refactor this to avoid calling internal methods
installer = create_installer(["pkg-c"])
installer._init_queue()
task = installer._pop_task()
install_status = MockInstallStatus(1)
term_status = MockTermStatusLine(True)
# Make sure the install prefix exists
installed_file = os.path.join(task.pkg.prefix, "some_file")
fs.touchp(installed_file)
monkeypatch.setattr(inst, "build_process", remove_backup)
# Make sure that the installer does an overwrite install
monkeypatch.setattr(task, "_install_action", inst.InstallAction.OVERWRITE)
# Make sure that `remove` was called on the database after an unsuccessful
# attempt to restore the backup.
# This error is raised while handling the original install error
installer.start_task(task, install_status, term_status)
with pytest.raises(Exception, match="No such spec in database"):
installer.complete_task(task, install_status)
def test_term_status_line():
# Smoke test for TermStatusLine; to actually test output it would be great
# to pass a StringIO instance, but we use tty.msg() internally which does not
# accept that. `with log_output(buf)` doesn't really work because it trims output
# and we actually want to test for escape sequences etc.
x = inst.TermStatusLine(enabled=True)
x.add("pkg-a")
x.add("pkg-b")
x.clear()
@pytest.mark.parametrize("explicit", [True, False])
def test_single_external_implicit_install(install_mockery, explicit):
pkg = "trivial-install-test-package"
s = spack.concretize.concretize_one(pkg)
s.external_path = "/usr"
args = {"explicit": [s.dag_hash()] if explicit else []}
create_installer([s], args).install()
assert spack.store.STORE.db.get_record(pkg).explicit == explicit
def test_overwrite_install_does_install_build_deps(install_mockery, mock_fetch):
"""When overwrite installing something from sources, build deps should be installed."""
s = spack.concretize.concretize_one("dtrun3")
create_installer([s]).install()
# Verify there is a pure build dep
edge = s.edges_to_dependencies(name="dtbuild3").pop()
assert edge.depflag == dt.BUILD
build_dep = edge.spec
# Uninstall the build dep
build_dep.package.do_uninstall()
# Overwrite install the root dtrun3
create_installer([s], {"overwrite": [s.dag_hash()]}).install()
# Verify that the build dep was also installed.
assert build_dep.installed
@pytest.mark.parametrize("run_tests", [True, False])
def test_print_install_test_log_skipped(install_mockery, mock_packages, capfd, run_tests):
"""Confirm printing of install log skipped if not run/no failures."""
name = "trivial-install-test-package"
s = spack.concretize.concretize_one(name)
pkg = s.package
pkg.run_tests = run_tests
spack.installer.print_install_test_log(pkg)
out = capfd.readouterr()[0]
assert out == ""
def test_print_install_test_log_failures(
tmp_path: pathlib.Path, install_mockery, mock_packages, ensure_debug, capfd
):
"""Confirm expected outputs when there are test failures."""
name = "trivial-install-test-package"
s = spack.concretize.concretize_one(name)
pkg = s.package
# Missing test log is an error
pkg.run_tests = True
pkg.tester.test_log_file = str(tmp_path / "test-log.txt")
pkg.tester.add_failure(AssertionError("test"), "test-failure")
spack.installer.print_install_test_log(pkg)
err = capfd.readouterr()[1]
assert "no test log file" in err
# Having test log results in path being output
fs.touch(pkg.tester.test_log_file)
spack.installer.print_install_test_log(pkg)
out = capfd.readouterr()[0]
assert "See test results at" in out
| MyBuildException |
python | bokeh__bokeh | tests/unit/bokeh/embed/test_util__embed.py | {
"start": 24754,
"end": 25181
} | class ____:
def test_basic(self) -> None:
t = Theme(json={})
d = Document()
beu._themes[d] = t
beu._unset_temp_theme(d)
assert d.theme is t
assert d not in beu._themes
def test_no_old_theme(self) -> None:
d = Document()
orig = d.theme
beu._unset_temp_theme(d)
assert d.theme is orig
assert d not in beu._themes
| Test__unset_temp_theme |
python | airbytehq__airbyte | airbyte-ci/connectors/pipelines/pipelines/airbyte_ci/steps/gradle.py | {
"start": 711,
"end": 812
} | class ____(Exception):
"""Raised when a Gradle operation times out."""
pass
| GradleTimeoutError |
python | catalyst-team__catalyst | catalyst/contrib/datasets/cifar.py | {
"start": 3554,
"end": 8747
} | class ____(VisionDataset):
"""`CIFAR10 <https://www.cs.toronto.edu/~kriz/cifar.html>`_ Dataset.
Args:
root (string): Root directory of dataset where directory
``cifar-10-batches-py`` exists or will be saved to if download is set to True.
train (bool, optional): If True, creates dataset from training set, otherwise
creates from test set.
transform (callable, optional): A function/transform that takes in an PIL image
and returns a transformed version. E.g, ``transforms.RandomCrop``
target_transform (callable, optional): A function/transform that takes in the
target and transforms it.
download (bool, optional): If true, downloads the dataset from the internet and
puts it in root directory. If dataset is already downloaded, it is not
downloaded again.
"""
base_folder = "cifar-10-batches-py"
url = "https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz"
filename = "cifar-10-python.tar.gz"
tgz_md5 = "c58f30108f718f92721af3b95e74349a"
train_list = [
["data_batch_1", "c99cafc152244af753f735de768cd75f"],
["data_batch_2", "d4bba439e000b95fd0a9bffe97cbabec"],
["data_batch_3", "54ebc095f3ab1f0389bbae665268c751"],
["data_batch_4", "634d18415352ddfa80567beed471001a"],
["data_batch_5", "482c414d41f54cd18b22e5b47cb7c3cb"],
]
test_list = [
["test_batch", "40351d587109b95175f43aff81a1287e"],
]
meta = {
"filename": "batches.meta",
"key": "label_names",
"md5": "5ff9c542aee3614f3951f8cda6e48888",
}
def __init__(
self,
root: str,
train: bool = True,
transform: Optional[Callable] = None,
target_transform: Optional[Callable] = None,
download: bool = False,
) -> None:
super(CIFAR10, self).__init__(
root, transform=transform, target_transform=target_transform
)
self.train = train # training set or test set
if download:
self.download()
if not self._check_integrity():
raise RuntimeError(
"Dataset not found or corrupted."
+ " You can use download=True to download it"
)
if self.train:
downloaded_list = self.train_list
else:
downloaded_list = self.test_list
self.data: Any = []
self.targets = []
# now load the pickled numpy arrays
for file_name, checksum in downloaded_list:
file_path = os.path.join(self.root, self.base_folder, file_name)
with open(file_path, "rb") as f:
entry = pickle.load(f, encoding="latin1")
self.data.append(entry["data"])
if "labels" in entry:
self.targets.extend(entry["labels"])
else:
self.targets.extend(entry["fine_labels"])
self.data = np.vstack(self.data).reshape(-1, 3, 32, 32)
self.data = self.data.transpose((0, 2, 3, 1)) # convert to HWC
self._load_meta()
def _load_meta(self) -> None:
path = os.path.join(self.root, self.base_folder, self.meta["filename"])
if not _check_integrity(path, self.meta["md5"]):
raise RuntimeError(
"Dataset metadata file not found or corrupted."
+ " You can use download=True to download it"
)
with open(path, "rb") as infile:
data = pickle.load(infile, encoding="latin1")
self.classes = data[self.meta["key"]]
self.class_to_idx = {_class: i for i, _class in enumerate(self.classes)}
def __getitem__(self, index: int) -> Tuple[Any, Any]:
"""
Args:
index (int): Index
Returns:
tuple: (image, target) where target is index of the target class.
"""
img, target = self.data[index], self.targets[index]
# @TODO: here is the channel - no image requirements!
# doing this so that it is consistent with all other datasets
# to return a PIL Image
# img = Image.fromarray(img)
if self.transform is not None:
img = self.transform(img)
if self.target_transform is not None:
target = self.target_transform(target)
return img, target
def __len__(self) -> int:
return len(self.data)
def _check_integrity(self) -> bool:
root = self.root
for fentry in self.train_list + self.test_list:
filename, md5 = fentry[0], fentry[1]
fpath = os.path.join(root, self.base_folder, filename)
if not _check_integrity(fpath, md5):
return False
return True
def download(self) -> None:
if self._check_integrity():
print("Files already downloaded and verified")
return
download_and_extract_archive(
self.url, self.root, filename=self.filename, md5=self.tgz_md5
)
def extra_repr(self) -> str:
return "Split: {}".format("Train" if self.train is True else "Test")
| CIFAR10 |
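A short usage sketch for the `CIFAR10` class above (assuming the catalyst contrib import path; note that unlike torchvision this implementation returns raw HWC numpy arrays rather than PIL images):

```python
from catalyst.contrib.datasets import CIFAR10

# The first call downloads and md5-verifies the archive, then stacks the
# five training batches into a single (50000, 32, 32, 3) uint8 array.
train = CIFAR10(root="./data", train=True, download=True)
img, target = train[0]
print(len(train), img.shape, train.classes[target])
```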
python | PrefectHQ__prefect | tests/server/orchestration/api/test_task_run_states.py | {
"start": 99,
"end": 961
} | class ____:
async def test_read_task_run_state(self, task_run, client, session):
# create a task run state to read
result = await models.task_runs.set_task_run_state(
session=session,
task_run_id=task_run.id,
state=schemas.states.Running(),
)
await session.commit()
# make sure we can read the state
task_run_state_id = result.state.id
response = await client.get(f"/task_run_states/{task_run_state_id}")
assert response.status_code == status.HTTP_200_OK
assert response.json()["id"] == str(task_run_state_id)
async def test_read_task_run_state_returns_404_if_does_not_exist(self, client):
response = await client.get(f"/task_run_states/{uuid4()}")
assert response.status_code == status.HTTP_404_NOT_FOUND
| TestReadTaskRunStateById |
python | apache__airflow | providers/http/tests/unit/http/triggers/test_http.py | {
"start": 7441,
"end": 11544
} | class ____:
@staticmethod
def _mock_run_result(result_to_mock):
f = Future()
f.set_result(result_to_mock)
return f
def test_serialization(self, event_trigger):
"""
Asserts that the HttpEventTrigger correctly serializes its arguments
and classpath.
"""
classpath, kwargs = event_trigger.serialize()
assert classpath == "airflow.providers.http.triggers.http.HttpEventTrigger"
assert kwargs == {
"http_conn_id": TEST_CONN_ID,
"auth_type": TEST_AUTH_TYPE,
"method": TEST_METHOD,
"endpoint": TEST_ENDPOINT,
"headers": TEST_HEADERS,
"data": TEST_DATA,
"extra_options": TEST_EXTRA_OPTIONS,
"response_check_path": TEST_RESPONSE_CHECK_PATH,
"poll_interval": TEST_POLL_INTERVAL,
}
@pytest.mark.asyncio
@mock.patch(HTTP_PATH.format("HttpAsyncHook"))
async def test_trigger_on_success_yield_successfully(self, mock_hook, event_trigger, client_response):
"""
Tests that the HttpEventTrigger only fires once the job execution reaches a successful state.
"""
mock_hook.return_value.run.return_value = self._mock_run_result(client_response)
event_trigger._run_response_check = mock.AsyncMock(side_effect=[False, True])
response = await HttpEventTrigger._convert_response(client_response)
generator = event_trigger.run()
actual = await generator.asend(None)
assert actual == TriggerEvent(
{
"status": "success",
"response": base64.standard_b64encode(pickle.dumps(response)).decode("ascii"),
}
)
assert mock_hook.return_value.run.call_count == 2
assert event_trigger._run_response_check.call_count == 2
@pytest.mark.asyncio
@mock.patch(HTTP_PATH.format("HttpAsyncHook"))
async def test_trigger_on_exception_logs_error_and_never_yields(
self, mock_hook, event_trigger, monkeypatch
):
"""
Tests that the HttpEventTrigger logs the appropriate message and does not yield a TriggerEvent when an exception is raised.
"""
mock_hook.return_value.run.side_effect = Exception("Test exception")
mock_logger = mock.Mock()
monkeypatch.setattr(type(event_trigger), "log", mock_logger)
generator = event_trigger.run()
with pytest.raises(StopAsyncIteration):
await generator.asend(None)
mock_logger.error.assert_called_once_with("status: error, message: %s", "Test exception")
@pytest.mark.asyncio
async def test_convert_response(self, client_response):
"""
Assert conversion of aiohttp.client_reqrep.ClientResponse to requests.Response.
"""
response = await HttpEventTrigger._convert_response(client_response)
assert response.content == await client_response.read()
assert response.status_code == client_response.status
assert response.headers == CaseInsensitiveDict(client_response.headers)
assert response.url == str(client_response.url)
assert response.history == [HttpEventTrigger._convert_response(h) for h in client_response.history]
assert response.encoding == client_response.get_encoding()
assert response.reason == client_response.reason
assert dict(response.cookies) == dict(client_response.cookies)
@pytest.mark.db_test
@pytest.mark.asyncio
@mock.patch("aiohttp.client.ClientSession.post")
async def test_trigger_on_post_with_data(self, mock_http_post, event_trigger):
"""
Test that HttpEventTrigger posts the correct payload when a request is made.
"""
generator = event_trigger.run()
with pytest.raises(StopAsyncIteration):
await generator.asend(None)
mock_http_post.assert_called_once()
_, kwargs = mock_http_post.call_args
assert kwargs["data"] == TEST_DATA
assert kwargs["json"] is None
assert kwargs["params"] is None
| TestHttpEventTrigger |
python | pytorch__pytorch | test/dynamo/test_python_dispatcher.py | {
"start": 395,
"end": 2388
} | class ____(torch._dynamo.test_case.TestCase):
def test_dispatch_key1(self):
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
x = x + 1
return torch._C._dispatch_keys(x)
x = torch.randn(2, 3)
self.assertTrue(fn(x).raw_repr() == torch._C._dispatch_keys(x + 1).raw_repr())
def test_dispatch_key2(self):
from torch.testing._internal.two_tensor import TwoTensor
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
x = x.sin()
return torch._C._dispatch_keys(x)
x = torch.randn(3)
y = torch.randn(3)
z = TwoTensor(x, y)
self.assertTrue(fn(z).raw_repr() == torch._C._dispatch_keys(z.sin()).raw_repr())
def test_dispatch_key3(self):
@torch.compile(backend="aot_eager", fullgraph=True)
def fn(x):
key_set = torch._C._dispatch_tls_local_include_set()
return torch.sin(x + 1), key_set
x = torch.randn(2, 3)
self.assertEqual(fn(x)[0], torch.sin(x + 1))
self.assertTrue(
fn(x)[1].raw_repr() == torch._C._dispatch_tls_local_include_set().raw_repr()
)
def test_dispatch_key4(self):
eager = EagerAndRecordGraphs()
@torch.compile(backend=eager, fullgraph=True)
def fn(x):
key_set = torch._C._dispatch_tls_local_include_set()
key_set = key_set | torch._C._dispatch_keys(x)
key_set = key_set - torch._C._dispatch_tls_local_exclude_set()
if key_set.highestPriorityTypeId() == torch.DispatchKey.PythonDispatcher:
return torch.sin(x + 1)
else:
return torch.sin(x - 1)
x = torch.randn(2, 3)
self.assertEqual(fn(x), torch.sin(x - 1))
graph = eager.graphs[0]
actual = normalize_gm(graph.print_readable(False))
self.assertExpectedInline(
actual,
"""\
| PythonDispatcherTests |
python | davidhalter__jedi | test/completion/basic.py | {
"start": 3714,
"end": 5340
} | class ____(Exception):
def __init__(self, my_attr):
self.my_attr = my_attr
try:
raise MyException(1)
except MyException as e:
#? ['my_attr']
e.my_attr
#? 22 ['my_attr']
for x in e.my_attr:
pass
# -----------------
# params
# -----------------
my_param = 1
#? 9 str()
def foo1(my_param):
my_param = 3.0
foo1("")
my_type = float()
#? 20 float()
def foo2(my_param: my_type):
pass
foo2("")
#? 20 int()
def foo3(my_param=my_param):
pass
foo3("")
some_default = ''
#? []
def foo(my_t
#? []
def foo(my_t, my_ty
#? ['some_default']
def foo(my_t=some_defa
#? ['some_default']
def foo(my_t=some_defa, my_t2=some_defa
#? ['my_type']
def foo(my_t: lala=some_defa, my_t2: my_typ
#? ['my_type']
def foo(my_t: lala=some_defa, my_t2: my_typ
#? []
def foo(my_t: lala=some_defa, my_t
#? []
lambda my_t
#? []
lambda my_, my_t
#? ['some_default']
lambda x=some_defa
#? ['some_default']
lambda y, x=some_defa
# Just make sure we're not in some weird parsing recovery after opening brackets
def
# -----------------
# continuations
# -----------------
foo = \
1
#? int()
foo
# -----------------
# module attributes
# -----------------
# Don't move this to imports.py, because there's a star import.
#? str()
__file__
#? ['__file__']
__file__
#? str()
math.__file__
# Should not lead to errors
#?
math()
# -----------------
# with statements
# -----------------
with open('') as f:
#? ['closed']
f.closed
for line in f:
#? str() bytes()
line
with open('') as f1, open('') as f2:
#? ['closed']
f1.closed
#? ['closed']
f2.closed
| MyException |
python | keon__algorithms | tests/test_maths.py | {
"start": 8172,
"end": 8474
} | class ____(unittest.TestCase):
"""[summary]
Test for the file pythagoras.py
Arguments:
unittest {[type]} -- [description]
"""
def test_pythagoras(self):
self.assertEqual("Hypotenuse = 3.605551275463989",
pythagoras(3, 2, "?"))
| TestPythagoras |
python | pyinstaller__pyinstaller | tests/unit/test_modulegraph/test_imports.py | {
"start": 13895,
"end": 14682
} | class ____ (unittest.TestCase):
if not hasattr(unittest.TestCase, 'assertIsInstance'):
def assertIsInstance(self, value, types):
if not isinstance(value, types):
self.fail("%r is not an instance of %r"%(value, types))
def setUp(self):
root = os.path.join(
os.path.dirname(os.path.abspath(__file__)),
'testpkg-regr2')
self.mf = modulegraph.ModuleGraph(path=[ root ] + sys.path)
self.mf.add_script(os.path.join(root, 'main_script.py'))
def testRegr1(self):
node = self.mf.find_node('pkg.base')
self.assertIsInstance(node, modulegraph.SourceModule)
node = self.mf.find_node('pkg.pkg')
self.assertIsInstance(node, modulegraph.SourceModule)
| TestRegressions2 |
python | numpy__numpy | numpy/random/tests/test_direct.py | {
"start": 16176,
"end": 17634
} | class ____(Base):
@classmethod
def setup_class(cls):
cls.bit_generator = MT19937
cls.bits = 32
cls.dtype = np.uint32
cls.data1 = cls._read_csv(join(pwd, './data/mt19937-testset-1.csv'))
cls.data2 = cls._read_csv(join(pwd, './data/mt19937-testset-2.csv'))
cls.seed_error_type = ValueError
cls.invalid_init_types = []
cls.invalid_init_values = [(-1,)]
def test_seed_float_array(self):
assert_raises(TypeError, self.bit_generator, np.array([np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([-np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([np.pi, -np.pi]))
assert_raises(TypeError, self.bit_generator, np.array([0, np.pi]))
assert_raises(TypeError, self.bit_generator, [np.pi])
assert_raises(TypeError, self.bit_generator, [0, np.pi])
def test_state_tuple(self):
rs = Generator(self.bit_generator(*self.data1['seed']))
bit_generator = rs.bit_generator
state = bit_generator.state
desired = rs.integers(2 ** 16)
tup = (state['bit_generator'], state['state']['key'],
state['state']['pos'])
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
tup = tup + (0, 0.0)
bit_generator.state = tup
actual = rs.integers(2 ** 16)
assert_equal(actual, desired)
| TestMT19937 |
python | getsentry__sentry | tests/sentry/api/helpers/test_group_index.py | {
"start": 1933,
"end": 3911
} | class ____(TestCase):
def run_test(self, query: str) -> None:
validate_search_filter_permissions(self.organization, parse_search_query(query), self.user)
def assert_analytics_recorded(self, mock_record: Mock) -> None:
assert_last_analytics_event(
mock_record,
AdvancedSearchFeatureGateEvent(
user_id=self.user.id,
default_user_id=self.user.id,
organization_id=self.organization.id,
),
)
@patch("sentry.analytics.record")
def test_negative(self, mock_record: Mock) -> None:
query = "!has:user"
with (
self.feature({"organizations:advanced-search": False}),
pytest.raises(ValidationError, match=".*negative search.*"),
):
self.run_test(query)
self.run_test(query)
self.assert_analytics_recorded(mock_record)
query = "!something:123"
with (
self.feature({"organizations:advanced-search": False}),
pytest.raises(ValidationError, match=".*negative search.*"),
):
self.run_test(query)
self.run_test(query)
self.assert_analytics_recorded(mock_record)
@patch("sentry.analytics.record")
def test_wildcard(self, mock_record: Mock) -> None:
query = "abc:hello*"
with (
self.feature({"organizations:advanced-search": False}),
pytest.raises(ValidationError, match=".*wildcard search.*"),
):
self.run_test(query)
self.run_test(query)
self.assert_analytics_recorded(mock_record)
query = "raw * search"
with (
self.feature({"organizations:advanced-search": False}),
pytest.raises(ValidationError, match=".*wildcard search.*"),
):
self.run_test(query)
self.run_test(query)
self.assert_analytics_recorded(mock_record)
| ValidateSearchFilterPermissionsTest |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-google-ads/source_google_ads/components.py | {
"start": 13333,
"end": 15652
} | class ____(RecordTransformation):
"""
Transforms keys in a Google Ads record to snake_case.
The difference from KeysToSnakeCaseTransformation is that this transformation doesn't add an underscore before digits.
"""
token_pattern: re.Pattern[str] = re.compile(
r"""
\d*[A-Z]+[a-z]*\d* # uppercase word (with optional leading/trailing digits)
| \d*[a-z]+\d* # lowercase word (with optional leading/trailing digits)
| (?P<NoToken>[^a-zA-Z\d]+) # any non-alphanumeric separators
""",
re.VERBOSE,
)
def transform(
self,
record: Dict[str, Any],
config: Optional[Config] = None,
stream_state: Optional[StreamState] = None,
stream_slice: Optional[StreamSlice] = None,
) -> None:
transformed_record = self._transform_record(record)
record.clear()
record.update(transformed_record)
def _transform_record(self, record: Dict[str, Any]) -> Dict[str, Any]:
transformed_record = {}
for key, value in record.items():
transformed_key = self.process_key(key)
transformed_value = value
if isinstance(value, dict):
transformed_value = self._transform_record(value)
transformed_record[transformed_key] = transformed_value
return transformed_record
def process_key(self, key: str) -> str:
key = self.normalize_key(key)
tokens = self.tokenize_key(key)
tokens = self.filter_tokens(tokens)
return self.tokens_to_snake_case(tokens)
def normalize_key(self, key: str) -> str:
return str(anyascii.anyascii(key))
def tokenize_key(self, key: str) -> List[str]:
tokens = []
for match in self.token_pattern.finditer(key):
token = match.group(0) if match.group("NoToken") is None else ""
tokens.append(token)
return tokens
def filter_tokens(self, tokens: List[str]) -> List[str]:
if len(tokens) >= 3:
tokens = tokens[:1] + [t for t in tokens[1:-1] if t] + tokens[-1:]
return tokens
def tokens_to_snake_case(self, tokens: List[str]) -> str:
return "_".join(token.lower() for token in tokens)
@dataclass
| KeysToSnakeCaseGoogleAdsTransformation |
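A quick illustration of the tokenizer above; `process_key` is pure string logic, so this sketch only assumes the class can be instantiated directly:

```python
t = KeysToSnakeCaseGoogleAdsTransformation()

# camelCase splits on case boundaries, separators such as '.' become
# token boundaries, and digits stay attached to their word instead of
# gaining a leading underscore.
assert t.process_key("adGroupAd") == "ad_group_ad"
assert t.process_key("campaign.budget") == "campaign_budget"
assert t.process_key("metricsTop3") == "metrics_top3"
```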
python | apache__airflow | task-sdk/tests/task_sdk/api/test_client.py | {
"start": 38878,
"end": 40723
} | class ____:
@pytest.mark.parametrize(
"request_params",
[
({"name": "this_asset", "uri": "s3://bucket/key"}),
({"alias_name": "this_asset_alias"}),
],
)
def test_by_name_get_success(self, request_params):
def handle_request(request: httpx.Request) -> httpx.Response:
params = request.url.params
if request.url.path == "/asset-events/by-asset":
assert params.get("name") == request_params.get("name")
assert params.get("uri") == request_params.get("uri")
elif request.url.path == "/asset-events/by-asset-alias":
assert params.get("name") == request_params.get("alias_name")
else:
return httpx.Response(status_code=400, json={"detail": "Bad Request"})
return httpx.Response(
status_code=200,
json={
"asset_events": [
{
"id": 1,
"asset": {
"name": "this_asset",
"uri": "s3://bucket/key",
"group": "asset",
},
"created_dagruns": [],
"timestamp": "2023-01-01T00:00:00Z",
}
]
},
)
client = make_client(transport=httpx.MockTransport(handle_request))
result = client.asset_events.get(**request_params)
assert isinstance(result, AssetEventsResponse)
assert len(result.asset_events) == 1
assert result.asset_events[0].asset.name == "this_asset"
assert result.asset_events[0].asset.uri == "s3://bucket/key"
| TestAssetEventOperations |
python | numpy__numpy | numpy/random/tests/test_randomstate.py | {
"start": 81444,
"end": 87749
} | class ____:
def _create_arrays(self):
return np.array([2]), np.array([3]), np.array([4]), (1,)
def test_one_arg_funcs(self):
argOne, _, _, tgtShape = self._create_arrays()
funcs = (random.exponential, random.standard_gamma,
random.chisquare, random.standard_t,
random.pareto, random.weibull,
random.power, random.rayleigh,
random.poisson, random.zipf,
random.geometric, random.logseries)
probfuncs = (random.geometric, random.logseries)
for func in funcs:
if func in probfuncs: # p < 1.0
out = func(np.array([0.5]))
else:
out = func(argOne)
assert_equal(out.shape, tgtShape)
def test_two_arg_funcs(self):
argOne, argTwo, _, tgtShape = self._create_arrays()
funcs = (random.uniform, random.normal,
random.beta, random.gamma,
random.f, random.noncentral_chisquare,
random.vonmises, random.laplace,
random.gumbel, random.logistic,
random.lognormal, random.wald,
random.binomial, random.negative_binomial)
probfuncs = (random.binomial, random.negative_binomial)
for func in funcs:
if func in probfuncs: # p <= 1
argTwo = np.array([0.5])
else:
argTwo = argTwo
out = func(argOne, argTwo)
assert_equal(out.shape, tgtShape)
out = func(argOne[0], argTwo)
assert_equal(out.shape, tgtShape)
out = func(argOne, argTwo[0])
assert_equal(out.shape, tgtShape)
def test_three_arg_funcs(self):
argOne, argTwo, argThree, tgtShape = self._create_arrays()
funcs = [random.noncentral_f, random.triangular,
random.hypergeometric]
for func in funcs:
out = func(argOne, argTwo, argThree)
assert_equal(out.shape, tgtShape)
out = func(argOne[0], argTwo, argThree)
assert_equal(out.shape, tgtShape)
out = func(argOne, argTwo[0], argThree)
assert_equal(out.shape, tgtShape)
# Ensure returned array dtype is correct for platform
def test_integer_dtype(int_func):
random.seed(123456789)
fname, args, sha256 = int_func
f = getattr(random, fname)
actual = f(*args, size=2)
assert_(actual.dtype == np.dtype('l'))
def test_integer_repeat(int_func):
rng = random.RandomState(123456789)
fname, args, sha256 = int_func
f = getattr(rng, fname)
val = f(*args, size=1000000)
if sys.byteorder != 'little':
val = val.byteswap()
res = hashlib.sha256(val.view(np.int8)).hexdigest()
assert_(res == sha256)
def test_broadcast_size_error():
# GH-16833
with pytest.raises(ValueError):
random.binomial(1, [0.3, 0.7], size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], 0.3, size=(2, 1))
with pytest.raises(ValueError):
random.binomial([1, 2], [0.3, 0.7], size=(2, 1))
def test_randomstate_ctor_old_style_pickle():
rs = np.random.RandomState(MT19937(0))
rs.standard_normal(1)
# Directly call reduce which is used in pickling
ctor, args, state_a = rs.__reduce__()
# Simulate unpickling an old pickle that only has the name
assert args[0].__class__.__name__ == "MT19937"
b = ctor(*("MT19937",))
b.set_state(state_a)
state_b = b.get_state(legacy=False)
assert_equal(state_a['bit_generator'], state_b['bit_generator'])
assert_array_equal(state_a['state']['key'], state_b['state']['key'])
assert_array_equal(state_a['state']['pos'], state_b['state']['pos'])
assert_equal(state_a['has_gauss'], state_b['has_gauss'])
assert_equal(state_a['gauss'], state_b['gauss'])
@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
def test_hot_swap(restore_singleton_bitgen):
# GH 21808
def_bg = np.random.default_rng(0)
bg = def_bg.bit_generator
np.random.set_bit_generator(bg)
assert isinstance(np.random.mtrand._rand._bit_generator, type(bg))
second_bg = np.random.get_bit_generator()
assert bg is second_bg
@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
def test_seed_alt_bit_gen(restore_singleton_bitgen):
# GH 21808
bg = PCG64(0)
np.random.set_bit_generator(bg)
state = np.random.get_state(legacy=False)
np.random.seed(1)
new_state = np.random.get_state(legacy=False)
print(state)
print(new_state)
assert state["bit_generator"] == "PCG64"
assert state["state"]["state"] != new_state["state"]["state"]
assert state["state"]["inc"] != new_state["state"]["inc"]
@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
def test_state_error_alt_bit_gen(restore_singleton_bitgen):
# GH 21808
state = np.random.get_state()
bg = PCG64(0)
np.random.set_bit_generator(bg)
with pytest.raises(ValueError, match="state must be for a PCG64"):
np.random.set_state(state)
@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
def test_swap_worked(restore_singleton_bitgen):
# GH 21808
np.random.seed(98765)
vals = np.random.randint(0, 2 ** 30, 10)
bg = PCG64(0)
state = bg.state
np.random.set_bit_generator(bg)
state_direct = np.random.get_state(legacy=False)
for field in state:
assert state[field] == state_direct[field]
np.random.seed(98765)
pcg_vals = np.random.randint(0, 2 ** 30, 10)
assert not np.all(vals == pcg_vals)
new_state = bg.state
assert new_state["state"]["state"] != state["state"]["state"]
assert new_state["state"]["inc"] == new_state["state"]["inc"]
@pytest.mark.thread_unsafe(reason="np.random.set_bit_generator affects global state")
def test_swapped_singleton_against_direct(restore_singleton_bitgen):
np.random.set_bit_generator(PCG64(98765))
singleton_vals = np.random.randint(0, 2 ** 30, 10)
rg = np.random.RandomState(PCG64(98765))
non_singleton_vals = rg.randint(0, 2 ** 30, 10)
assert_equal(non_singleton_vals, singleton_vals)
| TestSingleEltArrayInput |
python | PrefectHQ__prefect | tests/blocks/test_core.py | {
"start": 87898,
"end": 93309
} | class ____:
def test_block_type_slug_is_included_in_dict(self):
assert "block_type_slug" in AChildBlock().model_dump()
def test_block_type_slug_respects_exclude(self):
assert "block_type_slug" not in AChildBlock().model_dump(
exclude={"block_type_slug"}
)
def test_block_type_slug_respects_include(self):
assert "block_type_slug" not in AChildBlock().model_dump(include={"a"})
async def test_block_type_slug_excluded_from_document(self, prefect_client):
await AChildBlock.register_type_and_schema(client=prefect_client)
document = AChildBlock()._to_block_document(name="foo")
assert "block_type_slug" not in document.data
def test_base_parse_works_for_base_instance(self):
block = BaseBlock.model_validate(BaseBlock().model_dump())
assert type(block) is BaseBlock
block = BaseBlock.model_validate(BaseBlock().model_dump())
assert type(block) is BaseBlock
def test_base_parse_creates_child_instance_from_dict(self):
block = BaseBlock.model_validate(AChildBlock().model_dump())
assert type(block) is AChildBlock
assert block.a == 1
block = BaseBlock.model_validate(BChildBlock().model_dump())
assert type(block) is BChildBlock
assert block.b == 2
def test_base_parse_creates_child_instance_from_json(self):
block = BaseBlock.model_validate_json(AChildBlock().model_dump_json())
assert type(block) is AChildBlock
block = BaseBlock.model_validate_json(BChildBlock().model_dump_json())
assert type(block) is BChildBlock
def test_base_parse_retains_default_attributes(self):
block = BaseBlock.model_validate(AChildBlock().model_dump())
assert block.base == 0
assert block.a == 1
def test_base_parse_retains_set_child_attributes(self):
block = BaseBlock.model_validate(BChildBlock(b=3).model_dump())
assert block.base == 0
assert block.b == 3
def test_base_parse_retains_set_base_attributes(self):
block = BaseBlock.model_validate(BChildBlock(base=1).model_dump())
assert block.base == 1
assert block.b == 2
def test_base_field_creates_child_instance_from_object(self):
model = ParentModel(block=AChildBlock())
assert type(model.block) is AChildBlock
model = ParentModel(block=BChildBlock())
assert type(model.block) is BChildBlock
def test_base_field_creates_child_instance_from_dict(self):
model = ParentModel(block=AChildBlock().model_dump())
assert type(model.block) is AChildBlock
assert model.block.a == 1
model = ParentModel(block=BChildBlock().model_dump())
assert type(model.block) is BChildBlock
assert model.block.b == 2
def test_created_block_has_pydantic_attributes(self):
block = BaseBlock.model_validate(AChildBlock().model_dump())
assert block.model_fields_set
def test_created_block_can_be_copied(self):
block = BaseBlock.model_validate(AChildBlock().model_dump())
block_copy = block.model_copy()
assert block == block_copy
async def test_created_block_can_be_saved(self):
block = BaseBlock.model_validate(AChildBlock().model_dump())
assert await block.save("test")
async def test_created_block_can_be_saved_then_loaded(self):
block = BaseBlock.model_validate(AChildBlock().model_dump())
await block.save("test")
new_block = await block.load("test")
assert_blocks_equal(block, new_block)
assert new_block.model_fields_set
def test_created_block_fields_set(self):
expected = {"base", "block_type_slug", "a"}
block = BaseBlock.model_validate(AChildBlock().model_dump())
assert block.model_fields_set == expected
assert block.a == 1
block = BaseBlock.model_validate(AChildBlock(a=2).model_dump())
assert block.model_fields_set == expected
assert block.a == 2
block = block.model_copy()
assert block.model_fields_set == expected
assert block.a == 2
def test_base_field_creates_child_instance_with_union(self):
class UnionParentModel(BaseModel):
block: Union[AChildBlock, BChildBlock]
model = UnionParentModel(block=AChildBlock(a=3).model_dump())
assert type(model.block) is AChildBlock
# Assignment with a copy still works
model.block = model.block.model_copy()
assert type(model.block) is AChildBlock
assert model.block
assert model.block.a == 3
model = UnionParentModel(block=BChildBlock(b=4).model_dump())
assert type(model.block) is BChildBlock
assert model.block.b == 4
def test_base_field_creates_child_instance_with_assignment_validation(self):
class AssignmentParentModel(BaseModel, validate_assignment=True):
block: BaseBlock
model = AssignmentParentModel(block=AChildBlock(a=3).model_dump())
assert type(model.block) is AChildBlock
assert model.block.a == 3
model.block = model.block.model_copy()
assert type(model.block) is AChildBlock
assert model.block.a == 3
model.block = BChildBlock(b=4).model_dump()
assert type(model.block) is BChildBlock
assert model.block.b == 4
| TestTypeDispatch |
python | pydata__xarray | asv_bench/benchmarks/dataarray_missing.py | {
"start": 978,
"end": 1872
} | class ____:
def setup(self, shape, chunks, limit):
if chunks is not None:
requires_dask()
self.da = make_bench_data(shape, 0.1, chunks)
@parameterized(
["shape", "chunks", "limit"],
(
[(365, 75, 75)],
[None, {"x": 25, "y": 25}],
[None, 3],
),
)
def time_ffill(self, shape, chunks, limit):
actual = self.da.ffill(dim="time", limit=limit)
if chunks is not None:
actual = actual.compute()
@parameterized(
["shape", "chunks", "limit"],
(
[(365, 75, 75)],
[None, {"x": 25, "y": 25}],
[None, 3],
),
)
def time_bfill(self, shape, chunks, limit):
actual = self.da.bfill(dim="time", limit=limit)
if chunks is not None:
actual = actual.compute()
| DataArrayMissingBottleneck |
python | numba__llvmlite | llvmlite/binding/orcjit.py | {
"start": 395,
"end": 6749
} | class ____:
"""
Create a library for linking by OrcJIT
OrcJIT operates like a linker: a number of compilation units and
dependencies are collected together and linked into a single dynamic library
that can export functions to other libraries or to be consumed directly as
entry points into JITted code. The native OrcJIT has a lot of memory
management complications so this API is designed to work well with Python's
garbage collection.
The creation of a new library is a bit like a linker command line where
compilation units, mostly as LLVM IR, and previously constructed libraries
are linked together, then loaded into memory, and the addresses of exported
symbols are extracted. Any static initializers are run and the exported
addresses and a resource tracker is produced. As long as the resource
tracker is referenced somewhere in Python, the exported addresses will be
valid. Once the resource tracker is garbage collected, the static
destructors will run and the library will be unloaded from memory.
"""
def __init__(self):
self.__entries = []
self.__exports = set()
self.__imports = {}
def add_ir(self, llvmir):
"""
Adds a compilation unit to the library using LLVM IR as the input
format.
This takes a string or an object that can be converted to a string,
including IRBuilder, that contains LLVM IR.
"""
self.__entries.append((0, str(llvmir).encode('utf-8')))
return self
def add_native_assembly(self, asm):
"""
Adds a compilation unit to the library using native assembly as the
input format.
This takes a string or an object that can be converted to a string that
contains native assembly, which will be
parsed by LLVM.
"""
self.__entries.append((1, str(asm).encode('utf-8')))
return self
def add_object_img(self, data):
"""
Adds a compilation unit to the library using pre-compiled object code.
This takes the bytes of the contents of an object artifact which will be
loaded by LLVM.
"""
self.__entries.append((2, bytes(data)))
return self
def add_object_file(self, file_path):
"""
Adds a compilation unit to the library using pre-compiled object file.
This takes a string or path-like object that references an object file
which will be loaded by LLVM.
"""
with open(file_path, "rb") as f:
self.__entries.append((2, f.read()))
return self
def add_jit_library(self, name):
"""
Adds an existing JIT library as a prerequisite.
The name of the library must match the one provided in a previous link
command.
"""
self.__entries.append((3, str(name).encode('utf-8')))
return self
def add_current_process(self):
"""
Allows the JITted library to access symbols in the current binary.
That is, it allows exporting the current binary's symbols, including
loaded libraries, as imports to the JITted
library.
"""
self.__entries.append((3, b''))
return self
def import_symbol(self, name, address):
"""
Register the *address* of global symbol *name*. This will make
it usable (e.g. callable) from LLVM-compiled functions.
"""
self.__imports[str(name)] = c_uint64(address)
return self
def export_symbol(self, name):
"""
During linking, extract the address of a symbol that was defined in one
of the compilation units.
This allows getting symbols, functions or global variables, out of the
JIT linked library. The addresses will be
available when the link method is called.
"""
self.__exports.add(str(name))
return self
def link(self, lljit, library_name):
"""
Link all the current compilation units into a JITted library and extract
the addresses of exported symbols.
An instance of the OrcJIT engine must be provided and this will be the
scope that is used to find other JITted libraries that are dependencies
and also be the place where this library will be defined.
After linking, the method will return a resource tracker that keeps the
library alive. This tracker also knows the addresses of any exported
symbols that were requested.
The addresses will be valid as long as the resource tracker is
referenced.
When the resource tracker is destroyed, the library will be cleaned up;
however, the name of the library cannot be reused.
"""
assert not lljit.closed, "Cannot add to closed JIT"
encoded_library_name = str(library_name).encode('utf-8')
assert len(encoded_library_name) > 0, "Library cannot be empty"
elements = (_LinkElement * len(self.__entries))()
for idx, (kind, value) in enumerate(self.__entries):
elements[idx].element_kind = c_uint8(kind)
elements[idx].value = c_char_p(value)
elements[idx].value_len = c_size_t(len(value))
exports = (_SymbolAddress * len(self.__exports))()
for idx, name in enumerate(self.__exports):
exports[idx].name = name.encode('utf-8')
imports = (_SymbolAddress * len(self.__imports))()
for idx, (name, addr) in enumerate(self.__imports.items()):
imports[idx].name = name.encode('utf-8')
imports[idx].address = addr
with ffi.OutputString() as outerr:
tracker = lljit._capi.LLVMPY_LLJIT_Link(
lljit._ptr,
encoded_library_name,
elements,
len(self.__entries),
imports,
len(self.__imports),
exports,
len(self.__exports),
outerr)
if not tracker:
raise RuntimeError(str(outerr))
return ResourceTracker(tracker,
library_name,
{name: exports[idx].address
for idx, name in enumerate(self.__exports)})
| JITLibraryBuilder |
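A minimal end-to-end sketch of the builder above (assuming llvmlite's `create_lljit_compiler` entry point and that the returned `ResourceTracker` exposes exported addresses by name, as the address dict passed to its constructor suggests):

```python
import ctypes
import llvmlite.binding as llvm

llvm.initialize()
llvm.initialize_native_target()
llvm.initialize_native_asmprinter()

ir = r"""
define i32 @add(i32 %a, i32 %b) {
  %r = add i32 %a, %b
  ret i32 %r
}
"""

lljit = llvm.create_lljit_compiler()
tracker = (
    llvm.JITLibraryBuilder()
    .add_ir(ir)
    .export_symbol("add")
    .link(lljit, "demo")  # library names cannot be reused within a JIT
)
add = ctypes.CFUNCTYPE(ctypes.c_int32, ctypes.c_int32, ctypes.c_int32)(
    tracker["add"]
)
print(add(2, 3))  # 5; keep `tracker` referenced while the pointer is used
```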
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dlp.py | {
"start": 5443,
"end": 6321
} | class ____:
@mock.patch("airflow.providers.google.cloud.operators.dlp.CloudDLPHook")
def test_create_inspect_template(self, mock_hook):
mock_hook.return_value.create_inspect_template.return_value = InspectTemplate(name=DLP_JOB_PATH)
operator = CloudDLPCreateInspectTemplateOperator(organization_id=ORGANIZATION_ID, task_id="id")
operator.execute(context=mock.MagicMock())
mock_hook.assert_called_once_with(
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=None,
)
mock_hook.return_value.create_inspect_template.assert_called_once_with(
organization_id=ORGANIZATION_ID,
project_id=None,
inspect_template=None,
template_id=None,
retry=DEFAULT,
timeout=None,
metadata=(),
)
| TestCloudDLPCreateInspectTemplateOperator |
python | tensorflow__tensorflow | tensorflow/python/ops/control_flow_ops_test.py | {
"start": 65305,
"end": 65648
} | class ____(test_util.TensorFlowTestCase):
def testAsyncNoop(self):
@def_function.function
def f():
x = constant_op.constant(2)
with ops.control_dependencies([while_v2.async_noop()]):
y = x + 2
return y
self.assertEqual(self.evaluate(f()), 4)
if __name__ == "__main__":
googletest.main()
| AsyncNoopTest |
python | sqlalchemy__sqlalchemy | test/sql/test_metadata.py | {
"start": 30742,
"end": 58903
} | class ____(fixtures.TestBase, AssertsCompiledSQL, ComparesTables):
@testing.fixture
def copy_fixture(self, metadata):
from sqlalchemy.testing.schema import Table
table = Table(
"mytable",
metadata,
Column("myid", Integer, Sequence("foo_id_seq"), primary_key=True),
Column("name", String(40), nullable=True),
Column("status", Boolean(create_constraint=True)),
Column(
"entry",
Enum(
"one",
"two",
"three",
name="entry_enum",
create_constraint=True,
),
),
Column(
"foo",
String(40),
nullable=False,
server_default="x",
server_onupdate="q",
),
Column(
"bar", String(40), nullable=False, default="y", onupdate="z"
),
Column(
"description", String(30), CheckConstraint("description='hi'")
),
UniqueConstraint("name"),
test_needs_fk=True,
)
table2 = Table(
"othertable",
metadata,
Column("id", Integer, Sequence("foo_seq"), primary_key=True),
Column("myid", Integer, ForeignKey("mytable.myid")),
test_needs_fk=True,
)
table3 = Table(
"has_comments",
metadata,
Column("foo", Integer, comment="some column"),
comment="table comment",
)
metadata.create_all(testing.db)
return table, table2, table3
@testing.fixture(
params=[
"to_metadata",
"pickle",
"pickle_via_reflect",
]
)
def copy_tables_fixture(self, request, metadata, copy_fixture, picklers):
table, table2, table3 = copy_fixture
test = request.param
if test == "to_metadata":
meta2 = MetaData()
table_c = table.to_metadata(meta2)
table2_c = table2.to_metadata(meta2)
table3_c = table3.to_metadata(meta2)
return (table_c, table2_c, table3_c, (True, False))
elif test == "pickle":
meta2 = picklers.loads(picklers.dumps(metadata))
picklers.loads(picklers.dumps(meta2))
return (
meta2.tables["mytable"],
meta2.tables["othertable"],
meta2.tables["has_comments"],
(True, False),
)
elif test == "pickle_via_reflect":
# this is the most common use case, pickling the results of a
# database reflection
meta2 = MetaData()
t1 = Table("mytable", meta2, autoload_with=testing.db)
Table("othertable", meta2, autoload_with=testing.db)
Table("has_comments", meta2, autoload_with=testing.db)
meta3 = picklers.loads(picklers.dumps(meta2))
assert meta3.tables["mytable"] is not t1
return (
meta3.tables["mytable"],
meta3.tables["othertable"],
meta3.tables["has_comments"],
(False, True),
)
assert False
@testing.requires.check_constraints
def test_copy(self, metadata, copy_fixture, copy_tables_fixture):
table, table2, table3 = copy_fixture
table_c, table2_c, table3_c, (has_constraints, reflect) = (
copy_tables_fixture
)
self.assert_tables_equal(table, table_c)
self.assert_tables_equal(table2, table2_c)
assert table is not table_c
assert table.primary_key is not table_c.primary_key
assert list(table2_c.c.myid.foreign_keys)[0].column is table_c.c.myid
assert list(table2_c.c.myid.foreign_keys)[0].column is not table.c.myid
assert "x" in str(table_c.c.foo.server_default.arg)
if not reflect:
assert isinstance(table_c.c.myid.default, Sequence)
assert str(table_c.c.foo.server_onupdate.arg) == "q"
assert str(table_c.c.bar.default.arg) == "y"
assert (
getattr(
table_c.c.bar.onupdate.arg,
"arg",
table_c.c.bar.onupdate.arg,
)
== "z"
)
assert isinstance(table2_c.c.id.default, Sequence)
if testing.requires.unique_constraint_reflection.enabled:
for c in table_c.constraints:
if isinstance(c, UniqueConstraint):
break
else:
for c in table_c.indexes:
break
else:
assert False
assert c.columns.contains_column(table_c.c.name)
assert not c.columns.contains_column(table.c.name)
# CHECK constraints don't get reflected for any dialect right
# now
if has_constraints:
for c in table_c.c.description.constraints:
if isinstance(c, CheckConstraint):
break
else:
assert False
assert str(c.sqltext) == "description='hi'"
if testing.requires.comment_reflection.enabled:
eq_(table3_c.comment, "table comment")
eq_(table3_c.c.foo.comment, "some column")
def test_col_key_fk_parent(self):
# test #2643
m1 = MetaData()
a = Table("a", m1, Column("x", Integer))
b = Table("b", m1, Column("x", Integer, ForeignKey("a.x"), key="y"))
assert b.c.y.references(a.c.x)
m2 = MetaData()
b2 = b.to_metadata(m2)
a2 = a.to_metadata(m2)
assert b2.c.y.references(a2.c.x)
def test_fk_w_no_colname(self):
"""test a ForeignKey that refers to table name only. the column
name is assumed to be the same col name on parent table.
this is a little used feature from long ago that nonetheless is
still in the code.
The feature was found to be not working but is repaired for
SQLAlchemy 2.0.
"""
m1 = MetaData()
a = Table("a", m1, Column("x", Integer))
b = Table("b", m1, Column("x", Integer, ForeignKey("a")))
assert b.c.x.references(a.c.x)
m2 = MetaData()
b2 = b.to_metadata(m2)
a2 = a.to_metadata(m2)
assert b2.c.x.references(a2.c.x)
def test_fk_w_no_colname_name_missing(self):
"""test a ForeignKey that refers to table name only. the column
name is assumed to be the same col name on parent table.
this is a little used feature from long ago that nonetheless is
still in the code.
"""
m1 = MetaData()
a = Table("a", m1, Column("x", Integer))
b = Table("b", m1, Column("y", Integer, ForeignKey("a")))
with expect_raises_message(
exc.NoReferencedColumnError,
"Could not initialize target column for ForeignKey 'a' on "
"table 'b': table 'a' has no column named 'y'",
):
assert b.c.y.references(a.c.x)
def test_column_collection_constraint_w_ad_hoc_columns(self):
"""Test ColumnCollectionConstraint that has columns that aren't
part of the Table.
"""
meta = MetaData()
uq1 = UniqueConstraint(literal_column("some_name"))
cc1 = CheckConstraint(literal_column("some_name") > 5)
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
Column("name", String(40), nullable=True),
uq1,
cc1,
)
self.assert_compile(
schema.AddConstraint(uq1),
"ALTER TABLE mytable ADD UNIQUE (some_name)",
dialect="default",
)
self.assert_compile(
schema.AddConstraint(cc1),
"ALTER TABLE mytable ADD CHECK (some_name > 5)",
dialect="default",
)
meta2 = MetaData()
table2 = table.to_metadata(meta2)
uq2 = [
c for c in table2.constraints if isinstance(c, UniqueConstraint)
][0]
cc2 = [
c for c in table2.constraints if isinstance(c, CheckConstraint)
][0]
self.assert_compile(
schema.AddConstraint(uq2),
"ALTER TABLE mytable ADD UNIQUE (some_name)",
dialect="default",
)
self.assert_compile(
schema.AddConstraint(cc2),
"ALTER TABLE mytable ADD CHECK (some_name > 5)",
dialect="default",
)
def test_change_schema(self):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
Column("name", String(40), nullable=True),
Column(
"description", String(30), CheckConstraint("description='hi'")
),
UniqueConstraint("name"),
)
table2 = Table(
"othertable",
meta,
Column("id", Integer, primary_key=True),
Column("myid", Integer, ForeignKey("mytable.myid")),
)
meta2 = MetaData()
table_c = table.to_metadata(meta2, schema="someschema")
table2_c = table2.to_metadata(meta2, schema="someschema")
eq_(
str(table_c.join(table2_c).onclause),
str(table_c.c.myid == table2_c.c.myid),
)
eq_(
str(table_c.join(table2_c).onclause),
"someschema.mytable.myid = someschema.othertable.myid",
)
def test_retain_table_schema(self):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
Column("name", String(40), nullable=True),
Column(
"description", String(30), CheckConstraint("description='hi'")
),
UniqueConstraint("name"),
schema="myschema",
)
table2 = Table(
"othertable",
meta,
Column("id", Integer, primary_key=True),
Column("myid", Integer, ForeignKey("myschema.mytable.myid")),
schema="myschema",
)
meta2 = MetaData()
table_c = table.to_metadata(meta2)
table2_c = table2.to_metadata(meta2)
eq_(
str(table_c.join(table2_c).onclause),
str(table_c.c.myid == table2_c.c.myid),
)
eq_(
str(table_c.join(table2_c).onclause),
"myschema.mytable.myid = myschema.othertable.myid",
)
def test_change_name_retain_metadata(self):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
Column("name", String(40), nullable=True),
Column(
"description", String(30), CheckConstraint("description='hi'")
),
UniqueConstraint("name"),
schema="myschema",
)
table2 = table.to_metadata(table.metadata, name="newtable")
table3 = table.to_metadata(
table.metadata, schema="newschema", name="newtable"
)
assert table.metadata is table2.metadata
assert table.metadata is table3.metadata
eq_(
(table.name, table2.name, table3.name),
("mytable", "newtable", "newtable"),
)
eq_(
(table.key, table2.key, table3.key),
("myschema.mytable", "myschema.newtable", "newschema.newtable"),
)
def test_change_name_change_metadata(self):
meta = MetaData()
meta2 = MetaData()
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
Column("name", String(40), nullable=True),
Column(
"description", String(30), CheckConstraint("description='hi'")
),
UniqueConstraint("name"),
schema="myschema",
)
table2 = table.to_metadata(meta2, name="newtable")
assert table.metadata is not table2.metadata
eq_((table.name, table2.name), ("mytable", "newtable"))
eq_((table.key, table2.key), ("myschema.mytable", "myschema.newtable"))
def test_change_name_selfref_fk_moves(self):
meta = MetaData()
referenced = Table(
"ref", meta, Column("id", Integer, primary_key=True)
)
table = Table(
"mytable",
meta,
Column("id", Integer, primary_key=True),
Column("parent_id", ForeignKey("mytable.id")),
Column("ref_id", ForeignKey("ref.id")),
)
table2 = table.to_metadata(table.metadata, name="newtable")
assert table.metadata is table2.metadata
assert table2.c.ref_id.references(referenced.c.id)
assert table2.c.parent_id.references(table2.c.id)
def test_change_name_selfref_fk_moves_w_schema(self):
meta = MetaData()
referenced = Table(
"ref", meta, Column("id", Integer, primary_key=True)
)
table = Table(
"mytable",
meta,
Column("id", Integer, primary_key=True),
Column("parent_id", ForeignKey("mytable.id")),
Column("ref_id", ForeignKey("ref.id")),
)
table2 = table.to_metadata(
table.metadata, name="newtable", schema="newschema"
)
ref2 = referenced.to_metadata(table.metadata, schema="newschema")
assert table.metadata is table2.metadata
assert table2.c.ref_id.references(ref2.c.id)
assert table2.c.parent_id.references(table2.c.id)
def _assert_fk(self, t2, schema, expected, referred_schema_fn=None):
m2 = MetaData()
existing_schema = t2.schema
if schema:
t2c = t2.to_metadata(
m2, schema=schema, referred_schema_fn=referred_schema_fn
)
eq_(t2c.schema, schema)
else:
t2c = t2.to_metadata(m2, referred_schema_fn=referred_schema_fn)
eq_(t2c.schema, existing_schema)
eq_(list(t2c.c.y.foreign_keys)[0]._get_colspec(), expected)
def test_fk_has_schema_string_retain_schema(self):
m = MetaData()
t2 = Table("t2", m, Column("y", Integer, ForeignKey("q.t1.x")))
self._assert_fk(t2, None, "q.t1.x")
Table("t1", m, Column("x", Integer), schema="q")
self._assert_fk(t2, None, "q.t1.x")
def test_fk_has_schema_string_new_schema(self):
m = MetaData()
t2 = Table("t2", m, Column("y", Integer, ForeignKey("q.t1.x")))
self._assert_fk(t2, "z", "q.t1.x")
Table("t1", m, Column("x", Integer), schema="q")
self._assert_fk(t2, "z", "q.t1.x")
def test_fk_has_schema_col_retain_schema(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), schema="q")
t2 = Table("t2", m, Column("y", Integer, ForeignKey(t1.c.x)))
self._assert_fk(t2, "z", "q.t1.x")
def test_fk_has_schema_col_new_schema(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), schema="q")
t2 = Table("t2", m, Column("y", Integer, ForeignKey(t1.c.x)))
self._assert_fk(t2, "z", "q.t1.x")
def test_fk_and_referent_has_same_schema_string_retain_schema(self):
m = MetaData()
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey("q.t1.x")), schema="q"
)
self._assert_fk(t2, None, "q.t1.x")
Table("t1", m, Column("x", Integer), schema="q")
self._assert_fk(t2, None, "q.t1.x")
def test_fk_and_referent_has_same_schema_string_new_schema(self):
m = MetaData()
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey("q.t1.x")), schema="q"
)
self._assert_fk(t2, "z", "z.t1.x")
Table("t1", m, Column("x", Integer), schema="q")
self._assert_fk(t2, "z", "z.t1.x")
def test_fk_and_referent_has_same_schema_col_retain_schema(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), schema="q")
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
)
self._assert_fk(t2, None, "q.t1.x")
def test_fk_and_referent_has_same_schema_col_new_schema(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), schema="q")
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
)
self._assert_fk(t2, "z", "z.t1.x")
def test_fk_and_referent_has_diff_schema_string_retain_schema(self):
m = MetaData()
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey("p.t1.x")), schema="q"
)
self._assert_fk(t2, None, "p.t1.x")
Table("t1", m, Column("x", Integer), schema="p")
self._assert_fk(t2, None, "p.t1.x")
def test_fk_and_referent_has_diff_schema_string_new_schema(self):
m = MetaData()
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey("p.t1.x")), schema="q"
)
self._assert_fk(t2, "z", "p.t1.x")
Table("t1", m, Column("x", Integer), schema="p")
self._assert_fk(t2, "z", "p.t1.x")
def test_fk_and_referent_has_diff_schema_col_retain_schema(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), schema="p")
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
)
self._assert_fk(t2, None, "p.t1.x")
def test_fk_and_referent_has_diff_schema_col_new_schema(self):
m = MetaData()
t1 = Table("t1", m, Column("x", Integer), schema="p")
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey(t1.c.x)), schema="q"
)
self._assert_fk(t2, "z", "p.t1.x")
def test_fk_custom_system(self):
m = MetaData()
t2 = Table(
"t2", m, Column("y", Integer, ForeignKey("p.t1.x")), schema="q"
)
def ref_fn(table, to_schema, constraint, referred_schema):
assert table is t2
eq_(to_schema, "z")
eq_(referred_schema, "p")
return "h"
self._assert_fk(t2, "z", "h.t1.x", referred_schema_fn=ref_fn)
def test_fk_reset_to_none(self):
m = MetaData()
t2 = Table("t2", m, Column("y", Integer, ForeignKey("p.t1.x")))
def ref_fn(table, to_schema, constraint, referred_schema):
return BLANK_SCHEMA
self._assert_fk(t2, None, "t1.x", referred_schema_fn=ref_fn)
@testing.combinations(None, RETAIN_SCHEMA)
def test_fk_test_non_return_for_referred_schema(self, sym):
m = MetaData()
t2 = Table("t2", m, Column("y", Integer, ForeignKey("p.t1.x")))
def ref_fn(table, to_schema, constraint, referred_schema):
return sym
self._assert_fk(t2, None, "p.t1.x", referred_schema_fn=ref_fn)
def test_fk_get_referent_is_always_a_column(self):
"""test the annotation on ForeignKey.get_referent() in that it does
in fact return Column even if given a labeled expr in a subquery"""
m = MetaData()
a = Table("a", m, Column("id", Integer, primary_key=True))
b = Table("b", m, Column("aid", Integer, ForeignKey("a.id")))
stmt = select(a.c.id.label("somelabel")).subquery()
referent = list(b.c.aid.foreign_keys)[0].get_referent(stmt)
is_(referent, stmt.c.somelabel)
assert isinstance(referent, Column)
def test_copy_info(self):
m = MetaData()
fk = ForeignKey("t2.id")
c = Column("c", Integer, fk)
ck = CheckConstraint("c > 5")
t = Table("t", m, c, ck)
m.info["minfo"] = True
fk.info["fkinfo"] = True
c.info["cinfo"] = True
ck.info["ckinfo"] = True
t.info["tinfo"] = True
t.primary_key.info["pkinfo"] = True
fkc = [
const
for const in t.constraints
if isinstance(const, ForeignKeyConstraint)
][0]
fkc.info["fkcinfo"] = True
m2 = MetaData()
t2 = t.to_metadata(m2)
m.info["minfo"] = False
fk.info["fkinfo"] = False
c.info["cinfo"] = False
ck.info["ckinfo"] = False
t.primary_key.info["pkinfo"] = False
fkc.info["fkcinfo"] = False
eq_(m2.info, {})
eq_(t2.info, {"tinfo": True})
eq_(t2.c.c.info, {"cinfo": True})
eq_(list(t2.c.c.foreign_keys)[0].info, {"fkinfo": True})
eq_(t2.primary_key.info, {"pkinfo": True})
fkc2 = [
const
for const in t2.constraints
if isinstance(const, ForeignKeyConstraint)
][0]
eq_(fkc2.info, {"fkcinfo": True})
ck2 = [
const
for const in t2.constraints
if isinstance(const, CheckConstraint)
][0]
eq_(ck2.info, {"ckinfo": True})
def test_dialect_kwargs(self):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
mysql_engine="InnoDB",
)
meta2 = MetaData()
table_c = table.to_metadata(meta2)
eq_(table.kwargs, {"mysql_engine": "InnoDB"})
eq_(table.kwargs, table_c.kwargs)
def test_indexes(self):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("id", Integer, primary_key=True),
Column("data1", Integer, index=True),
Column("data2", Integer),
Index("text", text("data1 + 1")),
)
Index("multi", table.c.data1, table.c.data2)
Index("func", func.abs(table.c.data1))
Index("multi-func", table.c.data1, func.abs(table.c.data2))
meta2 = MetaData()
table_c = table.to_metadata(meta2)
def _get_key(i):
return (
[i.name, i.unique]
+ sorted(i.kwargs.items())
+ [str(col) for col in i.expressions]
)
eq_(
sorted([_get_key(i) for i in table.indexes]),
sorted([_get_key(i) for i in table_c.indexes]),
)
def test_indexes_with_col_redefine(self):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("id", Integer, primary_key=True),
Column("data1", Integer),
Column("data2", Integer),
Index("text", text("data1 + 1")),
)
Index("multi", table.c.data1, table.c.data2)
Index("func", func.abs(table.c.data1))
Index("multi-func", table.c.data1, func.abs(table.c.data2))
table = Table(
"mytable",
meta,
Column("data1", Integer),
Column("data2", Integer),
extend_existing=True,
)
meta2 = MetaData()
table_c = table.to_metadata(meta2)
def _get_key(i):
return (
[i.name, i.unique]
+ sorted(i.kwargs.items())
+ [str(col) for col in i.expressions]
)
eq_(
sorted([_get_key(i) for i in table.indexes]),
sorted([_get_key(i) for i in table_c.indexes]),
)
@emits_warning("Table '.+' already exists within the given MetaData")
def test_already_exists(self):
meta1 = MetaData()
table1 = Table(
"mytable", meta1, Column("myid", Integer, primary_key=True)
)
meta2 = MetaData()
table2 = Table(
"mytable", meta2, Column("yourid", Integer, primary_key=True)
)
table_c = table1.to_metadata(meta2)
table_d = table2.to_metadata(meta2)
# d'oh!
assert table_c is table_d
def test_default_schema_metadata(self):
meta = MetaData(schema="myschema")
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
Column("name", String(40), nullable=True),
Column(
"description", String(30), CheckConstraint("description='hi'")
),
UniqueConstraint("name"),
)
table2 = Table(
"othertable",
meta,
Column("id", Integer, primary_key=True),
Column("myid", Integer, ForeignKey("myschema.mytable.myid")),
)
meta2 = MetaData(schema="someschema")
table_c = table.to_metadata(meta2, schema=None)
table2_c = table2.to_metadata(meta2, schema=None)
eq_(
str(table_c.join(table2_c).onclause),
str(table_c.c.myid == table2_c.c.myid),
)
eq_(
str(table_c.join(table2_c).onclause),
"someschema.mytable.myid = someschema.othertable.myid",
)
def test_strip_schema(self):
meta = MetaData()
table = Table(
"mytable",
meta,
Column("myid", Integer, primary_key=True),
Column("name", String(40), nullable=True),
Column(
"description", String(30), CheckConstraint("description='hi'")
),
UniqueConstraint("name"),
)
table2 = Table(
"othertable",
meta,
Column("id", Integer, primary_key=True),
Column("myid", Integer, ForeignKey("mytable.myid")),
)
meta2 = MetaData()
table_c = table.to_metadata(meta2, schema=None)
table2_c = table2.to_metadata(meta2, schema=None)
eq_(
str(table_c.join(table2_c).onclause),
str(table_c.c.myid == table2_c.c.myid),
)
eq_(
str(table_c.join(table2_c).onclause),
"mytable.myid = othertable.myid",
)
def test_unique_true_flag(self):
meta = MetaData()
table = Table("mytable", meta, Column("x", Integer, unique=True))
m2 = MetaData()
t2 = table.to_metadata(m2)
eq_(
len(
[
const
for const in t2.constraints
if isinstance(const, UniqueConstraint)
]
),
1,
)
def test_index_true_flag(self):
meta = MetaData()
table = Table("mytable", meta, Column("x", Integer, index=True))
m2 = MetaData()
t2 = table.to_metadata(m2)
eq_(len(t2.indexes), 1)
@testing.variation("type_", ["create_table_as", "create_view"])
def test_table_via_select(self, type_: testing.Variation):
meta = MetaData()
table = Table("mytable", meta, Column("x", Integer))
m2 = MetaData()
if type_.create_table_as:
target = select(table).into("ctas", metadata=meta)
elif type_.create_view:
target = CreateView(select(table), "tview", metadata=meta)
else:
type_.fail()
is_(target.table.metadata, meta)
tt2 = target.table.to_metadata(m2)
if type_.create_view:
is_true(tt2.is_view)
ttarget = tt2._creator_ddl
is_(ttarget.metadata, m2)
is_(ttarget.table, tt2)
if tt2.is_view:
is_(tt2._dropper_ddl.element, tt2)
def test_alternate_create_drop(self):
meta = MetaData()
table = Table("mytable", meta, Column("x", Integer))
table.set_creator_ddl(CreateTable(table, if_not_exists=True))
table.set_dropper_ddl(DropTable(table, if_exists=True))
m2 = MetaData()
ttarget = table.to_metadata(m2)
is_(ttarget._creator_ddl.element, ttarget)
is_(ttarget._dropper_ddl.element, ttarget)
| ToMetaDataTest |
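A minimal sketch of the Table.to_metadata() behaviour these tests exercise: copying related tables into a fresh MetaData under a new schema, with foreign keys following the copy. Table and column names here are illustrative.
from sqlalchemy import Column, ForeignKey, Integer, MetaData, String, Table
m1 = MetaData()
user = Table("user", m1,
             Column("id", Integer, primary_key=True),
             Column("name", String(40)))
address = Table("address", m1,
                Column("id", Integer, primary_key=True),
                Column("user_id", Integer, ForeignKey("user.id")))
m2 = MetaData()
user_c = user.to_metadata(m2, schema="tenant1")
address_c = address.to_metadata(m2, schema="tenant1")
# the copied FK now points at the copied table in the new schema, not the original
assert address_c.c.user_id.references(user_c.c.id)
assert not address_c.c.user_id.references(user.c.id)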
python | scrapy__scrapy | scrapy/extensions/telnet.py | {
"start": 905,
"end": 4070
} | class ____(protocol.ServerFactory):
def __init__(self, crawler: Crawler):
if not crawler.settings.getbool("TELNETCONSOLE_ENABLED"):
raise NotConfigured
self.crawler: Crawler = crawler
self.noisy: bool = False
self.portrange: list[int] = [
int(x) for x in crawler.settings.getlist("TELNETCONSOLE_PORT")
]
self.host: str = crawler.settings["TELNETCONSOLE_HOST"]
self.username: str = crawler.settings["TELNETCONSOLE_USERNAME"]
self.password: str = crawler.settings["TELNETCONSOLE_PASSWORD"]
if not self.password:
self.password = binascii.hexlify(os.urandom(8)).decode("utf8")
logger.info("Telnet Password: %s", self.password)
self.crawler.signals.connect(self.start_listening, signals.engine_started)
self.crawler.signals.connect(self.stop_listening, signals.engine_stopped)
@classmethod
def from_crawler(cls, crawler: Crawler) -> Self:
return cls(crawler)
def start_listening(self) -> None:
self.port: Port = listen_tcp(self.portrange, self.host, self)
h = self.port.getHost()
logger.info(
"Telnet console listening on %(host)s:%(port)d",
{"host": h.host, "port": h.port},
extra={"crawler": self.crawler},
)
def stop_listening(self) -> None:
self.port.stopListening()
def protocol(self) -> telnet.TelnetTransport:
class Portal:
"""An implementation of IPortal"""
@defers
def login(self_, credentials, mind, *interfaces): # pylint: disable=no-self-argument
if not (
credentials.username == self.username.encode("utf8")
and credentials.checkPassword(self.password.encode("utf8"))
):
raise ValueError("Invalid credentials")
from twisted.conch import manhole
protocol = telnet.TelnetBootstrapProtocol(
insults.ServerProtocol, manhole.Manhole, self._get_telnet_vars()
)
return (interfaces[0], protocol, lambda: None)
return telnet.TelnetTransport(telnet.AuthenticatingTelnetProtocol, Portal())
def _get_telnet_vars(self) -> dict[str, Any]:
# Note: if you add entries here also update topics/telnetconsole.rst
assert self.crawler.engine
telnet_vars: dict[str, Any] = {
"engine": self.crawler.engine,
"spider": self.crawler.engine.spider,
"crawler": self.crawler,
"extensions": self.crawler.extensions,
"stats": self.crawler.stats,
"settings": self.crawler.settings,
"est": lambda: print_engine_status(self.crawler.engine),
"p": pprint.pprint,
"prefs": print_live_refs,
"help": "This is Scrapy telnet console. For more info see: "
"https://docs.scrapy.org/en/latest/topics/telnetconsole.html",
}
self.crawler.signals.send_catch_log(update_telnet_vars, telnet_vars=telnet_vars)
return telnet_vars
| TelnetConsole |
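The extension above is driven entirely by settings; a minimal sketch of a project settings module that enables it (values are illustrative):
# settings.py (sketch) -- consumed by TelnetConsole.__init__ above
TELNETCONSOLE_ENABLED = True            # NotConfigured is raised when False
TELNETCONSOLE_HOST = "127.0.0.1"
TELNETCONSOLE_PORT = [6023, 6073]       # a port range, read via settings.getlist()
TELNETCONSOLE_USERNAME = "scrapy"
TELNETCONSOLE_PASSWORD = "secret"       # leave unset to get a random password logged at startup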
python | numpy__numpy | numpy/_core/memmap.py | {
"start": 406,
"end": 12651
} | class ____(ndarray):
"""Create a memory-map to an array stored in a *binary* file on disk.
Memory-mapped files are used for accessing small segments of large files
on disk, without reading the entire file into memory. NumPy's
memmap's are array-like objects. This differs from Python's ``mmap``
module, which uses file-like objects.
This subclass of ndarray has some unpleasant interactions with
some operations, because it doesn't quite fit properly as a subclass.
An alternative to using this subclass is to create the ``mmap``
object yourself, then create an ndarray with ndarray.__new__ directly,
passing the object created in its 'buffer=' parameter.
This class may at some point be turned into a factory function
which returns a view into an mmap buffer.
Flush the memmap instance to write the changes to the file. Currently there
is no API to close the underlying ``mmap``. It is tricky to ensure the
resource is actually closed, since it may be shared between different
memmap instances.
Parameters
----------
filename : str, file-like object, or pathlib.Path instance
The file name or file object to be used as the array data buffer.
dtype : data-type, optional
The data-type used to interpret the file contents.
Default is `uint8`.
mode : {'r+', 'r', 'w+', 'c'}, optional
The file is opened in this mode:
+------+-------------------------------------------------------------+
| 'r' | Open existing file for reading only. |
+------+-------------------------------------------------------------+
| 'r+' | Open existing file for reading and writing. |
+------+-------------------------------------------------------------+
| 'w+' | Create or overwrite existing file for reading and writing. |
| | If ``mode == 'w+'`` then `shape` must also be specified. |
+------+-------------------------------------------------------------+
| 'c' | Copy-on-write: assignments affect data in memory, but |
| | changes are not saved to disk. The file on disk is |
| | read-only. |
+------+-------------------------------------------------------------+
Default is 'r+'.
offset : int, optional
In the file, array data starts at this offset. Since `offset` is
measured in bytes, it should normally be a multiple of the byte-size
of `dtype`. When ``mode != 'r'``, even positive offsets beyond end of
file are valid; The file will be extended to accommodate the
additional data. By default, ``memmap`` will start at the beginning of
the file, even if ``filename`` is a file pointer ``fp`` and
``fp.tell() != 0``.
shape : int or sequence of ints, optional
The desired shape of the array. If ``mode == 'r'`` and the number
of remaining bytes after `offset` is not a multiple of the byte-size
of `dtype`, you must specify `shape`. By default, the returned array
will be 1-D with the number of elements determined by file size
and data-type.
.. versionchanged:: 2.0
The shape parameter can now be any integer sequence type; previously,
it was limited to tuple and int.
order : {'C', 'F'}, optional
Specify the order of the ndarray memory layout:
:term:`row-major`, C-style or :term:`column-major`,
Fortran-style. This only has an effect if the shape is
greater than 1-D. The default order is 'C'.
Attributes
----------
filename : str or pathlib.Path instance
Path to the mapped file.
offset : int
Offset position in the file.
mode : str
File mode.
Methods
-------
flush
Flush any changes in memory to file on disk.
When you delete a memmap object, flush is called first to write
changes to disk.
See also
--------
lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.
Notes
-----
The memmap object can be used anywhere an ndarray is accepted.
Given a memmap ``fp``, ``isinstance(fp, numpy.ndarray)`` returns
``True``.
Memory-mapped files cannot be larger than 2GB on 32-bit systems.
When a memmap causes a file to be created or extended beyond its
current size in the filesystem, the contents of the new part are
unspecified. On systems with POSIX filesystem semantics, the extended
part will be filled with zero bytes.
Examples
--------
>>> import numpy as np
>>> data = np.arange(12, dtype='float32')
>>> data.resize((3,4))
This example uses a temporary file so that doctest doesn't write
files to your directory. You would use a 'normal' filename.
>>> from tempfile import mkdtemp
>>> import os.path as path
>>> filename = path.join(mkdtemp(), 'newfile.dat')
Create a memmap with dtype and shape that matches our data:
>>> fp = np.memmap(filename, dtype='float32', mode='w+', shape=(3,4))
>>> fp
memmap([[0., 0., 0., 0.],
[0., 0., 0., 0.],
[0., 0., 0., 0.]], dtype=float32)
Write data to memmap array:
>>> fp[:] = data[:]
>>> fp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fp.filename == path.abspath(filename)
True
Flushes memory changes to disk in order to read them back
>>> fp.flush()
Load the memmap and verify data was stored:
>>> newfp = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> newfp
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Read-only memmap:
>>> fpr = np.memmap(filename, dtype='float32', mode='r', shape=(3,4))
>>> fpr.flags.writeable
False
Copy-on-write memmap:
>>> fpc = np.memmap(filename, dtype='float32', mode='c', shape=(3,4))
>>> fpc.flags.writeable
True
It's possible to assign to a copy-on-write array, but values are only
written into the memory copy of the array, and not written to disk:
>>> fpc
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
>>> fpc[0,:] = 0
>>> fpc
memmap([[ 0., 0., 0., 0.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
File on disk is unchanged:
>>> fpr
memmap([[ 0., 1., 2., 3.],
[ 4., 5., 6., 7.],
[ 8., 9., 10., 11.]], dtype=float32)
Offset into a memmap:
>>> fpo = np.memmap(filename, dtype='float32', mode='r', offset=16)
>>> fpo
memmap([ 4., 5., 6., 7., 8., 9., 10., 11.], dtype=float32)
"""
__array_priority__ = -100.0
def __new__(subtype, filename, dtype=uint8, mode='r+', offset=0,
shape=None, order='C'):
# Import here to minimize 'import numpy' overhead
import mmap
import os.path
try:
mode = mode_equivalents[mode]
except KeyError as e:
if mode not in valid_filemodes:
all_modes = valid_filemodes + list(mode_equivalents.keys())
raise ValueError(
f"mode must be one of {all_modes!r} (got {mode!r})"
) from None
if mode == 'w+' and shape is None:
raise ValueError("shape must be given if mode == 'w+'")
if hasattr(filename, 'read'):
f_ctx = nullcontext(filename)
else:
f_ctx = open(
os.fspath(filename),
('r' if mode == 'c' else mode) + 'b'
)
with f_ctx as fid:
fid.seek(0, 2)
flen = fid.tell()
descr = dtypedescr(dtype)
_dbytes = descr.itemsize
if shape is None:
bytes = flen - offset
if bytes % _dbytes:
raise ValueError("Size of available data is not a "
"multiple of the data-type size.")
size = bytes // _dbytes
shape = (size,)
else:
if not isinstance(shape, (tuple, list)):
try:
shape = [operator.index(shape)]
except TypeError:
pass
shape = tuple(shape)
size = np.intp(1) # avoid overflows
for k in shape:
size *= k
bytes = int(offset + size * _dbytes)
if mode in ('w+', 'r+'):
# gh-27723
# if bytes == 0, we write out 1 byte to allow empty memmap.
bytes = max(bytes, 1)
if flen < bytes:
fid.seek(bytes - 1, 0)
fid.write(b'\0')
fid.flush()
if mode == 'c':
acc = mmap.ACCESS_COPY
elif mode == 'r':
acc = mmap.ACCESS_READ
else:
acc = mmap.ACCESS_WRITE
start = offset - offset % mmap.ALLOCATIONGRANULARITY
bytes -= start
# bytes == 0 is problematic as in mmap length=0 maps the full file.
# See PR gh-27723 for a more detailed explanation.
if bytes == 0 and start > 0:
bytes += mmap.ALLOCATIONGRANULARITY
start -= mmap.ALLOCATIONGRANULARITY
array_offset = offset - start
mm = mmap.mmap(fid.fileno(), bytes, access=acc, offset=start)
self = ndarray.__new__(subtype, shape, dtype=descr, buffer=mm,
offset=array_offset, order=order)
self._mmap = mm
self.offset = offset
self.mode = mode
if isinstance(filename, os.PathLike):
# special case - if we were constructed with a pathlib.path,
# then filename is a path object, not a string
self.filename = filename.resolve()
elif hasattr(fid, "name") and isinstance(fid.name, str):
# py3 returns int for TemporaryFile().name
self.filename = os.path.abspath(fid.name)
# same as memmap copies (e.g. memmap + 1)
else:
self.filename = None
return self
def __array_finalize__(self, obj):
if hasattr(obj, '_mmap') and np.may_share_memory(self, obj):
self._mmap = obj._mmap
self.filename = obj.filename
self.offset = obj.offset
self.mode = obj.mode
else:
self._mmap = None
self.filename = None
self.offset = None
self.mode = None
def flush(self):
"""
Write any changes in the array to the file on disk.
For further information, see `memmap`.
Parameters
----------
None
See Also
--------
memmap
"""
if self.base is not None and hasattr(self.base, 'flush'):
self.base.flush()
def __array_wrap__(self, arr, context=None, return_scalar=False):
arr = super().__array_wrap__(arr, context)
# Return a memmap if a memmap was given as the output of the
# ufunc. Leave the arr class unchanged if self is not a memmap
# to keep the original memmap subclass's behavior
if self is arr or type(self) is not memmap:
return arr
# Return scalar instead of 0d memmap, e.g. for np.sum with
# axis=None (note that subclasses will not reach here)
if return_scalar:
return arr[()]
# Return ndarray otherwise
return arr.view(np.ndarray)
def __getitem__(self, index):
res = super().__getitem__(index)
if type(res) is memmap and res._mmap is None:
return res.view(type=ndarray)
return res
| memmap |
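One point the docstring above makes only in passing is that ``offset`` is expressed in bytes; a small sketch skipping whole rows of a float32 array (the filename is a placeholder):
import numpy as np
rows, cols = 100, 4
row_bytes = cols * np.dtype("float32").itemsize      # offset must be given in bytes
full = np.memmap("data.bin", dtype="float32", mode="w+", shape=(rows, cols))
full[:] = np.arange(rows * cols, dtype="float32").reshape(rows, cols)
full.flush()
# view everything except the first two rows, without loading the file
tail = np.memmap("data.bin", dtype="float32", mode="r",
                 offset=2 * row_bytes, shape=(rows - 2, cols))
assert tail[0, 0] == full[2, 0]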
python | dagster-io__dagster | python_modules/dagster/dagster/_core/loader.py | {
"start": 3148,
"end": 5389
} | class ____(ABC, Generic[TKey, TContext]):
"""Make An object Loadable by ID of type TKey using a LoadingContext."""
@classmethod
async def _batch_load(cls, keys: Iterable[TKey], context: TContext) -> Iterable[Optional[Self]]:
return cls._blocking_batch_load(keys, context)
@classmethod
@abstractmethod
def _blocking_batch_load(
cls, keys: Iterable[TKey], context: TContext
) -> Iterable[Optional[Self]]:
# There is no good way of turning an async function into a sync one that
# will allow us to execute that sync function inside of a broader async context.
#
# In the spirit of allowing incremental migration from a fully-sync pattern to
# an async one, we provide two separate functions here to allow sync and async
# calls to coexist.
raise NotImplementedError()
@classmethod
async def gen(cls, context: TContext, id: TKey) -> Optional[Self]:
"""Fetch an object by its id."""
loader, _ = context.get_loaders_for(cls)
return await loader.load(id)
@classmethod
async def gen_many(cls, context: TContext, ids: Iterable[TKey]) -> Iterable[Optional[Self]]:
"""Fetch N objects by their id."""
loader, _ = context.get_loaders_for(cls)
return await loader.load_many(ids)
@classmethod
def blocking_get(cls, context: TContext, id: TKey) -> Optional[Self]:
"""Fetch an object by its id."""
_, blocking_loader = context.get_loaders_for(cls)
return blocking_loader.blocking_load(id)
@classmethod
def blocking_get_many(cls, context: TContext, ids: Iterable[TKey]) -> Iterable[Optional[Self]]:
"""Fetch N objects by their id."""
# in the future, can consider priming the non-blocking loader with the results of this
# sync call
_, blocking_loader = context.get_loaders_for(cls)
return list(blocking_loader.blocking_load_many(ids))
@classmethod
def prepare(cls, context: TContext, ids: Iterable[TKey]) -> None:
"""Ensure the provided ids will be fetched on the next blocking query."""
_, blocking_loader = context.get_loaders_for(cls)
blocking_loader.prepare(ids)
| LoadableBy |
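A hypothetical subclass sketch using only the abstract surface shown above; ``RunRecord``, ``MyContext`` and its ``storage`` helper are made-up stand-ins, not real Dagster APIs.
from typing import Iterable, Optional
class RunRecord(LoadableBy[str, "MyContext"]):
    def __init__(self, run_id: str, payload: dict):
        self.run_id = run_id
        self.payload = payload
    @classmethod
    def _blocking_batch_load(
        cls, keys: Iterable[str], context: "MyContext"
    ) -> Iterable[Optional["RunRecord"]]:
        # one storage round-trip for the whole batch; misses come back as None
        rows = context.storage.fetch_runs(list(keys))   # assumed helper
        return [rows.get(key) for key in keys]
# callers go through the context so repeated ids are de-duplicated and batched:
# record = RunRecord.blocking_get(context, "run_123")
# records = RunRecord.blocking_get_many(context, ["run_123", "run_456"])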
python | getsentry__sentry | tests/snuba/models/test_group.py | {
"start": 19268,
"end": 26061
} | class ____(TestCase, SnubaTestCase, OccurrenceTestMixin):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.issue_occ_a, _ = self.process_occurrence(
project_id=self.project.id,
event_id="a" * 32,
event_data={
"timestamp": before_now(minutes=1).isoformat(),
"fingerprint": ["group-1"],
"environment": "staging",
"contexts": {
"profile": {"profile_id": uuid.uuid4().hex},
},
},
)
self.issue_occ_b, _ = self.process_occurrence(
project_id=self.project.id,
event_id="b" * 32,
event_data={
"timestamp": before_now(minutes=2).isoformat(),
"fingerprint": ["group-1"],
"environment": "production",
"contexts": {
"profile": {"profile_id": uuid.uuid4().hex},
"replay": {"replay_id": uuid.uuid4().hex},
"trace": {
"sampled": True,
"span_id": "babaae0d4b7512d9",
"trace_id": "a7d67cf796774551a95be6543cacd459",
},
},
},
)
self.issue_occ_c, _ = self.process_occurrence(
project_id=self.project.id,
event_id="c" * 32,
event_data={
"timestamp": before_now(minutes=3).isoformat(),
"fingerprint": ["group-1"],
"environment": "staging",
"tags": {"organization.slug": "sentry"},
"contexts": {
"profile": {"profile_id": uuid.uuid4().hex},
"replay": {"replay_id": uuid.uuid4().hex},
},
},
)
self.group: Group = Group.objects.get()
self.group.update(type=ProfileFileIOGroupType.type_id)
assert isinstance(self.group, Group)
assert self.group.type == ProfileFileIOGroupType.type_id
def test_recommended_event(self) -> None:
# No filter
self.assert_occurrences_identical(_get_recommended(self.group).occurrence, self.issue_occ_b)
# Filter by environment
conditions = [Condition(Column("environment"), Op.IN, ["staging"])]
assert (
_get_recommended(self.group, conditions=conditions).event_id
== self.issue_occ_c.event_id
)
conditions = [Condition(Column("environment"), Op.IN, ["production"])]
assert (
_get_recommended(self.group, conditions=conditions).event_id
== self.issue_occ_b.event_id
)
conditions = [Condition(Column("environment"), Op.IN, ["development"])]
assert self.group.get_recommended_event(conditions=conditions) is None
# Filter by query
conditions = [Condition(Column("tags[organization.slug]"), Op.EQ, "sentry")]
assert (
_get_recommended(self.group, conditions=conditions).event_id
== self.issue_occ_c.event_id
)
conditions = [Condition(Column("replay_id"), Op.IS_NULL)]
assert (
_get_recommended(self.group, conditions=conditions).event_id
== self.issue_occ_a.event_id
)
# Filter by date range
assert (
_get_recommended(
self.group, start=before_now(seconds=150), end=before_now(seconds=30)
).event_id
== self.issue_occ_b.event_id
)
assert (
_get_recommended(
self.group, start=before_now(hours=1), end=before_now(seconds=90)
).event_id
== self.issue_occ_b.event_id
)
def test_latest_event(self) -> None:
# No filter
self.assert_occurrences_identical(_get_latest(self.group).occurrence, self.issue_occ_a)
# Filter by environment
conditions = [Condition(Column("environment"), Op.IN, ["staging"])]
assert _get_latest(self.group, conditions=conditions).event_id == self.issue_occ_a.event_id
conditions = [Condition(Column("environment"), Op.IN, ["production"])]
assert _get_latest(self.group, conditions=conditions).event_id == self.issue_occ_b.event_id
conditions = [Condition(Column("environment"), Op.IN, ["development"])]
assert self.group.get_latest_event(conditions=conditions) is None
# Filter by query
conditions = [Condition(Column("tags[organization.slug]"), Op.EQ, "sentry")]
assert _get_latest(self.group, conditions=conditions).event_id == self.issue_occ_c.event_id
conditions = [Condition(Column("replay_id"), Op.IS_NULL)]
assert _get_latest(self.group, conditions=conditions).event_id == self.issue_occ_a.event_id
# Filter by date range
assert (
_get_latest(
self.group, start=before_now(seconds=120), end=before_now(seconds=30)
).event_id
== self.issue_occ_a.event_id
)
assert (
_get_latest(self.group, start=before_now(hours=1), end=before_now(seconds=90)).event_id
== self.issue_occ_b.event_id
)
def test_oldest_event(self) -> None:
# No filter
self.assert_occurrences_identical(_get_oldest(self.group).occurrence, self.issue_occ_c)
# Filter by environment
conditions = [Condition(Column("environment"), Op.IN, ["staging"])]
assert _get_oldest(self.group, conditions=conditions).event_id == self.issue_occ_c.event_id
conditions = [Condition(Column("environment"), Op.IN, ["production"])]
assert _get_oldest(self.group, conditions=conditions).event_id == self.issue_occ_b.event_id
conditions = [Condition(Column("environment"), Op.IN, ["development"])]
assert self.group.get_oldest_event(conditions=conditions) is None
# Filter by query
conditions = [Condition(Column("tags[organization.slug]"), Op.EQ, "sentry")]
assert _get_oldest(self.group, conditions=conditions).event_id == self.issue_occ_c.event_id
conditions = [Condition(Column("replay_id"), Op.IS_NULL)]
assert _get_oldest(self.group, conditions=conditions).event_id == self.issue_occ_a.event_id
# Filter by date range
assert (
_get_oldest(
self.group, start=before_now(seconds=150), end=before_now(seconds=30)
).event_id
== self.issue_occ_b.event_id
)
assert (
_get_oldest(self.group, start=before_now(hours=1), end=before_now(seconds=90)).event_id
== self.issue_occ_c.event_id
)
| GroupTestSnubaOccurrenceIssue |
python | Textualize__textual | tests/test_binding_inheritance.py | {
"start": 13101,
"end": 14267
} | class ____(AppKeyRecorder):
"""An app with a non-default screen that handles movement key bindings."""
SCREENS = {"main": ScreenWithMovementBindings}
def on_mount(self) -> None:
self.push_screen("main")
async def test_contained_focused_child_widget_with_movement_bindings_on_screen() -> (
None
):
"""A contained focused child widget, with movement bindings in the screen, should trigger screen actions."""
async with AppWithScreenWithBindingsWrappedWidgetNoBindings().run_test() as pilot:
await pilot.press(*AppKeyRecorder.ALL_KEYS)
pilot.app.all_recorded("screenly_")
##############################################################################
# A focused widget with bindings but no inheritance of bindings, on the app.
#
# Now we move on to testing inherit_bindings. To start with we go back to an
# app with a default screen, with the app itself composing in a widget that
# can and will have focus, which has bindings for all the test keys, and
# crucially has inherit_bindings set to False.
#
# We should expect to see all of the test keys recorded post-press.
| AppWithScreenWithBindingsWrappedWidgetNoBindings |
python | pyca__cryptography | tests/hazmat/primitives/test_aes.py | {
"start": 8970,
"end": 12737
} | class ____:
test_ctr = generate_encrypt_test(
load_nist_vectors,
os.path.join("ciphers", "AES", "CTR"),
["aes-128-ctr.txt", "aes-192-ctr.txt", "aes-256-ctr.txt"],
lambda key, **kwargs: algorithms.AES(binascii.unhexlify(key)),
lambda iv, **kwargs: modes.CTR(binascii.unhexlify(iv)),
)
@pytest.mark.parametrize(
"mode",
[
modes.CBC(bytearray(b"\x00" * 16)),
modes.CTR(bytearray(b"\x00" * 16)),
OFB(bytearray(b"\x00" * 16)),
CFB(bytearray(b"\x00" * 16)),
CFB8(bytearray(b"\x00" * 16)),
modes.XTS(bytearray(b"\x00" * 16)),
# Add a dummy mode for coverage of the cipher_supported check.
DummyMode(),
],
)
def test_buffer_protocol_alternate_modes(mode, backend):
data = bytearray(b"sixteen_byte_msg")
key = algorithms.AES(bytearray(os.urandom(32)))
if not backend.cipher_supported(key, mode):
pytest.skip(f"AES-{key.key_size} in {mode.name} mode not supported")
cipher = base.Cipher(key, mode, backend)
enc = cipher.encryptor()
ct = enc.update(data) + enc.finalize()
dec = cipher.decryptor()
pt = dec.update(ct) + dec.finalize()
assert pt == data
@pytest.mark.parametrize(
"mode",
[
modes.ECB(),
modes.CBC(bytearray(b"\x00" * 16)),
modes.CTR(bytearray(b"\x00" * 16)),
OFB(bytearray(b"\x00" * 16)),
CFB(bytearray(b"\x00" * 16)),
CFB8(bytearray(b"\x00" * 16)),
],
)
@pytest.mark.parametrize("alg_cls", [algorithms.AES128, algorithms.AES256])
def test_alternate_aes_classes(mode, alg_cls, backend):
alg = alg_cls(b"0" * (alg_cls.key_size // 8))
if not backend.cipher_supported(alg, mode):
pytest.skip(f"AES in {mode.name} mode not supported")
data = bytearray(b"sixteen_byte_msg")
cipher = base.Cipher(alg, mode, backend)
enc = cipher.encryptor()
ct = enc.update(data) + enc.finalize()
dec = cipher.decryptor()
pt = dec.update(ct) + dec.finalize()
assert pt == data
def test_reset_nonce(backend):
data = b"helloworld" * 10
nonce = b"\x00" * 16
nonce_alt = b"\xee" * 16
cipher = base.Cipher(
algorithms.AES(b"\x00" * 16),
modes.CTR(nonce),
)
cipher_alt = base.Cipher(
algorithms.AES(b"\x00" * 16),
modes.CTR(nonce_alt),
)
enc = cipher.encryptor()
ct1 = enc.update(data)
assert len(ct1) == len(data)
for _ in range(2):
enc.reset_nonce(nonce)
assert enc.update(data) == ct1
# Reset the nonce to a different value
# and check it matches with a different context
enc_alt = cipher_alt.encryptor()
ct2 = enc_alt.update(data)
enc.reset_nonce(nonce_alt)
assert enc.update(data) == ct2
enc_alt.finalize()
enc.finalize()
with pytest.raises(AlreadyFinalized):
enc.reset_nonce(nonce)
dec = cipher.decryptor()
assert dec.update(ct1) == data
for _ in range(2):
dec.reset_nonce(nonce)
assert dec.update(ct1) == data
# Reset the nonce to a different value
# and check it matches with a different context
dec_alt = cipher_alt.decryptor()
dec.reset_nonce(nonce_alt)
assert dec.update(ct2) == dec_alt.update(ct2)
dec_alt.finalize()
dec.finalize()
with pytest.raises(AlreadyFinalized):
dec.reset_nonce(nonce)
def test_reset_nonce_invalid_mode(backend):
iv = b"\x00" * 16
c = base.Cipher(
algorithms.AES(b"\x00" * 16),
modes.CBC(iv),
)
enc = c.encryptor()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
enc.reset_nonce(iv)
dec = c.decryptor()
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_CIPHER):
dec.reset_nonce(iv)
| TestAESModeCTR |
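For reference, a minimal round-trip in CTR mode with the public API these tests drive (key and nonce are random here):
import os
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
key = os.urandom(32)                       # AES-256
nonce = os.urandom(16)                     # CTR nonce / initial counter block
encryptor = Cipher(algorithms.AES(key), modes.CTR(nonce)).encryptor()
ciphertext = encryptor.update(b"sixteen_byte_msg") + encryptor.finalize()
decryptor = Cipher(algorithms.AES(key), modes.CTR(nonce)).decryptor()
assert decryptor.update(ciphertext) + decryptor.finalize() == b"sixteen_byte_msg"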
python | xlwings__xlwings | xlwings/constants.py | {
"start": 96313,
"end": 96754
} | class ____:
xlPivotTableVersion10 = 1 # from enum XlPivotTableVersionList
xlPivotTableVersion11 = 2 # from enum XlPivotTableVersionList
xlPivotTableVersion12 = 3 # from enum XlPivotTableVersionList
xlPivotTableVersion14 = 4 # from enum XlPivotTableVersionList
xlPivotTableVersion2000 = 0 # from enum XlPivotTableVersionList
xlPivotTableVersionCurrent = -1 # from enum XlPivotTableVersionList
| PivotTableVersionList |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/array_ops/diag_op_test.py | {
"start": 31133,
"end": 37673
} | class ____(test.TestCase):
@test_util.run_deprecated_v1
def testSquare(self):
with self.session():
v = np.array([1.0, 2.0, 3.0])
mat = np.diag(v)
mat_diag = array_ops.matrix_diag_part(mat)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag, v)
for offset in [-2, 3]:
mat = np.diag(v, offset)
mat_diag = array_ops.matrix_diag_part(mat, k=offset)
self.assertEqual((3,), mat_diag.get_shape())
self.assertAllEqual(mat_diag, v)
# Diagonal bands.
for align in alignment_list:
mat, tests = square_cases(align)
for diags, pair in tests.items():
solution, _ = pair
mat_diag = array_ops.matrix_diag_part(mat[0], k=diags, align=align)
self.assertEqual(mat_diag.get_shape(), solution[0].shape)
self.assertAllEqual(mat_diag, solution[0])
@test_util.run_deprecated_v1
def testRectangular(self):
with self.session():
mat = np.array([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag, np.array([1.0, 5.0]))
mat = np.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
mat_diag = array_ops.matrix_diag_part(mat)
self.assertAllEqual(mat_diag, np.array([1.0, 4.0]))
# Diagonal bands.
for align in alignment_list:
for mat, tests in [tall_cases(align), fat_cases(align)]:
for diags, pair in tests.items():
solution, _ = pair
mat_diag = array_ops.matrix_diag_part(
mat[0], k=diags, align=align)
self.assertEqual(mat_diag.get_shape(), solution[0].shape)
self.assertAllEqual(mat_diag, solution[0])
def _testSquareBatch(self, dtype):
with self.cached_session():
v_batch = np.array([[1.0, 0.0, 3.0], [4.0, 5.0, 6.0]]).astype(dtype)
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 0.0, 0.0], [0.0, 0.0, 3.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0],
[0.0, 0.0, 6.0]]]).astype(dtype)
self.assertEqual(mat_batch.shape, (2, 3, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 3), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag, v_batch)
# Diagonal bands with padding_value.
for padding_value, align in zip_to_first_list_length([0, 555, -11],
alignment_list):
mat, tests = square_cases(align)
for diags, pair in tests.items():
solution, _ = pair
mat_batch_diag = array_ops.matrix_diag_part(
mat.astype(dtype),
k=diags,
padding_value=padding_value,
align=align)
mask = solution == 0
solution = (solution + padding_value * mask).astype(dtype)
self.assertEqual(mat_batch_diag.get_shape(), solution.shape)
self.assertAllEqual(mat_batch_diag, solution)
@test_util.run_deprecated_v1
def testSquareBatch(self):
self._testSquareBatch(dtypes_lib.bfloat16.as_numpy_dtype)
self._testSquareBatch(np.float32)
self._testSquareBatch(np.float64)
self._testSquareBatch(np.int32)
self._testSquareBatch(np.int64)
self._testSquareBatch(np.bool_)
@test_util.run_deprecated_v1
def testRectangularBatch(self):
with self.session():
v_batch = np.array([[1.0, 2.0], [4.0, 5.0]])
mat_batch = np.array([[[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]],
[[4.0, 0.0, 0.0], [0.0, 5.0, 0.0]]])
self.assertEqual(mat_batch.shape, (2, 2, 3))
mat_batch_diag = array_ops.matrix_diag_part(mat_batch)
self.assertEqual((2, 2), mat_batch_diag.get_shape())
self.assertAllEqual(mat_batch_diag, v_batch)
# Diagonal bands with padding_value and align.
for padding_value, align in zip_to_first_list_length([0, 555, -11],
alignment_list):
for mat, tests in [tall_cases(align), fat_cases(align)]:
for diags, pair in tests.items():
solution, _ = pair
mat_batch_diag = array_ops.matrix_diag_part(
mat, k=diags, padding_value=padding_value, align=align)
mask = solution == 0
solution = solution + padding_value * mask
self.assertEqual(mat_batch_diag.get_shape(), solution.shape)
self.assertAllEqual(mat_batch_diag, solution)
@test_util.run_deprecated_v1
def testUnknownShape(self):
matrix = array_ops.placeholder(dtypes_lib.int32, shape=[None, None])
result = array_ops.matrix_diag_part(matrix, k=-1)
input_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
with self.session():
result_eval = result.eval(feed_dict={matrix: input_matrix})
self.assertAllEqual([4, 8], result_eval)
@test_util.run_deprecated_v1
def testInvalidShape(self):
with self.assertRaisesRegex(ValueError, "must be at least rank 2"):
array_ops.matrix_diag_part(0)
@test_util.run_deprecated_v1
def testInvalidShapeAtEval(self):
with self.session():
v = array_ops.placeholder(dtype=dtypes_lib.float32)
with self.assertRaisesOpError("input must be at least 2-dim"):
array_ops.matrix_diag_part(v).eval(feed_dict={v: 0.0})
@test_util.run_deprecated_v1
def testGrad(self):
shapes = ((3, 3), (2, 3), (3, 2), (5, 3, 3))
with self.session():
for shape in shapes:
x = constant_op.constant(np.random.rand(*shape), dtype=np.float32)
y = array_ops.matrix_diag_part(x)
error = gradient_checker.compute_gradient_error(x,
x.get_shape().as_list(),
y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
# {Sub,super}diagonals/band.
tests = dict() # tests[shape] = (d_lower, d_upper)
tests[(3, 3)] = (-1, -1)
tests[(7, 3, 4)] = (-1, 1)
with self.session():
for align in alignment_list:
for shape, diags in tests.items():
x = constant_op.constant(np.random.rand(*shape), np.float32)
y = array_ops.matrix_diag_part(input=x, k=diags, align=align)
error = gradient_checker.compute_gradient_error(
x,
x.get_shape().as_list(), y,
y.get_shape().as_list())
self.assertLess(error, 1e-4)
| MatrixDiagPartTest |
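The public entry point for the op exercised above is ``tf.linalg.diag_part``; a small sketch of the ``k``/``align`` banding the tests cover:
import tensorflow as tf
m = tf.constant([[1, 2, 3],
                 [4, 5, 6],
                 [7, 8, 9]])
tf.linalg.diag_part(m)                    # [1, 5, 9]   main diagonal
tf.linalg.diag_part(m, k=-1)              # [4, 8]      subdiagonal (cf. testUnknownShape above)
tf.linalg.diag_part(m, k=(-1, 0),         # a band of two diagonals, padded per `align`
                    padding_value=0, align="RIGHT_LEFT")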
python | apache__airflow | airflow-core/src/airflow/api_fastapi/execution_api/versions/v2025_11_07.py | {
"start": 1122,
"end": 2311
} | class ____(VersionChange):
"""Add the `partition_key` field to DagRun model."""
description = __doc__
instructions_to_migrate_to_previous_version = (
schema(DagRun).field("partition_key").didnt_exist,
schema(AssetEventResponse).field("partition_key").didnt_exist,
)
@convert_response_to_previous_version_for(TIRunContext) # type: ignore[arg-type]
def remove_partition_key_from_dag_run(response: ResponseInfo) -> None: # type: ignore[misc]
"""Remove the `partition_key` field from the dag_run object when converting to the previous version."""
if "dag_run" in response.body and isinstance(response.body["dag_run"], dict):
response.body["dag_run"].pop("partition_key", None)
@convert_response_to_previous_version_for(AssetEventsResponse) # type: ignore[arg-type]
def remove_partition_key_from_asset_events(response: ResponseInfo) -> None: # type: ignore[misc]
"""Remove the `partition_key` field from the dag_run object when converting to the previous version."""
events = response.body["asset_events"]
for elem in events:
elem.pop("partition_key", None)
| AddPartitionKeyField |
python | huggingface__transformers | src/transformers/models/cohere2_vision/modular_cohere2_vision.py | {
"start": 3572,
"end": 6493
} | class ____(AyaVisionModel):
_checkpoint_conversion_mapping = {}
def get_image_features(self, pixel_values: torch.FloatTensor):
"""
Obtains image last hidden states from the vision tower and applies the multimodal projection.
Args:
pixel_values (`torch.FloatTensor` of shape `(batch_size, num_patches, channels, height, width)`):
The tensors corresponding to the input images.
Returns:
image_features (List[`torch.Tensor`]): List of image feature tensors; each contains all the visual features of all patches
and is of shape `(num_patches, image_length, embed_dim)`.
"""
image_features = self.vision_tower(pixel_values, output_hidden_states=True)
selected_image_feature = image_features.last_hidden_state
image_features = self.multi_modal_projector(selected_image_feature)
return image_features
@check_model_inputs()
@auto_docstring
def forward(
self,
input_ids: Optional[torch.LongTensor] = None,
pixel_values: Optional[torch.FloatTensor] = None,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
inputs_embeds: Optional[torch.FloatTensor] = None,
use_cache: Optional[bool] = None,
cache_position: Optional[torch.LongTensor] = None,
**kwargs: Unpack[FlashAttentionKwargs],
) -> Union[tuple, Cohere2VisionModelOutputWithPast]:
if (input_ids is None) ^ (inputs_embeds is not None):
raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
if inputs_embeds is None:
inputs_embeds = self.get_input_embeddings()(input_ids)
if pixel_values is not None:
image_features = self.get_image_features(pixel_values)
image_features = image_features.to(inputs_embeds.device, inputs_embeds.dtype)
special_image_mask = self.get_placeholder_mask(
input_ids, inputs_embeds=inputs_embeds, image_features=image_features
)
inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features)
outputs = self.language_model(
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
inputs_embeds=inputs_embeds,
use_cache=use_cache,
cache_position=cache_position,
**kwargs,
)
return Cohere2VisionModelOutputWithPast(
last_hidden_state=outputs.last_hidden_state,
past_key_values=outputs.past_key_values,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
image_hidden_states=image_features if pixel_values is not None else None,
)
| Cohere2VisionModel |
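A shape-only sketch of the forward contract described above; the tensors are random stand-ins, the image size and patch count are arbitrary, and constructing a real config/checkpoint is omitted.
import torch
# model = Cohere2VisionModel(config)            # construction elided
input_ids = torch.randint(0, 1000, (1, 64))     # text tokens including image placeholder tokens
pixel_values = torch.rand(3, 3, 364, 364)       # (num_patches, channels, height, width)
# outputs = model(input_ids=input_ids, pixel_values=pixel_values)
# outputs.last_hidden_state      -> (batch, seq_len, hidden)
# outputs.image_hidden_states    -> projected patch features, or None when no images are passed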
python | bokeh__bokeh | src/bokeh/core/property/visual.py | {
"start": 7066,
"end": 9049
} | class ____(Either):
""" Accept (min, max) bounds tuples for use with Ranges.
Bounds are provided as a tuple of ``(min, max)``, so regardless of whether your range is
increasing or decreasing, the first item should be the minimum value of the range and the
second item should be the maximum. Setting min > max will result in a ``ValueError``.
Setting bounds to None will allow your plot to pan/zoom as far as you want. If you only
want to constrain one end of the plot, you can set min or max to
``None``, e.g. ``DataRange1d(bounds=(None, 12))``. """
def __init__(self, default='auto', *, accept_datetime: bool = False, help: str | None = None) -> None:
types = (
Auto,
Tuple(Float, Float),
Tuple(Nullable(Float), Float),
Tuple(Float, Nullable(Float)),
Tuple(TimeDelta, TimeDelta),
Tuple(Nullable(TimeDelta), TimeDelta),
Tuple(TimeDelta, Nullable(TimeDelta)),
)
if accept_datetime:
types = (
*types,
Tuple(Datetime, Datetime),
Tuple(Nullable(Datetime), Datetime),
Tuple(Datetime, Nullable(Datetime)),
)
super().__init__(*types, default=default, help=help)
def validate(self, value: Any, detail: bool = True) -> None:
super().validate(value, detail)
if value[0] is None or value[1] is None:
return
value = list(value)
# make sure the values are timestamps for comparison
if isinstance(value[0], datetime.datetime):
value[0] = convert_datetime_type(value[0])
if isinstance(value[1], datetime.datetime):
value[1] = convert_datetime_type(value[1])
if value[0] < value[1]:
return
msg = "" if not detail else "Invalid bounds: maximum smaller than minimum. Correct usage: bounds=(min, max)"
raise ValueError(msg)
| MinMaxBounds |
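A minimal sketch of the accepted forms, matching the docstring above:
from bokeh.models import DataRange1d, Range1d
DataRange1d(bounds="auto")            # let bokeh compute bounds from the data
DataRange1d(bounds=(None, 12))        # only cap the upper end
Range1d(0, 100, bounds=(0, 100))      # hard-pin panning/zooming to [0, 100]
# Range1d(0, 100, bounds=(50, 10))    # min > max -> ValueError at validation time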
python | celery__celery | celery/worker/request.py | {
"start": 2011,
"end": 27859
} | class ____:
"""A request for task execution."""
acknowledged = False
time_start = None
worker_pid = None
time_limits = (None, None)
_already_revoked = False
_already_cancelled = False
_terminate_on_ack = None
_apply_result = None
_tzlocal = None
if not IS_PYPY: # pragma: no cover
__slots__ = (
'_app', '_type', 'name', 'id', '_root_id', '_parent_id',
'_on_ack', '_body', '_hostname', '_eventer', '_connection_errors',
'_task', '_eta', '_expires', '_request_dict', '_on_reject', '_utc',
'_content_type', '_content_encoding', '_argsrepr', '_kwargsrepr',
'_args', '_kwargs', '_decoded', '__payload',
'__weakref__', '__dict__',
)
def __init__(self, message, on_ack=noop,
hostname=None, eventer=None, app=None,
connection_errors=None, request_dict=None,
task=None, on_reject=noop, body=None,
headers=None, decoded=False, utc=True,
maybe_make_aware=maybe_make_aware,
maybe_iso8601=maybe_iso8601, **opts):
self._message = message
self._request_dict = (message.headers.copy() if headers is None
else headers.copy())
self._body = message.body if body is None else body
self._app = app
self._utc = utc
self._decoded = decoded
if decoded:
self._content_type = self._content_encoding = None
else:
self._content_type, self._content_encoding = (
message.content_type, message.content_encoding,
)
self.__payload = self._body if self._decoded else message.payload
self.id = self._request_dict['id']
self._type = self.name = self._request_dict['task']
if 'shadow' in self._request_dict:
self.name = self._request_dict['shadow'] or self.name
self._root_id = self._request_dict.get('root_id')
self._parent_id = self._request_dict.get('parent_id')
timelimit = self._request_dict.get('timelimit', None)
if timelimit:
self.time_limits = timelimit
self._argsrepr = self._request_dict.get('argsrepr', '')
self._kwargsrepr = self._request_dict.get('kwargsrepr', '')
self._on_ack = on_ack
self._on_reject = on_reject
self._hostname = hostname or gethostname()
self._eventer = eventer
self._connection_errors = connection_errors or ()
self._task = task or self._app.tasks[self._type]
self._ignore_result = self._request_dict.get('ignore_result', False)
# timezone means the message is timezone-aware, and the only timezone
# supported at this point is UTC.
eta = self._request_dict.get('eta')
if eta is not None:
try:
eta = maybe_iso8601(eta)
except (AttributeError, ValueError, TypeError) as exc:
raise InvalidTaskError(
f'invalid ETA value {eta!r}: {exc}')
self._eta = maybe_make_aware(eta, self.tzlocal)
else:
self._eta = None
expires = self._request_dict.get('expires')
if expires is not None:
try:
expires = maybe_iso8601(expires)
except (AttributeError, ValueError, TypeError) as exc:
raise InvalidTaskError(
f'invalid expires value {expires!r}: {exc}')
self._expires = maybe_make_aware(expires, self.tzlocal)
else:
self._expires = None
delivery_info = message.delivery_info or {}
properties = message.properties or {}
self._delivery_info = {
'exchange': delivery_info.get('exchange'),
'routing_key': delivery_info.get('routing_key'),
'priority': properties.get('priority'),
'redelivered': delivery_info.get('redelivered', False),
}
self._request_dict.update({
'properties': properties,
'reply_to': properties.get('reply_to'),
'correlation_id': properties.get('correlation_id'),
'hostname': self._hostname,
'delivery_info': self._delivery_info
})
# this is a reference pass to avoid memory usage burst
self._request_dict['args'], self._request_dict['kwargs'], _ = self.__payload
self._args = self._request_dict['args']
self._kwargs = self._request_dict['kwargs']
@property
def delivery_info(self):
return self._delivery_info
@property
def message(self):
return self._message
@property
def request_dict(self):
return self._request_dict
@property
def body(self):
return self._body
@property
def app(self):
return self._app
@property
def utc(self):
return self._utc
@property
def content_type(self):
return self._content_type
@property
def content_encoding(self):
return self._content_encoding
@property
def type(self):
return self._type
@property
def root_id(self):
return self._root_id
@property
def parent_id(self):
return self._parent_id
@property
def argsrepr(self):
return self._argsrepr
@property
def args(self):
return self._args
@property
def kwargs(self):
return self._kwargs
@property
def kwargsrepr(self):
return self._kwargsrepr
@property
def on_ack(self):
return self._on_ack
@property
def on_reject(self):
return self._on_reject
@on_reject.setter
def on_reject(self, value):
self._on_reject = value
@property
def hostname(self):
return self._hostname
@property
def ignore_result(self):
return self._ignore_result
@property
def eventer(self):
return self._eventer
@eventer.setter
def eventer(self, eventer):
self._eventer = eventer
@property
def connection_errors(self):
return self._connection_errors
@property
def task(self):
return self._task
@property
def eta(self):
return self._eta
@property
def expires(self):
return self._expires
@expires.setter
def expires(self, value):
self._expires = value
@property
def tzlocal(self):
if self._tzlocal is None:
self._tzlocal = self._app.conf.timezone
return self._tzlocal
@property
def store_errors(self):
return (not self.task.ignore_result or
self.task.store_errors_even_if_ignored)
@property
def task_id(self):
# XXX compat
return self.id
@task_id.setter
def task_id(self, value):
self.id = value
@property
def task_name(self):
# XXX compat
return self.name
@task_name.setter
def task_name(self, value):
self.name = value
@property
def reply_to(self):
# used by rpc backend when failures reported by parent process
return self._request_dict['reply_to']
@property
def replaced_task_nesting(self):
return self._request_dict.get('replaced_task_nesting', 0)
@property
def groups(self):
return self._request_dict.get('groups', [])
@property
def stamped_headers(self) -> list:
return self._request_dict.get('stamped_headers') or []
@property
def stamps(self) -> dict:
stamps = self._request_dict.get('stamps') or {}
return {header: stamps.get(header) for header in self.stamped_headers}
@property
def correlation_id(self):
# used similarly to reply_to
return self._request_dict['correlation_id']
def execute_using_pool(self, pool: BasePool, **kwargs):
"""Used by the worker to send this task to the pool.
Arguments:
pool (~celery.concurrency.base.TaskPool): The execution pool
used to execute this request.
Raises:
celery.exceptions.TaskRevokedError: if the task was revoked.
"""
task_id = self.id
task = self._task
if self.revoked():
raise TaskRevokedError(task_id)
time_limit, soft_time_limit = self.time_limits
trace = fast_trace_task if self._app.use_fast_trace_task else trace_task_ret
result = pool.apply_async(
trace,
args=(self._type, task_id, self._request_dict, self._body,
self._content_type, self._content_encoding),
accept_callback=self.on_accepted,
timeout_callback=self.on_timeout,
callback=self.on_success,
error_callback=self.on_failure,
soft_timeout=soft_time_limit or task.soft_time_limit,
timeout=time_limit or task.time_limit,
correlation_id=task_id,
)
# cannot create weakref to None
self._apply_result = maybe(ref, result)
return result
def execute(self, loglevel=None, logfile=None):
"""Execute the task in a :func:`~celery.app.trace.trace_task`.
Arguments:
loglevel (int): The loglevel used by the task.
logfile (str): The logfile used by the task.
"""
if self.revoked():
return
# acknowledge task as being processed.
if not self.task.acks_late:
self.acknowledge()
_, _, embed = self._payload
request = self._request_dict
# pylint: disable=unpacking-non-sequence
# payload is a property, so pylint doesn't think it's a tuple.
request.update({
'loglevel': loglevel,
'logfile': logfile,
'is_eager': False,
}, **embed or {})
retval, I, _, _ = trace_task(self.task, self.id, self._args, self._kwargs, request,
hostname=self._hostname, loader=self._app.loader,
app=self._app)
if I:
self.reject(requeue=False)
else:
self.acknowledge()
return retval
def maybe_expire(self):
"""If expired, mark the task as revoked."""
if self.expires:
now = datetime.now(self.expires.tzinfo)
if now > self.expires:
revoked_tasks.add(self.id)
return True
def terminate(self, pool, signal=None):
signal = _signals.signum(signal or TERM_SIGNAME)
if self.time_start:
pool.terminate_job(self.worker_pid, signal)
self._announce_revoked('terminated', True, signal, False)
else:
self._terminate_on_ack = pool, signal
if self._apply_result is not None:
obj = self._apply_result() # is a weakref
if obj is not None:
obj.terminate(signal)
def cancel(self, pool, signal=None, emit_retry=True):
signal = _signals.signum(signal or TERM_SIGNAME)
if self.time_start:
pool.terminate_job(self.worker_pid, signal)
self._announce_cancelled(emit_retry=emit_retry)
if self._apply_result is not None:
obj = self._apply_result() # is a weakref
if obj is not None:
obj.terminate(signal)
def _announce_cancelled(self, emit_retry=True):
task_ready(self)
self.send_event('task-cancelled')
if emit_retry:
reason = 'cancelled by Celery'
exc = Retry(message=reason)
self.task.backend.mark_as_retry(self.id,
exc,
request=self._context)
self.task.on_retry(exc, self.id, self.args, self.kwargs, None)
self._already_cancelled = True
if emit_retry:
send_retry(self.task, request=self._context, einfo=None)
def _announce_revoked(self, reason, terminated, signum, expired):
task_ready(self)
self.send_event('task-revoked',
terminated=terminated, signum=signum, expired=expired)
self.task.backend.mark_as_revoked(
self.id, reason, request=self._context,
store_result=self.store_errors,
)
self.acknowledge()
self._already_revoked = True
send_revoked(self.task, request=self._context,
terminated=terminated, signum=signum, expired=expired)
def revoked(self):
"""If revoked, skip task and mark state."""
expired = False
if self._already_revoked:
return True
if self.expires:
expired = self.maybe_expire()
revoked_by_id = self.id in revoked_tasks
revoked_by_header, revoking_header = False, None
if not revoked_by_id and self.stamped_headers:
for stamp in self.stamped_headers:
if stamp in revoked_stamps:
revoked_header = revoked_stamps[stamp]
stamped_header = self._message.headers['stamps'][stamp]
if isinstance(stamped_header, (list, tuple)):
for stamped_value in stamped_header:
if stamped_value in maybe_list(revoked_header):
revoked_by_header = True
revoking_header = {stamp: stamped_value}
break
else:
revoked_by_header = any([
stamped_header in maybe_list(revoked_header),
stamped_header == revoked_header, # When the header is a single set value
])
revoking_header = {stamp: stamped_header}
break
if any((expired, revoked_by_id, revoked_by_header)):
log_msg = 'Discarding revoked task: %s[%s]'
if revoked_by_header:
log_msg += ' (revoked by header: %s)' % revoking_header
info(log_msg, self.name, self.id)
self._announce_revoked(
'expired' if expired else 'revoked', False, None, expired,
)
return True
return False
def send_event(self, type, **fields):
if self._eventer and self._eventer.enabled and self.task.send_events:
self._eventer.send(type, uuid=self.id, **fields)
def on_accepted(self, pid, time_accepted):
"""Handler called when task is accepted by worker pool."""
self.worker_pid = pid
# Convert monotonic time_accepted to absolute time
self.time_start = time() - (monotonic() - time_accepted)
task_accepted(self)
if not self.task.acks_late:
self.acknowledge()
self.send_event('task-started')
if _does_debug:
debug('Task accepted: %s[%s] pid:%r', self.name, self.id, pid)
if self._terminate_on_ack is not None:
self.terminate(*self._terminate_on_ack)
def on_timeout(self, soft, timeout):
"""Handler called if the task times out."""
if soft:
warn('Soft time limit (%ss) exceeded for %s[%s]',
timeout, self.name, self.id)
else:
task_ready(self)
# This is a special case where the task timeout handling is done during
# the cold shutdown process.
if not state.should_terminate:
error('Hard time limit (%ss) exceeded for %s[%s]', timeout, self.name, self.id)
exc = TimeLimitExceeded(timeout)
self.task.backend.mark_as_failure(
self.id, exc, request=self._context,
store_result=self.store_errors,
)
if self.task.acks_late and self.task.acks_on_failure_or_timeout:
self.acknowledge()
def on_success(self, failed__retval__runtime, **kwargs):
"""Handler called if the task was successfully processed."""
failed, retval, runtime = failed__retval__runtime
if failed:
exc = retval.exception
if isinstance(exc, ExceptionWithTraceback):
exc = exc.exc
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
raise exc
return self.on_failure(retval, return_ok=True)
task_ready(self, successful=True)
if self.task.acks_late:
self.acknowledge()
self.send_event('task-succeeded', result=retval, runtime=runtime)
def on_retry(self, exc_info):
"""Handler called if the task should be retried."""
if self.task.acks_late:
self.acknowledge()
self.send_event('task-retried',
exception=safe_repr(exc_info.exception.exc),
traceback=safe_str(exc_info.traceback))
def on_failure(self, exc_info, send_failed_event=True, return_ok=False):
"""Handler called if the task raised an exception."""
task_ready(self)
exc = exc_info.exception
if isinstance(exc, ExceptionWithTraceback):
exc = exc.exc
is_terminated = isinstance(exc, Terminated)
if is_terminated:
# If the task was terminated and the task was not cancelled due
# to a connection loss, it is revoked.
# We always cancel the tasks inside the master process.
# If the request was cancelled, it was not revoked and there's
# nothing to be done.
# According to the comment below, we need to check if the task
# is already revoked and if it wasn't, we should announce that
# it was.
if not self._already_cancelled and not self._already_revoked:
# This is a special case where the process
# would not have had time to write the result.
self._announce_revoked(
'terminated', True, str(exc), False)
return
elif isinstance(exc, MemoryError):
raise MemoryError(f'Process got: {exc}')
elif isinstance(exc, Reject):
return self.reject(requeue=exc.requeue)
elif isinstance(exc, Ignore):
return self.acknowledge()
elif isinstance(exc, Retry):
return self.on_retry(exc_info)
# (acks_late) acknowledge after result stored.
requeue = False
is_worker_lost = isinstance(exc, WorkerLostError)
if self.task.acks_late:
reject = (
(self.task.reject_on_worker_lost and is_worker_lost)
or (isinstance(exc, TimeLimitExceeded) and not self.task.acks_on_failure_or_timeout)
)
ack = self.task.acks_on_failure_or_timeout
if reject:
requeue = True
self.reject(requeue=requeue)
send_failed_event = False
elif ack:
self.acknowledge()
else:
                # supporting the behaviour where a failed task
                # needs to be removed from the prefetched local queue
self.reject(requeue=False)
# This is a special case where the task failure handling is done during
# the cold shutdown process.
if state.should_terminate:
return_ok = True
send_failed_event = False
# This is a special case where the process would not have had time
# to write the result.
if not requeue and (is_worker_lost or not return_ok):
# only mark as failure if task has not been requeued
self.task.backend.mark_as_failure(
self.id, exc, request=self._context,
store_result=self.store_errors,
)
signals.task_failure.send(sender=self.task, task_id=self.id,
exception=exc, args=self.args,
kwargs=self.kwargs,
traceback=exc_info.traceback,
einfo=exc_info)
if send_failed_event:
self.send_event(
'task-failed',
exception=safe_repr(get_pickled_exception(exc_info.exception)),
traceback=exc_info.traceback,
)
if not return_ok:
error('Task handler raised error: %r', exc,
exc_info=exc_info.exc_info)
def acknowledge(self):
"""Acknowledge task."""
if not self.acknowledged:
self._on_ack(logger, self._connection_errors)
self.acknowledged = True
def reject(self, requeue=False):
if not self.acknowledged:
self._on_reject(logger, self._connection_errors, requeue)
self.acknowledged = True
self.send_event('task-rejected', requeue=requeue)
def info(self, safe=False):
return {
'id': self.id,
'name': self.name,
'args': self._args if not safe else self._argsrepr,
'kwargs': self._kwargs if not safe else self._kwargsrepr,
'type': self._type,
'hostname': self._hostname,
'time_start': self.time_start,
'acknowledged': self.acknowledged,
'delivery_info': self.delivery_info,
'worker_pid': self.worker_pid,
}
def humaninfo(self):
return '{0.name}[{0.id}]'.format(self)
def __str__(self):
"""``str(self)``."""
return ' '.join([
self.humaninfo(),
f' ETA:[{self._eta}]' if self._eta else '',
f' expires:[{self._expires}]' if self._expires else '',
]).strip()
def __repr__(self):
"""``repr(self)``."""
return '<{}: {} {} {}>'.format(
type(self).__name__, self.humaninfo(),
self._argsrepr, self._kwargsrepr,
)
@cached_property
def _payload(self):
return self.__payload
@cached_property
def chord(self):
# used by backend.mark_as_failure when failure is reported
# by parent process
# pylint: disable=unpacking-non-sequence
# payload is a property, so pylint doesn't think it's a tuple.
_, _, embed = self._payload
return embed.get('chord')
@cached_property
def errbacks(self):
# used by backend.mark_as_failure when failure is reported
# by parent process
# pylint: disable=unpacking-non-sequence
# payload is a property, so pylint doesn't think it's a tuple.
_, _, embed = self._payload
return embed.get('errbacks')
@cached_property
def group(self):
# used by backend.on_chord_part_return when failures reported
# by parent process
return self._request_dict.get('group')
@cached_property
def _context(self):
"""Context (:class:`~celery.app.task.Context`) of this task."""
request = self._request_dict
# pylint: disable=unpacking-non-sequence
# payload is a property, so pylint doesn't think it's a tuple.
_, _, embed = self._payload
request.update(**embed or {})
return Context(request)
@cached_property
def group_index(self):
# used by backend.on_chord_part_return to order return values in group
return self._request_dict.get('group_index')
def create_request_cls(base, task, pool, hostname, eventer,
ref=ref, revoked_tasks=revoked_tasks,
task_ready=task_ready, trace=None, app=current_app):
default_time_limit = task.time_limit
default_soft_time_limit = task.soft_time_limit
apply_async = pool.apply_async
acks_late = task.acks_late
events = eventer and eventer.enabled
if trace is None:
trace = fast_trace_task if app.use_fast_trace_task else trace_task_ret
class Request(base):
def execute_using_pool(self, pool, **kwargs):
task_id = self.task_id
if self.revoked():
raise TaskRevokedError(task_id)
time_limit, soft_time_limit = self.time_limits
result = apply_async(
trace,
args=(self.type, task_id, self.request_dict, self.body,
self.content_type, self.content_encoding),
accept_callback=self.on_accepted,
timeout_callback=self.on_timeout,
callback=self.on_success,
error_callback=self.on_failure,
soft_timeout=soft_time_limit or default_soft_time_limit,
timeout=time_limit or default_time_limit,
correlation_id=task_id,
)
# cannot create weakref to None
# pylint: disable=attribute-defined-outside-init
self._apply_result = maybe(ref, result)
return result
def on_success(self, failed__retval__runtime, **kwargs):
failed, retval, runtime = failed__retval__runtime
if failed:
exc = retval.exception
if isinstance(exc, ExceptionWithTraceback):
exc = exc.exc
if isinstance(exc, (SystemExit, KeyboardInterrupt)):
raise exc
return self.on_failure(retval, return_ok=True)
task_ready(self, successful=True)
if acks_late:
self.acknowledge()
if events:
self.send_event(
'task-succeeded', result=retval, runtime=runtime,
)
return Request
| Request |
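Editor's note: on_accepted above converts the pool's monotonic timestamp to wall-clock time. The conversion in isolation, using only the stdlib:
from time import monotonic, time

time_accepted = monotonic()  # recorded when the pool accepted the task
# ... later, back in the parent process ...
time_start = time() - (monotonic() - time_accepted)
# time_start approximates, on the wall clock, when the task was accepted,
# computed from the elapsed monotonic interval rather than a second time() call.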
python | oauthlib__oauthlib | tests/oauth2/rfc6749/test_utils.py | {
"start": 471,
"end": 3725
} | class ____(TestCase):
def test_escape(self):
"""Assert that we are only escaping unicode"""
self.assertRaises(ValueError, escape, b"I am a string type. Not a unicode type.")
self.assertEqual(escape("I am a unicode type."), "I%20am%20a%20unicode%20type.")
def test_host_from_uri(self):
"""Test if hosts and ports are properly extracted from URIs.
This should be done according to the MAC Authentication spec.
        Default ports should be provided when none is present in the URI.
"""
self.assertEqual(host_from_uri('http://a.b-c.com:8080'), ('a.b-c.com', '8080'))
self.assertEqual(host_from_uri('https://a.b.com:8080'), ('a.b.com', '8080'))
self.assertEqual(host_from_uri('http://www.example.com'), ('www.example.com', '80'))
self.assertEqual(host_from_uri('https://www.example.com'), ('www.example.com', '443'))
def test_is_secure_transport(self):
"""Test check secure uri."""
if 'OAUTHLIB_INSECURE_TRANSPORT' in os.environ:
del os.environ['OAUTHLIB_INSECURE_TRANSPORT']
self.assertTrue(is_secure_transport('https://example.com'))
self.assertFalse(is_secure_transport('http://example.com'))
os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1'
self.assertTrue(is_secure_transport('http://example.com'))
del os.environ['OAUTHLIB_INSECURE_TRANSPORT']
def test_params_from_uri(self):
self.assertEqual(params_from_uri('http://i.b/?foo=bar&g&scope=a+d'),
{'foo': 'bar', 'g': '', 'scope': ['a', 'd']})
def test_generate_age(self):
issue_time = datetime.datetime.now() - datetime.timedelta(
days=3, minutes=1, seconds=4)
self.assertGreater(float(generate_age(issue_time)), 259263.0)
def test_list_to_scope(self):
expected = 'foo bar baz'
string_list = ['foo', 'bar', 'baz']
self.assertEqual(list_to_scope(string_list), expected)
string_tuple = ('foo', 'bar', 'baz')
self.assertEqual(list_to_scope(string_tuple), expected)
obj_list = [ScopeObject('foo'), ScopeObject('bar'), ScopeObject('baz')]
self.assertEqual(list_to_scope(obj_list), expected)
set_list = set(string_list)
set_scope = list_to_scope(set_list)
assert len(set_scope.split(' ')) == 3
for x in string_list:
assert x in set_scope
self.assertRaises(ValueError, list_to_scope, object())
def test_scope_to_list(self):
expected = ['foo', 'bar', 'baz']
string_scopes = 'foo bar baz '
self.assertEqual(scope_to_list(string_scopes), expected)
string_list_scopes = ['foo', 'bar', 'baz']
self.assertEqual(scope_to_list(string_list_scopes), expected)
tuple_list_scopes = ('foo', 'bar', 'baz')
self.assertEqual(scope_to_list(tuple_list_scopes), expected)
obj_list_scopes = [ScopeObject('foo'), ScopeObject('bar'), ScopeObject('baz')]
self.assertEqual(scope_to_list(obj_list_scopes), expected)
set_list_scopes = set(string_list_scopes)
set_list = scope_to_list(set_list_scopes)
self.assertEqual(sorted(set_list), sorted(string_list_scopes))
| UtilsTests |
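Editor's note: a rough re-implementation of the scope helpers these tests exercise, written as an assumption about their behavior rather than oauthlib's exact code:
def list_to_scope(scope):
    if isinstance(scope, str) or scope is None:
        return scope
    if isinstance(scope, (set, tuple, list)):
        return " ".join(str(s) for s in scope)
    raise ValueError("Invalid scope, must be string, set, tuple or list.")

def scope_to_list(scope):
    if isinstance(scope, (set, tuple, list)):
        return [str(s) for s in scope]
    if scope is None:
        return None
    return scope.strip().split(" ")

assert list_to_scope(["foo", "bar"]) == "foo bar"
assert scope_to_list("foo bar baz ") == ["foo", "bar", "baz"]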
python | scipy__scipy | scipy/_lib/_util.py | {
"start": 23710,
"end": 23976
} | class ____:
"""
Object to wrap user's function, allowing picklability
"""
def __init__(self, f, args):
self.f = f
self.args = [] if args is None else args
def __call__(self, x):
return self.f(x, *self.args)
| _FunctionWrapper |
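Editor's note: a usage sketch for the wrapper above (the masked class resolves to _FunctionWrapper per the target column), bundling a function with fixed extra arguments so the pair can be pickled and shipped to worker processes:
def f(x, a, b):
    return a * x + b

wrapped = _FunctionWrapper(f, (2, 3))  # subsequent calls compute 2*x + 3
assert wrapped(10) == 23
# Because the instance pickles (f is a module-level function), it can be
# handed to e.g. multiprocessing.Pool().map(wrapped, [0, 1, 2]) -> [3, 5, 7]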
python | pyqtgraph__pyqtgraph | benchmarks/arrayToQPath.py | {
"start": 860,
"end": 988
} | class ____(_TimeSuite):
def __init__(self):
super().__init__()
self.have_nonfinite = True
| TimeSuiteWithNonFinite |
python | readthedocs__readthedocs.org | readthedocs/api/v2/views/integrations.py | {
"start": 2735,
"end": 13135
} | class ____:
"""Base class for Webhook mixins."""
permission_classes = (permissions.AllowAny,)
renderer_classes = (JSONRenderer,)
integration = None
integration_type = None
invalid_payload_msg = "Payload not valid"
missing_secret_deprecated_msg = dedent(
"""
This webhook doesn't have a secret configured.
For security reasons, webhooks without a secret are no longer permitted.
For more information, read our blog post: https://blog.readthedocs.com/security-update-on-incoming-webhooks/.
"""
).strip()
def post(self, request, project_slug):
"""Set up webhook post view with request and project objects."""
self.request = request
structlog.contextvars.bind_contextvars(
project_slug=project_slug,
integration_type=self.integration_type,
)
# WARNING: this is a hack to allow us access to `request.body` later.
# Due to a limitation of DRF, we can't access `request.body`
# after accessing `request.data`.
# By accessing `request.body` we are able to access `request.body` and
# `request.data` later without any problem (mostly black magic).
# See #4940 for more background.
self.request.body # noqa
self.project = None
self.data = self.get_data()
try:
self.project = self.get_project(slug=project_slug)
if not Project.objects.is_active(self.project):
resp = {"detail": "This project is currently disabled"}
return Response(resp, status=status.HTTP_406_NOT_ACCEPTABLE)
except Project.DoesNotExist as exc:
raise NotFound("Project not found") from exc
# Webhooks without a secret are no longer permitted.
# https://blog.readthedocs.com/security-update-on-incoming-webhooks/.
if not self.has_secret():
return Response(
{"detail": self.missing_secret_deprecated_msg},
status=HTTP_400_BAD_REQUEST,
)
if not self.is_payload_valid():
log.warning("Invalid payload for project and integration.")
return Response(
{"detail": self.invalid_payload_msg},
status=HTTP_400_BAD_REQUEST,
)
resp = self.handle_webhook()
if resp is None:
log.info("Unhandled webhook event")
resp = {"detail": "Unhandled webhook event"}
        # The response can be a DRF Response with the status code already set.
# In that case, we just return it as is.
if isinstance(resp, Response):
return resp
return Response(resp)
def has_secret(self):
integration = self.get_integration()
if hasattr(integration, "token"):
return bool(integration.token)
return bool(integration.secret)
def get_project(self, **kwargs):
return Project.objects.get(**kwargs)
def finalize_response(self, req, *args, **kwargs):
"""If the project was set on POST, store an HTTP exchange."""
resp = super().finalize_response(req, *args, **kwargs)
if hasattr(self, "project") and self.project:
try:
integration = self.get_integration()
except (Http404, ParseError):
# If we can't get a single integration (either none or multiple exist),
# we can't store the HTTP exchange
integration = None
if integration:
HttpExchange.objects.from_exchange(
req,
resp,
related_object=integration,
payload=self.data,
)
return resp
def get_data(self):
"""
Normalize posted data.
        This can be overridden to support multiple content types.
"""
return self.request.data
def handle_webhook(self):
"""Handle webhook payload."""
raise NotImplementedError
def get_external_version_data(self):
"""Get External Version data from payload."""
raise NotImplementedError
def is_payload_valid(self):
"""Validates the webhook's payload using the integration's secret."""
return False
@staticmethod
def get_digest(secret, msg):
"""Get a HMAC digest of `msg` using `secret`."""
digest = hmac.new(
secret.encode(),
msg=msg.encode(),
digestmod=hashlib.sha256,
)
return digest.hexdigest()
def get_integration(self):
"""
Get or create an inbound webhook to track webhook requests.
        Most providers don't pass the webhook ID in the request either, so we default
to just finding *any* integration from the provider. This is not ideal,
but the :py:class:`WebhookView` view solves this by performing a lookup
on the integration instead of guessing.
"""
# `integration` can be passed in as an argument to `as_view`, as it is
# in `WebhookView`
if self.integration is not None:
return self.integration
integrations = Integration.objects.filter(
project=self.project,
integration_type=self.integration_type,
)
if not integrations.exists():
raise Http404("No Integration matches the given query.")
elif integrations.count() > 1:
raise ParseError(
"Multiple integrations found for this project. "
"Please use the webhook URL with an explicit integration ID."
)
self.integration = integrations.first()
return self.integration
def get_response_push(self, project, versions_info: list[VersionInfo]):
"""
Build branches on push events and return API response.
Return a JSON response with the following::
{
"build_triggered": true,
"project": "project_name",
"versions": [...]
}
:param project: Project instance
:type project: Project
"""
to_build, not_building = build_versions_from_names(project, versions_info)
if not_building:
log.info(
"Skipping project versions.",
versions=not_building,
)
triggered = bool(to_build)
return {
"build_triggered": triggered,
"project": project.slug,
"versions": list(to_build),
}
def sync_versions_response(self, project, sync=True):
"""
Trigger a sync and returns a response indicating if the build was triggered or not.
If `sync` is False, the sync isn't triggered and a response indicating so is returned.
"""
version = None
if sync:
version = trigger_sync_versions(project)
return {
"build_triggered": False,
"project": project.slug,
"versions": [version] if version else [],
"versions_synced": version is not None,
}
def get_external_version_response(self, project):
"""
Trigger builds for External versions on pull/merge request events and return API response.
Return a JSON response with the following::
{
"build_triggered": true,
"project": "project_name",
"versions": [verbose_name]
}
:param project: Project instance
:type project: readthedocs.projects.models.Project
"""
version_data = self.get_external_version_data()
# create or get external version object using `verbose_name`.
external_version = get_or_create_external_version(
project=project,
version_data=version_data,
)
# returns external version verbose_name (pull/merge request number)
to_build = build_external_version(
project=project,
version=external_version,
)
return {
"build_triggered": bool(to_build),
"project": project.slug,
"versions": [to_build] if to_build else [],
}
def get_closed_external_version_response(self, project):
"""
Close the external version on merge/close events and return the API response.
Return a JSON response with the following::
{
"closed": true,
"project": "project_name",
"versions": [verbose_name]
}
:param project: Project instance
:type project: Project
"""
version_data = self.get_external_version_data()
version_closed = close_external_version(
project=project,
version_data=version_data,
)
return {
"closed": bool(version_closed),
"project": project.slug,
"versions": [version_closed] if version_closed else [],
}
def update_default_branch(self, default_branch):
"""
        Update the `Version.identifier` for `latest` with the VCS's `default_branch`.
The VCS's `default_branch` is the branch cloned when there is no specific branch specified
(e.g. `git clone <URL>`).
Some VCS providers (GitHub and GitLab) send the `default_branch` via incoming webhooks.
We use that data to update our database and keep it in sync.
This solves the problem about "changing the default branch in GitHub"
and also importing repositories with a different `default_branch` than `main` manually:
https://github.com/readthedocs/readthedocs.org/issues/9367
In case the user already selected a `default-branch` from the "Advanced settings",
it does not override it.
This action can be performed only if the integration has a secret,
requests from anonymous users are ignored.
"""
if self.get_integration().secret and not self.project.default_branch:
# Always check for the machine attribute, since latest can be user created.
# RTD doesn't manage those.
self.project.versions.filter(slug=LATEST, machine=True).update(
identifier=default_branch
)
| WebhookMixin |
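Editor's note: a minimal sketch of verifying a webhook signature with the same HMAC-SHA256 approach as get_digest above; the secret and body values are invented:
import hashlib
import hmac

def is_signature_valid(secret: str, body: str, signature: str) -> bool:
    digest = hmac.new(secret.encode(), msg=body.encode(),
                      digestmod=hashlib.sha256).hexdigest()
    # compare_digest avoids leaking timing information
    return hmac.compare_digest(digest, signature)

body = '{"ref": "refs/heads/main"}'
sig = hmac.new(b"s3cret", body.encode(), hashlib.sha256).hexdigest()
assert is_signature_valid("s3cret", body, sig)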
python | ipython__ipython | IPython/core/history.py | {
"start": 5958,
"end": 19436
} | class ____(HistoryAccessorBase):
"""Access the history database without adding to it.
This is intended for use by standalone history tools. IPython shells use
HistoryManager, below, which is a subclass of this."""
# counter for init_db retries, so we don't keep trying over and over
_corrupt_db_counter = 0
# after two failures, fallback on :memory:
_corrupt_db_limit = 2
# String holding the path to the history file
hist_file = Union(
[Instance(Path), Unicode()],
help="""Path to file to use for SQLite history database.
By default, IPython will put the history database in the IPython
profile directory. If you would rather share one history among
profiles, you can set this value in each, so that they are consistent.
Due to an issue with fcntl, SQLite is known to misbehave on some NFS
mounts. If you see IPython hanging, try setting this to something on a
local disk, e.g::
ipython --HistoryManager.hist_file=/tmp/ipython_hist.sqlite
        you can also use the specific value `:memory:` (including the colon
        at both ends but not the back ticks), to avoid creating a history file.
""",
).tag(config=True)
enabled = Bool(
sqlite3_found,
help="""enable the SQLite history
set enabled=False to disable the SQLite history,
in which case there will be no stored history, no SQLite connection,
and no background saving thread. This may be necessary in some
threaded environments where IPython is embedded.
""",
).tag(config=True)
connection_options = Dict(
help="""Options for configuring the SQLite connection
These options are passed as keyword args to sqlite3.connect
when establishing database connections.
"""
).tag(config=True)
@default("connection_options")
def _default_connection_options(self) -> dict[str, bool]:
return dict(check_same_thread=False)
# The SQLite database
db = Any()
@observe("db")
@only_when_enabled
def _db_changed(self, change): # type: ignore [no-untyped-def]
"""validate the db, since it can be an Instance of two different types"""
new = change["new"]
connection_types = (DummyDB, sqlite3.Connection)
if not isinstance(new, connection_types):
msg = "%s.db must be sqlite3 Connection or DummyDB, not %r" % (
self.__class__.__name__,
new,
)
raise TraitError(msg)
def __init__(
self, profile: str = "default", hist_file: str = "", **traits: typing.Any
) -> None:
"""Create a new history accessor.
Parameters
----------
profile : str
The name of the profile from which to open history.
hist_file : str
Path to an SQLite history database stored by IPython. If specified,
hist_file overrides profile.
config : :class:`~traitlets.config.loader.Config`
Config object. hist_file can also be set through this.
"""
super(HistoryAccessor, self).__init__(**traits)
# defer setting hist_file from kwarg until after init,
# otherwise the default kwarg value would clobber any value
# set by config
if hist_file:
self.hist_file = hist_file
try:
self.hist_file
except TraitError:
# No one has set the hist_file, yet.
self.hist_file = self._get_hist_file_name(profile)
self.init_db()
def _get_hist_file_name(self, profile: str = "default") -> Path:
"""Find the history file for the given profile name.
This is overridden by the HistoryManager subclass, to use the shell's
active profile.
Parameters
----------
profile : str
The name of a profile which has a history file.
"""
return Path(locate_profile(profile)) / "history.sqlite"
@catch_corrupt_db
def init_db(self) -> None:
"""Connect to the database, and create tables if necessary."""
if not self.enabled:
self.db = DummyDB()
return
# use detect_types so that timestamps return datetime objects
kwargs = dict(detect_types=sqlite3.PARSE_DECLTYPES | sqlite3.PARSE_COLNAMES)
kwargs.update(self.connection_options)
self.db = sqlite3.connect(str(self.hist_file), **kwargs) # type: ignore [call-overload]
with self.db:
self.db.execute(
"""CREATE TABLE IF NOT EXISTS sessions (session integer
primary key autoincrement, start timestamp,
end timestamp, num_cmds integer, remark text)"""
)
self.db.execute(
"""CREATE TABLE IF NOT EXISTS history
(session integer, line integer, source text, source_raw text,
PRIMARY KEY (session, line))"""
)
# Output history is optional, but ensure the table's there so it can be
# enabled later.
self.db.execute(
"""CREATE TABLE IF NOT EXISTS output_history
(session integer, line integer, output text,
PRIMARY KEY (session, line))"""
)
# success! reset corrupt db count
self._corrupt_db_counter = 0
def writeout_cache(self) -> None:
"""Overridden by HistoryManager to dump the cache before certain
database lookups."""
pass
## -------------------------------
## Methods for retrieving history:
## -------------------------------
def _run_sql(
self,
sql: str,
params: tuple,
raw: bool = True,
output: bool = False,
latest: bool = False,
) -> Iterable[tuple[int, int, InOrInOut]]:
"""Prepares and runs an SQL query for the history database.
Parameters
----------
sql : str
Any filtering expressions to go after SELECT ... FROM ...
params : tuple
Parameters passed to the SQL query (to replace "?")
raw, output : bool
See :meth:`get_range`
latest : bool
Select rows with max (session, line)
Returns
-------
Tuples as :meth:`get_range`
"""
toget = "source_raw" if raw else "source"
sqlfrom = "history"
if output:
sqlfrom = "history LEFT JOIN output_history USING (session, line)"
toget = "history.%s, output_history.output" % toget
if latest:
toget += ", MAX(session * 128 * 1024 + line)"
        this_query = "SELECT session, line, %s FROM %s " % (toget, sqlfrom) + sql
        cur = self.db.execute(this_query, params)
if latest:
cur = (row[:-1] for row in cur)
if output: # Regroup into 3-tuples, and parse JSON
return ((ses, lin, (inp, out)) for ses, lin, inp, out in cur)
return cur
@only_when_enabled
@catch_corrupt_db
def get_session_info(
self, session: int
) -> tuple[int, datetime.datetime, Optional[datetime.datetime], Optional[int], str]:
"""Get info about a session.
Parameters
----------
session : int
Session number to retrieve.
Returns
-------
session_id : int
Session ID number
start : datetime
Timestamp for the start of the session.
end : datetime
Timestamp for the end of the session, or None if IPython crashed.
num_cmds : int
Number of commands run, or None if IPython crashed.
remark : str
A manually set description.
"""
query = "SELECT * from sessions where session == ?"
return self.db.execute(query, (session,)).fetchone()
@catch_corrupt_db
def get_last_session_id(self) -> Optional[int]:
"""Get the last session ID currently in the database.
Within IPython, this should be the same as the value stored in
:attr:`HistoryManager.session_number`.
"""
for record in self.get_tail(n=1, include_latest=True):
return record[0]
return None
@catch_corrupt_db
def get_tail(
self,
n: int = 10,
raw: bool = True,
output: bool = False,
include_latest: bool = False,
) -> Iterable[tuple[int, int, InOrInOut]]:
"""Get the last n lines from the history database.
Parameters
----------
n : int
The number of lines to get
raw, output : bool
See :meth:`get_range`
include_latest : bool
If False (default), n+1 lines are fetched, and the latest one
            is discarded. This is intended for use where the function is
            called by a user command, so that the command itself is not returned.
Returns
-------
Tuples as :meth:`get_range`
"""
self.writeout_cache()
if not include_latest:
n += 1
cur = self._run_sql(
"ORDER BY session DESC, line DESC LIMIT ?", (n,), raw=raw, output=output
)
if not include_latest:
return reversed(list(cur)[1:])
return reversed(list(cur))
@catch_corrupt_db
def search(
self,
pattern: str = "*",
raw: bool = True,
search_raw: bool = True,
output: bool = False,
n: Optional[int] = None,
unique: bool = False,
) -> Iterable[tuple[int, int, InOrInOut]]:
"""Search the database using unix glob-style matching (wildcards
* and ?).
Parameters
----------
pattern : str
The wildcarded pattern to match when searching
search_raw : bool
If True, search the raw input, otherwise, the parsed input
raw, output : bool
See :meth:`get_range`
n : None or int
If an integer is given, it defines the limit of
returned entries.
unique : bool
When it is true, return only unique entries.
Returns
-------
Tuples as :meth:`get_range`
"""
tosearch = "source_raw" if search_raw else "source"
if output:
tosearch = "history." + tosearch
self.writeout_cache()
sqlform = "WHERE %s GLOB ?" % tosearch
params: tuple[typing.Any, ...] = (pattern,)
if unique:
sqlform += " GROUP BY {0}".format(tosearch)
if n is not None:
sqlform += " ORDER BY session DESC, line DESC LIMIT ?"
params += (n,)
elif unique:
sqlform += " ORDER BY session, line"
cur = self._run_sql(sqlform, params, raw=raw, output=output, latest=unique)
if n is not None:
return reversed(list(cur))
return cur
@catch_corrupt_db
def get_range(
self,
session: int,
start: int = 1,
stop: Optional[int] = None,
raw: bool = True,
output: bool = False,
) -> Iterable[tuple[int, int, InOrInOut]]:
"""Retrieve input by session.
Parameters
----------
session : int
Session number to retrieve.
start : int
First line to retrieve.
stop : int
End of line range (excluded from output itself). If None, retrieve
to the end of the session.
raw : bool
If True, return untranslated input
output : bool
If True, attempt to include output. This will be 'real' Python
objects for the current session, or text reprs from previous
sessions if db_log_output was enabled at the time. Where no output
is found, None is used.
Returns
-------
entries
An iterator over the desired lines. Each line is a 3-tuple, either
(session, line, input) if output is False, or
(session, line, (input, output)) if output is True.
"""
params: tuple[typing.Any, ...]
if stop:
lineclause = "line >= ? AND line < ?"
params = (session, start, stop)
else:
lineclause = "line>=?"
params = (session, start)
return self._run_sql(
"WHERE session==? AND %s" % lineclause, params, raw=raw, output=output
)
def get_range_by_str(
self, rangestr: str, raw: bool = True, output: bool = False
) -> Iterable[tuple[int, int, InOrInOut]]:
"""Get lines of history from a string of ranges, as used by magic
commands %hist, %save, %macro, etc.
Parameters
----------
rangestr : str
            A string specifying ranges, e.g. "5 ~2/1-4". If an empty string is
            used, this will return everything from the current session's history.
See the documentation of :func:`%history` for the full details.
raw, output : bool
As :meth:`get_range`
Returns
-------
Tuples as :meth:`get_range`
"""
for sess, s, e in extract_hist_ranges(rangestr):
yield from self.get_range(sess, s, e, raw=raw, output=output)
@dataclass
| HistoryAccessor |
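Editor's note: a self-contained sketch of the schema and tail query used by init_db()/get_tail() above, against an in-memory database with invented sample rows:
import sqlite3

db = sqlite3.connect(":memory:")
db.execute("""CREATE TABLE history
              (session integer, line integer, source text, source_raw text,
               PRIMARY KEY (session, line))""")
db.executemany("INSERT INTO history VALUES (?, ?, ?, ?)",
               [(1, 1, "a = 1", "a = 1"), (1, 2, "print(a)", "print(a)")])
cur = db.execute("SELECT session, line, source_raw FROM history "
                 "ORDER BY session DESC, line DESC LIMIT ?", (2,))
print(list(reversed(cur.fetchall())))  # [(1, 1, 'a = 1'), (1, 2, 'print(a)')]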
python | django-extensions__django-extensions | django_extensions/management/email_notifications.py | {
"start": 150,
"end": 5312
} | class ____(BaseCommand):
"""
A BaseCommand subclass which adds sending email functionality.
Subclasses will have an extra command line option ``--email-notification``
and will be able to send emails by calling ``send_email_notification()``
if SMTP host and port are specified in settings. The handling of the
command line option is left to the management command implementation.
Configuration is done in settings.EMAIL_NOTIFICATIONS dict.
Configuration example::
EMAIL_NOTIFICATIONS = {
'scripts.my_script': {
'subject': 'my_script subject',
'body': 'my_script body',
'from_email': 'from_email@example.com',
'recipients': ('recipient0@example.com',),
'no_admins': False,
'no_traceback': False,
'notification_level': 0,
'fail_silently': False
},
'scripts.another_script': {
...
},
...
}
Configuration explained:
subject: Email subject.
body: Email body.
from_email: Email from address.
recipients: Sequence of email recipient addresses.
no_admins: When True do not include ADMINS to recipients.
no_traceback: When True do not include traceback to email body.
notification_level: 0: send email on fail, 1: send email always.
fail_silently: Parameter passed to django's send_mail().
"""
def add_arguments(self, parser):
parser.add_argument(
"--email-notifications",
action="store_true",
default=False,
dest="email_notifications",
help="Send email notifications for command.",
)
parser.add_argument(
"--email-exception",
action="store_true",
default=False,
dest="email_exception",
help="Send email for command exceptions.",
)
def run_from_argv(self, argv):
"""Overridden in order to access the command line arguments."""
self.argv_string = " ".join(argv)
super().run_from_argv(argv)
def execute(self, *args, **options):
"""
Overridden in order to send emails on unhandled exception.
If an unhandled exception in ``def handle(self, *args, **options)``
occurs and `--email-exception` is set or `self.email_exception` is
set to True send an email to ADMINS with the traceback and then
reraise the exception.
"""
try:
super().execute(*args, **options)
except Exception:
if options["email_exception"] or getattr(self, "email_exception", False):
self.send_email_notification(include_traceback=True)
raise
def send_email_notification(
self, notification_id=None, include_traceback=False, verbosity=1
):
"""
Send email notifications.
Reads settings from settings.EMAIL_NOTIFICATIONS dict, if available,
using ``notification_id`` as a key or else provides reasonable
defaults.
"""
# Load email notification settings if available
if notification_id is not None:
try:
email_settings = settings.EMAIL_NOTIFICATIONS.get(notification_id, {})
except AttributeError:
email_settings = {}
else:
email_settings = {}
# Exit if no traceback found and not in 'notify always' mode
if not include_traceback and not email_settings.get("notification_level", 0):
print(self.style.ERROR("Exiting, not in 'notify always' mode."))
return
# Set email fields.
subject = email_settings.get("subject", "Django extensions email notification.")
command_name = self.__module__.split(".")[-1]
body = email_settings.get(
"body", "Reporting execution of command: '%s'" % command_name
)
# Include traceback
if include_traceback and not email_settings.get("no_traceback", False):
try:
exc_type, exc_value, exc_traceback = sys.exc_info()
trb = "".join(traceback.format_tb(exc_traceback))
body += "\n\nTraceback:\n\n%s\n" % trb
finally:
del exc_traceback
# Set from address
from_email = email_settings.get("from_email", settings.DEFAULT_FROM_EMAIL)
# Calculate recipients
recipients = list(email_settings.get("recipients", []))
if not email_settings.get("no_admins", False):
recipients.extend(settings.ADMINS)
if not recipients:
if verbosity > 0:
print(self.style.ERROR("No email recipients available."))
return
# Send email...
send_mail(
subject,
body,
from_email,
recipients,
fail_silently=email_settings.get("fail_silently", True),
)
| EmailNotificationCommand |
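Editor's note: a hypothetical subclass showing where send_email_notification() fits (the masked base class resolves to EmailNotificationCommand per the target column); the command body and settings key are invented for illustration:
class Command(EmailNotificationCommand):
    help = "Nightly cleanup with optional email report."

    def handle(self, *args, **options):
        run_cleanup()  # hypothetical unit of work
        if options["email_notifications"]:
            # Key should match an entry in settings.EMAIL_NOTIFICATIONS
            self.send_email_notification("scripts.my_script", verbosity=1)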
python | mlflow__mlflow | mlflow/server/graphql/autogenerated_graphql_schema.py | {
"start": 10851,
"end": 11658
} | class ____(graphene.ObjectType):
mlflow_search_datasets = graphene.Field(MlflowSearchDatasetsResponse, input=MlflowSearchDatasetsInput())
mlflow_search_runs = graphene.Field(MlflowSearchRunsResponse, input=MlflowSearchRunsInput())
def resolve_mlflow_search_datasets(self, info, input):
input_dict = vars(input)
request_message = mlflow.protos.service_pb2.SearchDatasets()
parse_dict(input_dict, request_message)
return mlflow.server.handlers.search_datasets_impl(request_message)
def resolve_mlflow_search_runs(self, info, input):
input_dict = vars(input)
request_message = mlflow.protos.service_pb2.SearchRuns()
parse_dict(input_dict, request_message)
return mlflow.server.handlers.search_runs_impl(request_message)
| MutationType |
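Editor's note: the resolvers above all follow one pattern: dump the graphene input to a dict, hydrate a protobuf request from it, and dispatch to a handler. A pure-Python stand-in for that pattern (all types here are invented, not mlflow's):
class SearchRunsRequest:          # stand-in for the protobuf message
    experiment_ids = None

def parse_dict(input_dict, message):
    for key, value in input_dict.items():
        if value is not None:
            setattr(message, key, value)

class Input:                      # stand-in for the graphene input object
    def __init__(self, experiment_ids):
        self.experiment_ids = experiment_ids

request = SearchRunsRequest()
parse_dict(vars(Input(["1", "2"])), request)
assert request.experiment_ids == ["1", "2"]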
python | dagster-io__dagster | python_modules/dagster/dagster/_core/instance/instance.py | {
"start": 3313,
"end": 39035
} | class ____(
SettingsMethods,
StorageMethods,
DaemonMethods,
RunLauncherMethods,
EventMethods,
SchedulingMethods,
AssetMethods,
RunMethods,
DynamicPartitionsStore,
):
"""Core abstraction for managing Dagster's access to storage and other resources.
Use DagsterInstance.get() to grab the current DagsterInstance which will load based on
the values in the ``dagster.yaml`` file in ``$DAGSTER_HOME``.
    Alternatively, DagsterInstance.ephemeral() can be used, which provides a set of
transient in-memory components.
Configuration of this class should be done by setting values in ``$DAGSTER_HOME/dagster.yaml``.
For example, to use Postgres for dagster storage, you can write a ``dagster.yaml`` such as the
following:
.. literalinclude:: ../../../../../../examples/docs_snippets/docs_snippets/deploying/dagster-pg.yaml
:caption: dagster.yaml
:language: YAML
Args:
instance_type (InstanceType): Indicates whether the instance is ephemeral or persistent.
Users should not attempt to set this value directly or in their ``dagster.yaml`` files.
local_artifact_storage (LocalArtifactStorage): The local artifact storage is used to
configure storage for any artifacts that require a local disk, such as schedules, or
when using the filesystem system storage to manage files and intermediates. By default,
this will be a :py:class:`dagster._core.storage.root.LocalArtifactStorage`. Configurable
in ``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass`
machinery.
run_storage (RunStorage): The run storage is used to store metadata about ongoing and past
pipeline runs. By default, this will be a
:py:class:`dagster._core.storage.runs.SqliteRunStorage`. Configurable in ``dagster.yaml``
using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.
event_storage (EventLogStorage): Used to store the structured event logs generated by
pipeline runs. By default, this will be a
:py:class:`dagster._core.storage.event_log.SqliteEventLogStorage`. Configurable in
``dagster.yaml`` using the :py:class:`~dagster.serdes.ConfigurableClass` machinery.
compute_log_manager (Optional[ComputeLogManager]): The compute log manager handles stdout
and stderr logging for op compute functions. By default, this will be a
:py:class:`dagster._core.storage.local_compute_log_manager.LocalComputeLogManager`.
Configurable in ``dagster.yaml`` using the
:py:class:`~dagster.serdes.ConfigurableClass` machinery.
        run_coordinator (Optional[RunCoordinator]): A run coordinator may be used to manage the execution
of pipeline runs.
run_launcher (Optional[RunLauncher]): Optionally, a run launcher may be used to enable
a Dagster instance to launch pipeline runs, e.g. on a remote Kubernetes cluster, in
addition to running them locally.
settings (Optional[Dict]): Specifies certain per-instance settings,
such as feature flags. These are set in the ``dagster.yaml`` under a set of whitelisted
keys.
ref (Optional[InstanceRef]): Used by internal machinery to pass instances across process
boundaries.
"""
# Stores TemporaryDirectory instances that were created for DagsterInstance.local_temp() calls
# to be removed once the instance is garbage collected.
_TEMP_DIRS: "weakref.WeakKeyDictionary[DagsterInstance, TemporaryDirectory]" = (
weakref.WeakKeyDictionary()
)
def __init__(
self,
instance_type: InstanceType,
local_artifact_storage: "LocalArtifactStorage",
run_storage: "RunStorage",
event_storage: "EventLogStorage",
run_coordinator: Optional["RunCoordinator"],
compute_log_manager: Optional["ComputeLogManager"],
run_launcher: Optional["RunLauncher"],
scheduler: Optional["Scheduler"] = None,
schedule_storage: Optional["ScheduleStorage"] = None,
settings: Optional[Mapping[str, Any]] = None,
secrets_loader: Optional["SecretsLoader"] = None,
defs_state_storage: Optional["DefsStateStorage"] = None,
ref: Optional[InstanceRef] = None,
**_kwargs: Any, # we accept kwargs for forward-compat of custom instances
):
from dagster._core.launcher import RunLauncher
from dagster._core.run_coordinator import RunCoordinator
from dagster._core.scheduler import Scheduler
from dagster._core.secrets import SecretsLoader
from dagster._core.storage.compute_log_manager import ComputeLogManager
from dagster._core.storage.defs_state import DefsStateStorage
from dagster._core.storage.event_log import EventLogStorage
from dagster._core.storage.root import LocalArtifactStorage
from dagster._core.storage.runs import RunStorage
from dagster._core.storage.schedules import ScheduleStorage
self._instance_type = check.inst_param(instance_type, "instance_type", InstanceType)
self._local_artifact_storage = check.inst_param(
local_artifact_storage, "local_artifact_storage", LocalArtifactStorage
)
self._event_storage = check.inst_param(event_storage, "event_storage", EventLogStorage)
self._event_storage.register_instance(self)
self._run_storage = check.inst_param(run_storage, "run_storage", RunStorage)
self._run_storage.register_instance(self)
if compute_log_manager:
self._compute_log_manager = check.inst_param(
compute_log_manager, "compute_log_manager", ComputeLogManager
)
self._compute_log_manager.register_instance(self)
else:
check.invariant(
ref,
"Compute log manager must be provided if instance is not from a ref",
)
self._compute_log_manager = None
self._scheduler = check.opt_inst_param(scheduler, "scheduler", Scheduler)
self._schedule_storage = check.opt_inst_param(
schedule_storage, "schedule_storage", ScheduleStorage
)
if self._schedule_storage:
self._schedule_storage.register_instance(self)
if run_coordinator:
self._run_coordinator = check.inst_param(
run_coordinator, "run_coordinator", RunCoordinator
)
self._run_coordinator.register_instance(self)
else:
check.invariant(ref, "Run coordinator must be provided if instance is not from a ref")
self._run_coordinator = None
if run_launcher:
self._run_launcher: Optional[RunLauncher] = check.inst_param(
run_launcher, "run_launcher", RunLauncher
)
run_launcher.register_instance(self)
else:
check.invariant(ref, "Run launcher must be provided if instance is not from a ref")
self._run_launcher = None
self._settings = check.opt_mapping_param(settings, "settings")
self._secrets_loader = check.opt_inst_param(secrets_loader, "secrets_loader", SecretsLoader)
if self._secrets_loader:
self._secrets_loader.register_instance(self)
self._defs_state_storage = check.opt_inst_param(
defs_state_storage, "defs_state_storage", DefsStateStorage
)
if self._defs_state_storage:
self._defs_state_storage.register_instance(self)
self._ref = check.opt_inst_param(ref, "ref", InstanceRef)
self._subscribers: dict[str, list[Callable]] = defaultdict(list)
self._initialize_run_monitoring()
# Used for batched event handling
self._event_buffer: dict[str, list[EventLogEntry]] = defaultdict(list)
# =====================================================================================
# PUBLIC API METHODS
# =====================================================================================
# Factory Methods
# ---------------
@public
@staticmethod
def ephemeral(
tempdir: Optional[str] = None,
preload: Optional[Sequence["DebugRunPayload"]] = None,
settings: Optional[dict] = None,
) -> "DagsterInstance":
"""Create a `DagsterInstance` suitable for ephemeral execution, useful in test contexts. An
ephemeral instance uses mostly in-memory components. Use `local_temp` to create a test
instance that is fully persistent.
Args:
tempdir (Optional[str]): The path of a directory to be used for local artifact storage.
preload (Optional[Sequence[DebugRunPayload]]): A sequence of payloads to load into the
instance's run storage. Useful for debugging.
settings (Optional[Dict]): Settings for the instance.
Returns:
DagsterInstance: An ephemeral DagsterInstance.
"""
from dagster._core.instance.factory import create_ephemeral_instance
return create_ephemeral_instance(tempdir=tempdir, preload=preload, settings=settings)
@public
@staticmethod
def get() -> "DagsterInstance":
"""Get the current `DagsterInstance` as specified by the ``DAGSTER_HOME`` environment variable.
Returns:
DagsterInstance: The current DagsterInstance.
"""
from dagster._core.instance.factory import create_instance_from_dagster_home
return create_instance_from_dagster_home()
@public
@staticmethod
def local_temp(
tempdir: Optional[str] = None,
overrides: Optional[DagsterInstanceOverrides] = None,
) -> "DagsterInstance":
"""Create a DagsterInstance that uses a temporary directory for local storage. This is a
regular, fully persistent instance. Use `ephemeral` to get an ephemeral instance with
in-memory components.
Args:
tempdir (Optional[str]): The path of a directory to be used for local artifact storage.
overrides (Optional[DagsterInstanceOverrides]): Override settings for the instance.
Returns:
DagsterInstance
"""
from dagster._core.instance.factory import create_local_temp_instance
return create_local_temp_instance(tempdir=tempdir, overrides=overrides)
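    # Editor's note (comments only, not dagster source): the three factory
    # entry points above in brief --
    #   DagsterInstance.ephemeral()   -> mostly in-memory, for tests
    #   DagsterInstance.get()         -> loads from $DAGSTER_HOME/dagster.yaml
    #   DagsterInstance.local_temp()  -> fully persistent, temp-dir backed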
# Asset Domain
# ------------
@public
@traced
def fetch_materializations(
self,
records_filter: Union["AssetKey", "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> "EventRecordsResult":
"""Return a list of materialization records stored in the event log storage.
Args:
records_filter (Union[AssetKey, AssetRecordsFilter]): the filter by which to
filter event records.
limit (int): Number of results to get.
cursor (Optional[str]): Cursor to use for pagination. Defaults to None.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
EventRecordsResult: Object containing a list of event log records and a cursor string
"""
return AssetMethods.fetch_materializations(self, records_filter, limit, cursor, ascending)
@public
@traced
def fetch_observations(
self,
records_filter: Union["AssetKey", "AssetRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> "EventRecordsResult":
"""Return a list of observation records stored in the event log storage.
Args:
records_filter (Optional[Union[AssetKey, AssetRecordsFilter]]): the filter by which to
filter event records.
limit (int): Number of results to get.
cursor (Optional[str]): Cursor to use for pagination. Defaults to None.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
EventRecordsResult: Object containing a list of event log records and a cursor string
"""
return self._event_storage.fetch_observations(records_filter, limit, cursor, ascending)
@public
@traced
def get_asset_keys(
self,
prefix: Optional[Sequence[str]] = None,
limit: Optional[int] = None,
cursor: Optional[str] = None,
) -> Sequence["AssetKey"]:
"""Return a filtered subset of asset keys managed by this instance.
Args:
prefix (Optional[Sequence[str]]): Return only assets having this key prefix.
limit (Optional[int]): Maximum number of keys to return.
cursor (Optional[str]): Cursor to use for pagination.
Returns:
Sequence[AssetKey]: List of asset keys.
"""
return AssetMethods.get_asset_keys(self, prefix, limit, cursor)
@public
@traced
def get_asset_records(
self, asset_keys: Optional[Sequence["AssetKey"]] = None
) -> Sequence["AssetRecord"]:
"""Return an `AssetRecord` for each of the given asset keys.
Args:
asset_keys (Optional[Sequence[AssetKey]]): List of asset keys to retrieve records for.
Returns:
Sequence[AssetRecord]: List of asset records.
"""
return AssetMethods.get_asset_records(self, asset_keys)
@public
def get_latest_materialization_code_versions(
self, asset_keys: Iterable["AssetKey"]
) -> Mapping["AssetKey", Optional[str]]:
"""Returns the code version used for the latest materialization of each of the provided
assets.
Args:
asset_keys (Iterable[AssetKey]): The asset keys to find latest materialization code
versions for.
Returns:
Mapping[AssetKey, Optional[str]]: A dictionary with a key for each of the provided asset
keys. The values will be None if the asset has no materializations. If an asset does
not have a code version explicitly assigned to its definitions, but was
materialized, Dagster assigns the run ID as its code version.
"""
return AssetMethods.get_latest_materialization_code_versions(self, asset_keys)
@public
@traced
def get_latest_materialization_event(self, asset_key: "AssetKey") -> Optional["EventLogEntry"]:
"""Fetch the latest materialization event for the given asset key.
Args:
asset_key (AssetKey): Asset key to return materialization for.
Returns:
Optional[EventLogEntry]: The latest materialization event for the given asset
key, or `None` if the asset has not been materialized.
"""
return AssetMethods.get_latest_materialization_event(self, asset_key)
@public
@traced
def get_status_by_partition(
self,
asset_key: "AssetKey",
partition_keys: Sequence[str],
partitions_def: "PartitionsDefinition",
) -> Optional[Mapping[str, "AssetPartitionStatus"]]:
"""Get the current status of provided partition_keys for the provided asset.
Args:
asset_key (AssetKey): The asset to get per-partition status for.
partition_keys (Sequence[str]): The partitions to get status for.
partitions_def (PartitionsDefinition): The PartitionsDefinition of the asset to get
per-partition status for.
Returns:
Optional[Mapping[str, AssetPartitionStatus]]: status for each partition key
"""
return AssetMethods.get_status_by_partition(self, asset_key, partition_keys, partitions_def)
@public
@traced
def has_asset_key(self, asset_key: "AssetKey") -> bool:
"""Return true if this instance manages the given asset key.
Args:
asset_key (AssetKey): Asset key to check.
"""
return AssetMethods.has_asset_key(self, asset_key)
@public
def report_runless_asset_event(
self,
asset_event: Union[
"AssetMaterialization",
"AssetObservation",
"AssetCheckEvaluation",
"FreshnessStateEvaluation",
"FreshnessStateChange",
],
):
"""Record an event log entry related to assets that does not belong to a Dagster run."""
return AssetMethods.report_runless_asset_event(self, asset_event)
@public
@traced
def wipe_assets(self, asset_keys: Sequence["AssetKey"]) -> None:
"""Wipes asset event history from the event log for the given asset keys.
Args:
asset_keys (Sequence[AssetKey]): Asset keys to wipe.
"""
AssetMethods.wipe_assets(self, asset_keys)
# Run Domain
# ----------
@public
@traced
def delete_run(self, run_id: str) -> None:
"""Delete a run and all events generated by that from storage.
Args:
run_id (str): The id of the run to delete.
"""
self._run_storage.delete_run(run_id)
self._event_storage.delete_events(run_id)
@public
def get_run_by_id(self, run_id: str) -> Optional[DagsterRun]:
"""Get a :py:class:`DagsterRun` matching the provided `run_id`.
Args:
run_id (str): The id of the run to retrieve.
Returns:
Optional[DagsterRun]: The run corresponding to the given id. If no run matching the id
is found, return `None`.
"""
record = self.get_run_record_by_id(run_id)
if record is None:
return None
return record.dagster_run
@public
@traced
def get_run_record_by_id(self, run_id: str) -> Optional["RunRecord"]:
"""Get a :py:class:`RunRecord` matching the provided `run_id`.
Args:
run_id (str): The id of the run record to retrieve.
Returns:
Optional[RunRecord]: The run record corresponding to the given id. If no run matching
the id is found, return `None`.
"""
if not run_id:
return None
records = self._run_storage.get_run_records(RunsFilter(run_ids=[run_id]), limit=1)
if not records:
return None
return records[0]
@public
@traced
def get_run_records(
self,
filters: Optional["RunsFilter"] = None,
limit: Optional[int] = None,
order_by: Optional[str] = None,
ascending: bool = False,
cursor: Optional[str] = None,
bucket_by: Optional[Union["JobBucket", "TagBucket"]] = None,
) -> Sequence["RunRecord"]:
"""Return a list of run records stored in the run storage, sorted by the given column in given order.
Args:
filters (Optional[RunsFilter]): the filter by which to filter runs.
limit (Optional[int]): Number of results to get. Defaults to infinite.
order_by (Optional[str]): Name of the column to sort by. Defaults to id.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
List[RunRecord]: List of run records stored in the run storage.
"""
return self._run_storage.get_run_records(
filters, limit, order_by, ascending, cursor, bucket_by
)
# Event Domain
# ------------
@public
@traced
def fetch_run_status_changes(
self,
records_filter: Union["DagsterEventType", "RunStatusChangeRecordsFilter"],
limit: int,
cursor: Optional[str] = None,
ascending: bool = False,
) -> "EventRecordsResult":
"""Return a list of run_status_event records stored in the event log storage.
Args:
            records_filter (Union[DagsterEventType, RunStatusChangeRecordsFilter]): the
                filter by which to filter event records.
limit (int): Number of results to get.
cursor (Optional[str]): Cursor to use for pagination. Defaults to None.
ascending (Optional[bool]): Sort the result in ascending order if True, descending
otherwise. Defaults to descending.
Returns:
EventRecordsResult: Object containing a list of event log records and a cursor string
"""
return self._event_storage.fetch_run_status_changes(
records_filter, limit, cursor, ascending
)
@traced
def get_records_for_run(
self,
run_id: str,
cursor: Optional[str] = None,
of_type: Optional[Union["DagsterEventType", set["DagsterEventType"]]] = None,
limit: Optional[int] = None,
ascending: bool = True,
) -> "EventLogConnection":
"""Get event records for run.
NOTE: This method is duplicated here (vs only being in EventMethods) because
some Datadog tracing spans specifically expect to find this method on the
DagsterInstance class for proper trace attribution.
"""
return EventMethods.get_records_for_run(self, run_id, cursor, of_type, limit, ascending)
# Storage/Partition Domain
# ------------------------
@public
@traced
def add_dynamic_partitions(
self, partitions_def_name: str, partition_keys: Sequence[str]
) -> None:
"""Add partitions to the specified :py:class:`DynamicPartitionsDefinition` idempotently.
Does not add any partitions that already exist.
Args:
partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.
partition_keys (Sequence[str]): Partition keys to add.
"""
return self._event_storage.add_dynamic_partitions(partitions_def_name, partition_keys)
@public
@traced
def delete_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> None:
"""Delete a partition for the specified :py:class:`DynamicPartitionsDefinition`.
If the partition does not exist, exits silently.
Args:
partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.
partition_key (str): Partition key to delete.
"""
return self._event_storage.delete_dynamic_partition(partitions_def_name, partition_key)
@public
@traced
def get_dynamic_partitions(self, partitions_def_name: str) -> Sequence[str]:
"""Get the set of partition keys for the specified :py:class:`DynamicPartitionsDefinition`.
Args:
partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.
"""
return self._event_storage.get_dynamic_partitions(partitions_def_name)
def get_dynamic_partitions_definition_id(self, partitions_def_name: str) -> str:
from dagster._core.definitions.partitions.context import partition_loading_context
from dagster._core.definitions.partitions.utils import (
generate_partition_key_based_definition_id,
)
with partition_loading_context() as calling_context:
dynamic_partitions_store = calling_context.dynamic_partitions_store or self
# matches the base implementation of the get_serializable_unique_identifier on PartitionsDefinition
partition_keys = dynamic_partitions_store.get_dynamic_partitions(partitions_def_name)
return generate_partition_key_based_definition_id(partition_keys)
@public
@traced
def has_dynamic_partition(self, partitions_def_name: str, partition_key: str) -> bool:
"""Check if a partition key exists for the :py:class:`DynamicPartitionsDefinition`.
Args:
partitions_def_name (str): The name of the `DynamicPartitionsDefinition`.
partition_key (str): Partition key to check.
"""
return self._event_storage.has_dynamic_partition(partitions_def_name, partition_key)
# =====================================================================================
# INTERNAL METHODS
# =====================================================================================
# Core Instance Methods
# ---------------------
@staticmethod
def from_config(
config_dir: str,
config_filename: str = DAGSTER_CONFIG_YAML_FILENAME,
) -> "DagsterInstance":
from dagster._core.instance.factory import create_instance_from_config
return create_instance_from_config(config_dir, config_filename)
@staticmethod
def from_ref(instance_ref: InstanceRef) -> "DagsterInstance":
from dagster._core.instance.factory import create_instance_from_ref
return create_instance_from_ref(instance_ref)
# flags
@property
def is_persistent(self) -> bool:
return self._instance_type == InstanceType.PERSISTENT
@property
def is_ephemeral(self) -> bool:
return self._instance_type == InstanceType.EPHEMERAL
def get_ref(self) -> InstanceRef:
if self._ref:
return self._ref
check.failed(
"Attempted to prepare an ineligible DagsterInstance ({inst_type}) for cross "
"process communication.{dagster_home_msg}".format(
inst_type=self._instance_type,
dagster_home_msg=(
"\nDAGSTER_HOME environment variable is not set, set it to "
"a directory on the filesystem for dagster to use for storage and cross "
"process coordination."
if os.getenv("DAGSTER_HOME") is None
else ""
),
)
)
@property
def root_directory(self) -> str:
return self._local_artifact_storage.base_dir
def _info(self, component: object) -> Union[str, Mapping[Any, Any]]:
# ConfigurableClass may not have inst_data if it's a direct instantiation
# which happens for ephemeral instances
if isinstance(component, ConfigurableClass) and component.inst_data:
return component.inst_data.info_dict()
if type(component) is dict:
return component
return component.__class__.__name__
def _info_str_for_component(self, component_name: str, component: object) -> str:
return yaml.dump(
{component_name: self._info(component)},
default_flow_style=False,
sort_keys=False,
)
def info_dict(self) -> Mapping[str, object]:
settings: Mapping[str, object] = self._settings if self._settings else {}
ret = {
"local_artifact_storage": self._info(self._local_artifact_storage),
"run_storage": self._info(self._run_storage),
"event_log_storage": self._info(self._event_storage),
"compute_logs": self._info(self._compute_log_manager),
"schedule_storage": self._info(self._schedule_storage),
"scheduler": self._info(self._scheduler),
"run_coordinator": self._info(self._run_coordinator),
"run_launcher": self._info(self.run_launcher),
}
ret.update(
{
settings_key: self._info(settings_value)
for settings_key, settings_value in settings.items()
}
)
return ret
def info_str(self) -> str:
return yaml.dump(self.info_dict(), default_flow_style=False, sort_keys=False)
def schema_str(self) -> str:
def _schema_dict(
alembic_version: Optional["AlembicVersion"],
) -> Optional[Mapping[str, object]]:
if not alembic_version:
return None
db_revision, head_revision = alembic_version
return {
"current": db_revision,
"latest": head_revision,
}
return yaml.dump(
{
"schema": {
"event_log_storage": _schema_dict(self._event_storage.alembic_version()),
"run_storage": _schema_dict(self._event_storage.alembic_version()),
"schedule_storage": _schema_dict(self._event_storage.alembic_version()),
}
},
default_flow_style=False,
sort_keys=False,
)
@property
def run_storage(self) -> "RunStorage":
return self._run_storage
@property
def defs_state_storage(self) -> Optional["DefsStateStorage"]:
return self._defs_state_storage
@property
def event_log_storage(self) -> "EventLogStorage":
return self._event_storage
@property
def daemon_cursor_storage(self) -> "DaemonCursorStorage":
return self._run_storage
@property
def scheduler(self) -> Optional["Scheduler"]:
return self._scheduler
# run coordinator
@property
def run_coordinator(self) -> "RunCoordinator":
# Lazily load in case the run coordinator requires dependencies that are not available
# everywhere that loads the instance
if not self._run_coordinator:
check.invariant(
self._ref, "Run coordinator not provided, and no instance ref available"
)
run_coordinator = cast("InstanceRef", self._ref).run_coordinator
check.invariant(run_coordinator, "Run coordinator not configured in instance ref")
self._run_coordinator = cast("RunCoordinator", run_coordinator)
self._run_coordinator.register_instance(self)
return self._run_coordinator
@property
def run_launcher(self) -> "RunLauncher":
# Lazily load in case the launcher requires dependencies that are not available everywhere
# that loads the instance (e.g. The EcsRunLauncher requires boto3)
if not self._run_launcher:
check.invariant(self._ref, "Run launcher not provided, and no instance ref available")
launcher = cast("InstanceRef", self._ref).run_launcher
check.invariant(launcher, "Run launcher not configured in instance ref")
self._run_launcher = cast("RunLauncher", launcher)
self._run_launcher.register_instance(self)
return self._run_launcher
# compute logs
@property
def compute_log_manager(self) -> "ComputeLogManager":
if not self._compute_log_manager:
check.invariant(
self._ref,
"Compute log manager not provided, and no instance ref available",
)
compute_log_manager = cast("InstanceRef", self._ref).compute_log_manager
check.invariant(
compute_log_manager,
"Compute log manager not configured in instance ref",
)
self._compute_log_manager = cast("ComputeLogManager", compute_log_manager)
self._compute_log_manager.register_instance(self)
return self._compute_log_manager
def get_settings(self, settings_key: str) -> Any:
check.str_param(settings_key, "settings_key")
if self._settings and settings_key in self._settings:
return self._settings.get(settings_key)
return {}
def upgrade(self, print_fn: Optional[PrintFn] = None) -> None:
from dagster._core.storage.migration.utils import upgrading_instance
with upgrading_instance(self):
if print_fn:
print_fn("Updating run storage...")
self._run_storage.upgrade() # type: ignore # (unknown method on run storage)
self._run_storage.migrate(print_fn)
if print_fn:
print_fn("Updating event storage...")
self._event_storage.upgrade()
self._event_storage.reindex_assets(print_fn=print_fn)
if print_fn:
print_fn("Updating schedule storage...")
self._schedule_storage.upgrade() # type: ignore # (possible none)
self._schedule_storage.migrate(print_fn) # type: ignore # (possible none)
def dispose(self) -> None:
StorageMethods.dispose(self)
if self._run_coordinator:
self._run_coordinator.dispose()
if self._run_launcher:
self._run_launcher.dispose()
if self._compute_log_manager:
self._compute_log_manager.dispose()
if self._secrets_loader:
self._secrets_loader.dispose()
if self in DagsterInstance._TEMP_DIRS:
DagsterInstance._TEMP_DIRS[self].cleanup()
del DagsterInstance._TEMP_DIRS[self]
# run storage
@traced
def get_job_snapshot(self, snapshot_id: str) -> "JobSnap":
return self._run_storage.get_job_snapshot(snapshot_id)
@traced
def has_job_snapshot(self, snapshot_id: str) -> bool:
return self._run_storage.has_job_snapshot(snapshot_id)
@traced
def has_snapshot(self, snapshot_id: str) -> bool:
return self._run_storage.has_snapshot(snapshot_id)
@traced
def get_historical_job(self, snapshot_id: str) -> "HistoricalJob":
from dagster._core.remote_representation.historical import HistoricalJob
snapshot = self._run_storage.get_job_snapshot(snapshot_id)
parent_snapshot = (
self._run_storage.get_job_snapshot(snapshot.lineage_snapshot.parent_snapshot_id)
if snapshot.lineage_snapshot
else None
)
return HistoricalJob(snapshot, snapshot_id, parent_snapshot)
@traced
def has_historical_job(self, snapshot_id: str) -> bool:
return self._run_storage.has_job_snapshot(snapshot_id)
@traced
def get_execution_plan_snapshot(self, snapshot_id: str) -> "ExecutionPlanSnapshot":
return self._run_storage.get_execution_plan_snapshot(snapshot_id)
@traced
def add_snapshot(
self,
snapshot: Union["JobSnap", "ExecutionPlanSnapshot"],
) -> None:
return self._run_storage.add_snapshot(snapshot)
def wipe(self) -> None:
self._run_storage.wipe()
self._event_storage.wipe()
# asset storage methods are now in AssetMixin
# directories
def __enter__(self) -> Self:
from dagster._core.storage.defs_state.base import set_defs_state_storage
self._exit_stack = ExitStack()
self._exit_stack.enter_context(set_defs_state_storage(self.defs_state_storage))
return self
def __exit__(
self,
_exception_type: Optional[type[BaseException]],
_exception_value: Optional[BaseException],
_traceback: Optional[TracebackType],
) -> None:
self.dispose()
self._exit_stack.close()
# backfill
@property
def should_start_background_run_thread(self) -> bool:
"""Gate on a feature to start a thread that monitors for if the run should be canceled."""
return False
def inject_env_vars(self, location_name: Optional[str]) -> None:
if not self._secrets_loader:
return
new_env = self._secrets_loader.get_secrets_for_environment(location_name)
for k, v in new_env.items():
os.environ[k] = v
def backfill_log_storage_enabled(self) -> bool:
return False
def da_request_backfills(self) -> bool:
return False
def dagster_observe_supported(self) -> bool:
return False
| DagsterInstance |
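A brief, illustrative sketch (not part of the dataset row above) of the public `DagsterInstance` surface documented in the sample: the `ephemeral` factory, the dynamic-partition helpers, and `has_asset_key`. It assumes `dagster` is installed; the partitions-definition name "colors" and the asset key are made-up examples.

```python
from dagster import AssetKey, DagsterInstance

# Ephemeral instance: in-memory components, no DAGSTER_HOME required.
with DagsterInstance.ephemeral() as instance:
    # Idempotently register dynamic partition keys, then query them back.
    instance.add_dynamic_partitions("colors", ["red", "green"])
    assert instance.has_dynamic_partition("colors", "red")
    print(instance.get_dynamic_partitions("colors"))      # ['red', 'green']

    # Nothing has been materialized yet, so no asset keys are managed.
    print(instance.has_asset_key(AssetKey("my_asset")))    # False
```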
python | allegroai__clearml | clearml/backend_api/services/v2_23/queues.py | {
"start": 26540,
"end": 27401
} | class ____(Response):
"""
Response of queues.create endpoint.
:param id: New queue ID
:type id: str
"""
_service = "queues"
_action = "create"
_version = "2.23"
_schema = {
"definitions": {},
"properties": {"id": {"description": "New queue ID", "type": ["string", "null"]}},
"type": "object",
}
def __init__(self, id: Optional[str] = None, **kwargs: Any) -> None:
super(CreateResponse, self).__init__(**kwargs)
self.id = id
@schema_property("id")
def id(self) -> Optional[str]:
return self._property_id
@id.setter
def id(self, value: Optional[str]) -> None:
if value is None:
self._property_id = None
return
self.assert_isinstance(value, "id", six.string_types)
self._property_id = value
| CreateResponse |
python | coleifer__peewee | peewee.py | {
"start": 163690,
"end": 163747
} | class ____(_StringField):
field_type = 'TEXT'
| TextField |
python | pydantic__pydantic | pydantic/v1/errors.py | {
"start": 9818,
"end": 10080
} | class ____(PydanticValueError):
code = 'any_str.max_length'
msg_template = 'ensure this value has at most {limit_value} characters'
def __init__(self, *, limit_value: int) -> None:
super().__init__(limit_value=limit_value)
| AnyStrMaxLengthError |
python | streamlit__streamlit | lib/streamlit/source_util.py | {
"start": 979,
"end": 3168
} | class ____(TypedDict):
script_path: ScriptPath
page_script_hash: PageHash
icon: NotRequired[Icon]
page_name: NotRequired[PageName]
url_pathname: NotRequired[str]
def open_python_file(filename: str) -> TextIO:
"""Open a read-only Python file taking proper care of its encoding.
In Python 3, we would like all files to be opened with utf-8 encoding.
    However, some authors like to specify PEP263 headers in their source files
with their own encodings. In that case, we should respect the author's
encoding.
"""
import tokenize
if hasattr(tokenize, "open"): # Added in Python 3.2
# Open file respecting PEP263 encoding. If no encoding header is
# found, opens as utf-8.
return tokenize.open(filename)
return open(filename, encoding="utf-8")
PAGE_FILENAME_REGEX = re.compile(r"([0-9]*)[_ -]*(.*)\.py")
def page_sort_key(script_path: Path) -> tuple[float, str]:
matches = re.findall(PAGE_FILENAME_REGEX, script_path.name)
# Failing this should only be possible if script_path isn't a Python
# file, which should never happen.
if len(matches) == 0:
raise ValueError(
f"{script_path} is not a Python file. This should never happen."
)
[(number, label)] = matches
label = label.lower()
if number == "":
return (float("inf"), label)
return (float(number), label)
def page_icon_and_name(script_path: Path) -> tuple[str, str]:
"""Compute the icon and name of a page from its script path.
This is *almost* the page name displayed in the nav UI, but it has
underscores instead of spaces. The reason we do this is because having
spaces in URLs both looks bad and is hard to deal with due to the need to
URL-encode them. To solve this, we only swap the underscores for spaces
right before we render page names.
"""
extraction: re.Match[str] | None = re.search(PAGE_FILENAME_REGEX, script_path.name)
if extraction is None:
return "", ""
icon_and_name = re.sub(
r"[_ ]+", "_", extraction.group(2)
).strip() or extraction.group(1)
return extract_leading_emoji(icon_and_name)
| PageInfo |
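Illustrative only (not part of the dataset row): the sort rule implemented by `page_sort_key` above can be reproduced standalone with the same regex, which makes the expected ordering easy to check without importing Streamlit.

```python
import re
from pathlib import Path

PAGE_FILENAME_REGEX = re.compile(r"([0-9]*)[_ -]*(.*)\.py")

def page_sort_key(script_path: Path) -> tuple[float, str]:
    # Numbered pages sort by their numeric prefix; unnumbered pages sort last.
    [(number, label)] = re.findall(PAGE_FILENAME_REGEX, script_path.name)
    label = label.lower()
    return (float("inf"), label) if number == "" else (float(number), label)

print(page_sort_key(Path("03_My_Page.py")))  # (3.0, 'my_page')
print(page_sort_key(Path("home.py")))        # (inf, 'home')
```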
python | apache__airflow | providers/openlineage/src/airflow/providers/openlineage/utils/sql.py | {
"start": 1373,
"end": 1764
} | class ____(IntEnum):
"""Enumerates the indices of columns in information schema view."""
SCHEMA = 0
TABLE_NAME = 1
COLUMN_NAME = 2
ORDINAL_POSITION = 3
# Use 'udt_name' which is the underlying type of column
UDT_NAME = 4
# Database is optional as 6th column
DATABASE = 5
TablesHierarchy = dict[str | None, dict[str | None, list[str]]]
@define
| ColumnIndex |
python | apache__airflow | airflow-core/tests/unit/api_fastapi/core_api/routes/public/test_pools.py | {
"start": 8342,
"end": 14720
} | class ____(TestPoolsEndpoint):
@pytest.mark.parametrize(
("pool_name", "query_params", "body", "expected_status_code", "expected_response"),
[
# Error
(
Pool.DEFAULT_POOL_NAME,
{},
{},
400,
{"detail": "Only slots and included_deferred can be modified on Default Pool"},
),
(
Pool.DEFAULT_POOL_NAME,
{"update_mask": ["description"]},
{"pool": Pool.DEFAULT_POOL_NAME},
400,
{"detail": "Only slots and included_deferred can be modified on Default Pool"},
),
(
"unknown_pool",
{},
{"pool": "unknown_pool"},
404,
{"detail": "The Pool with name: `unknown_pool` was not found"},
),
# Pool name can't be updated
(
POOL1_NAME,
{},
{"pool": "pool1_updated"},
400,
{"detail": "Invalid body, pool name from request body doesn't match uri parameter"},
),
(
POOL1_NAME,
{},
{"pool": POOL1_NAME},
422,
{
"detail": [
{
"input": {"pool": POOL1_NAME},
"loc": ["slots"],
"msg": "Field required",
"type": "missing",
},
{
"input": {"pool": POOL1_NAME},
"loc": ["include_deferred"],
"msg": "Field required",
"type": "missing",
},
],
},
),
# Partial body on default_pool
(
Pool.DEFAULT_POOL_NAME,
{"update_mask": ["slots"]},
{"slots": 150, "name": Pool.DEFAULT_POOL_NAME, "include_deferred": True},
200,
{
"deferred_slots": 0,
"description": "Default pool",
"include_deferred": False,
"name": "default_pool",
"occupied_slots": 0,
"open_slots": 150,
"queued_slots": 0,
"running_slots": 0,
"scheduled_slots": 0,
"slots": 150,
},
),
# Partial body on default_pool alternate
(
Pool.DEFAULT_POOL_NAME,
{"update_mask": ["slots", "include_deferred"]},
{"pool": Pool.DEFAULT_POOL_NAME, "slots": 150, "include_deferred": True},
200,
{
"deferred_slots": 0,
"description": "Default pool",
"include_deferred": True,
"name": "default_pool",
"occupied_slots": 0,
"open_slots": 150,
"queued_slots": 0,
"running_slots": 0,
"scheduled_slots": 0,
"slots": 150,
},
),
# Full body
(
POOL1_NAME,
{},
{
"slots": 8,
"description": "Description Updated",
"name": POOL1_NAME,
"include_deferred": False,
},
200,
{
"deferred_slots": 0,
"description": "Description Updated",
"include_deferred": False,
"name": POOL1_NAME,
"occupied_slots": 0,
"open_slots": 8,
"queued_slots": 0,
"running_slots": 0,
"scheduled_slots": 0,
"slots": 8,
},
),
],
)
def test_should_respond_200(
self, test_client, session, pool_name, query_params, body, expected_status_code, expected_response
):
self.create_pools()
response = test_client.patch(f"/pools/{pool_name}", params=query_params, json=body)
assert response.status_code == expected_status_code
body = response.json()
if response.status_code == 422:
for error in body["detail"]:
# pydantic version can vary in tests (lower constraints), we do not assert the url.
del error["url"]
assert body == expected_response
if response.status_code == 200:
check_last_log(session, dag_id=None, event="patch_pool", logical_date=None)
def test_should_respond_401(self, unauthenticated_test_client):
response = unauthenticated_test_client.patch(f"/pools/{POOL1_NAME}", params={}, json={})
assert response.status_code == 401
def test_should_respond_403(self, unauthorized_test_client):
response = unauthorized_test_client.patch(f"/pools/{POOL1_NAME}", params={}, json={})
assert response.status_code == 403
def test_patch_pool3_should_respond_200(self, test_client, session):
"""Test patching POOL3 with forward slash in name"""
self.create_pools()
body = {
"slots": 10,
"description": "Updated Description",
"name": POOL3_NAME,
"include_deferred": True,
}
response = test_client.patch(f"/pools/{POOL3_NAME}", json=body)
assert response.status_code == 200
expected_response = {
"deferred_slots": 0,
"description": "Updated Description",
"include_deferred": True,
"name": "pool3/with_slashes",
"occupied_slots": 0,
"open_slots": 10,
"queued_slots": 0,
"running_slots": 0,
"scheduled_slots": 0,
"slots": 10,
}
assert response.json() == expected_response
check_last_log(session, dag_id=None, event="patch_pool", logical_date=None)
| TestPatchPool |
python | huggingface__transformers | src/transformers/models/xlm_roberta/tokenization_xlm_roberta.py | {
"start": 1128,
"end": 4915
} | class ____(TokenizersBackend):
r"""
Construct an XLM-RoBERTa tokenizer (backed by HuggingFace's tokenizers library). Based on SentencePiece.
This tokenizer inherits from [`TokenizersBackend`] which contains most of the main methods. Users should refer to
this superclass for more information regarding those methods.
Args:
vocab_file (`str`, optional): Path to the vocabulary file.
merges_file (`str`, optional): Path to the merges file.
tokenizer_file (`str`, optional): Path to a tokenizers JSON file containing the serialization of a tokenizer.
bos_token (`str`, optional, defaults to `"<s>"`): The beginning of sequence token.
eos_token (`str`, optional, defaults to `"</s>"`): The end of sequence token.
sep_token (`str`, optional, defaults to `"</s>"`): The separator token.
cls_token (`str`, optional, defaults to `"<s>"`): The classifier token.
unk_token (`str`, optional, defaults to `"<unk>"`): The unknown token.
pad_token (`str`, optional, defaults to `"<pad>"`): The padding token.
mask_token (`str`, optional, defaults to `"<mask>"`): The mask token.
add_prefix_space (`bool`, optional, defaults to `True`): Whether to add an initial space.
vocab (`dict`, optional): Custom vocabulary dictionary.
merges (`list`, optional): Custom merges list.
"""
vocab_files_names = VOCAB_FILES_NAMES
model_input_names = ["input_ids", "attention_mask"]
slow_tokenizer_class = None
def __init__(
self,
bos_token: str = "<s>",
eos_token: str = "</s>",
sep_token: str = "</s>",
cls_token: str = "<s>",
unk_token: str = "<unk>",
pad_token: str = "<pad>",
mask_token: str = "<mask>",
add_prefix_space: bool = True,
vocab: Optional[dict] = None,
vocab_file: Optional[str] = None,
**kwargs,
):
self.add_prefix_space = add_prefix_space
if vocab is not None:
self._vocab = vocab
else:
self._vocab = [
(str(bos_token), 0.0),
(str(pad_token), 0.0),
(str(eos_token), 0.0),
(str(unk_token), 0.0),
(str(mask_token), 0.0),
]
self._tokenizer = Tokenizer(Unigram(vocab=self._vocab, unk_id=3, byte_fallback=False))
self._tokenizer.normalizer = normalizers.Sequence(
[
normalizers.Strip(left=False, right=True),
normalizers.Replace(" {2,}", "▁"),
]
)
prepend_scheme = "always" if add_prefix_space else "never"
self._tokenizer.pre_tokenizer = pre_tokenizers.Sequence(
[
pre_tokenizers.WhitespaceSplit(),
pre_tokenizers.Metaspace(replacement="▁", prepend_scheme=prepend_scheme),
]
)
self._tokenizer.decoder = decoders.Metaspace(replacement="▁", prepend_scheme=prepend_scheme)
tokenizer_object = self._tokenizer
super().__init__(
tokenizer_object=tokenizer_object,
bos_token=bos_token,
eos_token=eos_token,
sep_token=sep_token,
cls_token=cls_token,
unk_token=unk_token,
pad_token=pad_token,
mask_token=mask_token,
add_prefix_space=add_prefix_space,
**kwargs,
)
self._tokenizer.post_processor = processors.TemplateProcessing(
single=["$A", "</s>"],
pair=["$A", "</s>", "$B", "</s>"],
special_tokens=[
("</s>", self.eos_token_id),
],
)
self.vocab_file = vocab_file
__all__ = ["XLMRobertaTokenizer"]
| XLMRobertaTokenizer |
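A hedged usage sketch for the tokenizer class above; in practice the tokenizer is normally loaded from a pretrained checkpoint rather than built from the tiny default vocabulary in `__init__`. The checkpoint name `xlm-roberta-base` is the standard public model and is not taken from the sample; it requires access to the Hugging Face Hub.

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("xlm-roberta-base")
enc = tok("Hello world", "a second sentence")
print(enc["input_ids"])                         # token ids including the special tokens added by the post-processor
print(tok.convert_ids_to_tokens(enc["input_ids"]))
```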
python | apache__airflow | providers/databricks/src/airflow/providers/databricks/operators/databricks_repos.py | {
"start": 1289,
"end": 6775
} | class ____(BaseOperator):
"""
Creates, and optionally checks out, a Databricks Repo using the POST api/2.0/repos API endpoint.
See: https://docs.databricks.com/dev-tools/api/latest/repos.html#operation/create-repo
:param git_url: Required HTTPS URL of a Git repository
:param git_provider: Optional name of Git provider. Must be provided if we can't guess its name from URL.
:param repo_path: optional path for a repository. Must be in the format ``/Repos/{folder}/{repo-name}``.
If not specified, it will be created in the user's directory.
:param branch: optional name of branch to check out.
:param tag: optional name of tag to checkout.
:param ignore_existing_repo: don't throw exception if repository with given path already exists.
:param databricks_conn_id: Reference to the :ref:`Databricks connection <howto/connection:databricks>`.
By default and in the common case this will be ``databricks_default``. To use
token based authentication, provide the key ``token`` in the extra field for the
connection and create the key ``host`` and leave the ``host`` field empty. (templated)
    :param databricks_retry_limit: Number of times to retry if the Databricks backend is
        unreachable. Its value must be greater than or equal to 1.
:param databricks_retry_delay: Number of seconds to wait between retries (it
might be a floating point number).
"""
# Used in airflow.models.BaseOperator
template_fields: Sequence[str] = ("repo_path", "tag", "branch", "databricks_conn_id")
__git_providers__ = {
"github.com": "gitHub",
"dev.azure.com": "azureDevOpsServices",
"gitlab.com": "gitLab",
"bitbucket.org": "bitbucketCloud",
}
__aws_code_commit_regexp__ = re.compile(r"^git-codecommit\.[^.]+\.amazonaws.com$")
__repos_path_regexp__ = re.compile(r"/Repos/[^/]+/[^/]+/?$")
def __init__(
self,
*,
git_url: str,
git_provider: str | None = None,
branch: str | None = None,
tag: str | None = None,
repo_path: str | None = None,
ignore_existing_repo: bool = False,
databricks_conn_id: str = "databricks_default",
databricks_retry_limit: int = 3,
databricks_retry_delay: int = 1,
**kwargs,
) -> None:
"""Create a new ``DatabricksReposCreateOperator``."""
super().__init__(**kwargs)
self.databricks_conn_id = databricks_conn_id
self.databricks_retry_limit = databricks_retry_limit
self.databricks_retry_delay = databricks_retry_delay
self.git_url = git_url
self.ignore_existing_repo = ignore_existing_repo
if git_provider is None:
self.git_provider = self.__detect_repo_provider__(git_url)
if self.git_provider is None:
raise AirflowException(
f"git_provider isn't specified and couldn't be guessed for URL {git_url}"
)
else:
self.git_provider = git_provider
self.repo_path = repo_path
if branch is not None and tag is not None:
raise AirflowException("Only one of branch or tag should be provided, but not both")
self.branch = branch
self.tag = tag
@staticmethod
def __detect_repo_provider__(url):
provider = None
try:
netloc = urlsplit(url).netloc.lower()
_, _, netloc = netloc.rpartition("@")
provider = DatabricksReposCreateOperator.__git_providers__.get(netloc)
if provider is None and DatabricksReposCreateOperator.__aws_code_commit_regexp__.match(netloc):
provider = "awsCodeCommit"
except ValueError:
pass
return provider
@cached_property
def _hook(self) -> DatabricksHook:
return DatabricksHook(
self.databricks_conn_id,
retry_limit=self.databricks_retry_limit,
retry_delay=self.databricks_retry_delay,
caller="DatabricksReposCreateOperator",
)
def execute(self, context: Context):
"""
Create a Databricks Repo.
:param context: context
:return: Repo ID
"""
payload = {
"url": self.git_url,
"provider": self.git_provider,
}
if self.repo_path is not None:
if not self.__repos_path_regexp__.match(self.repo_path):
raise AirflowException(
f"repo_path should have form of /Repos/{{folder}}/{{repo-name}}, got '{self.repo_path}'"
)
payload["path"] = self.repo_path
existing_repo_id = None
if self.repo_path is not None:
existing_repo_id = self._hook.get_repo_by_path(self.repo_path)
if existing_repo_id is not None and not self.ignore_existing_repo:
raise AirflowException(f"Repo with path '{self.repo_path}' already exists")
if existing_repo_id is None:
result = self._hook.create_repo(payload)
repo_id = result["id"]
else:
repo_id = existing_repo_id
# update repo if necessary
if self.branch is not None:
self._hook.update_repo(str(repo_id), {"branch": str(self.branch)})
elif self.tag is not None:
self._hook.update_repo(str(repo_id), {"tag": str(self.tag)})
return repo_id
| DatabricksReposCreateOperator |
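A sketch of wiring the operator above into a DAG, assuming an Airflow deployment with the Databricks provider installed and a `databricks_default` connection configured; the DAG id, repository URL, and repo path are placeholders.

```python
import pendulum
from airflow import DAG
from airflow.providers.databricks.operators.databricks_repos import DatabricksReposCreateOperator

with DAG(
    dag_id="databricks_repo_sync",
    start_date=pendulum.datetime(2024, 1, 1, tz="UTC"),
    schedule=None,
    catchup=False,
):
    create_repo = DatabricksReposCreateOperator(
        task_id="create_repo",
        git_url="https://github.com/example/project.git",  # provider is guessed as "gitHub" from the URL
        repo_path="/Repos/shared/project",                  # must match /Repos/{folder}/{repo-name}
        branch="main",
        ignore_existing_repo=True,
    )
```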
python | django-haystack__django-haystack | haystack/fields.py | {
"start": 10671,
"end": 11577
} | class ____(SearchField):
field_type = "date"
def __init__(self, **kwargs):
if kwargs.get("facet_class") is None:
kwargs["facet_class"] = FacetDateField
super().__init__(**kwargs)
def prepare(self, obj):
return self.convert(super().prepare(obj))
def convert(self, value):
if value is None:
return None
if isinstance(value, str):
match = DATE_REGEX.search(value)
if match:
data = match.groupdict()
return datetime.date(
int(data["year"]), int(data["month"]), int(data["day"])
)
else:
raise SearchFieldError(
"Date provided to '%s' field doesn't appear to be a valid date string: '%s'"
% (self.instance_name, value)
)
return value
| DateField |
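Illustrative checks of the `convert` behaviour documented above, assuming a project where Haystack's `DateField` is importable from `haystack.fields`; the date values are arbitrary.

```python
import datetime
from haystack.fields import DateField

field = DateField()
print(field.convert(None))                       # None passes through
print(field.convert("2024-05-01"))               # datetime.date(2024, 5, 1), parsed via DATE_REGEX
print(field.convert(datetime.date(2024, 5, 1)))  # already a date, returned unchanged
```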
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/data_loss_prevention.py | {
"start": 2719,
"end": 3015
} | class ____(BaseGoogleLink):
"""Helper class for constructing Cloud Data Loss Prevention link."""
name = "Cloud DLP Deidentify Template Details"
key = "cloud_dlp_deidentify_template_details_key"
format_str = DLP_DEIDENTIFY_TEMPLATE_DETAILS_LINK
| CloudDLPDeidentifyTemplateDetailsLink |
python | pypa__setuptools | setuptools/_vendor/inflect/__init__.py | {
"start": 39998,
"end": 40941
} | class ____(str):
lowered: str
split_: List[str]
first: str
last: str
def __init__(self, orig) -> None:
self.lowered = self.lower()
self.split_ = self.split()
self.first = self.split_[0]
self.last = self.split_[-1]
Falsish = Any # ideally, falsish would only validate on bool(value) is False
_STATIC_TYPE_CHECKING = TYPE_CHECKING
# ^-- Workaround for typeguard AST manipulation:
# https://github.com/agronholm/typeguard/issues/353#issuecomment-1556306554
if _STATIC_TYPE_CHECKING: # pragma: no cover
Word = Annotated[str, "String with at least 1 character"]
else:
class _WordMeta(type): # Too dynamic to be supported by mypy...
def __instancecheck__(self, instance: Any) -> bool:
return isinstance(instance, str) and len(instance) >= 1
class Word(metaclass=_WordMeta): # type: ignore[no-redef]
"""String with at least 1 character"""
| Words |
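A small, illustrative check of the `Words` helper and the runtime `Word` instance check defined above, assuming the module is importable as the standalone `inflect` package rather than from the vendored path.

```python
import inflect

w = inflect.Words("New York City")
print(w.lowered)                       # 'new york city'
print(w.first, w.last)                 # 'New' 'City'
print(isinstance("x", inflect.Word))   # True: any string with at least one character
print(isinstance("", inflect.Word))    # False
```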
python | allegroai__clearml | clearml/backend_api/services/v2_20/events.py | {
"start": 21170,
"end": 23303
} | class ____(NonStrictDataModel):
"""
:param task: Task ID
:type task: str
:param metric: Metric name
:type metric: str
:param variants: Metric variant names
:type variants: Sequence[str]
"""
_schema = {
"properties": {
"metric": {"description": "Metric name", "type": "string"},
"task": {"description": "Task ID", "type": "string"},
"variants": {
"description": "Metric variant names",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["task"],
"type": "object",
}
def __init__(
self, task: str, metric: Optional[str] = None, variants: Optional[List[str]] = None, **kwargs: Any
) -> None:
super(TaskMetricVariants, self).__init__(**kwargs)
self.task = task
self.metric = metric
self.variants = variants
@schema_property("task")
def task(self) -> str:
return self._property_task
@task.setter
def task(self, value: str) -> None:
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("metric")
def metric(self) -> Optional[str]:
return self._property_metric
@metric.setter
def metric(self, value: Optional[str]) -> None:
if value is None:
self._property_metric = None
return
self.assert_isinstance(value, "metric", six.string_types)
self._property_metric = value
@schema_property("variants")
def variants(self) -> Optional[List[str]]:
return self._property_variants
@variants.setter
def variants(self, value: Optional[List[str]]) -> None:
if value is None:
self._property_variants = None
return
self.assert_isinstance(value, "variants", (list, tuple))
self.assert_isinstance(value, "variants", six.string_types, is_array=True)
self._property_variants = value
| TaskMetricVariants |
python | huggingface__transformers | examples/pytorch/multiple-choice/run_swag.py | {
"start": 3642,
"end": 16947
} | class ____:
"""
Arguments pertaining to what data we are going to input our model for training and eval.
"""
train_file: Optional[str] = field(default=None, metadata={"help": "The input training data file (a text file)."})
validation_file: Optional[str] = field(
default=None,
metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
)
overwrite_cache: bool = field(
default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
)
preprocessing_num_workers: Optional[int] = field(
default=None,
metadata={"help": "The number of processes to use for the preprocessing."},
)
max_seq_length: Optional[int] = field(
default=None,
metadata={
"help": (
"The maximum total input sequence length after tokenization. If passed, sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
},
)
pad_to_max_length: bool = field(
default=False,
metadata={
"help": (
"Whether to pad all samples to the maximum sentence length. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch. More "
"efficient on GPU but very bad for TPU."
)
},
)
max_train_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
},
)
max_eval_samples: Optional[int] = field(
default=None,
metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
},
)
def __post_init__(self):
if self.train_file is not None:
extension = self.train_file.split(".")[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
extension = self.validation_file.split(".")[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
else:
model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
handlers=[logging.StreamHandler(sys.stdout)],
)
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
log_level = training_args.get_process_log_level()
logger.setLevel(log_level)
datasets.utils.logging.set_verbosity(log_level)
transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"Process rank: {training_args.local_process_index}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
+ f"distributed training: {training_args.parallel_mode.value == 'distributed'}, 16-bits training: {training_args.fp16}"
)
logger.info(f"Training/evaluation parameters {training_args}")
# Set seed before initializing model.
set_seed(training_args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
data_files = {}
if data_args.train_file is not None:
data_files["train"] = data_args.train_file
extension = data_args.train_file.split(".")[-1]
if data_args.validation_file is not None:
data_files["validation"] = data_args.validation_file
extension = data_args.validation_file.split(".")[-1]
raw_datasets = load_dataset(
extension,
data_files=data_files,
cache_dir=model_args.cache_dir,
token=model_args.token,
)
else:
# Downloading and loading the swag dataset from the hub.
raw_datasets = load_dataset(
"swag",
"regular",
cache_dir=model_args.cache_dir,
token=model_args.token,
)
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
config = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
tokenizer = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
cache_dir=model_args.cache_dir,
use_fast=model_args.use_fast_tokenizer,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
model = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path,
from_tf=bool(".ckpt" in model_args.model_name_or_path),
config=config,
cache_dir=model_args.cache_dir,
revision=model_args.model_revision,
token=model_args.token,
trust_remote_code=model_args.trust_remote_code,
)
# When using your own dataset or a different dataset from swag, you will probably need to change this.
ending_names = [f"ending{i}" for i in range(4)]
context_name = "sent1"
question_header_name = "sent2"
if data_args.max_seq_length is None:
max_seq_length = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`."
)
max_seq_length = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the "
f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
)
max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
# Preprocessing the datasets.
def preprocess_function(examples):
first_sentences = [[context] * 4 for context in examples[context_name]]
question_headers = examples[question_header_name]
second_sentences = [
[f"{header} {examples[end][i]}" for end in ending_names] for i, header in enumerate(question_headers)
]
# Flatten out
first_sentences = list(chain(*first_sentences))
second_sentences = list(chain(*second_sentences))
# Tokenize
tokenized_examples = tokenizer(
first_sentences,
second_sentences,
truncation=True,
max_length=max_seq_length,
padding="max_length" if data_args.pad_to_max_length else False,
)
# Un-flatten
return {k: [v[i : i + 4] for i in range(0, len(v), 4)] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset")
train_dataset = raw_datasets["train"]
if data_args.max_train_samples is not None:
max_train_samples = min(len(train_dataset), data_args.max_train_samples)
train_dataset = train_dataset.select(range(max_train_samples))
with training_args.main_process_first(desc="train dataset map pre-processing"):
train_dataset = train_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset")
eval_dataset = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
max_eval_samples = min(len(eval_dataset), data_args.max_eval_samples)
eval_dataset = eval_dataset.select(range(max_eval_samples))
with training_args.main_process_first(desc="validation dataset map pre-processing"):
eval_dataset = eval_dataset.map(
preprocess_function,
batched=True,
num_proc=data_args.preprocessing_num_workers,
load_from_cache_file=not data_args.overwrite_cache,
)
# Data collator
data_collator = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(
tokenizer=tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None, return_tensors="pt"
)
)
# Metric
def compute_metrics(eval_predictions):
predictions, label_ids = eval_predictions
preds = np.argmax(predictions, axis=1)
return {"accuracy": (preds == label_ids).astype(np.float32).mean().item()}
# Initialize our Trainer
trainer = Trainer(
model=model,
args=training_args,
train_dataset=train_dataset if training_args.do_train else None,
eval_dataset=eval_dataset if training_args.do_eval else None,
processing_class=tokenizer,
data_collator=data_collator,
compute_metrics=compute_metrics,
)
# Training
if training_args.do_train:
checkpoint = None
if training_args.resume_from_checkpoint is not None:
checkpoint = training_args.resume_from_checkpoint
train_result = trainer.train(resume_from_checkpoint=checkpoint)
trainer.save_model() # Saves the tokenizer too for easy upload
metrics = train_result.metrics
max_train_samples = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(train_dataset)
)
metrics["train_samples"] = min(max_train_samples, len(train_dataset))
trainer.log_metrics("train", metrics)
trainer.save_metrics("train", metrics)
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***")
metrics = trainer.evaluate()
max_eval_samples = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(eval_dataset)
metrics["eval_samples"] = min(max_eval_samples, len(eval_dataset))
trainer.log_metrics("eval", metrics)
trainer.save_metrics("eval", metrics)
kwargs = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**kwargs)
else:
trainer.create_model_card(**kwargs)
def _mp_fn(index):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| DataTrainingArguments |
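The script above accepts either command-line flags or a single path to a JSON file (see the `parse_json_file` branch in `main`). Below is a hedged sketch of the JSON route; the model name is a public checkpoint and the output directory and batch size are placeholder values.

```python
import json
import subprocess
import tempfile

# Hypothetical argument values; any field of the three argument dataclasses can be set here.
args = {
    "model_name_or_path": "bert-base-uncased",
    "output_dir": "/tmp/swag_out",
    "do_train": True,
    "do_eval": True,
    "max_seq_length": 128,
    "per_device_train_batch_size": 16,
}
with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as f:
    json.dump(args, f)

subprocess.run(["python", "run_swag.py", f.name], check=True)
```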
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/readers.py | {
"start": 34548,
"end": 48481
} | class ____(dataset_ops.DatasetV1Adapter):
"""A Dataset comprising lines from one or more CSV files."""
@functools.wraps(CsvDatasetV2.__init__, ("__module__", "__name__"))
def __init__(self,
filenames,
record_defaults,
compression_type=None,
buffer_size=None,
header=False,
field_delim=",",
use_quote_delim=True,
na_value="",
select_cols=None):
"""Creates a `CsvDataset` by reading and decoding CSV files.
The elements of this dataset correspond to records from the file(s).
RFC 4180 format is expected for CSV files
(https://tools.ietf.org/html/rfc4180)
  Note that we allow leading and trailing spaces for int or float fields.
For example, suppose we have a file 'my_file0.csv' with four CSV columns of
different data types:
```
abcdefg,4.28E10,5.55E6,12
hijklmn,-5.3E14,,2
```
We can construct a CsvDataset from it as follows:
```python
dataset = tf.data.experimental.CsvDataset(
"my_file*.csv",
[tf.float32, # Required field, use dtype or empty tensor
tf.constant([0.0], dtype=tf.float32), # Optional field, default to 0.0
tf.int32, # Required field, use dtype or empty tensor
],
select_cols=[1,2,3] # Only parse last three columns
)
```
The expected output of its iterations is:
```python
for element in dataset:
print(element)
>> (4.28e10, 5.55e6, 12)
>> (-5.3e14, 0.0, 2)
```
Args:
filenames: A `tf.string` tensor containing one or more filenames.
record_defaults: A list of default values for the CSV fields. Each item in
the list is either a valid CSV `DType` (float32, float64, int32, int64,
string), or a `Tensor` object with one of the above types. One per
column of CSV data, with either a scalar `Tensor` default value for the
column if it is optional, or `DType` or empty `Tensor` if required. If
both this and `select_columns` are specified, these must have the same
lengths, and `column_defaults` is assumed to be sorted in order of
increasing column index. If both this and 'exclude_cols' are specified,
the sum of lengths of record_defaults and exclude_cols should equal the
total number of columns in the CSV file.
compression_type: (Optional.) A `tf.string` scalar evaluating to one of
`""` (no compression), `"ZLIB"`, or `"GZIP"`. Defaults to no
compression.
buffer_size: (Optional.) A `tf.int64` scalar denoting the number of bytes
to buffer while reading files. Defaults to 4MB.
header: (Optional.) A `tf.bool` scalar indicating whether the CSV file(s)
have header line(s) that should be skipped when parsing. Defaults to
`False`.
field_delim: (Optional.) A `tf.string` scalar containing the delimiter
character that separates fields in a record. Defaults to `","`.
use_quote_delim: (Optional.) A `tf.bool` scalar. If `False`, treats double
quotation marks as regular characters inside of string fields (ignoring
RFC 4180, Section 2, Bullet 5). Defaults to `True`.
na_value: (Optional.) A `tf.string` scalar indicating a value that will be
treated as NA/NaN.
select_cols: (Optional.) A sorted list of column indices to select from
the input data. If specified, only this subset of columns will be
parsed. Defaults to parsing all columns. At most one of `select_cols`
and `exclude_cols` can be specified.
"""
wrapped = CsvDatasetV2(filenames, record_defaults, compression_type,
buffer_size, header, field_delim, use_quote_delim,
na_value, select_cols)
super(CsvDatasetV1, self).__init__(wrapped)
@tf_export("data.experimental.make_batched_features_dataset", v1=[])
def make_batched_features_dataset_v2(file_pattern,
batch_size,
features,
reader=None,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
reader_num_threads=None,
parser_num_threads=None,
sloppy_ordering=False,
drop_final_batch=False):
"""Returns a `Dataset` of feature dictionaries from `Example` protos.
If label_key argument is provided, returns a `Dataset` of tuple
comprising of feature dictionaries and label.
Example:
```
serialized_examples = [
features {
feature { key: "age" value { int64_list { value: [ 0 ] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "code", "art" ] } } }
},
features {
feature { key: "age" value { int64_list { value: [] } } }
feature { key: "gender" value { bytes_list { value: [ "f" ] } } }
feature { key: "kws" value { bytes_list { value: [ "sports" ] } } }
}
]
```
We can use arguments:
```
features: {
"age": FixedLenFeature([], dtype=tf.int64, default_value=-1),
"gender": FixedLenFeature([], dtype=tf.string),
"kws": VarLenFeature(dtype=tf.string),
}
```
And the expected output is:
```python
{
"age": [[0], [-1]],
"gender": [["f"], ["f"]],
"kws": SparseTensor(
indices=[[0, 0], [0, 1], [1, 0]],
values=["code", "art", "sports"]
dense_shape=[2, 2]),
}
```
Args:
file_pattern: List of files or patterns of file paths containing
`Example` records. See `tf.io.gfile.glob` for pattern rules.
batch_size: An int representing the number of records to combine
in a single batch.
features: A `dict` mapping feature keys to `FixedLenFeature` or
`VarLenFeature` values. See `tf.io.parse_example`.
reader: A function or class that can be
called with a `filenames` tensor and (optional) `reader_args` and returns
a `Dataset` of `Example` tensors. Defaults to `tf.data.TFRecordDataset`.
label_key: (Optional) A string corresponding to the key labels are stored in
`tf.Examples`. If provided, it must be one of the `features` key,
otherwise results in `ValueError`.
reader_args: Additional arguments to pass to the reader class.
num_epochs: Integer specifying the number of times to read through the
dataset. If None, cycles through the dataset forever. Defaults to `None`.
shuffle: A boolean, indicates whether the input should be shuffled. Defaults
to `True`.
shuffle_buffer_size: Buffer size of the ShuffleDataset. A large capacity
ensures better shuffling but would increase memory usage and startup time.
shuffle_seed: Randomization seed to use for shuffling.
prefetch_buffer_size: Number of feature batches to prefetch in order to
improve performance. Recommended value is the number of batches consumed
per training step. Defaults to auto-tune.
reader_num_threads: Number of threads used to read `Example` records. If >1,
the results will be interleaved. Defaults to `1`.
parser_num_threads: Number of threads to use for parsing `Example` tensors
into a dictionary of `Feature` tensors. Defaults to `2`.
sloppy_ordering: If `True`, reading performance will be improved at
the cost of non-deterministic ordering. If `False`, the order of elements
produced is deterministic prior to shuffling (elements are still
randomized if `shuffle=True`. Note that if the seed is set, then order
of elements after shuffling is deterministic). Defaults to `False`.
drop_final_batch: If `True`, and the batch size does not evenly divide the
input dataset size, the final smaller batch will be dropped. Defaults to
`False`.
Returns:
A dataset of `dict` elements, (or a tuple of `dict` elements and label).
Each `dict` maps feature keys to `Tensor` or `SparseTensor` objects.
Raises:
TypeError: If `reader` is of the wrong type.
ValueError: If `label_key` is not one of the `features` keys.
"""
if reader is None:
reader = core_readers.TFRecordDataset
if reader_num_threads is None:
reader_num_threads = 1
if parser_num_threads is None:
parser_num_threads = 2
if prefetch_buffer_size is None:
prefetch_buffer_size = dataset_ops.AUTOTUNE
# Create dataset of all matching filenames
dataset = dataset_ops.Dataset.list_files(
file_pattern, shuffle=shuffle, seed=shuffle_seed)
if isinstance(reader, type) and issubclass(reader, io_ops.ReaderBase):
raise TypeError("The `reader` argument must return a `Dataset` object. "
"`tf.ReaderBase` subclasses are not supported. For "
"example, pass `tf.data.TFRecordDataset` instead of "
"`tf.TFRecordReader`.")
# Read `Example` records from files as tensor objects.
if reader_args is None:
reader_args = []
if reader_num_threads == dataset_ops.AUTOTUNE:
dataset = dataset.interleave(
lambda filename: reader(filename, *reader_args),
num_parallel_calls=reader_num_threads)
options = options_lib.Options()
options.deterministic = not sloppy_ordering
dataset = dataset.with_options(options)
else:
# Read files sequentially (if reader_num_threads=1) or in parallel
def apply_fn(dataset):
return core_readers.ParallelInterleaveDataset(
dataset,
lambda filename: reader(filename, *reader_args),
cycle_length=reader_num_threads,
block_length=1,
sloppy=sloppy_ordering,
buffer_output_elements=None,
prefetch_input_elements=None)
dataset = dataset.apply(apply_fn)
# Extract values if the `Example` tensors are stored as key-value tuples.
if dataset_ops.get_legacy_output_types(dataset) == (
dtypes.string, dtypes.string):
dataset = map_op._MapDataset( # pylint: disable=protected-access
dataset, lambda _, v: v, use_inter_op_parallelism=False)
# Apply dataset repeat and shuffle transformations.
dataset = _maybe_shuffle_and_repeat(
dataset, num_epochs, shuffle, shuffle_buffer_size, shuffle_seed)
# NOTE(mrry): We set `drop_remainder=True` when `num_epochs is None` to
# improve the shape inference, because it makes the batch dimension static.
# It is safe to do this because in that case we are repeating the input
# indefinitely, and all batches will be full-sized.
dataset = dataset.batch(
batch_size, drop_remainder=drop_final_batch or num_epochs is None)
# Parse `Example` tensors to a dictionary of `Feature` tensors.
dataset = dataset.apply(
parsing_ops.parse_example_dataset(
features, num_parallel_calls=parser_num_threads))
if label_key:
if label_key not in features:
raise ValueError(
f"The `label_key` provided ({label_key}) must be one of the "
f"`features` keys: {features.keys()}.")
dataset = dataset.map(lambda x: (x, x.pop(label_key)))
dataset = dataset.prefetch(prefetch_buffer_size)
return dataset
@tf_export(v1=["data.experimental.make_batched_features_dataset"])
def make_batched_features_dataset_v1(file_pattern, # pylint: disable=missing-docstring
batch_size,
features,
reader=None,
label_key=None,
reader_args=None,
num_epochs=None,
shuffle=True,
shuffle_buffer_size=10000,
shuffle_seed=None,
prefetch_buffer_size=None,
reader_num_threads=None,
parser_num_threads=None,
sloppy_ordering=False,
drop_final_batch=False):
return dataset_ops.DatasetV1Adapter(make_batched_features_dataset_v2(
file_pattern, batch_size, features, reader, label_key, reader_args,
num_epochs, shuffle, shuffle_buffer_size, shuffle_seed,
prefetch_buffer_size, reader_num_threads, parser_num_threads,
sloppy_ordering, drop_final_batch))
make_batched_features_dataset_v1.__doc__ = (
make_batched_features_dataset_v2.__doc__)
def _get_file_names(file_pattern, shuffle):
"""Parse list of file names from pattern, optionally shuffled.
Args:
file_pattern: File glob pattern, or list of glob patterns.
shuffle: Whether to shuffle the order of file names.
Returns:
List of file names matching `file_pattern`.
Raises:
ValueError: If `file_pattern` is empty, or pattern matches no files.
"""
if isinstance(file_pattern, list):
if not file_pattern:
raise ValueError("Argument `file_pattern` should not be empty.")
file_names = []
for entry in file_pattern:
file_names.extend(gfile.Glob(entry))
else:
file_names = list(gfile.Glob(file_pattern))
if not file_names:
raise ValueError(f"No files match `file_pattern` {file_pattern}.")
# Sort files so it will be deterministic for unit tests.
if not shuffle:
file_names = sorted(file_names)
return file_names
@tf_export("data.experimental.SqlDataset", v1=[])
| CsvDatasetV1 |
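For the `make_batched_features_dataset` record above, a minimal usage sketch (assuming TensorFlow 2.x; the TFRecord path is a made-up placeholder and the feature spec mirrors the docstring example):
```python
import tensorflow as tf

# Feature spec taken from the docstring example above.
feature_spec = {
    "age": tf.io.FixedLenFeature([], dtype=tf.int64, default_value=-1),
    "gender": tf.io.FixedLenFeature([], dtype=tf.string),
    "kws": tf.io.VarLenFeature(dtype=tf.string),
}

dataset = tf.data.experimental.make_batched_features_dataset(
    file_pattern="/tmp/examples.tfrecord",  # placeholder path
    batch_size=2,
    features=feature_spec,
    num_epochs=1,
    shuffle=False,
)

for batch in dataset.take(1):
    # Each batch maps feature keys to dense Tensors / SparseTensors,
    # as in the docstring's expected output.
    print(batch["age"], batch["kws"])
```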
python | PrefectHQ__prefect | tests/blocks/test_abstract.py | {
"start": 236,
"end": 1059
} | class ____:
def test_credentials_block_is_abstract(self):
with pytest.raises(
TypeError, match="Can't instantiate abstract class CredentialsBlock"
):
CredentialsBlock()
def test_credentials_block_implementation(self, caplog):
class ACredentialsBlock(CredentialsBlock):
def get_client(self):
self.logger.info("Got client.")
return "client"
a_credentials_block = ACredentialsBlock()
assert a_credentials_block.get_client() == "client"
# test logging
assert hasattr(a_credentials_block, "logger")
assert len(caplog.records) == 1
record = caplog.records[0]
assert record.name == "prefect.ACredentialsBlock"
assert record.msg == "Got client."
| TestCredentialsBlock |
python | getsentry__sentry | src/sentry/workflow_engine/typings/notification_action.py | {
"start": 10920,
"end": 11647
} | class ____(BaseActionTranslator):
@property
def action_type(self) -> ActionType:
return ActionType.MSTEAMS
@property
def required_fields(self) -> list[str]:
return [
ACTION_FIELD_MAPPINGS[ActionType.MSTEAMS][
ActionFieldMappingKeys.INTEGRATION_ID_KEY.value
],
ACTION_FIELD_MAPPINGS[ActionType.MSTEAMS][
ActionFieldMappingKeys.TARGET_IDENTIFIER_KEY.value
],
ACTION_FIELD_MAPPINGS[ActionType.MSTEAMS][
ActionFieldMappingKeys.TARGET_DISPLAY_KEY.value
],
]
@property
def target_type(self) -> int:
return ActionTarget.SPECIFIC.value
| MSTeamsActionTranslator |
python | walkccc__LeetCode | solutions/3327. Check if DFS Strings Are Palindromes/3327.py | {
"start": 0,
"end": 2376
} | class ____:
def findAnswer(self, parent: list[int], s: str) -> list[bool]:
n = len(parent)
tree = [[] for _ in parent]
start = [0] * n # start[i] := the start index of `dfsStr` of node i
end = [0] * n # end[i] := the end index of `dfsStr` of node i
dfsStr = []
for i in range(1, n):
tree[parent[i]].append(i)
self._dfs(tree, 0, 0, s, start, end, dfsStr)
t = '#'.join('@' + ''.join(dfsStr) + '$')
p = self._manacher(t)
return [self._isPalindrome(s, e, p)
for s, e in zip(start, end)]
def _dfs(
self,
tree: list[list[int]],
u: int,
index: int,
s: str,
start: list[int],
end: list[int],
dfsStr: list[str]
) -> int:
"""Returns the start index of the "DFS string" of u's next node."""
start[u] = index
for v in tree[u]:
index = self._dfs(tree, v, index, s, start, end, dfsStr)
end[u] = index
dfsStr.append(s[u])
return index + 1
def _manacher(self, t: str) -> list[int]:
"""
Returns an array `p` s.t. `p[i]` is the length of the longest palindrome
centered at `t[i]`, where `t` is a string with delimiters and sentinels.
"""
p = [0] * len(t)
center = 0
for i in range(1, len(t) - 1):
rightBoundary = center + p[center]
mirrorIndex = center - (i - center)
if rightBoundary > i:
p[i] = min(rightBoundary - i, p[mirrorIndex])
# Try to expand the palindrome centered at i.
while t[i + 1 + p[i]] == t[i - 1 - p[i]]:
p[i] += 1
# If a palindrome centered at i expands past `rightBoundary`, adjust
# the center based on the expanded palindrome.
if i + p[i] > rightBoundary:
center = i
return p
def _isPalindrome(self, s: int, e: int, p: list[int]) -> bool:
"""
Returns true if `dfsStr[s..e]` is a palindrome by using the precomputed
array `p` from the Manacher's algorithm.
The precomputed array `p` is based on the string `t` with delimiters and
sentinels. Let `t = '#'.join('@' + dfsStr + '$')`. Then, the center of
`dfsStr` maps to `t[s + e + 2]` since `dfsStr[s]` maps to `t[2 * s + 2]`
and `dfsStr[e]` maps to `t[2 * e + 2]`. So, the center of `dfsStr` is
`t[(2 * s + 2 + 2 * e + 2) / 2] = t[s + e + 2]`.
"""
length = e - s + 1
center = s + e + 2
return p[center] >= length
| Solution |
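A small hand-checkable illustration of the Manacher helpers in the record above (assumes the masked class is `Solution`, per the record's target, and feeds "aba" directly as if it were the DFS string):
```python
sol = Solution()
t = '#'.join('@' + 'aba' + '$')   # "@#a#b#a#$", delimiters plus sentinels
p = sol._manacher(t)
assert p[4] == 3                  # longest palindrome centered at 'b' spans "aba"
# dfsStr[0..2] maps to center t[0 + 2 + 2] = t[4], so the whole string is a palindrome:
assert sol._isPalindrome(0, 2, p)
```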
python | scipy__scipy | scipy/linalg/tests/test_lapack.py | {
"start": 2502,
"end": 5847
} | class ____:
def test_gebal(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
a1 = [[1, 0, 0, 3e-4],
[4, 0, 0, 2e-3],
[7, 1, 0, 0],
[0, 1, 0, 0]]
for p in 'sdzc':
f = getattr(flapack, p+'gebal', None)
if f is None:
continue
ba, lo, hi, pivscale, info = f(a)
assert_(not info, repr(info))
assert_array_almost_equal(ba, a)
assert_equal((lo, hi), (0, len(a[0])-1))
assert_array_almost_equal(pivscale, np.ones(len(a)))
ba, lo, hi, pivscale, info = f(a1, permute=1, scale=1)
assert_(not info, repr(info))
# print(a1)
# print(ba, lo, hi, pivscale)
def test_gehrd(self):
a = [[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]]
for p in 'd':
f = getattr(flapack, p+'gehrd', None)
if f is None:
continue
ht, tau, info = f(a)
assert_(not info, repr(info))
def test_trsyl(self):
a = np.array([[1, 2], [0, 4]])
b = np.array([[5, 6], [0, 8]])
c = np.array([[9, 10], [11, 12]])
trans = 'T'
# Test single and double implementations, including most
# of the options
for dtype in 'fdFD':
a1, b1, c1 = a.astype(dtype), b.astype(dtype), c.astype(dtype)
trsyl, = get_lapack_funcs(('trsyl',), (a1,))
if dtype.isupper(): # is complex dtype
a1[0] += 1j
trans = 'C'
x, scale, info = trsyl(a1, b1, c1)
assert_array_almost_equal(np.dot(a1, x) + np.dot(x, b1),
scale * c1)
x, scale, info = trsyl(a1, b1, c1, trana=trans, tranb=trans)
assert_array_almost_equal(
np.dot(a1.conjugate().T, x) + np.dot(x, b1.conjugate().T),
scale * c1, decimal=4)
x, scale, info = trsyl(a1, b1, c1, isgn=-1)
assert_array_almost_equal(np.dot(a1, x) - np.dot(x, b1),
scale * c1, decimal=4)
def test_lange(self):
a = np.array([
[-149, -50, -154],
[537, 180, 546],
[-27, -9, -25]])
for dtype in 'fdFD':
for norm_str in 'Mm1OoIiFfEe':
a1 = a.astype(dtype)
if dtype.isupper():
# is complex dtype
a1[0, 0] += 1j
lange, = get_lapack_funcs(('lange',), (a1,))
value = lange(norm_str, a1)
if norm_str in 'FfEe':
if dtype in 'Ff':
decimal = 3
else:
decimal = 7
ref = np.sqrt(np.sum(np.square(np.abs(a1))))
assert_almost_equal(value, ref, decimal)
else:
if norm_str in 'Mm':
ref = np.max(np.abs(a1))
elif norm_str in '1Oo':
ref = np.max(np.sum(np.abs(a1), axis=0))
elif norm_str in 'Ii':
ref = np.max(np.sum(np.abs(a1), axis=1))
assert_equal(value, ref)
| TestFlapackSimple |
python | tensorflow__tensorflow | tensorflow/examples/custom_ops_doc/simple_hash_table/simple_hash_table_test.py | {
"start": 1114,
"end": 4851
} | class ____(tf.test.TestCase, parameterized.TestCase):
  # Helper function using "create, find, insert, find, remove, find"
def _use_table(self, key_dtype, value_dtype):
hash_table = simple_hash_table.SimpleHashTable(key_dtype, value_dtype, 111)
result1 = hash_table.find(1, -999)
hash_table.insert(1, 100)
result2 = hash_table.find(1, -999)
hash_table.remove(1)
result3 = hash_table.find(1, -999)
results = tf.stack((result1, result2, result3))
return results # expect [-999, 100, -999]
# Test of "create, find, insert, find" in eager mode.
@parameterized.named_parameters(('int32_float', tf.int32, float),
('int64_int32', tf.int64, tf.int32))
def test_find_insert_find_eager(self, key_dtype, value_dtype):
results = self._use_table(key_dtype, value_dtype)
self.assertAllClose(results, [-999, 100, -999])
# Test of "create, find, insert, find" in a tf.function. Note that the
# creation and use of the ref-counted resource occurs inside a single
# self.evaluate.
@parameterized.named_parameters(('int32_float', tf.int32, float),
('int64_int32', tf.int64, tf.int32))
def test_find_insert_find_tf_function(self, key_dtype, value_dtype):
results = def_function.function(
lambda: self._use_table(key_dtype, value_dtype))
self.assertAllClose(self.evaluate(results), [-999.0, 100.0, -999.0])
# strings for key and value
def test_find_insert_find_strings_eager(self):
default = 'Default'
foo = 'Foo'
bar = 'Bar'
hash_table = simple_hash_table.SimpleHashTable(tf.string, tf.string,
default)
result1 = hash_table.find(foo, default)
self.assertEqual(result1, default)
hash_table.insert(foo, bar)
result2 = hash_table.find(foo, default)
self.assertEqual(result2, bar)
def test_export(self):
table = simple_hash_table.SimpleHashTable(
tf.int64, tf.int64, default_value=-1)
table.insert(1, 100)
table.insert(2, 200)
table.insert(3, 300)
keys, values = self.evaluate(table.export())
self.assertAllEqual(sorted(keys), [1, 2, 3])
self.assertAllEqual(sorted(values), [100, 200, 300])
def test_import(self):
table = simple_hash_table.SimpleHashTable(
tf.int64, tf.int64, default_value=-1)
keys = tf.constant([1, 2, 3], dtype=tf.int64)
values = tf.constant([100, 200, 300], dtype=tf.int64)
table.do_import(keys, values)
self.assertEqual(table.find(1), 100)
self.assertEqual(table.find(2), 200)
self.assertEqual(table.find(3), 300)
self.assertEqual(table.find(9), -1)
@test_util.run_v2_only
def testSavedModelSaveRestore(self):
save_dir = os.path.join(self.get_temp_dir(), 'save_restore')
save_path = os.path.join(tempfile.mkdtemp(prefix=save_dir), 'hash')
# TODO(b/203097231) is there an alternative that is not __internal__?
root = tf.__internal__.tracking.AutoTrackable()
default_value = -1
root.table = simple_hash_table.SimpleHashTable(
tf.int64, tf.int64, default_value=default_value)
@def_function.function(input_signature=[tf.TensorSpec((), tf.int64)])
def lookup(key):
return root.table.find(key)
root.lookup = lookup
root.table.insert(1, 100)
root.table.insert(2, 200)
root.table.insert(3, 300)
self.assertEqual(root.lookup(2), 200)
self.assertAllEqual(3, len(self.evaluate(root.table.export()[0])))
tf.saved_model.save(root, save_path)
del root
loaded = tf.saved_model.load(save_path)
self.assertEqual(loaded.lookup(2), 200)
self.assertEqual(loaded.lookup(10), -1)
if __name__ == '__main__':
tf.test.main()
| SimpleHashTableTest |
python | davidhalter__jedi | jedi/inference/arguments.py | {
"start": 1059,
"end": 4449
} | class ____(Exception):
pass
def repack_with_argument_clinic(clinic_string):
"""
Transforms a function or method with arguments to the signature that is
given as an argument clinic notation.
Argument clinic is part of CPython and used for all the functions that are
implemented in C (Python 3.7):
str.split.__text_signature__
# Results in: '($self, /, sep=None, maxsplit=-1)'
"""
def decorator(func):
def wrapper(value, arguments):
try:
args = tuple(iterate_argument_clinic(
value.inference_state,
arguments,
clinic_string,
))
except ParamIssue:
return NO_VALUES
else:
return func(value, *args)
return wrapper
return decorator
def iterate_argument_clinic(inference_state, arguments, clinic_string):
"""Uses a list with argument clinic information (see PEP 436)."""
clinic_args = list(_parse_argument_clinic(clinic_string))
iterator = PushBackIterator(arguments.unpack())
for i, (name, optional, allow_kwargs, stars) in enumerate(clinic_args):
if stars == 1:
lazy_values = []
for key, argument in iterator:
if key is not None:
iterator.push_back((key, argument))
break
lazy_values.append(argument)
yield ValueSet([iterable.FakeTuple(inference_state, lazy_values)])
lazy_values
continue
elif stars == 2:
raise NotImplementedError()
key, argument = next(iterator, (None, None))
if key is not None:
debug.warning('Keyword arguments in argument clinic are currently not supported.')
raise ParamIssue
if argument is None and not optional:
debug.warning('TypeError: %s expected at least %s arguments, got %s',
name, len(clinic_args), i)
raise ParamIssue
value_set = NO_VALUES if argument is None else argument.infer()
if not value_set and not optional:
# For the stdlib we always want values. If we don't get them,
# that's ok, maybe something is too hard to resolve, however,
# we will not proceed with the type inference of that function.
debug.warning('argument_clinic "%s" not resolvable.', name)
raise ParamIssue
yield value_set
def _parse_argument_clinic(string):
allow_kwargs = False
optional = False
while string:
# Optional arguments have to begin with a bracket. And should always be
# at the end of the arguments. This is therefore not a proper argument
        # clinic implementation. `range()` for example allows an optional start
# value at the beginning.
match = re.match(r'(?:(?:(\[),? ?|, ?|)(\**\w+)|, ?/)\]*', string)
string = string[len(match.group(0)):]
if not match.group(2): # A slash -> allow named arguments
allow_kwargs = True
continue
optional = optional or bool(match.group(1))
word = match.group(2)
stars = word.count('*')
word = word[stars:]
yield (word, optional, allow_kwargs, stars)
if stars:
allow_kwargs = True
| ParamIssue |
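A hypothetical use of the decorator defined in the record above (the clinic string uses the bracket notation `_parse_argument_clinic` understands; the decorated function name is made up and, inside jedi, would receive one `ValueSet` per clinic argument):
```python
@repack_with_argument_clinic('obj[, default], /')
def _infer_first(value, objs, defaults):
    # `objs` and `defaults` are ValueSets unpacked by iterate_argument_clinic;
    # `defaults` is NO_VALUES when the optional argument is omitted.
    return objs or defaults
```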
python | miyuchina__mistletoe | test/test_html_renderer.py | {
"start": 5359,
"end": 6228
} | class ____(TestCase):
@parameterized.expand([
(False, False, '" and \''),
(False, True, '" and ''),
(True, False, '" and \''),
(True, True, '" and ''),
])
def test_escape_html_text(self, escape_double, escape_single, expected):
with HtmlRenderer(html_escape_double_quotes=escape_double,
html_escape_single_quotes=escape_single) as renderer:
self.assertEqual(renderer.escape_html_text('" and \''), expected)
def test_unprocessed_html_tokens_escaped(self):
with HtmlRenderer(process_html_tokens=False) as renderer:
token = Document(['<div><br> as plain text</div>\n'])
expected = '<p><div><br> as plain text</div></p>\n'
self.assertEqual(renderer.render(token), expected)
| TestHtmlRendererEscaping |
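A short sketch of the escaping options the parameterized test above drives (the input markdown is made up; the output shown is approximate):
```python
from mistletoe import Document
from mistletoe.html_renderer import HtmlRenderer

with HtmlRenderer(html_escape_double_quotes=True) as renderer:
    html = renderer.render(Document(['say "hi"\n']))
    print(html)  # roughly: <p>say &quot;hi&quot;</p>
```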
python | PyCQA__pylint | tests/functional/t/too/too_many_ancestors_ignored_parents.py | {
"start": 651,
"end": 709
} | class ____(B, C): # [too-many-ancestors]
"""5 parents"""
| A |
python | fluentpython__example-code-2e | 05-data-classes/meaning/demo_dc.py | {
"start": 46,
"end": 148
} | class ____:
a: int # <1>
b: float = 1.1 # <2>
c = 'spam' # <3>
| DemoDataClass |
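The point of this Fluent Python snippet is which names become dataclass fields; a self-contained restatement (assuming the `@dataclass` decorator that sits just outside the record's span):
```python
from dataclasses import dataclass, fields

@dataclass
class DemoDataClass:
    a: int             # annotated, no default -> required field
    b: float = 1.1     # annotated with default -> optional field
    c = 'spam'         # no annotation -> plain class attribute, not a field

print([f.name for f in fields(DemoDataClass)])  # ['a', 'b']
d = DemoDataClass(9)
print(d.a, d.b, d.c)                            # 9 1.1 spam
```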
python | getsentry__sentry | tests/sentry/integrations/discord/test_utils.py | {
"start": 580,
"end": 1404
} | class ____(TestCase):
def test_verify_signature_valid(self) -> None:
public_key_string = "3AC1A3E56E967E1C61E3D17B37FA1865CB20CD6C54418631F4E8AE4D1E83EE0E"
signature = "DBC99471F8DD30BA0F488912CF9BA7AC1E938047782BB72FF9A6873D452A1A75DC9F8A07182B8EB7FC67A3771C2271D568DCDC2AB2A5D927A42A4F0FC233C506"
timestamp = "1688960024"
body = '{"type":1}'
verify_signature(public_key_string, signature, timestamp, body)
def test_verify_signature_invalid(self) -> None:
public_key_string = "3AC1A3E56E967E1C61E3D17B37FA1865CB20CD6C54418631F4E8AE4D1E83EE0E"
signature = "0123456789abcdef"
timestamp = "1688960024"
body = '{"type":1}'
with raises(InvalidSignature):
verify_signature(public_key_string, signature, timestamp, body)
| AuthTest |
python | paramiko__paramiko | tests/_stub_sftp.py | {
"start": 1116,
"end": 1347
} | class ____(ServerInterface):
def check_auth_password(self, username, password):
# all are allowed
return AUTH_SUCCESSFUL
def check_channel_request(self, kind, chanid):
return OPEN_SUCCEEDED
| StubServer |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 4396,
"end": 4541
} | class ____(GreatExpectationsError):
def __init__(self) -> None:
super().__init__("No available batches found.")
| NoAvailableBatchesError |
python | huggingface__transformers | src/transformers/models/distilbert/modeling_distilbert.py | {
"start": 8978,
"end": 10216
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: PreTrainedConfig):
super().__init__()
        # The hidden size must divide evenly across the attention heads
if config.dim % config.n_heads != 0:
raise ValueError(f"config.n_heads {config.n_heads} must divide config.dim {config.dim} evenly")
self.attention = DistilBertSelfAttention(config)
self.sa_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
self.ffn = FFN(config)
self.output_layer_norm = nn.LayerNorm(normalized_shape=config.dim, eps=1e-12)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor, ...]:
# Self-Attention
attention_output, _ = self.attention(
hidden_states,
attention_mask=attention_mask,
**kwargs,
)
attention_output = self.sa_layer_norm(attention_output + hidden_states)
# Feed Forward Network
ffn_output = self.ffn(attention_output)
ffn_output = self.output_layer_norm(ffn_output + attention_output)
return ffn_output
| TransformerBlock |
python | doocs__leetcode | solution/2000-2099/2025.Maximum Number of Ways to Partition an Array/Solution.py | {
"start": 0,
"end": 662
} | class ____:
def waysToPartition(self, nums: List[int], k: int) -> int:
n = len(nums)
s = [nums[0]] * n
right = defaultdict(int)
for i in range(1, n):
s[i] = s[i - 1] + nums[i]
right[s[i - 1]] += 1
ans = 0
if s[-1] % 2 == 0:
ans = right[s[-1] // 2]
left = defaultdict(int)
for v, x in zip(s, nums):
d = k - x
if (s[-1] + d) % 2 == 0:
t = left[(s[-1] + d) // 2] + right[(s[-1] - d) // 2]
if ans < t:
ans = t
left[v] += 1
right[v] -= 1
return ans
| Solution |
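A quick check of the prefix-sum solution above against LeetCode 2025's first example (assumes the masked class is `Solution` and its elided imports, `List` and `defaultdict`, are in scope):
```python
# nums = [2, -1, 2], k = 3: only changing nums[0] to k yields one valid pivot.
assert Solution().waysToPartition([2, -1, 2], 3) == 1
```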
python | getsentry__sentry | src/sentry/replays/usecases/ingest/event_parser.py | {
"start": 970,
"end": 1068
} | class ____:
click_event: ClickEvent
click_count: int
@dataclass(frozen=True)
| MultiClickEvent |
python | pytorch__pytorch | test/distributed/fsdp/test_fsdp_sharded_grad_scaler.py | {
"start": 6056,
"end": 13989
} | class ____(FSDPTest):
def _get_init_modes_for_test(self, cpu_offload):
modes = [DEVICEInitMode.DEVICE_AFTER, DEVICEInitMode.DEVICE_BEFORE]
# Note that DEVICEInitMode.DEVICE_NEVER works currently only with CPU
# offload as we explicitly bring the param back to CUDA device. In
# general, it will not work since we try to all_gather p.data which is
# on CPU but NCCL only supports GPU.
if cpu_offload.offload_params:
modes.append(DEVICEInitMode.DEVICE_NEVER)
return modes
@skip_if_lt_x_gpu(2)
@parametrize(params, configs, subtest_name)
def test_fsdp_ddp_parity_with_grad_scaler(
self,
cpu_offload: CPUOffload,
sharding_strategy: Optional[ShardingStrategy],
mixed_precision: Optional[str],
use_orig_params: Optional[str],
):
init_modes = self._get_init_modes_for_test(cpu_offload)
mp = (
MixedPrecision(
param_dtype=torch.float16,
reduce_dtype=torch.float16,
buffer_dtype=torch.float16,
)
if mixed_precision is not None
else None
)
# the ``NonUniformReqGradNWM`` model requires we set `init_scale`
# more conservatively than default to avoid infs with the initial steps
if use_orig_params == "enable_use_orig_params":
use_orig = True
model_cls = NonUniformReqGradNWM
sharded_grad_scaler_kwargs = {"init_scale": 2.0**11}
else:
use_orig = False
model_cls = NestedWrappedModule # type: ignore[assignment]
sharded_grad_scaler_kwargs = None
for device_init_mode in init_modes:
self._test_fsdp_parity(
model_cls,
FSDPInitMode.RECURSIVE,
device_init_mode=device_init_mode,
cpu_offload=cpu_offload,
sharding_strategy=sharding_strategy,
mixed_precision=mp,
enable_sharded_grad_scaler=True,
use_orig_params=use_orig,
sharded_grad_scaler_kwargs=sharded_grad_scaler_kwargs,
)
def _build_model_and_optim(
self,
cpu_offload: CPUOffload = CPUOffload(offload_params=False),
use_orig_params: bool = False,
):
model = TransformerWithSharedParams.init(
self.process_group,
FSDPInitMode.NO_FSDP,
DEVICEInitMode.DEVICE_BEFORE,
deterministic=True,
)
ref_model = DDP(
copy.deepcopy(model),
device_ids=[self.rank],
)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
fsdp_kwargs = {
"use_orig_params": use_orig_params,
"cpu_offload": cpu_offload,
"auto_wrap_policy": ModuleWrapPolicy(
{
TransformerEncoderLayer,
TransformerDecoderLayer,
},
),
"device_id": self.rank,
}
model = FSDP(model, **fsdp_kwargs)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
return model, optim, ref_model, ref_optim
@skip_if_lt_x_gpu(2)
def test_sharded_grad_scaler_found_inf(self):
self.run_subtests(
{
"use_orig_params": [False, True],
"cpu_offload": [
CPUOffload(offload_params=True),
CPUOffload(offload_params=False),
],
},
self._test_sharded_grad_scaler_found_inf,
)
def _test_sharded_grad_scaler_found_inf(
self,
use_orig_params: bool,
cpu_offload: CPUOffload,
):
model, optim, ref_model, ref_optim = self._build_model_and_optim(
cpu_offload=cpu_offload,
use_orig_params=use_orig_params,
)
grad_scaler = ShardedGradScaler(device=device_type, init_scale=2.0)
ref_grad_scaler = torch.amp.GradScaler(device=device_type, init_scale=2.0)
scaled_losses: list[torch.Tensor] = []
device = torch.device(device_type)
torch.manual_seed(42 + self.rank + 1)
for iter in range(10):
for _model, _optim, _grad_scaler in (
(ref_model, ref_optim, ref_grad_scaler),
(model, optim, grad_scaler),
):
module = _model.module
inp = module.get_input(device)
_optim.zero_grad()
output = _model(*inp)
loss = module.get_loss(inp, output)
scaled_loss = _grad_scaler.scale(loss)
scaled_losses.append(scaled_loss)
scaled_loss.backward()
orig_params = [
param.detach().clone()
for param in _model.parameters()
if param.grad is not None
]
should_find_inf = iter % 2 == 0
if should_find_inf and (
_model is ref_model or (_model is model and self.rank == 0)
):
# other ranks should find infs from rank 0
# after collectives
for param in _model.parameters():
if param.grad is None:
continue
param.grad.fill_(float("inf"))
break
_grad_scaler.step(_optim)
orig_scale = _grad_scaler.get_scale()
_grad_scaler.update()
if should_find_inf:
self.assertEqual(
_grad_scaler.get_scale(),
orig_scale * _grad_scaler.get_backoff_factor(),
(
f"rank: {self.rank} iter: {iter} expect origin scale {orig_scale} "
f"to be backed off by {_grad_scaler.get_backoff_factor()} "
f"but got {_grad_scaler.get_scale()}"
),
)
else:
self.assertEqual(
_grad_scaler.get_scale(),
orig_scale,
(
f"rank: {self.rank} iter: {iter} expect same scale {orig_scale} "
f"but got {_grad_scaler.get_scale()}"
),
)
for param, orig_param in zip(
[param for param in _model.parameters() if param.grad is not None],
orig_params,
):
if should_find_inf:
self.assertEqual(
param,
orig_param,
(
f"rank: {self.rank} iter: {iter} expect the same params before "
f"and after optim.step but got {param} vs {orig_param}"
),
)
else:
self.assertNotEqual(
param,
orig_param,
(
f"rank: {self.rank} iter: {iter} expect the updated params after "
f"optim.step but got {param} vs {orig_param}"
),
)
self.assertEqual(
scaled_losses[0],
scaled_losses[1],
f"iter: {iter} {scaled_losses[0]} vs {scaled_losses[1]}",
)
instantiate_parametrized_tests(TestShardGradScaler)
instantiate_parametrized_tests(TestShardedGradScalerParityWithDDP)
if __name__ == "__main__":
run_tests()
| TestShardedGradScalerParityWithDDP |
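A single-process sketch of the `ShardedGradScaler` flow the tests above exercise (scale, backward, step, update). The gloo group, model, and hyperparameters are stand-ins, not the FSDP test fixtures, and the constructor arguments mirror the ones the test passes:
```python
import os

import torch
import torch.distributed as dist
from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

# One-process "world" so the scaler's collective inf-checks have a group.
os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
os.environ.setdefault("MASTER_PORT", "29500")
dist.init_process_group("gloo", rank=0, world_size=1)

model = torch.nn.Linear(4, 2)
optim = torch.optim.Adam(model.parameters(), lr=1e-2)
scaler = ShardedGradScaler(device="cpu", init_scale=2.0**11)

loss = model(torch.randn(8, 4)).sum()
scaler.scale(loss).backward()  # scaled backward pass
scaler.step(optim)             # skipped if non-finite grads were found
scaler.update()                # backoff / growth of the scale factor
dist.destroy_process_group()
```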
python | gevent__gevent | src/gevent/libev/watcher.py | {
"start": 6393,
"end": 6443
} | class ____(_base.CheckMixin, watcher):
pass
| check |
python | facebookresearch__faiss | tests/test_factory.py | {
"start": 12503,
"end": 12981
} | class ____(unittest.TestCase):
def test_constructor(self):
index = faiss.IndexIVFSpectralHash(faiss.IndexFlat(10), 10, 20, 10, 1)
gc.collect()
index.quantizer.ntotal # this should not crash
def test_replace_vt(self):
index = faiss.IndexIVFSpectralHash(faiss.IndexFlat(10), 10, 20, 10, 1)
index.replace_vt(faiss.ITQTransform(10, 10))
gc.collect()
index.vt.d_out # this should not crash
| TestIVFSpectralHashOwnership |
python | pytorch__pytorch | benchmarks/tensorexpr/reduction.py | {
"start": 5104,
"end": 5571
} | class ____(Reduce2DBench):
def __init__(self, mode, device, dtype, dim0, dim1):
super().__init__(mode, device, dtype, 1, dim0, dim1)
@staticmethod
def default_configs():
parent_config = Reduce2DBench.default_configs()[0]
return [parent_config[1:]]
def config(self):
parent_config = super().config()
return parent_config[1:]
@staticmethod
def module():
return "reduce2d_inner"
| Reduce2DInnerBench |
python | django__django | tests/serializers/test_jsonl.py | {
"start": 362,
"end": 9105
} | class ____(SerializersTestBase, TestCase):
serializer_name = "jsonl"
pkless_str = [
'{"pk": null,"model": "serializers.category","fields": {"name": "Reference"}}',
'{"model": "serializers.category","fields": {"name": "Non-fiction"}}',
]
pkless_str = "\n".join([s.replace("\n", "") for s in pkless_str])
mapping_ordering_str = (
'{"model": "serializers.article","pk": %(article_pk)s,'
'"fields": {'
'"author": %(author_pk)s,'
'"headline": "Poker has no place on ESPN",'
'"pub_date": "2006-06-16T11:00:00",'
'"categories": [%(first_category_pk)s,%(second_category_pk)s],'
'"meta_data": [],'
'"topics": []}}\n'
)
@staticmethod
def _validate_output(serial_str):
try:
for line in serial_str.split("\n"):
if line:
json.loads(line)
except Exception:
return False
else:
return True
@staticmethod
def _get_pk_values(serial_str):
serial_list = [json.loads(line) for line in serial_str.split("\n") if line]
return [obj_dict["pk"] for obj_dict in serial_list]
@staticmethod
def _get_field_values(serial_str, field_name):
serial_list = [json.loads(line) for line in serial_str.split("\n") if line]
return [
obj_dict["fields"][field_name]
for obj_dict in serial_list
if field_name in obj_dict["fields"]
]
def test_no_indentation(self):
s = serializers.jsonl.Serializer()
json_data = s.serialize([Score(score=5.0), Score(score=6.0)], indent=2)
for line in json_data.splitlines():
self.assertIsNone(re.search(r".+,\s*$", line))
@isolate_apps("serializers")
def test_custom_encoder(self):
class ScoreDecimal(models.Model):
score = models.DecimalField()
class CustomJSONEncoder(json.JSONEncoder):
def default(self, o):
if isinstance(o, decimal.Decimal):
return str(o)
return super().default(o)
s = serializers.jsonl.Serializer()
json_data = s.serialize(
[ScoreDecimal(score=decimal.Decimal(1.0))],
cls=CustomJSONEncoder,
)
self.assertIn('"fields": {"score": "1"}', json_data)
def test_json_deserializer_exception(self):
with self.assertRaises(DeserializationError):
for obj in serializers.deserialize("jsonl", """[{"pk":1}"""):
pass
def test_helpful_error_message_invalid_pk(self):
"""
If there is an invalid primary key, the error message contains the
model associated with it.
"""
test_string = (
'{"pk": "badpk","model": "serializers.player",'
'"fields": {"name": "Bob","rank": 1,"team": "Team"}}'
)
with self.assertRaisesMessage(
DeserializationError, "(serializers.player:pk=badpk)"
):
list(serializers.deserialize("jsonl", test_string))
def test_helpful_error_message_invalid_field(self):
"""
If there is an invalid field value, the error message contains the
model associated with it.
"""
test_string = (
'{"pk": "1","model": "serializers.player",'
'"fields": {"name": "Bob","rank": "invalidint","team": "Team"}}'
)
expected = "(serializers.player:pk=1) field_value was 'invalidint'"
with self.assertRaisesMessage(DeserializationError, expected):
list(serializers.deserialize("jsonl", test_string))
def test_helpful_error_message_for_foreign_keys(self):
"""
        Invalid foreign keys with a natural key throw a helpful error message
        that identifies the failing key.
"""
test_string = (
'{"pk": 1, "model": "serializers.category",'
'"fields": {'
'"name": "Unknown foreign key",'
'"meta_data": ["doesnotexist","metadata"]}}'
)
key = ["doesnotexist", "metadata"]
expected = "(serializers.category:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(DeserializationError, expected):
list(serializers.deserialize("jsonl", test_string))
def test_helpful_error_message_for_many2many_non_natural(self):
"""
        Invalid many-to-many keys throw a helpful error message.
"""
test_strings = [
"""{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"categories": [1, "doesnotexist"]
}
}""",
"""{
"pk": 1,
"model": "serializers.author",
"fields": {"name": "Agnes"}
}""",
"""{
"pk": 1,
"model": "serializers.category",
"fields": {"name": "Reference"}
}""",
]
test_string = "\n".join([s.replace("\n", "") for s in test_strings])
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(DeserializationError, expected):
list(serializers.deserialize("jsonl", test_string))
def test_helpful_error_message_for_many2many_natural1(self):
"""
        Invalid many-to-many keys throw a helpful error message where one of a
list of natural keys is invalid.
"""
test_strings = [
"""{
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {"kind": "author","name": "meta1","value": "Agnes"}
}""",
"""{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [
["author", "meta1"],
["doesnotexist", "meta1"],
["author", "meta1"]
]
}
}""",
"""{
"pk": 1,
"model": "serializers.author",
"fields": {"name": "Agnes"}
}""",
]
test_string = "\n".join([s.replace("\n", "") for s in test_strings])
key = ["doesnotexist", "meta1"]
expected = "(serializers.article:pk=1) field_value was '%r'" % key
with self.assertRaisesMessage(DeserializationError, expected):
for obj in serializers.deserialize("jsonl", test_string):
obj.save()
def test_helpful_error_message_for_many2many_natural2(self):
"""
        Invalid many-to-many keys throw a helpful error message where a
natural many-to-many key has only a single value.
"""
test_strings = [
"""{
"pk": 1,
"model": "serializers.article",
"fields": {
"author": 1,
"headline": "Unknown many to many",
"pub_date": "2014-09-15T10:35:00",
"meta_data": [1, "doesnotexist"]
}
}""",
"""{
"pk": 1,
"model": "serializers.categorymetadata",
"fields": {"kind": "author","name": "meta1","value": "Agnes"}
}""",
"""{
"pk": 1,
"model": "serializers.author",
"fields": {"name": "Agnes"}
}""",
]
test_string = "\n".join([s.replace("\n", "") for s in test_strings])
expected = "(serializers.article:pk=1) field_value was 'doesnotexist'"
with self.assertRaisesMessage(DeserializationError, expected):
for obj in serializers.deserialize("jsonl", test_string, ignore=False):
obj.save()
def test_helpful_error_message_for_many2many_not_iterable(self):
"""
        A non-iterable many-to-many field value throws a helpful error message.
"""
test_string = (
'{"pk": 1,"model": "serializers.m2mdata","fields": {"data": null}}'
)
expected = "(serializers.m2mdata:pk=1) field_value was 'None'"
with self.assertRaisesMessage(DeserializationError, expected):
next(serializers.deserialize("jsonl", test_string, ignore=False))
| JsonlSerializerTestCase |
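The format these tests exercise is one JSON object per line; a hedged usage sketch (runs only inside the test project, since it needs configured settings and the `Score` model):
```python
from django.core import serializers

jsonl_data = serializers.serialize("jsonl", [Score(score=5.0), Score(score=6.0)])
for line in jsonl_data.splitlines():
    print(line)  # e.g. {"model": "serializers.score", "pk": null, "fields": {"score": 5.0}}

for deserialized in serializers.deserialize("jsonl", jsonl_data):
    print(deserialized.object.score)
```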
python | apache__airflow | task-sdk/src/airflow/sdk/definitions/asset/__init__.py | {
"start": 7141,
"end": 8491
} | class ____:
"""
Protocol for all asset triggers to use in ``DAG(schedule=...)``.
:meta private:
"""
def __bool__(self) -> bool:
return True
def __or__(self, other: BaseAsset) -> BaseAsset:
if not isinstance(other, BaseAsset):
return NotImplemented
return AssetAny(self, other)
def __and__(self, other: BaseAsset) -> BaseAsset:
if not isinstance(other, BaseAsset):
return NotImplemented
return AssetAll(self, other)
def as_expression(self) -> Any:
"""
Serialize the asset into its scheduling expression.
The return value is stored in DagModel for display purposes. It must be
JSON-compatible.
:meta private:
"""
raise NotImplementedError
def iter_assets(self) -> Iterator[tuple[AssetUniqueKey, Asset]]:
raise NotImplementedError
def iter_asset_aliases(self) -> Iterator[tuple[str, AssetAlias]]:
raise NotImplementedError
def iter_asset_refs(self) -> Iterator[AssetRef]:
raise NotImplementedError
def iter_dag_dependencies(self, *, source: str, target: str) -> Iterator[DagDependency]:
"""
Iterate a base asset as dag dependency.
:meta private:
"""
raise NotImplementedError
@attrs.define(init=False)
| BaseAsset |
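A hedged sketch of composing assets with the `__or__`/`__and__` operators this protocol defines (imports and URIs are assumptions based on the task SDK layout, not taken from this file):
```python
from airflow.sdk import DAG, Asset

raw = Asset("s3://bucket/raw.parquet")      # made-up URIs
clean = Asset("s3://bucket/clean.parquet")

# `raw | clean` builds an AssetAny; `raw & clean` would build an AssetAll.
with DAG(dag_id="downstream", schedule=(raw | clean)):
    ...
```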
python | pandas-dev__pandas | pandas/tests/plotting/frame/test_hist_box_by.py | {
"start": 548,
"end": 6611
} | class ____:
@pytest.mark.slow
@pytest.mark.parametrize(
"by, column, titles, legends",
[
("C", "A", ["a", "b", "c"], [["A"]] * 3),
("C", ["A", "B"], ["a", "b", "c"], [["A", "B"]] * 3),
("C", None, ["a", "b", "c"], [["A", "B"]] * 3),
(
["C", "D"],
"A",
[
"(a, a)",
"(b, b)",
"(c, c)",
],
[["A"]] * 3,
),
(
["C", "D"],
["A", "B"],
[
"(a, a)",
"(b, b)",
"(c, c)",
],
[["A", "B"]] * 3,
),
(
["C", "D"],
None,
[
"(a, a)",
"(b, b)",
"(c, c)",
],
[["A", "B"]] * 3,
),
],
)
def test_hist_plot_by_argument(self, by, column, titles, legends, hist_df):
# GH 15079
axes = _check_plot_works(
hist_df.plot.hist, column=column, by=by, default_axes=True
)
result_titles = [ax.get_title() for ax in axes]
result_legends = [
[legend.get_text() for legend in ax.get_legend().texts] for ax in axes
]
assert result_legends == legends
assert result_titles == titles
@pytest.mark.parametrize(
"by, column, titles, legends",
[
(0, "A", ["a", "b", "c"], [["A"]] * 3),
(0, None, ["a", "b", "c"], [["A", "B"]] * 3),
(
[0, "D"],
"A",
[
"(a, a)",
"(b, b)",
"(c, c)",
],
[["A"]] * 3,
),
],
)
def test_hist_plot_by_0(self, by, column, titles, legends, hist_df):
# GH 15079
df = hist_df.copy()
df = df.rename(columns={"C": 0})
axes = _check_plot_works(df.plot.hist, default_axes=True, column=column, by=by)
result_titles = [ax.get_title() for ax in axes]
result_legends = [
[legend.get_text() for legend in ax.get_legend().texts] for ax in axes
]
assert result_legends == legends
assert result_titles == titles
@pytest.mark.parametrize(
"by, column",
[
([], ["A"]),
([], ["A", "B"]),
((), None),
((), ["A", "B"]),
],
)
def test_hist_plot_empty_list_string_tuple_by(self, by, column, hist_df):
# GH 15079
msg = "No group keys passed"
with pytest.raises(ValueError, match=msg):
_check_plot_works(
hist_df.plot.hist, default_axes=True, column=column, by=by
)
@pytest.mark.slow
@pytest.mark.parametrize(
"by, column, layout, axes_num",
[
(["C"], "A", (2, 2), 3),
("C", "A", (2, 2), 3),
(["C"], ["A"], (1, 3), 3),
("C", None, (3, 1), 3),
("C", ["A", "B"], (3, 1), 3),
(["C", "D"], "A", (9, 1), 3),
(["C", "D"], "A", (3, 3), 3),
(["C", "D"], ["A"], (5, 2), 3),
(["C", "D"], ["A", "B"], (9, 1), 3),
(["C", "D"], None, (9, 1), 3),
(["C", "D"], ["A", "B"], (5, 2), 3),
],
)
def test_hist_plot_layout_with_by(self, by, column, layout, axes_num, hist_df):
# GH 15079
# _check_plot_works adds an ax so catch warning. see GH #13188
with tm.assert_produces_warning(UserWarning, check_stacklevel=False):
axes = _check_plot_works(
hist_df.plot.hist, column=column, by=by, layout=layout
)
_check_axes_shape(axes, axes_num=axes_num, layout=layout)
@pytest.mark.parametrize(
"msg, by, layout",
[
("larger than required size", ["C", "D"], (1, 1)),
(re.escape("Layout must be a tuple of (rows, columns)"), "C", (1,)),
("At least one dimension of layout must be positive", "C", (-1, -1)),
],
)
def test_hist_plot_invalid_layout_with_by_raises(self, msg, by, layout, hist_df):
# GH 15079, test if error is raised when invalid layout is given
with pytest.raises(ValueError, match=msg):
hist_df.plot.hist(column=["A", "B"], by=by, layout=layout)
@pytest.mark.slow
def test_axis_share_x_with_by(self, hist_df):
# GH 15079
ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharex=True)
# share x
assert get_x_axis(ax1).joined(ax1, ax2)
assert get_x_axis(ax2).joined(ax1, ax2)
assert get_x_axis(ax3).joined(ax1, ax3)
assert get_x_axis(ax3).joined(ax2, ax3)
# don't share y
assert not get_y_axis(ax1).joined(ax1, ax2)
assert not get_y_axis(ax2).joined(ax1, ax2)
assert not get_y_axis(ax3).joined(ax1, ax3)
assert not get_y_axis(ax3).joined(ax2, ax3)
@pytest.mark.slow
def test_axis_share_y_with_by(self, hist_df):
# GH 15079
ax1, ax2, ax3 = hist_df.plot.hist(column="A", by="C", sharey=True)
# share y
assert get_y_axis(ax1).joined(ax1, ax2)
assert get_y_axis(ax2).joined(ax1, ax2)
assert get_y_axis(ax3).joined(ax1, ax3)
assert get_y_axis(ax3).joined(ax2, ax3)
# don't share x
assert not get_x_axis(ax1).joined(ax1, ax2)
assert not get_x_axis(ax2).joined(ax1, ax2)
assert not get_x_axis(ax3).joined(ax1, ax3)
assert not get_x_axis(ax3).joined(ax2, ax3)
@pytest.mark.parametrize("figsize", [(12, 8), (20, 10)])
def test_figure_shape_hist_with_by(self, figsize, hist_df):
# GH 15079
axes = hist_df.plot.hist(column="A", by="C", figsize=figsize)
_check_axes_shape(axes, axes_num=3, figsize=figsize)
| TestHistWithBy |
python | spyder-ide__spyder | spyder/api/widgets/main_container.py | {
"start": 516,
"end": 5438
} | class ____(QWidget, SpyderWidgetMixin):
"""
Spyder plugin main container class.
This class handles a non-dockable widget to be able to contain, parent and
store references to other widgets, like status bar widgets, toolbars,
context menus, etc.
Notes
-----
All Spyder non dockable plugins can define a plugin container that must
subclass this.
"""
CONTEXT_NAME = None
"""
This optional attribute defines the context name under which actions,
toolbars, toolbuttons and menus should be registered on the
Spyder global registry.
If actions, toolbars, toolbuttons or menus belong to the global scope of
the plugin, then this attribute should have a `None` value.
"""
# ---- Signals
# ------------------------------------------------------------------------
sig_free_memory_requested = Signal()
"""
This signal can be emitted to request the main application to garbage
collect deleted objects.
"""
sig_quit_requested = Signal()
"""
This signal can be emitted to request the main application to quit.
"""
sig_restart_requested = Signal()
"""
This signal can be emitted to request the main application to restart.
"""
sig_redirect_stdio_requested = Signal(bool)
"""
This signal can be emitted to request the main application to redirect
standard output/error when using Open/Save/Browse dialogs within widgets.
Parameters
----------
enable: bool
Enable/Disable standard input/output redirection.
"""
sig_exception_occurred = Signal(dict)
"""
This signal can be emitted to report an exception handled by this widget.
Parameters
----------
error_data: dict
The dictionary containing error data. The expected keys are:
.. code-block:: python
error_data = {
"text": str,
"is_traceback": bool,
"repo": str,
"title": str,
"label": str,
"steps": str,
}
Notes
-----
The `is_traceback` key indicates if `text` contains plain text or a
Python error traceback.
The `title` and `repo` keys indicate how the error data should
customize the report dialog and Github error submission.
The `label` and `steps` keys allow customizing the content of the
error dialog.
"""
sig_unmaximize_plugin_requested = Signal((), (object,))
"""
This signal is emitted to inform the main window that it needs to
unmaximize the currently maximized plugin, if any.
Parameters
----------
plugin_instance: spyder.api.plugins.SpyderDockablePlugin
Unmaximize plugin only if it is not `plugin_instance`.
"""
def __init__(self, name, plugin, parent=None):
if not PYSIDE2:
super().__init__(parent=parent, class_parent=plugin)
else:
QWidget.__init__(self, parent)
SpyderWidgetMixin.__init__(self, class_parent=plugin)
# ---- Attributes
# --------------------------------------------------------------------
self._name = name
self._plugin = plugin
self._parent = parent
# Attribute used to access the action, toolbar, toolbutton and menu
# registries
self.PLUGIN_NAME = name
# Widget setup
# A PluginMainContainer inherits from QWidget so it can be a parent
# for the widgets it contains. Since it is a QWidget it will occupy a
# physical space on the screen and may cast "shadow" on the top left
# of the main window. To prevent this we ensure the widget has zero
# width and zero height.
# See: spyder-ide/spyder#13547
self.setMaximumWidth(0)
self.setMaximumHeight(0)
# ---- Public Qt overridden methods
# -------------------------------------------------------------------------
def closeEvent(self, event):
self.on_close()
super().closeEvent(event)
# ---- API: methods to define or override
# ------------------------------------------------------------------------
def setup(self):
"""
Create actions, widgets, add to menu and other setup requirements.
"""
raise NotImplementedError(
'A PluginMainContainer subclass must define a `setup` method!')
def update_actions(self):
"""
Update the state of exposed actions.
Exposed actions are actions created by the self.create_action method.
"""
raise NotImplementedError(
'A PluginMainContainer subclass must define a `update_actions` '
'method!')
def on_close(self):
"""
Perform actions before the container is closed.
This method **must** only operate on local attributes.
"""
pass
| PluginMainContainer |
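A minimal subclass sketch showing the two methods `PluginMainContainer` requires (per the `NotImplementedError` messages above); the action name and text are made up and `create_action` comes from `SpyderWidgetMixin`:
```python
class ExampleContainer(PluginMainContainer):

    def setup(self):
        # Create the widgets/actions this container owns.
        self.example_action = self.create_action(
            "example_action",
            text="Run example",
            triggered=lambda: None,
        )

    def update_actions(self):
        # Refresh the state of exposed actions.
        self.example_action.setEnabled(True)
```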
python | anthropics__anthropic-sdk-python | src/anthropic/types/beta/beta_tool_text_editor_20250429_param.py | {
"start": 361,
"end": 1086
} | class ____(TypedDict, total=False):
name: Required[Literal["str_replace_based_edit_tool"]]
"""Name of the tool.
This is how the tool will be called by the model and in `tool_use` blocks.
"""
type: Required[Literal["text_editor_20250429"]]
allowed_callers: List[Literal["direct", "code_execution_20250825"]]
cache_control: Optional[BetaCacheControlEphemeralParam]
"""Create a cache control breakpoint at this content block."""
defer_loading: bool
"""If true, tool will not be included in initial system prompt.
Only loaded when returned via tool_reference from tool search.
"""
input_examples: Iterable[Dict[str, object]]
strict: bool
| BetaToolTextEditor20250429Param |
python | apache__airflow | providers/apache/kafka/src/airflow/providers/apache/kafka/hooks/client.py | {
"start": 1039,
"end": 2686
} | class ____(KafkaBaseHook):
"""
A hook for interacting with the Kafka Cluster.
:param kafka_config_id: The connection object to use, defaults to "kafka_default"
"""
def __init__(self, kafka_config_id=KafkaBaseHook.default_conn_name) -> None:
super().__init__(kafka_config_id=kafka_config_id)
def create_topic(
self,
topics: Sequence[Sequence[Any]],
) -> None:
"""
Create a topic.
:param topics: a list of topics to create including the number of partitions for the topic
and the replication factor. Format: [ ("topic_name", number of partitions, replication factor)]
"""
admin_client = self.get_conn
new_topics = [NewTopic(t[0], num_partitions=t[1], replication_factor=t[2]) for t in topics]
futures = admin_client.create_topics(new_topics)
for t, f in futures.items():
try:
f.result()
self.log.info("The topic %s has been created.", t)
except KafkaException as e:
if e.args[0].name == "TOPIC_ALREADY_EXISTS":
self.log.warning("The topic %s already exists.", t)
else:
raise
def delete_topic(
self,
topics: Sequence[str],
) -> None:
"""
Delete a topic.
:param topics: a list of topics to delete.
"""
admin_client = self.get_conn
futures = admin_client.delete_topics(topics)
for t, f in futures.items():
f.result()
self.log.info("The topic %s has been deleted.", t)
| KafkaAdminClientHook |
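A hedged usage sketch for the hook above (the connection id and topic are placeholders, and a reachable Kafka cluster is assumed):
```python
hook = KafkaAdminClientHook(kafka_config_id="kafka_default")
hook.create_topic(topics=[("my_topic", 3, 1)])  # (name, partitions, replication factor)
hook.delete_topic(topics=["my_topic"])
```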
python | sphinx-doc__sphinx | sphinx/errors.py | {
"start": 87,
"end": 860
} | class ____(Exception):
"""Base class for Sphinx errors.
This is the base class for "nice" exceptions. When such an exception is
raised, Sphinx will abort the build and present the exception category and
message to the user.
Extensions are encouraged to derive from this exception for their custom
errors.
Exceptions *not* derived from :exc:`SphinxError` are treated as unexpected
and shown to the user with a part of the traceback (and the full traceback
saved in a temporary file).
.. attribute:: category
Description of the exception "category", used in converting the
exception to a string ("category: message"). Should be set accordingly
in subclasses.
"""
category = 'Sphinx error'
| SphinxError |
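The extension pattern the docstring describes, in two lines (names are illustrative):
```python
class MyExtensionError(SphinxError):
    category = "My extension error"

# Raising it during a build aborts with "My extension error: <message>".
raise MyExtensionError("something went wrong in my-extension")
```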
python | huggingface__transformers | src/transformers/models/data2vec/modeling_data2vec_text.py | {
"start": 20811,
"end": 21376
} | class ____(PreTrainedModel):
config_class = Data2VecTextConfig
base_model_prefix = "data2vec_text"
supports_gradient_checkpointing = True
_no_split_modules = ["Data2VecTextForTextEmbeddings", "Data2VecTextLayer"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Data2VecTextLayer,
"attentions": Data2VecTextSelfAttention,
"cross_attentions": Data2VecTextCrossAttention,
}
| Data2VecTextPreTrainedModel |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-code-hierarchy/tests/test_code_hierarchy_with_skeleton.py | {
"start": 12739,
"end": 13565
} | class ____ {{
{double_forward_slash} {CodeHierarchyNodeParser._get_comment_text(chunks[2])}
}}
function baz() {{
{double_forward_slash} {CodeHierarchyNodeParser._get_comment_text(chunks[4])}
}}"""
)
# Test the second chunk (function foo)
assert (
chunks[1].text
== """\
function foo() {
console.log("bar");
}"""
)
assert chunks[1].metadata["inclusive_scopes"] == [
{"name": "foo", "type": "function_declaration", "signature": "function foo()"}
]
assert (
cast(RelatedNodeInfo, chunks[1].relationships[NodeRelationship.PARENT]).node_id
== chunks[0].id_
)
assert [c.node_id for c in chunks[1].relationships[NodeRelationship.CHILD]] == []
# Test the third chunk (class Example)
assert (
chunks[2].text
== f"""\
| Example |
python | scrapy__scrapy | tests/test_spidermiddleware_referer.py | {
"start": 24133,
"end": 24310
} | class ____(
MixinNoReferrerWhenDowngrade, TestRefererMiddleware
):
req_meta = {"referrer_policy": POLICY_NO_REFERRER_WHEN_DOWNGRADE}
| TestRequestMetaNoReferrerWhenDowngrade |
python | huggingface__transformers | src/transformers/models/qwen2_audio/modeling_qwen2_audio.py | {
"start": 4079,
"end": 7848
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
# Copied from transformers.models.whisper.modeling_whisper.WhisperAttention.__init__ with Whisper->Qwen2Audio
def __init__(
self,
embed_dim: int,
num_heads: int,
dropout: float = 0.0,
is_decoder: bool = False,
bias: bool = True,
is_causal: bool = False,
layer_idx: Optional[int] = None,
config: Optional[Qwen2AudioConfig] = None,
):
super().__init__()
self.embed_dim = embed_dim
self.num_heads = num_heads
self.dropout = dropout
self.head_dim = embed_dim // num_heads
self.config = config
if (self.head_dim * num_heads) != self.embed_dim:
raise ValueError(
f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
f" and `num_heads`: {num_heads})."
)
self.scaling = self.head_dim**-0.5
self.is_decoder = is_decoder
self.is_causal = is_causal
if layer_idx is None and is_decoder:
logger.warning_once(
f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
"will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.layer_idx = layer_idx
self.k_proj = nn.Linear(embed_dim, embed_dim, bias=False)
self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
def _shape(self, tensor: torch.Tensor, seq_len: int, bsz: int):
return tensor.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2).contiguous()
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
output_attentions: bool = False,
**kwargs,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
"""Input shape: Batch x Time x Channel"""
bsz, tgt_len, _ = hidden_states.size()
        # Scaling is susceptible to floating-point imprecision, which can lead
        # to different results (this varies from model to model; e.g. Whisper
        # is one such case). We therefore keep the
# original order of scaling to follow the original implementation
# and enforce no scaling (1.0) in the attention call below.
query_states = self._shape(self.q_proj(hidden_states) * self.scaling, tgt_len, bsz)
key_states = self._shape(self.k_proj(hidden_states), -1, bsz)
value_states = self._shape(self.v_proj(hidden_states), -1, bsz)
attention_interface: Callable = eager_attention_forward
if self.config._attn_implementation != "eager":
attention_interface = ALL_ATTENTION_FUNCTIONS[self.config._attn_implementation]
attn_output, attn_weights = attention_interface(
self,
query_states,
key_states,
value_states,
attention_mask,
dropout=0.0 if not self.training else self.dropout,
scaling=1.0,
output_attentions=output_attentions,
**kwargs,
)
attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
attn_output = self.out_proj(attn_output)
return attn_output, attn_weights
# Copied from transformers.models.whisper.modeling_whisper.WhisperEncoderLayer with Whisper->Qwen2Audio, WHISPER->QWEN2AUDIO
| Qwen2AudioAttention |
python | huggingface__transformers | src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | {
"start": 26977,
"end": 29058
} | class ____(nn.Module):
"""
ASPP module defined in DeepLab papers: https://huggingface.co/papers/1606.00915, https://huggingface.co/papers/1706.05587
"""
def __init__(self, config: MobileViTV2Config) -> None:
super().__init__()
encoder_out_channels = make_divisible(512 * config.width_multiplier, divisor=8) # layer 5 output dimension
in_channels = encoder_out_channels
out_channels = config.aspp_out_channels
if len(config.atrous_rates) != 3:
raise ValueError("Expected 3 values for atrous_rates")
self.convs = nn.ModuleList()
in_projection = MobileViTV2ConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
use_activation="relu",
)
self.convs.append(in_projection)
self.convs.extend(
[
MobileViTV2ConvLayer(
config,
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
dilation=rate,
use_activation="relu",
)
for rate in config.atrous_rates
]
)
pool_layer = MobileViTV2ASPPPooling(config, in_channels, out_channels)
self.convs.append(pool_layer)
self.project = MobileViTV2ConvLayer(
config, in_channels=5 * out_channels, out_channels=out_channels, kernel_size=1, use_activation="relu"
)
self.dropout = nn.Dropout(p=config.aspp_dropout_prob)
def forward(self, features: torch.Tensor) -> torch.Tensor:
pyramid = []
for conv in self.convs:
pyramid.append(conv(features))
pyramid = torch.cat(pyramid, dim=1)
pooled_features = self.project(pyramid)
pooled_features = self.dropout(pooled_features)
return pooled_features
# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTDeepLabV3 with MobileViT->MobileViTV2
| MobileViTV2ASPP |
python | kamyu104__LeetCode-Solutions | Python/count-negative-numbers-in-a-sorted-matrix.py | {
"start": 33,
"end": 361
} | class ____(object):
def countNegatives(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
result, c = 0, len(grid[0])-1
for row in grid:
while c >= 0 and row[c] < 0:
c -= 1
result += len(grid[0])-1-c
return result
| Solution |
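A check of the staircase walk above against LeetCode 1351's first example (assumes the masked class is `Solution`):
```python
grid = [[4, 3, 2, -1], [3, 2, 1, -1], [1, 1, -1, -2], [-1, -1, -2, -3]]
assert Solution().countNegatives(grid) == 8
```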
python | dagster-io__dagster | python_modules/dagster/dagster/_core/scheduler/instigation.py | {
"start": 8288,
"end": 11404
} | class ____(
NamedTuple(
"_InstigationState",
[
("origin", RemoteInstigatorOrigin),
("instigator_type", InstigatorType),
("status", InstigatorStatus),
("instigator_data", Optional[InstigatorData]),
],
)
):
def __new__(
cls,
origin: RemoteInstigatorOrigin,
instigator_type: InstigatorType,
status: InstigatorStatus,
instigator_data: Optional[InstigatorData] = None,
):
return super().__new__(
cls,
check.inst_param(origin, "origin", RemoteInstigatorOrigin),
check.inst_param(instigator_type, "instigator_type", InstigatorType),
check.inst_param(status, "status", InstigatorStatus),
check_instigator_data(instigator_type, instigator_data),
)
@property
def is_running(self) -> bool:
return self.status != InstigatorStatus.STOPPED
@property
def name(self) -> str:
return self.origin.instigator_name
@property
def instigator_name(self) -> str:
return self.origin.instigator_name
@property
def repository_origin_id(self) -> str:
return self.origin.repository_origin.get_id()
@property
def repository_selector(self) -> RepositorySelector:
return RepositorySelector(
location_name=self.origin.repository_origin.code_location_origin.location_name,
repository_name=self.origin.repository_origin.repository_name,
)
@property
def repository_selector_id(self) -> str:
return create_snapshot_id(self.repository_selector)
@property
def instigator_origin_id(self) -> str:
return self.origin.get_id()
@property
def selector(self) -> InstigatorSelector:
return InstigatorSelector(
location_name=self.origin.repository_origin.code_location_origin.location_name,
repository_name=self.origin.repository_origin.repository_name,
name=self.origin.instigator_name,
)
@property
def selector_id(self) -> str:
return create_snapshot_id(self.selector)
def with_status(self, status: InstigatorStatus) -> "InstigatorState":
check.inst_param(status, "status", InstigatorStatus)
return InstigatorState(
self.origin,
instigator_type=self.instigator_type,
status=status,
instigator_data=self.instigator_data,
)
def with_data(self, instigator_data: InstigatorData) -> "InstigatorState":
check_instigator_data(self.instigator_type, instigator_data)
return InstigatorState(
self.origin,
instigator_type=self.instigator_type,
status=self.status,
instigator_data=instigator_data,
)
@property
def sensor_instigator_data(self) -> Optional["SensorInstigatorData"]:
if isinstance(self.instigator_data, SensorInstigatorData):
return self.instigator_data
return None
@whitelist_for_serdes(old_storage_names={"JobTickStatus"})
| InstigatorState |
python | apache__airflow | providers/elasticsearch/tests/unit/elasticsearch/hooks/test_elasticsearch.py | {
"start": 4213,
"end": 7572
} | class ____:
def setup_method(self):
sql = MagicMock(spec=SqlClient)
sql.query.side_effect = RESPONSES
es = MagicMock(sql=sql, spec=Elasticsearch)
self.cur = ElasticsearchSQLCursor(es=es, options={})
self.spy_agency = SpyAgency()
self.spy_agency.spy_on(self.cur.close, call_original=True)
self.spy_agency.spy_on(self.cur.execute, call_original=True)
self.spy_agency.spy_on(self.cur.fetchall, call_original=True)
self.conn = MagicMock(spec=ESConnection)
self.conn.cursor.return_value = self.cur
conn = self.conn
class UnitTestElasticsearchSQLHook(ElasticsearchSQLHook):
conn_name_attr = "test_conn_id"
def get_conn(self):
return conn
self.db_hook = UnitTestElasticsearchSQLHook()
def test_get_first_record(self):
statement = "SELECT * FROM hollywood.actors"
assert self.db_hook.get_first(statement) == ROWS[0]
self.conn.close.assert_called_once_with()
self.spy_agency.assert_spy_called(self.cur.close)
self.spy_agency.assert_spy_called(self.cur.execute)
def test_get_records(self):
statement = "SELECT * FROM hollywood.actors"
assert self.db_hook.get_records(statement) == ROWS
self.conn.close.assert_called_once_with()
self.spy_agency.assert_spy_called(self.cur.close)
self.spy_agency.assert_spy_called(self.cur.execute)
def test_get_df_pandas(self):
statement = "SELECT * FROM hollywood.actors"
df = self.db_hook.get_df(statement, df_type="pandas")
assert list(df.columns) == ["index", "name", "firstname", "age"]
assert df.values.tolist() == ROWS
self.conn.close.assert_called_once_with()
self.spy_agency.assert_spy_called(self.cur.close)
self.spy_agency.assert_spy_called(self.cur.execute)
def test_get_df_polars(self):
with pytest.raises(NotImplementedError):
self.db_hook.get_df("SQL", df_type="polars")
def test_run(self):
statement = "SELECT * FROM hollywood.actors"
assert self.db_hook.run(statement, handler=fetch_all_handler) == ROWS
self.conn.close.assert_called_once_with()
self.spy_agency.assert_spy_called(self.cur.close)
self.spy_agency.assert_spy_called(self.cur.execute)
@mock.patch("airflow.providers.elasticsearch.hooks.elasticsearch.Elasticsearch")
def test_execute_sql_query(self, mock_es):
mock_es_sql_client = MagicMock()
mock_es_sql_client.query.return_value = RESPONSE_WITHOUT_CURSOR
mock_es.return_value.sql = mock_es_sql_client
es_connection = ESConnection(host="localhost", port=9200)
response = es_connection.execute_sql("SELECT * FROM hollywood.actors")
mock_es_sql_client.query.assert_called_once_with(
body={
"fetch_size": 1000,
"field_multi_value_leniency": False,
"query": "SELECT * FROM hollywood.actors",
}
)
assert response == RESPONSE_WITHOUT_CURSOR
def test_connection_ignore_cursor_parameters(self):
assert ESConnection(
host="localhost",
port=9200,
fetch_size=1000,
field_multi_value_leniency=True,
)
| TestElasticsearchSQLHook |
python | ray-project__ray | rllib/env/env_runner_group.py | {
"start": 1752,
"end": 55212
} | class ____:
"""Set of EnvRunners with n @ray.remote workers and zero or one local worker.
Where: n >= 0.
"""
def __init__(
self,
*,
env_creator: Optional[EnvCreator] = None,
validate_env: Optional[Callable[[EnvType], None]] = None,
default_policy_class: Optional[Type[Policy]] = None,
config: Optional["AlgorithmConfig"] = None,
local_env_runner: bool = True,
logdir: Optional[str] = None,
_setup: bool = True,
tune_trial_id: Optional[str] = None,
pg_offset: int = 0,
# Deprecated args.
num_env_runners: Optional[int] = None,
num_workers=DEPRECATED_VALUE,
local_worker=DEPRECATED_VALUE,
):
"""Initializes a EnvRunnerGroup instance.
Args:
env_creator: Function that returns env given env config.
validate_env: Optional callable to validate the generated
environment (only on worker=0). This callable should raise
an exception if the environment is invalid.
default_policy_class: An optional default Policy class to use inside
the (multi-agent) `policies` dict. In case the PolicySpecs in there
have no class defined, use this `default_policy_class`.
If None, PolicySpecs will be using the Algorithm's default Policy
class.
config: Optional AlgorithmConfig (or config dict).
local_env_runner: Whether to create a local (non @ray.remote) EnvRunner
in the returned set as well (default: True). If `num_env_runners`
is 0, always create a local EnvRunner.
logdir: Optional logging directory for workers.
_setup: Whether to actually set up workers. This is only for testing.
tune_trial_id: The Ray Tune trial ID, if this EnvRunnerGroup is part of
an Algorithm run as a Tune trial. None, otherwise.
"""
if num_workers != DEPRECATED_VALUE or local_worker != DEPRECATED_VALUE:
deprecation_warning(
old="WorkerSet(num_workers=..., local_worker=...)",
new="EnvRunnerGroup(num_env_runners=..., local_env_runner=...)",
error=True,
)
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
# Make sure `config` is an AlgorithmConfig object.
if not config:
config = AlgorithmConfig()
elif isinstance(config, dict):
config = AlgorithmConfig.from_dict(config)
self._env_creator = env_creator
self._policy_class = default_policy_class
self._remote_config = config
self._remote_config_obj_ref = ray.put(self._remote_config)
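# Put the (potentially large) config into the object store once so workers share it by reference.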
self._remote_args = {
"num_cpus": self._remote_config.num_cpus_per_env_runner,
"num_gpus": self._remote_config.num_gpus_per_env_runner,
"resources": self._remote_config.custom_resources_per_env_runner,
"max_restarts": (
config.max_num_env_runner_restarts
if config.restart_failed_env_runners
else 0
),
}
self._tune_trial_id = tune_trial_id
self._pg_offset = pg_offset
# Set the EnvRunner subclass to be used as "workers". Default: RolloutWorker.
self.env_runner_cls = config.env_runner_cls
if self.env_runner_cls is None:
if config.enable_env_runner_and_connector_v2:
# If experiences should be recorded, use the
# `OfflineSingleAgentEnvRunner`.
if config.output:
# No multi-agent support.
if config.is_multi_agent:
raise ValueError("Multi-agent recording is not supported, yet.")
# Otherwise, load the single-agent env runner for
# recording.
else:
from ray.rllib.offline.offline_env_runner import (
OfflineSingleAgentEnvRunner,
)
self.env_runner_cls = OfflineSingleAgentEnvRunner
else:
if config.is_multi_agent:
from ray.rllib.env.multi_agent_env_runner import (
MultiAgentEnvRunner,
)
self.env_runner_cls = MultiAgentEnvRunner
else:
from ray.rllib.env.single_agent_env_runner import (
SingleAgentEnvRunner,
)
self.env_runner_cls = SingleAgentEnvRunner
else:
self.env_runner_cls = RolloutWorker
self._logdir = logdir
self._ignore_ray_errors_on_env_runners = (
config.ignore_env_runner_failures or config.restart_failed_env_runners
)
# Create remote worker manager.
# ID=0 is used by the local worker.
# Starting remote workers from ID=1 to avoid conflicts.
self._worker_manager = FaultTolerantActorManager(
max_remote_requests_in_flight_per_actor=(
config.max_requests_in_flight_per_env_runner
),
init_id=1,
)
if _setup:
try:
self._setup(
validate_env=validate_env,
config=config,
num_env_runners=(
num_env_runners
if num_env_runners is not None
else config.num_env_runners
),
local_env_runner=local_env_runner,
)
# EnvRunnerGroup creation may fail if some (remote) workers cannot
# be initialized properly (due to errors in the EnvRunner's
# constructor).
except RayActorError as e:
# In case of an actor (remote worker) init failure, the remote worker
# may still exist and will be accessible, however, e.g. calling
# its `sample.remote()` would result in strange "property not found"
# errors.
if e.actor_init_failed:
# Raise the original error here that the EnvRunners raised
# during its construction process. This is to enforce transparency
# for the user (better to understand the real reason behind the
# failure).
# - e.args[0]: The RayTaskError (inside the caught RayActorError).
# - e.args[0].args[2]: The original Exception (e.g. a ValueError due
# to a config mismatch) thrown inside the actor.
raise e.args[0].args[2]
# In any other case, raise the RayActorError as-is.
else:
raise e
def _setup(
self,
*,
validate_env: Optional[Callable[[EnvType], None]] = None,
config: Optional["AlgorithmConfig"] = None,
num_env_runners: int = 0,
local_env_runner: bool = True,
):
"""Sets up an EnvRunnerGroup instance.
Args:
validate_env: Optional callable to validate the generated
environment (only on worker=0).
config: Optional dict that extends the common config of
the Algorithm class.
num_env_runners: Number of remote EnvRunner workers to create.
local_env_runner: Whether to create a local (non @ray.remote) EnvRunner
in the returned set as well (default: True). If `num_env_runners`
is 0, always create a local EnvRunner.
"""
# Force a local worker if num_env_runners == 0 (no remote workers).
# Otherwise, this EnvRunnerGroup would be empty.
self._local_env_runner = None
if num_env_runners == 0:
local_env_runner = True
# Create a local (learner) version of the config for the local worker.
# The only difference is the tf_session_args, which - for the local worker -
# will be `config.tf_session_args` updated/overridden with
# `config.local_tf_session_args`.
local_tf_session_args = config.tf_session_args.copy()
local_tf_session_args.update(config.local_tf_session_args)
self._local_config = config.copy(copy_frozen=False).framework(
tf_session_args=local_tf_session_args
)
if config.input_ == "dataset":
# Create the set of dataset readers to be shared by all the
# rollout workers.
self._ds, self._ds_shards = get_dataset_and_shards(config, num_env_runners)
else:
self._ds = None
self._ds_shards = None
# Create a number of @ray.remote workers.
self.add_workers(
num_env_runners,
validate=config.validate_env_runners_after_construction,
)
# If num_env_runners > 0 and we don't have an env on the local worker,
# get the observation- and action spaces for each policy from
# the first remote worker (which does have an env).
if (
local_env_runner
and self._worker_manager.num_actors() > 0
and not config.create_env_on_local_worker
and (not config.observation_space or not config.action_space)
):
spaces = self.get_spaces()
else:
spaces = None
# Create a local worker, if needed.
if local_env_runner:
self._local_env_runner = self._make_worker(
env_creator=self._env_creator,
validate_env=validate_env,
worker_index=0,
num_workers=num_env_runners,
config=self._local_config,
spaces=spaces,
)
def get_spaces(self):
"""Infer observation and action spaces from one (local or remote) EnvRunner.
Returns:
A dict mapping from ModuleID to a 2-tuple containing obs- and action-space.
"""
# Get ID of the first remote worker.
remote_worker_ids = (
[self._worker_manager.actor_ids()[0]]
if self._worker_manager.actor_ids()
else []
)
spaces = self.foreach_env_runner(
lambda env_runner: env_runner.get_spaces(),
remote_worker_ids=remote_worker_ids,
local_env_runner=not remote_worker_ids,
)[0]
logger.info(
"Inferred observation/action spaces from remote "
f"worker (local worker has no env): {spaces}"
)
return spaces
@property
def local_env_runner(self) -> EnvRunner:
"""Returns the local EnvRunner."""
return self._local_env_runner
def healthy_env_runner_ids(self) -> List[int]:
"""Returns the list of remote worker IDs."""
return self._worker_manager.healthy_actor_ids()
def healthy_worker_ids(self) -> List[int]:
"""Returns the list of remote worker IDs."""
return self.healthy_env_runner_ids()
def num_remote_env_runners(self) -> int:
"""Returns the number of remote EnvRunners."""
return self._worker_manager.num_actors()
def num_remote_workers(self) -> int:
"""Returns the number of remote EnvRunners."""
return self.num_remote_env_runners()
def num_healthy_remote_env_runners(self) -> int:
"""Returns the number of healthy remote workers."""
return self._worker_manager.num_healthy_actors()
def num_healthy_remote_workers(self) -> int:
"""Returns the number of healthy remote workers."""
return self.num_healthy_remote_env_runners()
def num_healthy_env_runners(self) -> int:
"""Returns the number of all healthy workers, including the local worker."""
return int(bool(self._local_env_runner)) + self.num_healthy_remote_workers()
def num_healthy_workers(self) -> int:
"""Returns the number of all healthy workers, including the local worker."""
return self.num_healthy_env_runners()
def num_in_flight_async_reqs(self, tag: Optional[str] = None) -> int:
"""Returns the number of in-flight async requests."""
return self._worker_manager.num_outstanding_async_reqs(tag=tag)
def num_remote_worker_restarts(self) -> int:
"""Total number of times managed remote workers have been restarted."""
return self._worker_manager.total_num_restarts()
def sync_env_runner_states(
self,
*,
config: "AlgorithmConfig",
from_worker: Optional[EnvRunner] = None,
env_steps_sampled: Optional[int] = None,
connector_states: Optional[List[Dict[str, Any]]] = None,
rl_module_state: Optional[Dict[str, Any]] = None,
env_runner_indices_to_update: Optional[List[int]] = None,
env_to_module=None,
module_to_env=None,
) -> None:
"""Synchronizes the connectors of this EnvRunnerGroup's EnvRunners.
The exact procedure works as follows:
- If `from_worker` is None, set `from_worker=self.local_env_runner`.
- If `config.use_worker_filter_stats` is True, gather all remote EnvRunners'
ConnectorV2 states. Otherwise, only use the ConnectorV2 states of `from_worker`.
- Merge all gathered states into one resulting state.
- Broadcast the resulting state back to all remote EnvRunners AND the local
EnvRunner.
Args:
config: The AlgorithmConfig object to use to determine, in which
direction(s) we need to synch and what the timeouts are.
from_worker: The EnvRunner from which to synch. If None, will use the local
worker of this EnvRunnerGroup.
env_steps_sampled: The total number of env steps taken thus far by all
workers combined. Used to broadcast this number to all remote workers
if `update_worker_filter_stats` is True in `config`.
env_runner_indices_to_update: The indices of those EnvRunners to update
with the merged state. Use None (default) to update all remote
EnvRunners.
"""
from_worker = from_worker or self.local_env_runner
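# Decide whether to merge states from all remote EnvRunners (vs. using only `from_worker`'s state).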
merge = (
not config.enable_env_runner_and_connector_v2
and config.use_worker_filter_stats
) or (
config.enable_env_runner_and_connector_v2
and (
config.merge_env_runner_states is True
or (
config.merge_env_runner_states == "training_only"
and not config.in_evaluation
)
)
)
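# Decide whether to broadcast the resulting state back to all remote EnvRunners.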
broadcast = (
not config.enable_env_runner_and_connector_v2
and config.update_worker_filter_stats
) or (
config.enable_env_runner_and_connector_v2
and config.broadcast_env_runner_states
)
# Early out if the number of (healthy) remote workers is 0. In this case, the
# local worker is the only operating worker and thus of course always holds
# the reference connector state.
if self.num_healthy_remote_workers() == 0 and self.local_env_runner:
self.local_env_runner.set_state(
{
**(
{NUM_ENV_STEPS_SAMPLED_LIFETIME: env_steps_sampled}
if env_steps_sampled is not None
else {}
),
**(rl_module_state or {}),
}
)
return
# Also early out, if we don't merge AND don't broadcast.
if not merge and not broadcast:
return
# Use states from all remote EnvRunners.
if merge:
if connector_states == []:
env_runner_states = {}
else:
if connector_states is None:
connector_states = self.foreach_env_runner(
lambda w: w.get_state(
components=[
COMPONENT_ENV_TO_MODULE_CONNECTOR,
COMPONENT_MODULE_TO_ENV_CONNECTOR,
]
),
local_env_runner=False,
timeout_seconds=(
config.sync_filters_on_rollout_workers_timeout_s
),
)
env_to_module_states = [
s[COMPONENT_ENV_TO_MODULE_CONNECTOR]
for s in connector_states
if COMPONENT_ENV_TO_MODULE_CONNECTOR in s
]
module_to_env_states = [
s[COMPONENT_MODULE_TO_ENV_CONNECTOR]
for s in connector_states
if COMPONENT_MODULE_TO_ENV_CONNECTOR in s
]
if (
self.local_env_runner is not None
and hasattr(self.local_env_runner, "_env_to_module")
and hasattr(self.local_env_runner, "_module_to_env")
):
assert env_to_module is None
env_to_module = self.local_env_runner._env_to_module
assert module_to_env is None
module_to_env = self.local_env_runner._module_to_env
env_runner_states = {}
if env_to_module_states:
env_runner_states.update(
{
COMPONENT_ENV_TO_MODULE_CONNECTOR: (
env_to_module.merge_states(env_to_module_states)
),
}
)
if module_to_env_states:
env_runner_states.update(
{
COMPONENT_MODULE_TO_ENV_CONNECTOR: (
module_to_env.merge_states(module_to_env_states)
),
}
)
# Ignore states from remote EnvRunners (use the current `from_worker` states
# only).
else:
if from_worker is None:
env_runner_states = {
COMPONENT_ENV_TO_MODULE_CONNECTOR: env_to_module.get_state(),
COMPONENT_MODULE_TO_ENV_CONNECTOR: module_to_env.get_state(),
}
else:
env_runner_states = from_worker.get_state(
components=[
COMPONENT_ENV_TO_MODULE_CONNECTOR,
COMPONENT_MODULE_TO_ENV_CONNECTOR,
]
)
# Update the global number of environment steps, if necessary.
# Make sure to divide by the number of env runners (such that each EnvRunner
# knows (roughly) its own(!) lifetime count and can infer the global lifetime
# count from it).
if env_steps_sampled is not None:
env_runner_states[NUM_ENV_STEPS_SAMPLED_LIFETIME] = env_steps_sampled // (
config.num_env_runners or 1
)
# If we do NOT want remote EnvRunners to get their Connector states updated,
# only update the local worker here (with all state components, except the model
# weights) and then remove the connector components.
if not broadcast:
if self.local_env_runner is not None:
self.local_env_runner.set_state(env_runner_states)
else:
env_to_module.set_state(
env_runner_states.get(COMPONENT_ENV_TO_MODULE_CONNECTOR), {}
)
module_to_env.set_state(
env_runner_states.get(COMPONENT_MODULE_TO_ENV_CONNECTOR), {}
)
env_runner_states.pop(COMPONENT_ENV_TO_MODULE_CONNECTOR, None)
env_runner_states.pop(COMPONENT_MODULE_TO_ENV_CONNECTOR, None)
# If there are components in the state left -> Update remote workers with these
# state components (and maybe the local worker, if it hasn't been updated yet).
if env_runner_states:
# Update the local EnvRunner, but NOT with the weights. If used at all for
# evaluation (through the user calling `self.evaluate`), RLlib would update
# the weights up front either way.
if self.local_env_runner is not None and broadcast:
self.local_env_runner.set_state(env_runner_states)
# Send the model weights only to remote EnvRunners.
# In case the local EnvRunner is ever needed for evaluation,
# RLlib updates its weight right before such an eval step.
if rl_module_state:
env_runner_states.update(rl_module_state)
# Broadcast updated states back to all workers.
# We explicitly don't want to fire and forget here, because this can lead to a lot of in-flight requests.
# When these pile up, object store memory can spike.
self.foreach_env_runner_async_fetch_ready(
func="set_state",
tag="set_state",
kwargs=dict(state=env_runner_states),
remote_worker_ids=env_runner_indices_to_update,
timeout_seconds=0.0,
)
def foreach_env_runner_async_fetch_ready(
self,
func: Union[
Callable[[EnvRunner], T], List[Callable[[EnvRunner], T]], str, List[str]
],
kwargs: Optional[Dict[str, Any]] = None,
tag: Optional[str] = None,
timeout_seconds: Optional[float] = 0.0,
return_obj_refs: bool = False,
mark_healthy: bool = False,
healthy_only: bool = True,
remote_worker_ids: List[int] = None,
return_actor_ids: bool = False,
) -> List[Union[Tuple[int, T], T]]:
"""Calls the given function asynchronously and returns previous results if any.
This is a convenience function that calls the underlying actor manager's
`foreach_actor_async_fetch_ready()` method.
"""
return self._worker_manager.foreach_actor_async_fetch_ready(
func=func,
tag=tag,
kwargs=kwargs,
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
healthy_only=healthy_only,
remote_actor_ids=remote_worker_ids,
ignore_ray_errors=self._ignore_ray_errors_on_env_runners,
return_actor_ids=return_actor_ids,
)
def sync_weights(
self,
policies: Optional[List[PolicyID]] = None,
from_worker_or_learner_group: Optional[Union[EnvRunner, "LearnerGroup"]] = None,
to_worker_indices: Optional[List[int]] = None,
global_vars: Optional[Dict[str, TensorType]] = None,
timeout_seconds: Optional[float] = 0.0,
inference_only: Optional[bool] = False,
) -> None:
"""Syncs model weights from the given weight source to all remote workers.
Weight source can be either a (local) rollout worker or a learner_group. It
should just implement a `get_weights` method.
Args:
policies: Optional list of PolicyIDs to sync weights for.
If None (default), sync weights to/from all policies.
from_worker_or_learner_group: Optional (local) EnvRunner instance or
LearnerGroup instance to sync from. If None (default),
sync from this EnvRunnerGroup's local worker.
to_worker_indices: Optional list of worker indices to sync the
weights to. If None (default), sync to all remote workers.
global_vars: An optional global vars dict to set this
worker to. If None, do not update the global_vars.
timeout_seconds: Timeout in seconds to wait for the sync weights
calls to complete. Default is 0.0 (fire-and-forget, do not wait
for any sync calls to finish). Setting this to 0.0 might significantly
improve algorithm performance, depending on the algo's `training_step`
logic.
inference_only: Sync weights with workers that keep inference-only
modules. This is needed for algorithms in the new stack that
use inference-only modules. In this case only a part of the
parameters are synced to the workers. Default is False.
"""
if self.local_env_runner is None and from_worker_or_learner_group is None:
raise TypeError(
"No `local_env_runner` in EnvRunnerGroup! Must provide "
"`from_worker_or_learner_group` arg in `sync_weights()`!"
)
# Only sync if we have remote workers or `from_worker_or_learner_group` is provided.
rl_module_state = None
if self.num_remote_workers() or from_worker_or_learner_group is not None:
weights_src = (
from_worker_or_learner_group
if from_worker_or_learner_group is not None
else self.local_env_runner
)
if weights_src is None:
raise ValueError(
"`from_worker_or_trainer` is None. In this case, EnvRunnerGroup "
"should have local_env_runner. But local_env_runner is also None."
)
modules = (
[COMPONENT_RL_MODULE + "/" + p for p in policies]
if policies is not None
else [COMPONENT_RL_MODULE]
)
# LearnerGroup has-a Learner, which has-a RLModule.
if isinstance(weights_src, LearnerGroup):
rl_module_state = weights_src.get_state(
components=[COMPONENT_LEARNER + "/" + m for m in modules],
inference_only=inference_only,
)[COMPONENT_LEARNER]
# EnvRunner (new API stack).
elif self._remote_config.enable_env_runner_and_connector_v2:
# EnvRunner (remote) has-a RLModule.
# TODO (sven): Replace this with a new ActorManager API:
# try_remote_request_till_success("get_state") -> tuple(int,
# remoteresult)
# `weights_src` could be the ActorManager, then. Then RLlib would know
# that it has to ping the manager to try all healthy actors until the
# first returns something.
if isinstance(weights_src, ray.actor.ActorHandle):
rl_module_state = ray.get(
weights_src.get_state.remote(
components=modules,
inference_only=inference_only,
)
)
# EnvRunner (local) has-a RLModule.
else:
rl_module_state = weights_src.get_state(
components=modules,
inference_only=inference_only,
)
# RolloutWorker (old API stack).
else:
rl_module_state = weights_src.get_weights(
policies=policies,
inference_only=inference_only,
)
if self._remote_config.enable_env_runner_and_connector_v2:
# Make sure `rl_module_state` only contains the weights and the
# weight seq no, nothing else.
rl_module_state = {
k: v
for k, v in rl_module_state.items()
if k in [COMPONENT_RL_MODULE, WEIGHTS_SEQ_NO]
}
# Move weights to the object store to avoid having to make n pickled
# copies of the weights dict for each worker.
rl_module_state_ref = ray.put(rl_module_state)
# Sync to specified remote workers in this EnvRunnerGroup.
# We explicitly don't want to fire and forget here, because this can lead to a lot of in-flight requests.
# When these pile up, object store memory can spike.
self.foreach_env_runner_async_fetch_ready(
func="set_state",
tag="set_state",
kwargs=dict(state=rl_module_state_ref),
remote_worker_ids=to_worker_indices,
timeout_seconds=timeout_seconds,
)
else:
rl_module_state_ref = ray.put(rl_module_state)
def _set_weights(env_runner):
env_runner.set_weights(ray.get(rl_module_state_ref), global_vars)
# Sync to specified remote workers in this EnvRunnerGroup.
self.foreach_env_runner(
func=_set_weights,
local_env_runner=False, # Do not sync back to local worker.
remote_worker_ids=to_worker_indices,
timeout_seconds=timeout_seconds,
)
# If `from_worker_or_learner_group` is provided, also sync to this
# EnvRunnerGroup's local worker.
if self.local_env_runner is not None:
if from_worker_or_learner_group is not None:
if self._remote_config.enable_env_runner_and_connector_v2:
self.local_env_runner.set_state(rl_module_state)
else:
self.local_env_runner.set_weights(rl_module_state)
# If `global_vars` is provided and local worker exists -> Update its
# global_vars.
if global_vars is not None:
self.local_env_runner.set_global_vars(global_vars)
def add_workers(self, num_workers: int, validate: bool = False) -> None:
"""Creates and adds a number of remote workers to this worker set.
Can be called several times on the same EnvRunnerGroup to add more
EnvRunners to the set.
Args:
num_workers: The number of remote Workers to add to this
EnvRunnerGroup.
validate: Whether to validate remote workers after their construction
process.
Raises:
RayError: If any of the constructed remote workers is not up and running
properly.
"""
old_num_workers = self._worker_manager.num_actors()
new_workers = [
self._make_worker(
env_creator=self._env_creator,
validate_env=None,
worker_index=old_num_workers + i + 1,
num_workers=old_num_workers + num_workers,
# self._remote_config can be large
# and it's best practice to pass it by reference
# instead of value (https://docs.ray.io/en/latest/ray-core/patterns/pass-large-arg-by-value.html)
config=self._remote_config_obj_ref,
)
for i in range(num_workers)
]
self._worker_manager.add_actors(new_workers)
# Validate here, whether all remote workers have been constructed properly
# and are "up and running". Establish initial states.
if validate:
for result in self._worker_manager.foreach_actor(
lambda w: w.assert_healthy()
):
# Simply raise the error, which will get handled by the try-except
# clause around the _setup().
if not result.ok:
e = result.get()
if self._ignore_ray_errors_on_env_runners:
logger.error(f"Validation of EnvRunner failed! Error={str(e)}")
else:
raise e
def reset(self, new_remote_workers: List[ActorHandle]) -> None:
"""Hard overrides the remote EnvRunners in this set with the provided ones.
Args:
new_remote_workers: A list of new EnvRunners (as `ActorHandles`) to use as
new remote workers.
"""
self._worker_manager.clear()
self._worker_manager.add_actors(new_remote_workers)
def stop(self) -> None:
"""Calls `stop` on all EnvRunners (including the local one)."""
try:
# Make sure we stop all EnvRunners, include the ones that were just
# restarted / recovered or that are tagged unhealthy (at least, we should
# try).
self.foreach_env_runner(
lambda w: w.stop(), healthy_only=False, local_env_runner=True
)
except Exception:
logger.exception("Failed to stop workers!")
finally:
self._worker_manager.clear()
def foreach_env_runner(
self,
func: Union[
Callable[[EnvRunner], T], List[Callable[[EnvRunner], T]], str, List[str]
],
*,
kwargs=None,
local_env_runner: bool = True,
healthy_only: bool = True,
remote_worker_ids: List[int] = None,
timeout_seconds: Optional[float] = None,
return_obj_refs: bool = False,
mark_healthy: bool = False,
) -> List[T]:
"""Calls the given function with each EnvRunner as its argument.
Args:
func: The function to call for each EnvRunner. The only call argument is
the respective EnvRunner instance.
local_env_runner: Whether to apply `func` to local EnvRunner, too.
Default is True.
healthy_only: Apply `func` on known-to-be healthy EnvRunners only.
remote_worker_ids: Apply `func` on a selected set of remote EnvRunners.
Use None (default) for all remote EnvRunners.
timeout_seconds: Time to wait (in seconds) for results. Set this to 0.0 for
fire-and-forget. Set this to None (default) to wait infinitely (i.e. for
synchronous execution).
return_obj_refs: Whether to return ObjectRef instead of actual results.
Note, for fault tolerance reasons, these returned ObjectRefs should
never be resolved with ray.get() outside of this EnvRunnerGroup.
mark_healthy: Whether to mark all those EnvRunners healthy again that are
currently marked unhealthy AND that returned results from the remote
call (within the given `timeout_seconds`).
Note that EnvRunners are NOT set unhealthy, if they simply time out
(only if they return a RayActorError).
Also note that this setting is ignored if `healthy_only=True` (b/c
`mark_healthy` only affects EnvRunners that are currently tagged as
unhealthy).
Returns:
The list of return values of all calls to `func([worker])`.
"""
assert (
not return_obj_refs or not local_env_runner
), "Can not return ObjectRef from local worker."
local_result = []
if local_env_runner and self.local_env_runner is not None:
assert kwargs is None
if isinstance(func, str):
local_result = [getattr(self.local_env_runner, func)]
else:
local_result = [func(self.local_env_runner)]
if not self._worker_manager.actor_ids():
return local_result
remote_results = self._worker_manager.foreach_actor(
func,
kwargs=kwargs,
healthy_only=healthy_only,
remote_actor_ids=remote_worker_ids,
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
)
FaultTolerantActorManager.handle_remote_call_result_errors(
remote_results, ignore_ray_errors=self._ignore_ray_errors_on_env_runners
)
# With application errors handled, return good results.
remote_results = [r.get() for r in remote_results.ignore_errors()]
return local_result + remote_results
def foreach_env_runner_async(
self,
func: Union[
Callable[[EnvRunner], T], List[Callable[[EnvRunner], T]], str, List[str]
],
tag: Optional[str] = None,
*,
kwargs=None,
healthy_only: bool = True,
remote_worker_ids: List[int] = None,
) -> int:
"""Calls the given function asynchronously with each EnvRunner as the argument.
Does not return results directly. Instead, `fetch_ready_async_reqs()` can be
used to pull results in an async manner whenever they are available.
Args:
func: The function to call for each EnvRunner. The only call argument is
the respective EnvRunner instance.
tag: A tag to identify the results from this async call when fetching with
`fetch_ready_async_reqs()`.
kwargs: An optional kwargs dict to be passed to the remote function calls.
healthy_only: Apply `func` on known-to-be healthy EnvRunners only.
remote_worker_ids: Apply `func` on a selected set of remote EnvRunners.
Returns:
The number of async requests that have actually been made. This is the
length of `remote_worker_ids` (or `self.num_remote_workers()` if
`remote_worker_ids` is None) minus the number of requests that were NOT
made b/c a remote EnvRunner already had its
`max_remote_requests_in_flight_per_actor` counter reached for this tag.
"""
return self._worker_manager.foreach_actor_async(
func,
tag=tag,
kwargs=kwargs,
healthy_only=healthy_only,
remote_actor_ids=remote_worker_ids,
)
def fetch_ready_async_reqs(
self,
*,
tags: Optional[Union[str, List[str], Tuple[str]]] = None,
timeout_seconds: Optional[float] = 0.0,
return_obj_refs: bool = False,
mark_healthy: bool = False,
) -> List[Tuple[int, T]]:
"""Get results from outstanding asynchronous requests that are ready.
Args:
tags: Tags to identify the results from a specific async call.
If None (default), returns results from all ready async requests.
If a single string, returns results from all ready async requests with that tag.
timeout_seconds: Time to wait for results. Default is 0, meaning
those requests that are already ready.
return_obj_refs: Whether to return ObjectRef instead of actual results.
mark_healthy: Whether to mark all those workers healthy again that are
currently marked unhealthy AND that returned results from the remote
call (within the given `timeout_seconds`).
Note that workers are NOT set unhealthy, if they simply time out
(only if they return a RayActorError).
Also note that this setting is ignored if `healthy_only=True` (b/c
`mark_healthy` only affects workers that are currently tagged as
unhealthy).
Returns:
A list of results successfully returned from outstanding remote calls,
paired with the indices of the callee workers.
"""
# Get remote results
remote_results = self._worker_manager.fetch_ready_async_reqs(
tags=tags,
timeout_seconds=timeout_seconds,
return_obj_refs=return_obj_refs,
mark_healthy=mark_healthy,
)
FaultTolerantActorManager.handle_remote_call_result_errors(
remote_results,
ignore_ray_errors=self._ignore_ray_errors_on_env_runners,
)
return [(r.actor_id, r.get()) for r in remote_results.ignore_errors()]
@OldAPIStack
def foreach_env(self, func: Callable[[EnvType], List[T]]) -> List[List[T]]:
"""Calls `func` with all workers' sub-environments as args.
An "underlying sub environment" is a single clone of an env within
a vectorized environment.
`func` takes a single underlying sub environment as arg, e.g. a
gym.Env object.
Args:
func: A function - taking an EnvType (normally a gym.Env object)
as arg and returning a list of lists of return values, one
value per underlying sub-environment per each worker.
Returns:
The list (workers) of lists (sub environments) of results.
"""
return list(
self.foreach_env_runner(
lambda w: w.foreach_env(func),
local_env_runner=True,
)
)
@OldAPIStack
def foreach_env_with_context(
self, func: Callable[[BaseEnv, EnvContext], List[T]]
) -> List[List[T]]:
"""Calls `func` with all workers' sub-environments and env_ctx as args.
An "underlying sub environment" is a single clone of an env within
a vectorized environment.
`func` takes a single underlying sub environment and the env_context
as args.
Args:
func: A function - taking a BaseEnv object and an EnvContext as
arg - and returning a list of lists of return values over envs
of the worker.
Returns:
The list (1 item per workers) of lists (1 item per sub-environment)
of results.
"""
return list(
self.foreach_env_runner(
lambda w: w.foreach_env_with_context(func),
local_env_runner=True,
)
)
def probe_unhealthy_env_runners(self) -> List[int]:
"""Checks for unhealthy workers and tries restoring their states.
Returns:
List of IDs of the workers that were restored.
"""
return self._worker_manager.probe_unhealthy_actors(
timeout_seconds=self._remote_config.env_runner_health_probe_timeout_s,
mark_healthy=True,
)
@OldAPIStack
def foreach_policy(self, func: Callable[[Policy, PolicyID], T]) -> List[T]:
"""Calls `func` with each worker's (policy, PolicyID) tuple.
Note that in the multi-agent case, each worker may have more than one
policy.
Args:
func: A function - taking a Policy and its ID - that is
called on all workers' Policies.
Returns:
The list of return values of func over all workers' policies. The
length of this list is:
(num_workers + 1 (local-worker)) *
[num policies in the multi-agent config dict].
The local workers' results are first, followed by all remote
workers' results
"""
results = []
for r in self.foreach_env_runner(
lambda w: w.foreach_policy(func), local_env_runner=True
):
results.extend(r)
return results
@OldAPIStack
def foreach_policy_to_train(self, func: Callable[[Policy, PolicyID], T]) -> List[T]:
"""Apply `func` to all workers' Policies iff in `policies_to_train`.
Args:
func: A function - taking a Policy and its ID - that is
called on all workers' Policies, for which
`worker.is_policy_to_train()` returns True.
Returns:
List[any]: The list of n return values of all
`func([trainable policy], [ID])`-calls.
"""
results = []
for r in self.foreach_env_runner(
lambda w: w.foreach_policy_to_train(func), local_env_runner=True
):
results.extend(r)
return results
@OldAPIStack
def is_policy_to_train(
self, policy_id: PolicyID, batch: Optional[SampleBatchType] = None
) -> bool:
"""Whether given PolicyID (optionally inside some batch) is trainable."""
if self.local_env_runner:
if self.local_env_runner.is_policy_to_train is None:
return True
return self.local_env_runner.is_policy_to_train(policy_id, batch)
else:
raise NotImplementedError
@OldAPIStack
def add_policy(
self,
policy_id: PolicyID,
policy_cls: Optional[Type[Policy]] = None,
policy: Optional[Policy] = None,
*,
observation_space: Optional[gym.spaces.Space] = None,
action_space: Optional[gym.spaces.Space] = None,
config: Optional[Union["AlgorithmConfig", PartialAlgorithmConfigDict]] = None,
policy_state: Optional[PolicyState] = None,
policy_mapping_fn: Optional[Callable[[AgentID, EpisodeID], PolicyID]] = None,
policies_to_train: Optional[
Union[
Collection[PolicyID],
Callable[[PolicyID, Optional[SampleBatchType]], bool],
]
] = None,
module_spec: Optional[RLModuleSpec] = None,
# Deprecated.
workers: Optional[List[Union[EnvRunner, ActorHandle]]] = DEPRECATED_VALUE,
) -> None:
"""Adds a policy to this EnvRunnerGroup's workers or a specific list of workers.
Args:
policy_id: ID of the policy to add.
policy_cls: The Policy class to use for constructing the new Policy.
Note: Only one of `policy_cls` or `policy` must be provided.
policy: The Policy instance to add to this EnvRunnerGroup. If not None, the
given Policy object will be directly inserted into the
local worker and clones of that Policy will be created on all remote
workers.
Note: Only one of `policy_cls` or `policy` must be provided.
observation_space: The observation space of the policy to add.
If None, try to infer this space from the environment.
action_space: The action space of the policy to add.
If None, try to infer this space from the environment.
config: The config object or overrides for the policy to add.
policy_state: Optional state dict to apply to the new
policy instance, right after its construction.
policy_mapping_fn: An optional (updated) policy mapping function
to use from here on. Note that already ongoing episodes will
not change their mapping but will use the old mapping till
the end of the episode.
policies_to_train: An optional list of policy IDs to be trained
or a callable taking PolicyID and SampleBatchType and
returning a bool (trainable or not?).
If None, will keep the existing setup in place. Policies,
whose IDs are not in the list (or for which the callable
returns False) will not be updated.
module_spec: In the new RLModule API we need to pass in the module_spec for
the new module that is supposed to be added. Knowing the policy spec is
not sufficient.
workers: A list of EnvRunner/ActorHandles (remote
EnvRunners) to add this policy to. If defined, will only
add the given policy to these workers.
Raises:
KeyError: If the given `policy_id` already exists in this EnvRunnerGroup.
"""
if self.local_env_runner and policy_id in self.local_env_runner.policy_map:
raise KeyError(
f"Policy ID '{policy_id}' already exists in policy map! "
"Make sure you use a Policy ID that has not been taken yet."
" Policy IDs that are already in your policy map: "
f"{list(self.local_env_runner.policy_map.keys())}"
)
if workers is not DEPRECATED_VALUE:
deprecation_warning(
old="EnvRunnerGroup.add_policy(.., workers=..)",
help=(
"The `workers` argument to `EnvRunnerGroup.add_policy()` is "
"deprecated! Please do not use it anymore."
),
error=True,
)
if (policy_cls is None) == (policy is None):
raise ValueError(
"Only one of `policy_cls` or `policy` must be provided to "
"staticmethod: `EnvRunnerGroup.add_policy()`!"
)
validate_module_id(policy_id, error=False)
# Policy instance not provided: Use the information given here.
if policy_cls is not None:
new_policy_instance_kwargs = dict(
policy_id=policy_id,
policy_cls=policy_cls,
observation_space=observation_space,
action_space=action_space,
config=config,
policy_state=policy_state,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=list(policies_to_train)
if policies_to_train
else None,
module_spec=module_spec,
)
# Policy instance provided: Create clones of this very policy on the different
# workers (copy all its properties here for the calls to add_policy on the
# remote workers).
else:
new_policy_instance_kwargs = dict(
policy_id=policy_id,
policy_cls=type(policy),
observation_space=policy.observation_space,
action_space=policy.action_space,
config=policy.config,
policy_state=policy.get_state(),
policy_mapping_fn=policy_mapping_fn,
policies_to_train=list(policies_to_train)
if policies_to_train
else None,
module_spec=module_spec,
)
def _create_new_policy_fn(worker):
# `foreach_env_runner` function: Adds the policy to the worker (and
# maybe changes its policy_mapping_fn - if provided here).
worker.add_policy(**new_policy_instance_kwargs)
if self.local_env_runner is not None:
# Add policy directly by (already instantiated) object.
if policy is not None:
self.local_env_runner.add_policy(
policy_id=policy_id,
policy=policy,
policy_mapping_fn=policy_mapping_fn,
policies_to_train=policies_to_train,
module_spec=module_spec,
)
# Add policy by constructor kwargs.
else:
self.local_env_runner.add_policy(**new_policy_instance_kwargs)
# Add the policy to all remote workers.
self.foreach_env_runner(_create_new_policy_fn, local_env_runner=False)
def _make_worker(
self,
*,
env_creator: EnvCreator,
validate_env: Optional[Callable[[EnvType], None]],
worker_index: int,
num_workers: int,
recreated_worker: bool = False,
config: "AlgorithmConfig",
spaces: Optional[
Dict[PolicyID, Tuple[gym.spaces.Space, gym.spaces.Space]]
] = None,
) -> Union[EnvRunner, ActorHandle]:
kwargs = dict(
env_creator=env_creator,
validate_env=validate_env,
default_policy_class=self._policy_class,
config=config,
worker_index=worker_index,
num_workers=num_workers,
recreated_worker=recreated_worker,
log_dir=self._logdir,
spaces=spaces,
dataset_shards=self._ds_shards,
tune_trial_id=self._tune_trial_id,
)
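# Worker index 0 is the local EnvRunner; construct it in-process rather than as a Ray actor.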
if worker_index == 0:
return self.env_runner_cls(**kwargs)
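# For remote workers, pick a placement-group bundle index if a placement group is active (-1: none).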
pg_bundle_idx = (
-1
if ray.util.get_current_placement_group() is None
else self._pg_offset + worker_index
)
return (
ray.remote(**self._remote_args)(self.env_runner_cls)
.options(placement_group_bundle_index=pg_bundle_idx)
.remote(**kwargs)
)
@staticmethod
def _valid_module(class_path):
if (
isinstance(class_path, str)
and not os.path.isfile(class_path)
and "." in class_path
):
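# Split the dotted "module.Class" path and verify that the module part is importable.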
module_path, class_name = class_path.rsplit(".", 1)
try:
spec = importlib.util.find_spec(module_path)
if spec is not None:
return True
except (ModuleNotFoundError, ValueError) as e:
logger.warning(
f"module {module_path} not found using input {class_path} with error: {e}"
)
return False
| EnvRunnerGroup |
python | gevent__gevent | src/greentest/3.12/test_interpreters.py | {
"start": 1600,
"end": 2011
} | class ____(unittest.TestCase):
def os_pipe(self):
r, w = os.pipe()
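# Close both pipe ends at teardown, tolerating fds that were already closed.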
def cleanup():
try:
os.close(w)
except Exception:
pass
try:
os.close(r)
except Exception:
pass
self.addCleanup(cleanup)
return r, w
def tearDown(self):
clean_up_interpreters()
| TestBase |
python | tensorflow__tensorflow | tensorflow/python/ops/parsing_config.py | {
"start": 12075,
"end": 15150
} | class ____(
collections.namedtuple(
"SparseFeature",
["index_key", "value_key", "dtype", "size", "already_sorted"])):
"""Configuration for parsing a sparse input feature from an `Example`.
Note, preferably use `VarLenFeature` (possibly in combination with a
`SequenceExample`) in order to parse out `SparseTensor`s instead of
`SparseFeature` due to its simplicity.
Closely mimicking the `SparseTensor` that will be obtained by parsing an
`Example` with a `SparseFeature` config, a `SparseFeature` contains a
* `value_key`: The name of key for a `Feature` in the `Example` whose parsed
`Tensor` will be the resulting `SparseTensor.values`.
* `index_key`: A list of names - one for each dimension in the resulting
`SparseTensor` - whose `indices[i][dim]`, indicating the position of
the `i`-th value in the `dim` dimension, will be equal to the `i`-th value in
the Feature with key named `index_key[dim]` in the `Example`.
* `size`: A list of ints for the resulting `SparseTensor.dense_shape`.
For example, we can represent the following 2D `SparseTensor`
```python
SparseTensor(indices=[[3, 1], [20, 0]],
values=[0.5, -1.0]
dense_shape=[100, 3])
```
with an `Example` input proto
```python
features {
feature { key: "val" value { float_list { value: [ 0.5, -1.0 ] } } }
feature { key: "ix0" value { int64_list { value: [ 3, 20 ] } } }
feature { key: "ix1" value { int64_list { value: [ 1, 0 ] } } }
}
```
and `SparseFeature` config with 2 `index_key`s
```python
SparseFeature(index_key=["ix0", "ix1"],
value_key="val",
dtype=tf.float32,
size=[100, 3])
```
Fields:
index_key: A single string name or a list of string names of index features.
For each key the underlying feature's type must be `int64` and its length
must always match that of the `value_key` feature.
To represent `SparseTensor`s with a `dense_shape` of `rank` higher than 1
a list of length `rank` should be used.
value_key: Name of value feature. The underlying feature's type must
be `dtype` and its length must always match that of all the `index_key`s'
features.
dtype: Data type of the `value_key` feature.
size: A Python int or list thereof specifying the dense shape. Should be a
list if and only if `index_key` is a list. In that case the list must have
the same length as `index_key`. For each entry `i`, all values in
the `index_key[i]` feature must be in `[0, size[i])`.
already_sorted: A Python boolean to specify whether the values in
`value_key` are already sorted by their index position. If so skip
sorting. False by default (optional).
"""
def __new__(cls, index_key, value_key, dtype, size, already_sorted=False):
return super(SparseFeature, cls).__new__(
cls, index_key, value_key, dtype, size, already_sorted)
@tf_export("io.FixedLenFeature", v1=["io.FixedLenFeature", "FixedLenFeature"])
| SparseFeature |
python | plotly__plotly.py | plotly/graph_objs/funnel/legendgrouptitle/_font.py | {
"start": 233,
"end": 9921
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "funnel.legendgrouptitle"
_path_str = "funnel.legendgrouptitle.font"
_valid_props = {
"color",
"family",
"lineposition",
"shadow",
"size",
"style",
"textcase",
"variant",
"weight",
}
@property
def color(self):
"""
The 'color' property is a color and may be specified as:
- A hex string (e.g. '#ff0000')
- An rgb/rgba string (e.g. 'rgb(255,0,0)')
- An hsl/hsla string (e.g. 'hsl(0,100%,50%)')
- An hsv/hsva string (e.g. 'hsv(0,100%,100%)')
- A named CSS color: see https://plotly.com/python/css-colors/ for a list
Returns
-------
str
"""
return self["color"]
@color.setter
def color(self, val):
self["color"] = val
@property
def family(self):
"""
HTML font family - the typeface that will be applied by the web
browser. The web browser can only apply a font if it is
available on the system where it runs. Provide multiple font
families, separated by commas, to indicate the order in which
to apply fonts if they aren't available.
The 'family' property is a string and must be specified as:
- A non-empty string
Returns
-------
str
"""
return self["family"]
@family.setter
def family(self, val):
self["family"] = val
@property
def lineposition(self):
"""
Sets the kind of decoration line(s) with text, such as an
"under", "over" or "through" as well as combinations e.g.
"under+over", etc.
The 'lineposition' property is a flaglist and may be specified
as a string containing:
- Any combination of ['under', 'over', 'through'] joined with '+' characters
(e.g. 'under+over')
OR exactly one of ['none'] (e.g. 'none')
Returns
-------
Any
"""
return self["lineposition"]
@lineposition.setter
def lineposition(self, val):
self["lineposition"] = val
@property
def shadow(self):
"""
Sets the shape and color of the shadow behind text. "auto"
places minimal shadow and applies contrast text font color. See
https://developer.mozilla.org/en-US/docs/Web/CSS/text-shadow
for additional options.
The 'shadow' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["shadow"]
@shadow.setter
def shadow(self, val):
self["shadow"] = val
@property
def size(self):
"""
The 'size' property is a number and may be specified as:
- An int or float in the interval [1, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def style(self):
"""
Sets whether a font should be styled with a normal or italic
face from its family.
The 'style' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'italic']
Returns
-------
Any
"""
return self["style"]
@style.setter
def style(self, val):
self["style"] = val
@property
def textcase(self):
"""
Sets capitalization of text. It can be used to make text appear
in all-uppercase or all-lowercase, or with each word
capitalized.
The 'textcase' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'word caps', 'upper', 'lower']
Returns
-------
Any
"""
return self["textcase"]
@textcase.setter
def textcase(self, val):
self["textcase"] = val
@property
def variant(self):
"""
Sets the variant of the font.
The 'variant' property is an enumeration that may be specified as:
- One of the following enumeration values:
['normal', 'small-caps', 'all-small-caps',
'all-petite-caps', 'petite-caps', 'unicase']
Returns
-------
Any
"""
return self["variant"]
@variant.setter
def variant(self, val):
self["variant"] = val
@property
def weight(self):
"""
Sets the weight (or boldness) of the font.
The 'weight' property is a integer and may be specified as:
- An int (or float that will be cast to an int)
in the interval [1, 1000]
OR exactly one of ['normal', 'bold'] (e.g. 'bold')
Returns
-------
int
"""
return self["weight"]
@weight.setter
def weight(self, val):
self["weight"] = val
@property
def _prop_descriptions(self):
return """\
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
"""
def __init__(
self,
arg=None,
color=None,
family=None,
lineposition=None,
shadow=None,
size=None,
style=None,
textcase=None,
variant=None,
weight=None,
**kwargs,
):
"""
Construct a new Font object
Sets this legend group's title font.
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.funnel.legendgrouptitle.Font`
color
family
HTML font family - the typeface that will be applied by
the web browser. The web browser can only apply a font
if it is available on the system where it runs. Provide
multiple font families, separated by commas, to
indicate the order in which to apply fonts if they
aren't available.
lineposition
Sets the kind of decoration line(s) with text, such as
an "under", "over" or "through" as well as combinations
e.g. "under+over", etc.
shadow
Sets the shape and color of the shadow behind text.
"auto" places minimal shadow and applies contrast text
font color. See https://developer.mozilla.org/en-
US/docs/Web/CSS/text-shadow for additional options.
size
style
Sets whether a font should be styled with a normal or
italic face from its family.
textcase
Sets capitalization of text. It can be used to make
text appear in all-uppercase or all-lowercase, or with
each word capitalized.
variant
Sets the variant of the font.
weight
Sets the weight (or boldness) of the font.
Returns
-------
Font
"""
super().__init__("font")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.funnel.legendgrouptitle.Font
constructor must be a dict or
an instance of :class:`plotly.graph_objs.funnel.legendgrouptitle.Font`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("color", arg, color)
self._set_property("family", arg, family)
self._set_property("lineposition", arg, lineposition)
self._set_property("shadow", arg, shadow)
self._set_property("size", arg, size)
self._set_property("style", arg, style)
self._set_property("textcase", arg, textcase)
self._set_property("variant", arg, variant)
self._set_property("weight", arg, weight)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Font |
python | getsentry__sentry | src/sentry/models/files/abstractfileblobowner.py | {
"start": 110,
"end": 300
} | class ____(Model):
__relocation_scope__ = RelocationScope.Excluded
organization_id = BoundedBigIntegerField(db_index=True)
class Meta:
abstract = True
| AbstractFileBlobOwner |
python | optuna__optuna | tests/storages_tests/rdb_tests/test_models.py | {
"start": 4839,
"end": 7476
} | class ____:
@staticmethod
def test_default_datetime(session: Session) -> None:
# Regardless of the initial state, the trial created here should have a null datetime_start.
session.add(TrialModel(state=TrialState.WAITING))
session.commit()
trial_model = session.query(TrialModel).first()
assert trial_model is not None
assert trial_model.datetime_start is None
assert trial_model.datetime_complete is None
@staticmethod
def test_count(session: Session) -> None:
study_1 = StudyModel(study_id=1, study_name="test-study-1")
study_2 = StudyModel(study_id=2, study_name="test-study-2")
session.add(TrialModel(study_id=study_1.study_id, state=TrialState.COMPLETE))
session.add(TrialModel(study_id=study_1.study_id, state=TrialState.RUNNING))
session.add(TrialModel(study_id=study_2.study_id, state=TrialState.RUNNING))
session.commit()
assert 3 == TrialModel.count(session)
assert 2 == TrialModel.count(session, study=study_1)
assert 1 == TrialModel.count(session, state=TrialState.COMPLETE)
@staticmethod
def test_count_past_trials(session: Session) -> None:
study_1 = StudyModel(study_id=1, study_name="test-study-1")
study_2 = StudyModel(study_id=2, study_name="test-study-2")
trial_1_1 = TrialModel(study_id=study_1.study_id, state=TrialState.COMPLETE)
session.add(trial_1_1)
session.commit()
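# The first trial in the study has no earlier trials.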
assert 0 == trial_1_1.count_past_trials(session)
trial_1_2 = TrialModel(study_id=study_1.study_id, state=TrialState.RUNNING)
session.add(trial_1_2)
session.commit()
assert 1 == trial_1_2.count_past_trials(session)
trial_2_1 = TrialModel(study_id=study_2.study_id, state=TrialState.RUNNING)
session.add(trial_2_1)
session.commit()
assert 0 == trial_2_1.count_past_trials(session)
@staticmethod
def test_cascade_delete_on_study(session: Session) -> None:
study_id = 1
direction = StudyDirectionModel(direction=StudyDirection.MINIMIZE, objective=0)
study = StudyModel(study_id=study_id, study_name="test-study", directions=[direction])
study.trials.append(TrialModel(study_id=study.study_id, state=TrialState.COMPLETE))
study.trials.append(TrialModel(study_id=study.study_id, state=TrialState.RUNNING))
session.add(study)
session.commit()
assert 2 == TrialModel.count(session, study)
session.delete(study)
session.commit()
assert 0 == TrialModel.count(session, study)
| TestTrialModel |
python | pytorch__pytorch | torch/distributed/checkpoint/filesystem.py | {
"start": 16521,
"end": 18520
} | class ____(FileSystemBase):
@contextmanager
def create_stream(
self, path: Union[str, os.PathLike], mode: str
) -> Generator[io.IOBase, None, None]:
if not isinstance(path, Path):
path = Path(path)
with path.open(mode) as stream:
yield cast(io.IOBase, stream)
def concat_path(
self, path: Union[str, os.PathLike], suffix: str
) -> Union[str, os.PathLike]:
if not isinstance(path, Path):
path = Path(path)
return path / suffix
def init_path(self, path: Union[str, os.PathLike]) -> Union[str, os.PathLike]:
if not isinstance(path, Path):
path = Path(path)
return path
def rename(
self, path: Union[str, os.PathLike], new_path: Union[str, os.PathLike]
) -> None:
if not isinstance(path, Path):
path = Path(path)
path.rename(cast(Path, new_path))
def mkdir(self, path: Union[str, os.PathLike]) -> None:
if not isinstance(path, Path):
path = Path(path)
path.mkdir(parents=True, exist_ok=True)
@classmethod
def validate_checkpoint_id(cls, checkpoint_id: Union[str, os.PathLike]) -> bool:
if isinstance(checkpoint_id, Path):
return True
if "://" in str(checkpoint_id):
return False
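# The checkpoint id is valid if any existing ancestor directory of the path is writable.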
for p in Path(checkpoint_id).parents:
if p.exists() and os.access(str(p), os.W_OK):
return True
return False
def exists(self, path: Union[str, os.PathLike]) -> bool:
if not isinstance(path, Path):
path = Path(path)
return path.exists()
def rm_file(self, path: Union[str, os.PathLike]) -> None:
if not isinstance(path, Path):
path = Path(path)
path.unlink()
def ls(self, path: Union[str, os.PathLike]) -> list[str]:
if not isinstance(path, Path):
path = Path(path)
return [str(p) for p in path.iterdir()]
| FileSystem |
python | pypa__warehouse | tests/unit/manage/test_forms.py | {
"start": 453,
"end": 2308
} | class ____:
def test_validate(self):
user_service = pretend.stub(find_userid=pretend.call_recorder(lambda userid: 1))
form = forms.CreateRoleForm(
formdata=MultiDict({"role_name": "Owner", "username": "valid_username"}),
user_service=user_service,
)
assert form.user_service is user_service
assert form.validate(), str(form.errors)
def test_validate_username_with_no_user(self):
user_service = pretend.stub(
find_userid=pretend.call_recorder(lambda userid: None)
)
form = forms.CreateRoleForm(user_service=user_service)
field = pretend.stub(data="my_username")
with pytest.raises(wtforms.validators.ValidationError):
form.validate_username(field)
assert user_service.find_userid.calls == [pretend.call("my_username")]
def test_validate_username_with_user(self):
user_service = pretend.stub(find_userid=pretend.call_recorder(lambda userid: 1))
form = forms.CreateRoleForm(user_service=user_service)
field = pretend.stub(data="my_username")
form.validate_username(field)
assert user_service.find_userid.calls == [pretend.call("my_username")]
@pytest.mark.parametrize(
("value", "expected"),
[
("", "Select role"),
("invalid", "Not a valid choice."),
(None, "Select role"),
],
)
def test_validate_role_name_fails(self, value, expected):
user_service = pretend.stub(find_userid=pretend.call_recorder(lambda userid: 1))
form = forms.CreateRoleForm(
MultiDict({"role_name": value, "username": "valid_username"}),
user_service=user_service,
)
assert not form.validate()
assert form.role_name.errors == [expected]
| TestCreateRoleForm |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_datacatalog.py | {
"start": 9679,
"end": 11487
} | class ____:
@mock.patch(
"airflow.providers.google.cloud.operators.datacatalog.CloudDataCatalogHook",
**{"return_value.create_entry_group.return_value": TEST_ENTRY_GROUP},
)
def test_assert_valid_hook_call(self, mock_hook) -> None:
with pytest.warns(AirflowProviderDeprecationWarning):
task = CloudDataCatalogCreateEntryGroupOperator(
task_id="task_id",
location=TEST_LOCATION,
entry_group_id=TEST_ENTRY_GROUP_ID,
entry_group=TEST_ENTRY_GROUP,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_ti = mock.MagicMock()
mock_context = {"ti": mock_ti}
if not AIRFLOW_V_3_0_PLUS:
mock_context["task"] = task # type: ignore[assignment]
result = task.execute(context=mock_context) # type: ignore[arg-type]
mock_hook.assert_called_once_with(
gcp_conn_id=TEST_GCP_CONN_ID,
impersonation_chain=TEST_IMPERSONATION_CHAIN,
)
mock_hook.return_value.create_entry_group.assert_called_once_with(
location=TEST_LOCATION,
entry_group_id=TEST_ENTRY_GROUP_ID,
entry_group=TEST_ENTRY_GROUP,
project_id=TEST_PROJECT_ID,
retry=TEST_RETRY,
timeout=TEST_TIMEOUT,
metadata=TEST_METADATA,
)
mock_ti.xcom_push.assert_any_call(
key="entry_group_id",
value=TEST_ENTRY_GROUP_ID,
)
assert result == TEST_ENTRY_GROUP_DICT
| TestCloudDataCatalogCreateEntryGroupOperator |