language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | Netflix__metaflow | test/core/tests/extensions.py | {
"start": 72,
"end": 1341
} | class ____(MetaflowTest):
"""
Test that the metaflow_extensions module is properly loaded
"""
PRIORITY = 0
SKIP_GRAPHS = [
"simple_switch",
"nested_switch",
"branch_in_switch",
"foreach_in_switch",
"switch_in_branch",
"switch_in_foreach",
"recursive_switch",
"recursive_switch_inside_foreach",
]
@tag("test_step_decorator")
@steps(0, ["all"])
def step_all(self):
from metaflow.metaflow_config import METAFLOW_ADDITIONAL_VALUE
from metaflow import tl_value
from metaflow.plugins.nondecoplugin import my_value
from metaflow.exception import MetaflowTestException
from metaflow.plugins.frameworks.pytorch import NewPytorchParallelDecorator
self.plugin_value = my_value
self.tl_value = tl_value
self.additional_value = METAFLOW_ADDITIONAL_VALUE
def check_results(self, flow, checker):
for step in flow:
checker.assert_artifact(step.name, "additional_value", 42)
checker.assert_artifact(step.name, "tl_value", 42)
checker.assert_artifact(step.name, "plugin_value", 42)
checker.assert_artifact(step.name, "plugin_set_value", step.name)
| ExtensionsTest |
python | matplotlib__matplotlib | lib/matplotlib/tests/test_ticker.py | {
"start": 56135,
"end": 65417
} | class ____:
# (unicode_minus, input, expected) where ''expected'' corresponds to the
# outputs respectively returned when (places=None, places=0, places=2)
# unicode_minus is a boolean value for the rcParam['axes.unicode_minus']
raw_format_data = [
(False, -1234.56789, ('-1.23457 k', '-1 k', '-1.23 k')),
(True, -1234.56789, ('\N{MINUS SIGN}1.23457 k', '\N{MINUS SIGN}1 k',
'\N{MINUS SIGN}1.23 k')),
(False, -1.23456789, ('-1.23457', '-1', '-1.23')),
(True, -1.23456789, ('\N{MINUS SIGN}1.23457', '\N{MINUS SIGN}1',
'\N{MINUS SIGN}1.23')),
(False, -0.123456789, ('-123.457 m', '-123 m', '-123.46 m')),
(True, -0.123456789, ('\N{MINUS SIGN}123.457 m', '\N{MINUS SIGN}123 m',
'\N{MINUS SIGN}123.46 m')),
(False, -0.00123456789, ('-1.23457 m', '-1 m', '-1.23 m')),
(True, -0.00123456789, ('\N{MINUS SIGN}1.23457 m', '\N{MINUS SIGN}1 m',
'\N{MINUS SIGN}1.23 m')),
(True, -0.0, ('0', '0', '0.00')),
(True, -0, ('0', '0', '0.00')),
(True, 0, ('0', '0', '0.00')),
(True, 1.23456789e-6, ('1.23457 µ', '1 µ', '1.23 µ')),
(True, 0.123456789, ('123.457 m', '123 m', '123.46 m')),
(True, 0.1, ('100 m', '100 m', '100.00 m')),
(True, 1, ('1', '1', '1.00')),
(True, 1.23456789, ('1.23457', '1', '1.23')),
# places=0: corner-case rounding
(True, 999.9, ('999.9', '1 k', '999.90')),
# corner-case rounding for all
(True, 999.9999, ('1 k', '1 k', '1.00 k')),
# negative corner-case
(False, -999.9999, ('-1 k', '-1 k', '-1.00 k')),
(True, -999.9999, ('\N{MINUS SIGN}1 k', '\N{MINUS SIGN}1 k',
'\N{MINUS SIGN}1.00 k')),
(True, 1000, ('1 k', '1 k', '1.00 k')),
(True, 1001, ('1.001 k', '1 k', '1.00 k')),
(True, 100001, ('100.001 k', '100 k', '100.00 k')),
(True, 987654.321, ('987.654 k', '988 k', '987.65 k')),
# OoR value (> 1000 Q)
(True, 1.23e33, ('1230 Q', '1230 Q', '1230.00 Q'))
]
@pytest.mark.parametrize('unicode_minus, input, expected', raw_format_data)
def test_params(self, unicode_minus, input, expected):
"""
Test the formatting of EngFormatter for various values of the 'places'
argument, in several cases:
0. without a unit symbol but with a (default) space separator;
1. with both a unit symbol and a (default) space separator;
2. with both a unit symbol and some non default separators;
3. without a unit symbol but with some non default separators.
Note that cases 2. and 3. are looped over several separator strings.
"""
plt.rcParams['axes.unicode_minus'] = unicode_minus
UNIT = 's' # seconds
DIGITS = '0123456789' # %timeit showed 10-20% faster search than set
# Case 0: unit='' (default) and sep=' ' (default).
# 'expected' already corresponds to this reference case.
exp_outputs = expected
formatters = (
mticker.EngFormatter(), # places=None (default)
mticker.EngFormatter(places=0),
mticker.EngFormatter(places=2)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
# Case 1: unit=UNIT and sep=' ' (default).
# Append a unit symbol to the reference case.
# Beware of the values in [1, 1000), where there is no prefix!
exp_outputs = (_s + " " + UNIT if _s[-1] in DIGITS # case w/o prefix
else _s + UNIT for _s in expected)
formatters = (
mticker.EngFormatter(unit=UNIT), # places=None (default)
mticker.EngFormatter(unit=UNIT, places=0),
mticker.EngFormatter(unit=UNIT, places=2)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
# Test several non default separators: no separator, a narrow
# no-break space (Unicode character) and an extravagant string.
for _sep in ("", "\N{NARROW NO-BREAK SPACE}", "@_@"):
# Case 2: unit=UNIT and sep=_sep.
# Replace the default space separator from the reference case
# with the tested one `_sep` and append a unit symbol to it.
exp_outputs = (_s + _sep + UNIT if _s[-1] in DIGITS # no prefix
else _s.replace(" ", _sep) + UNIT
for _s in expected)
formatters = (
mticker.EngFormatter(unit=UNIT, sep=_sep), # places=None
mticker.EngFormatter(unit=UNIT, places=0, sep=_sep),
mticker.EngFormatter(unit=UNIT, places=2, sep=_sep)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
# Case 3: unit='' (default) and sep=_sep.
# Replace the default space separator from the reference case
# with the tested one `_sep`. Reference case is already unitless.
exp_outputs = (_s.replace(" ", _sep) for _s in expected)
formatters = (
mticker.EngFormatter(sep=_sep), # places=None (default)
mticker.EngFormatter(places=0, sep=_sep),
mticker.EngFormatter(places=2, sep=_sep)
)
for _formatter, _exp_output in zip(formatters, exp_outputs):
assert _formatter(input) == _exp_output
def test_engformatter_usetex_useMathText():
fig, ax = plt.subplots()
ax.plot([0, 500, 1000], [0, 500, 1000])
ax.set_xticks([0, 500, 1000])
for formatter in (mticker.EngFormatter(usetex=True),
mticker.EngFormatter(useMathText=True)):
ax.xaxis.set_major_formatter(formatter)
fig.canvas.draw()
x_tick_label_text = [labl.get_text() for labl in ax.get_xticklabels()]
# Checking if the dollar `$` signs have been inserted around numbers
# in tick labels.
assert x_tick_label_text == ['$0$', '$500$', '$1$ k']
@pytest.mark.parametrize(
'data_offset, noise, oom_center_desired, oom_noise_desired', [
(271_490_000_000.0, 10, 9, 0),
(27_149_000_000_000.0, 10_000_000, 12, 6),
(27.149, 0.01, 0, -3),
(2_714.9, 0.01, 3, -3),
(271_490.0, 0.001, 3, -3),
(271.49, 0.001, 0, -3),
# The following sets of parameters demonstrates that when
# oom(data_offset)-1 and oom(noise)-2 equal a standard 3*N oom, we get
# that oom_noise_desired < oom(noise)
(27_149_000_000.0, 100, 9, +3),
(27.149, 1e-07, 0, -6),
(271.49, 0.0001, 0, -3),
(27.149, 0.0001, 0, -3),
# Tests where oom(data_offset) <= oom(noise), those are probably
# covered by the part where formatter.offset != 0
(27_149.0, 10_000, 0, 3),
(27.149, 10_000, 0, 3),
(27.149, 1_000, 0, 3),
(27.149, 100, 0, 0),
(27.149, 10, 0, 0),
]
)
def test_engformatter_offset_oom(
data_offset,
noise,
oom_center_desired,
oom_noise_desired
):
UNIT = "eV"
fig, ax = plt.subplots()
ydata = data_offset + np.arange(-5, 7, dtype=float)*noise
ax.plot(ydata)
formatter = mticker.EngFormatter(useOffset=True, unit=UNIT)
# So that offset strings will always have the same size
formatter.ENG_PREFIXES[0] = "_"
ax.yaxis.set_major_formatter(formatter)
fig.canvas.draw()
offset_got = formatter.get_offset()
ticks_got = [labl.get_text() for labl in ax.get_yticklabels()]
# Predicting whether offset should be 0 or not is essentially testing
# ScalarFormatter._compute_offset . This function is pretty complex and it
# would be nice to test it, but this is out of scope for this test which
# only makes sure that offset text and the ticks gets the correct unit
# prefixes and the ticks.
if formatter.offset:
prefix_noise_got = offset_got[2]
prefix_noise_desired = formatter.ENG_PREFIXES[oom_noise_desired]
prefix_center_got = offset_got[-1-len(UNIT)]
prefix_center_desired = formatter.ENG_PREFIXES[oom_center_desired]
assert prefix_noise_desired == prefix_noise_got
assert prefix_center_desired == prefix_center_got
# Make sure the ticks didn't get the UNIT
for tick in ticks_got:
assert UNIT not in tick
else:
assert oom_center_desired == 0
assert offset_got == ""
# Make sure the ticks contain now the prefixes
for tick in ticks_got:
# 0 is zero on all orders of magnitudes, no matter what is
# oom_noise_desired
prefix_idx = 0 if tick[0] == "0" else oom_noise_desired
assert tick.endswith(formatter.ENG_PREFIXES[prefix_idx] + UNIT)
| TestEngFormatter |
python | spack__spack | lib/spack/spack/test/error_messages.py | {
"start": 664,
"end": 803
} | class ____(Package):
version("1.2")
version("1.1")
depends_on("x2")
depends_on("x3")
""",
)
_pkgx2 = (
"x2",
"""\
| X1 |
python | pypa__setuptools | setuptools/_distutils/command/sdist.py | {
"start": 1021,
"end": 19151
} | class ____(Command):
description = "create a source distribution (tarball, zip file, etc.)"
def checking_metadata(self) -> bool:
"""Callable used for the check sub-command.
Placed here so user_options can view it"""
return self.metadata_check
user_options = [
('template=', 't', "name of manifest template file [default: MANIFEST.in]"),
('manifest=', 'm', "name of manifest file [default: MANIFEST]"),
(
'use-defaults',
None,
"include the default file set in the manifest "
"[default; disable with --no-defaults]",
),
('no-defaults', None, "don't include the default file set"),
(
'prune',
None,
"specifically exclude files/directories that should not be "
"distributed (build tree, RCS/CVS dirs, etc.) "
"[default; disable with --no-prune]",
),
('no-prune', None, "don't automatically exclude anything"),
(
'manifest-only',
'o',
"just regenerate the manifest and then stop (implies --force-manifest)",
),
(
'force-manifest',
'f',
"forcibly regenerate the manifest and carry on as usual. "
"Deprecated: now the manifest is always regenerated.",
),
('formats=', None, "formats for source distribution (comma-separated list)"),
(
'keep-temp',
'k',
"keep the distribution tree around after creating " + "archive file(s)",
),
(
'dist-dir=',
'd',
"directory to put the source distribution archive(s) in [default: dist]",
),
(
'metadata-check',
None,
"Ensure that all required elements of meta-data "
"are supplied. Warn if any missing. [default]",
),
(
'owner=',
'u',
"Owner name used when creating a tar file [default: current user]",
),
(
'group=',
'g',
"Group name used when creating a tar file [default: current group]",
),
]
boolean_options: ClassVar[list[str]] = [
'use-defaults',
'prune',
'manifest-only',
'force-manifest',
'keep-temp',
'metadata-check',
]
help_options: ClassVar[list[tuple[str, str | None, str, Callable[[], object]]]] = [
('help-formats', None, "list available distribution formats", show_formats),
]
negative_opt: ClassVar[dict[str, str]] = {
'no-defaults': 'use-defaults',
'no-prune': 'prune',
}
sub_commands = [('check', checking_metadata)]
READMES: ClassVar[tuple[str, ...]] = ('README', 'README.txt', 'README.rst')
def initialize_options(self):
# 'template' and 'manifest' are, respectively, the names of
# the manifest template and manifest file.
self.template = None
self.manifest = None
# 'use_defaults': if true, we will include the default file set
# in the manifest
self.use_defaults = True
self.prune = True
self.manifest_only = False
self.force_manifest = False
self.formats = ['gztar']
self.keep_temp = False
self.dist_dir = None
self.archive_files = None
self.metadata_check = True
self.owner = None
self.group = None
def finalize_options(self) -> None:
if self.manifest is None:
self.manifest = "MANIFEST"
if self.template is None:
self.template = "MANIFEST.in"
self.ensure_string_list('formats')
bad_format = archive_util.check_archive_formats(self.formats)
if bad_format:
raise DistutilsOptionError(f"unknown archive format '{bad_format}'")
if self.dist_dir is None:
self.dist_dir = "dist"
def run(self) -> None:
# 'filelist' contains the list of files that will make up the
# manifest
self.filelist = FileList()
# Run sub commands
for cmd_name in self.get_sub_commands():
self.run_command(cmd_name)
# Do whatever it takes to get the list of files to process
# (process the manifest template, read an existing manifest,
# whatever). File list is accumulated in 'self.filelist'.
self.get_file_list()
# If user just wanted us to regenerate the manifest, stop now.
if self.manifest_only:
return
# Otherwise, go ahead and create the source distribution tarball,
# or zipfile, or whatever.
self.make_distribution()
def get_file_list(self) -> None:
"""Figure out the list of files to include in the source
distribution, and put it in 'self.filelist'. This might involve
reading the manifest template (and writing the manifest), or just
reading the manifest, or just using the default file set -- it all
depends on the user's options.
"""
# new behavior when using a template:
# the file list is recalculated every time because
# even if MANIFEST.in or setup.py are not changed
# the user might have added some files in the tree that
# need to be included.
#
# This makes --force the default and only behavior with templates.
template_exists = os.path.isfile(self.template)
if not template_exists and self._manifest_is_not_generated():
self.read_manifest()
self.filelist.sort()
self.filelist.remove_duplicates()
return
if not template_exists:
self.warn(
("manifest template '%s' does not exist " + "(using default file list)")
% self.template
)
self.filelist.findall()
if self.use_defaults:
self.add_defaults()
if template_exists:
self.read_template()
if self.prune:
self.prune_file_list()
self.filelist.sort()
self.filelist.remove_duplicates()
self.write_manifest()
def add_defaults(self) -> None:
"""Add all the default files to self.filelist:
- README or README.txt
- setup.py
- tests/test*.py and test/test*.py
- all pure Python modules mentioned in setup script
- all files pointed by package_data (build_py)
- all files defined in data_files.
- all files defined as scripts.
- all C sources listed as part of extensions or C libraries
in the setup script (doesn't catch C headers!)
Warns if (README or README.txt) or setup.py are missing; everything
else is optional.
"""
self._add_defaults_standards()
self._add_defaults_optional()
self._add_defaults_python()
self._add_defaults_data_files()
self._add_defaults_ext()
self._add_defaults_c_libs()
self._add_defaults_scripts()
@staticmethod
def _cs_path_exists(fspath):
"""
Case-sensitive path existence check
>>> sdist._cs_path_exists(__file__)
True
>>> sdist._cs_path_exists(__file__.upper())
False
"""
if not os.path.exists(fspath):
return False
# make absolute so we always have a directory
abspath = os.path.abspath(fspath)
directory, filename = os.path.split(abspath)
return filename in os.listdir(directory)
def _add_defaults_standards(self):
standards = [self.READMES, self.distribution.script_name]
for fn in standards:
if isinstance(fn, tuple):
alts = fn
got_it = False
for fn in alts:
if self._cs_path_exists(fn):
got_it = True
self.filelist.append(fn)
break
if not got_it:
self.warn(
"standard file not found: should have one of " + ', '.join(alts)
)
else:
if self._cs_path_exists(fn):
self.filelist.append(fn)
else:
self.warn(f"standard file '{fn}' not found")
def _add_defaults_optional(self):
optional = ['tests/test*.py', 'test/test*.py', 'setup.cfg']
for pattern in optional:
files = filter(os.path.isfile, glob(pattern))
self.filelist.extend(files)
def _add_defaults_python(self):
# build_py is used to get:
# - python modules
# - files defined in package_data
build_py = self.get_finalized_command('build_py')
# getting python files
if self.distribution.has_pure_modules():
self.filelist.extend(build_py.get_source_files())
# getting package_data files
# (computed in build_py.data_files by build_py.finalize_options)
for _pkg, src_dir, _build_dir, filenames in build_py.data_files:
for filename in filenames:
self.filelist.append(os.path.join(src_dir, filename))
def _add_defaults_data_files(self):
# getting distribution.data_files
if self.distribution.has_data_files():
for item in self.distribution.data_files:
if isinstance(item, str):
# plain file
item = convert_path(item)
if os.path.isfile(item):
self.filelist.append(item)
else:
# a (dirname, filenames) tuple
dirname, filenames = item
for f in filenames:
f = convert_path(f)
if os.path.isfile(f):
self.filelist.append(f)
def _add_defaults_ext(self):
if self.distribution.has_ext_modules():
build_ext = self.get_finalized_command('build_ext')
self.filelist.extend(build_ext.get_source_files())
def _add_defaults_c_libs(self):
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.filelist.extend(build_clib.get_source_files())
def _add_defaults_scripts(self):
if self.distribution.has_scripts():
build_scripts = self.get_finalized_command('build_scripts')
self.filelist.extend(build_scripts.get_source_files())
def read_template(self) -> None:
"""Read and parse manifest template file named by self.template.
(usually "MANIFEST.in") The parsing and processing is done by
'self.filelist', which updates itself accordingly.
"""
log.info("reading manifest template '%s'", self.template)
template = TextFile(
self.template,
strip_comments=True,
skip_blanks=True,
join_lines=True,
lstrip_ws=True,
rstrip_ws=True,
collapse_join=True,
)
try:
while True:
line = template.readline()
if line is None: # end of file
break
try:
self.filelist.process_template_line(line)
# the call above can raise a DistutilsTemplateError for
# malformed lines, or a ValueError from the lower-level
# convert_path function
except (DistutilsTemplateError, ValueError) as msg:
self.warn(
f"{template.filename}, line {int(template.current_line)}: {msg}"
)
finally:
template.close()
def prune_file_list(self) -> None:
"""Prune off branches that might slip into the file list as created
by 'read_template()', but really don't belong there:
* the build tree (typically "build")
* the release tree itself (only an issue if we ran "sdist"
previously with --keep-temp, or it aborted)
* any RCS, CVS, .svn, .hg, .git, .bzr, _darcs directories
"""
build = self.get_finalized_command('build')
base_dir = self.distribution.get_fullname()
self.filelist.exclude_pattern(None, prefix=os.fspath(build.build_base))
self.filelist.exclude_pattern(None, prefix=base_dir)
if sys.platform == 'win32':
seps = r'/|\\'
else:
seps = '/'
vcs_dirs = ['RCS', 'CVS', r'\.svn', r'\.hg', r'\.git', r'\.bzr', '_darcs']
vcs_ptrn = r'(^|{})({})({}).*'.format(seps, '|'.join(vcs_dirs), seps)
self.filelist.exclude_pattern(vcs_ptrn, is_regex=True)
def write_manifest(self) -> None:
"""Write the file list in 'self.filelist' (presumably as filled in
by 'add_defaults()' and 'read_template()') to the manifest file
named by 'self.manifest'.
"""
if self._manifest_is_not_generated():
log.info(
f"not writing to manually maintained manifest file '{self.manifest}'"
)
return
content = self.filelist.files[:]
content.insert(0, '# file GENERATED by distutils, do NOT edit')
self.execute(
file_util.write_file,
(self.manifest, content),
f"writing manifest file '{self.manifest}'",
)
def _manifest_is_not_generated(self):
# check for special comment used in 3.1.3 and higher
if not os.path.isfile(self.manifest):
return False
with open(self.manifest, encoding='utf-8') as fp:
first_line = next(fp)
return first_line != '# file GENERATED by distutils, do NOT edit\n'
def read_manifest(self) -> None:
"""Read the manifest file (named by 'self.manifest') and use it to
fill in 'self.filelist', the list of files to include in the source
distribution.
"""
log.info("reading manifest file '%s'", self.manifest)
with open(self.manifest, encoding='utf-8') as lines:
self.filelist.extend(
# ignore comments and blank lines
filter(None, filterfalse(is_comment, map(str.strip, lines)))
)
def make_release_tree(self, base_dir, files) -> None:
"""Create the directory tree that will become the source
distribution archive. All directories implied by the filenames in
'files' are created under 'base_dir', and then we hard link or copy
(if hard linking is unavailable) those files into place.
Essentially, this duplicates the developer's source tree, but in a
directory named after the distribution, containing only the files
to be distributed.
"""
# Create all the directories under 'base_dir' necessary to
# put 'files' there; the 'mkpath()' is just so we don't die
# if the manifest happens to be empty.
self.mkpath(base_dir)
dir_util.create_tree(base_dir, files, dry_run=self.dry_run)
# And walk over the list of files, either making a hard link (if
# os.link exists) to each one that doesn't already exist in its
# corresponding location under 'base_dir', or copying each file
# that's out-of-date in 'base_dir'. (Usually, all files will be
# out-of-date, because by default we blow away 'base_dir' when
# we're done making the distribution archives.)
if hasattr(os, 'link'): # can make hard links on this system
link = 'hard'
msg = f"making hard links in {base_dir}..."
else: # nope, have to copy
link = None
msg = f"copying files to {base_dir}..."
if not files:
log.warning("no files to distribute -- empty manifest?")
else:
log.info(msg)
for file in files:
if not os.path.isfile(file):
log.warning("'%s' not a regular file -- skipping", file)
else:
dest = os.path.join(base_dir, file)
self.copy_file(file, dest, link=link)
self.distribution.metadata.write_pkg_info(base_dir)
def make_distribution(self) -> None:
"""Create the source distribution(s). First, we create the release
tree with 'make_release_tree()'; then, we create all required
archive files (according to 'self.formats') from the release tree.
Finally, we clean up by blowing away the release tree (unless
'self.keep_temp' is true). The list of archive files created is
stored so it can be retrieved later by 'get_archive_files()'.
"""
# Don't warn about missing meta-data here -- should be (and is!)
# done elsewhere.
base_dir = self.distribution.get_fullname()
base_name = os.path.join(self.dist_dir, base_dir)
self.make_release_tree(base_dir, self.filelist.files)
archive_files = [] # remember names of files we create
# tar archive must be created last to avoid overwrite and remove
if 'tar' in self.formats:
self.formats.append(self.formats.pop(self.formats.index('tar')))
for fmt in self.formats:
file = self.make_archive(
base_name, fmt, base_dir=base_dir, owner=self.owner, group=self.group
)
archive_files.append(file)
self.distribution.dist_files.append(('sdist', '', file))
self.archive_files = archive_files
if not self.keep_temp:
dir_util.remove_tree(base_dir, dry_run=self.dry_run)
def get_archive_files(self):
"""Return the list of archive files created when the command
was run, or None if the command hasn't run yet.
"""
return self.archive_files
def is_comment(line: str) -> bool:
return line.startswith('#')
| sdist |
python | vyperlang__vyper | vyper/codegen/jumptable_utils.py | {
"start": 259,
"end": 1008
} | class ____:
bucket_id: int
magic: int
method_ids: list[int]
@property
def image(self):
return _image_of([s for s in self.method_ids], self.magic)
@property
# return method ids, sorted by by their image
def method_ids_image_order(self):
return [x[1] for x in sorted(zip(self.image, self.method_ids))]
@property
def bucket_size(self):
return len(self.method_ids)
BITS_MAGIC = 24 # a constant which produced good results, see _bench_dense()
def _image_of(xs, magic):
bits_shift = BITS_MAGIC
# take the upper bits from the multiplication for more entropy
# can we do better using primes of some sort?
return [((x * magic) >> bits_shift) % len(xs) for x in xs]
| Bucket |
python | pdm-project__pdm | src/pdm/cli/utils.py | {
"start": 4655,
"end": 6368
} | class ____(argparse.ArgumentParser):
"""A standard argument parser but with title-cased help."""
def __init__(self, *args: Any, **kwargs: Any) -> None:
if sys.version_info >= (3, 14):
kwargs["formatter_class"] = argparse.RawDescriptionHelpFormatter
else:
kwargs["formatter_class"] = PdmFormatter
kwargs["add_help"] = False
super().__init__(*args, **kwargs)
self.add_argument(
"-h", "--help", action="help", default=argparse.SUPPRESS, help="Show this help message and exit."
)
self._optionals.title = "options"
def parse_known_args(self, args: Any = None, namespace: Any = None) -> Any:
args, argv = super().parse_known_args(args, namespace)
if argv:
msg = _("unrecognized arguments: %s")
self.error(msg % " ".join(argv))
return args, argv
def format_similar_command(root_command: str, commands: list[str], script_commands: list[str]) -> str:
from difflib import get_close_matches
similar_commands = get_close_matches(root_command, commands)
similar_script_commands = get_close_matches(root_command, script_commands)
commands_text = "\n".join([f" - {cmd}" for cmd in similar_commands])
script_commands_text = "\n".join([f" - {cmd}" for cmd in similar_script_commands])
message = f"[red]Command not found: {root_command}[/]"
if commands_text:
message += f"""
[green]Did you mean one of these commands?
{commands_text}[/]"""
if script_commands_text:
message += f"""
[yellow]{"Or" if commands_text else "Did you mean"} one of these script commands?
{script_commands_text}[/]"""
return message
| ArgumentParser |
python | pandas-dev__pandas | pandas/tests/arrays/masked_shared.py | {
"start": 1545,
"end": 5195
} | class ____:
# Shared by IntegerArray and FloatingArray, not BooleanArray
def test_searchsorted_nan(self, dtype):
# The base class casts to object dtype, for which searchsorted returns
# 0 from the left and 10 from the right.
arr = pd.array(range(10), dtype=dtype)
assert arr.searchsorted(np.nan, side="left") == 10
assert arr.searchsorted(np.nan, side="right") == 10
def test_no_shared_mask(self, data):
result = data + 1
assert not tm.shares_memory(result, data)
def test_array(self, comparison_op, dtype):
op = comparison_op
left = pd.array([0, 1, 2, None, None, None], dtype=dtype)
right = pd.array([0, 1, None, 0, 1, None], dtype=dtype)
result = op(left, right)
values = op(left._data, right._data)
mask = left._mask | right._mask
expected = pd.arrays.BooleanArray(values, mask)
tm.assert_extension_array_equal(result, expected)
# ensure we haven't mutated anything inplace
result[0] = pd.NA
tm.assert_extension_array_equal(
left, pd.array([0, 1, 2, None, None, None], dtype=dtype)
)
tm.assert_extension_array_equal(
right, pd.array([0, 1, None, 0, 1, None], dtype=dtype)
)
def test_compare_with_booleanarray(self, comparison_op, dtype):
op = comparison_op
left = pd.array([True, False, None] * 3, dtype="boolean")
right = pd.array([0] * 3 + [1] * 3 + [None] * 3, dtype=dtype)
other = pd.array([False] * 3 + [True] * 3 + [None] * 3, dtype="boolean")
expected = op(left, other)
result = op(left, right)
tm.assert_extension_array_equal(result, expected)
# reversed op
expected = op(other, left)
result = op(right, left)
tm.assert_extension_array_equal(result, expected)
def test_compare_to_string(self, dtype):
# GH#28930
ser = pd.Series([1, None], dtype=dtype)
result = ser == "a"
expected = pd.Series([False, pd.NA], dtype="boolean")
tm.assert_series_equal(result, expected)
def test_ufunc_with_out(self, dtype):
arr = pd.array([1, 2, 3], dtype=dtype)
arr2 = pd.array([1, 2, pd.NA], dtype=dtype)
mask = arr == arr
mask2 = arr2 == arr2
result = np.zeros(3, dtype=bool)
result |= mask
# If MaskedArray.__array_ufunc__ handled "out" appropriately,
# `result` should still be an ndarray.
assert isinstance(result, np.ndarray)
assert result.all()
# result |= mask worked because mask could be cast losslessly to
# boolean ndarray. mask2 can't, so this raises
result = np.zeros(3, dtype=bool)
msg = "Specify an appropriate 'na_value' for this dtype"
with pytest.raises(ValueError, match=msg):
result |= mask2
# addition
res = np.add(arr, arr2)
expected = pd.array([2, 4, pd.NA], dtype=dtype)
tm.assert_extension_array_equal(res, expected)
# when passing out=arr, we will modify 'arr' inplace.
res = np.add(arr, arr2, out=arr)
assert res is arr
tm.assert_extension_array_equal(res, expected)
tm.assert_extension_array_equal(arr, expected)
def test_mul_td64_array(self, dtype):
# GH#45622
arr = pd.array([1, 2, pd.NA], dtype=dtype)
other = np.arange(3, dtype=np.int64).view("m8[ns]")
result = arr * other
expected = pd.array([pd.Timedelta(0), pd.Timedelta(2), pd.NaT])
tm.assert_extension_array_equal(result, expected)
| NumericOps |
python | kamyu104__LeetCode-Solutions | Python/continuous-subarrays.py | {
"start": 65,
"end": 924
} | class ____(object):
def continuousSubarrays(self, nums):
"""
:type nums: List[int]
:rtype: int
"""
result = left = 0
mn, mx = float("inf"), float("-inf")
for right in xrange(len(nums)):
if mn <= nums[right] <= mx:
mn, mx = max(mn, nums[right]-2), min(mx, nums[right]+2)
else:
mn, mx = nums[right]-2, nums[right]+2
for left in reversed(xrange(right)):
if not mn <= nums[left] <= mx:
break
mn, mx = max(mn, nums[left]-2), min(mx, nums[left]+2)
else:
left = -1
left += 1
result += right-left+1
return result
# Time: O(n)
# Space: O(n)
import collections
# mono deque, two pointers
| Solution |
python | gevent__gevent | src/gevent/tests/test__order.py | {
"start": 634,
"end": 669
} | class ____(Test):
count = 3
| Test3 |
python | lazyprogrammer__machine_learning_examples | rl2/cartpole/dqn_theano.py | {
"start": 1857,
"end": 6950
} | class ____:
def __init__(self, D, K, hidden_layer_sizes, gamma, max_experiences=10000, min_experiences=100, batch_sz=32):
self.K = K
lr = 1e-2
mu = 0.
decay = 0.99
# create the graph
self.layers = []
M1 = D
for M2 in hidden_layer_sizes:
layer = HiddenLayer(M1, M2)
self.layers.append(layer)
M1 = M2
# final layer
layer = HiddenLayer(M1, K, lambda x: x)
self.layers.append(layer)
# collect params for copy
self.params = []
for layer in self.layers:
self.params += layer.params
# inputs and targets
X = T.matrix('X')
G = T.vector('G')
actions = T.ivector('actions')
# calculate output and cost
Z = X
for layer in self.layers:
Z = layer.forward(Z)
Y_hat = Z
selected_action_values = Y_hat[T.arange(actions.shape[0]), actions]
cost = T.sum((G - selected_action_values)**2)
# create train function
updates = adam(cost, self.params)
# compile functions
self.train_op = theano.function(
inputs=[X, G, actions],
updates=updates,
allow_input_downcast=True
)
self.predict_op = theano.function(
inputs=[X],
outputs=Y_hat,
allow_input_downcast=True
)
# create replay memory
self.experience = {'s': [], 'a': [], 'r': [], 's2': [], 'done': []}
self.max_experiences = max_experiences
self.min_experiences = min_experiences
self.batch_sz = batch_sz
self.gamma = gamma
def copy_from(self, other):
my_params = self.params
other_params = other.params
for p, q in zip(my_params, other_params):
actual = q.get_value()
p.set_value(actual)
def predict(self, X):
X = np.atleast_2d(X)
return self.predict_op(X)
def train(self, target_network):
# sample a random batch from buffer, do an iteration of GD
if len(self.experience['s']) < self.min_experiences:
# don't do anything if we don't have enough experience
return
# randomly select a batch
idx = np.random.choice(len(self.experience['s']), size=self.batch_sz, replace=False)
# print("idx:", idx)
states = [self.experience['s'][i] for i in idx]
actions = [self.experience['a'][i] for i in idx]
rewards = [self.experience['r'][i] for i in idx]
next_states = [self.experience['s2'][i] for i in idx]
dones = [self.experience['done'][i] for i in idx]
next_Q = np.max(target_network.predict(next_states), axis=1)
targets = [r + self.gamma*next_q if not done else r for r, next_q, done in zip(rewards, next_Q, dones)]
# call optimizer
self.train_op(states, targets, actions)
def add_experience(self, s, a, r, s2, done):
    """Append one transition (s, a, r, s2, done) to the replay buffer.

    The buffer is bounded: when it is full, the oldest transition is
    dropped from every parallel list before appending (FIFO eviction).
    """
    keys = ('s', 'a', 'r', 's2', 'done')
    buffer = self.experience
    if len(buffer['s']) >= self.max_experiences:
        # Evict the oldest entry from each parallel list.
        for key in keys:
            buffer[key].pop(0)
    for key, value in zip(keys, (s, a, r, s2, done)):
        buffer[key].append(value)
def sample_action(self, x, eps):
    """Epsilon-greedy action selection for state `x`.

    With probability `eps` a uniformly random action index in [0, K) is
    returned; otherwise the greedy action under the current Q estimates.
    """
    if np.random.random() < eps:
        # Explore: uniform random action.
        return np.random.choice(self.K)
    # Exploit: greedy action w.r.t. the network's Q-values.
    q_values = self.predict(np.atleast_2d(x))
    return np.argmax(q_values[0])
def play_one(env, model, tmodel, eps, gamma, copy_period):
    """Run one episode, training `model` online; return the episode reward.

    Every `copy_period` global steps the target network `tmodel` is synced
    from `model`. `global_iters` is a module-level step counter shared
    across episodes.
    """
    global global_iters
    observation = env.reset()
    done = False
    episode_reward = 0
    steps = 0
    # Hard cap at 2000 steps so a runaway episode cannot loop forever
    # (the environment's own 200-step limit ends episodes too early).
    while not done and steps < 2000:
        action = model.sample_action(observation, eps)
        prev_observation = observation
        observation, reward, done, info = env.step(action)
        episode_reward += reward
        if done:
            # Penalize episode termination; note this penalty is fed to the
            # learner but deliberately not counted in the returned reward.
            reward = -200
        # Store the transition and take one training step.
        model.add_experience(prev_observation, action, reward, observation, done)
        model.train(tmodel)
        steps += 1
        global_iters += 1
        if global_iters % copy_period == 0:
            # Periodic target-network sync.
            tmodel.copy_from(model)
    return episode_reward
def main():
    """Train a DQN agent on CartPole-v0 and plot per-episode rewards.

    Pass 'monitor' on the command line to record the run with gym's
    Monitor wrapper.
    """
    env = gym.make('CartPole-v0')
    gamma = 0.99
    copy_period = 50  # sync target network every 50 global steps

    D = len(env.observation_space.sample())  # state dimensionality
    K = env.action_space.n                   # number of discrete actions
    sizes = [200, 200]
    model = DQN(D, K, sizes, gamma)
    tmodel = DQN(D, K, sizes, gamma)

    if 'monitor' in sys.argv:
        filename = os.path.basename(__file__).split('.')[0]
        monitor_dir = './' + filename + '_' + str(datetime.now())
        env = wrappers.Monitor(env, monitor_dir)

    N = 500
    totalrewards = np.empty(N)
    # NOTE: removed dead `costs = np.empty(N)` — it was allocated but never
    # written or read anywhere in this function.
    for n in range(N):
        # Decaying exploration rate: 1, 1/sqrt(2), 1/sqrt(3), ...
        eps = 1.0 / np.sqrt(n + 1)
        totalreward = play_one(env, model, tmodel, eps, gamma, copy_period)
        totalrewards[n] = totalreward
        if n % 100 == 0:
            print("episode:", n, "total reward:", totalreward, "eps:", eps, "avg reward (last 100):", totalrewards[max(0, n-100):(n+1)].mean())

    print("avg reward for last 100 episodes:", totalrewards[-100:].mean())
    print("total steps:", totalrewards.sum())

    plt.plot(totalrewards)
    plt.title("Rewards")
    plt.show()

    plot_running_avg(totalrewards)


if __name__ == '__main__':
    main()
| DQN |
python | dagster-io__dagster | python_modules/libraries/dagster-cloud-cli/dagster_cloud_cli/config/models.py | {
"start": 988,
"end": 1101
} | class ____(BaseModel, extra="forbid"):
directory: Optional[str] = None
registry: Optional[str] = None
| Build |
python | has2k1__plotnine | plotnine/scales/scale_xy.py | {
"start": 9751,
"end": 9933
} | class ____(scale_x_continuous):
"""
Continuous x position symmetric logarithm transformed scale
"""
trans: TransUser = "symlog"
@dataclass(kw_only=True)
| scale_x_symlog |
python | PrefectHQ__prefect | src/prefect/client/schemas/objects.py | {
"start": 35470,
"end": 35835
} | class ____(ObjectBaseModel):
"""An ORM representation of flow data."""
name: Name = Field(
default=..., description="The name of the flow", examples=["my-flow"]
)
tags: list[str] = Field(
default_factory=list,
description="A list of flow tags",
examples=[["tag-1", "tag-2"]],
)
labels: KeyValueLabelsField
| Flow |
python | openai__openai-python | src/openai/types/chat/chat_completion.py | {
"start": 745,
"end": 1572
} | class ____(BaseModel):
finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
"""The reason the model stopped generating tokens.
This will be `stop` if the model hit a natural stop point or a provided stop
sequence, `length` if the maximum number of tokens specified in the request was
reached, `content_filter` if content was omitted due to a flag from our content
filters, `tool_calls` if the model called a tool, or `function_call`
(deprecated) if the model called a function.
"""
index: int
"""The index of the choice in the list of choices."""
logprobs: Optional[ChoiceLogprobs] = None
"""Log probability information for the choice."""
message: ChatCompletionMessage
"""A chat completion message generated by the model."""
| Choice |
python | astropy__astropy | astropy/units/tests/test_structured.py | {
"start": 16260,
"end": 25545
} | class ____(StructuredTestBaseWithUnits):
def test_initialization_and_keying(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_p = q_pv["p"]
assert isinstance(q_p, Quantity)
assert isinstance(q_p.unit, UnitBase)
assert np.all(q_p == self.pv["p"] * self.pv_unit["p"])
q_v = q_pv["v"]
assert isinstance(q_v, Quantity)
assert isinstance(q_v.unit, UnitBase)
assert np.all(q_v == self.pv["v"] * self.pv_unit["v"])
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_t = q_pv_t["t"]
assert np.all(q_t == self.pv_t["t"] * self.pv_t_unit["t"])
q_pv2 = q_pv_t["pv"]
assert isinstance(q_pv2, Quantity)
assert q_pv2.unit == self.pv_unit
with pytest.raises(ValueError):
Quantity(self.pv, self.pv_t_unit)
with pytest.raises(ValueError):
Quantity(self.pv_t, self.pv_unit)
def test_initialization_with_unit_tuples(self):
q_pv_t = Quantity(self.pv_t, (("km", "km/s"), "s"))
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_with_string(self):
q_pv_t = Quantity(self.pv_t, "(km, km/s), s")
assert isinstance(q_pv_t.unit, StructuredUnit)
assert q_pv_t.unit == self.pv_t_unit
def test_initialization_by_multiplication_with_unit(self):
q_pv_t = self.pv_t * self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert not np.may_share_memory(q_pv_t, self.pv_t)
q_pv_t2 = self.pv_t_unit * self.pv_t
assert q_pv_t.unit is self.pv_t_unit
# Not testing equality of structured Quantity here.
assert np.all(q_pv_t2.value == q_pv_t.value)
def test_initialization_by_shifting_to_unit(self):
q_pv_t = self.pv_t << self.pv_t_unit
assert q_pv_t.unit is self.pv_t_unit
assert np.all(q_pv_t.value == self.pv_t)
assert np.may_share_memory(q_pv_t, self.pv_t)
def test_initialization_without_unit(self):
q_pv_t = u.Quantity(self.pv_t, unit=None)
assert np.all(q_pv_t.value == self.pv_t)
# Test that unit is a structured unit like the dtype
expected_unit = _structured_unit_like_dtype(
u.Quantity._default_unit, self.pv_t.dtype
)
assert q_pv_t.unit == expected_unit
# A more explicit test
assert q_pv_t.unit == u.StructuredUnit(((u.one, u.one), u.one))
def test_getitem(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t01 = q_pv_t[:2]
assert isinstance(q_pv_t01, Quantity)
assert q_pv_t01.unit == q_pv_t.unit
assert np.all(q_pv_t01["t"] == q_pv_t["t"][:2])
q_pv_t1 = q_pv_t[1]
assert isinstance(q_pv_t1, Quantity)
assert q_pv_t1.unit == q_pv_t.unit
assert q_pv_t1.shape == ()
assert q_pv_t1["t"] == q_pv_t["t"][1]
def test_value(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
value = q_pv_t.value
assert type(value) is np.ndarray
assert np.all(value == self.pv_t)
value1 = q_pv_t[1].value
assert type(value1) is np.void
assert np.all(value1 == self.pv_t[1])
def test_conversion(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.to(("AU", "AU/day"))
assert isinstance(q1, Quantity)
assert q1["p"].unit == u.AU
assert q1["v"].unit == u.AU / u.day
assert np.all(q1["p"] == q_pv["p"].to(u.AU))
assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
q2 = q_pv.to(self.pv_unit)
assert q2["p"].unit == self.p_unit
assert q2["v"].unit == self.v_unit
assert np.all(q2["p"].value == self.pv["p"])
assert np.all(q2["v"].value == self.pv["v"])
assert not np.may_share_memory(q2, q_pv)
pv1 = q_pv.to_value(("AU", "AU/day"))
assert type(pv1) is np.ndarray
assert np.all(pv1["p"] == q_pv["p"].to_value(u.AU))
assert np.all(pv1["v"] == q_pv["v"].to_value(u.AU / u.day))
pv11 = q_pv[1].to_value(("AU", "AU/day"))
assert type(pv11) is np.void
assert pv11 == pv1[1]
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.to((("kpc", "kpc/Myr"), "Myr"))
assert q2["pv"]["p"].unit == u.kpc
assert q2["pv"]["v"].unit == u.kpc / u.Myr
assert q2["t"].unit == u.Myr
assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))
def test_conversion_via_lshift(self):
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv << StructuredUnit(("AU", "AU/day"))
assert isinstance(q1, Quantity)
assert q1["p"].unit == u.AU
assert q1["v"].unit == u.AU / u.day
assert np.all(q1["p"] == q_pv["p"].to(u.AU))
assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
q2 = q_pv << self.pv_unit
assert q2["p"].unit == self.p_unit
assert q2["v"].unit == self.v_unit
assert np.all(q2["p"].value == self.pv["p"])
assert np.all(q2["v"].value == self.pv["v"])
assert np.may_share_memory(q2, q_pv)
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t << "(kpc,kpc/Myr),Myr"
assert q2["pv"]["p"].unit == u.kpc
assert q2["pv"]["v"].unit == u.kpc / u.Myr
assert q2["t"].unit == u.Myr
assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))
def test_inplace_conversion(self):
# In principle, in-place might be possible, in which case this should be
# changed -- ie ``q1 is q_link``.
q_pv = Quantity(self.pv, self.pv_unit)
q1 = q_pv.copy()
q_link = q1
q1 <<= StructuredUnit(("AU", "AU/day"))
assert q1 is not q_link
assert q1["p"].unit == u.AU
assert q1["v"].unit == u.AU / u.day
assert np.all(q1["p"] == q_pv["p"].to(u.AU))
assert np.all(q1["v"] == q_pv["v"].to(u.AU / u.day))
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q2 = q_pv_t.copy()
q_link = q2
q2 <<= "(kpc,kpc/Myr),Myr"
assert q2 is not q_link
assert q2["pv"]["p"].unit == u.kpc
assert q2["pv"]["v"].unit == u.kpc / u.Myr
assert q2["t"].unit == u.Myr
assert np.all(q2["pv"]["p"] == q_pv_t["pv"]["p"].to(u.kpc))
assert np.all(q2["pv"]["v"] == q_pv_t["pv"]["v"].to(u.kpc / u.Myr))
assert np.all(q2["t"] == q_pv_t["t"].to(u.Myr))
def test_si(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_si = q_pv_t.si
assert_array_equal(q_pv_t_si, q_pv_t.to("(m,m/s),s"))
def test_cgs(self):
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t_cgs = q_pv_t.cgs
assert_array_equal(q_pv_t_cgs, q_pv_t.to("(cm,cm/s),s"))
def test_equality(self):
q_pv = Quantity(self.pv, self.pv_unit)
equal = q_pv == q_pv
not_equal = q_pv != q_pv
assert np.all(equal)
assert not np.any(not_equal)
equal2 = q_pv == q_pv[1]
not_equal2 = q_pv != q_pv[1]
assert np.all(equal2 == [False, True, False])
assert np.all(not_equal2 != equal2)
q1 = q_pv.to(("AU", "AU/day"))
# Ensure same conversion is done, by placing q1 first.
assert np.all(q1 == q_pv)
assert not np.any(q1 != q_pv)
# Check different names in dtype.
assert np.all(q1.value * u.Unit("AU, AU/day") == q_pv)
assert not np.any(q1.value * u.Unit("AU, AU/day") != q_pv)
assert (q_pv == "b") is False
assert ("b" != q_pv) is True
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
assert np.all((q_pv_t[2] == q_pv_t) == [False, False, True])
assert np.all((q_pv_t[2] != q_pv_t) != [False, False, True])
assert (q_pv == q_pv_t) is False
assert (q_pv_t != q_pv) is True
def test_setitem(self):
q_pv = Quantity(self.pv, self.pv_unit)
q_pv[1] = (2.0, 2.0) * self.pv_unit
assert q_pv[1].value == np.array((2.0, 2.0), self.pv_dtype)
q_pv[1:2] = (1.0, 0.5) * u.Unit("AU, AU/day")
assert q_pv["p"][1] == 1.0 * u.AU
assert q_pv["v"][1] == 0.5 * u.AU / u.day
q_pv["v"] = 1.0 * u.km / u.s
assert np.all(q_pv["v"] == 1.0 * u.km / u.s)
with pytest.raises(u.UnitsError):
q_pv[1] = (1.0, 1.0) * u.Unit("AU, AU")
with pytest.raises(u.UnitsError):
q_pv["v"] = 1.0 * u.km
q_pv_t = Quantity(self.pv_t, self.pv_t_unit)
q_pv_t[1] = ((2.0, 2.0), 3.0) * self.pv_t_unit
assert q_pv_t[1].value == np.array(((2.0, 2.0), 3.0), self.pv_t_dtype)
q_pv_t[1:2] = ((1.0, 0.5), 5.0) * u.Unit("(AU, AU/day), yr")
assert q_pv_t["pv"][1] == (1.0, 0.5) * u.Unit("AU, AU/day")
assert q_pv_t["t"][1] == 5.0 * u.yr
q_pv_t["pv"] = (1.0, 0.5) * self.pv_unit
assert np.all(q_pv_t["pv"] == (1.0, 0.5) * self.pv_unit)
| TestStructuredQuantity |
python | pytorch__pytorch | test/torch_np/numpy_tests/lib/test_index_tricks.py | {
"start": 17041,
"end": 17271
} | class ____(TestCase):
@xpassIfTorchDynamo_np # (reason="c_ not implemented")
def test_c_(self):
a = np.c_[np.array([[1, 2, 3]]), 0, 0, np.array([[4, 5, 6]])]
assert_equal(a, [[1, 2, 3, 0, 0, 4, 5, 6]])
| TestC |
python | walkccc__LeetCode | solutions/495. Teemo Attacking/495.py | {
"start": 0,
"end": 279
} | class ____:
def findPoisonedDuration(self, timeSeries: list[int], duration: int) -> int:
if duration == 0:
return 0
ans = 0
for i in range(0, len(timeSeries) - 1):
ans += min(timeSeries[i + 1] - timeSeries[i], duration)
return ans + duration
| Solution |
python | kamyu104__LeetCode-Solutions | Python/count-all-possible-routes.py | {
"start": 2569,
"end": 3340
} | class ____(object):
def countRoutes(self, locations, start, finish, fuel):
"""
:type locations: List[int]
:type start: int
:type finish: int
:type fuel: int
:rtype: int
"""
MOD = 10**9+7
dp = [[0]*(fuel+1) for _ in xrange(len(locations))]
dp[start][0] = 1
for f in xrange(fuel+1):
for i in xrange(len(locations)):
for j in xrange(len(locations)):
if i == j:
continue
d = abs(locations[i]-locations[j])
if f-d < 0:
continue
dp[i][f] = (dp[i][f]+dp[j][f-d])%MOD
return reduce(lambda x, y: (x+y)%MOD, dp[finish])
| Solution2 |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/workflows.py | {
"start": 14905,
"end": 17403
} | class ____(GoogleCloudBaseOperator):
"""
Gets details of a single Workflow.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:WorkflowsGetWorkflowOperator`
:param workflow_id: Required. The ID of the workflow to be created.
:param project_id: Required. The ID of the Google Cloud project the cluster belongs to.
:param location: Required. The GCP region in which to handle the request.
:param retry: A retry object used to retry requests. If ``None`` is specified, requests will not be
retried.
:param timeout: The amount of time, in seconds, to wait for the request to complete. Note that if
``retry`` is specified, the timeout applies to each individual attempt.
:param metadata: Additional metadata that is provided to the method.
"""
template_fields: Sequence[str] = ("location", "workflow_id")
operator_extra_links = (WorkflowsWorkflowDetailsLink(),)
def __init__(
self,
*,
workflow_id: str,
location: str,
project_id: str = PROVIDE_PROJECT_ID,
retry: Retry | _MethodDefault = DEFAULT,
timeout: float | None = None,
metadata: Sequence[tuple[str, str]] = (),
gcp_conn_id: str = "google_cloud_default",
impersonation_chain: str | Sequence[str] | None = None,
**kwargs,
):
super().__init__(**kwargs)
self.workflow_id = workflow_id
self.location = location
self.project_id = project_id
self.retry = retry
self.timeout = timeout
self.metadata = metadata
self.gcp_conn_id = gcp_conn_id
self.impersonation_chain = impersonation_chain
def execute(self, context: Context):
hook = WorkflowsHook(gcp_conn_id=self.gcp_conn_id, impersonation_chain=self.impersonation_chain)
self.log.info("Retrieving workflow")
workflow = hook.get_workflow(
workflow_id=self.workflow_id,
location=self.location,
project_id=self.project_id,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
WorkflowsWorkflowDetailsLink.persist(
context=context,
location_id=self.location,
workflow_id=self.workflow_id,
project_id=self.project_id or hook.project_id,
)
return Workflow.to_dict(workflow)
| WorkflowsGetWorkflowOperator |
python | great-expectations__great_expectations | contrib/great_expectations_semantic_types_expectations/great_expectations_semantic_types_expectations/expectations/expect_column_values_to_be_valid_country.py | {
"start": 843,
"end": 1833
} | class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_country"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_country(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
| ColumnValuesToBeValidCountry |
python | walkccc__LeetCode | solutions/1861. Rotating the Box/1861.py | {
"start": 0,
"end": 424
} | class ____:
def rotateTheBox(self, box: list[list[str]]) -> list[list[str]]:
m = len(box)
n = len(box[0])
rotated = [['.'] * m for _ in range(n)]
for i in range(m):
k = n - 1
for j in reversed(range(n)):
if box[i][j] != '.':
if box[i][j] == '*':
k = j
rotated[k][m - i - 1] = box[i][j]
k -= 1
return [''.join(row) for row in rotated]
| Solution |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/core_api/datamodels/variables.py | {
"start": 1861,
"end": 2136
} | class ____(StrictBaseModel):
"""Variable serializer for bodies."""
key: str = Field(max_length=ID_LEN)
value: JsonValue = Field(serialization_alias="val")
description: str | None = Field(default=None)
team_id: UUID | None = Field(default=None)
| VariableBody |
python | openai__openai-python | src/openai/types/realtime/realtime_audio_formats.py | {
"start": 327,
"end": 551
} | class ____(BaseModel):
rate: Optional[Literal[24000]] = None
"""The sample rate of the audio. Always `24000`."""
type: Optional[Literal["audio/pcm"]] = None
"""The audio format. Always `audio/pcm`."""
| AudioPCM |
python | miyuchina__mistletoe | mistletoe/contrib/github_wiki.py | {
"start": 355,
"end": 865
} | class ____(HtmlRenderer):
def __init__(self, **kwargs):
"""
Args:
**kwargs: additional parameters to be passed to the ancestor's
constructor.
"""
super().__init__(GithubWiki, **kwargs)
def render_github_wiki(self, token):
template = '<a href="{target}">{inner}</a>'
target = self.escape_url(token.target)
inner = self.render_inner(token)
return template.format(target=target, inner=inner)
| GithubWikiRenderer |
python | django__django | tests/admin_views/models.py | {
"start": 28041,
"end": 28252
} | class ____(models.Model):
chapter = models.ForeignKey(Chapter, models.CASCADE)
language = models.ForeignKey(Language, models.CASCADE)
user = models.ForeignKey(User, models.CASCADE)
| ReadOnlyRelatedField |
python | GoogleCloudPlatform__python-docs-samples | appengine/standard/ndb/properties/snippets.py | {
"start": 1071,
"end": 1356
} | class ____(ndb.Model):
title = ndb.StringProperty()
stars = ndb.IntegerProperty()
tags = ndb.StringProperty(repeated=True)
def create_article():
article = Article(title="Python versus Ruby", stars=3, tags=["python", "ruby"])
article.put()
return article
| Article |
python | django__django | tests/model_fields/test_durationfield.py | {
"start": 1604,
"end": 2204
} | class ____(SimpleTestCase):
test_data = (
'[{"fields": {"field": "1 01:00:00"}, "model": "model_fields.durationmodel", '
'"pk": null}]'
)
def test_dumping(self):
instance = DurationModel(field=datetime.timedelta(days=1, hours=1))
data = serializers.serialize("json", [instance])
self.assertEqual(json.loads(data), json.loads(self.test_data))
def test_loading(self):
instance = list(serializers.deserialize("json", self.test_data))[0].object
self.assertEqual(instance.field, datetime.timedelta(days=1, hours=1))
| TestSerialization |
python | getsentry__sentry | src/sentry/data_export/processors/issues_by_tag.py | {
"start": 431,
"end": 4469
} | class ____:
"""
Processor for exports of issues data based on a provided tag
"""
def __init__(
self,
project_id: int,
group_id: int | str,
key: str,
environment_id: int | None,
tenant_ids: dict[str, str | int] | None = None,
):
self.project = self.get_project(project_id)
self.group = self.get_group(group_id, self.project)
self.key = key
self.environment_id = environment_id
self.header_fields = self.get_header_fields(self.key)
self.lookup_key = self.get_lookup_key(self.key)
# Ensure the tag key exists, as it may have been deleted
try:
tagstore.backend.get_tag_key(
self.project.id, environment_id, self.lookup_key, tenant_ids=tenant_ids
)
except tagstore.TagKeyNotFound:
raise ExportError("Requested key does not exist")
@staticmethod
def get_project(project_id: int) -> Project:
try:
project = Project.objects.get_from_cache(id=project_id)
return project
except Project.DoesNotExist:
raise ExportError("Requested project does not exist")
@staticmethod
def get_group(group_id: int | str, project: Project) -> Group:
try:
group, _ = get_group_with_redirect(
group_id, queryset=Group.objects.filter(project=project)
)
return group
except Group.DoesNotExist:
raise ExportError("Requested issue does not exist")
@staticmethod
def get_header_fields(key: str) -> list[str]:
if key == "user":
return [
"value",
"id",
"email",
"username",
"ip_address",
"times_seen",
"last_seen",
"first_seen",
]
else:
return ["value", "times_seen", "last_seen", "first_seen"]
@staticmethod
def get_lookup_key(key: str) -> str:
return f"sentry:{key}" if tagstore.backend.is_reserved_key(key) else key
@staticmethod
def serialize_row(item: GroupTagValueAndEventUser, key: str) -> dict[str, str]:
result = {
"value": item.value.value,
"times_seen": item.value.times_seen,
"last_seen": item.value.last_seen.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
"first_seen": item.value.first_seen.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
}
if key == "user":
euser = item.eventuser
result["id"] = euser.user_ident if euser and isinstance(euser, EventUser) else ""
result["email"] = euser.email if euser else ""
result["username"] = euser.username if euser else ""
result["ip_address"] = euser.ip_address if euser else ""
return result
def get_raw_data(self, limit: int = 1000, offset: int = 0) -> list[GroupTagValueAndEventUser]:
"""
Returns list of GroupTagValues
"""
items = tagstore.backend.get_group_tag_value_iter(
group=self.group,
environment_ids=[self.environment_id],
key=self.lookup_key,
limit=limit,
offset=offset,
tenant_ids={"organization_id": self.project.organization_id},
)
if self.key == "user":
users = EventUser.for_tags(self.group.project_id, [i.value for i in items])
else:
users = {}
return [
GroupTagValueAndEventUser(
item, users.get(item.value) if item.value is not None else None
)
for item in items
]
def get_serialized_data(self, limit: int = 1000, offset: int = 0) -> list[dict[str, str]]:
"""
Returns list of serialized GroupTagValue dictionaries
"""
raw_data = self.get_raw_data(limit=limit, offset=offset)
return [self.serialize_row(item, self.key) for item in raw_data]
| IssuesByTagProcessor |
python | pydata__xarray | xarray/tests/test_namedarray.py | {
"start": 958,
"end": 1297
} | class ____(Generic[_ShapeType_co, _DType_co]):
def __init__(self, array: duckarray[Any, _DType_co]) -> None:
self.array: duckarray[Any, _DType_co] = array
@property
def dtype(self) -> _DType_co:
return self.array.dtype
@property
def shape(self) -> _Shape:
return self.array.shape
| CustomArrayBase |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/random/random_ops_test.py | {
"start": 5975,
"end": 10161
} | class ____(test.TestCase):
def _Sampler(self, num, mu, sigma, dtype, use_gpu, seed=None):
def func():
with self.session(use_gpu=use_gpu, graph=ops.Graph()) as sess:
rng = random_ops.truncated_normal(
[num], mean=mu, stddev=sigma, dtype=dtype, seed=seed)
ret = np.empty([10, num])
for i in range(10):
ret[i, :] = self.evaluate(rng)
return ret
return func
# Asserts that different trials (1000 samples per trial) is unlikely
# to see the same sequence of values. Will catch buggy
# implementations which uses the same random number seed.
def testDistinct(self):
# NOTE: TruncatedNormal on GPU is not supported.
if not test.is_gpu_available():
for dt in get_float_types():
sampler = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=False)
x = sampler()
y = sampler()
# Number of different samples.
count = (x == y).sum()
if count >= 10:
print("x = ", x)
print("y = ", y)
print("count = ", count)
self.assertTrue(count < 10)
# Checks that the CPU and GPU implementation returns the same results,
# given the same random seed
@test_util.run_deprecated_v1
def testCPUGPUMatch(self):
# Skip the test if there is no GPU.
if not test.is_gpu_available():
return
for dt in get_float_types():
results = {}
for use_gpu in [False, True]:
# We need a particular larger number of samples to test multiple rounds
# on GPU
sampler = self._Sampler(
1000000, 0.0, 1.0, dt, use_gpu=use_gpu, seed=12345)
results[use_gpu] = sampler()
atol = rtol = 1e-6
if dt == dtypes.float16:
atol = rtol = 1e-3
if dt == dtypes.bfloat16:
atol = rtol = 1e-1
self.assertAllClose(results[False], results[True], rtol=rtol, atol=atol)
@test_util.run_deprecated_v1
def testSeed(self):
for dt in get_float_types():
sx = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
sy = self._Sampler(1000, 0.0, 1.0, dt, use_gpu=True, seed=345)
self.assertAllEqual(sx(), sy())
# The effective standard deviation of truncated normal is 85% of the
# requested one.
def testStdDev(self):
for dt in get_float_types():
stddev = 3.0
sampler = self._Sampler(100000, 0.0, stddev, dt, use_gpu=True)
x = sampler()
print("std(x)", np.std(x), abs(np.std(x) / stddev - 0.85))
self.assertLess(abs(np.std(x) / stddev - 0.85), 0.04)
def testSuccessAfterError(self):
# Force an error on the TruncatedNormal kernel.
config.enable_op_determinism()
with self.assertRaisesRegex(
errors.InvalidArgumentError,
"When determinism is enabled, random ops must have a seed specified"):
self.evaluate(gen_random_ops.truncated_normal((1,), dtypes.float32))
config.disable_op_determinism()
# Ensure the StdDev of the TruncatedNormal works as intended.
self.testStdDev()
@test_util.run_deprecated_v1
def testLargeShape(self):
with self.session():
v = variables.Variable(
array_ops.zeros(dtype=dtypes.float32, shape=[2**33, 1]))
n = random_ops.truncated_normal(v.shape)
self.assertEqual([8589934592, 1], n.shape.as_list())
@test_util.run_deprecated_v1
def testNoCSE(self):
with self.session():
shape = [2, 3, 4]
rnd1 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
rnd2 = random_ops.truncated_normal(shape, 0.0, 1.0, dtypes.float32)
diff = rnd2 - rnd1
self.assertTrue(np.linalg.norm(diff.eval()) > 0.1)
def testEagerSeed(self):
with context.eager_mode():
# Ensure a context has been created
random_ops.random_normal([])
# Set the same seed twice and check that the values match
context.set_global_seed(42)
rnd1 = random_ops.random_normal([])
context.set_global_seed(42)
rnd2 = random_ops.random_normal([])
self.assertAllEqual(rnd1, rnd2)
@test_util.with_eager_op_as_function
@test_util.for_all_test_methods(test_util.disable_xla,
"This never passed on XLA")
| TruncatedNormalTest |
python | pyqtgraph__pyqtgraph | pyqtgraph/graphicsItems/FillBetweenItem.py | {
"start": 211,
"end": 5550
} | class ____(QtWidgets.QGraphicsPathItem):
"""
GraphicsItem filling the space between two PlotDataItems.
"""
def __init__(
self,
curve1: Union[PlotDataItem, PlotCurveItem],
curve2: Union[PlotDataItem, PlotCurveItem],
brush=None,
pen=None,
fillRule: QtCore.Qt.FillRule=QtCore.Qt.FillRule.OddEvenFill
):
"""FillBetweenItem fills a region between two curves with a specified
:class:`~QtGui.QBrush`.
Parameters
----------
curve1 : :class:`~pyqtgraph.PlotDataItem` | :class:`~pyqtgraph.PlotCurveItem`
Line to draw fill from
curve2 : :class:`~pyqtgraph.PlotDataItem` | :class:`~pyqtgraph.PlotCurveItem`
Line to draw fill to
brush : color_like, optional
Arguments accepted by :func:`~pyqtgraph.mkBrush`, used
to create the :class:`~QtGui.QBrush` instance used to draw the item
by default None
pen : color_like, optional
Arguments accepted by :func:`~pyqtgraph.mkColor`, used
to create the :class:`~QtGui.QPen` instance used to draw the item
by default ``None``
fillRule : QtCore.Qt.FillRule, optional
FillRule to be applied to the underlying :class:`~QtGui.QPainterPath`
instance, by default ``QtCore.Qt.FillRule.OddEvenFill``
Raises
------
ValueError
Raised when ``None`` is passed in as either ``curve1``
or ``curve2``
TypeError
Raised when either ``curve1`` or ``curve2`` is not either
:class:`~pyqtgraph.PlotDataItem` or :class:`~pyqtgraph.PlotCurveItem`
"""
super().__init__()
self.curves = None
self._fillRule = fillRule
if curve1 is not None and curve2 is not None:
self.setCurves(curve1, curve2)
elif curve1 is not None or curve2 is not None:
raise ValueError("Must specify two curves to fill between.")
if brush is not None:
self.setBrush(brush)
self.setPen(pen)
self.updatePath()
def fillRule(self):
return self._fillRule
def setFillRule(self, fillRule: QtCore.Qt.FillRule=QtCore.Qt.FillRule.OddEvenFill):
"""Set the underlying :class:`~QtGui.QPainterPath` to the specified
:class:`~QtCore.Qt.FillRule`
This can be useful for allowing in the filling of voids.
Parameters
----------
fillRule : QtCore.Qt.FillRule
A member of the :class:`~QtCore.Qt.FillRule` enum
"""
self._fillRule = fillRule
self.updatePath()
def setBrush(self, *args, **kwds):
"""Change the fill brush. Accepts the same arguments as :func:`~pyqtgraph.mkBrush`
"""
QtWidgets.QGraphicsPathItem.setBrush(self, fn.mkBrush(*args, **kwds))
def setPen(self, *args, **kwds):
"""Change the fill pen. Accepts the same arguments as :func:`~pyqtgraph.mkColor`
"""
QtWidgets.QGraphicsPathItem.setPen(self, fn.mkPen(*args, **kwds))
def setCurves(
self,
curve1: Union[PlotDataItem, PlotCurveItem],
curve2: Union[PlotDataItem, PlotCurveItem]
):
"""Method to set the Curves to draw the FillBetweenItem between
Parameters
----------
curve1 : :class:`~pyqtgraph.PlotDataItem` | :class:`~pyqtgraph.PlotCurveItem`
Line to draw fill from
curve2 : :class:`~pyqtgraph.PlotDataItem` | :class:`~pyqtgraph.PlotCurveItem`
Line to draw fill to
Raises
------
TypeError
Raised when input arguments are not either :class:`~pyqtgraph.PlotDataItem` or
:class:`~pyqtgraph.PlotCurveItem`
"""
if self.curves is not None:
for c in self.curves:
try:
c.sigPlotChanged.disconnect(self.curveChanged)
except (TypeError, RuntimeError):
pass
curves = [curve1, curve2]
for c in curves:
if not isinstance(c, (PlotDataItem, PlotCurveItem)):
raise TypeError("Curves must be PlotDataItem or PlotCurveItem.")
self.curves = curves
curve1.sigPlotChanged.connect(self.curveChanged)
curve2.sigPlotChanged.connect(self.curveChanged)
self.setZValue(min(curve1.zValue(), curve2.zValue())-1)
self.curveChanged()
def curveChanged(self):
self.updatePath()
def updatePath(self):
if self.curves is None:
self.setPath(QtGui.QPainterPath())
return
paths = []
for c in self.curves:
if isinstance(c, PlotDataItem):
paths.append(c.curve.getPath())
elif isinstance(c, PlotCurveItem):
paths.append(c.getPath())
path = QtGui.QPainterPath()
path.setFillRule(self.fillRule())
ps1 = paths[0].toSubpathPolygons()
ps2 = paths[1].toReversed().toSubpathPolygons()
ps2.reverse()
if len(ps1) == 0 or len(ps2) == 0:
self.setPath(QtGui.QPainterPath())
return
for p1, p2 in zip(ps1, ps2):
path.addPolygon(p1 + p2)
self.setPath(path)
| FillBetweenItem |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_textbox39.py | {
"start": 315,
"end": 1028
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("textbox39.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with textbox(s)."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
worksheet.insert_textbox(
"E9", "This is some text", {"url": "https://github.com/jmcnamara"}
)
worksheet.insert_textbox(
"E19", "This is some text", {"url": "https://github.com/jmcnamara"}
)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | ray-project__ray | rllib/connectors/action/normalize.py | {
"start": 405,
"end": 1385
} | class ____(ActionConnector):
def __init__(self, ctx: ConnectorContext):
super().__init__(ctx)
self._action_space_struct = get_base_struct_from_space(ctx.action_space)
def transform(self, ac_data: ActionConnectorDataType) -> ActionConnectorDataType:
assert isinstance(
ac_data.output, tuple
), "Action connector requires PolicyOutputType data."
actions, states, fetches = ac_data.output
return ActionConnectorDataType(
ac_data.env_id,
ac_data.agent_id,
ac_data.input_dict,
(unsquash_action(actions, self._action_space_struct), states, fetches),
)
def to_state(self):
return NormalizeActionsConnector.__name__, None
@staticmethod
def from_state(ctx: ConnectorContext, params: Any):
return NormalizeActionsConnector(ctx)
register_connector(NormalizeActionsConnector.__name__, NormalizeActionsConnector)
| NormalizeActionsConnector |
python | huggingface__transformers | src/transformers/models/mimi/modeling_mimi.py | {
"start": 53968,
"end": 55893
} | class ____(nn.Module):
"""Codebook with Euclidean distance."""
def __init__(self, config: MimiConfig, epsilon: float = 1e-5):
super().__init__()
embed = torch.zeros(config.codebook_size, config.codebook_dim)
self.codebook_size = config.codebook_size
self.register_buffer("initialized", torch.tensor([True], dtype=torch.float32))
self.register_buffer("cluster_usage", torch.ones(config.codebook_size))
self.register_buffer("embed_sum", embed)
self._embed = None
self.epsilon = epsilon
@property
def embed(self) -> torch.Tensor:
if self._embed is None:
self._embed = self.embed_sum / self.cluster_usage.clamp(min=self.epsilon)[:, None]
return self._embed
def quantize(self, hidden_states):
# Projects each vector in `hidden_states` over the nearest centroid and return its index.
# `hidden_states` should be `[N, D]` with `N` the number of input vectors and `D` the dimension.
dists = torch.cdist(hidden_states[None].float(), self.embed[None].float(), p=2)[0]
embed_ind = dists.argmin(dim=-1)
return embed_ind
# Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.encode
def encode(self, hidden_states):
shape = hidden_states.shape
# pre-process
hidden_states = hidden_states.reshape((-1, shape[-1]))
# quantize
embed_ind = self.quantize(hidden_states)
# post-process
embed_ind = embed_ind.view(*shape[:-1])
return embed_ind
# Copied from transformers.models.encodec.modeling_encodec.EncodecEuclideanCodebook.decode
def decode(self, embed_ind):
quantize = nn.functional.embedding(embed_ind, self.embed)
return quantize
# Copied from transformers.models.encodec.modeling_encodec.EncodecVectorQuantization with Encodec->Mimi
| MimiEuclideanCodebook |
python | mitmproxy__pdoc | test/test__pydantic.py | {
"start": 408,
"end": 1457
} | class ____(pydantic.BaseModel):
id: int
name: str = pydantic.Field(description="desc", default="Jane Doe")
@pydantic.computed_field(description="computed") # type: ignore[misc]
@property
def computed(self) -> str:
return "computed_value"
@pydantic.computed_field(description="cached") # type: ignore[misc]
@cached_property
def cached(self) -> str:
return "computed_value"
def test_with_pydantic(monkeypatch):
assert _pydantic.is_pydantic_model(ExampleModel)
assert _pydantic.get_field_docstring(ExampleModel, "name") == "desc"
assert _pydantic.get_field_docstring(ExampleModel, "computed") == "computed"
assert _pydantic.get_field_docstring(ExampleModel, "cached") == "cached"
assert _pydantic.default_value(ExampleModel, "name", None) == "Jane Doe"
assert not _pydantic.is_pydantic_model(pdoc.doc.Module)
assert _pydantic.get_field_docstring(pdoc.doc.Module, "kind") is None
assert _pydantic.default_value(pdoc.doc.Module, "kind", "module") == "module"
| ExampleModel |
python | tiangolo__fastapi | docs_src/security/tutorial005_py310.py | {
"start": 1446,
"end": 5335
} | class ____(User):
hashed_password: str
password_hash = PasswordHash.recommended()
oauth2_scheme = OAuth2PasswordBearer(
tokenUrl="token",
scopes={"me": "Read information about the current user.", "items": "Read items."},
)
app = FastAPI()
def verify_password(plain_password, hashed_password):
return password_hash.verify(plain_password, hashed_password)
def get_password_hash(password):
return password_hash.hash(password)
def get_user(db, username: str):
if username in db:
user_dict = db[username]
return UserInDB(**user_dict)
def authenticate_user(fake_db, username: str, password: str):
user = get_user(fake_db, username)
if not user:
return False
if not verify_password(password, user.hashed_password):
return False
return user
def create_access_token(data: dict, expires_delta: timedelta | None = None):
to_encode = data.copy()
if expires_delta:
expire = datetime.now(timezone.utc) + expires_delta
else:
expire = datetime.now(timezone.utc) + timedelta(minutes=15)
to_encode.update({"exp": expire})
encoded_jwt = jwt.encode(to_encode, SECRET_KEY, algorithm=ALGORITHM)
return encoded_jwt
async def get_current_user(
security_scopes: SecurityScopes, token: str = Depends(oauth2_scheme)
):
if security_scopes.scopes:
authenticate_value = f'Bearer scope="{security_scopes.scope_str}"'
else:
authenticate_value = "Bearer"
credentials_exception = HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Could not validate credentials",
headers={"WWW-Authenticate": authenticate_value},
)
try:
payload = jwt.decode(token, SECRET_KEY, algorithms=[ALGORITHM])
username: str = payload.get("sub")
if username is None:
raise credentials_exception
scope: str = payload.get("scope", "")
token_scopes = scope.split(" ")
token_data = TokenData(scopes=token_scopes, username=username)
except (InvalidTokenError, ValidationError):
raise credentials_exception
user = get_user(fake_users_db, username=token_data.username)
if user is None:
raise credentials_exception
for scope in security_scopes.scopes:
if scope not in token_data.scopes:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Not enough permissions",
headers={"WWW-Authenticate": authenticate_value},
)
return user
async def get_current_active_user(
current_user: User = Security(get_current_user, scopes=["me"]),
):
if current_user.disabled:
raise HTTPException(status_code=400, detail="Inactive user")
return current_user
@app.post("/token")
async def login_for_access_token(
form_data: OAuth2PasswordRequestForm = Depends(),
) -> Token:
user = authenticate_user(fake_users_db, form_data.username, form_data.password)
if not user:
raise HTTPException(status_code=400, detail="Incorrect username or password")
access_token_expires = timedelta(minutes=ACCESS_TOKEN_EXPIRE_MINUTES)
access_token = create_access_token(
data={"sub": user.username, "scope": " ".join(form_data.scopes)},
expires_delta=access_token_expires,
)
return Token(access_token=access_token, token_type="bearer")
@app.get("/users/me/", response_model=User)
async def read_users_me(current_user: User = Depends(get_current_active_user)):
return current_user
@app.get("/users/me/items/")
async def read_own_items(
current_user: User = Security(get_current_active_user, scopes=["items"]),
):
return [{"item_id": "Foo", "owner": current_user.username}]
@app.get("/status/")
async def read_system_status(current_user: User = Depends(get_current_user)):
return {"status": "ok"}
| UserInDB |
python | huggingface__transformers | src/transformers/generation/continuous_batching/continuous_api.py | {
"start": 33840,
"end": 49153
} | class ____:
"""Manager for handling continuous batching of generation requests.
This class provides the user interface for submitting generation requests,
retrieving results, and managing the background generation thread.
"""
def __init__(
self,
model: nn.Module,
generation_config: GenerationConfig,
manual_eviction: bool = False,
max_queue_size: int = 0,
num_q_cuda_graphs: int = 0,
num_kv_cuda_graphs: int = 0,
allow_prefix_sharing: bool = True,
) -> None:
"""Initialize the continuous batching manager.
Args:
model: The language model for generation
generation_config: Configuration for generation parameters
max_queue_size: Maximum size of the request queue (0 = unlimited)
num_q_cuda_graphs: (optional) Number of CUDA graphs to use for the query dimension
num_kv_cuda_graphs: (optional) Number of CUDA graphs to use for the keys/values dimension
allow_prefix_sharing: (optional) Whether to allow prefix sharing if the model has only full attention layers
"""
if "paged|" not in model.config._attn_implementation:
attn_implementation = f"paged|{model.config._attn_implementation}"
model.config._attn_implementation = attn_implementation
# lazy loading flash attention including kernel variations
if "flash" in attn_implementation:
from ...modeling_flash_attention_utils import lazy_import_paged_flash_attention
lazy_import_paged_flash_attention(attn_implementation)
self.model = model.eval()
generation_config = model.generation_config if generation_config is None else generation_config
self.generation_config = generation_config
self.input_queue = queue.Queue(maxsize=max_queue_size)
self.output_queue = queue.Queue()
self.stop_event = threading.Event()
self.log_prob_generation = getattr(generation_config, "log_prob_generation", False)
self._generation_thread = None
self._request_counter = 0
self._request_lock = threading.Lock()
self.model.generation_config.top_p = None
self.do_sample = getattr(generation_config, "do_sample", True)
self.logit_processor = self.model._get_logits_processor(generation_config)
use_cuda_graph: bool | None = getattr(generation_config, "use_cuda_graph", None)
self.profile = getattr(generation_config, "profile", False) # TODO: not supported yet
self.manual_eviction = manual_eviction
self.batch_processor: ContinuousBatchProcessor | None = None
self._allow_prefix_sharing = allow_prefix_sharing
# If a number of cuda graphs was specified for either Q or KV, we activate cuda graphs
if num_q_cuda_graphs > 0 or num_kv_cuda_graphs > 0:
self.use_cuda_graph = True
# If use_cuda_graph is specified, we follow the user's choice
elif use_cuda_graph is not None:
self.use_cuda_graph = use_cuda_graph
# If the use of cuda graphs is not specified, we follow the user's choice, otherwise we have a default heuristic
else:
# Attention implementations where an attention mask is needed suffer a lot more from the padding associated
# with cuda graphs, so default is to turn cuda graphs off for those implementations
self.use_cuda_graph = not attn_mask_is_needed(self.model.config)
logger.warning(
f"No behavior specified for use_cuda_graph, defaulting to {self.use_cuda_graph = } because "
f"{self.model.config._attn_implementation = }. If you want to save memory, turn off cuda graphs, but "
"they can improve performances."
)
# If cuda graphs are activated, we set the number of cuda graphs for Q and KV if not specified
if self.use_cuda_graph:
self.num_q_cuda_graphs = num_q_cuda_graphs if num_q_cuda_graphs > 0 else NUM_Q_CUDA_GRAPHS
self.num_kv_cuda_graphs = num_kv_cuda_graphs if num_kv_cuda_graphs > 0 else NUM_KV_CUDA_GRAPHS
if self.log_prob_generation:
raise NotImplementedError("log_prob_generation is not supported yet")
@traced
def start(self) -> None:
"""Start the background generation thread."""
if self._generation_thread is not None and self._generation_thread.is_alive():
logger.warning("Manager thread is already running.")
return
self._generation_thread = threading.Thread(target=self._run_generation_loop)
self._generation_thread.start()
def is_running(self) -> bool:
"""Check if the background generation thread is running."""
return self._generation_thread is not None and self._generation_thread.is_alive()
def stop(self, block: bool = True, timeout: float | None = None) -> None:
"""Signal the background thread to stop.
Args:
block: Whether to wait for the thread to stop
timeout: Maximum time to wait for the thread to stop
"""
if self.batch_processor is None:
logger.warning("\nBatch processor was not initialized.")
else:
if self.batch_processor.cache.use_prefix_sharing:
logger.warning(
f"\nPrefix sharing was on. Total prefix length: {self.batch_processor.cache._total_prefix_length}"
)
if self._generation_thread is None:
logger.warning("Manager not started.")
return
stop_trigger_time = perf_counter()
if not self.stop_event.is_set():
self.stop_event.set()
logger.info("Stopping continuous batching manager...")
if block:
self.join(stop_trigger_time, timeout)
self.batch_processor = None
def join(self, stop_trigger_time: float, timeout: float | None = None) -> None:
"""Wait for the background thread to finish.
Args:
timeout: Maximum time to wait for the thread to stop
"""
if self._generation_thread is not None:
self._generation_thread.join(timeout=timeout)
if self._generation_thread.is_alive():
logger.warning(f"Generation thread did not exit after join timeout ({timeout}).")
else:
end = perf_counter()
logger.info(f"Continuous Batching Manager stopped after {end - stop_trigger_time:.2f}s.")
self._generation_thread = None
def add_request(
self,
input_ids: list[int],
request_id: str | None = None,
max_new_tokens: int | None = None,
streaming: bool = False,
record_timestamps: bool = False,
) -> str:
"""Add a new generation request to the queue.
Args:
input_ids: Input token IDs to use as prompt
request_id: Optional custom request ID (auto-generated if None)
**kwargs: Additional generation parameters
Returns:
str: The request ID
"""
if request_id is None:
with self._request_lock:
request_id = f"req_{self._request_counter}"
self._request_counter += 1
max_new_tokens = self.generation_config.max_new_tokens if max_new_tokens is None else max_new_tokens
# NOTE: do we want to handle a case when the user wants token ids returned instead of decoded text?
state = RequestState(
request_id=request_id,
initial_tokens=list(input_ids),
record_timestamps=record_timestamps,
tokens_to_process=list(input_ids),
max_new_tokens=max_new_tokens,
eos_token_id=self.generation_config.eos_token_id,
streaming=streaming,
)
# Use block=True with timeout to handle backpressure if queue is full
self.input_queue.put(state, block=True, timeout=10) # XXX: pass timeout as fn arg?
return request_id
def add_requests(
self,
inputs: list[list[int]],
max_new_tokens: int | None = None,
streaming: bool = False,
record_timestamps: bool = False,
) -> None:
for input_ids in inputs:
self.add_request(
input_ids, max_new_tokens=max_new_tokens, streaming=streaming, record_timestamps=record_timestamps
)
def cancel_request(self, request_id: str) -> None:
"""Cancel a request by its ID.
Args:
request_id: The ID of the request to cancel
"""
if self.batch_processor is not None:
self.batch_processor.scheduler.set_request_cancellation(request_id)
# TODO:handle benchmarking properly when updating / fixing the requeue logic
def get_result(self, request_id: str | None = None, timeout: float | None = None) -> GenerationOutput | None:
"""Retrieve one result from the output queue.
Args:
timeout: Maximum time to wait for a result
Returns:
Optional[GenerationOutput]: The result data or None if timeout
"""
if self._generation_thread is None and self.output_queue.empty():
return None
try:
result = self.output_queue.get(block=True, timeout=timeout)
# NOTE: requeue logic here
if request_id is not None and result.request_id != request_id:
self.output_queue.put(result)
return None
return result
except queue.Empty:
return None
def __iter__(self):
"""Iterate over results as they become available."""
while self._generation_thread is not None and self._generation_thread.is_alive():
result = self.get_result(timeout=0.1)
if result is not None:
yield result
# FIXME: stop iteration when request status is finished?
def request_id_iter(self, request_id: str) -> Generator[GenerationOutput]:
"""Iterate over results matching a specific request id as they become available."""
request_cancelled = False
while self._generation_thread is not None and self._generation_thread.is_alive() and not request_cancelled:
result = self.get_result(request_id=request_id, timeout=0.1)
if result is not None:
yield result
if self.batch_processor is not None:
request_cancelled = self.batch_processor.scheduler.request_is_cancelled(request_id)
@traced
def _generation_step(self) -> None:
"""Perform a single generation step. This is cuda graphed"""
self.batch_processor._generation_step(self.model, self.logit_processor, self.do_sample)
def _run_generation_loop(self) -> None:
"""Main processing loop running in the background thread."""
batch_processor: ContinuousBatchProcessor | None = None
try:
t0 = perf_counter()
paged_attention_cache = PagedAttentionCache(
self.model.config,
self.generation_config,
self.model.device,
self.model.dtype,
tp_size=getattr(self.model, "_tp_size", None), # Use model's actual TP setting
allow_prefix_sharing=self._allow_prefix_sharing,
)
logger.debug(f"PagedAttentionCache created in {perf_counter() - t0} seconds")
scheduler = None
if hasattr(self.generation_config, "scheduler"):
scheduler = SCHEDULER_MAPPING.get(self.generation_config.scheduler, None)
if scheduler is None:
logger.warning(f"Scheduler '{scheduler}' not found. Defaulting to FIFO.")
scheduler = FIFOScheduler
else:
# Default to fifo
scheduler = FIFOScheduler
t1 = perf_counter()
batch_processor = ContinuousBatchProcessor(
cache=paged_attention_cache,
config=self.model.config,
generation_config=self.generation_config,
input_queue=self.input_queue,
output_queue=self.output_queue,
stop_event=self.stop_event,
model_device=self.model.device,
model_dtype=self.model.dtype,
scheduler=scheduler(paged_attention_cache, self.manual_eviction),
manual_eviction=self.manual_eviction,
use_cuda_graph=self.use_cuda_graph,
)
self.batch_processor = batch_processor
self.current_batch = 0
logger.debug(f"batch_processor created in {perf_counter() - t1} seconds")
while (not self.stop_event.is_set()) or batch_processor.has_pending_requests():
self._inner_generation_loop(batch_processor)
self.current_batch += 1
except Exception as e:
logger.error(f"Error in generation loop: {e}", exc_info=True)
self._handle_critical_error(e, batch_processor)
finally:
logger.info("Generation loop finished.")
@traced(span_name="generation_loop")
def _inner_generation_loop(self, batch_processor: ContinuousBatchProcessor) -> None:
# Pre-loop synchronization
if torch.cuda.is_available():
torch.cuda.synchronize()
# Loop body ends if there is no requests in the batch
if not batch_processor.prepare_next_batch():
return
# Debug logging of the current memory usage
if logger.level <= logging.DEBUG:
device, total, reserved, allocated = get_device_and_memory_breakdown()
logger.debug(f"[Memory] Device: {device}, Total: {total}, Reserved: {reserved}, Allocated: {allocated}")
self._generation_step()
if torch.cuda.is_available():
torch.cuda.synchronize()
# Processor updates the batch after generation step is truly over
batch_processor.update_batch()
@traced
def _handle_critical_error(self, error: Exception, batch_processor: ContinuousBatchProcessor | None) -> None:
"""Handle critical errors that terminate the generation loop."""
# Signal stop
self.stop_event.set()
# Fail pending requests in input queue
try:
while True:
req_data = self.input_queue.get_nowait()
if batch_processor is not None:
batch_processor._handle_request_error(error, req_data)
except queue.Empty:
pass
# Fail active requests
if batch_processor is not None:
batch_processor.fail_all_requests(error)
@traced
def evict_request_from_cache(self, request_id: str) -> None:
"""Evict a request from the cache. It is assumed that the request is already finished."""
if not self.manual_eviction:
raise RuntimeError("Manual eviction is not enabled for this manager.")
if self.batch_processor is not None:
self.batch_processor.scheduler.finish_request(request_id)
| ContinuousBatchingManager |
python | getsentry__sentry | tests/sentry/tasks/test_statistical_detectors.py | {
"start": 54559,
"end": 57418
} | class ____(MetricsAPIBaseTestCase):
def setUp(self) -> None:
super().setUp()
self.num_projects = 2
self.num_transactions = 4
self.hour_ago = (self.now - timedelta(hours=1)).replace(minute=0, second=0, microsecond=0)
self.hour_ago_seconds = int(self.hour_ago.timestamp())
self.projects = [
self.create_project(organization=self.organization) for _ in range(self.num_projects)
]
for project in self.projects:
for i in range(self.num_transactions):
# Store metrics for a backend transaction
self.store_metric(
self.organization.id,
project.id,
TransactionMRI.DURATION.value,
{"transaction": f"transaction_{i}", "transaction.op": "http.server"},
self.hour_ago_seconds,
1.0,
)
self.store_metric(
self.organization.id,
project.id,
TransactionMRI.DURATION.value,
{"transaction": f"transaction_{i}", "transaction.op": "http.server"},
self.hour_ago_seconds,
9.5,
)
# Store metrics for a frontend transaction, which should be
# ignored by the query
self.store_metric(
self.organization.id,
project.id,
TransactionMRI.DURATION.value,
{"transaction": f"fe_transaction_{i}", "transaction.op": "navigation"},
self.hour_ago_seconds,
1.0,
)
self.store_metric(
self.organization.id,
project.id,
TransactionMRI.DURATION.value,
{"transaction": f"fe_transaction_{i}", "transaction.op": "navigation"},
self.hour_ago_seconds,
9.5,
)
@property
def now(self):
return MetricsAPIBaseTestCase.MOCK_DATETIME
def test_transactions_query(self) -> None:
res = query_transactions(
self.projects,
self.now,
self.num_transactions + 1, # detect if any extra transactions are returned
)
assert len(res) == len(self.projects) * self.num_transactions
for trend_payload in res:
assert trend_payload.count == 2
# p95 is calculated by a probabilistic data structure, as such the value won't actually be 9.5 since we only have
# one sample at 9.5, but it should be close
assert trend_payload.value > 9
assert trend_payload.timestamp == self.hour_ago
@pytest.mark.sentry_metrics
| TestTransactionsQuery |
python | huggingface__transformers | src/transformers/models/ernie/modular_ernie.py | {
"start": 5076,
"end": 5136
} | class ____(BertEncoder):
pass
@auto_docstring
| ErnieEncoder |
python | getsentry__sentry | src/sentry/notifications/platform/templates/sample.py | {
"start": 4679,
"end": 7165
} | class ____(NotificationTemplate[DeploymentData]):
category = NotificationCategory.DEBUG
example_data = DeploymentData(
project_name="my-app",
version="v2.1.3",
environment="production",
deployer="john.doe@acme.com",
commit_sha="a1b2c3d4e5f6g7h8i9j0k1l2m3n4o5p6q7r8s9t0",
commit_message="Fix user authentication bug",
deployment_url="https://example.com/deployment",
rollback_url="https://example.com/rollback",
)
def render(self, data: DeploymentData) -> NotificationRenderedTemplate:
return NotificationRenderedTemplate(
subject=f"Deployment to {data.environment}: {data.version}",
body=[
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text=f"Version {data.version} has been successfully deployed to {data.environment} for project {data.project_name}. ",
)
],
),
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text=f"The deployment was initiated by {data.deployer} with commit {data.commit_sha[:8]}: {data.commit_message}. ",
)
],
),
ParagraphBlock(
type=NotificationBodyFormattingBlockType.PARAGRAPH,
blocks=[
PlainTextBlock(
type=NotificationBodyTextBlockType.PLAIN_TEXT,
text="Monitor the deployment status and be ready to rollback if any issues are detected.",
)
],
),
],
actions=[
NotificationRenderedAction(
label="View Deployment", link="https://example.com/deployment"
),
NotificationRenderedAction(label="Rollback", link="https://example.com/rollback"),
],
footer="Deployment completed at deployment-service",
)
@dataclass(frozen=True)
| DeploymentNotificationTemplate |
python | PyCQA__pylint | tests/functional/s/super/super_init_not_called.py | {
"start": 1122,
"end": 1224
} | class ____(ParentWithoutInit):
def __init__(self):
ParentWithoutInit.__init__(self)
| ChildTwo |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 936951,
"end": 937627
} | class ____(sgqlc.types.Type):
"""Repository interaction limit that applies to this object."""
__schema__ = github_schema
__field_names__ = ("expires_at", "limit", "origin")
expires_at = sgqlc.types.Field(DateTime, graphql_name="expiresAt")
"""The time the currently active limit expires."""
limit = sgqlc.types.Field(sgqlc.types.non_null(RepositoryInteractionLimit), graphql_name="limit")
"""The current limit that is enabled on this object."""
origin = sgqlc.types.Field(sgqlc.types.non_null(RepositoryInteractionLimitOrigin), graphql_name="origin")
"""The origin of the currently active interaction limit."""
| RepositoryInteractionAbility |
python | apache__airflow | providers/google/tests/unit/google/cloud/transfers/test_mssql_to_gcs.py | {
"start": 2707,
"end": 12890
} | class ____:
@pytest.mark.parametrize(
("value", "expected"),
[
("string", "string"),
(32.9, 32.9),
(-2, -2),
(datetime.date(1970, 1, 2), "1970-01-02"),
(datetime.date(1000, 1, 2), "1000-01-02"),
(datetime.datetime(1970, 1, 1, 1, 0), "1970-01-01T01:00:00"),
(datetime.time(hour=0, minute=0, second=0), "00:00:00"),
(datetime.time(hour=23, minute=59, second=59), "23:59:59"),
],
)
def test_convert_type(self, value, expected):
op = MSSQLToGCSOperator(
task_id=TASK_ID,
mssql_conn_id=MSSQL_CONN_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
)
assert op.convert_type(value, None) == expected
def test_init(self):
"""Test MySqlToGoogleCloudStorageOperator instance is properly initialized."""
op = MSSQLToGCSOperator(task_id=TASK_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME)
assert op.task_id == TASK_ID
assert op.sql == SQL
assert op.bucket == BUCKET
assert op.filename == JSON_FILENAME
@mock.patch("airflow.providers.google.cloud.transfers.mssql_to_gcs.MsSqlHook")
@mock.patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
def test_exec_success_json(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test successful run of execute function for JSON"""
op = MSSQLToGCSOperator(
task_id=TASK_ID, mssql_conn_id=MSSQL_CONN_ID, sql=SQL, bucket=BUCKET, filename=JSON_FILENAME
)
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False, metadata=None):
assert bucket == BUCKET
assert JSON_FILENAME.format(0) == obj
assert mime_type == "application/json"
assert gzip == GZIP
with open(tmp_filename, "rb") as file:
assert b"".join(NDJSON_LINES) == file.read()
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
mssql_hook_mock_class.assert_called_once_with(mssql_conn_id=MSSQL_CONN_ID)
mssql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
@mock.patch("airflow.providers.google.cloud.transfers.mssql_to_gcs.MsSqlHook")
@mock.patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
def test_file_splitting(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test that ndjson is split by approx_max_file_size_bytes param."""
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
expected_upload = {
JSON_FILENAME.format(0): b"".join(NDJSON_LINES[:2]),
JSON_FILENAME.format(1): NDJSON_LINES[2],
}
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False, metadata=None):
assert bucket == BUCKET
assert mime_type == "application/json"
assert gzip == GZIP
with open(tmp_filename, "rb") as file:
assert expected_upload[obj] == file.read()
gcs_hook_mock.upload.side_effect = _assert_upload
op = MSSQLToGCSOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
approx_max_file_size_bytes=len(expected_upload[JSON_FILENAME.format(0)]),
)
op.execute(None)
@mock.patch("airflow.providers.google.cloud.transfers.mssql_to_gcs.MsSqlHook")
@mock.patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
@pytest.mark.parametrize(
("bit_fields", "schema_json"),
[(None, SCHEMA_JSON), (["bit_fields", SCHEMA_JSON_BIT_FIELDS])],
)
def test_schema_file(self, gcs_hook_mock_class, mssql_hook_mock_class, bit_fields, schema_json):
"""Test writing schema files."""
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
def _assert_upload(bucket, obj, tmp_filename, mime_type, gzip, metadata=None):
if obj == SCHEMA_FILENAME:
with open(tmp_filename, "rb") as file:
assert b"".join(SCHEMA_JSON) == file.read()
gcs_hook_mock.upload.side_effect = _assert_upload
op = MSSQLToGCSOperator(
task_id=TASK_ID,
sql=SQL,
bucket=BUCKET,
filename=JSON_FILENAME,
schema_filename=SCHEMA_FILENAME,
bit_fields=["some_bit"],
)
op.execute(None)
# once for the file and once for the schema
assert gcs_hook_mock.upload.call_count == 2
@pytest.mark.parametrize(
("connection_port", "default_port", "expected_port"),
[(None, 4321, 4321), (1234, None, 1234), (1234, 4321, 1234)],
)
def test_execute_openlineage_events(self, connection_port, default_port, expected_port):
class DBApiHookForTests(DbApiHook):
conn_name_attr = "sql_default"
get_conn = mock.MagicMock(name="conn")
get_connection = mock.MagicMock()
def get_openlineage_database_info(self, connection):
from airflow.providers.openlineage.sqlparser import DatabaseInfo
return DatabaseInfo(
scheme="sqlscheme",
authority=DbApiHook.get_openlineage_authority_part(connection, default_port=default_port),
)
dbapi_hook = DBApiHookForTests()
class MSSQLToGCSOperatorForTest(MSSQLToGCSOperator):
@property
def db_hook(self):
return dbapi_hook
sql = """SELECT a,b,c from my_db.my_table"""
op = MSSQLToGCSOperatorForTest(task_id=TASK_ID, sql=sql, bucket="bucket", filename="dir/file{}.csv")
DB_SCHEMA_NAME = "PUBLIC"
rows = [
(DB_SCHEMA_NAME, "popular_orders_day_of_week", "order_day_of_week", 1, "varchar"),
(DB_SCHEMA_NAME, "popular_orders_day_of_week", "order_placed_on", 2, "timestamp"),
(DB_SCHEMA_NAME, "popular_orders_day_of_week", "orders_placed", 3, "int4"),
]
dbapi_hook.get_connection.return_value = Connection(
conn_id="sql_default", conn_type="mssql", host="host", port=connection_port
)
dbapi_hook.get_conn.return_value.cursor.return_value.fetchall.side_effect = [rows, []]
lineage = op.get_openlineage_facets_on_start()
assert len(lineage.inputs) == 1
assert lineage.inputs[0].namespace == f"sqlscheme://host:{expected_port}"
assert lineage.inputs[0].name == "PUBLIC.popular_orders_day_of_week"
assert len(lineage.inputs[0].facets) == 1
assert lineage.inputs[0].facets["schema"].fields == [
SchemaDatasetFacetFields(name="order_day_of_week", type="varchar"),
SchemaDatasetFacetFields(name="order_placed_on", type="timestamp"),
SchemaDatasetFacetFields(name="orders_placed", type="int4"),
]
assert lineage.outputs == [
OutputDataset(
namespace="gs://bucket",
name="dir",
)
]
assert len(lineage.job_facets) == 1
assert lineage.job_facets["sql"].query == sql
assert lineage.run_facets == {}
@mock.patch("airflow.providers.google.cloud.transfers.mssql_to_gcs.MsSqlHook")
@mock.patch("airflow.providers.google.cloud.transfers.sql_to_gcs.GCSHook")
def test_bit_to_boolean_field_conversion(self, gcs_hook_mock_class, mssql_hook_mock_class):
"""Test successful run of execute function for Parquet format with boolean fields.
This test verifies that MSSQL tables with columns of type "BIT" can exported
using the bit_fields parameter, resulting in boolean fields in the Parquet file.
"""
import pyarrow
op = MSSQLToGCSOperator(
task_id=TASK_ID,
mssql_conn_id=MSSQL_CONN_ID,
sql=SQL,
bucket=BUCKET,
filename=PARQUET_FILENAME,
export_format="parquet",
bit_fields=["some_binary", "some_bit"],
)
mssql_hook_mock = mssql_hook_mock_class.return_value
mssql_hook_mock.get_conn().cursor().__iter__.return_value = iter(ROWS)
mssql_hook_mock.get_conn().cursor().description = CURSOR_DESCRIPTION
gcs_hook_mock = gcs_hook_mock_class.return_value
upload_called = False
def _assert_upload(bucket, obj, tmp_filename, mime_type=None, gzip=False, metadata=None):
nonlocal upload_called
upload_called = True
assert bucket == BUCKET
assert obj == PARQUET_FILENAME.format(0)
assert mime_type == "application/octet-stream"
assert gzip == GZIP
parquet_file = pyarrow.parquet.ParquetFile(tmp_filename)
schema = parquet_file.schema_arrow
# Verify that bit fields are mapped to boolean type in parquet schema
assert schema.field("some_binary").type.equals(pyarrow.bool_())
assert schema.field("some_bit").type.equals(pyarrow.bool_())
gcs_hook_mock.upload.side_effect = _assert_upload
op.execute(None)
assert upload_called, "Expected upload to be called"
mssql_hook_mock_class.assert_called_once_with(mssql_conn_id=MSSQL_CONN_ID)
mssql_hook_mock.get_conn().cursor().execute.assert_called_once_with(SQL)
| TestMsSqlToGoogleCloudStorageOperator |
python | pennersr__django-allauth | allauth/account/forms.py | {
"start": 25417,
"end": 26382
} | class ____(forms.Form):
uidb36 = forms.CharField()
key = forms.CharField()
reset_user = None
token_generator = default_token_generator
def _get_user(self, uidb36):
User = get_user_model()
try:
pk = url_str_to_user_pk(uidb36)
return User.objects.get(pk=pk)
except (ValueError, User.DoesNotExist):
return None
def clean(self):
cleaned_data = super(UserTokenForm, self).clean()
uidb36 = cleaned_data.get("uidb36", None)
key = cleaned_data.get("key", None)
adapter = get_adapter()
if not key:
raise adapter.validation_error("invalid_password_reset")
self.reset_user = self._get_user(uidb36)
if self.reset_user is None or not self.token_generator.check_token(
self.reset_user, key
):
raise adapter.validation_error("invalid_password_reset")
return cleaned_data
| UserTokenForm |
python | xlwings__xlwings | xlwings/constants.py | {
"start": 74280,
"end": 75079
} | class ____:
xlListDataTypeCheckbox = 9 # from enum XlListDataType
xlListDataTypeChoice = 6 # from enum XlListDataType
xlListDataTypeChoiceMulti = 7 # from enum XlListDataType
xlListDataTypeCounter = 11 # from enum XlListDataType
xlListDataTypeCurrency = 4 # from enum XlListDataType
xlListDataTypeDateTime = 5 # from enum XlListDataType
xlListDataTypeHyperLink = 10 # from enum XlListDataType
xlListDataTypeListLookup = 8 # from enum XlListDataType
xlListDataTypeMultiLineRichText = 12 # from enum XlListDataType
xlListDataTypeMultiLineText = 2 # from enum XlListDataType
xlListDataTypeNone = 0 # from enum XlListDataType
xlListDataTypeNumber = 3 # from enum XlListDataType
xlListDataTypeText = 1 # from enum XlListDataType
| ListDataType |
python | coleifer__peewee | playhouse/sqlite_ext.py | {
"start": 4841,
"end": 7853
} | class ____(TextField):
field_type = 'JSON'
unpack = False
Path = JSONPath
def __init__(self, json_dumps=None, json_loads=None, **kwargs):
self._json_dumps = json_dumps or json.dumps
self._json_loads = json_loads or json.loads
super(JSONField, self).__init__(**kwargs)
def python_value(self, value):
if value is not None:
try:
return self._json_loads(value)
except (TypeError, ValueError):
return value
def db_value(self, value):
if value is not None:
if not isinstance(value, Node):
value = fn.json(self._json_dumps(value))
return value
def _e(op):
def inner(self, rhs):
if isinstance(rhs, (list, dict)):
rhs = Value(rhs, converter=self.db_value, unpack=False)
return Expression(self, op, rhs)
return inner
__eq__ = _e(OP.EQ)
__ne__ = _e(OP.NE)
__gt__ = _e(OP.GT)
__ge__ = _e(OP.GTE)
__lt__ = _e(OP.LT)
__le__ = _e(OP.LTE)
__hash__ = Field.__hash__
def __getitem__(self, item):
return self.Path(self)[item]
def extract(self, *paths):
paths = [Value(p, converter=False) for p in paths]
return fn.json_extract(self, *paths)
def extract_json(self, path):
return Expression(self, '->', Value(path, converter=False))
def extract_text(self, path):
return Expression(self, '->>', Value(path, converter=False))
def append(self, value, as_json=None):
return self.Path(self).append(value, as_json)
def insert(self, value, as_json=None):
return self.Path(self).insert(value, as_json)
def set(self, value, as_json=None):
return self.Path(self).set(value, as_json)
def replace(self, value, as_json=None):
return self.Path(self).replace(value, as_json)
def update(self, data):
return self.Path(self).update(data)
def remove(self, *paths):
if not paths:
return self.Path(self).remove()
return fn.json_remove(self, *paths)
def json_type(self):
return fn.json_type(self)
def length(self, path=None):
args = (self, path) if path else (self,)
return fn.json_array_length(*args)
def children(self):
"""
Schema of `json_each` and `json_tree`:
key,
value,
type TEXT (object, array, string, etc),
atom (value for primitive/scalar types, NULL for array and object)
id INTEGER (unique identifier for element)
parent INTEGER (unique identifier of parent element or NULL)
fullkey TEXT (full path describing element)
path TEXT (path to the container of the current element)
json JSON hidden (1st input parameter to function)
root TEXT hidden (2nd input parameter, path at which to start)
"""
return fn.json_each(self)
def tree(self):
return fn.json_tree(self)
| JSONField |
python | PrefectHQ__prefect | tests/runtime/test_task_run.py | {
"start": 4359,
"end": 4778
} | class ____:
async def test_parameters_is_attribute(self):
assert "parameters" in dir(task_run)
async def test_parameters_is_dict_when_not_set(self):
assert task_run.parameters == {}
async def test_parameters_from_context(self):
with TaskRunContext.model_construct(parameters={"x": "foo", "y": "bar"}):
assert task_run.parameters == {"x": "foo", "y": "bar"}
| TestParameters |
python | sympy__sympy | sympy/stats/joint_rv_types.py | {
"start": 28364,
"end": 30575
} | class ____(JointDistribution):
_argnames = ('k0', 'p')
is_Continuous=False
is_Discrete = True
@staticmethod
def check(k0, p):
_value_check(k0 > 0,
"number of failures must be a positive integer")
for p_k in p:
_value_check((p_k >= 0, p_k <= 1),
"probability must be in range [0, 1].")
_value_check(sum(p) <= 1,
"success probabilities must not be greater than 1.")
@property
def set(self):
return Range(0, S.Infinity)**len(self.p)
def pdf(self, *k):
k0, p = self.k0, self.p
term_1 = (gamma(k0 + sum(k))*(1 - sum(p))**k0)/gamma(k0)
term_2 = Mul.fromiter(pi**ki/factorial(ki) for pi, ki in zip(p, k))
return term_1 * term_2
def NegativeMultinomial(syms, k0, *p):
"""
Creates a discrete random variable with Negative Multinomial Distribution.
The density of the said distribution can be found at [1].
Parameters
==========
k0 : positive integer
Represents number of failures before the experiment is stopped
p : List of event probabilities
Must be in the range of $[0, 1]$
Returns
=======
RandomSymbol
Examples
========
>>> from sympy.stats import density, NegativeMultinomial, marginal_distribution
>>> from sympy import symbols
>>> x1, x2, x3 = symbols('x1, x2, x3', nonnegative=True, integer=True)
>>> p1, p2, p3 = symbols('p1, p2, p3', positive=True)
>>> N = NegativeMultinomial('M', 3, p1, p2, p3)
>>> N_c = NegativeMultinomial('M', 3, 0.1, 0.1, 0.1)
>>> density(N)(x1, x2, x3)
p1**x1*p2**x2*p3**x3*(-p1 - p2 - p3 + 1)**3*gamma(x1 + x2 +
x3 + 3)/(2*factorial(x1)*factorial(x2)*factorial(x3))
>>> marginal_distribution(N_c, N_c[0])(1).evalf().round(2)
0.25
References
==========
.. [1] https://en.wikipedia.org/wiki/Negative_multinomial_distribution
.. [2] https://mathworld.wolfram.com/NegativeBinomialDistribution.html
"""
if not isinstance(p[0], list):
p = (list(p), )
return multivariate_rv(NegativeMultinomialDistribution, syms, k0, p[0])
| NegativeMultinomialDistribution |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte_tests/beta/test_asset_specs.py | {
"start": 7006,
"end": 8508
} | class ____(DagsterAirbyteTranslator):
def get_asset_spec(self, data: AirbyteConnectionTableProps) -> AssetSpec: # pyright: ignore[reportIncompatibleMethodOverride]
default_spec = super().get_asset_spec(data)
return default_spec.replace_attributes(group_name="my_group_name")
def test_translator_custom_group_name_with_asset_factory(
fetch_workspace_data_api_mocks: responses.RequestsMock,
resource: Union[AirbyteCloudWorkspace, AirbyteWorkspace],
) -> None:
my_airbyte_assets = build_airbyte_assets_definitions(
workspace=resource, dagster_airbyte_translator=MyCustomTranslatorWithGroupName()
)
first_assets_def = next(assets_def for assets_def in my_airbyte_assets)
first_asset_spec = next(asset_spec for asset_spec in first_assets_def.specs)
assert first_asset_spec.group_name == "my_group_name"
def test_translator_invariant_group_name_with_asset_decorator(
fetch_workspace_data_api_mocks: responses.RequestsMock,
resource: Union[AirbyteCloudWorkspace, AirbyteWorkspace],
) -> None:
with pytest.raises(
DagsterInvariantViolationError,
match="Cannot set group_name parameter on airbyte_assets",
):
@airbyte_assets(
connection_id=TEST_CONNECTION_ID,
workspace=resource,
group_name="my_asset_decorator_group_name",
dagster_airbyte_translator=MyCustomTranslatorWithGroupName(),
)
def my_airbyte_assets(): ...
| MyCustomTranslatorWithGroupName |
python | run-llama__llama_index | llama-index-integrations/llms/llama-index-llms-oci-data-science/llama_index/llms/oci_data_science/client.py | {
"start": 741,
"end": 2138
} | class ____(httpx.Auth):
"""
Custom HTTPX authentication class that uses the OCI Signer for request signing.
Attributes:
signer (oci.signer.Signer): The OCI signer used to sign requests.
"""
def __init__(self, signer: oci.signer.Signer):
"""
Initialize the OCIAuth instance.
Args:
signer (oci.signer.Signer): The OCI signer to use for signing requests.
"""
self.signer = signer
def auth_flow(self, request: httpx.Request) -> Iterator[httpx.Request]:
"""
The authentication flow that signs the HTTPX request using the OCI signer.
Args:
request (httpx.Request): The outgoing HTTPX request to be signed.
Yields:
httpx.Request: The signed HTTPX request.
"""
# Create a requests.Request object from the HTTPX request
req = requests.Request(
method=request.method,
url=str(request.url),
headers=dict(request.headers),
data=request.content,
)
prepared_request = req.prepare()
# Sign the request using the OCI Signer
self.signer.do_request_sign(prepared_request)
# Update the original HTTPX request with the signed headers
request.headers.update(prepared_request.headers)
# Proceed with the request
yield request
| OCIAuth |
python | huggingface__transformers | src/transformers/models/blip_2/modeling_blip_2.py | {
"start": 14584,
"end": 15656
} | class ____(GradientCheckpointingLayer):
def __init__(self, config: Blip2Config):
super().__init__()
self.embed_dim = config.hidden_size
self.self_attn = Blip2Attention(config)
self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
self.mlp = Blip2MLP(config)
self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
@auto_docstring
def forward(
self,
hidden_states: torch.Tensor,
**kwargs: Unpack[TransformersKwargs],
) -> torch.FloatTensor:
residual = hidden_states
hidden_states = self.layer_norm1(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
**kwargs,
)
hidden_states = hidden_states + residual
residual = hidden_states
hidden_states = self.layer_norm2(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = hidden_states + residual
return hidden_states
@auto_docstring
| Blip2EncoderLayer |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/eventbridge.py | {
"start": 8567,
"end": 10421
} | class ____(AwsBaseOperator[EventBridgeHook]):
"""
Disable an EventBridge Rule.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:EventBridgeDisableRuleOperator`
:param name: the name of the rule to disable
:param event_bus_name: the name or ARN of the event bus associated with the rule (default if omitted)
:param aws_conn_id: The Airflow connection used for AWS credentials.
If this is ``None`` or empty then the default boto3 behaviour is used. If
running Airflow in a distributed manner and aws_conn_id is None or
empty, then default boto3 configuration would be used (and must be
maintained on each worker node).
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.htmlt
"""
aws_hook_class = EventBridgeHook
template_fields: Sequence[str] = aws_template_fields("name", "event_bus_name")
def __init__(self, *, name: str, event_bus_name: str | None = None, **kwargs):
super().__init__(**kwargs)
self.name = name
self.event_bus_name = event_bus_name
def execute(self, context: Context):
self.hook.conn.disable_rule(
**prune_dict(
{
"Name": self.name,
"EventBusName": self.event_bus_name,
}
)
)
self.log.info('Disabled rule "%s"', self.name)
| EventBridgeDisableRuleOperator |
python | dagster-io__dagster | python_modules/libraries/dagstermill/dagstermill/io_managers.py | {
"start": 3100,
"end": 4440
} | class ____(ConfigurableIOManagerFactory):
"""Built-in IO Manager for handling output notebook."""
base_dir: Optional[str] = Field(
default=None,
description=(
"Base directory to use for output notebooks. Defaults to the Dagster instance storage"
" directory if not provided."
),
)
asset_key_prefix: list[str] = Field(
default=[],
description=(
"Asset key prefix to apply to assets materialized for output notebooks. Defaults to no"
" prefix."
),
)
@classmethod
def _is_dagster_maintained(cls) -> bool:
return True
def create_io_manager(self, context: InitResourceContext) -> "LocalOutputNotebookIOManager":
return LocalOutputNotebookIOManager(
base_dir=self.base_dir or check.not_none(context.instance).storage_directory(),
asset_key_prefix=self.asset_key_prefix,
)
@beta
@dagster_maintained_io_manager
@io_manager(config_schema=ConfigurableLocalOutputNotebookIOManager.to_config_schema())
def local_output_notebook_io_manager(init_context) -> LocalOutputNotebookIOManager:
"""Built-in IO Manager that handles output notebooks."""
return ConfigurableLocalOutputNotebookIOManager.from_resource_context(init_context)
| ConfigurableLocalOutputNotebookIOManager |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-marketo/source_marketo/source.py | {
"start": 905,
"end": 2616
} | class ____(HttpStream, ABC):
primary_key = "id"
data_field = "result"
page_size = 300
def __init__(self, config: Mapping[str, Any], stream_name: str = None, param: Mapping[str, Any] = None, export_id: int = None):
super().__init__(authenticator=config["authenticator"])
self.config = config
self.start_date = config["start_date"]
# this is done for test purposes, the field is not exposed to spec.json!
self.end_date = config.get("end_date")
self.window_in_days = config.get("window_in_days", 30)
self._url_base = config["domain_url"].rstrip("/") + "/"
self.stream_name = stream_name
self.param = param
self.export_id = export_id
@property
def url_base(self) -> str:
return self._url_base
@property
def availability_strategy(self) -> Optional["AvailabilityStrategy"]:
return None
def path(self, **kwargs) -> str:
return f"rest/v1/{self.name}.json"
def next_page_token(self, response: requests.Response) -> Optional[Mapping[str, Any]]:
next_page = response.json().get("nextPageToken")
if next_page:
return {"nextPageToken": next_page}
def request_params(self, next_page_token: Mapping[str, Any] = None, **kwargs) -> MutableMapping[str, Any]:
params = {"batchSize": self.page_size}
if next_page_token:
params.update(**next_page_token)
return params
def parse_response(self, response: requests.Response, stream_state: Mapping[str, Any], **kwargs) -> Iterable[Mapping]:
data = response.json().get(self.data_field, [])
for record in data:
yield record
| MarketoStream |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/sensors.py | {
"start": 9447,
"end": 11082
} | class ____(graphene.Mutation):
"""Disable a sensor from launching runs for a job."""
Output = graphene.NonNull(GrapheneStopSensorMutationResultOrError)
class Arguments:
id = graphene.Argument(graphene.String) # Sensor / InstigationState id
# "job" legacy name for instigators, predates current Job
job_origin_id = graphene.Argument(graphene.String)
job_selector_id = graphene.Argument(graphene.String)
class Meta:
name = "StopSensorMutation"
@capture_error
@require_permission_check(Permissions.EDIT_SENSOR)
def mutate(
self,
graphene_info: ResolveInfo,
id: Optional[str] = None,
job_origin_id: Optional[str] = None,
job_selector_id: Optional[str] = None,
):
if id:
cid = CompoundID.from_string(id)
sensor_origin_id = cid.remote_origin_id
sensor_selector_id = cid.selector_id
elif job_origin_id and CompoundID.is_valid_string(job_origin_id):
# cross-push handle if InstigationState.id being passed through as origin id
cid = CompoundID.from_string(job_origin_id)
sensor_origin_id = cid.remote_origin_id
sensor_selector_id = cid.selector_id
elif job_origin_id is None or job_selector_id is None:
raise DagsterInvariantViolationError("Must specify id or jobOriginId and jobSelectorId")
else:
sensor_origin_id = job_origin_id
sensor_selector_id = job_selector_id
return stop_sensor(graphene_info, sensor_origin_id, sensor_selector_id)
| GrapheneStopSensorMutation |
python | django__django | django/db/models/expressions.py | {
"start": 29895,
"end": 32039
} | class ____(Combinable):
"""An object capable of resolving references to existing query objects."""
allowed_default = False
def __init__(self, name):
"""
Arguments:
* name: the name of the field this expression references
"""
self.name = name
def __repr__(self):
return "{}({})".format(self.__class__.__name__, self.name)
def __getitem__(self, subscript):
return Sliced(self, subscript)
def __contains__(self, other):
# Disable old-style iteration protocol inherited from implementing
# __getitem__() to prevent this method from hanging.
raise TypeError(f"argument of type '{self.__class__.__name__}' is not iterable")
def resolve_expression(
self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False
):
return query.resolve_ref(self.name, allow_joins, reuse, summarize)
def replace_expressions(self, replacements):
if (replacement := replacements.get(self)) is not None:
return replacement
field_name, *transforms = self.name.split(LOOKUP_SEP)
# Avoid unnecessarily looking up replacements with field_name again as
# in the vast majority of cases F instances won't be composed of any
# lookups.
if not transforms:
return self
if (
replacement := replacements.get(F(field_name))
) is None or replacement._output_field_or_none is None:
return self
for transform in transforms:
transform_class = replacement.get_transform(transform)
if transform_class is None:
return self
replacement = transform_class(replacement)
return replacement
def asc(self, **kwargs):
return OrderBy(self, **kwargs)
def desc(self, **kwargs):
return OrderBy(self, descending=True, **kwargs)
def __eq__(self, other):
return self.__class__ == other.__class__ and self.name == other.name
def __hash__(self):
return hash(self.name)
def copy(self):
return copy.copy(self)
| F |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/error.py | {
"start": 3710,
"end": 5036
} | class ____(YAMLError):
def __init__(
self,
context=None,
context_mark=None,
problem=None,
problem_mark=None,
note=None,
warn=None,
):
# type: (Any, Any, Any, Any, Any, Any) -> None
self.context = context
self.context_mark = context_mark
self.problem = problem
self.problem_mark = problem_mark
self.note = note
# warn is ignored
def __str__(self):
# type: () -> Any
lines = [] # type: List[str]
if self.context is not None:
lines.append(self.context)
if self.context_mark is not None and (
self.problem is None
or self.problem_mark is None
or self.context_mark.name != self.problem_mark.name
or self.context_mark.line != self.problem_mark.line
or self.context_mark.column != self.problem_mark.column
):
lines.append(str(self.context_mark))
if self.problem is not None:
lines.append(self.problem)
if self.problem_mark is not None:
lines.append(str(self.problem_mark))
if self.note is not None and self.note:
note = textwrap.dedent(self.note)
lines.append(note)
return '\n'.join(lines)
| MarkedYAMLError |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_setitem.py | {
"start": 31404,
"end": 31791
} | class ____(SetitemCastingEquivalents):
@pytest.fixture(params=[np.nan, np.float64("NaN"), None, NA])
def val(self, request):
"""
NA values that should generally be valid_na for *all* dtypes.
Include both python float NaN and np.float64; only np.float64 has a
`dtype` attribute.
"""
return request.param
| TestSetitemCastingEquivalents |
python | pytorch__pytorch | torch/ao/pruning/_experimental/data_sparsifier/lightning/callbacks/data_sparsity.py | {
"start": 348,
"end": 2204
} | class ____(pl.callbacks.Callback):
"""Lightning callback that enables post-training sparsity.
This callback aims to sparsify the model inside lightning module after training.
**Note that the model is copied and then sparsified, so the existing model is not modified**
The sparsified model can be used for comparison and can be accessed using
<callback_obj>.sparsified
Args:
data_sparsifier_class (some implemented class of BaseDataSparsifier)
The data sparsifier object of this class is created when the
training starts.
Note: Objects should not be passed in here as they are created
once the training completes.
data_sparsifier_args (Dict)
Dictionary of args to be passed to the data sparsifier.
Note: data_list arg should be ignored
Hooks implemented:
on_fit_end()
1. copies the model and attaches it to the sparsifier
2. sparsier step() is called
3. squashes the mask()
"""
def __init__(self, data_sparsifier_class, data_sparsifier_args):
super().__init__()
self.data_sparsifier_class = data_sparsifier_class
self.data_sparsifier_args = data_sparsifier_args
self.data_sparsifier: Any = None
self.sparsified: torch.nn.Module | None = None
def on_fit_end(self, trainer, pl_module) -> None:
self.sparsified = deepcopy(pl_module.model).eval()
self.data_sparsifier = self.data_sparsifier_class(**self.data_sparsifier_args)
_attach_model_to_data_sparsifier(self.sparsified, self.data_sparsifier)
self.data_sparsifier.step()
self.data_sparsifier.squash_mask() # currently squashes params for all mask
_log_sparsified_level(self.sparsified, self.data_sparsifier)
| PostTrainingDataSparsity |
python | arrow-py__arrow | arrow/locales.py | {
"start": 99040,
"end": 100498
} | class ____(Locale):
names = ["id", "id-id"]
past = "{0} yang lalu"
future = "dalam {0}"
and_word = "dan"
timeframes = {
"now": "baru saja",
"second": "1 sebentar",
"seconds": "{0} detik",
"minute": "1 menit",
"minutes": "{0} menit",
"hour": "1 jam",
"hours": "{0} jam",
"day": "1 hari",
"days": "{0} hari",
"week": "1 minggu",
"weeks": "{0} minggu",
"month": "1 bulan",
"months": "{0} bulan",
"quarter": "1 kuartal",
"quarters": "{0} kuartal",
"year": "1 tahun",
"years": "{0} tahun",
}
meridians = {"am": "", "pm": "", "AM": "", "PM": ""}
month_names = [
"",
"Januari",
"Februari",
"Maret",
"April",
"Mei",
"Juni",
"Juli",
"Agustus",
"September",
"Oktober",
"November",
"Desember",
]
month_abbreviations = [
"",
"Jan",
"Feb",
"Mar",
"Apr",
"Mei",
"Jun",
"Jul",
"Ags",
"Sept",
"Okt",
"Nov",
"Des",
]
day_names = ["", "Senin", "Selasa", "Rabu", "Kamis", "Jumat", "Sabtu", "Minggu"]
day_abbreviations = [
"",
"Senin",
"Selasa",
"Rabu",
"Kamis",
"Jumat",
"Sabtu",
"Minggu",
]
| IndonesianLocale |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/methodOverride1.py | {
"start": 435,
"end": 3490
} | class ____:
def my_method1(self, a: int):
return 1
def my_method2(self, a: int, b: int):
return 1
def my_method3(self, a: int, b: int):
return 1
def my_method4(self, a: int, *b: int):
return 1
def my_method5(self, a: int, _b: int):
return 1
def my_method6(self, a: int, /, b: int):
return 1
def my_method7(self, a: int, /, b: int):
return 1
def my_method8(self, a: int, b: int):
return 1
def my_method9(self, a: int, b: int):
return 1
def my_method10(self, a: int, b: int):
return 1
def my_method11(self, a: int, b: int):
return 1
def my_method12(self, a: int | str) -> int | str:
return 1
def my_method13(self, a: int) -> int:
return 1
def my_method14(self, a: int) -> int:
return 1
def my_method15(self, a: int) -> int:
return 1
def my_method16(self, a: int) -> int:
return 1
def my_method17(self, a: str, b: int, c: float, d: bool) -> None: ...
def my_method18(self, a: str, b: int, c: float, d: bool) -> None: ...
def my_method19(self, a: str, b: int, c: float, d: bool) -> None: ...
@classmethod
def my_method20(cls: type[T_ParentClass], a: str) -> T_ParentClass: ...
def my_method21(self, var: int) -> None: ...
def _protected_method1(self, a: int):
return 1
def __private_method1(self, a: int):
return 1
def my_method22(self, a: str, b: int, c: float, d: bool) -> None: ...
def my_method23(self, a: str = "") -> None: ...
def my_method24(self, a: str) -> None: ...
def my_method25(self, *, a: str = "") -> None: ...
def my_method26(self, *, a: str) -> None: ...
def my_method27(self, a: object, /) -> None: ...
def my_method28(self, __a: object) -> None: ...
@classmethod
def my_method29(cls, /) -> None: ...
@classmethod
def my_method30(cls, /) -> None: ...
@staticmethod
def my_method31(a: "type[ParentClass]", /) -> None: ...
@staticmethod
def my_method32(a: "type[ParentClass]", /) -> None: ...
def my_method33(self, /) -> None: ...
def my_method34(self, /) -> None: ...
def my_method35(self, *, a: int) -> None: ...
def my_method36(self, *, a: int) -> None: ...
def my_method37(self, a: int, /) -> None: ...
def my_method38(self, a: int, /) -> None: ...
def my_method39(self, a: int, /) -> None: ...
def my_method40(self, a: int, /) -> None: ...
def my_method41(self, a: int, b: str, c: str) -> None: ...
def my_method42(self, a: int, b: int, c: str) -> None: ...
my_method43: Callable[..., None]
def my_method44(self, *args: object, **kwargs: object) -> None: ...
def my_method45(self, __i: int) -> None: ...
def __my_method46__(self, x: int) -> None: ...
def __my_method47__(self, x: int) -> None: ...
def my_method48(self, /, **kwargs: object) -> None: ...
T_ChildClass = TypeVar("T_ChildClass", bound="ChildClass")
| ParentClass |
python | agronholm__apscheduler | src/apscheduler/_events.py | {
"start": 500,
"end": 999
} | class ____:
"""
Base class for all events.
:ivar timestamp: the time when the event occurred
"""
timestamp: datetime = attrs.field(
factory=partial(datetime.now, timezone.utc), converter=as_aware_datetime
)
def marshal(self) -> dict[str, Any]:
return attrs.asdict(self)
@classmethod
def unmarshal(cls, marshalled: dict[str, Any]) -> Event:
return cls(**marshalled)
#
# Data store events
#
@attrs.define(kw_only=True, frozen=True)
| Event |
python | facelessuser__pymdown-extensions | pymdownx/blocks/__init__.py | {
"start": 18291,
"end": 18955
} | class ____(Extension):
"""Add generic Blocks extension."""
def extendMarkdown(self, md: Markdown) -> None:
"""Add Blocks to Markdown instance."""
md.registerExtension(self)
util.escape_chars(md, ['/'])
self.extension = BlocksProcessor(md.parser, md)
# We want to be right after list indentations are processed
md.parser.blockprocessors.register(self.extension, "blocks", 89.99)
tree = BlocksTreeprocessor(md, self.extension)
md.treeprocessors.register(tree, 'blocks_on_inline_end', 19.99)
def reset(self) -> None:
"""Reset."""
self.extension._reset()
| BlocksMgrExtension |
python | django__django | tests/model_formsets/tests.py | {
"start": 86029,
"end": 97220
} | class ____(TestCase):
def test_modelformset_factory_widgets(self):
widgets = {"name": forms.TextInput(attrs={"class": "poet"})}
PoetFormSet = modelformset_factory(Poet, fields="__all__", widgets=widgets)
form = PoetFormSet.form()
self.assertHTMLEqual(
str(form["name"]),
'<input id="id_name" maxlength="100" type="text" class="poet" name="name" '
"required>",
)
def test_inlineformset_factory_widgets(self):
widgets = {"title": forms.TextInput(attrs={"class": "book"})}
BookFormSet = inlineformset_factory(
Author, Book, widgets=widgets, fields="__all__"
)
form = BookFormSet.form()
self.assertHTMLEqual(
str(form["title"]),
'<input class="book" id="id_title" maxlength="100" name="title" '
'type="text" required>',
)
def test_modelformset_factory_labels_overrides(self):
BookFormSet = modelformset_factory(
Book, fields="__all__", labels={"title": "Name"}
)
form = BookFormSet.form()
self.assertHTMLEqual(
form["title"].label_tag(), '<label for="id_title">Name:</label>'
)
self.assertHTMLEqual(
form["title"].legend_tag(),
"<legend>Name:</legend>",
)
def test_inlineformset_factory_labels_overrides(self):
BookFormSet = inlineformset_factory(
Author, Book, fields="__all__", labels={"title": "Name"}
)
form = BookFormSet.form()
self.assertHTMLEqual(
form["title"].label_tag(), '<label for="id_title">Name:</label>'
)
self.assertHTMLEqual(
form["title"].legend_tag(),
"<legend>Name:</legend>",
)
def test_modelformset_factory_help_text_overrides(self):
BookFormSet = modelformset_factory(
Book, fields="__all__", help_texts={"title": "Choose carefully."}
)
form = BookFormSet.form()
self.assertEqual(form["title"].help_text, "Choose carefully.")
def test_inlineformset_factory_help_text_overrides(self):
BookFormSet = inlineformset_factory(
Author, Book, fields="__all__", help_texts={"title": "Choose carefully."}
)
form = BookFormSet.form()
self.assertEqual(form["title"].help_text, "Choose carefully.")
def test_modelformset_factory_error_messages_overrides(self):
author = Author.objects.create(pk=1, name="Charles Baudelaire")
BookFormSet = modelformset_factory(
Book,
fields="__all__",
error_messages={"title": {"max_length": "Title too long!!"}},
)
form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id})
form.full_clean()
self.assertEqual(form.errors, {"title": ["Title too long!!"]})
def test_inlineformset_factory_error_messages_overrides(self):
author = Author.objects.create(pk=1, name="Charles Baudelaire")
BookFormSet = inlineformset_factory(
Author,
Book,
fields="__all__",
error_messages={"title": {"max_length": "Title too long!!"}},
)
form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id})
form.full_clean()
self.assertEqual(form.errors, {"title": ["Title too long!!"]})
def test_modelformset_factory_field_class_overrides(self):
author = Author.objects.create(pk=1, name="Charles Baudelaire")
BookFormSet = modelformset_factory(
Book,
fields="__all__",
field_classes={
"title": forms.SlugField,
},
)
form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id})
self.assertIs(Book._meta.get_field("title").__class__, models.CharField)
self.assertIsInstance(form.fields["title"], forms.SlugField)
def test_inlineformset_factory_field_class_overrides(self):
author = Author.objects.create(pk=1, name="Charles Baudelaire")
BookFormSet = inlineformset_factory(
Author,
Book,
fields="__all__",
field_classes={
"title": forms.SlugField,
},
)
form = BookFormSet.form(data={"title": "Foo " * 30, "author": author.id})
self.assertIs(Book._meta.get_field("title").__class__, models.CharField)
self.assertIsInstance(form.fields["title"], forms.SlugField)
def test_modelformset_factory_absolute_max(self):
AuthorFormSet = modelformset_factory(
Author, fields="__all__", absolute_max=1500
)
data = {
"form-TOTAL_FORMS": "1501",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
formset = AuthorFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 1500)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 1000 forms."],
)
def test_modelformset_factory_absolute_max_with_max_num(self):
AuthorFormSet = modelformset_factory(
Author,
fields="__all__",
max_num=20,
absolute_max=100,
)
data = {
"form-TOTAL_FORMS": "101",
"form-INITIAL_FORMS": "0",
"form-MAX_NUM_FORMS": "0",
}
formset = AuthorFormSet(data=data)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 100)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 20 forms."],
)
def test_inlineformset_factory_absolute_max(self):
author = Author.objects.create(name="Charles Baudelaire")
BookFormSet = inlineformset_factory(
Author,
Book,
fields="__all__",
absolute_max=1500,
)
data = {
"book_set-TOTAL_FORMS": "1501",
"book_set-INITIAL_FORMS": "0",
"book_set-MAX_NUM_FORMS": "0",
}
formset = BookFormSet(data, instance=author)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 1500)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 1000 forms."],
)
def test_inlineformset_factory_absolute_max_with_max_num(self):
author = Author.objects.create(name="Charles Baudelaire")
BookFormSet = inlineformset_factory(
Author,
Book,
fields="__all__",
max_num=20,
absolute_max=100,
)
data = {
"book_set-TOTAL_FORMS": "101",
"book_set-INITIAL_FORMS": "0",
"book_set-MAX_NUM_FORMS": "0",
}
formset = BookFormSet(data, instance=author)
self.assertIs(formset.is_valid(), False)
self.assertEqual(len(formset.forms), 100)
self.assertEqual(
formset.non_form_errors(),
["Please submit at most 20 forms."],
)
def test_modelformset_factory_can_delete_extra(self):
AuthorFormSet = modelformset_factory(
Author,
fields="__all__",
can_delete=True,
can_delete_extra=True,
extra=2,
)
formset = AuthorFormSet()
self.assertEqual(len(formset), 2)
self.assertIn("DELETE", formset.forms[0].fields)
self.assertIn("DELETE", formset.forms[1].fields)
def test_modelformset_factory_disable_delete_extra(self):
AuthorFormSet = modelformset_factory(
Author,
fields="__all__",
can_delete=True,
can_delete_extra=False,
extra=2,
)
formset = AuthorFormSet()
self.assertEqual(len(formset), 2)
self.assertNotIn("DELETE", formset.forms[0].fields)
self.assertNotIn("DELETE", formset.forms[1].fields)
def test_inlineformset_factory_can_delete_extra(self):
BookFormSet = inlineformset_factory(
Author,
Book,
fields="__all__",
can_delete=True,
can_delete_extra=True,
extra=2,
)
formset = BookFormSet()
self.assertEqual(len(formset), 2)
self.assertIn("DELETE", formset.forms[0].fields)
self.assertIn("DELETE", formset.forms[1].fields)
def test_inlineformset_factory_can_not_delete_extra(self):
BookFormSet = inlineformset_factory(
Author,
Book,
fields="__all__",
can_delete=True,
can_delete_extra=False,
extra=2,
)
formset = BookFormSet()
self.assertEqual(len(formset), 2)
self.assertNotIn("DELETE", formset.forms[0].fields)
self.assertNotIn("DELETE", formset.forms[1].fields)
def test_inlineformset_factory_passes_renderer(self):
from django.forms.renderers import Jinja2
renderer = Jinja2()
BookFormSet = inlineformset_factory(
Author,
Book,
fields="__all__",
renderer=renderer,
)
formset = BookFormSet()
self.assertEqual(formset.renderer, renderer)
def test_modelformset_factory_passes_renderer(self):
from django.forms.renderers import Jinja2
renderer = Jinja2()
BookFormSet = modelformset_factory(Author, fields="__all__", renderer=renderer)
formset = BookFormSet()
self.assertEqual(formset.renderer, renderer)
def test_modelformset_factory_default_renderer(self):
class CustomRenderer(DjangoTemplates):
pass
class ModelFormWithDefaultRenderer(ModelForm):
default_renderer = CustomRenderer()
BookFormSet = modelformset_factory(
Author, form=ModelFormWithDefaultRenderer, fields="__all__"
)
formset = BookFormSet()
self.assertEqual(
formset.forms[0].renderer, ModelFormWithDefaultRenderer.default_renderer
)
self.assertEqual(
formset.empty_form.renderer, ModelFormWithDefaultRenderer.default_renderer
)
self.assertIsInstance(formset.renderer, DjangoTemplates)
def test_inlineformset_factory_default_renderer(self):
class CustomRenderer(DjangoTemplates):
pass
class ModelFormWithDefaultRenderer(ModelForm):
default_renderer = CustomRenderer()
BookFormSet = inlineformset_factory(
Author,
Book,
form=ModelFormWithDefaultRenderer,
fields="__all__",
)
formset = BookFormSet()
self.assertEqual(
formset.forms[0].renderer, ModelFormWithDefaultRenderer.default_renderer
)
self.assertEqual(
formset.empty_form.renderer, ModelFormWithDefaultRenderer.default_renderer
)
self.assertIsInstance(formset.renderer, DjangoTemplates)
| TestModelFormsetOverridesTroughFormMeta |
python | allegroai__clearml | clearml/backend_api/services/v2_9/events.py | {
"start": 79492,
"end": 81816
} | class ____(Request):
"""
For each task, get a list of metrics for which the requested event type was reported
:param tasks: Task IDs
:type tasks: Sequence[str]
:param event_type: Event type
:type event_type: EventTypeEnum
"""
_service = "events"
_action = "get_task_metrics"
_version = "2.9"
_schema = {
"definitions": {
"event_type_enum": {
"enum": [
"training_stats_scalar",
"training_stats_vector",
"training_debug_image",
"plot",
"log",
],
"type": "string",
}
},
"properties": {
"event_type": {
"$ref": "#/definitions/event_type_enum",
"description": "Event type",
},
"tasks": {
"description": "Task IDs",
"items": {"type": "string"},
"type": "array",
},
},
"required": ["tasks"],
"type": "object",
}
def __init__(self, tasks: List[str], event_type: Any = None, **kwargs: Any) -> None:
super(GetTaskMetricsRequest, self).__init__(**kwargs)
self.tasks = tasks
self.event_type = event_type
@schema_property("tasks")
def tasks(self) -> List[str]:
return self._property_tasks
@tasks.setter
def tasks(self, value: List[str]) -> None:
if value is None:
self._property_tasks = None
return
self.assert_isinstance(value, "tasks", (list, tuple))
self.assert_isinstance(value, "tasks", six.string_types, is_array=True)
self._property_tasks = value
@schema_property("event_type")
def event_type(self) -> Any:
return self._property_event_type
@event_type.setter
def event_type(self, value: Any) -> None:
if value is None:
self._property_event_type = None
return
if isinstance(value, six.string_types):
try:
value = EventTypeEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "event_type", enum.Enum)
self._property_event_type = value
| GetTaskMetricsRequest |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/memberAccess20.py | {
"start": 169,
"end": 373
} | class ____(Generic[T]):
def __init__(self, value: T) -> None:
self.value: T = value
def set_value(self, value: int):
# This should generate an error.
self.value = value
| ClassA |
python | run-llama__llama_index | llama-index-core/llama_index/core/instrumentation/events/llm.py | {
"start": 1904,
"end": 2243
} | class ____(BaseEvent):
"""
LLMStructuredPredictInProgressEvent.
Args:
output (BaseModel): Predicted output class.
"""
output: SerializeAsAny[Any]
@classmethod
def class_name(cls) -> str:
"""Class name."""
return "LLMStructuredPredictInProgressEvent"
| LLMStructuredPredictInProgressEvent |
python | walkccc__LeetCode | solutions/516. Longest Palindromic Subsequence/516.py | {
"start": 0,
"end": 383
} | class ____:
def longestPalindromeSubseq(self, s: str) -> int:
@functools.lru_cache(None)
def dp(i: int, j: int) -> int:
"""Returns the length of LPS(s[i..j])."""
if i > j:
return 0
if i == j:
return 1
if s[i] == s[j]:
return 2 + dp(i + 1, j - 1)
return max(dp(i + 1, j), dp(i, j - 1))
return dp(0, len(s) - 1)
| Solution |
python | numpy__numpy | numpy/polynomial/tests/test_hermite_e.py | {
"start": 17324,
"end": 19005
} | class ____:
def test_hermefromroots(self):
res = herme.hermefromroots([])
assert_almost_equal(trim(res), [1])
for i in range(1, 5):
roots = np.cos(np.linspace(-np.pi, 0, 2 * i + 1)[1::2])
pol = herme.hermefromroots(roots)
res = herme.hermeval(roots, pol)
tgt = 0
assert_(len(pol) == i + 1)
assert_almost_equal(herme.herme2poly(pol)[-1], 1)
assert_almost_equal(res, tgt)
def test_hermeroots(self):
assert_almost_equal(herme.hermeroots([1]), [])
assert_almost_equal(herme.hermeroots([1, 1]), [-1])
for i in range(2, 5):
tgt = np.linspace(-1, 1, i)
res = herme.hermeroots(herme.hermefromroots(tgt))
assert_almost_equal(trim(res), trim(tgt))
def test_hermetrim(self):
coef = [2, -1, 1, 0]
# Test exceptions
assert_raises(ValueError, herme.hermetrim, coef, -1)
# Test results
assert_equal(herme.hermetrim(coef), coef[:-1])
assert_equal(herme.hermetrim(coef, 1), coef[:-3])
assert_equal(herme.hermetrim(coef, 2), [0])
def test_hermeline(self):
assert_equal(herme.hermeline(3, 4), [3, 4])
def test_herme2poly(self):
for i in range(10):
assert_almost_equal(herme.herme2poly([0] * i + [1]), Helist[i])
def test_poly2herme(self):
for i in range(10):
assert_almost_equal(herme.poly2herme(Helist[i]), [0] * i + [1])
def test_weight(self):
x = np.linspace(-5, 5, 11)
tgt = np.exp(-.5 * x**2)
res = herme.hermeweight(x)
assert_almost_equal(res, tgt)
| TestMisc |
python | getsentry__sentry | tests/sentry_plugins/pagerduty/test_plugin.py | {
"start": 621,
"end": 4175
} | class ____(PluginTestCase):
@cached_property
def plugin(self) -> PagerDutyPlugin:
return PagerDutyPlugin()
def test_is_configured(self) -> None:
assert self.plugin.is_configured(self.project) is False
self.plugin.set_option("service_key", "abcdef", self.project)
assert self.plugin.is_configured(self.project) is True
@responses.activate
def test_simple_notification(self) -> None:
responses.add(
"GET",
"https://events.pagerduty.com/generic/2010-04-15/create_event.json",
body=INVALID_METHOD,
)
responses.add(
"POST",
"https://events.pagerduty.com/generic/2010-04-15/create_event.json",
body=SUCCESS,
)
self.plugin.set_option("service_key", "abcdef", self.project)
event = self.store_event(
data={
"message": "Hello world",
"level": "warning",
"platform": "python",
"culprit": "foo.bar",
},
project_id=self.project.id,
)
assert event.group is not None
group = event.group
rule = Rule.objects.create(project=self.project, label="my rule")
notification = Notification(event=event, rule=rule)
with self.options({"system.url-prefix": "http://example.com"}):
self.plugin.notify(notification)
request = responses.calls[0].request
payload = orjson.loads(request.body)
assert payload == {
"client_url": "http://example.com",
"event_type": "trigger",
"contexts": [
{
"text": "View Sentry Issue Details",
"href": f"http://example.com/organizations/baz/issues/{group.id}/?referrer=pagerduty_plugin",
"type": "link",
}
],
"incident_key": str(group.id),
"client": "sentry",
"details": {
"project": self.project.name,
"release": None,
"url": f"http://example.com/organizations/baz/issues/{group.id}/?referrer=pagerduty_plugin",
"culprit": group.culprit,
"platform": "python",
"event_id": event.event_id,
"tags": {"level": "warning"},
"datetime": event.datetime.strftime("%Y-%m-%dT%H:%M:%S.%fZ"),
},
"service_key": "abcdef",
"description": event.message,
}
def test_no_secrets(self) -> None:
self.user = self.create_user("foo@example.com")
self.org = self.create_organization(owner=self.user, name="Rowdy Tiger")
self.team = self.create_team(organization=self.org, name="Mariachi Band")
self.project = self.create_project(organization=self.org, teams=[self.team], name="Bengal")
self.login_as(self.user)
self.plugin.set_option("service_key", "abcdef", self.project)
url = reverse(
"sentry-api-0-project-plugin-details",
args=[self.org.slug, self.project.slug, "pagerduty"],
)
res = self.client.get(url)
config = orjson.loads(res.content)["config"]
key_config = [item for item in config if item["name"] == "service_key"][0]
assert key_config.get("type") == "secret"
assert key_config.get("value") is None
assert key_config.get("hasSavedValue") is True
assert key_config.get("prefix") == "abcd"
| PagerDutyPluginTest |
python | walkccc__LeetCode | solutions/312. Burst Balloons/312.py | {
"start": 0,
"end": 431
} | class ____:
def maxCoins(self, nums: list[int]) -> int:
n = len(nums)
nums = [1] + nums + [1]
@functools.lru_cache(None)
def dp(i: int, j: int) -> int:
"""Returns maxCoins(nums[i..j])."""
if i > j:
return 0
return max(dp(i, k - 1) +
dp(k + 1, j) +
nums[i - 1] * nums[k] * nums[j + 1]
for k in range(i, j + 1))
return dp(1, n)
| Solution |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/plugins/test_listener.py | {
"start": 2608,
"end": 2978
} | class ____(BaseOperator):
template_fields = ["df"]
def __init__(self, df, *args, **kwargs):
self.df = df
super().__init__(*args, **kwargs)
def execute(self, context):
return self.df
def render_df():
return pd.DataFrame({"col": [1, 2]})
def regular_call(self, callable, callable_name, use_fork):
callable()
| TemplateOperator |
python | scipy__scipy | scipy/stats/tests/test_new_distributions.py | {
"start": 174,
"end": 874
} | class ____:
def test_gh23708_binomial_logcdf_method_complement(self):
# gh-23708 found that `logcdf` method='complement' was inaccurate in the tails
x = np.asarray([0., 18.])
X = stats.Binomial(n=np.asarray([18.]), p=np.asarray(0.71022842))
assert_allclose(X.logcdf(x, method='complement'), X.logcdf(x), rtol=1e-15)
assert_allclose(X.logccdf(x, method='complement'), X.logccdf(x), rtol=1e-15)
# going even deeper into the tails
X = stats.Binomial(n=100, p=0.5)
assert_allclose(X.logcdf(0, method='complement'), X.logpmf(0), rtol=1e-15)
assert_allclose(X.logccdf(99, method='complement'), X.logpmf(100), rtol=1e-15)
| TestBinomial |
python | pyca__cryptography | tests/hazmat/asn1/test_serialization.py | {
"start": 1635,
"end": 1835
} | class ____:
def test_bool(self) -> None:
assert_roundtrips(
[
(True, b"\x01\x01\xff"),
(False, b"\x01\x01\x00"),
],
)
| TestBool |
python | sqlalchemy__sqlalchemy | test/dialect/mssql/test_compiler.py | {
"start": 1441,
"end": 63571
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
__dialect__ = mssql.dialect()
@testing.fixture
def dialect_2012(self):
dialect = mssql.dialect()
dialect._supports_offset_fetch = True
return dialect
def test_true_false(self):
self.assert_compile(sql.false(), "0")
self.assert_compile(sql.true(), "1")
def test_plain_stringify_returning(self):
t = Table(
"t",
MetaData(),
Column("myid", Integer, primary_key=True),
Column("name", String, server_default="some str"),
Column("description", String, default=func.lower("hi")),
)
stmt = t.insert().values().return_defaults()
eq_ignore_whitespace(
str(stmt.compile(dialect=mssql.dialect())),
"INSERT INTO t (description) "
"OUTPUT inserted.myid, inserted.name, inserted.description "
"VALUES (lower(:lower_1))",
)
@testing.combinations(
("plain", "sometable", "sometable"),
("matched_square_brackets", "colo[u]r", "[colo[u]]r]"),
("unmatched_left_square_bracket", "colo[ur", "[colo[ur]"),
("unmatched_right_square_bracket", "colou]r", "[colou]]r]"),
("double quotes", 'Edwin "Buzz" Aldrin', '[Edwin "Buzz" Aldrin]'),
("dash", "Dash-8", "[Dash-8]"),
("slash", "tl/dr", "[tl/dr]"),
("space", "Red Deer", "[Red Deer]"),
("question mark", "OK?", "[OK?]"),
("percent", "GST%", "[GST%]"),
id_="iaa",
)
def test_identifier_rendering(self, table_name, rendered_name):
t = table(table_name, column("somecolumn"))
self.assert_compile(
t.select(), "SELECT {0}.somecolumn FROM {0}".format(rendered_name)
)
def test_select_with_nolock(self):
t = table("sometable", column("somecolumn"))
self.assert_compile(
t.select().with_hint(t, "WITH (NOLOCK)"),
"SELECT sometable.somecolumn FROM sometable WITH (NOLOCK)",
)
def test_select_with_nolock_schema(self):
m = MetaData()
t = Table(
"sometable", m, Column("somecolumn", Integer), schema="test_schema"
)
self.assert_compile(
t.select().with_hint(t, "WITH (NOLOCK)"),
"SELECT test_schema.sometable.somecolumn "
"FROM test_schema.sometable WITH (NOLOCK)",
)
def test_select_w_order_by_collate(self):
m = MetaData()
t = Table("sometable", m, Column("somecolumn", String))
self.assert_compile(
select(t).order_by(
t.c.somecolumn.collate("Latin1_General_CS_AS_KS_WS_CI").asc()
),
"SELECT sometable.somecolumn FROM sometable "
"ORDER BY sometable.somecolumn COLLATE "
"Latin1_General_CS_AS_KS_WS_CI ASC",
)
@testing.fixture
def column_expression_fixture(self):
class MyString(TypeEngine):
def column_expression(self, column):
return func.lower(column)
return table(
"some_table", column("name", String), column("value", MyString)
)
@testing.combinations("columns", "table", argnames="use_columns")
def test_plain_returning_column_expression(
self, column_expression_fixture, use_columns
):
"""test #8770"""
table1 = column_expression_fixture
if use_columns == "columns":
stmt = insert(table1).returning(table1)
else:
stmt = insert(table1).returning(table1.c.name, table1.c.value)
self.assert_compile(
stmt,
"INSERT INTO some_table (name, value) OUTPUT inserted.name, "
"lower(inserted.value) AS value VALUES (:name, :value)",
)
def test_join_with_hint(self):
t1 = table(
"t1",
column("a", Integer),
column("b", String),
column("c", String),
)
t2 = table(
"t2",
column("a", Integer),
column("b", Integer),
column("c", Integer),
)
join = (
t1.join(t2, t1.c.a == t2.c.a)
.select()
.with_hint(t1, "WITH (NOLOCK)")
)
self.assert_compile(
join,
"SELECT t1.a, t1.b, t1.c, t2.a AS a_1, t2.b AS b_1, t2.c AS c_1 "
"FROM t1 WITH (NOLOCK) JOIN t2 ON t1.a = t2.a",
)
def test_insert(self):
t = table("sometable", column("somecolumn"))
self.assert_compile(
t.insert(),
"INSERT INTO sometable (somecolumn) VALUES (:somecolumn)",
)
def test_update(self):
t = table("sometable", column("somecolumn"))
self.assert_compile(
t.update().where(t.c.somecolumn == 7),
"UPDATE sometable SET somecolumn=:somecolum"
"n WHERE sometable.somecolumn = "
":somecolumn_1",
dict(somecolumn=10),
)
def test_insert_hint(self):
t = table("sometable", column("somecolumn"))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.insert()
.values(somecolumn="x")
.with_hint(
"WITH (PAGLOCK)", selectable=targ, dialect_name=darg
),
"INSERT INTO sometable WITH (PAGLOCK) "
"(somecolumn) VALUES (:somecolumn)",
)
def test_update_hint(self):
t = table("sometable", column("somecolumn"))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.update()
.where(t.c.somecolumn == "q")
.values(somecolumn="x")
.with_hint(
"WITH (PAGLOCK)", selectable=targ, dialect_name=darg
),
"UPDATE sometable WITH (PAGLOCK) "
"SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1",
)
def test_update_exclude_hint(self):
t = table("sometable", column("somecolumn"))
self.assert_compile(
t.update()
.where(t.c.somecolumn == "q")
.values(somecolumn="x")
.with_hint("XYZ", dialect_name="mysql"),
"UPDATE sometable SET somecolumn=:somecolumn "
"WHERE sometable.somecolumn = :somecolumn_1",
)
def test_delete_hint(self):
t = table("sometable", column("somecolumn"))
for targ in (None, t):
for darg in ("*", "mssql"):
self.assert_compile(
t.delete()
.where(t.c.somecolumn == "q")
.with_hint(
"WITH (PAGLOCK)", selectable=targ, dialect_name=darg
),
"DELETE FROM sometable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = :somecolumn_1",
)
def test_delete_exclude_hint(self):
t = table("sometable", column("somecolumn"))
self.assert_compile(
t.delete()
.where(t.c.somecolumn == "q")
.with_hint("XYZ", dialect_name="mysql"),
"DELETE FROM sometable WHERE "
"sometable.somecolumn = :somecolumn_1",
)
def test_delete_extra_froms(self):
t1 = table("t1", column("c1"))
t2 = table("t2", column("c1"))
q = sql.delete(t1).where(t1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM t1 FROM t1, t2 WHERE t1.c1 = t2.c1"
)
def test_delete_extra_froms_alias(self):
a1 = table("t1", column("c1")).alias("a1")
t2 = table("t2", column("c1"))
q = sql.delete(a1).where(a1.c.c1 == t2.c.c1)
self.assert_compile(
q, "DELETE FROM a1 FROM t1 AS a1, t2 WHERE a1.c1 = t2.c1"
)
self.assert_compile(sql.delete(a1), "DELETE FROM t1 AS a1")
def test_update_from(self):
metadata = MetaData()
table1 = Table(
"mytable",
metadata,
Column("myid", Integer),
Column("name", String(30)),
Column("description", String(50)),
)
table2 = Table(
"myothertable",
metadata,
Column("otherid", Integer),
Column("othername", String(30)),
)
mt = table1.alias()
u = (
table1.update()
.values(name="foo")
.where(table2.c.otherid == table1.c.myid)
)
# testing mssql.base.MSSQLCompiler.update_from_clause
self.assert_compile(
u,
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable WHERE "
"myothertable.otherid = mytable.myid",
)
self.assert_compile(
u.where(table2.c.othername == mt.c.name),
"UPDATE mytable SET name=:name "
"FROM mytable, myothertable, mytable AS mytable_1 "
"WHERE myothertable.otherid = mytable.myid "
"AND myothertable.othername = mytable_1.name",
)
def test_update_from_hint(self):
t = table("sometable", column("somecolumn"))
t2 = table("othertable", column("somecolumn"))
for darg in ("*", "mssql"):
self.assert_compile(
t.update()
.where(t.c.somecolumn == t2.c.somecolumn)
.values(somecolumn="x")
.with_hint("WITH (PAGLOCK)", selectable=t2, dialect_name=darg),
"UPDATE sometable SET somecolumn=:somecolumn "
"FROM sometable, othertable WITH (PAGLOCK) "
"WHERE sometable.somecolumn = othertable.somecolumn",
)
def test_update_to_select_schema(self):
meta = MetaData()
table = Table(
"sometable",
meta,
Column("sym", String),
Column("val", Integer),
schema="schema",
)
other = Table(
"#other", meta, Column("sym", String), Column("newval", Integer)
)
stmt = table.update().values(
val=select(other.c.newval)
.where(table.c.sym == other.c.sym)
.scalar_subquery()
)
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"(SELECT [#other].newval FROM [#other] "
"WHERE [schema].sometable.sym = [#other].sym)",
)
stmt = (
table.update()
.values(val=other.c.newval)
.where(table.c.sym == other.c.sym)
)
self.assert_compile(
stmt,
"UPDATE [schema].sometable SET val="
"[#other].newval FROM [schema].sometable, "
"[#other] WHERE [schema].sometable.sym = [#other].sym",
)
# TODO: not supported yet.
# def test_delete_from_hint(self):
# t = table('sometable', column('somecolumn'))
# t2 = table('othertable', column('somecolumn'))
# for darg in ("*", "mssql"):
# self.assert_compile(
# t.delete().where(t.c.somecolumn==t2.c.somecolumn).
# with_hint("WITH (PAGLOCK)",
# selectable=t2,
# dialect_name=darg),
# ""
# )
@testing.combinations(
(
lambda: select(literal("x"), literal("y")),
"SELECT __[POSTCOMPILE_param_1] AS anon_1, "
"__[POSTCOMPILE_param_2] AS anon_2",
{
"check_literal_execute": {"param_1": "x", "param_2": "y"},
"check_post_param": {},
},
),
(
lambda t: select(t).where(t.c.foo.in_(["x", "y", "z"])),
"SELECT sometable.foo FROM sometable WHERE sometable.foo "
"IN (__[POSTCOMPILE_foo_1])",
{
"check_literal_execute": {"foo_1": ["x", "y", "z"]},
"check_post_param": {},
},
),
(
lambda t: t.c.foo.in_([None]),
"sometable.foo IN (__[POSTCOMPILE_foo_1])",
{},
),
)
def test_strict_binds(self, expr, compiled, kw):
"""test the 'strict' compiler binds."""
from sqlalchemy.dialects.mssql.base import MSSQLStrictCompiler
mssql_dialect = mssql.dialect()
mssql_dialect.statement_compiler = MSSQLStrictCompiler
t = table("sometable", column("foo"))
expr = testing.resolve_lambda(expr, t=t)
self.assert_compile(expr, compiled, dialect=mssql_dialect, **kw)
def test_in_with_subqueries(self):
"""Test removal of legacy behavior that converted "x==subquery"
to use IN.
"""
t = table("sometable", column("somecolumn"))
self.assert_compile(
t.select().where(t.c.somecolumn == t.select().scalar_subquery()),
"SELECT sometable.somecolumn FROM "
"sometable WHERE sometable.somecolumn = "
"(SELECT sometable.somecolumn FROM "
"sometable)",
)
self.assert_compile(
t.select().where(t.c.somecolumn != t.select().scalar_subquery()),
"SELECT sometable.somecolumn FROM "
"sometable WHERE sometable.somecolumn != "
"(SELECT sometable.somecolumn FROM "
"sometable)",
)
@testing.uses_deprecated
def test_count(self):
t = table("sometable", column("somecolumn"))
self.assert_compile(
t.count(),
"SELECT count(sometable.somecolumn) AS "
"tbl_row_count FROM sometable",
)
def test_noorderby_insubquery(self):
"""test "no ORDER BY in subqueries unless TOP / LIMIT / OFFSET"
present"""
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
q = select(table1.c.myid).order_by(table1.c.myid).alias("foo")
crit = q.c.myid == table1.c.myid
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT mytable.myid AS "
"myid FROM mytable) AS foo, mytable WHERE "
"foo.myid = mytable.myid",
)
def test_noorderby_insubquery_limit(self):
"""test "no ORDER BY in subqueries unless TOP / LIMIT / OFFSET"
present"""
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
q = (
select(table1.c.myid)
.order_by(table1.c.myid)
.limit(10)
.alias("foo")
)
crit = q.c.myid == table1.c.myid
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT TOP __[POSTCOMPILE_param_1] "
"mytable.myid AS "
"myid FROM mytable ORDER BY mytable.myid) AS foo, mytable WHERE "
"foo.myid = mytable.myid",
)
@testing.variation("style", ["plain", "ties", "percent"])
def test_noorderby_insubquery_fetch(self, style):
"""test "no ORDER BY in subqueries unless TOP / LIMIT / OFFSET"
present; test issue #10458"""
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
if style.plain:
q = (
select(table1.c.myid)
.order_by(table1.c.myid)
.fetch(count=10)
.alias("foo")
)
elif style.ties:
q = (
select(table1.c.myid)
.order_by(table1.c.myid)
.fetch(count=10, with_ties=True)
.alias("foo")
)
elif style.percent:
q = (
select(table1.c.myid)
.order_by(table1.c.myid)
.fetch(count=10, percent=True)
.alias("foo")
)
else:
style.fail()
crit = q.c.myid == table1.c.myid
if style.plain:
# the "plain" style of fetch doesnt use TOP right now, so
# there's an order_by implicit in the row_number part of it
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT anon_1.myid AS myid FROM "
"(SELECT mytable.myid AS myid, ROW_NUMBER() OVER "
"(ORDER BY mytable.myid) AS mssql_rn FROM mytable) AS anon_1 "
"WHERE mssql_rn <= :param_1) AS foo, mytable "
"WHERE foo.myid = mytable.myid",
)
elif style.ties:
# issue #10458 is that when TIES/PERCENT were used, and it just
# generates TOP, ORDER BY would be omitted.
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT TOP __[POSTCOMPILE_param_1] WITH "
"TIES mytable.myid AS myid FROM mytable "
"ORDER BY mytable.myid) AS foo, mytable "
"WHERE foo.myid = mytable.myid",
)
elif style.percent:
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT TOP __[POSTCOMPILE_param_1] "
"PERCENT mytable.myid AS myid FROM mytable "
"ORDER BY mytable.myid) AS foo, mytable "
"WHERE foo.myid = mytable.myid",
)
@testing.combinations(10, 0)
def test_noorderby_insubquery_offset_oldstyle(self, offset):
"""test "no ORDER BY in subqueries unless TOP / LIMIT / OFFSET"
present"""
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
q = (
select(table1.c.myid)
.order_by(table1.c.myid)
.offset(offset)
.alias("foo")
)
crit = q.c.myid == table1.c.myid
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT anon_1.myid AS myid FROM "
"(SELECT mytable.myid AS myid, ROW_NUMBER() OVER (ORDER BY "
"mytable.myid) AS mssql_rn FROM mytable) AS anon_1 "
"WHERE mssql_rn > :param_1) AS foo, mytable WHERE "
"foo.myid = mytable.myid",
)
@testing.combinations(10, 0, argnames="offset")
def test_noorderby_insubquery_offset_newstyle(self, dialect_2012, offset):
"""test "no ORDER BY in subqueries unless TOP / LIMIT / OFFSET"
present"""
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
q = (
select(table1.c.myid)
.order_by(table1.c.myid)
.offset(offset)
.alias("foo")
)
crit = q.c.myid == table1.c.myid
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT mytable.myid AS myid FROM mytable "
"ORDER BY mytable.myid OFFSET :param_1 ROWS) AS foo, "
"mytable WHERE foo.myid = mytable.myid",
dialect=dialect_2012,
)
def test_noorderby_insubquery_limit_offset_newstyle(self, dialect_2012):
"""test "no ORDER BY in subqueries unless TOP / LIMIT / OFFSET"
present"""
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
q = (
select(table1.c.myid)
.order_by(table1.c.myid)
.limit(10)
.offset(10)
.alias("foo")
)
crit = q.c.myid == table1.c.myid
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT mytable.myid AS myid FROM mytable "
"ORDER BY mytable.myid OFFSET :param_1 ROWS "
"FETCH FIRST :param_2 ROWS ONLY) AS foo, "
"mytable WHERE foo.myid = mytable.myid",
dialect=dialect_2012,
)
def test_noorderby_parameters_insubquery(self):
"""test that the ms-sql dialect does not include ORDER BY
positional parameters in subqueries"""
table1 = table(
"mytable",
column("myid", Integer),
column("name", String),
column("description", String),
)
q = (
select(table1.c.myid, sql.literal("bar").label("c1"))
.order_by(table1.c.name + "-")
.alias("foo")
)
crit = q.c.myid == table1.c.myid
dialect = mssql.dialect()
dialect.paramstyle = "qmark"
dialect.positional = True
self.assert_compile(
select("*").where(crit),
"SELECT * FROM (SELECT mytable.myid AS "
"myid, ? AS c1 FROM mytable) AS foo, mytable WHERE "
"foo.myid = mytable.myid",
dialect=dialect,
checkparams={"param_1": "bar"},
# if name_1 is included, too many parameters are passed to dbapi
checkpositional=("bar",),
)
@testing.variation("use_schema_translate", [True, False])
@testing.combinations(
"abc", "has spaces", "[abc]", "[has spaces]", argnames="schemaname"
)
def test_schema_single_token_bracketed(
self, use_schema_translate, schemaname
):
"""test for #9133.
this is not the actual regression case for #9133, which is instead
within the reflection process. However, when we implemented
#2626, we never considered the case of ``[schema]`` without any
dots in it.
"""
schema_no_brackets = schemaname.strip("[]")
if " " in schemaname:
rendered_schema = "[%s]" % (schema_no_brackets,)
else:
rendered_schema = schema_no_brackets
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema=schemaname if not use_schema_translate else None,
)
self.assert_compile(
select(tbl),
"SELECT %(name)s.test.id FROM %(name)s.test"
% {"name": rendered_schema},
schema_translate_map=(
{None: schemaname} if use_schema_translate else None
),
render_schema_translate=True if use_schema_translate else False,
)
def test_schema_many_tokens_one(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="abc.def.efg.hij",
)
# for now, we don't really know what the above means, at least
# don't lose the dot
self.assert_compile(
select(tbl),
"SELECT [abc.def.efg].hij.test.id FROM [abc.def.efg].hij.test",
)
dbname, owner = mssql_base._schema_elements("abc.def.efg.hij")
eq_(dbname, "abc.def.efg")
assert not isinstance(dbname, quoted_name)
eq_(owner, "hij")
def test_schema_many_tokens_two(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="[abc].[def].[efg].[hij]",
)
self.assert_compile(
select(tbl),
"SELECT [abc].[def].[efg].hij.test.id "
"FROM [abc].[def].[efg].hij.test",
)
def test_force_schema_quoted_name_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema=quoted_name("foo.dbo", True),
)
self.assert_compile(
select(tbl), "SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
def test_force_schema_quoted_w_dot_case_insensitive(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema=quoted_name("foo.dbo", True),
)
self.assert_compile(
select(tbl), "SELECT [foo.dbo].test.id FROM [foo.dbo].test"
)
@testing.combinations((True,), (False,), argnames="use_schema_translate")
def test_force_schema_quoted_name_w_dot_case_sensitive(
self, use_schema_translate
):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema=(
quoted_name("Foo.dbo", True)
if not use_schema_translate
else None
),
)
self.assert_compile(
select(tbl),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test",
schema_translate_map=(
{None: quoted_name("Foo.dbo", True)}
if use_schema_translate
else None
),
render_schema_translate=True if use_schema_translate else False,
)
@testing.combinations((True,), (False,), argnames="use_schema_translate")
def test_force_schema_quoted_w_dot_case_sensitive(
self, use_schema_translate
):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="[Foo.dbo]" if not use_schema_translate else None,
)
self.assert_compile(
select(tbl),
"SELECT [Foo.dbo].test.id FROM [Foo.dbo].test",
schema_translate_map=(
{None: "[Foo.dbo]"} if use_schema_translate else None
),
render_schema_translate=True if use_schema_translate else False,
)
@testing.combinations((True,), (False,), argnames="use_schema_translate")
def test_schema_autosplit_w_dot_case_insensitive(
self, use_schema_translate
):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="foo.dbo" if not use_schema_translate else None,
)
self.assert_compile(
select(tbl),
"SELECT foo.dbo.test.id FROM foo.dbo.test",
schema_translate_map=(
{None: "foo.dbo"} if use_schema_translate else None
),
render_schema_translate=True if use_schema_translate else False,
)
@testing.combinations((True,), (False,), argnames="use_schema_translate")
def test_schema_autosplit_w_dot_case_sensitive(self, use_schema_translate):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="Foo.dbo" if not use_schema_translate else None,
)
self.assert_compile(
select(tbl),
"SELECT [Foo].dbo.test.id FROM [Foo].dbo.test",
schema_translate_map=(
{None: "Foo.dbo"} if use_schema_translate else None
),
render_schema_translate=True if use_schema_translate else False,
)
def test_delete_schema(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="paj",
)
self.assert_compile(
tbl.delete().where(tbl.c.id == 1),
"DELETE FROM paj.test WHERE paj.test.id = :id_1",
)
s = select(tbl.c.id).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM paj.test WHERE paj.test.id IN "
"(SELECT paj.test.id FROM paj.test "
"WHERE paj.test.id = :id_1)",
)
def test_delete_schema_multipart(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="banana.paj",
)
self.assert_compile(
tbl.delete().where(tbl.c.id == 1),
"DELETE FROM banana.paj.test WHERE banana.paj.test.id = :id_1",
)
s = select(tbl.c.id).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM banana.paj.test WHERE "
"banana.paj.test.id IN (SELECT banana.paj.test.id "
"FROM banana.paj.test WHERE "
"banana.paj.test.id = :id_1)",
)
def test_delete_schema_multipart_needs_quoting(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="banana split.paj",
)
self.assert_compile(
tbl.delete().where(tbl.c.id == 1),
"DELETE FROM [banana split].paj.test WHERE "
"[banana split].paj.test.id = :id_1",
)
s = select(tbl.c.id).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM [banana split].paj.test WHERE "
"[banana split].paj.test.id IN ("
"SELECT [banana split].paj.test.id FROM "
"[banana split].paj.test WHERE "
"[banana split].paj.test.id = :id_1)",
)
def test_delete_schema_multipart_both_need_quoting(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("id", Integer, primary_key=True),
schema="banana split.paj with a space",
)
self.assert_compile(
tbl.delete().where(tbl.c.id == 1),
"DELETE FROM [banana split].[paj with a "
"space].test WHERE [banana split].[paj "
"with a space].test.id = :id_1",
)
s = select(tbl.c.id).where(tbl.c.id == 1)
self.assert_compile(
tbl.delete().where(tbl.c.id.in_(s)),
"DELETE FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id IN "
"(SELECT [banana split].[paj with a space].test.id "
"FROM [banana split].[paj with a space].test "
"WHERE [banana split].[paj with a space].test.id = :id_1)",
)
def test_union(self):
t1 = table(
"t1",
column("col1"),
column("col2"),
column("col3"),
column("col4"),
)
t2 = table(
"t2",
column("col1"),
column("col2"),
column("col3"),
column("col4"),
)
s1, s2 = (
select(t1.c.col3.label("col3"), t1.c.col4.label("col4")).where(
t1.c.col2.in_(["t1col2r1", "t1col2r2"]),
),
select(t2.c.col3.label("col3"), t2.c.col4.label("col4")).where(
t2.c.col2.in_(["t2col2r2", "t2col2r3"]),
),
)
u = union(s1, s2).order_by("col3", "col4")
self.assert_compile(
u,
"SELECT t1.col3 AS col3, t1.col4 AS col4 "
"FROM t1 WHERE t1.col2 IN (__[POSTCOMPILE_col2_1]) "
"UNION SELECT t2.col3 AS col3, "
"t2.col4 AS col4 FROM t2 WHERE t2.col2 IN "
"(__[POSTCOMPILE_col2_2]) ORDER BY col3, col4",
checkparams={
"col2_1": ["t1col2r1", "t1col2r2"],
"col2_2": ["t2col2r2", "t2col2r3"],
},
)
self.assert_compile(
u.alias("bar").select(),
"SELECT bar.col3, bar.col4 FROM (SELECT "
"t1.col3 AS col3, t1.col4 AS col4 FROM t1 "
"WHERE t1.col2 IN (__[POSTCOMPILE_col2_1]) UNION "
"SELECT t2.col3 AS col3, t2.col4 AS col4 "
"FROM t2 WHERE t2.col2 IN (__[POSTCOMPILE_col2_2])) AS bar",
checkparams={
"col2_1": ["t1col2r1", "t1col2r2"],
"col2_2": ["t2col2r2", "t2col2r3"],
},
)
def test_function(self):
self.assert_compile(func.foo(1, 2), "foo(:foo_1, :foo_2)")
self.assert_compile(func.current_time(), "CURRENT_TIME")
self.assert_compile(func.foo(), "foo()")
m = MetaData()
t = Table(
"sometable", m, Column("col1", Integer), Column("col2", Integer)
)
self.assert_compile(
select(func.max(t.c.col1)),
"SELECT max(sometable.col1) AS max_1 FROM sometable",
)
def test_function_overrides(self):
self.assert_compile(func.current_date(), "GETDATE()")
self.assert_compile(func.length(3), "LEN(:length_1)")
def test_extract(self):
t = table("t", column("col1"))
for field in "day", "month", "year":
self.assert_compile(
select(extract(field, t.c.col1)),
"SELECT DATEPART(%s, t.col1) AS anon_1 FROM t" % field,
)
def test_update_returning(self):
table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
u = (
update(table1)
.values(dict(name="foo"))
.returning(table1.c.myid, table1.c.name)
)
self.assert_compile(
u,
"UPDATE mytable SET name=:name OUTPUT "
"inserted.myid, inserted.name",
)
u = update(table1).values(dict(name="foo")).returning(table1)
self.assert_compile(
u,
"UPDATE mytable SET name=:name OUTPUT "
"inserted.myid, inserted.name, "
"inserted.description",
)
u = (
update(table1)
.values(dict(name="foo"))
.returning(table1)
.where(table1.c.name == "bar")
)
self.assert_compile(
u,
"UPDATE mytable SET name=:name OUTPUT "
"inserted.myid, inserted.name, "
"inserted.description WHERE mytable.name = "
":name_1",
)
u = (
update(table1)
.values(dict(name="foo"))
.returning(func.length(table1.c.name))
)
self.assert_compile(
u,
"UPDATE mytable SET name=:name OUTPUT "
"LEN(inserted.name) AS length_1",
)
def test_delete_returning(self):
table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
d = delete(table1).returning(table1.c.myid, table1.c.name)
self.assert_compile(
d, "DELETE FROM mytable OUTPUT deleted.myid, deleted.name"
)
d = (
delete(table1)
.where(table1.c.name == "bar")
.returning(table1.c.myid, table1.c.name)
)
self.assert_compile(
d,
"DELETE FROM mytable OUTPUT deleted.myid, "
"deleted.name WHERE mytable.name = :name_1",
)
def test_insert_returning(self):
table1 = table(
"mytable",
column("myid", Integer),
column("name", String(128)),
column("description", String(128)),
)
i = (
insert(table1)
.values(dict(name="foo"))
.returning(table1.c.myid, table1.c.name)
)
self.assert_compile(
i,
"INSERT INTO mytable (name) OUTPUT "
"inserted.myid, inserted.name VALUES "
"(:name)",
)
i = insert(table1).values(dict(name="foo")).returning(table1)
self.assert_compile(
i,
"INSERT INTO mytable (name) OUTPUT "
"inserted.myid, inserted.name, "
"inserted.description VALUES (:name)",
)
i = (
insert(table1)
.values(dict(name="foo"))
.returning(func.length(table1.c.name))
)
self.assert_compile(
i,
"INSERT INTO mytable (name) OUTPUT "
"LEN(inserted.name) AS length_1 VALUES "
"(:name)",
)
def test_limit_using_top(self):
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).limit(10)
self.assert_compile(
s,
"SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t "
"WHERE t.x = :x_1 ORDER BY t.y",
checkparams={"x_1": 5, "param_1": 10},
)
def test_limit_using_top_literal_binds(self):
"""test #6863"""
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).limit(10)
eq_ignore_whitespace(
str(
s.compile(
dialect=mssql.dialect(),
compile_kwargs={"literal_binds": True},
)
),
"SELECT TOP 10 t.x, t.y FROM t WHERE t.x = 5 ORDER BY t.y",
)
def test_limit_zero_using_top(self):
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).limit(0)
self.assert_compile(
s,
"SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t "
"WHERE t.x = :x_1 ORDER BY t.y",
checkparams={"x_1": 5, "param_1": 0},
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()["x"][1])
def test_offset_using_window(self):
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).offset(20)
# test that the select is not altered with subsequent compile
# calls
for i in range(2):
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y FROM (SELECT t.x AS x, t.y "
"AS y, ROW_NUMBER() OVER (ORDER BY t.y) AS "
"mssql_rn FROM t WHERE t.x = :x_1) AS "
"anon_1 WHERE mssql_rn > :param_1",
checkparams={"param_1": 20, "x_1": 5},
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()["x"][1])
def test_simple_limit_expression_offset_using_window(self):
t = table("t", column("x", Integer), column("y", Integer))
s = (
select(t)
.where(t.c.x == 5)
.order_by(t.c.y)
.limit(10)
.offset(literal_column("20"))
)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t.x AS x, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > 20 AND mssql_rn <= :param_1 + 20",
checkparams={"param_1": 10, "x_1": 5},
)
def test_limit_offset_using_window(self):
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t.x AS x, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={"param_1": 20, "param_2": 10, "x_1": 5},
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()["x"][1])
assert t.c.y in set(c._create_result_map()["y"][1])
def test_limit_offset_using_offset_fetch(self, dialect_2012):
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).limit(10).offset(20)
self.assert_compile(
s,
"SELECT t.x, t.y "
"FROM t "
"WHERE t.x = :x_1 ORDER BY t.y "
"OFFSET :param_1 ROWS "
"FETCH FIRST :param_2 ROWS ONLY",
checkparams={"param_1": 20, "param_2": 10, "x_1": 5},
dialect=dialect_2012,
)
c = s.compile(dialect=dialect_2012)
eq_(len(c._result_columns), 2)
assert t.c.x in set(c._create_result_map()["x"][1])
assert t.c.y in set(c._create_result_map()["y"][1])
def test_limit_offset_w_ambiguous_cols(self):
t = table("t", column("x", Integer), column("y", Integer))
cols = [t.c.x, t.c.x.label("q"), t.c.x.label("p"), t.c.y]
s = (
select(*cols)
.where(t.c.x == 5)
.order_by(t.c.y)
.limit(10)
.offset(20)
)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.q, anon_1.p, anon_1.y "
"FROM (SELECT t.x AS x, t.x AS q, t.x AS p, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn "
"FROM t "
"WHERE t.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={"param_1": 20, "param_2": 10, "x_1": 5},
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 4)
result_map = c._create_result_map()
for col in cols:
is_(result_map[col.key][1][0], col)
def test_limit_offset_with_correlated_order_by(self):
t1 = table("t1", column("x", Integer), column("y", Integer))
t2 = table("t2", column("x", Integer), column("y", Integer))
order_by = select(t2.c.y).where(t1.c.x == t2.c.x).scalar_subquery()
s = (
select(t1)
.where(t1.c.x == 5)
.order_by(order_by)
.limit(10)
.offset(20)
)
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y "
"FROM (SELECT t1.x AS x, t1.y AS y, "
"ROW_NUMBER() OVER (ORDER BY "
"(SELECT t2.y FROM t2 WHERE t1.x = t2.x)"
") AS mssql_rn "
"FROM t1 "
"WHERE t1.x = :x_1) AS anon_1 "
"WHERE mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
checkparams={"param_1": 20, "param_2": 10, "x_1": 5},
)
c = s.compile(dialect=mssql.dialect())
eq_(len(c._result_columns), 2)
assert t1.c.x in set(c._create_result_map()["x"][1])
assert t1.c.y in set(c._create_result_map()["y"][1])
def test_offset_dont_misapply_labelreference(self):
m = MetaData()
t = Table("t", m, Column("x", Integer))
expr1 = func.foo(t.c.x).label("x")
expr2 = func.foo(t.c.x).label("y")
stmt1 = select(expr1).order_by(expr1.desc()).offset(1)
stmt2 = select(expr2).order_by(expr2.desc()).offset(1)
self.assert_compile(
stmt1,
"SELECT anon_1.x FROM (SELECT foo(t.x) AS x, "
"ROW_NUMBER() OVER (ORDER BY foo(t.x) DESC) AS mssql_rn FROM t) "
"AS anon_1 WHERE mssql_rn > :param_1",
)
self.assert_compile(
stmt2,
"SELECT anon_1.y FROM (SELECT foo(t.x) AS y, "
"ROW_NUMBER() OVER (ORDER BY foo(t.x) DESC) AS mssql_rn FROM t) "
"AS anon_1 WHERE mssql_rn > :param_1",
)
def test_limit_zero_offset_using_window(self):
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).limit(0).offset(0)
# offset is zero but we need to cache a compatible statement
self.assert_compile(
s,
"SELECT anon_1.x, anon_1.y FROM (SELECT t.x AS x, t.y AS y, "
"ROW_NUMBER() OVER (ORDER BY t.y) AS mssql_rn FROM t "
"WHERE t.x = :x_1) AS anon_1 WHERE mssql_rn > :param_1 "
"AND mssql_rn <= :param_2 + :param_1",
checkparams={"x_1": 5, "param_1": 0, "param_2": 0},
)
def test_limit_zero_using_window(self):
t = table("t", column("x", Integer), column("y", Integer))
s = select(t).where(t.c.x == 5).order_by(t.c.y).limit(0)
# render the LIMIT of zero, but not the OFFSET
# of zero, so produces TOP 0
self.assert_compile(
s,
"SELECT TOP __[POSTCOMPILE_param_1] t.x, t.y FROM t "
"WHERE t.x = :x_1 ORDER BY t.y",
checkparams={"x_1": 5, "param_1": 0},
)
def test_table_pkc_clustering(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer, autoincrement=False),
Column("y", Integer, autoincrement=False),
PrimaryKeyConstraint("x", "y", mssql_clustered=True),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NOT NULL, "
"PRIMARY KEY CLUSTERED (x, y))",
)
def test_table_pkc_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer, autoincrement=False),
Column("y", Integer, autoincrement=False),
PrimaryKeyConstraint("x", "y", mssql_clustered=False),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NOT NULL, "
"PRIMARY KEY NONCLUSTERED (x, y))",
)
def test_table_idx_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer, autoincrement=False),
Column("y", Integer, autoincrement=False),
)
idx = Index("myidx", tbl.c.x, tbl.c.y, mssql_clustered=False)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE NONCLUSTERED INDEX myidx ON test (x, y)",
)
def test_table_uc_explicit_nonclustered(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer, autoincrement=False),
Column("y", Integer, autoincrement=False),
UniqueConstraint("x", "y", mssql_clustered=False),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NULL, y INTEGER NULL, "
"UNIQUE NONCLUSTERED (x, y))",
)
def test_table_uc_clustering(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer, autoincrement=False),
Column("y", Integer, autoincrement=False),
PrimaryKeyConstraint("x"),
UniqueConstraint("y", mssql_clustered=True),
)
self.assert_compile(
schema.CreateTable(tbl),
"CREATE TABLE test (x INTEGER NOT NULL, y INTEGER NULL, "
"PRIMARY KEY (x), UNIQUE CLUSTERED (y))",
)
def test_index_clustering(self):
metadata = MetaData()
tbl = Table("test", metadata, Column("id", Integer))
idx = Index("foo", tbl.c.id, mssql_clustered=True)
self.assert_compile(
schema.CreateIndex(idx), "CREATE CLUSTERED INDEX foo ON test (id)"
)
def test_index_empty(self):
metadata = MetaData()
idx = Index("foo")
Table("test", metadata, Column("id", Integer)).append_constraint(idx)
self.assert_compile(
schema.CreateIndex(idx), "CREATE INDEX foo ON test"
)
def test_index_colstore_clustering(self):
metadata = MetaData()
idx = Index("foo", mssql_clustered=True, mssql_columnstore=True)
Table("test", metadata, Column("id", Integer)).append_constraint(idx)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE CLUSTERED COLUMNSTORE INDEX foo ON test",
)
def test_index_colstore_no_clustering(self):
metadata = MetaData()
tbl = Table("test", metadata, Column("id", Integer))
idx = Index(
"foo", tbl.c.id, mssql_clustered=False, mssql_columnstore=True
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE NONCLUSTERED COLUMNSTORE INDEX foo ON test (id)",
)
def test_index_not_colstore_clustering(self):
metadata = MetaData()
idx = Index("foo", mssql_clustered=True, mssql_columnstore=False)
Table("test", metadata, Column("id", Integer)).append_constraint(idx)
self.assert_compile(
schema.CreateIndex(idx), "CREATE CLUSTERED INDEX foo ON test"
)
def test_index_where(self):
metadata = MetaData()
tbl = Table("test", metadata, Column("data", Integer))
idx = Index("test_idx_data_1", tbl.c.data, mssql_where=tbl.c.data > 1)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx_data_1 ON test (data) WHERE data > 1",
)
idx = Index("test_idx_data_1", tbl.c.data, mssql_where="data > 1")
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX test_idx_data_1 ON test (data) WHERE data > 1",
)
def test_index_ordering(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
idx = Index("foo", tbl.c.x.desc(), "y")
self.assert_compile(
schema.CreateIndex(idx), "CREATE INDEX foo ON test (x DESC, y)"
)
def test_create_index_expr(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer))
self.assert_compile(
schema.CreateIndex(Index("bar", t1.c.x > 5)),
"CREATE INDEX bar ON foo (x > 5)",
)
def test_drop_index_w_schema(self):
m = MetaData()
t1 = Table("foo", m, Column("x", Integer), schema="bar")
self.assert_compile(
schema.DropIndex(Index("idx_foo", t1.c.x)),
"DROP INDEX idx_foo ON bar.foo",
)
def test_index_extra_include_1(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
idx = Index("foo", tbl.c.x, mssql_include=["y"])
self.assert_compile(
schema.CreateIndex(idx), "CREATE INDEX foo ON test (x) INCLUDE (y)"
)
def test_index_extra_include_2(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
idx = Index("foo", tbl.c.x, mssql_include=[tbl.c.y])
self.assert_compile(
schema.CreateIndex(idx), "CREATE INDEX foo ON test (x) INCLUDE (y)"
)
def test_index_include_where(self):
metadata = MetaData()
tbl = Table(
"test",
metadata,
Column("x", Integer),
Column("y", Integer),
Column("z", Integer),
)
idx = Index(
"foo", tbl.c.x, mssql_include=[tbl.c.y], mssql_where=tbl.c.y > 1
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y) WHERE y > 1",
)
idx = Index(
"foo", tbl.c.x, mssql_include=[tbl.c.y], mssql_where=text("y > 1")
)
self.assert_compile(
schema.CreateIndex(idx),
"CREATE INDEX foo ON test (x) INCLUDE (y) WHERE y > 1",
)
@testing.variation("use_mssql_version", [True, False])
def test_try_cast(self, use_mssql_version):
t1 = Table("t1", MetaData(), Column("id", Integer, primary_key=True))
if use_mssql_version:
stmt = select(mssql.try_cast(t1.c.id, Integer))
else:
stmt = select(try_cast(t1.c.id, Integer))
self.assert_compile(
stmt,
"SELECT TRY_CAST (t1.id AS INTEGER) AS id FROM t1",
)
@testing.combinations(
("no_persisted", "", "ignore"),
("persisted_none", "", None),
("persisted_true", " PERSISTED", True),
("persisted_false", "", False),
id_="iaa",
)
def test_column_computed(self, text, persisted):
m = MetaData()
kwargs = {"persisted": persisted} if persisted != "ignore" else {}
t = Table(
"t",
m,
Column("x", Integer),
Column("y", Integer, Computed("x + 2", **kwargs)),
)
self.assert_compile(
schema.CreateTable(t),
"CREATE TABLE t (x INTEGER NULL, y AS (x + 2)%s)" % text,
)
@testing.combinations(
(
5,
10,
{},
"OFFSET :param_1 ROWS FETCH FIRST :param_2 ROWS ONLY",
{"param_1": 10, "param_2": 5},
),
(None, 10, {}, "OFFSET :param_1 ROWS", {"param_1": 10}),
(
5,
None,
{},
"OFFSET 0 ROWS FETCH FIRST :param_1 ROWS ONLY",
{"param_1": 5},
),
(
0,
0,
{},
"OFFSET :param_1 ROWS FETCH FIRST :param_2 ROWS ONLY",
{"param_1": 0, "param_2": 0},
),
(
5,
0,
{"percent": True},
"TOP __[POSTCOMPILE_param_1] PERCENT",
{"param_1": 5},
),
(
5,
None,
{"percent": True, "with_ties": True},
"TOP __[POSTCOMPILE_param_1] PERCENT WITH TIES",
{"param_1": 5},
),
(
5,
0,
{"with_ties": True},
"TOP __[POSTCOMPILE_param_1] WITH TIES",
{"param_1": 5},
),
(
literal_column("Q"),
literal_column("Y"),
{},
"OFFSET Y ROWS FETCH FIRST Q ROWS ONLY",
{},
),
(
column("Q"),
column("Y"),
{},
"OFFSET [Y] ROWS FETCH FIRST [Q] ROWS ONLY",
{},
),
(
bindparam("Q", 3),
bindparam("Y", 7),
{},
"OFFSET :Y ROWS FETCH FIRST :Q ROWS ONLY",
{"Q": 3, "Y": 7},
),
(
literal_column("Q") + literal_column("Z"),
literal_column("Y") + literal_column("W"),
{},
"OFFSET Y + W ROWS FETCH FIRST Q + Z ROWS ONLY",
{},
),
argnames="fetch, offset, fetch_kw, exp, params",
)
def test_fetch(self, dialect_2012, fetch, offset, fetch_kw, exp, params):
t = table("t", column("a"))
if "TOP" in exp:
sel = "SELECT %s t.a FROM t ORDER BY t.a" % exp
else:
sel = "SELECT t.a FROM t ORDER BY t.a " + exp
stmt = select(t).order_by(t.c.a).fetch(fetch, **fetch_kw)
if "with_ties" not in fetch_kw and "percent" not in fetch_kw:
stmt = stmt.offset(offset)
self.assert_compile(
stmt,
sel,
checkparams=params,
dialect=dialect_2012,
)
@testing.combinations(
(
5,
10,
{},
"mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
{"param_1": 10, "param_2": 5},
),
(None, 10, {}, "mssql_rn > :param_1", {"param_1": 10}),
(
5,
None,
{},
"mssql_rn <= :param_1",
{"param_1": 5},
),
(
0,
0,
{},
"mssql_rn > :param_1 AND mssql_rn <= :param_2 + :param_1",
{"param_1": 0, "param_2": 0},
),
(
5,
0,
{"percent": True},
"TOP __[POSTCOMPILE_param_1] PERCENT",
{"param_1": 5},
),
(
5,
None,
{"percent": True, "with_ties": True},
"TOP __[POSTCOMPILE_param_1] PERCENT WITH TIES",
{"param_1": 5},
),
(
5,
0,
{"with_ties": True},
"TOP __[POSTCOMPILE_param_1] WITH TIES",
{"param_1": 5},
),
(
literal_column("Q"),
literal_column("Y"),
{},
"mssql_rn > Y AND mssql_rn <= Q + Y",
{},
),
(
column("Q"),
column("Y"),
{},
"mssql_rn > [Y] AND mssql_rn <= [Q] + [Y]",
{},
),
(
bindparam("Q", 3),
bindparam("Y", 7),
{},
"mssql_rn > :Y AND mssql_rn <= :Q + :Y",
{"Q": 3, "Y": 7},
),
(
literal_column("Q") + literal_column("Z"),
literal_column("Y") + literal_column("W"),
{},
"mssql_rn > Y + W AND mssql_rn <= Q + Z + Y + W",
{},
),
argnames="fetch, offset, fetch_kw, exp, params",
)
def test_fetch_old_version(self, fetch, offset, fetch_kw, exp, params):
t = table("t", column("a"))
if "TOP" in exp:
sel = "SELECT %s t.a FROM t ORDER BY t.a" % exp
else:
sel = (
"SELECT anon_1.a FROM (SELECT t.a AS a, ROW_NUMBER() "
"OVER (ORDER BY t.a) AS mssql_rn FROM t) AS anon_1 WHERE "
+ exp
)
stmt = select(t).order_by(t.c.a).fetch(fetch, **fetch_kw)
if "with_ties" not in fetch_kw and "percent" not in fetch_kw:
stmt = stmt.offset(offset)
self.assert_compile(
stmt,
sel,
checkparams=params,
)
_no_offset = (
"MSSQL needs TOP to use PERCENT and/or WITH TIES. "
"Only simple fetch without offset can be used."
)
_order_by = (
"MSSQL requires an order_by when using an OFFSET "
"or a non-simple LIMIT clause"
)
@testing.combinations(
(
select(tbl).order_by(tbl.c.a).fetch(5, percent=True).offset(3),
_no_offset,
),
(
select(tbl).order_by(tbl.c.a).fetch(5, with_ties=True).offset(3),
_no_offset,
),
(
select(tbl)
.order_by(tbl.c.a)
.fetch(5, percent=True, with_ties=True)
.offset(3),
_no_offset,
),
(
select(tbl)
.order_by(tbl.c.a)
.fetch(bindparam("x"), with_ties=True),
_no_offset,
),
(select(tbl).fetch(5).offset(3), _order_by),
(select(tbl).fetch(5), _order_by),
(select(tbl).offset(5), _order_by),
argnames="stmt, error",
)
def test_row_limit_compile_error(self, dialect_2012, stmt, error):
with testing.expect_raises_message(exc.CompileError, error):
print(stmt.compile(dialect=dialect_2012))
with testing.expect_raises_message(exc.CompileError, error):
print(stmt.compile(dialect=self.__dialect__))
@testing.combinations(
(lambda t: t.c.a**t.c.b, "POWER(t.a, t.b)", {}),
(lambda t: t.c.a**3, "POWER(t.a, :pow_1)", {"pow_1": 3}),
(lambda t: t.c.c.match(t.c.d), "CONTAINS (t.c, t.d)", {}),
(lambda t: t.c.c.match("w"), "CONTAINS (t.c, :c_1)", {"c_1": "w"}),
(lambda t: func.pow(t.c.a, 3), "POWER(t.a, :pow_1)", {"pow_1": 3}),
(lambda t: func.power(t.c.a, t.c.b), "power(t.a, t.b)", {}),
)
def test_simple_compile(self, fn, string, params):
t = table(
"t",
column("a", Integer),
column("b", Integer),
column("c", String),
column("d", String),
)
expr = resolve_lambda(fn, t=t)
self.assert_compile(expr, string, params)
def test_create_view_or_replace(self):
t = Table("t", MetaData(), Column("a", Integer), Column("b", String))
stmt = CreateView(
select(t.c.a, t.c.b).where(t.c.a > 5),
"my_view",
or_replace=True,
)
self.assert_compile(
stmt,
"CREATE OR ALTER VIEW my_view AS "
"SELECT t.a, t.b FROM t WHERE t.a > 5",
)
def test_create_view_basic(self):
t = Table("t", MetaData(), Column("a", Integer), Column("b", String))
stmt = CreateView(
select(t.c.a, t.c.b).where(t.c.a > 5),
"my_view",
)
self.assert_compile(
stmt,
"CREATE VIEW my_view AS SELECT t.a, t.b FROM t WHERE t.a > 5",
)
| CompileTest |
python | pytorch__pytorch | test/test_multiprocessing_spawn.py | {
"start": 2119,
"end": 6917
} | class ____:
start_method = None
def test_success(self):
mp.start_processes(_test_success_func, nprocs=2, start_method=self.start_method)
def test_success_non_blocking(self):
mp_context = mp.start_processes(_test_success_func, nprocs=2, join=False, start_method=self.start_method)
# After all processes (nproc=2) have joined it must return True
mp_context.join(timeout=None)
mp_context.join(timeout=None)
self.assertTrue(mp_context.join(timeout=None))
def test_first_argument_index(self):
context = mp.get_context(self.start_method)
queue = context.SimpleQueue()
mp.start_processes(_test_success_single_arg_func, args=(queue,), nprocs=2, start_method=self.start_method)
self.assertEqual([0, 1], sorted([queue.get(), queue.get()]))
def test_exception_single(self):
nprocs = 2
for i in range(nprocs):
with self.assertRaisesRegex(
Exception,
f"\nValueError: legitimate exception from process {i:d}$",
):
mp.start_processes(_test_exception_single_func, args=(i,), nprocs=nprocs, start_method=self.start_method)
def test_exception_all(self):
with self.assertRaisesRegex(
Exception,
"\nValueError: legitimate exception from process (0|1)$",
):
mp.start_processes(_test_exception_all_func, nprocs=2, start_method=self.start_method)
def test_terminate_signal(self):
# SIGABRT is aliased with SIGIOT
message = "process 0 terminated with signal (SIGABRT|SIGIOT)"
# Termination through with signal is expressed as a negative exit code
# in multiprocessing, so we know it was a signal that caused the exit.
# This doesn't appear to exist on Windows, where the exit code is always
# positive, and therefore results in a different exception message.
# Exit code 22 means "ERROR_BAD_COMMAND".
if IS_WINDOWS:
message = "process 0 terminated with exit code 22"
with self.assertRaisesRegex(Exception, message):
mp.start_processes(_test_terminate_signal_func, nprocs=2, start_method=self.start_method)
@parametrize("grace_period", [None, 20])
def test_terminate_exit(self, grace_period):
exitcode = 123
ctx = mp.start_processes(_test_terminate_exit_func, args=(exitcode,), nprocs=2, start_method=self.start_method, join=False)
pid1 = ctx.processes[1].pid
with self.assertRaisesRegex(
Exception,
f"process 0 terminated with exit code {exitcode:d}",
), self.assertLogs(level='WARNING') as logs:
while not ctx.join(grace_period=grace_period):
pass
if grace_period is None:
# pid1 is killed by signal.
expected_log = f"Terminating process {pid1:d} via signal"
self.assertIn(expected_log, logs.records[0].getMessage())
else:
# pid1 exits on its own.
self.assertFalse(logs.records)
# Check that no processes are left.
for p in ctx.processes:
self.assertFalse(p.is_alive())
def test_success_first_then_exception(self):
exitcode = 123
with self.assertRaisesRegex(
Exception,
"ValueError: legitimate exception",
):
mp.start_processes(_test_success_first_then_exception_func, args=(exitcode,), nprocs=2, start_method=self.start_method)
@unittest.skipIf(
sys.platform != "linux",
"Only runs on Linux; requires prctl(2)",
)
def _test_nested(self):
context = mp.get_context(self.start_method)
pids_queue = context.Queue()
nested_child_sleep = 20.0
mp_context = mp.start_processes( # noqa: F841
fn=_test_nested,
args=(pids_queue, nested_child_sleep, self.start_method),
nprocs=1,
join=False,
daemon=False,
start_method=self.start_method,
)
# Wait for nested children to terminate in time
pids = pids_queue.get()
start = time.time()
while len(pids) > 0:
for pid in pids:
try:
os.kill(pid, 0)
except ProcessLookupError:
pids.remove(pid) # noqa: B909
break
# This assert fails if any nested child process is still
# alive after (nested_child_sleep / 2) seconds. By
# extension, this test times out with an assertion error
# after (nested_child_sleep / 2) seconds.
self.assertLess(time.time() - start, nested_child_sleep / 2)
time.sleep(0.1)
| _TestMultiProcessing |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-braintree/source_braintree/schemas/merchant_account.py | {
"start": 125,
"end": 248
} | class ____(CatalogModel):
address_details: Address
dba_name: str
legal_name: str
tax_id: str
| BussinessDetails |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 53291,
"end": 55510
} | class ____(NonStrictDataModel):
"""
:param preview: Description or textual data
:type preview: str
:param content_type: System defined raw data content type
:type content_type: str
:param data_hash: Hash of raw data, without any headers or descriptive parts
:type data_hash: str
"""
_schema = {
"properties": {
"content_type": {
"description": "System defined raw data content type",
"type": ["string", "null"],
},
"data_hash": {
"description": "Hash of raw data, without any headers or descriptive parts",
"type": ["string", "null"],
},
"preview": {
"description": "Description or textual data",
"type": ["string", "null"],
},
},
"type": "object",
}
def __init__(self, preview=None, content_type=None, data_hash=None, **kwargs):
super(ArtifactTypeData, self).__init__(**kwargs)
self.preview = preview
self.content_type = content_type
self.data_hash = data_hash
@schema_property("preview")
def preview(self):
return self._property_preview
@preview.setter
def preview(self, value):
if value is None:
self._property_preview = None
return
self.assert_isinstance(value, "preview", six.string_types)
self._property_preview = value
@schema_property("content_type")
def content_type(self):
return self._property_content_type
@content_type.setter
def content_type(self, value):
if value is None:
self._property_content_type = None
return
self.assert_isinstance(value, "content_type", six.string_types)
self._property_content_type = value
@schema_property("data_hash")
def data_hash(self):
return self._property_data_hash
@data_hash.setter
def data_hash(self, value):
if value is None:
self._property_data_hash = None
return
self.assert_isinstance(value, "data_hash", six.string_types)
self._property_data_hash = value
| ArtifactTypeData |
python | django__django | tests/queries/models.py | {
"start": 10288,
"end": 10455
} | class ____(SimpleCategory):
special_name = models.CharField(max_length=35)
def __str__(self):
return self.name + " " + self.special_name
| SpecialCategory |
python | django-mptt__django-mptt | tests/myapp/models.py | {
"start": 2941,
"end": 3250
} | class ____(MPTTModel):
parent = models.ForeignKey(
"self", null=True, blank=True, related_name="children", on_delete=models.CASCADE
)
uuid = models.UUIDField(primary_key=True, default=uuid4)
name = models.CharField(max_length=50)
def __str__(self):
return self.name
| UUIDNode |
python | numpy__numpy | numpy/polynomial/chebyshev.py | {
"start": 59157,
"end": 62286
} | class ____(ABCPolyBase):
"""A Chebyshev series class.
The Chebyshev class provides the standard Python numerical methods
'+', '-', '*', '//', '%', 'divmod', '**', and '()' as well as the
attributes and methods listed below.
Parameters
----------
coef : array_like
Chebyshev coefficients in order of increasing degree, i.e.,
``(1, 2, 3)`` gives ``1*T_0(x) + 2*T_1(x) + 3*T_2(x)``.
domain : (2,) array_like, optional
Domain to use. The interval ``[domain[0], domain[1]]`` is mapped
to the interval ``[window[0], window[1]]`` by shifting and scaling.
The default value is [-1., 1.].
window : (2,) array_like, optional
Window, see `domain` for its use. The default value is [-1., 1.].
symbol : str, optional
Symbol used to represent the independent variable in string
representations of the polynomial expression, e.g. for printing.
The symbol must be a valid Python identifier. Default value is 'x'.
.. versionadded:: 1.24
"""
# Virtual Functions
_add = staticmethod(chebadd)
_sub = staticmethod(chebsub)
_mul = staticmethod(chebmul)
_div = staticmethod(chebdiv)
_pow = staticmethod(chebpow)
_val = staticmethod(chebval)
_int = staticmethod(chebint)
_der = staticmethod(chebder)
_fit = staticmethod(chebfit)
_line = staticmethod(chebline)
_roots = staticmethod(chebroots)
_fromroots = staticmethod(chebfromroots)
@classmethod
def interpolate(cls, func, deg, domain=None, args=()):
"""Interpolate a function at the Chebyshev points of the first kind.
Returns the series that interpolates `func` at the Chebyshev points of
the first kind scaled and shifted to the `domain`. The resulting series
tends to a minmax approximation of `func` when the function is
continuous in the domain.
Parameters
----------
func : function
The function to be interpolated. It must be a function of a single
variable of the form ``f(x, a, b, c...)``, where ``a, b, c...`` are
extra arguments passed in the `args` parameter.
deg : int
Degree of the interpolating polynomial.
domain : {None, [beg, end]}, optional
Domain over which `func` is interpolated. The default is None, in
which case the domain is [-1, 1].
args : tuple, optional
Extra arguments to be used in the function call. Default is no
extra arguments.
Returns
-------
polynomial : Chebyshev instance
Interpolating Chebyshev instance.
Notes
-----
See `numpy.polynomial.chebinterpolate` for more details.
"""
if domain is None:
domain = cls.domain
xfunc = lambda x: func(pu.mapdomain(x, cls.window, domain), *args)
coef = chebinterpolate(xfunc, deg)
return cls(coef, domain=domain)
# Virtual properties
domain = np.array(chebdomain)
window = np.array(chebdomain)
basis_name = 'T'
| Chebyshev |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/generator2.py | {
"start": 281,
"end": 1403
} | class ____:
pass
def generator1() -> Iterator[ClassA]:
yield from generator1()
def generator2() -> Iterator[ClassB]:
# This should generate an error because it yields
# an iterator of the wrong type.
yield from generator1()
# This should also generate an error because it
# yields the wrong type.
yield from [1]
def generator3(
arg: Generator[int, None, T] | Generator[str, None, T],
) -> Generator[int | str, None, T]:
x = yield from arg
reveal_type(x, expected_text="T@generator3")
return x
def generator4(
arg: Generator[int, None, int] | Generator[str, None, str],
) -> Generator[int | str, None, int | str]:
x = yield from arg
reveal_type(x, expected_text="int | str")
return x
def generator5() -> Generator[None, float, None]:
x: float = yield
def generator6() -> Generator[None, int, None]:
yield from generator5()
def generator7() -> Generator[None, int, None]:
x: float = yield
def generator8() -> Generator[None, float, None]:
# This should generate an error because of the send type.
yield from generator7()
| ClassC |
python | neetcode-gh__leetcode | python/1448-count-good-nodes-in-binary-tree.py | {
"start": 192,
"end": 574
} | class ____:
def goodNodes(self, root: TreeNode) -> int:
def dfs(node, maxVal):
if not node:
return 0
res = 1 if node.val >= maxVal else 0
maxVal = max(maxVal, node.val)
res += dfs(node.left, maxVal)
res += dfs(node.right, maxVal)
return res
return dfs(root, root.val)
| Solution |
python | langchain-ai__langchain | libs/core/langchain_core/output_parsers/openai_functions.py | {
"start": 5465,
"end": 6182
} | class ____(JsonOutputFunctionsParser):
"""Parse an output as the element of the JSON object."""
key_name: str
"""The name of the key to return."""
def parse_result(self, result: list[Generation], *, partial: bool = False) -> Any:
"""Parse the result of an LLM call to a JSON object.
Args:
result: The result of the LLM call.
partial: Whether to parse partial JSON objects.
Returns:
The parsed JSON object.
"""
res = super().parse_result(result, partial=partial)
if partial and res is None:
return None
return res.get(self.key_name) if partial else res[self.key_name]
| JsonKeyOutputFunctionsParser |
python | huggingface__transformers | src/transformers/models/phimoe/modeling_phimoe.py | {
"start": 20796,
"end": 21782
} | class ____(nn.Linear):
def __init__(self, config: PhimoeConfig):
super().__init__(config.hidden_size, config.num_local_experts, bias=False)
self.router_jitter_noise = config.router_jitter_noise
self.input_jitter_noise = config.input_jitter_noise
def forward(self, hidden_states: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
if self.training and self.input_jitter_noise > 0:
hidden_states *= torch.empty_like(hidden_states).uniform_(
1.0 - self.input_jitter_noise, 1.0 + self.input_jitter_noise
)
router_logits = super().forward(hidden_states)
routing_weights, selected_experts = sparsemixer(
router_logits,
jitter_eps=self.router_jitter_noise,
training=self.training,
)
routing_weights = torch.zeros_like(router_logits).scatter_(1, selected_experts, routing_weights)
return routing_weights, selected_experts
| PhimoeTopKRouter |
python | cython__cython | Cython/Compiler/Symtab.py | {
"start": 126559,
"end": 134231
} | class ____(Scope):
# Namespace of a C++ class.
is_cpp_class_scope = 1
default_constructor = None
type = None
def __init__(self, name, outer_scope, templates=None):
Scope.__init__(self, name, outer_scope, None)
self.directives = outer_scope.directives
self.inherited_var_entries = []
if templates is not None:
for T in templates:
template_entry = self.declare(
T, T, PyrexTypes.TemplatePlaceholderType(T), None, 'extern')
template_entry.is_type = 1
def declare_var(self, name, type, pos,
cname=None, visibility='extern',
api=False, in_pxd=False, is_cdef=False, defining=False, pytyping_modifiers=None):
# Add an entry for an attribute.
if not cname:
cname = name
self._reject_pytyping_modifiers(pos, pytyping_modifiers)
entry = self.lookup_here(name)
if defining and entry is not None:
if type.is_cfunction:
entry = self.declare(name, cname, type, pos, visibility)
elif entry.type.same_as(type):
# Fix with_gil vs nogil.
entry.type = entry.type.with_with_gil(type.with_gil)
else:
error(pos, "Function signature does not match previous declaration")
else:
entry = self.declare(name, cname, type, pos, visibility)
if type.is_cfunction and not defining:
entry.is_inherited = 1
entry.is_variable = 1
if type.is_cfunction:
entry.is_cfunction = 1
if self.type and not self.type.get_fused_types():
entry.func_cname = "%s::%s" % (self.type.empty_declaration_code(), cname)
if name != "this" and (defining or name != "<init>"):
self.var_entries.append(entry)
return entry
def declare_cfunction(self, name, type, pos,
cname=None, visibility='extern', api=0, in_pxd=0,
defining=0, modifiers=(), utility_code=None, overridable=False):
class_name = self.name.split('::')[-1]
if name in (class_name, '__init__') and cname is None:
cname = "%s__init__%s" % (Naming.func_prefix, class_name)
name = EncodedString('<init>')
type.return_type = PyrexTypes.CVoidType()
# This is called by the actual constructor, but need to support
# arguments that cannot by called by value.
type.original_args = type.args
def maybe_ref(arg):
if arg.type.is_cpp_class and not arg.type.is_reference:
return PyrexTypes.CFuncTypeArg(
arg.name, PyrexTypes.c_ref_type(arg.type), arg.pos)
else:
return arg
type.args = [maybe_ref(arg) for arg in type.args]
elif name == '__dealloc__' and cname is None:
cname = "%s__dealloc__%s" % (Naming.func_prefix, class_name)
name = EncodedString('<del>')
type.return_type = PyrexTypes.CVoidType()
if name in ('<init>', '<del>') and type.nogil:
for base in self.type.base_classes:
base_entry = base.scope.lookup(name)
if base_entry and not base_entry.type.nogil:
error(pos, "Constructor cannot be called without GIL unless all base constructors can also be called without GIL")
error(base_entry.pos, "Base constructor defined here.")
# The previous entries management is now done directly in Scope.declare
entry = self.declare_var(name, type, pos,
defining=defining,
cname=cname, visibility=visibility)
entry.utility_code = utility_code
type.entry = entry
return entry
def declare_inherited_cpp_attributes(self, base_class):
base_scope = base_class.scope
template_type = base_class
while getattr(template_type, 'template_type', None):
template_type = template_type.template_type
if getattr(template_type, 'templates', None):
base_templates = [T.name for T in template_type.templates]
else:
base_templates = ()
# Declare entries for all the C++ attributes of an
# inherited type, with cnames modified appropriately
# to work with this type.
for base_entry in base_scope.inherited_var_entries + base_scope.var_entries:
#constructor/destructor is not inherited
if base_entry.name in ("<init>", "<del>"):
continue
#print base_entry.name, self.entries
if base_entry.name in self.entries:
base_entry.name # FIXME: is there anything to do in this case?
entry = self.declare(base_entry.name, base_entry.cname,
base_entry.type, None, 'extern')
entry.is_variable = 1
entry.is_inherited = 1
if base_entry.is_cfunction:
entry.is_cfunction = 1
entry.func_cname = base_entry.func_cname
self.inherited_var_entries.append(entry)
for base_entry in base_scope.cfunc_entries:
entry = self.declare_cfunction(base_entry.name, base_entry.type,
base_entry.pos, base_entry.cname,
base_entry.visibility, api=0,
modifiers=base_entry.func_modifiers,
utility_code=base_entry.utility_code)
entry.is_inherited = 1
for base_entry in base_scope.type_entries:
if base_entry.name not in base_templates:
entry = self.declare_type(base_entry.name, base_entry.type,
base_entry.pos, base_entry.cname,
base_entry.visibility, defining=False)
entry.is_inherited = 1
def specialize(self, values, type_entry):
scope = CppClassScope(self.name, self.outer_scope)
scope.type = type_entry
for entry in self.entries.values():
if entry.is_type:
scope.declare_type(entry.name,
entry.type.specialize(values),
entry.pos,
entry.cname,
template=1)
elif entry.type.is_cfunction:
for e in entry.all_alternatives():
scope.declare_cfunction(e.name,
e.type.specialize(values),
e.pos,
e.cname,
utility_code=e.utility_code)
else:
scope.declare_var(entry.name,
entry.type.specialize(values),
entry.pos,
entry.cname,
entry.visibility)
return scope
def lookup_here(self, name):
if name == "__init__":
name = "<init>"
elif name == "__dealloc__":
name = "<del>"
return super(CppClassScope, self).lookup_here(name)
def is_cpp(self):
# Whatever the global environment, always treat cppclass with C++ rules.
# (Cython will emit warnings elsewhere)
return True
| CppClassScope |
python | getsentry__sentry | tests/sentry/integrations/vercel/test_webhook.py | {
"start": 661,
"end": 1324
} | class ____(APITestCase):
webhook_url = "/extensions/vercel/webhook/"
def test_get(self) -> None:
response = self.client.get(self.webhook_url)
assert response.status_code == 405
def test_invalid_signature(self) -> None:
with override_options({"vercel.client-secret": SECRET}):
response = self.client.post(
path=self.webhook_url,
data=EXAMPLE_DEPLOYMENT_WEBHOOK_RESPONSE,
content_type="application/json",
HTTP_X_ZEIT_SIGNATURE="xxxinvalidsignaturexxx",
)
assert response.status_code == 401
@control_silo_test
| SignatureVercelTest |
python | tensorflow__tensorflow | tensorflow/python/distribute/collective_all_reduce_strategy.py | {
"start": 2708,
"end": 9369
} | class ____(distribute_lib.Strategy):
"""A distribution strategy for synchronous training on multiple workers.
This strategy implements synchronous distributed training across multiple
workers, each with potentially multiple GPUs. Similar to
`tf.distribute.MirroredStrategy`, it replicates all variables and computations
to each local device. The difference is that it uses a distributed collective
implementation (e.g. all-reduce), so that multiple workers can work together.
You need to launch your program on each worker and configure
`cluster_resolver` correctly. For example, if you are using
`tf.distribute.cluster_resolver.TFConfigClusterResolver`, each worker needs to
have its corresponding `task_type` and `task_id` set in the `TF_CONFIG`
environment variable. An example TF_CONFIG on worker-0 of a two worker cluster
is:
```
TF_CONFIG = '{"cluster": {"worker": ["localhost:12345", "localhost:23456"]},
"task": {"type": "worker", "index": 0} }'
```
Your program runs on each worker as-is. Note that collectives require each
worker to participate. All `tf.distribute` and non `tf.distribute` API may use
collectives internally, e.g. checkpointing and saving since reading a
`tf.Variable` with `tf.VariableSynchronization.ON_READ` all-reduces the value.
Therefore it's recommended to run exactly the same program on each worker.
Dispatching based on `task_type` or `task_id` of the worker is error-prone.
`cluster_resolver.num_accelerators()` determines the number of GPUs the
strategy uses. If it's zero, the strategy uses the CPU. All workers need to
use the same number of devices, otherwise the behavior is undefined.
This strategy is not intended for TPU. Use `tf.distribute.TPUStrategy`
instead.
After setting up TF_CONFIG, using this strategy is similar to using
`tf.distribute.MirroredStrategy` and `tf.distribute.TPUStrategy`.
```
strategy = tf.distribute.MultiWorkerMirroredStrategy()
with strategy.scope():
model = tf.keras.Sequential([
tf.keras.layers.Dense(2, input_shape=(5,)),
])
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
def dataset_fn(ctx):
x = np.random.random((2, 5)).astype(np.float32)
y = np.random.randint(2, size=(2, 1))
dataset = tf.data.Dataset.from_tensor_slices((x, y))
return dataset.repeat().batch(1, drop_remainder=True)
dist_dataset = strategy.distribute_datasets_from_function(dataset_fn)
model.compile()
model.fit(dist_dataset)
```
You can also write your own training loop:
```
@tf.function
def train_step(iterator):
def step_fn(inputs):
features, labels = inputs
with tf.GradientTape() as tape:
logits = model(features, training=True)
loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, logits)
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
strategy.run(step_fn, args=(next(iterator),))
for _ in range(NUM_STEP):
train_step(iterator)
```
See
[Multi-worker training with
Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras)
for a detailed tutorial.
__Saving__
You need to save and checkpoint on all workers instead of just one. This is
because variables whose synchronization=ON_READ triggers aggregation during
saving. It's recommended to save to a different path on each worker to avoid
race conditions. Each worker saves the same thing. See
[Multi-worker training with
Keras](https://www.tensorflow.org/tutorials/distribute/multi_worker_with_keras#model_saving_and_loading)
tutorial for examples.
__Known Issues__
* `tf.distribute.cluster_resolver.TFConfigClusterResolver` does not return the
correct number of accelerators. The strategy uses all available GPUs if
`cluster_resolver` is `tf.distribute.cluster_resolver.TFConfigClusterResolver`
or `None`.
* In eager mode, the strategy needs to be created before calling any other
Tensorflow API.
"""
# pylint: enable=line-too-long
# TODO(anjalisridhar): Update our guides with examples showing how we can use
# the cluster_resolver argument.
# The starting number for collective keys. This should only be set in tests.
_collective_key_base = 0
def __init__(self, cluster_resolver=None, communication_options=None):
"""Creates the strategy.
Args:
cluster_resolver: optional
`tf.distribute.cluster_resolver.ClusterResolver`. If `None`,
`tf.distribute.cluster_resolver.TFConfigClusterResolver` is used.
communication_options: optional
`tf.distribute.experimental.CommunicationOptions`. This configures the
default options for cross device communications. It can be overridden by
options provided to the communication APIs like
`tf.distribute.ReplicaContext.all_reduce`. See
`tf.distribute.experimental.CommunicationOptions` for details.
"""
if communication_options is None:
communication_options = collective_util.Options()
super(CollectiveAllReduceStrategy, self).__init__(
CollectiveAllReduceExtended(
self,
cluster_resolver=cluster_resolver,
communication_options=communication_options,
)
)
distribute_lib.distribution_strategy_gauge.get_cell("V2").set(
"MultiWorkerMirroredStrategy"
)
# pylint: disable=protected-access
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_workers"
).set(self.extended._num_workers)
distribute_lib.distribution_strategy_replica_gauge.get_cell(
"num_replicas_per_worker"
).set(self.extended._num_devices_per_worker)
@classmethod
def _from_local_devices(cls, devices, communication_options=None):
"""A convenience method to create an object with a list of devices."""
obj = cls(communication_options=communication_options)
obj.extended._initialize_local( # pylint: disable=protected-access
tfconfig_cluster_resolver.TFConfigClusterResolver(), devices=devices
)
return obj
@property
def cluster_resolver(self):
"""Returns the cluster resolver associated with this strategy.
As a multi-worker strategy, `tf.distribute.MultiWorkerMirroredStrategy`
provides the associated `tf.distribute.cluster_resolver.ClusterResolver`. If
the user provides one in `__init__`, that instance is returned; if the user
does not, a default `TFConfigClusterResolver` is provided.
"""
return self.extended._cluster_resolver # pylint: disable=protected-access
| CollectiveAllReduceStrategy |
python | Netflix__metaflow | metaflow/plugins/gcp/gcp_secret_manager_secrets_provider.py | {
"start": 613,
"end": 772
} | class ____(MetaflowException):
"""Raised when the SecretString response from GCP Secrets Manager is not valid JSON"""
| MetaflowGcpSecretsManagerJSONParseError |
python | apache__airflow | providers/apache/spark/tests/unit/apache/spark/decorators/test_pyspark.py | {
"start": 1266,
"end": 1641
} | class ____:
data: dict[str, Any]
def __init__(self, data: dict[str, Any] | None = None):
if data:
self.data = data
else:
self.data = {}
def get(self, key: str, default: Any = None) -> Any:
return self.data.get(key, default)
def set(self, key: str, value: Any) -> None:
self.data[key] = value
| FakeConfig |
python | kamyu104__LeetCode-Solutions | Python/maximum-number-of-groups-with-increasing-length.py | {
"start": 1329,
"end": 1797
} | class ____(object):
def maxIncreasingGroups(self, usageLimits):
"""
:type usageLimits: List[int]
:rtype: int
"""
usageLimits.sort()
result = curr = 0
for x in usageLimits:
curr += x
if curr >= result+1:
curr -= result+1
result += 1
return result
# Time: O(nlogn)
# Space: O(1)
# constructive algorithms, sort, binary search, greedy
| Solution2 |
python | numba__numba | numba/tests/test_lists.py | {
"start": 20451,
"end": 22680
} | class ____(MemoryLeakMixin, TestCase):
"""
Test unboxing of Python lists into native Numba lists.
"""
@contextlib.contextmanager
def assert_type_error(self, msg):
with self.assertRaises(TypeError) as raises:
yield
if msg is not None:
self.assertRegex(str(raises.exception), msg)
def check_unary(self, pyfunc):
cfunc = jit(nopython=True)(pyfunc)
def check(arg):
expected = pyfunc(arg)
got = cfunc(arg)
self.assertPreciseEqual(got, expected)
return check
def test_numbers(self):
check = self.check_unary(unbox_usecase)
check([1, 2])
check([1j, 2.5j])
def test_tuples(self):
check = self.check_unary(unbox_usecase2)
check([(1, 2), (3, 4)])
check([(1, 2j), (3, 4j)])
check([(), (), ()])
def test_list_inside_tuple(self):
check = self.check_unary(unbox_usecase3)
check((1, [2, 3, 4]))
def test_list_of_tuples_inside_tuple(self):
check = self.check_unary(unbox_usecase4)
check((1, [(2,), (3,)]))
def test_errors(self):
# See #1545 and #1594: error checking should ensure the list is
# homogeneous
msg = "can't unbox heterogeneous list"
pyfunc = noop
cfunc = jit(nopython=True)(pyfunc)
lst = [1, 2.5]
with self.assert_type_error(msg):
cfunc(lst)
# The list hasn't been changed (bogus reflecting)
self.assertEqual(lst, [1, 2.5])
with self.assert_type_error(msg):
cfunc([1, 2j])
# Same when the list is nested in a tuple or namedtuple
with self.assert_type_error(msg):
cfunc((1, [1, 2j]))
with self.assert_type_error(msg):
cfunc(Point(1, [1, 2j]))
# Issue #1638: tuples of different size.
# Note the check is really on the tuple side.
lst = [(1,), (2, 3)]
with self.assertRaises(TypeError) as raises:
cfunc(lst)
msg = ("can't unbox heterogeneous list: "
"UniTuple({0} x 1) != UniTuple({0} x 2)")
self.assertEqual(str(raises.exception), msg.format(types.intp))
| TestUnboxing |
python | openai__openai-python | src/openai/resources/audio/audio.py | {
"start": 3177,
"end": 3715
} | class ____:
def __init__(self, audio: Audio) -> None:
self._audio = audio
@cached_property
def transcriptions(self) -> TranscriptionsWithRawResponse:
return TranscriptionsWithRawResponse(self._audio.transcriptions)
@cached_property
def translations(self) -> TranslationsWithRawResponse:
return TranslationsWithRawResponse(self._audio.translations)
@cached_property
def speech(self) -> SpeechWithRawResponse:
return SpeechWithRawResponse(self._audio.speech)
| AudioWithRawResponse |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/matchMapping1.py | {
"start": 1600,
"end": 3369
} | class ____:
field_of_interest: Literal["release_year", "gross_earnings"]
def test_typed_dict(value_to_match: Movie):
match value_to_match:
case {"title": a1, "release_year": a2, **a3}:
reveal_type(a1, expected_text="str")
reveal_type(a2, expected_text="int")
reveal_type(a3, expected_text="dict[str, object]")
reveal_type(value_to_match, expected_text="Movie")
case {3: b1, "title": b2}:
reveal_type(b1, expected_text="Never")
reveal_type(b2, expected_text="Never")
reveal_type(value_to_match, expected_text="Never")
case {"director": c1}:
reveal_type(c1, expected_text="Never")
reveal_type(value_to_match, expected_text="Never")
case {MovieInfo.field_of_interest: d1}:
reveal_type(d1, expected_text="int | float")
reveal_type(value_to_match, expected_text="Movie")
def test_union1(value_to_match: dict[str | int, str | int] | Movie | str):
match value_to_match:
case {3: a1}:
reveal_type(a1, expected_text="str | int")
reveal_type(value_to_match, expected_text="dict[str | int, str | int]")
case {"gross_earnings": b1}:
reveal_type(b1, expected_text="str | int | float")
reveal_type(
value_to_match, expected_text="dict[str | int, str | int] | Movie"
)
def test_union2(value_to_match: dict[int, int] | Movie | str):
match value_to_match:
case {**kw}:
reveal_type(kw, expected_text="dict[int | str, int | object]")
reveal_type(value_to_match, expected_text="dict[int, int] | Movie")
case x:
reveal_type(x, expected_text="str")
| MovieInfo |
python | astropy__astropy | astropy/io/fits/column.py | {
"start": 7560,
"end": 8768
} | class ____(str):
"""
Base class for binary table column formats (just called _ColumnFormat)
and ASCII table column formats (_AsciiColumnFormat).
"""
def __eq__(self, other):
if not other:
return False
if isinstance(other, str):
if not isinstance(other, self.__class__):
try:
other = self.__class__(other)
except ValueError:
return False
else:
return False
return self.canonical == other.canonical
def __hash__(self):
return hash(self.canonical)
@lazyproperty
def dtype(self):
"""
The Numpy dtype object created from the format's associated recformat.
"""
return np.dtype(self.recformat)
@classmethod
def from_column_format(cls, format):
"""Creates a column format object from another column format object
regardless of their type.
That is, this can convert a _ColumnFormat to an _AsciiColumnFormat
or vice versa at least in cases where a direct translation is possible.
"""
return cls.from_recformat(format.recformat)
| _BaseColumnFormat |
python | pypa__warehouse | tests/common/db/subscriptions.py | {
"start": 1343,
"end": 1718
} | class ____(WarehouseFactory):
class Meta:
model = StripeSubscription
id = factory.Faker("uuid4", cast_to=None)
subscription_id = factory.Faker("uuid4")
status = StripeSubscriptionStatus.Active
subscription_price = factory.SubFactory(StripeSubscriptionPriceFactory)
customer = factory.SubFactory(StripeCustomerFactory)
| StripeSubscriptionFactory |
python | sphinx-doc__sphinx | sphinx/domains/rst.py | {
"start": 4638,
"end": 6972
} | class ____(ReSTMarkup):
"""Description of an option for reST directive."""
option_spec: ClassVar[OptionSpec] = ReSTMarkup.option_spec.copy()
option_spec.update({
'type': directives.unchanged,
})
def handle_signature(self, sig: str, signode: desc_signature) -> str:
try:
name, argument = re.split(r'\s*:\s+', sig.strip(), maxsplit=1)
except ValueError:
name, argument = sig, None
desc_name = f':{name}:'
signode['fullname'] = name.strip()
signode += addnodes.desc_name(desc_name, desc_name)
if argument:
text = f' {argument}'
signode += addnodes.desc_annotation(text, text)
if self.options.get('type'):
text = ' (%s)' % self.options['type']
signode += addnodes.desc_annotation(text, text)
return name
def add_target_and_index(
self, name: str, sig: str, signode: desc_signature
) -> None:
domain = self.env.domains.restructuredtext_domain
directive_name = self.current_directive
if directive_name:
prefix = f'{self.objtype}-{directive_name}'
objname = f'{directive_name}:{name}'
else:
prefix = self.objtype
objname = name
node_id = make_id(self.env, self.state.document, prefix, name)
signode['ids'].append(node_id)
self.state.document.note_explicit_target(signode)
domain.note_object(self.objtype, objname, node_id, location=signode)
if directive_name:
key = name[0].upper()
pair = [
_('%s (directive)') % directive_name,
_(':%s: (directive option)') % name,
]
self.indexnode['entries'].append((
'pair',
'; '.join(pair),
node_id,
'',
key,
))
else:
key = name[0].upper()
text = _(':%s: (directive option)') % name
self.indexnode['entries'].append(('single', text, node_id, '', key))
@property
def current_directive(self) -> str:
directives = self.env.ref_context.get('rst:directives')
if directives:
return directives[-1]
else:
return ''
| ReSTDirectiveOption |
python | huggingface__transformers | src/transformers/models/depth_anything/modeling_depth_anything.py | {
"start": 6318,
"end": 7708
} | class ____(nn.Module):
# Copied from transformers.models.dpt.modeling_dpt.DPTFeatureFusionStage.__init__ with DPT->DepthAnything
def __init__(self, config: DepthAnythingConfig):
super().__init__()
self.layers = nn.ModuleList()
for _ in range(len(config.neck_hidden_sizes)):
self.layers.append(DepthAnythingFeatureFusionLayer(config))
def forward(self, hidden_states, size=None):
# reversing the hidden_states, we start from the last
hidden_states = hidden_states[::-1]
fused_hidden_states = []
fused_hidden_state = None
for idx, (hidden_state, layer) in enumerate(zip(hidden_states, self.layers)):
size = hidden_states[idx + 1].shape[2:] if idx != (len(hidden_states) - 1) else None
if fused_hidden_state is None:
# first layer only uses the last hidden_state
fused_hidden_state = layer(hidden_state, size=size)
else:
fused_hidden_state = layer(fused_hidden_state, hidden_state, size=size)
fused_hidden_states.append(fused_hidden_state)
return fused_hidden_states
# Modified from transformers.models.dpt.modeling_dpt.DPTPreTrainedModel with DPT->DepthAnything,dpt->depth_anything
# avoiding sdpa and flash_attn_2 support, it's done in the backend
@auto_docstring
| DepthAnythingFeatureFusionStage |
python | huggingface__transformers | src/transformers/models/udop/modeling_udop.py | {
"start": 19976,
"end": 30405
class ____(nn.Module):
    """T5-style multi-head attention layer with optional relative position bias.

    Acts as self-attention when ``key_value_states`` is None, or as
    cross-attention over an encoder's output when ``key_value_states`` is
    provided. Supports key/value caching for fast autoregressive decoding.
    Note that, per the T5 convention, attention scores are NOT scaled by
    1/sqrt(d_kv) before the softmax (no scaling appears in ``forward``).
    """
    def __init__(
        self,
        config: UdopConfig,
        has_relative_attention_bias=False,
        layer_idx: Optional[int] = None,
    ):
        super().__init__()
        self.is_decoder = config.is_decoder
        self.has_relative_attention_bias = has_relative_attention_bias
        self.relative_attention_num_buckets = config.relative_attention_num_buckets
        self.relative_attention_max_distance = config.relative_attention_max_distance
        self.d_model = config.d_model
        self.key_value_proj_dim = config.d_kv
        self.n_heads = config.num_heads
        self.dropout = config.dropout_rate
        # Projections map d_model -> n_heads * d_kv (and back for the output proj).
        self.inner_dim = self.n_heads * self.key_value_proj_dim
        self.layer_idx = layer_idx
        if layer_idx is None and self.is_decoder:
            # Decoder layers need a layer_idx to address their slot in the KV cache.
            logger.warning_once(
                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
                "will to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )
        # No bias terms on any projection (T5 convention).
        self.q = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.k = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.v = nn.Linear(self.d_model, self.inner_dim, bias=False)
        self.o = nn.Linear(self.inner_dim, self.d_model, bias=False)
        if self.has_relative_attention_bias:
            # One learned bias scalar per (bucket, head); typically only the first
            # layer of the stack owns this table and shares the bias downstream.
            self.relative_attention_bias = nn.Embedding(self.relative_attention_num_buckets, self.n_heads)
        self.gradient_checkpointing = False
    @staticmethod
    def _relative_position_bucket(relative_position, bidirectional=True, num_buckets=32, max_distance=128):
        """
        Adapted from Mesh Tensorflow:
        https://github.com/tensorflow/mesh/blob/0cb87fe07da627bf0b7e60475d59f95ed6b5be3d/mesh_tensorflow/transformer/transformer_layers.py#L593
        Translate relative position to a bucket number for relative attention. The relative position is defined as
        memory_position - query_position, i.e. the distance in tokens from the attending position to the attended-to
        position. If bidirectional=False, then positive relative positions are invalid. We use smaller buckets for
        small absolute relative_position and larger buckets for larger absolute relative_positions. All relative
        positions >=max_distance map to the same bucket. All relative positions <=-max_distance map to the same bucket.
        This should allow for more graceful generalization to longer sequences than the model has been trained on
        Args:
            relative_position: an int32 Tensor
            bidirectional: a boolean - whether the attention is bidirectional
            num_buckets: an integer
            max_distance: an integer
        Returns:
            a Tensor with the same shape as relative_position, containing int32 values in the range [0, num_buckets)
        """
        relative_buckets = 0
        if bidirectional:
            # Split the bucket space: first half for looking back, second half for
            # looking ahead, then fold positions onto their absolute distance.
            num_buckets //= 2
            relative_buckets += (relative_position > 0).to(torch.long) * num_buckets
            relative_position = torch.abs(relative_position)
        else:
            # Causal case: future (positive) offsets are clamped to distance 0.
            relative_position = -torch.min(relative_position, torch.zeros_like(relative_position))
        # now relative_position is in the range [0, inf)
        # half of the buckets are for exact increments in positions
        max_exact = num_buckets // 2
        is_small = relative_position < max_exact
        # The other half of the buckets are for logarithmically bigger bins in positions up to max_distance
        relative_position_if_large = max_exact + (
            torch.log(relative_position.float() / max_exact)
            / math.log(max_distance / max_exact)
            * (num_buckets - max_exact)
        ).to(torch.long)
        relative_position_if_large = torch.min(
            relative_position_if_large, torch.full_like(relative_position_if_large, num_buckets - 1)
        )
        relative_buckets += torch.where(is_small, relative_position, relative_position_if_large)
        return relative_buckets
    def compute_bias(self, query_length, key_length, device=None, cache_position=None):
        """Compute binned relative position bias"""
        # When decoding with a cache, cache_position gives the absolute query
        # positions; otherwise queries are assumed to start at position 0.
        if device is None:
            device = self.relative_attention_bias.weight.device
        if cache_position is None:
            context_position = torch.arange(query_length, dtype=torch.long, device=device)[:, None]
        else:
            context_position = cache_position[:, None].to(device)
        memory_position = torch.arange(key_length, dtype=torch.long, device=device)[None, :]
        relative_position = memory_position - context_position  # shape (query_length, key_length)
        relative_position_bucket = self._relative_position_bucket(
            relative_position,  # shape (query_length, key_length)
            bidirectional=(not self.is_decoder),
            num_buckets=self.relative_attention_num_buckets,
            max_distance=self.relative_attention_max_distance,
        )
        values = self.relative_attention_bias(relative_position_bucket)  # shape (query_length, key_length, num_heads)
        values = values.permute([2, 0, 1]).unsqueeze(0)  # shape (1, num_heads, query_length, key_length)
        return values
    def forward(
        self,
        hidden_states,
        mask=None,
        key_value_states=None,
        position_bias=None,
        past_key_values=None,
        query_length=None,
        use_cache=False,
        output_attentions=False,
        cache_position=None,
    ):
        """
        Self-attention (if key_value_states is None) or attention over source sentence (provided by key_value_states).

        Returns a tuple of (attn_output, position_bias[, attn_weights if
        output_attentions]). ``position_bias`` is returned so callers can reuse
        it across layers without recomputing it.
        """
        # Input is (batch_size, seq_length, dim)
        # Mask is (batch_size, 1, 1, key_length) (non-causal encoder) or (batch_size, 1, seq_length, key_length) (causal decoder)
        batch_size, seq_length = hidden_states.shape[:2]
        # if key_value_states are provided this layer is used as a cross-attention layer for the decoder
        is_cross_attention = key_value_states is not None
        query_states = self.q(hidden_states)
        query_states = query_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
        # Check is encoder-decoder model is being used. Otherwise we'll get `DynamicCache`
        is_updated = False
        if isinstance(past_key_values, EncoderDecoderCache):
            is_updated = past_key_values.is_updated.get(self.layer_idx)
            if is_cross_attention:
                # after the first generated id, we can subsequently re-use all key/value_states from cache
                curr_past_key_values = past_key_values.cross_attention_cache
            else:
                curr_past_key_values = past_key_values.self_attention_cache
        else:
            curr_past_key_values = past_key_values
        # Keys/values come from the encoder output for cross-attention, or from
        # the current hidden states for self-attention.
        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = curr_past_key_values.layers[self.layer_idx].keys
            value_states = curr_past_key_values.layers[self.layer_idx].values
        else:
            key_states = self.k(current_states)
            value_states = self.v(current_states)
            key_states = key_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
            value_states = value_states.view(batch_size, -1, self.n_heads, self.key_value_proj_dim).transpose(1, 2)
            if past_key_values is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_values.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
                    past_key_values.is_updated[self.layer_idx] = True
        # compute scores, equivalent of torch.einsum("bnqd,bnkd->bnqk", query_states, key_states), compatible with onnx op>9
        scores = torch.matmul(query_states, key_states.transpose(3, 2))
        if position_bias is None:
            key_length = key_states.shape[-2]
            # cache position is 0-indexed so we add 1 to get the real length of queries (aka with past)
            real_seq_length = query_length if query_length is not None else cache_position[-1] + 1
            if not self.has_relative_attention_bias:
                # Layers without their own bias table contribute a zero bias
                # (the mask, if any, is still folded in below).
                position_bias = torch.zeros(
                    (1, self.n_heads, seq_length, key_length), device=scores.device, dtype=scores.dtype
                )
                if self.gradient_checkpointing and self.training:
                    position_bias.requires_grad = True
            else:
                position_bias = self.compute_bias(
                    real_seq_length, key_length, device=scores.device, cache_position=cache_position
                )
                # Keep only the bias rows for the current query positions.
                position_bias = position_bias[:, :, -seq_length:, :]
            if mask is not None:
                # Additive mask (large negative values at disallowed positions),
                # trimmed to the actual key length.
                causal_mask = mask[:, :, :, : key_states.shape[-2]]
                position_bias = position_bias + causal_mask
        position_bias_masked = position_bias
        scores += position_bias_masked
        # (batch_size, n_heads, seq_length, key_length)
        attn_weights = nn.functional.softmax(scores.float(), dim=-1).type_as(scores)
        attn_weights = nn.functional.dropout(attn_weights, p=self.dropout, training=self.training)
        attn_output = torch.matmul(attn_weights, value_states)
        attn_output = attn_output.transpose(1, 2).contiguous()
        attn_output = attn_output.view(batch_size, -1, self.inner_dim)
        attn_output = self.o(attn_output)
        outputs = (attn_output, position_bias)
        if output_attentions:
            outputs = outputs + (attn_weights,)
        return outputs
# Copied from transformers.models.t5.modeling_t5.T5LayerSelfAttention with T5->Udop
| UdopAttention |
python | doocs__leetcode | solution/0700-0799/0728.Self Dividing Numbers/Solution.py | {
"start": 0,
"end": 355
} | class ____:
def selfDividingNumbers(self, left: int, right: int) -> List[int]:
def check(x: int) -> bool:
y = x
while y:
if y % 10 == 0 or x % (y % 10):
return False
y //= 10
return True
return [x for x in range(left, right + 1) if check(x)]
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.