language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | numba__numba | numba/tests/test_typedlist.py | {
"start": 21954,
"end": 23660
} | class ____(MemoryLeakMixin, TestCase):
def test_allocation(self):
# kwarg version
for i in range(16):
tl = List.empty_list(types.int32, allocated=i)
self.assertEqual(tl._allocated(), i)
# posarg version
for i in range(16):
tl = List.empty_list(types.int32, i)
self.assertEqual(tl._allocated(), i)
def test_allocation_njit(self):
# kwarg version
@njit
def foo(i):
tl = List.empty_list(types.int32, allocated=i)
return tl._allocated()
for j in range(16):
self.assertEqual(foo(j), j)
# posarg version
@njit
def foo(i):
tl = List.empty_list(types.int32, i)
return tl._allocated()
for j in range(16):
self.assertEqual(foo(j), j)
def test_growth_and_shrinkage(self):
tl = List.empty_list(types.int32)
growth_before = {0: 0, 4:4, 8:8, 16:16}
growth_after = {0: 4, 4:8, 8:16, 16:25}
for i in range(17):
if i in growth_before:
self.assertEqual(growth_before[i], tl._allocated())
tl.append(i)
if i in growth_after:
self.assertEqual(growth_after[i], tl._allocated())
shrink_before = {17: 25, 12:25, 9:18, 6:12, 4:8, 3:6, 2:5, 1:4}
shrink_after = {17: 25, 12:18, 9:12, 6:8, 4:6, 3:5, 2:4, 1:0}
for i in range(17, 0, -1):
if i in shrink_before:
self.assertEqual(shrink_before[i], tl._allocated())
tl.pop()
if i in shrink_after:
self.assertEqual(shrink_after[i], tl._allocated())
| TestAllocation |
python | kamyu104__LeetCode-Solutions | Python/number-of-1-bits.py | {
"start": 1722,
"end": 1976
} | class ____(object):
# @param n, an integer
# @return an integer
def hammingWeight(self, n):
result = 0
while n:
n &= n - 1
result += 1
return result
# Time: O(logn) = O(32)
# Space: O(1)
| Solution3 |
python | spyder-ide__spyder | spyder/plugins/editor/widgets/tests/test_panels.py | {
"start": 452,
"end": 2979
} | class ____(Panel):
"""Example external panel."""
def __init__(self):
"""Initialize panel."""
Panel.__init__(self)
self.setMouseTracking(True)
self.scrollable = True
def sizeHint(self):
"""Override Qt method.
Returns the widget size hint (based on the editor font size).
"""
fm = QFontMetrics(self.editor.font())
size_hint = QSize(fm.height(), fm.height())
if size_hint.width() > 16:
size_hint.setWidth(16)
return size_hint
def _draw_red(self, top, painter):
"""Draw emojis.
Arguments
---------
top: int
top of the line to draw the emoji
painter: QPainter
QPainter instance
"""
painter.setPen(QColor('white'))
font_height = self.editor.fontMetrics().height()
painter.drawText(0, top, self.sizeHint().width(),
font_height, int(Qt.AlignRight | Qt.AlignBottom),
'👀')
def paintEvent(self, event):
"""Override Qt method.
Paint emojis.
"""
super().paintEvent(event)
painter = QPainter(self)
painter.fillRect(event.rect(), self.editor.sideareas_color)
for top, __, __ in self.editor.visible_blocks:
self._draw_red(top, painter)
# --- Tests
# -----------------------------------------------------------------------------
@pytest.mark.parametrize('position', [
Panel.Position.LEFT, Panel.Position.RIGHT, Panel.Position.TOP,
Panel.Position.BOTTOM, Panel.Position.FLOATING])
def test_register_panel(setup_editor, position):
"""Test registering an example external panel in the editor."""
editor_stack, editor = setup_editor
# Register the panel
editor_stack.register_panel(EmojiPanel, position=position)
# Verify the panel is added in the panel manager
new_panel = editor.panels.get(EmojiPanel)
assert new_panel is not None
# Verify the panel is in the editorstack
assert (EmojiPanel, (), {}, position) in editor_stack.external_panels
# Verify that the panel is shown in new files
finfo = editor_stack.new('foo.py', 'utf-8', 'hola = 3\n')
editor2 = finfo.editor
new_panel = editor2.panels.get(EmojiPanel)
assert new_panel is not None
# Remove external panel
editor_stack.external_panels = []
editor.panels.remove(EmojiPanel)
editor2.panels.remove(EmojiPanel)
if __name__ == '__main__':
pytest.main()
| EmojiPanel |
python | sphinx-doc__sphinx | sphinx/ext/autosummary/generate.py | {
"start": 5834,
"end": 31629
} | class ____:
def __init__(
self,
obj: Any,
*,
config: Config,
events: EventManager,
) -> None:
self.config = config
self.events = events
self.object = obj
def get_object_type(self, name: str, value: Any) -> str:
return _get_documenter(value, self.object)
def is_skipped(self, name: str, value: Any, obj_type: _AutodocObjType) -> bool:
try:
return self.events.emit_firstresult(
'autodoc-skip-member', obj_type, name, value, False, {}
)
except Exception as exc:
logger.warning(
__(
'autosummary: failed to determine %r to be documented, '
'the following exception was raised:\n%s'
),
name,
exc,
type='autosummary',
)
return False
def scan(self, imported_members: bool) -> list[str]:
members = []
try:
analyzer = ModuleAnalyzer.for_module(self.object.__name__)
attr_docs = analyzer.find_attr_docs()
except PycodeError:
attr_docs = {}
for name in members_of(self.object, config=self.config):
try:
value = safe_getattr(self.object, name)
except AttributeError:
value = None
objtype = _get_documenter(value, self.object)
if self.is_skipped(name, value, objtype):
continue
try:
if ('', name) in attr_docs:
imported = False
elif inspect.ismodule(value): # NoQA: SIM114
imported = True
elif safe_getattr(value, '__module__') != self.object.__name__:
imported = True
else:
imported = False
except AttributeError:
imported = False
respect_module_all = not self.config.autosummary_ignore_module_all
if (
# list all members up
imported_members
# list not-imported members
or imported is False
# list members that have __all__ set
or (respect_module_all and '__all__' in dir(self.object))
):
members.append(name)
return members
def members_of(obj: Any, *, config: Config) -> Sequence[str]:
"""Get the members of ``obj``, possibly ignoring the ``__all__`` module attribute
Follows the ``config.autosummary_ignore_module_all`` setting.
"""
if config.autosummary_ignore_module_all:
return dir(obj)
else:
if (obj___all__ := getall(obj)) is not None:
# return __all__, even if empty.
return obj___all__
# if __all__ is not set, return dir(obj)
return dir(obj)
def generate_autosummary_content(
name: str,
obj: Any,
parent: Any,
template: AutosummaryRenderer,
template_name: str,
imported_members: bool,
recursive: bool,
context: dict[str, Any],
modname: str | None = None,
qualname: str | None = None,
*,
config: Config,
events: EventManager,
) -> str:
obj_type = _get_documenter(obj, parent)
ns: dict[str, Any] = {}
ns.update(context)
if obj_type == 'module':
scanner = ModuleScanner(obj, config=config, events=events)
ns['members'] = scanner.scan(imported_members)
respect_module_all = not config.autosummary_ignore_module_all
imported_members = imported_members or (
'__all__' in dir(obj) and respect_module_all
)
ns['functions'], ns['all_functions'] = _get_members(
obj_type,
obj,
{'function'},
config=config,
events=events,
imported=imported_members,
)
ns['classes'], ns['all_classes'] = _get_members(
obj_type,
obj,
{'class'},
config=config,
events=events,
imported=imported_members,
)
ns['exceptions'], ns['all_exceptions'] = _get_members(
obj_type,
obj,
{'exception'},
config=config,
events=events,
imported=imported_members,
)
ns['attributes'], ns['all_attributes'] = _get_module_attrs(name, ns['members'])
ispackage = hasattr(obj, '__path__')
if ispackage and recursive:
# Use members that are not modules as skip list, because it would then mean
# that module was overwritten in the package namespace
skip = (
ns['all_functions']
+ ns['all_classes']
+ ns['all_exceptions']
+ ns['all_attributes']
)
# If respect_module_all and module has a __all__ attribute, first get
# modules that were explicitly imported. Next, find the rest with the
# get_modules method, but only put in "public" modules that are in the
# __all__ list
#
# Otherwise, use get_modules method normally
if respect_module_all and '__all__' in dir(obj):
imported_modules, all_imported_modules = _get_members(
obj_type,
obj,
{'module'},
config=config,
events=events,
imported=True,
)
skip += all_imported_modules
public_members = getall(obj)
else:
imported_modules, all_imported_modules = [], []
public_members = None
modules, all_modules = _get_modules(
obj, skip=skip, name=name, public_members=public_members
)
ns['modules'] = imported_modules + modules
ns['all_modules'] = all_imported_modules + all_modules
elif obj_type == 'class':
ns['members'] = dir(obj)
ns['inherited_members'] = set(dir(obj)) - set(obj.__dict__.keys())
ns['methods'], ns['all_methods'] = _get_members(
obj_type,
obj,
{'method'},
config=config,
events=events,
include_public={'__init__'},
)
ns['attributes'], ns['all_attributes'] = _get_members(
obj_type,
obj,
{'attribute', 'property'},
config=config,
events=events,
)
if modname is None or qualname is None:
modname, qualname = _split_full_qualified_name(name)
if obj_type in {'method', 'attribute', 'property'}:
ns['class'] = qualname.rsplit('.', 1)[0]
if obj_type == 'class':
shortname = qualname
else:
shortname = qualname.rsplit('.', 1)[-1]
ns['fullname'] = name
ns['module'] = modname
ns['objname'] = qualname
ns['name'] = shortname
ns['objtype'] = obj_type
ns['underline'] = len(name) * '='
if template_name:
return template.render(template_name, ns)
else:
return template.render(obj_type, ns)
def _skip_member(
obj: Any, name: str, obj_type: _AutodocObjType, *, events: EventManager
) -> bool:
try:
return events.emit_firstresult(
'autodoc-skip-member', obj_type, name, obj, False, {}
)
except Exception as exc:
logger.warning(
__(
'autosummary: failed to determine %r to be documented, '
'the following exception was raised:\n%s'
),
name,
exc,
type='autosummary',
)
return False
def _get_class_members(obj: Any) -> dict[str, Any]:
"""Get members and attributes of target class."""
# TODO: Simplify
# the members directly defined in the class
obj_dict = safe_getattr(obj, '__dict__', {})
members_simpler: dict[str, Any] = {}
# enum members
if isenumclass(obj):
for name, defining_class, value in _filter_enum_dict(
obj, safe_getattr, obj_dict
):
# the order of occurrence of *name* matches obj's MRO,
# allowing inherited attributes to be shadowed correctly
if unmangled := unmangle(defining_class, name):
members_simpler[unmangled] = value
# members in __slots__
try:
subject___slots__ = getslots(obj)
if subject___slots__:
for name in subject___slots__:
members_simpler[name] = SLOTS_ATTR
except (TypeError, ValueError):
pass
# other members
for name in dir(obj):
try:
value = safe_getattr(obj, name)
if ismock(value):
value = undecorate(value)
unmangled = unmangle(obj, name)
if unmangled and unmangled not in members_simpler:
members_simpler[unmangled] = value
except AttributeError:
continue
try:
for cls in getmro(obj):
try:
modname = safe_getattr(cls, '__module__')
qualname = safe_getattr(cls, '__qualname__')
except AttributeError:
qualname = None
analyzer = None
else:
try:
analyzer = ModuleAnalyzer.for_module(modname)
analyzer.analyze()
except PycodeError:
analyzer = None
# annotation only member (ex. attr: int)
for name in getannotations(cls):
unmangled = unmangle(cls, name)
if unmangled and unmangled not in members_simpler:
members_simpler[unmangled] = INSTANCE_ATTR
# append or complete instance attributes (cf. self.attr1) if analyzer knows
if analyzer:
for ns, name in analyzer.attr_docs:
if ns == qualname and name not in members_simpler:
# otherwise unknown instance attribute
members_simpler[name] = INSTANCE_ATTR
except AttributeError:
pass
return members_simpler
def _get_module_members(obj: Any, *, config: Config) -> dict[str, Any]:
members = {}
for name in members_of(obj, config=config):
try:
members[name] = safe_getattr(obj, name)
except AttributeError:
continue
return members
def _get_all_members(
obj_type: _AutodocObjType, obj: Any, *, config: Config
) -> dict[str, Any]:
if obj_type == 'module':
return _get_module_members(obj, config=config)
elif obj_type == 'class':
return _get_class_members(obj)
return {}
def _get_members(
obj_type: _AutodocObjType,
obj: Any,
types: set[str],
*,
config: Config,
events: EventManager,
include_public: Set[str] = frozenset(),
imported: bool = True,
) -> tuple[list[str], list[str]]:
items: list[str] = []
public: list[str] = []
all_members = _get_all_members(obj_type, obj, config=config)
for name, value in all_members.items():
obj_type = _get_documenter(value, obj)
if obj_type in types:
# skip imported members if expected
if imported or getattr(value, '__module__', None) == obj.__name__:
skipped = _skip_member(value, name, obj_type, events=events)
if skipped is True:
pass
elif skipped is False:
# show the member forcedly
items.append(name)
public.append(name)
else:
items.append(name)
if name in include_public or not name.startswith('_'):
# considers member as public
public.append(name)
return public, items
def _get_module_attrs(name: str, members: Any) -> tuple[list[str], list[str]]:
"""Find module attributes with docstrings."""
attrs, public = [], []
try:
analyzer = ModuleAnalyzer.for_module(name)
attr_docs = analyzer.find_attr_docs()
for namespace, attr_name in attr_docs:
if not namespace and attr_name in members:
attrs.append(attr_name)
if not attr_name.startswith('_'):
public.append(attr_name)
except PycodeError:
pass # give up if ModuleAnalyzer fails to parse code
return public, attrs
def _get_modules(
obj: Any,
*,
skip: Sequence[str],
name: str,
public_members: Sequence[str] | None = None,
) -> tuple[list[str], list[str]]:
items: list[str] = []
public: list[str] = []
for _, modname, _ispkg in pkgutil.iter_modules(obj.__path__):
if modname in skip:
# module was overwritten in __init__.py, so not accessible
continue
fullname = f'{name}.{modname}'
try:
module = _import_module(fullname)
except ImportError:
pass
else:
if module and hasattr(module, '__sphinx_mock__'):
continue
items.append(modname)
if public_members is not None:
if modname in public_members:
public.append(modname)
else:
if not modname.startswith('_'):
public.append(modname)
return public, items
def generate_autosummary_docs(
sources: list[str],
output_dir: str | os.PathLike[str] | None = None,
suffix: str = '.rst',
base_path: str | os.PathLike[str] | None = None,
imported_members: bool = False,
app: Sphinx | None = None,
overwrite: bool = True,
encoding: str = 'utf-8',
) -> list[Path]:
"""Generate autosummary documentation for the given sources.
:returns: list of generated files (both new and existing ones)
"""
assert app is not None, 'app is required'
showed_sources = sorted(sources)
if len(showed_sources) > 20:
showed_sources = [*showed_sources[:10], '...', *showed_sources[-10:]]
logger.info(
__('[autosummary] generating autosummary for: %s'), ', '.join(showed_sources)
)
if output_dir:
logger.info(__('[autosummary] writing to %s'), output_dir)
if base_path is not None:
base_path = Path(base_path)
source_paths = [base_path / filename for filename in sources]
else:
source_paths = list(map(Path, sources))
template = AutosummaryRenderer(app)
# read
items = find_autosummary_in_files(source_paths)
# keep track of new files
new_files: list[Path] = []
all_files: list[Path] = []
filename_map = app.config.autosummary_filename_map
# write
for entry in sorted(set(items), key=str):
if entry.path is None:
# The corresponding autosummary:: directive did not have
# a :toctree: option
continue
path = output_dir or Path(entry.path).resolve()
ensuredir(path)
try:
name, obj, parent, modname = import_by_name(entry.name)
qualname = name.replace(modname + '.', '')
except ImportExceptionGroup as exc:
try:
# try to import as an instance attribute
name, obj, parent, modname = import_ivar_by_name(entry.name)
qualname = name.replace(modname + '.', '')
except ImportError as exc2:
if exc2.__cause__:
exceptions: list[BaseException] = [*exc.exceptions, exc2.__cause__]
else:
exceptions = [*exc.exceptions, exc2]
errors = list({f'* {type(e).__name__}: {e}' for e in exceptions})
logger.warning(
__('[autosummary] failed to import %s.\nPossible hints:\n%s'),
entry.name,
'\n'.join(errors),
)
continue
context: dict[str, Any] = {**app.config.autosummary_context}
content = generate_autosummary_content(
name,
obj,
parent,
template,
entry.template,
imported_members,
entry.recursive,
context,
modname,
qualname,
config=app.config,
events=app.events,
)
file_path = Path(path, filename_map.get(name, name) + suffix)
all_files.append(file_path)
if file_path.is_file():
with file_path.open(encoding=encoding) as f:
old_content = f.read()
if content == old_content:
continue
if overwrite: # content has changed
with file_path.open('w', encoding=encoding) as f:
f.write(content)
new_files.append(file_path)
else:
with open(file_path, 'w', encoding=encoding) as f:
f.write(content)
new_files.append(file_path)
# descend recursively to new files
if new_files:
all_files.extend(
generate_autosummary_docs(
[str(f) for f in new_files],
output_dir=output_dir,
suffix=suffix,
base_path=base_path,
imported_members=imported_members,
app=app,
overwrite=overwrite,
)
)
return all_files
# -- Finding documented entries in files ---------------------------------------
def find_autosummary_in_files(
filenames: Sequence[str | os.PathLike[str]],
) -> list[AutosummaryEntry]:
"""Find out what items are documented in source/*.rst.
See `find_autosummary_in_lines`.
"""
documented: list[AutosummaryEntry] = []
for filename in filenames:
with open(filename, encoding='utf-8', errors='ignore') as f:
lines = f.read().splitlines()
documented.extend(find_autosummary_in_lines(lines, filename=filename))
return documented
def find_autosummary_in_docstring(
name: str,
filename: str | os.PathLike[str] | None = None,
) -> list[AutosummaryEntry]:
"""Find out what items are documented in the given object's docstring.
See `find_autosummary_in_lines`.
"""
try:
_real_name, obj, _parent, _modname = import_by_name(name)
lines = pydoc.getdoc(obj).splitlines()
return find_autosummary_in_lines(lines, module=name, filename=filename)
except AttributeError:
pass
except ImportExceptionGroup as exc:
errors = '\n'.join({f'* {type(e).__name__}: {e}' for e in exc.exceptions})
logger.warning(f'Failed to import {name}.\nPossible hints:\n{errors}') # NoQA: G004
except SystemExit:
logger.warning(
"Failed to import '%s'; the module executes module level "
'statement and it might call sys.exit().',
name,
)
return []
def find_autosummary_in_lines(
lines: list[str],
module: str | None = None,
filename: str | os.PathLike[str] | None = None,
) -> list[AutosummaryEntry]:
"""Find out what items appear in autosummary:: directives in the
given lines.
Returns a list of (name, toctree, template) where *name* is a name
of an object and *toctree* the :toctree: path of the corresponding
autosummary directive (relative to the root of the file name), and
*template* the value of the :template: option. *toctree* and
*template* ``None`` if the directive does not have the
corresponding options set.
"""
autosummary_re = re.compile(r'^(\s*)\.\.\s+autosummary::\s*')
automodule_re = re.compile(r'^\s*\.\.\s+automodule::\s*([A-Za-z0-9_.]+)\s*$')
module_re = re.compile(r'^\s*\.\.\s+(current)?module::\s*([a-zA-Z0-9_.]+)\s*$')
autosummary_item_re = re.compile(r'^\s+(~?[_a-zA-Z][a-zA-Z0-9_.]*)\s*.*?')
recursive_arg_re = re.compile(r'^\s+:recursive:\s*$')
toctree_arg_re = re.compile(r'^\s+:toctree:\s*(.*?)\s*$')
template_arg_re = re.compile(r'^\s+:template:\s*(.*?)\s*$')
documented: list[AutosummaryEntry] = []
recursive = False
toctree: str | None = None
template = ''
current_module = module
in_autosummary = False
base_indent = ''
for line in lines:
if in_autosummary:
m = recursive_arg_re.match(line)
if m:
recursive = True
continue
m = toctree_arg_re.match(line)
if m:
toctree = m.group(1)
if filename:
toctree = str(Path(filename).parent / toctree)
continue
m = template_arg_re.match(line)
if m:
template = m.group(1).strip()
continue
if line.strip().startswith(':'):
continue # skip options
m = autosummary_item_re.match(line)
if m:
name = m.group(1).strip().removeprefix('~')
if current_module and not name.startswith(current_module + '.'):
name = f'{current_module}.{name}'
documented.append(AutosummaryEntry(name, toctree, template, recursive))
continue
if not line.strip() or line.startswith(base_indent + ' '):
continue
in_autosummary = False
m = autosummary_re.match(line)
if m:
in_autosummary = True
base_indent = m.group(1)
recursive = False
toctree = None
template = ''
continue
m = automodule_re.search(line)
if m:
current_module = m.group(1).strip()
# recurse into the automodule docstring
documented.extend(
find_autosummary_in_docstring(current_module, filename=filename)
)
continue
m = module_re.match(line)
if m:
current_module = m.group(2)
continue
return documented
def get_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(
usage='%(prog)s [OPTIONS] <SOURCE_FILE>...',
epilog=__('For more information, visit <https://www.sphinx-doc.org/>.'),
description=__("""
Generate ReStructuredText using autosummary directives.
sphinx-autogen is a frontend to sphinx.ext.autosummary.generate. It generates
the reStructuredText files from the autosummary directives contained in the
given input files.
The format of the autosummary directive is documented in the
``sphinx.ext.autosummary`` Python module and can be read using::
pydoc sphinx.ext.autosummary
"""),
)
parser.add_argument(
'--version',
action='version',
dest='show_version',
version='%%(prog)s %s' % __display_version__,
)
parser.add_argument(
'source_file', nargs='+', help=__('source files to generate rST files for')
)
parser.add_argument(
'-o',
'--output-dir',
action='store',
dest='output_dir',
help=__('directory to place all output in'),
)
parser.add_argument(
'-s',
'--suffix',
action='store',
dest='suffix',
default='rst',
help=__('default suffix for files (default: %(default)s)'),
)
parser.add_argument(
'-t',
'--templates',
action='store',
dest='templates',
default=None,
help=__('custom template directory (default: %(default)s)'),
)
parser.add_argument(
'-i',
'--imported-members',
action='store_true',
dest='imported_members',
default=False,
help=__('document imported members (default: %(default)s)'),
)
parser.add_argument(
'-a',
'--respect-module-all',
action='store_true',
dest='respect_module_all',
default=False,
help=__(
'document exactly the members in module __all__ attribute. '
'(default: %(default)s)'
),
)
parser.add_argument(
'--remove-old',
action='store_true',
dest='remove_old',
default=False,
help=__(
'Remove existing files in the output directory that were not generated'
),
)
return parser
def main(argv: Sequence[str] = (), /) -> None:
locale.setlocale(locale.LC_ALL, '')
sphinx.locale.init_console()
app = DummyApplication(sphinx.locale.get_translator())
logging.setup(app, sys.stdout, sys.stderr) # type: ignore[arg-type]
args = get_parser().parse_args(argv or sys.argv[1:])
if args.templates:
app.config.templates_path.append(str(Path(args.templates).resolve()))
app.config.autosummary_ignore_module_all = not args.respect_module_all
written_files = generate_autosummary_docs(
args.source_file,
args.output_dir,
'.' + args.suffix,
imported_members=args.imported_members,
app=app, # type: ignore[arg-type]
)
if args.remove_old:
for existing in Path(args.output_dir).glob(f'**/*.{args.suffix}'):
if existing not in written_files:
try:
existing.unlink()
except OSError as exc:
logger.warning(
__('Failed to remove %s: %s'),
existing,
exc.strerror,
type='autosummary',
)
if __name__ == '__main__':
main(sys.argv[1:])
| ModuleScanner |
python | pytorch__pytorch | torch/distributed/tensor/_ops/_mask_buffer.py | {
"start": 140,
"end": 1532
} | class ____:
data: torch.Tensor | None = None
# refcount allows shared usage of the MaskBuffer, as long as all users have the same data
refcount: int = 0
def materialize_mask(self, mask):
if self.refcount == 0:
self.data = mask
else:
assert self.data is not None
if not torch.equal(self.data, mask):
raise RuntimeError(
"MaskBuffer has been materialized with conflicting data"
)
self.refcount += 1
def release_mask(self):
if self.refcount == 0 or self.data is None:
raise RuntimeError("MaskBuffer has not been materialized")
self.refcount -= 1
if self.refcount == 0:
self.data = None
def apply_mask(self, tensor):
if self.refcount == 0 or self.data is None:
raise RuntimeError("MaskBuffer has not been materialized")
# NOTE: MaskPartial is being used by the embedding op and the gather op.
# For gather, the mask has the same dimension as the output tensor, whereas
# the output of the embedding op has an additional dimension compare to the input,
# hence the output masking logic below having two different cases.
if tensor.ndim == self.data.ndim:
tensor[self.data] = 0.0
else:
tensor[self.data, :] = 0.0
| MaskBuffer |
python | google__jax | jax/_src/interpreters/batching.py | {
"start": 42326,
"end": 51049
} | class ____:
def __setitem__(self, prim, batcher):
def wrapped(axis_data, vals, dims, **params):
return batcher(axis_data.size, axis_data.name, None, vals, dims, **params)
fancy_primitive_batchers[prim] = wrapped
axis_primitive_batchers = AxisPrimitiveBatchersProxy()
# Presence in this table allows fancy batchers to be skipped by batch traces for
# irrelevant axes. The Callable takes the params and returns a list of relevant
# axes.
skippable_batchers : dict[core.Primitive, Callable] = {}
def defvectorized(prim):
primitive_batchers[prim] = partial(vectorized_batcher, prim)
def vectorized_batcher(prim, batched_args, batch_dims, **params):
assert all(batch_dims[0] == bd for bd in batch_dims[1:]), batch_dims
return prim.bind(*batched_args, **params), batch_dims[0]
def defbroadcasting(prim):
primitive_batchers[prim] = partial(broadcast_batcher, prim)
def broadcast_batcher(prim, args, dims, **params):
"""Process a primitive with built-in broadcasting.
Args:
args: the possibly-batched arguments
dims: list or tuple of the same length as `args`, where each
entry indicates the batching state of the corresponding entry to `args`:
either an int indicating the batch dimension, or else `not_mapped`
indicating no batching.
"""
assert len(args) > 1
shape, dim = next((x.shape, d) for x, d in zip(args, dims)
if d is not not_mapped)
if all(core.definitely_equal_shape(shape, x.shape) and d == dim
for x, d in zip(args, dims) if np.ndim(x)):
# if there's only agreeing batch dims and scalars, just call the primitive
out = prim.bind(*args, **params)
return (out, (dim,) * len(out)) if prim.multiple_results else (out, dim)
else:
# We pass size of 1 here because (1) at least one argument has a real batch
# dimension and (2) all unmapped axes can have a singleton axis inserted and
# then rely on the primitive's built-in broadcasting.
args = [bdim_at_front(x, d, 1) if np.ndim(x) else x
for x, d in zip(args, dims)]
ndim = max(np.ndim(x) for x in args) # special-case scalar broadcasting
args = [_handle_scalar_broadcasting(ndim, x, d) for x, d in zip(args, dims)]
out = prim.bind(*args, **params)
return (out, (0,) * len(out)) if prim.multiple_results else (out, 0)
def _handle_scalar_broadcasting(nd, x, d):
# Callers of this utility, via broadcast_batcher() or defbroadcasting(),
# must be in a context where lax is importable.
from jax import lax # pytype: disable=import-error
if d is not_mapped or nd == np.ndim(x):
return x
else:
return lax.expand_dims(x, tuple(range(np.ndim(x), nd)))
def defreducer(prim, ident):
primitive_batchers[prim] = partial(reducer_batcher, prim, ident)
def reducer_batcher(prim, ident, batched_args, batch_dims, axes, **params):
def out_axis(axes, axis):
return int(list(np.delete(np.arange(operand.ndim), axes)).index(axis))
operand, = batched_args
bdim, = batch_dims
if isinstance(bdim, int):
axes = tuple(np.where(np.less(axes, bdim), axes, np.add(axes, 1)))
bdim_out = out_axis(axes, bdim)
if 'input_shape' in params:
params = dict(params, input_shape=operand.shape)
return prim.bind(operand, axes=axes, **params), bdim_out
elif isinstance(bdim, RaggedAxis):
assert ident is not None, "TODO Ragged batching a reduction requires an identity"
axes = tuple(np.where(np.less(axes, bdim.stacked_axis), axes, np.add(axes, 1)))
bdim_out = out_axis(axes, bdim.stacked_axis)
# For each ragged_axis, we either mask the operand there or append
# it to the set of axes that will be ragged in the result.
axes_to_mask = []
ragged_axes_out = []
for ragged_axis, segment_lengths in bdim.ragged_axes:
if ragged_axis in axes:
axes_to_mask.append((ragged_axis, segment_lengths))
else:
ragged_axes_out.append((out_axis(axes, ragged_axis), segment_lengths))
operand = mask_ragged_axes(
operand, ident, RaggedAxis(bdim.stacked_axis, tuple(axes_to_mask)))
result = prim.bind(operand, axes=axes, **params)
return result, make_batch_axis(operand.ndim, bdim_out, ragged_axes_out)
else:
assert False
def expand_dims_batcher(prim, args, dims, **params):
"""A batching rule for primitives that support matching leading batch
dimensions in all arguments.
"""
size, = {x.shape[bd] for x, bd in zip(args, dims) if bd is not not_mapped}
args = [bdim_at_front(x, bd, size) for x, bd in zip(args, dims)]
out = prim.bind(*args, **params)
return (out, (0,) * len(out)) if prim.multiple_results else (out, 0)
def mask_ragged_axes(operand: Array, ident, axis_spec: RaggedAxis) -> Array:
# TODO(mattjj, axch) Can we mask multiple axes more efficiently at
# once, rather than one at a time?
for ragged_axis, segment_lengths in axis_spec.ragged_axes:
this_axis_spec = RaggedAxis(
axis_spec.stacked_axis, ((ragged_axis, segment_lengths),))
operand = _mask_one_ragged_axis(operand, ident, this_axis_spec)
return operand
def _mask_one_ragged_axis(
operand: Array, ident, axis_spec: RaggedAxis) -> Array:
# Callers of this utility, via reducer_batcher() or defreducer(),
# must be in a context where lax is importable.
from jax import lax # pytype: disable=import-error
assert len(axis_spec.ragged_axes) == 1, "Mask just one ragged axis at a time"
ragged_axis, segment_lengths = axis_spec.ragged_axes[0]
value = ident(operand.dtype)
positions = lax.broadcasted_iota('int32', operand.shape, ragged_axis)
# TODO(mattjj, axch) can't get ._data, need to convert it
# lengths = lax.convert_element_type(segment_lengths._data, 'int32')
lengths = lax.convert_element_type(segment_lengths, 'int32')
limits = lax.broadcast_in_dim(
lengths, operand.shape, [axis_spec.stacked_axis])
mask = positions < limits
return lax.select(mask, operand, lax.broadcast(value, operand.shape))
def move_stacked_axis(operand, bdim, dst):
dst = canonicalize_axis(dst, operand.ndim)
if isinstance(bdim, int):
return moveaxis(operand, bdim, dst), dst
elif isinstance(bdim, RaggedAxis):
result = moveaxis(operand, bdim.stacked_axis, dst)
return result, bdim.move_stacked_axis(dst)
else:
raise TypeError(f"Unrecognized batch dimension type {bdim}")
### general utilities for manipulating axes on jaxpr types (not vmappables)
def broadcast(x, sz, axis, mesh_axis):
# Callers of this utility must be in a context where lax is importable.
from jax import lax # pytype: disable=import-error
shape = list(np.shape(x))
shape.insert(axis, sz)
broadcast_dims = tuple(np.delete(np.arange(len(shape)), axis))
x_aval = core.get_aval(x)
if x_aval.sharding.mesh.empty:
mesh_axis = None
new_spec = P(*tuple_insert(x_aval.sharding.spec, axis, mesh_axis))
sharding = x_aval.sharding.update(spec=new_spec)
# TODO(dougalm, yashkatariya): Delete this context manager once we figure
# out how to ensure jaxpr arguments always have the context mesh.
with mesh_lib.use_abstract_mesh(sharding.mesh):
x = lax.broadcast_in_dim(x, shape, broadcast_dims, out_sharding=sharding)
if config._check_vma.value:
# TODO(yashkatariya,parkers): don't do this, fix during fixit week 2026
spmd_names = core.get_axis_env().spmd_axis_names
if len(spmd_names) > 1:
raise NotImplementedError
if spmd_names:
x = core.pvary(x, tuple(spmd_names))
return x
def matchaxis2(axis_data, src, dst, x, sum_match=False):
return matchaxis(axis_data.name, axis_data.size, axis_data.explicit_mesh_axis,
src, dst, x, sum_match)
def matchaxis(axis_name, sz, mesh_axis, src, dst, x, sum_match=False):
  """Adjust the batch axis of `x` from position `src` to position `dst`.

  Handles moving a mapped axis, broadcasting an unmapped value onto a new
  axis, summing a mapped axis away (when `sum_match`), and packing into a
  Jumble when `dst` is `jumble_axis`.
  """
  if dst == jumble_axis:
    # Build a Jumble: batch axis at the front, element type without it.
    x = bdim_at_front(x, src, sz)
    elt_ty = x.aval.update(shape=x.shape[1:])
    aval = JumbleTy(core.Var(core.ShapedArray((), np.dtype('int32'))),
                    x.shape[0], elt_ty)
    return Jumble(aval, x)
  try:
    _ = core.get_aval(x)
  except TypeError as e:
    raise TypeError(f"Output from batched function {x!r} with type "
                    f"{type(x)} is not a valid JAX type") from e
  if src == dst:
    return x
  elif type(src) == type(dst) == int:
    return moveaxis(x, src, dst)
  elif src is not_mapped and dst is not not_mapped:
    # Unmapped value being mapped: broadcast a new axis of size `sz`.
    return broadcast(x, sz, canonicalize_axis(dst, np.ndim(x) + 1), mesh_axis)
  elif dst is not_mapped and sum_match:
    return x.sum(src)
  else:
    # Mapped output but no mapped out_axis requested: report accordingly.
    if (not isinstance(axis_name, core._TempAxisName) and
        axis_name is not core.no_axis_name):
      raise ValueError(f'vmap has mapped output ({axis_name=}) but out_axes is {dst}')
    else:
      raise SpecMatchError(None, None, None)
| AxisPrimitiveBatchersProxy |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 891718,
"end": 891926
class ____(VegaLiteSchema):
    """PositionDef schema wrapper."""

    # JSON-schema fragment this wrapper validates against.
    _schema = {"$ref": "#/definitions/PositionDef"}

    def __init__(self, *args, **kwds):
        # Pure pass-through; validation happens in VegaLiteSchema.
        super().__init__(*args, **kwds)
| PositionDef |
python | PrefectHQ__prefect | tests/cli/test_flow.py | {
"start": 402,
"end": 6462
class ____:
    """
    These tests ensure that the `prefect flow serve` interacts with Runner
    in the expected way. Behavior such as flow run
    execution and cancellation are tested in test_runner.py.
    """

    @pytest.fixture
    async def mock_runner_start(self, monkeypatch):
        # Stub out Runner.start so the CLI never actually serves flows.
        mock = AsyncMock()
        monkeypatch.setattr("prefect.cli.flow.Runner.start", mock)
        return mock

    def test_flow_serve_cli_requires_entrypoint(self):
        # Exit code 2 is Click/Typer's "usage error" for a missing argument.
        invoke_and_assert(
            command=["flow", "serve"],
            expected_code=2,
            expected_output_contains=[
                "Missing argument 'ENTRYPOINT'.",
            ],
        )

    async def test_flow_serve_cli_creates_deployment(
        self, prefect_client: PrefectClient, mock_runner_start: AsyncMock
    ):
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=[
                "flow",
                "serve",
                f"{__development_base_path__}/tests/cli/test_flow.py:hello",
                "--name",
                "test",
            ],
            expected_code=0,
            expected_output_contains=[
                "Your flow 'hello' is being served and polling for scheduled runs!",
                "To trigger a run for this flow, use the following command",
                "$ prefect deployment run 'hello/test'",
            ],
        )
        deployment = await prefect_client.read_deployment_by_name(name="hello/test")
        assert deployment is not None
        assert deployment.name == "test"
        assert (
            deployment.entrypoint
            == f"{__development_base_path__}/tests/cli/test_flow.py:hello"
        )
        mock_runner_start.assert_called_once()

    async def test_flow_serve_cli_accepts_interval(
        self, prefect_client: PrefectClient, mock_runner_start
    ):
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=[
                "flow",
                "serve",
                f"{__development_base_path__}/tests/cli/test_flow.py:hello",
                "--name",
                "test",
                "--interval",
                "3600",
            ],
            expected_code=0,
        )
        deployment = await prefect_client.read_deployment_by_name(name="hello/test")
        assert len(deployment.schedules) == 1
        schedule = deployment.schedules[0].schedule
        assert schedule.interval == datetime.timedelta(seconds=3600)

    async def test_flow_serve_cli_accepts_cron(
        self, prefect_client: PrefectClient, mock_runner_start
    ):
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=[
                "flow",
                "serve",
                f"{__development_base_path__}/tests/cli/test_flow.py:hello",
                "--name",
                "test",
                "--cron",
                "* * * * *",
            ],
            expected_code=0,
        )
        deployment = await prefect_client.read_deployment_by_name(name="hello/test")
        assert len(deployment.schedules) == 1
        assert deployment.schedules[0].schedule.cron == "* * * * *"

    async def test_flow_serve_cli_accepts_rrule(
        self, prefect_client: PrefectClient, mock_runner_start
    ):
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=[
                "flow",
                "serve",
                f"{__development_base_path__}/tests/cli/test_flow.py:hello",
                "--name",
                "test",
                "--rrule",
                "FREQ=MINUTELY;COUNT=5",
            ],
            expected_code=0,
        )
        deployment = await prefect_client.read_deployment_by_name(name="hello/test")
        assert len(deployment.schedules) == 1
        assert deployment.schedules[0].schedule.rrule == "FREQ=MINUTELY;COUNT=5"

    async def test_flow_serve_cli_accepts_limit(
        self,
        prefect_client: PrefectClient,
        mock_runner_start,
        monkeypatch: pytest.MonkeyPatch,
    ):
        # Spy on Runner.__init__ to verify the per-runner `limit` kwarg,
        # which is not observable through the created deployment.
        runner_init_args: dict[str, Any] = {}
        original_runner_init = Runner.__init__

        def runner_spy_init(self: Runner, *args: Any, **kwargs: Any):
            runner_init_args.update(kwargs)
            return original_runner_init(self, *args, **kwargs)

        monkeypatch.setattr(Runner, "__init__", runner_spy_init)
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=[
                "flow",
                "serve",
                f"{__development_base_path__}/tests/cli/test_flow.py:hello",
                "--name",
                "test",
                "--limit",
                "5",
                "--global-limit",
                "13",
            ],
            expected_code=0,
        )
        deployment = await prefect_client.read_deployment_by_name(name="hello/test")
        assert deployment.global_concurrency_limit is not None
        assert deployment.global_concurrency_limit.limit == 13
        assert runner_init_args["limit"] == 5

    async def test_flow_serve_cli_accepts_metadata_fields(
        self, prefect_client: PrefectClient, mock_runner_start
    ):
        await run_sync_in_worker_thread(
            invoke_and_assert,
            command=[
                "flow",
                "serve",
                f"{__development_base_path__}/tests/cli/test_flow.py:hello",
                "--name",
                "test",
                "--description",
                "test description",
                "--tag",
                "test",
                "--tag",
                "test2",
                "--version",
                "1.0.0",
            ],
            expected_code=0,
        )
        deployment = await prefect_client.read_deployment_by_name(name="hello/test")
        assert deployment.description == "test description"
        assert deployment.tags == ["test", "test2"]
        assert deployment.version == "1.0.0"
| TestFlowServe |
python | numba__numba | numba/cuda/vectorizers.py | {
"start": 5276,
"end": 7246
class ____(UFuncMechanism):
    """
    Provide CUDA specialization
    """
    # Stream 0 is CUDA's default (legacy) stream.
    DEFAULT_STREAM = 0

    def launch(self, func, count, stream, args):
        # One thread per output element; forall picks the launch configuration.
        func.forall(count, stream=stream)(*args)

    def is_device_array(self, obj):
        return cuda.is_cuda_array(obj)

    def as_device_array(self, obj):
        # We don't want to call as_cuda_array on objects that are already Numba
        # device arrays, because this results in exporting the array as a
        # Producer then importing it as a Consumer, which causes a
        # synchronization on the array's stream (if it has one) by default.
        # When we have a Numba device array, we can simply return it.
        if cuda.cudadrv.devicearray.is_cuda_ndarray(obj):
            return obj
        return cuda.as_cuda_array(obj)

    def to_device(self, hostary, stream):
        return cuda.to_device(hostary, stream=stream)

    def to_host(self, devary, stream):
        return devary.copy_to_host(stream=stream)

    def allocate_device_array(self, shape, dtype, stream):
        return cuda.device_array(shape=shape, dtype=dtype, stream=stream)

    def broadcast_device(self, ary, shape):
        # Broadcast by building a view with zero strides on every axis that
        # differs from (or is missing in) the target shape -- no data copy.
        ax_differs = [ax for ax in range(len(shape))
                      if ax >= ary.ndim
                      or ary.shape[ax] != shape[ax]]
        missingdim = len(shape) - len(ary.shape)
        strides = [0] * missingdim + list(ary.strides)
        for ax in ax_differs:
            strides[ax] = 0
        return cuda.cudadrv.devicearray.DeviceNDArray(shape=shape,
                                                      strides=strides,
                                                      dtype=ary.dtype,
                                                      gpu_data=ary.gpu_data)
vectorizer_stager_source = '''
def __vectorized_{name}({args}, __out__):
__tid__ = __cuda__.grid(1)
if __tid__ < __out__.shape[0]:
__out__[__tid__] = __core__({argitems})
'''
| CUDAUFuncMechanism |
python | astropy__astropy | astropy/io/votable/exceptions.py | {
"start": 13139,
"end": 13372
class ____(VOTableSpecWarning):
    """
    The attribute must be a valid URI as defined in `RFC 2396
    <https://www.ietf.org/rfc/rfc2396.txt>`_.
    """

    # Template filled with the offending value when the warning is emitted.
    message_template = "'{}' is not a valid URI"
    # Fallback format args used when no specific value is supplied.
    default_args = ("x",)
| W05 |
python | pypa__setuptools | setuptools/_distutils/errors.py | {
"start": 2309,
"end": 2576
class ____(DistutilsError):
    """Raised for operations that are impossible on the current platform
    but would work on some other platform -- e.g. attempting to compile C
    sources on a platform for which no CCompiler subclass exists."""
| DistutilsPlatformError |
python | kamyu104__LeetCode-Solutions | Python/maximum-candies-you-can-get-from-boxes.py | {
"start": 52,
"end": 1000
class ____(object):
    def maxCandies(self, status, candies, keys, containedBoxes, initialBoxes):
        """
        :type status: List[int]
        :type candies: List[int]
        :type keys: List[List[int]]
        :type containedBoxes: List[List[int]]
        :type initialBoxes: List[int]
        :rtype: int
        """
        # BFS over the boxes we currently hold.  Locked boxes are re-queued;
        # if a full pass over the queue opens nothing, no further progress
        # is possible and we stop.
        result = 0
        q = collections.deque(initialBoxes)
        while q:
            changed = False
            # `range` (not the Python-2-only `xrange`) keeps this working on
            # both Python 2 and Python 3.
            for _ in range(len(q)):
                box = q.popleft()
                if not status[box]:
                    # Still locked: retry after more keys may be found.
                    q.append(box)
                    continue
                changed = True
                result += candies[box]
                for contained_key in keys[box]:
                    # A key permanently unlocks its box.
                    status[contained_key] = 1
                for contained_box in containedBoxes[box]:
                    q.append(contained_box)
            if not changed:
                # Deadlocked: every remaining box is locked with no key coming.
                break
        return result
| Solution |
python | kamyu104__LeetCode-Solutions | Python/unique-number-of-occurrences.py | {
"start": 420,
"end": 656
class ____(object):
    def uniqueOccurrences(self, arr):
        """
        :type arr: List[int]
        :rtype: bool
        """
        count = collections.Counter(arr)
        # All occurrence counts are distinct iff the set of counts is as
        # large as the number of distinct values.  `.values()` (unlike the
        # Python-2-only `.itervalues()`) works on both Python 2 and 3.
        return len(count) == len(set(count.values()))
| Solution2 |
python | wandb__wandb | wandb/vendor/graphql-core-1.1/wandb_graphql/validation/rules/fragments_on_composite_types.py | {
"start": 160,
"end": 1333
class ____(ValidationRule):
    # Validates that fragments only condition on composite types
    # (objects, interfaces, unions) -- never scalars or enums.

    def enter_InlineFragment(self, node, key, parent, path, ancestors):
        type = self.context.get_type()
        if node.type_condition and type and not is_composite_type(type):
            self.context.report_error(GraphQLError(
                self.inline_fragment_on_non_composite_error_message(print_ast(node.type_condition)),
                [node.type_condition]
            ))

    def enter_FragmentDefinition(self, node, key, parent, path, ancestors):
        type = self.context.get_type()
        if type and not is_composite_type(type):
            self.context.report_error(GraphQLError(
                self.fragment_on_non_composite_error_message(node.name.value, print_ast(node.type_condition)),
                [node.type_condition]
            ))

    @staticmethod
    def inline_fragment_on_non_composite_error_message(type):
        return 'Fragment cannot condition on non composite type "{}".'.format(type)

    @staticmethod
    def fragment_on_non_composite_error_message(frag_name, type):
        return 'Fragment "{}" cannot condition on non composite type "{}".'.format(frag_name, type)
| FragmentsOnCompositeTypes |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis39.py | {
"start": 315,
"end": 1600
class ____(ExcelComparisonTest):
    """
    Test file created by XlsxWriter against a file created by Excel.
    """

    def setUp(self):
        self.set_filename("chart_axis39.xlsx")

    def test_create_file(self):
        """Test the creation of a simple XlsxWriter file."""
        workbook = Workbook(self.got_filename)

        worksheet = workbook.add_worksheet()
        chart = workbook.add_chart({"type": "scatter"})

        # Fixed axis ids so the output matches the Excel reference file.
        chart.axis_ids = [45884928, 45883392]

        data = [
            [1, 2, 3, 4, 5],
            [2, 4, 6, 8, 10],
            [3, 6, 9, 12, 15],
        ]

        worksheet.write_column("A1", data[0])
        worksheet.write_column("B1", data[1])
        worksheet.write_column("C1", data[2])

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$B$1:$B$5",
            }
        )

        chart.add_series(
            {
                "categories": "=Sheet1!$A$1:$A$5",
                "values": "=Sheet1!$C$1:$C$5",
            }
        )

        # The feature under test: axes with their lines hidden.
        chart.set_x_axis({"line": {"none": True}})
        chart.set_y_axis({"line": {"none": True}})

        worksheet.insert_chart("E9", chart)

        workbook.close()

        self.assertExcelEqual()
| TestCompareXLSXFiles |
python | dask__dask | dask/dataframe/dask_expr/_groupby.py | {
"start": 30524,
"end": 30776
class ____(GroupByApply):
    # GroupByApply specialization whose grouped operation is `transform`.

    @functools.cached_property
    def grp_func(self):
        # Bind the user-supplied function into the shared slice-transform helper.
        return functools.partial(groupby_slice_transform, func=self.func)
def _fillna(group, *, what, **kwargs):
    """Invoke the method named by ``what`` on ``group``, forwarding ``kwargs``."""
    bound_method = getattr(group, what)
    return bound_method(**kwargs)
| GroupByTransform |
python | ansible__ansible | lib/ansible/module_utils/facts/virtual/base.py | {
"start": 1820,
"end": 2465
class ____(BaseFactCollector):
    # Collector wrapper for virtualization facts.
    name = 'virtual'
    _fact_class = Virtual
    _fact_ids = set([
        'virtualization_type',
        'virtualization_role',
        'virtualization_tech_guest',
        'virtualization_tech_host',
    ])  # type: t.Set[str]

    def collect(self, module=None, collected_facts=None):
        # Virtualization detection requires a module to run commands; without
        # one there is nothing to collect.
        collected_facts = collected_facts or {}
        if not module:
            return {}

        # Network munges cached_facts by side effect, so give it a copy
        facts_obj = self._fact_class(module)

        facts_dict = facts_obj.populate(collected_facts=collected_facts)

        return facts_dict
| VirtualCollector |
python | tensorflow__tensorflow | tensorflow/python/distribute/cross_device_ops.py | {
"start": 33893,
"end": 39352
class ____(CrossDeviceOps):
  """All-reduce implementation of CrossDeviceOps.

  It performs all-reduce when applicable using NCCL or hierarchical copy. For
  the batch API, tensors will be repacked or aggregated for more efficient
  cross-device transportation.

  For reduces that are not all-reduce, it falls back to
  `tf.distribute.ReductionToOneDevice`.
  """

  def __init__(self, all_reduce_alg="nccl", num_packs=1):
    """Initializes the object.

    Args:
      all_reduce_alg: the all-reduce algorithm to use, currently only "nccl" or
        "hierarchical_copy" are supported.
      num_packs: a non-negative integer. The number of packs to split values
        into. If zero, no packing will be done.
    """
    self._all_reduce_alg = all_reduce_alg
    self._num_packs = num_packs
    # Fallback path for reductions that cannot use all-reduce.
    self._simple_cross_replica_ops = ReductionToOneDevice()
    super(AllReduceCrossDeviceOps, self).__init__()

  def reduce_implementation(self, reduce_op, per_replica_value, destinations,
                            options):
    del options  # Unused.
    # To use NCCL or all-reduce, source and destination devices should match,
    # and none of the devices should be CPU.
    if (_devices_match(per_replica_value, destinations) and
        not any("cpu" in d.lower() for d in get_devices_from(destinations))):
      return self._batch_all_reduce(reduce_op, [per_replica_value])[0]
    else:
      return self._simple_cross_replica_ops.reduce(reduce_op, per_replica_value,
                                                   destinations)

  def batch_reduce_implementation(self, reduce_op, value_destination_pairs,
                                  options):
    if _all_devices_match(value_destination_pairs):
      return self._batch_all_reduce(reduce_op,
                                    [v[0] for v in value_destination_pairs])
    else:
      # Mixed destinations: fall back to per-value reduction.
      return [
          self.reduce_implementation(reduce_op, value, dest, options)
          for value, dest in value_destination_pairs
      ]

  def _batch_all_reduce(self, reduce_op, per_replica_values):
    """All-reduce algorithm in a batch."""
    # Dense and sparse values take different paths; split, reduce each set,
    # then stitch results back into the original order.
    dense_values, dense_indices, sparse_values, sparse_indices = (
        cross_device_utils.split_by_sparsity(per_replica_values))
    if dense_values:
      dense_results = self._do_batch_all_reduce(reduce_op, dense_values)
    else:
      dense_results = []
    if sparse_values:
      sparse_results = self._do_batch_all_reduce_sparse(reduce_op,
                                                        sparse_values)
    else:
      sparse_results = []
    return cross_device_utils.stitch_values(((dense_results, dense_indices),
                                             (sparse_results, sparse_indices)))

  def _do_batch_all_reduce(self, reduce_op, dense_values):
    """Run batch all-reduces."""
    logging.log_first_n(
        logging.INFO,
        "batch_all_reduce: %d all-reduces with algorithm = %s, num_packs = %d" %
        (len(dense_values), self._all_reduce_alg, self._num_packs), 10)

    destinations = dense_values[0]._devices  # pylint: disable=protected-access
    grouped = _group_value_by_device(dense_values)

    # device_grad_packs:
    # [[(t0_gpu0, None), (t1_gpu0, None)], [(t0_gpu1, None), (t1_gpu1, None)]]
    device_grad_packs, tensor_packer = _pack_tensors(grouped, self._num_packs)

    # The actual aggregation of the repacked gradients. Note that they are
    # sharded among different aggregation trees. So it is important to strike
    # the balance on num_splits.
    if self._all_reduce_alg == "nccl":
      # TODO(yuefengz): merge this into the all-reduce library.
      reduced = cross_device_utils.aggregate_gradients_using_nccl(
          device_grad_packs)
    else:
      # TODO(yuefengz): check that gpu ids in `destinations` are in ascending
      # order.
      reduced = (
          cross_device_utils.aggregate_gradients_using_hierarchical_copy(
              destinations, device_grad_packs))

    reduced = _unpack_tensors(reduced, tensor_packer)
    return _ungroup_and_make_mirrored(reduced, dense_values[0], reduce_op)

  def _do_batch_all_reduce_sparse(self, reduce_op, sparse_values):
    """Run batch all-reduce for sparse values."""
    logging.log_first_n(
        logging.WARN,
        "Efficient allreduce is not supported for %d IndexedSlices" %
        len(sparse_values), 10)
    # Use `sparse_values` as destinations to do all-reduces. It is effectively
    # an allgather under the hood but not an efficient one.
    return self._simple_cross_replica_ops.batch_reduce(
        reduce_op, zip(sparse_values, sparse_values))

  def _gather_implementation(self, per_replica_value, destinations, axis,
                             options):
    logging.log_first_n(
        logging.WARN,
        "gather/all_gather with NCCL or HierarchicalCopy is not supported. "
        "Falling back to gather on one device and then broadcast. We're working"
        " on a more efficient implementation.", 3)
    return ReductionToOneDevice()._gather(per_replica_value, destinations, axis,  # pylint: disable=protected-access
                                          options)
# For compatibility with code using the old name of `AllReduceCrossDeviceOps`.
AllReduceCrossTowerOps = AllReduceCrossDeviceOps

# (algorithm name, shard count, size limit) describing one all-reduce spec.
AllReduceSpecTuple = collections.namedtuple("AllReduceSpecTuple",
                                            "alg shards limit")
@tf_export("distribute.NcclAllReduce")
| AllReduceCrossDeviceOps |
python | pytorch__pytorch | test/onnx/test_models.py | {
"start": 1578,
"end": 10932
class ____(pytorch_test_common.ExportTestCase):
    """ONNX export tests for a collection of torchvision and custom models,
    verified numerically against the Caffe2 backend via `verify`."""

    opset_version = 9  # Caffe2 doesn't support the default.
    keep_initializers_as_inputs = False

    def exportTest(self, model, inputs, rtol=1e-2, atol=1e-7, **kwargs):
        # Trace + lint first to catch graph problems, then run the numeric
        # comparison between PyTorch and the Caffe2 ONNX backend.
        import caffe2.python.onnx.backend as backend

        with torch.onnx.select_model_mode_for_export(
            model, torch.onnx.TrainingMode.EVAL
        ):
            graph = torch.onnx.utils._trace(model, inputs, OperatorExportTypes.ONNX)
            torch._C._jit_pass_lint(graph)

            verify(
                model,
                inputs,
                backend,
                rtol=rtol,
                atol=atol,
                opset_version=self.opset_version,
            )

    def test_ops(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(DummyNet()), toC(x))

    def test_prelu(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(PReluNet(), x)

    @skipScriptTest()
    def test_concat(self):
        input_a = Variable(torch.randn(BATCH_SIZE, 3))
        input_b = Variable(torch.randn(BATCH_SIZE, 3))
        inputs = ((toC(input_a), toC(input_b)),)
        self.exportTest(toC(ConcatNet()), inputs)

    def test_permute(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 10, 12))
        self.exportTest(PermuteNet(), x)

    @skipScriptTest()
    def test_embedding_sequential_1(self):
        x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))
        self.exportTest(EmbeddingNetwork1(), x)

    @skipScriptTest()
    def test_embedding_sequential_2(self):
        x = Variable(torch.randint(0, 10, (BATCH_SIZE, 3)))
        self.exportTest(EmbeddingNetwork2(), x)

    @unittest.skip("This model takes too much memory")
    def test_srresnet(self):
        x = Variable(torch.randn(1, 3, 224, 224).fill_(1.0))
        self.exportTest(
            toC(SRResNet(rescale_factor=4, n_filters=64, n_blocks=8)), toC(x)
        )

    @skipIfNoLapack
    def test_super_resolution(self):
        x = Variable(torch.randn(BATCH_SIZE, 1, 224, 224).fill_(1.0))
        self.exportTest(toC(SuperResolutionNet(upscale_factor=3)), toC(x), atol=1e-6)

    def test_alexnet(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(alexnet()), toC(x))

    def test_mnist(self):
        x = Variable(torch.randn(BATCH_SIZE, 1, 28, 28).fill_(1.0))
        self.exportTest(toC(MNIST()), toC(x))

    @unittest.skip("This model takes too much memory")
    def test_vgg16(self):
        # VGG 16-layer model (configuration "D")
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(vgg16()), toC(x))

    @unittest.skip("This model takes too much memory")
    def test_vgg16_bn(self):
        # VGG 16-layer model (configuration "D") with batch normalization
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(vgg16_bn()), toC(x))

    @unittest.skip("This model takes too much memory")
    def test_vgg19(self):
        # VGG 19-layer model (configuration "E")
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(vgg19()), toC(x))

    @unittest.skip("This model takes too much memory")
    def test_vgg19_bn(self):
        # VGG 19-layer model (configuration "E") with batch normalization
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(vgg19_bn()), toC(x))

    def test_resnet(self):
        # ResNet50 model
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(resnet50()), toC(x), atol=1e-6)

    # This test is numerically unstable. Sporadic single element mismatch occurs occasionally.
    def test_inception(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 299, 299))
        self.exportTest(toC(inception_v3()), toC(x), acceptable_error_percentage=0.01)

    def test_squeezenet(self):
        # SqueezeNet: AlexNet-level accuracy with 50x fewer parameters and
        # <0.5MB model size
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        sqnet_v1_0 = SqueezeNet(version=1.1)
        self.exportTest(toC(sqnet_v1_0), toC(x))

        # SqueezeNet 1.1 has 2.4x less computation and slightly fewer params
        # than SqueezeNet 1.0, without sacrificing accuracy.
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        sqnet_v1_1 = SqueezeNet(version=1.1)
        self.exportTest(toC(sqnet_v1_1), toC(x))

    def test_densenet(self):
        # Densenet-121 model
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(densenet121()), toC(x), rtol=1e-2, atol=1e-5)

    @skipScriptTest()
    def test_dcgan_netD(self):
        netD = _netD(1)
        netD.apply(weights_init)
        input = Variable(torch.empty(bsz, 3, imgsz, imgsz).normal_(0, 1))
        self.exportTest(toC(netD), toC(input))

    @skipScriptTest()
    def test_dcgan_netG(self):
        netG = _netG(1)
        netG.apply(weights_init)
        input = Variable(torch.empty(bsz, nz, 1, 1).normal_(0, 1))
        self.exportTest(toC(netG), toC(input))

    @skipIfUnsupportedMinOpsetVersion(10)
    def test_fake_quant(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(FakeQuantNet()), toC(x))

    @skipIfUnsupportedMinOpsetVersion(10)
    def test_qat_resnet_pertensor(self):
        # Quantize ResNet50 model
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        qat_resnet50 = resnet50()

        # Use per tensor for weight. Per channel support will come with opset 13
        qat_resnet50.qconfig = quantization.QConfig(
            activation=quantization.default_fake_quant,
            weight=quantization.default_fake_quant,
        )
        quantization.prepare_qat(qat_resnet50, inplace=True)
        qat_resnet50.apply(torch.ao.quantization.enable_observer)
        qat_resnet50.apply(torch.ao.quantization.enable_fake_quant)

        _ = qat_resnet50(x)
        # Freeze observed ranges before export.
        for module in qat_resnet50.modules():
            if isinstance(module, quantization.FakeQuantize):
                module.calculate_qparams()
        qat_resnet50.apply(torch.ao.quantization.disable_observer)
        self.exportTest(toC(qat_resnet50), toC(x))

    @skipIfUnsupportedMinOpsetVersion(13)
    def test_qat_resnet_per_channel(self):
        # Quantize ResNet50 model
        x = torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0)
        qat_resnet50 = resnet50()

        qat_resnet50.qconfig = quantization.QConfig(
            activation=quantization.default_fake_quant,
            weight=quantization.default_per_channel_weight_fake_quant,
        )
        quantization.prepare_qat(qat_resnet50, inplace=True)
        qat_resnet50.apply(torch.ao.quantization.enable_observer)
        qat_resnet50.apply(torch.ao.quantization.enable_fake_quant)

        _ = qat_resnet50(x)
        # Freeze observed ranges before export.
        for module in qat_resnet50.modules():
            if isinstance(module, quantization.FakeQuantize):
                module.calculate_qparams()
        qat_resnet50.apply(torch.ao.quantization.disable_observer)
        self.exportTest(toC(qat_resnet50), toC(x))

    @skipScriptTest(skip_before_opset_version=15, reason="None type in outputs")
    def test_googlenet(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(googlenet()), toC(x), rtol=1e-3, atol=1e-5)

    def test_mnasnet(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(mnasnet1_0()), toC(x), rtol=1e-3, atol=1e-5)

    def test_mobilenet(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(mobilenet_v2()), toC(x), rtol=1e-3, atol=1e-5)

    @skipScriptTest()  # prim_data
    def test_shufflenet(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(toC(shufflenet_v2_x1_0()), toC(x), rtol=1e-3, atol=1e-5)

    @skipIfUnsupportedMinOpsetVersion(11)
    def test_fcn(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(
            toC(fcn_resnet101(weights=None, weights_backbone=None)),
            toC(x),
            rtol=1e-3,
            atol=1e-5,
        )

    @skipIfUnsupportedMinOpsetVersion(11)
    def test_deeplab(self):
        x = Variable(torch.randn(BATCH_SIZE, 3, 224, 224).fill_(1.0))
        self.exportTest(
            toC(deeplabv3_resnet101(weights=None, weights_backbone=None)),
            toC(x),
            rtol=1e-3,
            atol=1e-5,
        )

    def test_r3d_18_video(self):
        x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
        self.exportTest(toC(r3d_18()), toC(x), rtol=1e-3, atol=1e-5)

    def test_mc3_18_video(self):
        x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
        self.exportTest(toC(mc3_18()), toC(x), rtol=1e-3, atol=1e-5)

    def test_r2plus1d_18_video(self):
        x = Variable(torch.randn(1, 3, 4, 112, 112).fill_(1.0))
        self.exportTest(toC(r2plus1d_18()), toC(x), rtol=1e-3, atol=1e-5)
if __name__ == "__main__":
    # Allow running this test module directly.
    common_utils.run_tests()
| TestModels |
python | aio-libs__aiohttp | aiohttp/resolver.py | {
"start": 625,
"end": 2607
class ____(AbstractResolver):
    """Threaded resolver.

    Uses an Executor for synchronous getaddrinfo() calls.
    concurrent.futures.ThreadPoolExecutor is used by default.
    """

    def __init__(self) -> None:
        self._loop = asyncio.get_running_loop()

    async def resolve(
        self, host: str, port: int = 0, family: socket.AddressFamily = socket.AF_INET
    ) -> list[ResolveResult]:
        # Delegate to the event loop's executor-backed getaddrinfo.
        infos = await self._loop.getaddrinfo(
            host,
            port,
            type=socket.SOCK_STREAM,
            family=family,
            flags=_AI_ADDRCONFIG,
        )

        hosts: list[ResolveResult] = []
        for family, _, proto, _, address in infos:
            if family == socket.AF_INET6:
                if len(address) < 3:
                    # IPv6 is not supported by Python build,
                    # or IPv6 is not enabled in the host
                    continue
                if address[3]:
                    # This is essential for link-local IPv6 addresses.
                    # LL IPv6 is a VERY rare case. Strictly speaking, we should use
                    # getnameinfo() unconditionally, but performance makes sense.
                    resolved_host, _port = await self._loop.getnameinfo(
                        address, _NAME_SOCKET_FLAGS
                    )
                    port = int(_port)
                else:
                    resolved_host, port = address[:2]
            else:  # IPv4
                assert family == socket.AF_INET
                resolved_host, port = address  # type: ignore[misc]
            hosts.append(
                ResolveResult(
                    hostname=host,
                    host=resolved_host,
                    port=port,
                    family=family,
                    proto=proto,
                    flags=_NUMERIC_SOCKET_FLAGS,
                )
            )

        return hosts

    async def close(self) -> None:
        # Nothing to release; the executor is owned by the event loop.
        pass
| ThreadedResolver |
python | kamyu104__LeetCode-Solutions | Python/total-distance-traveled.py | {
"start": 36,
"end": 370
class ____(object):
    def distanceTraveled(self, mainTank, additionalTank):
        """
        :type mainTank: int
        :type additionalTank: int
        :rtype: int
        """
        # Every 5 liters burned triggers a 1-liter transfer, so each transfer
        # effectively costs 4 liters of the main tank.  The number of
        # transfers is capped by the additional tank's supply.
        LITERS_PER_TRANSFER, TRANSFER_AMOUNT, KM_PER_LITER = 5, 1, 10
        transfers = min(
            (mainTank - TRANSFER_AMOUNT) // (LITERS_PER_TRANSFER - TRANSFER_AMOUNT),
            additionalTank,
        )
        total_fuel = mainTank + transfers * TRANSFER_AMOUNT
        return total_fuel * KM_PER_LITER
| Solution |
python | jina-ai__jina | jina/clients/websocket.py | {
"start": 222,
"end": 837
class ____(WebSocketBaseClient, PostMixin, ProfileMixin, HealthCheckMixin):
    """A client connecting to a Gateway using WebSocket protocol.

    Instantiate this class through the :meth:`jina.Client` convenience method.

    EXAMPLE USAGE

    .. code-block:: python

        from jina import Client
        from docarray import Document

        # select host address to connect to
        c = Client(
            protocol='websocket', asyncio=False, host='ws://my.awesome.flow:1234'
        )  # returns WebSocketClient instance
        c.post(on='/index', inputs=Document(text='hello!'))
    """
| WebSocketClient |
python | Textualize__textual | src/textual/reactive.py | {
"start": 14780,
"end": 16065
class ____(Reactive[ReactiveType]):
    """Create a reactive attribute.

    Args:
        default: A default value or callable that returns a default.
        layout: Perform a layout on change.
        repaint: Perform a repaint on change.
        init: Call watchers on initialize (post mount).
        always_update: Call watchers even when the new value equals the old value.
        recompose: Compose the widget again when the attribute changes.
        bindings: Refresh bindings when the reactive changes.
        toggle_class: An optional TCSS classname(s) to toggle based on the truthiness of the value.
    """

    def __init__(
        self,
        default: ReactiveType | Callable[[], ReactiveType] | Initialize[ReactiveType],
        *,
        layout: bool = False,
        repaint: bool = True,
        init: bool = True,
        always_update: bool = False,
        recompose: bool = False,
        bindings: bool = False,
        toggle_class: str | None = None,
    ) -> None:
        # Pure pass-through to the Reactive base; this subclass exists to
        # provide the lowercase `reactive`-style public name with keyword-only
        # configuration.
        super().__init__(
            default,
            layout=layout,
            repaint=repaint,
            init=init,
            always_update=always_update,
            recompose=recompose,
            bindings=bindings,
            toggle_class=toggle_class,
        )
| reactive |
python | apache__airflow | providers/common/sql/src/airflow/providers/common/sql/sensors/sql.py | {
"start": 1215,
"end": 5470
class ____(BaseSensorOperator):
    """
    Run a SQL statement repeatedly until a criteria is met.

    This will keep trying until success or failure criteria are met, or if the
    first cell is not either ``0``, ``'0'``, ``''``, or ``None``. Optional
    success and failure callables are called with the first cell returned as the
    argument.

    If success callable is defined, the sensor will keep retrying until the
    criteria is met. If failure callable is defined, and the criteria is met,
    the sensor will raise AirflowException. Failure criteria is evaluated before
    success criteria. A fail_on_empty boolean can also be passed to the sensor
    in which case it will fail if no rows have been returned.

    :param conn_id: The connection to run the sensor against
    :param sql: The SQL to run. To pass, it needs to return at least one cell
        that contains a non-zero / empty string value.
    :param parameters: The parameters to render the SQL query with (optional).
    :param success: Success criteria for the sensor is a Callable that takes the output
        of selector as the only argument, and returns a boolean (optional).
    :param failure: Failure criteria for the sensor is a Callable that takes the output
        of selector as the only argument and returns a boolean (optional).
    :param selector: Function which takes the resulting row and transforms it before
        it is passed to success or failure (optional). Takes the first cell by default.
    :param fail_on_empty: Explicitly fail on no rows returned.
    :param hook_params: Extra config params to be passed to the underlying hook.
        Should match the desired hook constructor params.
    """

    template_fields: Sequence[str] = ("sql", "hook_params", "parameters")
    template_ext: Sequence[str] = (".hql", ".sql")
    ui_color = "#7c7287"

    def __init__(
        self,
        *,
        conn_id: str,
        sql: str,
        parameters: Mapping[str, Any] | None = None,
        success: Callable[[Any], bool] | None = None,
        failure: Callable[[Any], bool] | None = None,
        selector: Callable[[tuple[Any]], Any] = itemgetter(0),
        fail_on_empty: bool = False,
        hook_params: Mapping[str, Any] | None = None,
        **kwargs,
    ) -> None:
        self.conn_id = conn_id
        self.sql = sql
        self.parameters = parameters
        self.success = success
        self.failure = failure
        self.selector = selector
        self.fail_on_empty = fail_on_empty
        self.hook_params = hook_params
        super().__init__(**kwargs)

    def _get_hook(self) -> DbApiHook:
        # Resolve the connection to a hook and verify it speaks the DB API.
        conn = BaseHook.get_connection(self.conn_id)
        hook = conn.get_hook(hook_params=self.hook_params)
        if not isinstance(hook, DbApiHook):
            raise AirflowException(
                f"The connection type is not supported by {self.__class__.__name__}. "
                f"The associated hook should be a subclass of `DbApiHook`. Got {hook.__class__.__name__}"
            )
        return hook

    def poke(self, context: Context) -> bool:
        hook = self._get_hook()

        self.log.info("Poking: %s (with parameters %s)", self.sql, self.parameters)
        records = hook.get_records(self.sql, self.parameters)
        if not records:
            if self.fail_on_empty:
                message = "No rows returned, raising as per fail_on_empty flag"
                raise AirflowException(message)
            return False

        # Only the first row is inspected; `selector` extracts the value used
        # for the success/failure decision (first cell by default).
        condition = self.selector(records[0])
        # Failure criteria is deliberately evaluated before success criteria.
        if self.failure is not None:
            if callable(self.failure):
                if self.failure(condition):
                    message = f"Failure criteria met. self.failure({condition}) returned True"
                    raise AirflowException(message)
            else:
                message = f"self.failure is present, but not callable -> {self.failure}"
                raise AirflowException(message)

        if self.success is not None:
            if callable(self.success):
                return self.success(condition)
            message = f"self.success is present, but not callable -> {self.success}"
            raise AirflowException(message)
        return bool(condition)
| SqlSensor |
python | PyCQA__pylint | tests/functional/m/multiple_statements.py | {
"start": 928,
"end": 986
# Deliberate violation for pylint's functional-test harness: the class body
# shares a line with the header, and the trailing `# [multiple-statements]`
# marker names the message pylint is expected to emit -- do not remove it.
class ____(Exception): a='a' # [multiple-statements]
| MyError |
python | graphql-python__graphene | graphene/types/scalars.py | {
"start": 1401,
"end": 2345
class ____(Scalar):
    """
    The `Int` scalar type represents non-fractional signed whole numeric
    values. Int can represent values between -(2^53 - 1) and 2^53 - 1 since
    represented in JSON as double-precision floating point numbers specified
    by [IEEE 754](http://en.wikipedia.org/wiki/IEEE_floating_point).
    """

    @staticmethod
    def coerce_int(value):
        # Accept ints, numeric strings, and floats; out-of-range or
        # unparseable values coerce to Undefined rather than raising.
        try:
            num = int(value)
        except ValueError:
            try:
                num = int(float(value))
            except ValueError:
                return Undefined
        if MIN_INT <= num <= MAX_INT:
            return num
        return Undefined

    serialize = coerce_int
    parse_value = coerce_int

    @staticmethod
    def parse_literal(ast, _variables=None):
        # Only integer literal AST nodes within range parse successfully.
        if isinstance(ast, IntValueNode):
            num = int(ast.value)
            if MIN_INT <= num <= MAX_INT:
                return num
        return Undefined
| Int |
python | pytorch__pytorch | torchgen/_autoheuristic/train_decision.py | {
"start": 818,
"end": 22664
} | class ____(AHTrain):
def __init__(self):
super().__init__()
def debug_time(self, row, top_k_choices):
choices_feedback = json.loads(row["choice2time"])
timings = sorted(choices_feedback.items(), key=lambda x: x[1])
for choice, time in timings:
result = f"{choice} {time}"
if choice in top_k_choices:
result += " TOPK"
print(result)
def is_unsafe_leaf(self, row, predicted_config, choice2time):
"""
Can be overridden by subclasses to define their own logic for deciding when a leaf is unsafe. Returns a sample
that landed in the leaf, the choice predicted by the tree, and a dictionary that maps each choice to the
execution time. One can for example decide to mark a leaf as unsafe if the predicted choice is 2x slower
than the fastest choice.
If a leaf is unsafe, the learned heuristic will always return 'unsure' if an input lands in that leaf.
"""
return False
def get_unsafe_leaves(self, model, df, feature_columns):
"""
Given a trained decision tree, and a dataframe containing the training data, returns a list of unsafe leaves.
"""
X = df[feature_columns]
leaf_ids = model.apply(X)
unique_leaves = np.unique(leaf_ids)
unsafe_leaves = []
# Iterate over each leaf
for leaf in unique_leaves:
leaf_mask = leaf_ids == leaf
# Get samples that land in this leaf
leaf_X = X[leaf_mask]
predicted_config = model.predict(leaf_X.iloc[[0]])[0]
# For each sample, check if we should mark the leaf as unsafe
for idx, row in leaf_X.iterrows():
choice2time = json.loads(df.loc[idx, "choice2time"])
if self.is_unsafe_leaf(row, predicted_config, choice2time):
unsafe_leaves.append(leaf)
break
return unsafe_leaves
def get_allowed_wrong_prediction_pct(self):
"""
This is used to determine a threshold for when a learned heuristic returns 'unsure'.
If this function returns 0.01, we will set the probability required for the decision tree to return a decision
such that at most 1% of the predictions will be wrong on the validation set.
"""
return 0.01
def get_grid_search_values(self):
"""
Standard values for grid search. Can be overridden.
"""
return {
"max_depth": [5, 6, 7],
"min_samples_leaf": [1, 5, 10, 0.01, 0.05, 0.02],
"criterion": ["gini", "entropy"],
}
def predict(self, model, df, feature_columns):
"""
Returns the predictions, probabilities, and leaf ids for a given dataframe.
"""
predictions = model.predict(df[feature_columns])
proba = model.predict_proba(df[feature_columns])
leaf_ids = model.apply(df[feature_columns])
return predictions, proba, leaf_ids
def ranking_num_choices(self):
# if the heuristic is used for ranking, this function returns the number
# of choices that the heuristic will return
if self.args.ranking is None:
return 5
return self.args.ranking
def train_and_evaluate_models(
self,
datasets,
max_depths,
min_samples_leafs,
criterion_list,
feature_columns,
ranking=False,
):
"""
Does a grid search over max_depths, min_samples_leafs, and criterion_list and returns the best model.
"""
results = []
best_model = None
best_model_safe_proba = 0
best_model_num_correct = 0
best_model_unsafe_leaves = []
columns = ["set", "crit", "max_depth", "min_samples_leaf"]
metrics_columns = []
for max_depth, min_samples_leaf, criterion in itertools.product(
max_depths, min_samples_leafs, criterion_list
):
print(
f"max_depth={max_depth} min_samples_leaf={min_samples_leaf} criterion={criterion}"
)
model = DecisionTreeClassifier(
max_depth=max_depth,
min_samples_leaf=min_samples_leaf,
criterion=criterion,
random_state=42,
)
df_train = datasets["train"]
df_val = datasets["val"]
if ranking:
model.fit(
df_train[feature_columns],
df_train["winner"],
sample_weight=df_train["relative_performance"],
)
else:
model.fit(df_train[feature_columns], df_train["winner"])
model = DecisionTree(model, feature_columns)
if ranking:
model.prune(df_train, "winner", k=self.ranking_num_choices())
unsafe_leaves = self.get_unsafe_leaves(model, df_train, feature_columns)
predictions, proba, leaf_ids = self.predict(model, df_val, feature_columns)
wrong_pct = self.get_allowed_wrong_prediction_pct()
evaluator = DecisionEvaluator(
self,
model,
predictions,
df_val,
proba,
wrong_pct=wrong_pct,
unsafe_leaves=unsafe_leaves,
leaf_ids=leaf_ids,
k=self.ranking_num_choices(),
ranking=ranking,
)
safe_proba = evaluator.get_safe_proba()
print(f"safe_proba={safe_proba}")
def eval(name, df):
if ranking:
# when ranking is enabled, we duplicate each input for each choice that
# is almost as good as the best choice
# we do not want to evaluate the same input multiple times, so we remove duplicates here
df = df[df["winner"] == df["actual_winner"]]
predictions, proba, leaf_ids = self.predict(model, df, feature_columns)
evaluator = DecisionEvaluator(
self,
model,
predictions,
df,
proba,
wrong_pct=wrong_pct,
threshold=safe_proba,
unsafe_leaves=unsafe_leaves,
leaf_ids=leaf_ids,
k=self.ranking_num_choices(),
ranking=ranking,
)
return evaluator.get_results()
for dataset_name, dataset in datasets.items():
eval_result: EvalResults = eval(dataset_name, dataset)
eval_result_metrics = eval_result.to_map()
if dataset_name == "val":
num_correct = eval_result.accuracy.num_correct
num_wrong = eval_result.accuracy.num_wrong
num_total = eval_result.accuracy.total
if num_wrong <= num_total * wrong_pct:
if num_correct > best_model_num_correct:
print(
f"new best model with {num_correct} correct and {num_wrong} wrong"
)
best_model = model
best_model_num_correct = num_correct
best_model_safe_proba = safe_proba
best_model_unsafe_leaves = unsafe_leaves
result = (dataset_name, criterion, max_depth, min_samples_leaf)
result += tuple(eval_result_metrics.values())
results.append(result)
if len(metrics_columns) == 0:
metrics_columns = list(eval_result_metrics.keys())
columns += metrics_columns
return (
pd.DataFrame(results, columns=columns),
best_model,
best_model_safe_proba,
best_model_unsafe_leaves,
)
def get_test_and_val_size(self):
"""
Returns the size of the test and validation sets.
"""
return (0.15, 0.15)
def prepare_datasets(self, df, other_datasets, cat_feature2cats, ranking=False):
"""
Splits the dataframe into train, val, and test sets.
Also adds other datasets, specified by the user, to the train set.
"""
test_size, val_size = self.get_test_and_val_size()
# Split into train+val and test
df_train_val, df_test = train_test_split(
df, test_size=test_size, random_state=42
)
# Split train+val inputs into train and val
train_val_size = 1 - test_size
df_train, df_val = train_test_split(
df_train_val, test_size=val_size / train_val_size, random_state=42
)
datasets = {"train": df_train, "val": df_val, "test": df_test}
self.add_real_datasets(datasets, other_datasets, cat_feature2cats, ranking)
return datasets
def export_to_dot(self, best_model, df, feature_columns):
"""
Export a learned decision tree to a dot file.
"""
dot_str = best_model.to_dot()
with open("best_model.dot", "w") as f:
f.write(dot_str)
def get_feature_columns(self, df):
"""
The dataframe contains columns that are not features, such as 'winner', 'speedup' that are only used for
debugging purposes. This function returns the columns that are actually features.
"""
exclude_columns = [
"speedup",
"winner",
"target",
"avail_choices",
"choice2time",
"index",
"actual_winner",
"relative_performance",
]
feature_columns = [col for col in df.columns if col not in exclude_columns]
return feature_columns
def add_training_data(self, df_train, datasets):
return datasets["train"]
def main(
self,
log_path,
other_datasets,
nrows,
heuristic_name,
save_dot=False,
ranking=False,
):
"""
Main function that trains a decision tree and generates a heuristic.
"""
# TODO: Enable apply_filters
(df, choices, cat_feature2cats, dummy_col_2_col_val, metadata) = self.get_df(
log_path, nrows=nrows, apply_filters=False, add_near_best=ranking
)
self.dummy_col_2_col_val = dummy_col_2_col_val
datasets = self.prepare_datasets(df, other_datasets, cat_feature2cats, ranking)
df_train = self.add_training_data(datasets["train"], datasets)
datasets["train"] = df_train
print(datasets["train"]["winner"].value_counts().to_string())
feature_columns = self.get_feature_columns(df)
grid_search_values = self.get_grid_search_values()
max_depths = grid_search_values["max_depth"]
min_samples_leafs = grid_search_values["min_samples_leaf"]
criterion_list = grid_search_values["criterion"]
(
results_df,
best_model,
best_model_safe_proba,
unsafe_leaves,
) = self.train_and_evaluate_models(
datasets,
max_depths,
min_samples_leafs,
criterion_list,
feature_columns,
ranking=ranking,
)
if ranking:
columns_to_keep = [
"set",
"crit",
"max_depth",
"min_samples_leaf",
"total",
"top_k_correct",
"top_k_wrong",
"top_k_unsure",
"wrong_max_speedup_k",
"wrong_gmean_speedup_k",
]
results_df = results_df[columns_to_keep]
# prints results for all models and datasets
print(results_df.to_string())
sort_metric = "top_k_correct" if ranking else "correct"
# prints results grouped by dataset
for set_name in results_df["set"].unique():
dataset_results = results_df[results_df["set"] == set_name]
dataset_results = dataset_results.sort_values(by=sort_metric)
print(dataset_results.to_string() + "\n")
if best_model is not None:
if save_dot:
self.export_to_dot(best_model, df, feature_columns)
self.codegen(
best_model,
metadata,
heuristic_name,
best_model_safe_proba,
dummy_col_2_col_val,
unsafe_leaves,
)
else:
print(
"All learned models have too many wrong predictions, so no heuristic was generated"
)
def get_df(
self,
log_path,
cat_feature2cats=None,
nrows=None,
apply_filters=False,
add_near_best=False,
):
"""
Parses the log file and processes the data into a dataframe that can be used for training.
"""
(df, metadata, features, categorical_features, choices) = self.parse_log(
log_path, nrows
)
def calculate_stats(group):
count = len(group)
has_inf = np.isinf(group["feedback"]).any()
if has_inf:
relative_std = np.inf
median = np.inf
else:
mean = group["feedback"].mean()
std = group["feedback"].std()
relative_std = (std / mean) * 100 if mean != 0 else np.inf
median = group["feedback"].median()
if relative_std > 5:
times = group["feedback"].tolist()
times_str = ", ".join([f"{t:.3f}" for t in sorted(times)])
log.debug("High relative std: %f. times=%s", relative_std, times_str)
return pd.Series(
{
"count": count,
"relative_std": relative_std,
"median_execution_time": median,
}
)
feature_columns = features
stats = (
df.groupby(feature_columns + ["choice"], as_index=False)
.apply(calculate_stats, include_groups=False)
.reset_index()
)
# TODO: We have to be careful with removing certain choices, because if we e.g. remove the winner, the
# heuristic will end up learning wrong things. But, execution times with high variance are also bad
if apply_filters:
# Filter out inputs with less than 3 measurements or high relative std
valid_stats = stats[(stats["count"] >= 3) & (stats["relative_std"] <= 5)]
# Group by input features and count how many valid choices we have for each input
valid_inputs = valid_stats.groupby(feature_columns).filter(
lambda x: len(x) >= 2
)
else:
valid_inputs = stats
# Compute the winner and speedup for each valid input
def get_winner_and_speedup(group):
assert len(group) >= 2, "Need at least 2 choices"
sorted_group = group.sort_values("median_execution_time")
winner = sorted_group.iloc[0]["choice"]
winning_time = sorted_group.iloc[0]["median_execution_time"]
second_best_time = sorted_group.iloc[1]["median_execution_time"]
speedup = second_best_time / winning_time
unique_choices = group["choice"].unique()
choice2time = {}
for row in group.itertuples():
choice2time[row.choice] = row.median_execution_time
assert len(unique_choices) == len(group), (
f"len(unique_choices) != len(group): {len(unique_choices)} != {len(group)}"
)
return pd.Series(
{
"winner": winner,
"speedup": speedup,
"avail_choices": unique_choices,
"choice2time": json.dumps(choice2time),
}
)
results = (
valid_inputs.groupby(feature_columns, as_index=False)
.filter(lambda x: len(x) >= 2)
.groupby(feature_columns, as_index=False)
.apply(get_winner_and_speedup, include_groups=False)
.reset_index()
)
def add_near_best_configs(df):
new_rows = []
for index, row in df.iterrows():
dictionary = json.loads(row["choice2time"])
min_value = min(dictionary.values())
for key, value in dictionary.items():
new_row = row.copy()
relative_performance = min_value / value
new_row["relative_performance"] = relative_performance
if relative_performance is None or relative_performance is np.inf:
breakpoint()
new_row["actual_winner"] = row["winner"]
new_row["winner"] = key
if relative_performance >= 0.98:
new_rows.append(new_row)
return pd.DataFrame(new_rows).reset_index(drop=True)
if add_near_best:
results = add_near_best_configs(results)
(results, added_categorical_features) = self.add_new_features(results)
categorical_features += added_categorical_features
(
results,
cat_feature2cats,
dummy_col_2_col_val,
) = self.handle_categorical_features(
cat_feature2cats, categorical_features, results
)
return (results, choices, cat_feature2cats, dummy_col_2_col_val, metadata)
def ranking_always_included_choices(self):
return []
def gen_classes(self, classes, num_spaces):
"""
If classes=['choice1', 'choice2', 'choice3'], then this function returns
the following string:
self.choices.append('choice1')
self.choices.append('choice2')
self.choices.append('choice3')
Used in the generated heuristic to map the index of a choice to its name.
"""
indent = " " * num_spaces
return "\n".join([f"{indent}self.choices.append('{c}')" for c in classes])
def get_default_config(self, row):
"""
Returns the default config for a given sample. The default config could for example be the config that is
the chosen by a current handwritten heuristic. This can for example be used in get_unsafe_leaf to
compare the predicted config with the default config.
"""
return None
def gen_predict_fn_def(self):
"""
Generates the definition of the predict function.
"""
return "def get_best_choices(self, context: AHContext) -> Optional[list[tuple[float, int]]]:"
def codegen_boilerplate(
self, heuristic_name, opt_name, threshold, shared_memory, device_capa, classes
):
"""
Generates the boilerplate code for the generated heuristic. This includes things like imports, class definition,
etc.
"""
boiler_plate = f"""# flake8: noqa: B950
# fmt: off
# This file was generated by AutoHeuristic. Do not modify it manually!
# To regenerate this file, take a look at the steps in the README.md file inside torchgen/_autoheuristic/{opt_name}/
from typing import Optional
from torch._inductor.autoheuristic.autoheuristic_utils import (
AHContext,
AHMetadata,
Choice,
)
from torch._inductor.autoheuristic.learnedheuristic_interface import (
LearnedHeuristicDecision,
)
class {heuristic_name}(LearnedHeuristicDecision):
def __init__(self) -> None:
self.choices: list[Choice] = []
self.fill_choices()
{self.gen_precondition(opt_name, shared_memory, device_capa)}
def get_confidence_threshold(self) -> float:
return {threshold}
def get_choice(self, idx: int) -> Optional[str]:
if idx < len(self.choices):
return self.choices[idx]
return None
def fill_choices(self) -> None:
{self.gen_classes(classes, num_spaces=8)}
def get_name(self) -> str:
return '{opt_name}'"""
return boiler_plate
def add_real_datasets(
self, datasets, other_datasets, cat_feature2cats, ranking=False
):
"""
Adds datasets specified by the user to the datasets dictionary.
"""
if other_datasets:
for name, path in other_datasets:
(df_other, choices, _, _, _) = self.get_df(
path,
cat_feature2cats=cat_feature2cats,
apply_filters=False,
add_near_best=ranking,
)
datasets[name] = df_other
def codegen(
self,
tree,
metadata,
heuristic_name,
threshold,
dummy_col_2_col_val,
unsafe_leaves,
):
lines = []
device_capa = metadata["device_capa"]
device_capa_str = f"({device_capa[0]}, {device_capa[1]})"
opt_name = metadata["name"]
lines.append(
self.codegen_boilerplate(
heuristic_name,
opt_name,
threshold,
metadata["shared_memory"],
device_capa_str,
tree.classes_,
)
)
fn_def = f"\n {self.gen_predict_fn_def()}"
lines.append(fn_def)
tree.codegen(dummy_col_2_col_val, lines, unsafe_leaves)
self.write_heuristic_to_file(lines, heuristic_name)
@dataclass
| AHTrainDecisionTree |
python | ray-project__ray | python/ray/tests/test_autoscaler.py | {
"start": 10155,
"end": 11367
} | class ____(unittest.TestCase):
def testHeartbeat(self):
lm = LoadMetrics()
lm.update("1.1.1.1", mock_node_id(), {"CPU": 2}, {"CPU": 1}, 0)
lm.mark_active("2.2.2.2")
assert "1.1.1.1" in lm.last_heartbeat_time_by_ip
assert "2.2.2.2" in lm.last_heartbeat_time_by_ip
assert "3.3.3.3" not in lm.last_heartbeat_time_by_ip
def testDebugString(self):
lm = LoadMetrics()
lm.update("1.1.1.1", mock_node_id(), {"CPU": 2}, {"CPU": 0}, 0)
lm.update(
"2.2.2.2", mock_node_id(), {"CPU": 2, "GPU": 16}, {"CPU": 2, "GPU": 2}, 0
)
lm.update(
"3.3.3.3",
mock_node_id(),
{
"memory": 1.05 * 1024 * 1024 * 1024,
"object_store_memory": 2.1 * 1024 * 1024 * 1024,
},
{
"memory": 0,
"object_store_memory": 1.05 * 1024 * 1024 * 1024,
},
0,
)
debug = lm.info_string()
assert (
"ResourceUsage: 2.0/4.0 CPU, 14.0/16.0 GPU, "
"1.05 GiB/1.05 GiB memory, "
"1.05 GiB/2.1 GiB object_store_memory"
) in debug
| LoadMetricsTest |
python | pytorch__pytorch | torch/_dynamo/eval_frame.py | {
"start": 59568,
"end": 65034
} | class ____(torch.fx.Transformer):
def __init__(
self,
m: torch.fx.GraphModule,
flat_args: list[Any],
matched_input_elements_positions: list[int],
flat_results: Sequence[Any],
matched_output_elements_positions: list[int],
example_fake_inputs: list[torch.Tensor],
flat_args_dynamic_dims: list[set[int]],
fake_mode: Optional[fake_tensor.FakeTensorMode] = None,
) -> None:
super().__init__(m)
assert len(flat_args_dynamic_dims) == len(flat_args)
matched_input_elements_to_fake = {
val: example_fake_inputs[ix]
for ix, val in enumerate(matched_input_elements_positions)
}
self.new_args = []
for i in range(len(flat_args)):
arg = super().placeholder(f"arg{i}", (), {})
if i in matched_input_elements_to_fake:
arg.node.meta["val"] = matched_input_elements_to_fake[i]
else:
# Fill node.meta["val"] with faketensor from the input,
# if it's not found in matched_input_elements_positions
if fake_mode is not None and isinstance(flat_args[i], torch.Tensor):
# TODO(zhxchen17) Also preserve all the user constraints here.
arg.node.meta["val"] = fake_mode.from_tensor(
flat_args[i],
symbolic_context=StatelessSymbolicContext(
dynamic_sizes=[
(
DimDynamic.DYNAMIC
if d in flat_args_dynamic_dims[i]
else DimDynamic.STATIC
)
for d in range(len(flat_args[i].shape))
],
constraint_sizes=[None] * len(flat_args[i].shape),
),
)
elif isinstance(flat_args[i], _IntWrapper):
arg.node.meta["val"] = flat_args[i].val
else:
arg.node.meta["val"] = flat_args[i]
self.new_args.append(arg)
self.old_args_gen = (self.new_args[i] for i in matched_input_elements_positions)
self.matched_output_elements_positions = matched_output_elements_positions
self.flat_results = flat_results
def placeholder(
self, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Any]
) -> Any:
arg = next(self.old_args_gen)
if "val" in self.current_node.meta:
arg.node.meta["val"] = self.current_node.meta["val"]
if "tensor_dict" in self.current_node.meta:
arg.node.meta["tensor_dict"] = self.current_node.meta["tensor_dict"]
if "example_value" in self.current_node.meta:
# NB: intentionally do not use set_example_value
arg.node.meta["example_value"] = self.current_node.meta["example_value"]
if "unbacked_bindings" in self.current_node.meta:
arg.node.meta["unbacked_bindings"] = self.current_node.meta[
"unbacked_bindings"
]
return arg
def output(
self, target: Target, args: tuple[Argument, ...], kwargs: dict[str, Any]
) -> Any:
dynamo_result_flat = args[0]
lookup = [*dynamo_result_flat, *self.new_args] # type: ignore[misc]
new_results_flat = []
for i in range(len(self.flat_results)):
if self.matched_output_elements_positions[i] is not None:
new_results_flat.append(
lookup[self.matched_output_elements_positions[i]]
)
else:
const_val = self.flat_results[i]
assert isinstance(const_val, tuple(common_constant_types))
new_results_flat.append(const_val)
return super().output(target, (new_results_flat,), {})
def run_node(self, n: Node) -> Any:
self.current_node = n
result_proxy = super().run_node(n)
if "val" in self.current_node.meta:
result_proxy.node.meta["val"] = self.current_node.meta["val"]
if "example_value" in self.current_node.meta:
# NB: intentionally do not use set_example_value
result_proxy.node.meta["example_value"] = self.current_node.meta[
"example_value"
]
if "unbacked_bindings" in self.current_node.meta:
result_proxy.node.meta["unbacked_bindings"] = self.current_node.meta[
"unbacked_bindings"
]
if self.current_node.op != "output":
result_proxy.node._rename(
getattr(self.current_node, "name", result_proxy.node.name)
)
return result_proxy
def transform(self) -> torch.fx.GraphModule:
result_gm = super().transform()
if "dynamo_flat_name_to_original_fqn" in self.module.meta: # type: ignore[operator]
result_gm.meta["dynamo_flat_name_to_original_fqn"] = self.module.meta[ # type: ignore[index]
"dynamo_flat_name_to_original_fqn" # type: ignore[index]
]
if "dynamo_compile_id" in self.module.meta: # type: ignore[operator]
result_gm.meta["dynamo_compile_id"] = self.module.meta["dynamo_compile_id"] # type: ignore[index]
return result_gm
| FlattenInputOutputSignature |
python | huggingface__transformers | src/transformers/integrations/executorch.py | {
"start": 1034,
"end": 6637
} | class ____:
"""
A wrapper class for exporting Vision-Language Models (VLMs) like SmolVLM2 for ExecuTorch.
This class handles the export of three main components:
1. Vision encoder (processes images to visual features)
2. Connector/projector (maps visual features to text embedding space)
3. Text decoder (generates text from combined visual and text tokens)
"""
def __init__(self, model, max_batch_size: int = 1, max_cache_len: int = 1024):
"""
Initialize the exportable VLM module.
Args:
model: The VLM (e.g. SmolVLM) model instance
max_batch_size: Maximum batch size. Always 1 for ExecuTorch
max_cache_len: Maximum cache length for text generation
"""
self.model = model
self.max_batch_size = max_batch_size
self.max_cache_len = max_cache_len
self.config = model.config
# Extract individual components
self.vision_encoder = model.model.vision_model
self.connector = model.model.connector
self.text_decoder = model.model.text_model
# Store exported programs
self.exported_vision_encoder = None
self.exported_connector = None
self.exported_text_decoder = None
def export_vision_encoder(self):
"""Export the vision encoder component."""
self.vision_encoder.eval()
# Create example input
pixel_values = torch.randn(1, 3, 384, 384, dtype=torch.float32)
# Define dynamic shapes
dynamic_shapes = {
"pixel_values": {
2: torch.export.Dim.AUTO,
3: torch.export.Dim.AUTO,
}
}
self.exported_vision_encoder = torch.export.export(
self.vision_encoder,
args=(pixel_values,),
dynamic_shapes=dynamic_shapes,
strict=False,
)
return self.exported_vision_encoder
def export_connector(self):
"""Export the connector component."""
self.connector.eval()
# Vision encoder output shape: [batch_size, num_patches, vision_hidden_size]
vision_hidden_size = self.config.vision_config.hidden_size
image_size = self.config.vision_config.image_size
patch_size = self.config.vision_config.patch_size
patches_per_dim = image_size // patch_size
num_patches = patches_per_dim * patches_per_dim
image_hidden_states = torch.randn(1, num_patches, vision_hidden_size, dtype=torch.float32)
# Define dynamic shapes - static batch_size=1, dynamic num_patches
dynamic_shapes = {"image_hidden_states": {1: torch.export.Dim.AUTO}}
# Export the connector using torch.export
self.exported_connector = torch.export.export(
self.connector,
args=(image_hidden_states,),
dynamic_shapes=dynamic_shapes,
strict=False,
)
return self.exported_connector
def export_text_decoder(self):
"""Export the text decoder component."""
# Create text decoder exportable wrapper
self.exportable_text_decoder = TorchExportableModuleForDecoderOnlyLM(model=self.text_decoder)
# Use the existing text decoder exportable wrapper
seq_length = 3
input_ids = torch.zeros((1, seq_length), dtype=torch.long)
cache_position = torch.arange(seq_length, dtype=torch.long)
max_seq_length = min(self.max_cache_len, self.config.text_config.max_position_embeddings)
seq_len_dim = torch.export.Dim("seq_length_dim", max=max_seq_length - 1)
dynamic_shapes = {
"input_ids": {1: seq_len_dim},
"cache_position": {0: seq_len_dim},
}
self.exported_text_decoder = self.exportable_text_decoder.export(
input_ids=input_ids,
cache_position=cache_position,
dynamic_shapes=dynamic_shapes,
strict=False,
)
return self.exported_text_decoder
def export(self, **kwargs):
"""Export all components of the VLM model."""
self.export_vision_encoder(**kwargs)
self.export_connector(**kwargs)
self.export_text_decoder(**kwargs)
return {
"vision_encoder": self.exported_vision_encoder,
"connector": self.exported_connector,
"text_decoder": self.exported_text_decoder,
}
def forward(self, pixel_values, input_ids, cache_position):
"""
Simplified forward pass for inference with guaranteed non-null input_ids and cache_position.
Args:
pixel_values: Input images [1, channels, height, width] (optional)
input_ids: Text token IDs [1, seq_len] (required - won't be None)
cache_position: Cache positions [seq_len] (required - won't be None)
Returns:
Output with logits for text generation
"""
def generate(
self, pixel_values=None, input_ids=None, max_new_tokens=50, do_sample=False, temperature=1.0, **kwargs
):
"""
Simplified generate method with guaranteed non-null input_ids.
Args:
pixel_values: Input images [1, channels, height, width] (optional)
input_ids: Initial text tokens [1, seq_len] (required - won't be None)
max_new_tokens: Maximum number of tokens to generate
do_sample: Whether to use sampling or greedy decoding
temperature: Temperature for sampling
Returns:
Generated sequences
"""
| TorchExportableModuleForVLM |
python | bokeh__bokeh | src/bokeh/events.py | {
"start": 18907,
"end": 20055
} | class ____(PointEvent):
''' Announce a pan event on a Bokeh plot.
Attributes:
delta_x (float) : the amount of scroll in the x direction
delta_y (float) : the amount of scroll in the y direction
direction (float) : the direction of scroll (1 or -1)
sx (float) : x-coordinate of the event in *screen* space
sy (float) : y-coordinate of the event in *screen* space
x (float) : x-coordinate of the event in *data* space
y (float) : y-coordinate of the event in *data* space
'''
event_name = 'pan'
def __init__(self,
model: Plot | None,
*,
delta_x: float | None = None,
delta_y: float | None = None,
direction: Literal[-1, -1] | None = None,
sx: float | None = None,
sy: float | None = None,
x: float | None = None,
y: float | None = None,
modifiers: KeyModifiers | None = None):
self.delta_x = delta_x
self.delta_y = delta_y
self.direction = direction
super().__init__(model, sx=sx, sy=sy, x=x, y=y, modifiers=modifiers)
| Pan |
python | django__django | tests/csrf_tests/views.py | {
"start": 955,
"end": 2604
} | class ____(MiddlewareMixin):
def process_response(self, request, response):
rotate_token(request)
return response
csrf_rotating_token = decorator_from_middleware(_CsrfCookieRotator)
@csrf_protect
def protected_view(request):
return HttpResponse("OK")
@ensure_csrf_cookie
def ensure_csrf_cookie_view(request):
return HttpResponse("OK")
@csrf_protect
@ensure_csrf_cookie
def ensured_and_protected_view(request):
return TestingHttpResponse("OK")
@csrf_protect
@csrf_rotating_token
@ensure_csrf_cookie
def sandwiched_rotate_token_view(request):
"""
This is a view that calls rotate_token() in process_response() between two
calls to CsrfViewMiddleware.process_response().
"""
return TestingHttpResponse("OK")
def post_form_view(request):
"""Return a POST form (without a token)."""
return HttpResponse(
content="""
<html>
<body><h1>\u00a1Unicode!<form method="post"><input type="text"></form></body>
</html>
"""
)
def token_view(request):
context = RequestContext(request, processors=[csrf])
template = Template("{% csrf_token %}")
return HttpResponse(template.render(context))
def non_token_view_using_request_processor(request):
"""Use the csrf view processor instead of the token."""
context = RequestContext(request, processors=[csrf])
template = Template("")
return HttpResponse(template.render(context))
def csrf_token_error_handler(request, **kwargs):
"""This error handler accesses the CSRF token."""
template = Template(get_token(request))
return HttpResponse(template.render(Context()), status=599)
| _CsrfCookieRotator |
python | ray-project__ray | python/ray/serve/tests/test_config_files/use_custom_autoscaling_policy.py | {
"start": 204,
"end": 352
} | class ____:
def __call__(self):
return "hello_from_custom_autoscaling_policy"
app = CustomAutoscalingPolicy.bind()
| CustomAutoscalingPolicy |
python | spyder-ide__spyder | spyder/utils/syntaxhighlighters.py | {
"start": 81258,
"end": 83832
} | class ____(RegexLexer):
"""
A lexer for logs generated by the Python builtin 'logging' library.
Taken from
https://bitbucket.org/birkenfeld/pygments-main/pull-requests/451/add-python-logging-lexer
"""
name = 'Python Logging'
aliases = ['pylog', 'pythonlogging']
filenames = ['*.log']
tokens = {
'root': [
(r'^(\d{4}-\d\d-\d\d \d\d:\d\d:\d\d\,?\d*)(\s\w+)',
bygroups(Comment.Preproc, Number.Integer), 'message'),
(r'"(.*?)"|\'(.*?)\'', String),
(r'(\d)', Number.Integer),
(r'(\s.+/n)', Text)
],
'message': [
(r'(\s-)(\sDEBUG)(\s-)(\s*[\d\w]+([.]?[\d\w]+)+\s*)',
bygroups(Text, Number, Text, Name.Builtin), '#pop'),
(r'(\s-)(\sINFO\w*)(\s-)(\s*[\d\w]+([.]?[\d\w]+)+\s*)',
bygroups(Generic.Heading, Text, Text, Name.Builtin), '#pop'),
(r'(\sWARN\w*)(\s.+)', bygroups(String, String), '#pop'),
(r'(\sERROR)(\s.+)',
bygroups(Generic.Error, Name.Constant), '#pop'),
(r'(\sCRITICAL)(\s.+)',
bygroups(Generic.Error, Name.Constant), '#pop'),
(r'(\sTRACE)(\s.+)',
bygroups(Generic.Error, Name.Constant), '#pop'),
(r'(\s\w+)(\s.+)',
bygroups(Comment, Generic.Output), '#pop'),
],
}
def guess_pygments_highlighter(filename):
"""
Factory to generate syntax highlighter for the given filename.
If a syntax highlighter is not available for a particular file, this
function will attempt to generate one based on the lexers in Pygments. If
Pygments is not available or does not have an appropriate lexer, TextSH
will be returned instead.
"""
try:
from pygments.lexers import get_lexer_for_filename, get_lexer_by_name
except Exception:
return TextSH
root, ext = os.path.splitext(filename)
if ext == '.txt':
# Pygments assigns a lexer that doesn’t highlight anything to
# txt files. So we avoid that here.
return TextSH
elif ext in custom_extension_lexer_mapping:
try:
lexer = get_lexer_by_name(custom_extension_lexer_mapping[ext])
except Exception:
return TextSH
elif ext == '.log':
lexer = PythonLoggingLexer()
else:
try:
lexer = get_lexer_for_filename(filename)
except Exception:
return TextSH
class GuessedPygmentsSH(PygmentsSH):
_lexer = lexer
return GuessedPygmentsSH
| PythonLoggingLexer |
python | apache__airflow | providers/google/src/airflow/providers/google/suite/hooks/sheets.py | {
"start": 1167,
"end": 17026
} | class ____(GoogleBaseHook):
"""
Interact with Google Sheets via Google Cloud connection.
Reading and writing cells in Google Sheet: https://developers.google.com/sheets/api/guides/values
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param api_version: API Version
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:param api_endpoint: Optional. Custom API endpoint, i.e: regional or private endpoint.
This can be used to target private VPC or restricted access endpoints.
"""
def __init__(
self,
gcp_conn_id: str = "google_cloud_default",
api_version: str = "v4",
impersonation_chain: str | Sequence[str] | None = None,
api_endpoint: str | None = None,
) -> None:
super().__init__(
gcp_conn_id=gcp_conn_id,
impersonation_chain=impersonation_chain,
)
self.gcp_conn_id = gcp_conn_id
self.api_version = api_version
self.api_endpoint = api_endpoint
self._conn = None
def get_conn(self) -> Any:
"""
Retrieve connection to Google Sheets.
:return: Google Sheets services object.
"""
if not self._conn:
http_authorized = self._authorize()
client_options = None
if self.api_endpoint:
client_options = ClientOptions(api_endpoint=self.api_endpoint)
self._conn = build(
"sheets",
self.api_version,
http=http_authorized,
cache_discovery=False,
client_options=client_options,
)
return self._conn
def get_values(
self,
spreadsheet_id: str,
range_: str,
major_dimension: str = "DIMENSION_UNSPECIFIED",
value_render_option: str = "FORMATTED_VALUE",
date_time_render_option: str = "SERIAL_NUMBER",
) -> list:
"""
Get values from Google Sheet from a single range.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/get
:param spreadsheet_id: The Google Sheet ID to interact with
:param range_: The A1 notation of the values to retrieve.
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:return: An array of sheet values from the specified sheet.
"""
service = self.get_conn()
response = (
service.spreadsheets()
.values()
.get(
spreadsheetId=spreadsheet_id,
range=range_,
majorDimension=major_dimension,
valueRenderOption=value_render_option,
dateTimeRenderOption=date_time_render_option,
)
.execute(num_retries=self.num_retries)
)
return response.get("values", [])
def batch_get_values(
self,
spreadsheet_id: str,
ranges: list,
major_dimension: str = "DIMENSION_UNSPECIFIED",
value_render_option: str = "FORMATTED_VALUE",
date_time_render_option: str = "SERIAL_NUMBER",
) -> dict:
"""
Get values from Google Sheet from a list of ranges.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchGet
:param spreadsheet_id: The Google Sheet ID to interact with
:param ranges: The A1 notation of the values to retrieve.
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:return: Google Sheets API response.
"""
service = self.get_conn()
response = (
service.spreadsheets()
.values()
.batchGet(
spreadsheetId=spreadsheet_id,
ranges=ranges,
majorDimension=major_dimension,
valueRenderOption=value_render_option,
dateTimeRenderOption=date_time_render_option,
)
.execute(num_retries=self.num_retries)
)
return response
def update_values(
self,
spreadsheet_id: str,
range_: str,
values: list,
major_dimension: str = "ROWS",
value_input_option: str = "RAW",
include_values_in_response: bool = False,
value_render_option: str = "FORMATTED_VALUE",
date_time_render_option: str = "SERIAL_NUMBER",
) -> dict:
"""
Update values from Google Sheet from a single range.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/update
:param spreadsheet_id: The Google Sheet ID to interact with.
:param range_: The A1 notation of the values to retrieve.
:param values: Data within a range of the spreadsheet.
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:param value_input_option: Determines how input data should be interpreted.
RAW or USER_ENTERED
:param include_values_in_response: Determines if the update response should
include the values of the cells that were updated.
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:return: Google Sheets API response.
"""
service = self.get_conn()
body = {"range": range_, "majorDimension": major_dimension, "values": values}
response = (
service.spreadsheets()
.values()
.update(
spreadsheetId=spreadsheet_id,
range=range_,
valueInputOption=value_input_option,
includeValuesInResponse=include_values_in_response,
responseValueRenderOption=value_render_option,
responseDateTimeRenderOption=date_time_render_option,
body=body,
)
.execute(num_retries=self.num_retries)
)
return response
def batch_update_values(
self,
spreadsheet_id: str,
ranges: list,
values: list,
major_dimension: str = "ROWS",
value_input_option: str = "RAW",
include_values_in_response: bool = False,
value_render_option: str = "FORMATTED_VALUE",
date_time_render_option: str = "SERIAL_NUMBER",
) -> dict:
"""
Update values from Google Sheet for multiple ranges.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchUpdate
:param spreadsheet_id: The Google Sheet ID to interact with
:param ranges: The A1 notation of the values to retrieve.
:param values: Data within a range of the spreadsheet.
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:param value_input_option: Determines how input data should be interpreted.
RAW or USER_ENTERED
:param include_values_in_response: Determines if the update response should
include the values of the cells that were updated.
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:return: Google Sheets API response.
"""
if len(ranges) != len(values):
raise AirflowException(
f"'Ranges' and 'Lists' must be of equal length. "
f"'Ranges' is of length: {len(ranges)} and 'Values' is of length: {len(values)}."
)
service = self.get_conn()
data = []
for idx, range_ in enumerate(ranges):
value_range = {"range": range_, "majorDimension": major_dimension, "values": values[idx]}
data.append(value_range)
body = {
"valueInputOption": value_input_option,
"data": data,
"includeValuesInResponse": include_values_in_response,
"responseValueRenderOption": value_render_option,
"responseDateTimeRenderOption": date_time_render_option,
}
response = (
service.spreadsheets()
.values()
.batchUpdate(spreadsheetId=spreadsheet_id, body=body)
.execute(num_retries=self.num_retries)
)
return response
def append_values(
self,
spreadsheet_id: str,
range_: str,
values: list,
major_dimension: str = "ROWS",
value_input_option: str = "RAW",
insert_data_option: str = "OVERWRITE",
include_values_in_response: bool = False,
value_render_option: str = "FORMATTED_VALUE",
date_time_render_option: str = "SERIAL_NUMBER",
) -> dict:
"""
Append values from Google Sheet from a single range.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/append
:param spreadsheet_id: The Google Sheet ID to interact with
:param range_: The A1 notation of the values to retrieve.
:param values: Data within a range of the spreadsheet.
:param major_dimension: Indicates which dimension an operation should apply to.
DIMENSION_UNSPECIFIED, ROWS, or COLUMNS
:param value_input_option: Determines how input data should be interpreted.
RAW or USER_ENTERED
:param insert_data_option: Determines how existing data is changed when new data is input.
OVERWRITE or INSERT_ROWS
:param include_values_in_response: Determines if the update response should
include the values of the cells that were updated.
:param value_render_option: Determines how values should be rendered in the output.
FORMATTED_VALUE, UNFORMATTED_VALUE, or FORMULA
:param date_time_render_option: Determines how dates should be rendered in the output.
SERIAL_NUMBER or FORMATTED_STRING
:return: Google Sheets API response.
"""
service = self.get_conn()
body = {"range": range_, "majorDimension": major_dimension, "values": values}
response = (
service.spreadsheets()
.values()
.append(
spreadsheetId=spreadsheet_id,
range=range_,
valueInputOption=value_input_option,
insertDataOption=insert_data_option,
includeValuesInResponse=include_values_in_response,
responseValueRenderOption=value_render_option,
responseDateTimeRenderOption=date_time_render_option,
body=body,
)
.execute(num_retries=self.num_retries)
)
return response
def clear(self, spreadsheet_id: str, range_: str) -> dict:
"""
Clear values from Google Sheet from a single range.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/clear
:param spreadsheet_id: The Google Sheet ID to interact with
:param range_: The A1 notation of the values to retrieve.
:return: Google Sheets API response.
"""
service = self.get_conn()
response = (
service.spreadsheets()
.values()
.clear(spreadsheetId=spreadsheet_id, range=range_)
.execute(num_retries=self.num_retries)
)
return response
def batch_clear(self, spreadsheet_id: str, ranges: list) -> dict:
"""
Clear values from Google Sheet from a list of ranges.
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets.values/batchClear
:param spreadsheet_id: The Google Sheet ID to interact with
:param ranges: The A1 notation of the values to retrieve.
:return: Google Sheets API response.
"""
service = self.get_conn()
body = {"ranges": ranges}
response = (
service.spreadsheets()
.values()
.batchClear(spreadsheetId=spreadsheet_id, body=body)
.execute(num_retries=self.num_retries)
)
return response
def get_spreadsheet(self, spreadsheet_id: str):
"""
Retrieve spreadsheet matching the given id.
:param spreadsheet_id: The spreadsheet id.
:return: An spreadsheet that matches the sheet filter.
"""
response = (
self.get_conn()
.spreadsheets()
.get(spreadsheetId=spreadsheet_id)
.execute(num_retries=self.num_retries)
)
return response
def get_sheet_titles(self, spreadsheet_id: str, sheet_filter: list[str] | None = None):
"""
Retrieve the sheet titles from a spreadsheet matching the given id and sheet filter.
:param spreadsheet_id: The spreadsheet id.
:param sheet_filter: List of sheet title to retrieve from sheet.
:return: An list of sheet titles from the specified sheet that match
the sheet filter.
"""
response = self.get_spreadsheet(spreadsheet_id=spreadsheet_id)
if sheet_filter:
titles = [
sh["properties"]["title"]
for sh in response["sheets"]
if sh["properties"]["title"] in sheet_filter
]
else:
titles = [sh["properties"]["title"] for sh in response["sheets"]]
return titles
def create_spreadsheet(self, spreadsheet: dict[str, Any]) -> dict[str, Any]:
"""
Create a spreadsheet, returning the newly created spreadsheet.
:param spreadsheet: an instance of Spreadsheet
https://developers.google.com/sheets/api/reference/rest/v4/spreadsheets#Spreadsheet
:return: An spreadsheet object.
"""
self.log.info("Creating spreadsheet: %s", spreadsheet["properties"]["title"])
response = (
self.get_conn().spreadsheets().create(body=spreadsheet).execute(num_retries=self.num_retries)
)
self.log.info("Spreadsheet: %s created", spreadsheet["properties"]["title"])
return response
| GSheetsHook |
python | mozilla__bleach | bleach/_vendor/html5lib/treebuilders/etree_lxml.py | {
"start": 1208,
"end": 6614
} | class ____(object):
def __init__(self):
self._elementTree = None
self._childNodes = []
def appendChild(self, element):
last = self._elementTree.getroot()
for last in self._elementTree.getroot().itersiblings():
pass
last.addnext(element._element)
def _getChildNodes(self):
return self._childNodes
childNodes = property(_getChildNodes)
def testSerializer(element):
rv = []
infosetFilter = _ihatexml.InfosetFilter(preventDoubleDashComments=True)
def serializeElement(element, indent=0):
if not hasattr(element, "tag"):
if hasattr(element, "getroot"):
# Full tree case
rv.append("#document")
if element.docinfo.internalDTD:
if not (element.docinfo.public_id or
element.docinfo.system_url):
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
else:
dtd_str = """<!DOCTYPE %s "%s" "%s">""" % (
element.docinfo.root_name,
element.docinfo.public_id,
element.docinfo.system_url)
rv.append("|%s%s" % (' ' * (indent + 2), dtd_str))
next_element = element.getroot()
while next_element.getprevious() is not None:
next_element = next_element.getprevious()
while next_element is not None:
serializeElement(next_element, indent + 2)
next_element = next_element.getnext()
elif isinstance(element, str) or isinstance(element, bytes):
# Text in a fragment
assert isinstance(element, str) or sys.version_info[0] == 2
rv.append("|%s\"%s\"" % (' ' * indent, element))
else:
# Fragment case
rv.append("#document-fragment")
for next_element in element:
serializeElement(next_element, indent + 2)
elif element.tag == comment_type:
rv.append("|%s<!-- %s -->" % (' ' * indent, element.text))
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * indent, element.tail))
else:
assert isinstance(element, etree._Element)
nsmatch = etree_builders.tag_regexp.match(element.tag)
if nsmatch is not None:
ns = nsmatch.group(1)
tag = nsmatch.group(2)
prefix = constants.prefixes[ns]
rv.append("|%s<%s %s>" % (' ' * indent, prefix,
infosetFilter.fromXmlName(tag)))
else:
rv.append("|%s<%s>" % (' ' * indent,
infosetFilter.fromXmlName(element.tag)))
if hasattr(element, "attrib"):
attributes = []
for name, value in element.attrib.items():
nsmatch = tag_regexp.match(name)
if nsmatch is not None:
ns, name = nsmatch.groups()
name = infosetFilter.fromXmlName(name)
prefix = constants.prefixes[ns]
attr_string = "%s %s" % (prefix, name)
else:
attr_string = infosetFilter.fromXmlName(name)
attributes.append((attr_string, value))
for name, value in sorted(attributes):
rv.append('|%s%s="%s"' % (' ' * (indent + 2), name, value))
if element.text:
rv.append("|%s\"%s\"" % (' ' * (indent + 2), element.text))
indent += 2
for child in element:
serializeElement(child, indent)
if hasattr(element, "tail") and element.tail:
rv.append("|%s\"%s\"" % (' ' * (indent - 2), element.tail))
serializeElement(element, 0)
return "\n".join(rv)
def tostring(element):
"""Serialize an element and its child nodes to a string"""
rv = []
def serializeElement(element):
if not hasattr(element, "tag"):
if element.docinfo.internalDTD:
if element.docinfo.doctype:
dtd_str = element.docinfo.doctype
else:
dtd_str = "<!DOCTYPE %s>" % element.docinfo.root_name
rv.append(dtd_str)
serializeElement(element.getroot())
elif element.tag == comment_type:
rv.append("<!--%s-->" % (element.text,))
else:
# This is assumed to be an ordinary element
if not element.attrib:
rv.append("<%s>" % (element.tag,))
else:
attr = " ".join(["%s=\"%s\"" % (name, value)
for name, value in element.attrib.items()])
rv.append("<%s %s>" % (element.tag, attr))
if element.text:
rv.append(element.text)
for child in element:
serializeElement(child)
rv.append("</%s>" % (element.tag,))
if hasattr(element, "tail") and element.tail:
rv.append(element.tail)
serializeElement(element)
return "".join(rv)
| Document |
python | numba__numba | numba/core/types/containers.py | {
"start": 9539,
"end": 9937
} | class ____(_StarArgTupleMixin, Tuple):
"""To distinguish from Tuple() used as argument to a `*args`.
"""
def __new__(cls, types):
_HeterogeneousTuple.is_types_iterable(types)
if types and all(t == types[0] for t in types[1:]):
return StarArgUniTuple(dtype=types[0], count=len(types))
else:
return object.__new__(StarArgTuple)
| StarArgTuple |
python | scrapy__scrapy | tests/CrawlerProcess/asyncio_enabled_reactor_different_loop.py | {
"start": 291,
"end": 666
} | class ____(scrapy.Spider):
name = "no_request"
async def start(self):
return
yield
process = CrawlerProcess(
settings={
"TWISTED_REACTOR": "twisted.internet.asyncioreactor.AsyncioSelectorReactor",
"ASYNCIO_EVENT_LOOP": "uvloop.Loop",
}
)
d = process.crawl(NoRequestsSpider)
d.addErrback(log.err)
process.start()
| NoRequestsSpider |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/util/_immutabledict_cy.py | {
"start": 1904,
"end": 3104
} | class ____(Dict[_KT, _VT]):
# NOTE: this method is required in 3.9 and speeds up the use case
# ImmutableDictBase[str,int](a_dict) significantly
@classmethod
def __class_getitem__( # type: ignore[override]
cls, key: Any
) -> type[Self]:
return cls
def __delitem__(self, key: Any) -> NoReturn:
_immutable_fn(self)
def __setitem__(self, key: Any, value: Any) -> NoReturn:
_immutable_fn(self)
def __setattr__(self, key: Any, value: Any) -> NoReturn:
_immutable_fn(self)
def clear(self) -> NoReturn:
_immutable_fn(self)
def pop(self, key: Any, default: Optional[Any] = None) -> NoReturn:
_immutable_fn(self)
def popitem(self) -> NoReturn:
_immutable_fn(self)
def setdefault(self, key: Any, default: Optional[Any] = None) -> NoReturn:
_immutable_fn(self)
def update(self, *arg: Any, **kw: Any) -> NoReturn:
_immutable_fn(self)
# NOTE: can't extend from ImmutableDictBase[_KT, _VT] due to a compiler
# crash in doing so. Extending from ImmutableDictBase is ok, but requires
# a type checking section and other workaround for the crash
@cython.cclass
| ImmutableDictBase |
python | openai__openai-python | src/openai/lib/streaming/chat/_events.py | {
"start": 1800,
"end": 1935
} | class ____(BaseModel):
type: Literal["logprobs.content.done"]
content: List[ChatCompletionTokenLogprob]
| LogprobsContentDoneEvent |
python | django__django | tests/schema/test_logging.py | {
"start": 68,
"end": 740
} | class ____(TestCase):
def test_extra_args(self):
editor = connection.schema_editor(collect_sql=True)
sql = "SELECT * FROM foo WHERE id in (%s, %s)"
params = [42, 1337]
with self.assertLogs("django.db.backends.schema", "DEBUG") as cm:
editor.execute(sql, params)
if connection.features.schema_editor_uses_clientside_param_binding:
sql = "SELECT * FROM foo WHERE id in (42, 1337)"
params = None
self.assertEqual(cm.records[0].sql, sql)
self.assertEqual(cm.records[0].params, params)
self.assertEqual(cm.records[0].getMessage(), f"{sql}; (params {params})")
| SchemaLoggerTests |
python | dagster-io__dagster | python_modules/libraries/dagster-gcp/dagster_gcp/gcs/file_manager.py | {
"start": 285,
"end": 1034
} | class ____(FileHandle):
"""A reference to a file on GCS."""
def __init__(self, gcs_bucket: str, gcs_key: str):
self._gcs_bucket = check.str_param(gcs_bucket, "gcs_bucket")
self._gcs_key = check.str_param(gcs_key, "gcs_key")
@property
def gcs_bucket(self) -> str:
"""str: The name of the GCS bucket."""
return self._gcs_bucket
@property
def gcs_key(self) -> str:
"""str: The GCS key."""
return self._gcs_key
@property
def path_desc(self) -> str:
"""str: The file's GCS URL."""
return self.gcs_path
@property
def gcs_path(self) -> str:
"""str: The file's GCS URL."""
return f"gs://{self.gcs_bucket}/{self.gcs_key}"
| GCSFileHandle |
python | scrapy__scrapy | scrapy/utils/curl.py | {
"start": 316,
"end": 679
} | class ____(argparse.Action):
def __call__(
self,
parser: argparse.ArgumentParser,
namespace: argparse.Namespace,
values: str | Sequence[Any] | None,
option_string: str | None = None,
) -> None:
value = str(values)
value = value.removeprefix("$")
setattr(namespace, self.dest, value)
| DataAction |
python | pandas-dev__pandas | pandas/tests/test_nanops.py | {
"start": 3633,
"end": 27464
} | class ____:
def setup_method(self):
nanops._USE_BOTTLENECK = False
arr_shape = (11, 7)
self.arr_float = np.random.default_rng(2).standard_normal(arr_shape)
self.arr_float1 = np.random.default_rng(2).standard_normal(arr_shape)
self.arr_complex = self.arr_float + self.arr_float1 * 1j
self.arr_int = np.random.default_rng(2).integers(-10, 10, arr_shape)
self.arr_bool = np.random.default_rng(2).integers(0, 2, arr_shape) == 0
self.arr_str = np.abs(self.arr_float).astype("S")
self.arr_utf = np.abs(self.arr_float).astype("U")
self.arr_date = (
np.random.default_rng(2).integers(0, 20000, arr_shape).astype("M8[ns]")
)
self.arr_tdelta = (
np.random.default_rng(2).integers(0, 20000, arr_shape).astype("m8[ns]")
)
self.arr_nan = np.tile(np.nan, arr_shape)
self.arr_float_nan = np.vstack([self.arr_float, self.arr_nan])
self.arr_float1_nan = np.vstack([self.arr_float1, self.arr_nan])
self.arr_nan_float1 = np.vstack([self.arr_nan, self.arr_float1])
self.arr_nan_nan = np.vstack([self.arr_nan, self.arr_nan])
self.arr_inf = self.arr_float * np.inf
self.arr_float_inf = np.vstack([self.arr_float, self.arr_inf])
self.arr_nan_inf = np.vstack([self.arr_nan, self.arr_inf])
self.arr_float_nan_inf = np.vstack([self.arr_float, self.arr_nan, self.arr_inf])
self.arr_nan_nan_inf = np.vstack([self.arr_nan, self.arr_nan, self.arr_inf])
self.arr_obj = np.vstack(
[
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
self.arr_complex.astype("O"),
self.arr_str.astype("O"),
self.arr_utf.astype("O"),
self.arr_date.astype("O"),
self.arr_tdelta.astype("O"),
]
)
with np.errstate(invalid="ignore"):
self.arr_nan_nanj = self.arr_nan + self.arr_nan * 1j
self.arr_complex_nan = np.vstack([self.arr_complex, self.arr_nan_nanj])
self.arr_nan_infj = self.arr_inf * 1j
self.arr_complex_nan_infj = np.vstack([self.arr_complex, self.arr_nan_infj])
self.arr_float_2d = self.arr_float
self.arr_float1_2d = self.arr_float1
self.arr_nan_2d = self.arr_nan
self.arr_float_nan_2d = self.arr_float_nan
self.arr_float1_nan_2d = self.arr_float1_nan
self.arr_nan_float1_2d = self.arr_nan_float1
self.arr_float_1d = self.arr_float[:, 0]
self.arr_float1_1d = self.arr_float1[:, 0]
self.arr_nan_1d = self.arr_nan[:, 0]
self.arr_float_nan_1d = self.arr_float_nan[:, 0]
self.arr_float1_nan_1d = self.arr_float1_nan[:, 0]
self.arr_nan_float1_1d = self.arr_nan_float1[:, 0]
def teardown_method(self):
nanops._USE_BOTTLENECK = use_bn
def check_results(self, targ, res, axis, check_dtype=True):
res = getattr(res, "asm8", res)
if (
axis != 0
and hasattr(targ, "shape")
and targ.ndim
and targ.shape != res.shape
):
res = np.split(res, [targ.shape[0]], axis=0)[0]
try:
tm.assert_almost_equal(targ, res, check_dtype=check_dtype)
except AssertionError:
# handle timedelta dtypes
if hasattr(targ, "dtype") and targ.dtype == "m8[ns]":
raise
# There are sometimes rounding errors with
# complex and object dtypes.
# If it isn't one of those, re-raise the error.
if not hasattr(res, "dtype") or res.dtype.kind not in ["c", "O"]:
raise
# convert object dtypes to something that can be split into
# real and imaginary parts
if res.dtype.kind == "O":
if targ.dtype.kind != "O":
res = res.astype(targ.dtype)
else:
cast_dtype = "c16" if hasattr(np, "complex128") else "f8"
res = res.astype(cast_dtype)
targ = targ.astype(cast_dtype)
# there should never be a case where numpy returns an object
# but nanops doesn't, so make that an exception
elif targ.dtype.kind == "O":
raise
tm.assert_almost_equal(np.real(targ), np.real(res), check_dtype=check_dtype)
tm.assert_almost_equal(np.imag(targ), np.imag(res), check_dtype=check_dtype)
def check_fun_data(
self,
testfunc,
targfunc,
testar,
testarval,
targarval,
skipna,
check_dtype=True,
empty_targfunc=None,
**kwargs,
):
for axis in list(range(targarval.ndim)) + [None]:
targartempval = targarval if skipna else testarval
if skipna and empty_targfunc and isna(targartempval).all():
targ = empty_targfunc(targartempval, axis=axis, **kwargs)
else:
targ = targfunc(targartempval, axis=axis, **kwargs)
if targartempval.dtype == object and (
targfunc is np.any or targfunc is np.all
):
# GH#12863 the numpy functions will retain e.g. floatiness
if isinstance(targ, np.ndarray):
targ = targ.astype(bool)
else:
targ = bool(targ)
if testfunc.__name__ in ["nanargmax", "nanargmin"] and (
testar.startswith("arr_nan")
or (testar.endswith("nan") and (not skipna or axis == 1))
):
with pytest.raises(ValueError, match="Encountered .* NA value"):
testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
return
res = testfunc(testarval, axis=axis, skipna=skipna, **kwargs)
if (
isinstance(targ, np.complex128)
and isinstance(res, float)
and np.isnan(targ)
and np.isnan(res)
):
# GH#18463
targ = res
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna:
res = testfunc(testarval, axis=axis, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if axis is None:
res = testfunc(testarval, skipna=skipna, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if skipna and axis is None:
res = testfunc(testarval, **kwargs)
self.check_results(targ, res, axis, check_dtype=check_dtype)
if testarval.ndim <= 1:
return
# Recurse on lower-dimension
testarval2 = np.take(testarval, 0, axis=-1)
targarval2 = np.take(targarval, 0, axis=-1)
self.check_fun_data(
testfunc,
targfunc,
testar,
testarval2,
targarval2,
skipna=skipna,
check_dtype=check_dtype,
empty_targfunc=empty_targfunc,
**kwargs,
)
def check_fun(
self, testfunc, targfunc, testar, skipna, empty_targfunc=None, **kwargs
):
targar = testar
if testar.endswith("_nan") and hasattr(self, testar[:-4]):
targar = testar[:-4]
testarval = getattr(self, testar)
targarval = getattr(self, targar)
self.check_fun_data(
testfunc,
targfunc,
testar,
testarval,
targarval,
skipna=skipna,
empty_targfunc=empty_targfunc,
**kwargs,
)
def check_funs(
self,
testfunc,
targfunc,
skipna,
allow_complex=True,
allow_all_nan=True,
allow_date=True,
allow_tdelta=True,
allow_obj=True,
**kwargs,
):
self.check_fun(testfunc, targfunc, "arr_float", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_float_nan", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_int", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_bool", skipna, **kwargs)
objs = [
self.arr_float.astype("O"),
self.arr_int.astype("O"),
self.arr_bool.astype("O"),
]
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan", skipna, **kwargs)
if allow_complex:
self.check_fun(testfunc, targfunc, "arr_complex", skipna, **kwargs)
self.check_fun(testfunc, targfunc, "arr_complex_nan", skipna, **kwargs)
if allow_all_nan:
self.check_fun(testfunc, targfunc, "arr_nan_nanj", skipna, **kwargs)
objs += [self.arr_complex.astype("O")]
if allow_date:
targfunc(self.arr_date)
self.check_fun(testfunc, targfunc, "arr_date", skipna, **kwargs)
objs += [self.arr_date.astype("O")]
if allow_tdelta:
try:
targfunc(self.arr_tdelta)
except TypeError:
pass
else:
self.check_fun(testfunc, targfunc, "arr_tdelta", skipna, **kwargs)
objs += [self.arr_tdelta.astype("O")]
if allow_obj:
self.arr_obj = np.vstack(objs)
# some nanops handle object dtypes better than their numpy
# counterparts, so the numpy functions need to be given something
# else
if allow_obj == "convert":
targfunc = partial(
self._badobj_wrap, func=targfunc, allow_complex=allow_complex
)
self.check_fun(testfunc, targfunc, "arr_obj", skipna, **kwargs)
def _badobj_wrap(self, value, func, allow_complex=True, **kwargs):
if value.dtype.kind == "O":
if allow_complex:
value = value.astype("c16")
else:
value = value.astype("f8")
return func(value, **kwargs)
@pytest.mark.parametrize(
"nan_op,np_op", [(nanops.nanany, np.any), (nanops.nanall, np.all)]
)
def test_nan_funcs(self, nan_op, np_op, skipna):
self.check_funs(nan_op, np_op, skipna, allow_all_nan=False, allow_date=False)
def test_nansum(self, skipna):
self.check_funs(
nanops.nansum,
np.sum,
skipna,
allow_date=False,
check_dtype=False,
empty_targfunc=np.nansum,
)
def test_nanmean(self, skipna):
self.check_funs(
nanops.nanmean, np.mean, skipna, allow_obj=False, allow_date=False
)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_nanmedian(self, skipna):
self.check_funs(
nanops.nanmedian,
np.median,
skipna,
allow_complex=False,
allow_date=False,
allow_obj="convert",
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanvar(self, ddof, skipna):
self.check_funs(
nanops.nanvar,
np.var,
skipna,
allow_complex=False,
allow_date=False,
allow_obj="convert",
ddof=ddof,
)
@pytest.mark.parametrize("ddof", range(3))
def test_nanstd(self, ddof, skipna):
self.check_funs(
nanops.nanstd,
np.std,
skipna,
allow_complex=False,
allow_date=False,
allow_obj="convert",
ddof=ddof,
)
@pytest.mark.parametrize("ddof", range(3))
def test_nansem(self, ddof, skipna):
sp_stats = pytest.importorskip("scipy.stats")
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nansem,
sp_stats.sem,
skipna,
allow_complex=False,
allow_date=False,
allow_tdelta=False,
allow_obj="convert",
ddof=ddof,
)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
@pytest.mark.parametrize(
"nan_op,np_op", [(nanops.nanmin, np.min), (nanops.nanmax, np.max)]
)
def test_nanops_with_warnings(self, nan_op, np_op, skipna):
self.check_funs(nan_op, np_op, skipna, allow_obj=False)
def _argminmax_wrap(self, value, axis=None, func=None):
res = func(value, axis)
nans = np.min(value, axis)
nullnan = isna(nans)
if res.ndim:
res[nullnan] = -1
elif (hasattr(nullnan, "all") and nullnan.all()) or (
not hasattr(nullnan, "all") and nullnan
):
res = -1
return res
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_nanargmax(self, skipna):
func = partial(self._argminmax_wrap, func=np.argmax)
self.check_funs(nanops.nanargmax, func, skipna, allow_obj=False)
@pytest.mark.filterwarnings("ignore::RuntimeWarning")
def test_nanargmin(self, skipna):
func = partial(self._argminmax_wrap, func=np.argmin)
self.check_funs(nanops.nanargmin, func, skipna, allow_obj=False)
def _skew_kurt_wrap(self, values, axis=None, func=None):
if not isinstance(values.dtype.type, np.floating):
values = values.astype("f8")
result = func(values, axis=axis, bias=False)
# fix for handling cases where all elements in an axis are the same
if isinstance(result, np.ndarray):
result[np.max(values, axis=axis) == np.min(values, axis=axis)] = 0
return result
elif np.max(values) == np.min(values):
return 0.0
return result
def test_nanskew(self, skipna):
sp_stats = pytest.importorskip("scipy.stats")
func = partial(self._skew_kurt_wrap, func=sp_stats.skew)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nanskew,
func,
skipna,
allow_complex=False,
allow_date=False,
allow_tdelta=False,
)
def test_nankurt(self, skipna):
sp_stats = pytest.importorskip("scipy.stats")
func1 = partial(sp_stats.kurtosis, fisher=True)
func = partial(self._skew_kurt_wrap, func=func1)
with np.errstate(invalid="ignore"):
self.check_funs(
nanops.nankurt,
func,
skipna,
allow_complex=False,
allow_date=False,
allow_tdelta=False,
)
def test_nanprod(self, skipna):
self.check_funs(
nanops.nanprod,
np.prod,
skipna,
allow_date=False,
allow_tdelta=False,
empty_targfunc=np.nanprod,
)
def check_nancorr_nancov_2d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_2d, self.arr_float1_2d, **kwargs)
res01 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_2d, self.arr_float1_nan_2d, **kwargs)
res11 = checkfun(
self.arr_float_nan_2d,
self.arr_float1_nan_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_2d, self.arr_float1_2d, **kwargs)
res21 = checkfun(self.arr_float_2d, self.arr_nan_2d, **kwargs)
res22 = checkfun(self.arr_nan_2d, self.arr_nan_2d, **kwargs)
res23 = checkfun(self.arr_float_nan_2d, self.arr_nan_float1_2d, **kwargs)
res24 = checkfun(
self.arr_float_nan_2d,
self.arr_nan_float1_2d,
min_periods=len(self.arr_float_2d) - 1,
**kwargs,
)
res25 = checkfun(
self.arr_float_2d,
self.arr_float1_2d,
min_periods=len(self.arr_float_2d) + 1,
**kwargs,
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def check_nancorr_nancov_1d(self, checkfun, targ0, targ1, **kwargs):
res00 = checkfun(self.arr_float_1d, self.arr_float1_1d, **kwargs)
res01 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ0, res00)
tm.assert_almost_equal(targ0, res01)
res10 = checkfun(self.arr_float_nan_1d, self.arr_float1_nan_1d, **kwargs)
res11 = checkfun(
self.arr_float_nan_1d,
self.arr_float1_nan_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs,
)
tm.assert_almost_equal(targ1, res10)
tm.assert_almost_equal(targ1, res11)
targ2 = np.nan
res20 = checkfun(self.arr_nan_1d, self.arr_float1_1d, **kwargs)
res21 = checkfun(self.arr_float_1d, self.arr_nan_1d, **kwargs)
res22 = checkfun(self.arr_nan_1d, self.arr_nan_1d, **kwargs)
res23 = checkfun(self.arr_float_nan_1d, self.arr_nan_float1_1d, **kwargs)
res24 = checkfun(
self.arr_float_nan_1d,
self.arr_nan_float1_1d,
min_periods=len(self.arr_float_1d) - 1,
**kwargs,
)
res25 = checkfun(
self.arr_float_1d,
self.arr_float1_1d,
min_periods=len(self.arr_float_1d) + 1,
**kwargs,
)
tm.assert_almost_equal(targ2, res20)
tm.assert_almost_equal(targ2, res21)
tm.assert_almost_equal(targ2, res22)
tm.assert_almost_equal(targ2, res23)
tm.assert_almost_equal(targ2, res24)
tm.assert_almost_equal(targ2, res25)
def test_nancorr(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1)
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
def test_nancorr_pearson(self):
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="pearson")
targ0 = np.corrcoef(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.corrcoef(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="pearson")
def test_nancorr_kendall(self):
sp_stats = pytest.importorskip("scipy.stats")
targ0 = sp_stats.kendalltau(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = sp_stats.kendalltau(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="kendall")
targ0 = sp_stats.kendalltau(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = sp_stats.kendalltau(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="kendall")
def test_nancorr_spearman(self):
sp_stats = pytest.importorskip("scipy.stats")
targ0 = sp_stats.spearmanr(self.arr_float_2d, self.arr_float1_2d)[0]
targ1 = sp_stats.spearmanr(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0]
self.check_nancorr_nancov_2d(nanops.nancorr, targ0, targ1, method="spearman")
targ0 = sp_stats.spearmanr(self.arr_float_1d, self.arr_float1_1d)[0]
targ1 = sp_stats.spearmanr(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0]
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="spearman")
def test_invalid_method(self):
pytest.importorskip("scipy")
targ0 = np.corrcoef(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.corrcoef(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
msg = "Unknown method 'foo', expected one of 'kendall', 'spearman'"
with pytest.raises(ValueError, match=msg):
self.check_nancorr_nancov_1d(nanops.nancorr, targ0, targ1, method="foo")
def test_nancov(self):
targ0 = np.cov(self.arr_float_2d, self.arr_float1_2d)[0, 1]
targ1 = np.cov(self.arr_float_2d.flat, self.arr_float1_2d.flat)[0, 1]
self.check_nancorr_nancov_2d(nanops.nancov, targ0, targ1)
targ0 = np.cov(self.arr_float_1d, self.arr_float1_1d)[0, 1]
targ1 = np.cov(self.arr_float_1d.flat, self.arr_float1_1d.flat)[0, 1]
self.check_nancorr_nancov_1d(nanops.nancov, targ0, targ1)
@pytest.mark.parametrize(
"arr, correct",
[
("arr_complex", False),
("arr_int", False),
("arr_bool", False),
("arr_str", False),
("arr_utf", False),
("arr_complex_nan", False),
("arr_nan_nanj", False),
("arr_nan_infj", True),
("arr_complex_nan_infj", True),
],
)
def test_has_infs_non_float(request, arr, correct, disable_bottleneck):
val = request.getfixturevalue(arr)
while getattr(val, "ndim", True):
res0 = nanops._has_infs(val)
if correct:
assert res0
else:
assert not res0
if not hasattr(val, "ndim"):
break
# Reduce dimension for next step in the loop
val = np.take(val, 0, axis=-1)
@pytest.mark.parametrize(
"arr, correct",
[
("arr_float", False),
("arr_nan", False),
("arr_float_nan", False),
("arr_nan_nan", False),
("arr_float_inf", True),
("arr_inf", True),
("arr_nan_inf", True),
("arr_float_nan_inf", True),
("arr_nan_nan_inf", True),
],
)
@pytest.mark.parametrize("astype", [None, "f4", "f2"])
def test_has_infs_floats(request, arr, correct, astype, disable_bottleneck):
val = request.getfixturevalue(arr)
if astype is not None:
val = val.astype(astype)
while getattr(val, "ndim", True):
res0 = nanops._has_infs(val)
if correct:
assert res0
else:
assert not res0
if not hasattr(val, "ndim"):
break
# Reduce dimension for next step in the loop
val = np.take(val, 0, axis=-1)
@pytest.mark.parametrize(
"fixture", ["arr_float", "arr_complex", "arr_int", "arr_bool", "arr_str", "arr_utf"]
)
def test_bn_ok_dtype(fixture, request, disable_bottleneck):
obj = request.getfixturevalue(fixture)
assert nanops._bn_ok_dtype(obj.dtype, "test")
@pytest.mark.parametrize(
"fixture",
[
"arr_date",
"arr_tdelta",
"arr_obj",
],
)
def test_bn_not_ok_dtype(fixture, request, disable_bottleneck):
obj = request.getfixturevalue(fixture)
assert not nanops._bn_ok_dtype(obj.dtype, "test")
| TestnanopsDataFrame |
python | aimacode__aima-python | knowledge.py | {
"start": 6738,
"end": 13291
} | class ____(FolKB):
"""Hold the kb and other necessary elements required by FOIL."""
def __init__(self, clauses=None):
self.const_syms = set()
self.pred_syms = set()
super().__init__(clauses)
def tell(self, sentence):
if is_definite_clause(sentence):
self.clauses.append(sentence)
self.const_syms.update(constant_symbols(sentence))
self.pred_syms.update(predicate_symbols(sentence))
else:
raise Exception('Not a definite clause: {}'.format(sentence))
def foil(self, examples, target):
"""Learn a list of first-order horn clauses
'examples' is a tuple: (positive_examples, negative_examples).
positive_examples and negative_examples are both lists which contain substitutions."""
clauses = []
pos_examples = examples[0]
neg_examples = examples[1]
while pos_examples:
clause, extended_pos_examples = self.new_clause((pos_examples, neg_examples), target)
# remove positive examples covered by clause
pos_examples = self.update_examples(target, pos_examples, extended_pos_examples)
clauses.append(clause)
return clauses
def new_clause(self, examples, target):
"""Find a horn clause which satisfies part of the positive
examples but none of the negative examples.
The horn clause is specified as [consequent, list of antecedents]
Return value is the tuple (horn_clause, extended_positive_examples)."""
clause = [target, []]
extended_examples = examples
while extended_examples[1]:
l = self.choose_literal(self.new_literals(clause), extended_examples)
clause[1].append(l)
extended_examples = [sum([list(self.extend_example(example, l)) for example in
extended_examples[i]], []) for i in range(2)]
return clause, extended_examples[0]
def extend_example(self, example, literal):
"""Generate extended examples which satisfy the literal."""
# find all substitutions that satisfy literal
for s in self.ask_generator(subst(example, literal)):
s.update(example)
yield s
def new_literals(self, clause):
"""Generate new literals based on known predicate symbols.
Generated literal must share at least one variable with clause"""
share_vars = variables(clause[0])
for l in clause[1]:
share_vars.update(variables(l))
for pred, arity in self.pred_syms:
new_vars = {standardize_variables(expr('x')) for _ in range(arity - 1)}
for args in product(share_vars.union(new_vars), repeat=arity):
if any(var in share_vars for var in args):
# make sure we don't return an existing rule
if not Expr(pred, args) in clause[1]:
yield Expr(pred, *[var for var in args])
def choose_literal(self, literals, examples):
"""Choose the best literal based on the information gain."""
return max(literals, key=partial(self.gain, examples=examples))
def gain(self, l, examples):
"""
Find the utility of each literal when added to the body of the clause.
Utility function is:
gain(R, l) = T * (log_2 (post_pos / (post_pos + post_neg)) - log_2 (pre_pos / (pre_pos + pre_neg)))
where:
pre_pos = number of possitive bindings of rule R (=current set of rules)
pre_neg = number of negative bindings of rule R
post_pos = number of possitive bindings of rule R' (= R U {l} )
post_neg = number of negative bindings of rule R'
T = number of possitive bindings of rule R that are still covered
after adding literal l
"""
pre_pos = len(examples[0])
pre_neg = len(examples[1])
post_pos = sum([list(self.extend_example(example, l)) for example in examples[0]], [])
post_neg = sum([list(self.extend_example(example, l)) for example in examples[1]], [])
if pre_pos + pre_neg == 0 or len(post_pos) + len(post_neg) == 0:
return -1
# number of positive example that are represented in extended_examples
T = 0
for example in examples[0]:
represents = lambda d: all(d[x] == example[x] for x in example)
if any(represents(l_) for l_ in post_pos):
T += 1
value = T * (np.log2(len(post_pos) / (len(post_pos) + len(post_neg)) + 1e-12) -
np.log2(pre_pos / (pre_pos + pre_neg)))
return value
def update_examples(self, target, examples, extended_examples):
"""Add to the kb those examples what are represented in extended_examples
List of omitted examples is returned."""
uncovered = []
for example in examples:
represents = lambda d: all(d[x] == example[x] for x in example)
if any(represents(l) for l in extended_examples):
self.tell(subst(example, target))
else:
uncovered.append(example)
return uncovered
# ______________________________________________________________________________
def check_all_consistency(examples, h):
"""Check for the consistency of all examples under h."""
for e in examples:
if not is_consistent(e, h):
return False
return True
def check_negative_consistency(examples, h):
"""Check if the negative examples are consistent under h."""
for e in examples:
if e['GOAL']:
continue
if not is_consistent(e, [h]):
return False
return True
def disjunction_value(e, d):
"""The value of example e under disjunction d."""
for k, v in d.items():
if v[0] == '!':
# v is a NOT expression
# e[k], thus, should not be equal to v
if e[k] == v[1:]:
return False
elif e[k] != v:
return False
return True
def guess_value(e, h):
"""Guess value of example e under hypothesis h."""
for d in h:
if disjunction_value(e, d):
return True
return False
def is_consistent(e, h):
return e['GOAL'] == guess_value(e, h)
def false_positive(e, h):
return guess_value(e, h) and not e['GOAL']
def false_negative(e, h):
return e['GOAL'] and not guess_value(e, h)
| FOILContainer |
python | PrefectHQ__prefect | tests/server/models/test_orm.py | {
"start": 2559,
"end": 2820
} | class ____:
async def test_repr(self, db, session, flow):
assert repr(flow) == f"Flow(id={flow.id})"
assert repr(db.Flow()) == "Flow(id=None)"
flow_id = uuid4()
assert repr(db.Flow(id=flow_id)) == f"Flow(id={flow_id})"
| TestBase |
python | apache__airflow | airflow-core/src/airflow/cli/commands/info_command.py | {
"start": 1785,
"end": 1986
} | class ____(Anonymizer):
"""Do nothing."""
def _identity(self, value) -> str:
return value
process_path = process_username = process_url = _identity
del _identity
| NullAnonymizer |
python | apache__airflow | providers/databricks/tests/unit/databricks/hooks/test_databricks.py | {
"start": 86000,
"end": 87645
} | class ____:
"""
Tests for DatabricksHook using async methods when auth is done with Service
Principal Oauth token.
"""
@pytest.fixture(autouse=True)
def setup_connections(self, create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="databricks",
host=HOST,
login="c64f6d12-f6e4-45a4-846e-032b42b27758",
password="secret",
extra=json.dumps({"service_principal_oauth": True}),
)
)
self.hook = DatabricksHook(retry_args=DEFAULT_RETRY_ARGS)
@pytest.mark.asyncio
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.get")
@mock.patch("airflow.providers.databricks.hooks.databricks_base.aiohttp.ClientSession.post")
async def test_get_run_state(self, mock_post, mock_get):
mock_post.return_value.__aenter__.return_value.json = AsyncMock(
return_value=create_sp_token_for_resource()
)
mock_get.return_value.__aenter__.return_value.json = AsyncMock(return_value=GET_RUN_RESPONSE)
async with self.hook:
run_state = await self.hook.a_get_run_state(RUN_ID)
assert run_state == RunState(LIFE_CYCLE_STATE, RESULT_STATE, STATE_MESSAGE)
mock_get.assert_called_once_with(
get_run_endpoint(HOST),
json={"run_id": RUN_ID},
auth=BearerAuth(TOKEN),
headers=self.hook.user_agent_header,
timeout=self.hook.timeout_seconds,
)
| TestDatabricksHookAsyncSpToken |
python | sympy__sympy | sympy/physics/quantum/grover.py | {
"start": 6055,
"end": 10452
} | class ____(Gate):
"""General n qubit W Gate in Grover's algorithm.
The gate performs the operation ``2|phi><phi| - 1`` on some qubits.
``|phi> = (tensor product of n Hadamards)*(|0> with n qubits)``
Parameters
==========
nqubits : int
The number of qubits to operate on
"""
gate_name = 'W'
gate_name_latex = 'W'
@classmethod
def _eval_args(cls, args):
if len(args) != 1:
raise QuantumError(
'Insufficient/excessive arguments to W gate. Please ' +
'supply the number of qubits to operate on.'
)
args = UnitaryOperator._eval_args(args)
if not args[0].is_Integer:
raise TypeError('Integer expected, got: %r' % args[0])
return args
#-------------------------------------------------------------------------
# Properties
#-------------------------------------------------------------------------
@property
def targets(self):
return sympify(tuple(reversed(range(self.args[0]))))
#-------------------------------------------------------------------------
# Apply
#-------------------------------------------------------------------------
def _apply_operator_Qubit(self, qubits, **options):
"""
qubits: a set of qubits (Qubit)
Returns: quantum object (quantum expression - QExpr)
"""
if qubits.nqubits != self.nqubits:
raise QuantumError(
'WGate operates on %r qubits, got: %r'
% (self.nqubits, qubits.nqubits)
)
# See 'Quantum Computer Science' by David Mermin p.92 -> W|a> result
# Return (2/(sqrt(2^n)))|phi> - |a> where |a> is the current basis
# state and phi is the superposition of basis states (see function
# create_computational_basis above)
basis_states = superposition_basis(self.nqubits)
change_to_basis = (2/sqrt(2**self.nqubits))*basis_states
return change_to_basis - qubits
def grover_iteration(qstate, oracle):
"""Applies one application of the Oracle and W Gate, WV.
Parameters
==========
qstate : Qubit
A superposition of qubits.
oracle : OracleGate
The black box operator that flips the sign of the desired basis qubits.
Returns
=======
Qubit : The qubits after applying the Oracle and W gate.
Examples
========
Perform one iteration of grover's algorithm to see a phase change::
>>> from sympy.physics.quantum.qapply import qapply
>>> from sympy.physics.quantum.qubit import IntQubit
>>> from sympy.physics.quantum.grover import OracleGate
>>> from sympy.physics.quantum.grover import superposition_basis
>>> from sympy.physics.quantum.grover import grover_iteration
>>> numqubits = 2
>>> basis_states = superposition_basis(numqubits)
>>> f = lambda qubits: qubits == IntQubit(2)
>>> v = OracleGate(numqubits, f)
>>> qapply(grover_iteration(basis_states, v))
|2>
"""
wgate = WGate(oracle.nqubits)
return wgate*oracle*qstate
def apply_grover(oracle, nqubits, iterations=None):
"""Applies grover's algorithm.
Parameters
==========
oracle : callable
The unknown callable function that returns true when applied to the
desired qubits and false otherwise.
Returns
=======
state : Expr
The resulting state after Grover's algorithm has been iterated.
Examples
========
Apply grover's algorithm to an even superposition of 2 qubits::
>>> from sympy.physics.quantum.qapply import qapply
>>> from sympy.physics.quantum.qubit import IntQubit
>>> from sympy.physics.quantum.grover import apply_grover
>>> f = lambda qubits: qubits == IntQubit(2)
>>> qapply(apply_grover(f, 2))
|2>
"""
if nqubits <= 0:
raise QuantumError(
'Grover\'s algorithm needs nqubits > 0, received %r qubits'
% nqubits
)
if iterations is None:
iterations = floor(sqrt(2**nqubits)*(pi/4))
v = OracleGate(nqubits, oracle)
iterated = superposition_basis(nqubits)
for iter in range(iterations):
iterated = grover_iteration(iterated, v)
iterated = qapply(iterated)
return iterated
| WGate |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_gridlines05.py | {
"start": 315,
"end": 1664
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_gridlines05.xlsx")
def test_create_file(self):
"""Test XlsxWriter gridlines."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [80072064, 79959168]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_x_axis(
{
"major_gridlines": {"visible": 1},
"minor_gridlines": {"visible": 1},
}
)
chart.set_y_axis(
{
"major_gridlines": {"visible": 1},
"minor_gridlines": {"visible": 1},
}
)
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | pandas-dev__pandas | pandas/tests/scalar/timestamp/test_arithmetic.py | {
"start": 10855,
"end": 11286
} | class ____(datetime):
pass
@pytest.mark.parametrize(
"lh,rh",
[
(SubDatetime(2000, 1, 1), Timedelta(hours=1)),
(Timedelta(hours=1), SubDatetime(2000, 1, 1)),
],
)
def test_dt_subclass_add_timedelta(lh, rh):
# GH#25851
# ensure that subclassed datetime works for
# Timedelta operations
result = lh + rh
expected = SubDatetime(2000, 1, 1, 1)
assert result == expected
| SubDatetime |
python | py-pdf__pypdf | pypdf/constants.py | {
"start": 20643,
"end": 22239
} | class ____(IntFlag):
NONE = 0
XOBJECT_IMAGES = auto()
INLINE_IMAGES = auto()
DRAWING_IMAGES = auto()
ALL = XOBJECT_IMAGES | INLINE_IMAGES | DRAWING_IMAGES
IMAGES = ALL # for consistency with ObjectDeletionFlag
_INLINE_IMAGE_VALUE_MAPPING = {
"/G": "/DeviceGray",
"/RGB": "/DeviceRGB",
"/CMYK": "/DeviceCMYK",
"/I": "/Indexed",
"/AHx": "/ASCIIHexDecode",
"/A85": "/ASCII85Decode",
"/LZW": "/LZWDecode",
"/Fl": "/FlateDecode",
"/RL": "/RunLengthDecode",
"/CCF": "/CCITTFaxDecode",
"/DCT": "/DCTDecode",
"/DeviceGray": "/DeviceGray",
"/DeviceRGB": "/DeviceRGB",
"/DeviceCMYK": "/DeviceCMYK",
"/Indexed": "/Indexed",
"/ASCIIHexDecode": "/ASCIIHexDecode",
"/ASCII85Decode": "/ASCII85Decode",
"/LZWDecode": "/LZWDecode",
"/FlateDecode": "/FlateDecode",
"/RunLengthDecode": "/RunLengthDecode",
"/CCITTFaxDecode": "/CCITTFaxDecode",
"/DCTDecode": "/DCTDecode",
"/RelativeColorimetric": "/RelativeColorimetric",
}
_INLINE_IMAGE_KEY_MAPPING = {
"/BPC": "/BitsPerComponent",
"/CS": "/ColorSpace",
"/D": "/Decode",
"/DP": "/DecodeParms",
"/F": "/Filter",
"/H": "/Height",
"/W": "/Width",
"/I": "/Interpolate",
"/Intent": "/Intent",
"/IM": "/ImageMask",
"/BitsPerComponent": "/BitsPerComponent",
"/ColorSpace": "/ColorSpace",
"/Decode": "/Decode",
"/DecodeParms": "/DecodeParms",
"/Filter": "/Filter",
"/Height": "/Height",
"/Width": "/Width",
"/Interpolate": "/Interpolate",
"/ImageMask": "/ImageMask",
}
| ImageType |
python | tornadoweb__tornado | tornado/test/httpclient_test.py | {
"start": 2691,
"end": 3033
} | class ____(RequestHandler):
def get(self):
self.set_status(304)
self.set_header("Content-Length", 42)
def _clear_representation_headers(self):
# Tornado strips content-length from 304 responses, but here we
# want to simulate servers that include the headers anyway.
pass
| ContentLength304Handler |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/ruff/RUF008.py | {
"start": 835,
"end": 1147
} | class ____:
mutable_default: 'list[int]' = []
immutable_annotation: 'typing.Sequence[int]' = []
without_annotation = []
correct_code: 'list[int]' = KNOWINGLY_MUTABLE_DEFAULT
perfectly_fine: 'list[int]' = field(default_factory=list)
class_variable: 'typing.ClassVar[list[int]]'= []
| AWithQuotes |
python | realpython__materials | python-built-in-functions/point_v2.py | {
"start": 0,
"end": 507
} | class ____:
def __init__(self, x, y):
self.x = x
self.y = y
@property
def x(self):
return self._x
@x.setter
def x(self, value):
self._x = self.validate(value)
@property
def y(self):
return self._y
@y.setter
def y(self, value):
self._y = self.validate(value)
def validate(self, value):
if not isinstance(value, int | float):
raise ValueError("coordinates must be numbers")
return value
| Point |
python | huggingface__transformers | src/transformers/models/gemma3n/modeling_gemma3n.py | {
"start": 74868,
"end": 75862
} | class ____(PreTrainedModel):
config: Gemma3nConfig
base_model_prefix = "model"
supports_gradient_checkpointing = True
_no_split_modules = ["Gemma3nTextDecoderLayer"]
_skip_keys_device_placement = ["past_key_values"]
_supports_flash_attn = True
_supports_sdpa = True
_supports_flex_attn = True
_can_compile_fullgraph = True
_supports_attention_backend = True
_can_record_outputs = {
"hidden_states": Gemma3nDecoderLayer,
"attentions": Gemma3nAttention,
}
input_modalities = ("image", "text", "audio")
@torch.no_grad()
def _init_weights(self, module):
super()._init_weights(module)
if isinstance(module, Gemma3nAudioCumulativeGroupNorm):
init.ones_(module.weight)
elif isinstance(module, Gemma3nAudioAttention):
init.zeros_(module.per_dim_scale)
elif isinstance(module, Gemma3nTextAltUp):
init.zeros_(module.correct_output_scale)
| Gemma3nPreTrainedModel |
python | doocs__leetcode | solution/1200-1299/1272.Remove Interval/Solution.py | {
"start": 0,
"end": 441
} | class ____:
def removeInterval(
self, intervals: List[List[int]], toBeRemoved: List[int]
) -> List[List[int]]:
x, y = toBeRemoved
ans = []
for a, b in intervals:
if a >= y or b <= x:
ans.append([a, b])
else:
if a < x:
ans.append([a, x])
if b > y:
ans.append([y, b])
return ans
| Solution |
python | google__pytype | pytype/tests/test_errors2.py | {
"start": 20636,
"end": 22551
} | class ____(test_base.BaseTest):
"""Test for name errors on the attributes of partially defined classes.
For code like:
class C:
x = 0
class D:
print(x) # name error!
unlike the similar examples in ClassAttributeNameErrorTest, using 'C.x' does
not work because 'C' has not yet been fully defined. We add this explanation
to the error message.
"""
POST = "before the class is fully defined"
def test_nested_classes(self):
errors = self.CheckWithErrors("""
class C:
x = 0
class D:
y = 1
class E:
print(x) # name-error[e1]
print(y) # name-error[e2]
""")
self.assertErrorSequences(
errors,
{
"e1": ["Cannot reference 'x' from class 'C'", self.POST],
"e2": ["Cannot reference 'y' from class 'C.D'", self.POST],
},
)
def test_nested_classes_in_function(self):
errors = self.CheckWithErrors("""
def f():
class C:
x = 0
class D:
print(x) # name-error[e]
""")
self.assertErrorSequences(
errors, {"e": ["Cannot reference 'x' from class 'f.C'", self.POST]}
)
def test_unbound_local_precedence(self):
# We should report the UnboundLocalError in preference to one about C not
# being fully defined, since print(x) would resolve to f.x, not f.C.x, if
# the redefinition in D were removed.
errors = self.CheckWithErrors("""
def f():
x = 0
class C:
x = 1
class D:
print(x) # name-error[e]
x = 2
""")
self.assertErrorSequences(
errors,
{
"e": [
"Add `nonlocal x` in class 'f.C.D' to",
"reference 'x' from function 'f'",
]
},
)
if __name__ == "__main__":
test_base.main()
| PartiallyDefinedClassNameErrorTest |
python | getsentry__sentry | src/sentry/deletions/models/scheduleddeletion.py | {
"start": 5709,
"end": 6509
} | class ____(BaseScheduledDeletion):
"""
This model schedules deletions to be processed in region and monolith silo modes. As new region silo test coverage
increases, new scheduled deletions will begin to occur in this table. Monolith (current saas) will continue
processing them alongside the original scheduleddeletions table, but in the future this table will only be
processed by region silos.
"""
class Meta:
unique_together = (("app_label", "model_name", "object_id"),)
app_label = "sentry"
db_table = "sentry_regionscheduleddeletion"
def get_regional_scheduled_deletion(mode: SiloMode) -> type[BaseScheduledDeletion]:
if mode != SiloMode.CONTROL:
return RegionScheduledDeletion
return ScheduledDeletion
| RegionScheduledDeletion |
python | getsentry__sentry | tests/sentry/tasks/test_web_vitals_issue_detection.py | {
"start": 508,
"end": 26367
} | class ____(TestCase, SnubaTestCase, SpanTestCase):
def setUp(self):
super().setUp()
self.ten_mins_ago = before_now(minutes=10)
@contextmanager
def mock_seer_ack(self):
with (
patch(
"sentry.tasks.web_vitals_issue_detection.get_seer_org_acknowledgement"
) as mock_ack,
):
mock_ack.return_value = True
yield {"mock_ack": mock_ack}
@contextmanager
def mock_code_mapping(self):
with (
patch(
"sentry.tasks.web_vitals_issue_detection.get_autofix_repos_from_project_code_mappings"
) as mock_repos,
):
mock_repos.return_value = [
{
"provider": "integrations:github",
"owner": "test-owner",
"name": "test-repo",
}
]
yield {"mock_repos": mock_repos}
@patch("sentry.tasks.web_vitals_issue_detection.detect_web_vitals_issues_for_project.delay")
def test_run_detection_dispatches_sub_tasks_when_enabled(self, mock_delay):
project = self.create_project()
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
):
run_web_vitals_issue_detection()
assert mock_delay.called
assert mock_delay.call_args[0][0] == project.id
@patch("sentry.tasks.web_vitals_issue_detection.detect_web_vitals_issues_for_project.delay")
def test_run_detection_skips_when_seer_not_acknowledged(self, mock_delay):
project = self.create_project()
with (
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
):
run_web_vitals_issue_detection()
assert not mock_delay.called
@patch("sentry.tasks.web_vitals_issue_detection.detect_web_vitals_issues_for_project.delay")
def test_run_detection_skips_when_no_github_code_mappings(self, mock_delay):
project = self.create_project()
with (
self.mock_seer_ack(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
):
run_web_vitals_issue_detection()
assert not mock_delay.called
@patch("sentry.tasks.web_vitals_issue_detection.detect_web_vitals_issues_for_project.delay")
def test_run_detection_skips_when_not_allowlisted(self, mock_delay):
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [],
}
),
self.feature("organizations:gen-ai-features"),
):
run_web_vitals_issue_detection()
assert not mock_delay.called
@pytest.mark.snuba
@patch("sentry.web_vitals.issue_platform_adapter.produce_occurrence_to_kafka")
def test_run_detection_produces_occurrences(self, mock_produce_occurrence_to_kafka):
project = self.create_project()
spans = []
# web vital issue detection requires at least 10 samples per vital to create an issue
for _ in range(10):
spans.extend(
[
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.5},
"lcp": {"value": 3500},
},
),
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.cls",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"cls": {"value": 0.15},
},
),
self.create_span(
project=project,
extra_data={
"description": "pageload",
"sentry_tags": {
"op": "ui.interaction.click",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.inp": {"value": 0.85},
"inp": {"value": 200},
},
),
self.create_span(
project=project,
extra_data={
"description": "pageload",
"sentry_tags": {
"op": "pageload",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=3000,
measurements={
"score.ratio.fcp": {"value": 0.8},
"score.ratio.ttfb": {"value": 0.9},
"fcp": {"value": 1800},
"ttfb": {"value": 200},
},
),
]
)
self.store_spans(spans, is_eap=True)
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
TaskRunner(),
):
run_web_vitals_issue_detection()
assert mock_produce_occurrence_to_kafka.call_count == 2
call_args_list = mock_produce_occurrence_to_kafka.call_args_list
# Common attributes
for call in call_args_list:
call_kwargs = call.kwargs
occurrence = call_kwargs["occurrence"]
event_data = call_kwargs["event_data"]
assert occurrence.type == WebVitalsGroup
assert occurrence.project_id == project.id
assert occurrence.evidence_data == {"transaction": "/home"}
assert len(occurrence.evidence_display) == 1
assert occurrence.evidence_display[0].name == "Transaction"
assert occurrence.evidence_display[0].value == "/home"
assert occurrence.level == "info"
assert occurrence.culprit == "/home"
assert event_data["project_id"] == project.id
assert event_data["tags"]["transaction"] == "/home"
assert "trace" in event_data["contexts"]
lcp_call = call_args_list[0]
lcp_occurrence = lcp_call.kwargs["occurrence"]
assert lcp_occurrence.fingerprint == ["d94185e6d794589212c74476702515734b703f86"]
assert lcp_occurrence.issue_title == "The page /home was slow to load and render"
assert (
lcp_occurrence.subtitle == "/home has an LCP score of 0.5 and an FCP score of 0.8"
)
lcp_event_data = lcp_call.kwargs["event_data"]
assert lcp_event_data["tags"]["lcp_score"] == "0.5"
assert lcp_event_data["tags"]["lcp"] == "3500.0"
inp_call = call_args_list[1]
inp_occurrence = inp_call.kwargs["occurrence"]
assert inp_occurrence.fingerprint == ["d8b421cb6e5476121654d1383e80f4515a7f58b9"]
assert (
inp_occurrence.issue_title == "The page /home responded slowly to user interactions"
)
assert inp_occurrence.subtitle == "/home has an INP score of 0.85"
inp_event_data = inp_call.kwargs["event_data"]
assert inp_event_data["tags"]["inp_score"] == "0.85"
assert inp_event_data["tags"]["inp"] == "200.0"
@pytest.mark.snuba
@patch("sentry.web_vitals.issue_platform_adapter.produce_occurrence_to_kafka")
def test_run_detection_groups_rendering_vitals(self, mock_produce_occurrence_to_kafka):
project = self.create_project()
spans = []
# web vital issue detection requires at least 10 samples per vital to create an issue
for _ in range(10):
spans.extend(
[
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.5},
"lcp": {"value": 3500},
},
),
self.create_span(
project=project,
extra_data={
"description": "pageload",
"sentry_tags": {
"op": "pageload",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=3000,
measurements={
"score.ratio.fcp": {"value": 0.8},
"score.ratio.ttfb": {"value": 0.6},
"fcp": {"value": 1800},
"ttfb": {"value": 2000},
},
),
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/settings",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.5},
"lcp": {"value": 3500},
},
),
]
)
self.store_spans(spans, is_eap=True)
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
TaskRunner(),
):
run_web_vitals_issue_detection()
assert mock_produce_occurrence_to_kafka.call_count == 2
call_args_list = mock_produce_occurrence_to_kafka.call_args_list
# Common attributes
for call in call_args_list:
call_kwargs = call.kwargs
occurrence = call_kwargs["occurrence"]
event_data = call_kwargs["event_data"]
assert occurrence.type == WebVitalsGroup
assert occurrence.project_id == project.id
assert len(occurrence.evidence_display) == 1
assert occurrence.evidence_display[0].name == "Transaction"
assert occurrence.level == "info"
assert event_data["project_id"] == project.id
assert "trace" in event_data["contexts"]
assert call_args_list[0].kwargs["event_data"]["tags"]["transaction"] == "/home"
assert call_args_list[1].kwargs["event_data"]["tags"]["transaction"] == "/settings"
lcp_call = call_args_list[0]
lcp_occurrence = lcp_call.kwargs["occurrence"]
assert lcp_occurrence.fingerprint == ["d94185e6d794589212c74476702515734b703f86"]
assert lcp_occurrence.issue_title == "The page /home was slow to load and render"
assert (
lcp_occurrence.subtitle
== "/home has an LCP score of 0.5, an FCP score of 0.8 and a TTFB score of 0.6"
)
lcp_event_data = lcp_call.kwargs["event_data"]
assert lcp_event_data["tags"]["lcp_score"] == "0.5"
assert lcp_event_data["tags"]["fcp_score"] == "0.8"
assert lcp_event_data["tags"]["ttfb_score"] == "0.6"
assert lcp_event_data["tags"]["lcp"] == "3500.0"
assert lcp_event_data["tags"]["fcp"] == "1800.0"
assert lcp_event_data["tags"]["ttfb"] == "2000.0"
@pytest.mark.snuba
@patch("sentry.web_vitals.issue_platform_adapter.produce_occurrence_to_kafka")
def test_run_detection_does_not_produce_occurrences_for_existing_issues(
self, mock_produce_occurrence_to_kafka
):
project = self.create_project()
spans = [
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.5},
"lcp": {"value": 3500},
},
)
for _ in range(10)
] # web vital issue detection requires at least 10 samples to create an issue
self.store_spans(spans, is_eap=True)
# Create an existing issue group so that the web vital issue detection does not produce a new occurrence
group = self.create_group(project=project)
rendering_fingerprint = "d94185e6d794589212c74476702515734b703f86"
hashed_fingerprint = hash_fingerprint([rendering_fingerprint])
GroupHash.objects.create(
project=project,
group=group,
hash=hashed_fingerprint[0],
)
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
TaskRunner(),
):
run_web_vitals_issue_detection()
# Should not produce any occurrences because the LCP issue already exists
assert mock_produce_occurrence_to_kafka.call_count == 0
@pytest.mark.snuba
@patch("sentry.web_vitals.issue_platform_adapter.produce_occurrence_to_kafka")
def test_run_detection_does_not_create_issue_on_insufficient_samples(
self, mock_produce_occurrence_to_kafka
):
project = self.create_project()
spans = [
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.5},
"lcp": {"value": 3500},
},
)
for _ in range(9)
]
self.store_spans(spans, is_eap=True)
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
TaskRunner(),
):
run_web_vitals_issue_detection()
assert mock_produce_occurrence_to_kafka.call_count == 0
@pytest.mark.snuba
@patch("sentry.web_vitals.issue_platform_adapter.produce_occurrence_to_kafka")
def test_run_detection_selects_trace_closest_to_p75_web_vital_value(
self, mock_produce_occurrence_to_kafka
):
project = self.create_project()
spans = [
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.1},
"lcp": {"value": 100},
},
)
for _ in range(7)
]
p75_span = self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.5},
"lcp": {"value": 2000},
},
)
spans.append(p75_span)
spans.extend(
[
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.2},
"lcp": {"value": 3500},
},
)
for _ in range(2)
]
)
self.store_spans(spans, is_eap=True)
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
TaskRunner(),
):
run_web_vitals_issue_detection()
assert mock_produce_occurrence_to_kafka.call_count == 1
call_args_list = mock_produce_occurrence_to_kafka.call_args_list
assert call_args_list[0].kwargs["event_data"]["tags"]["lcp"] == "2000.0"
assert (
call_args_list[0].kwargs["event_data"]["contexts"]["trace"]["trace_id"]
== p75_span["trace_id"]
)
@pytest.mark.snuba
@patch("sentry.web_vitals.issue_platform_adapter.produce_occurrence_to_kafka")
def test_run_detection_selects_trace_from_worst_score(self, mock_produce_occurrence_to_kafka):
project = self.create_project()
spans = [
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.1},
"lcp": {"value": 100},
},
)
for _ in range(7)
]
p75_span = self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.5},
"lcp": {"value": 2000},
},
)
spans.append(p75_span)
spans.extend(
[
self.create_span(
project=project,
extra_data={
"sentry_tags": {
"op": "ui.webvitals.lcp",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=100,
measurements={
"score.ratio.lcp": {"value": 0.2},
"lcp": {"value": 3500},
},
)
for _ in range(2)
]
)
for _ in range(10):
spans.extend(
[
self.create_span(
project=project,
extra_data={
"description": "pageload",
"sentry_tags": {
"op": "pageload",
"transaction": "/home",
},
},
start_ts=self.ten_mins_ago,
duration=3000,
measurements={
"score.ratio.fcp": {"value": 0.8},
"score.ratio.ttfb": {"value": 0.6},
"fcp": {"value": 1800},
"ttfb": {"value": 2000},
},
),
]
)
self.store_spans(spans, is_eap=True)
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
TaskRunner(),
):
run_web_vitals_issue_detection()
assert mock_produce_occurrence_to_kafka.call_count == 1
call_args_list = mock_produce_occurrence_to_kafka.call_args_list
assert call_args_list[0].kwargs["event_data"]["tags"]["lcp"] == "2000.0"
assert call_args_list[0].kwargs["event_data"]["tags"]["fcp"] == "1800.0"
assert call_args_list[0].kwargs["event_data"]["tags"]["ttfb"] == "2000.0"
assert (
call_args_list[0].kwargs["event_data"]["contexts"]["trace"]["trace_id"]
== p75_span["trace_id"]
)
@patch("sentry.tasks.web_vitals_issue_detection.detect_web_vitals_issues_for_project.delay")
@patch("sentry.tasks.web_vitals_issue_detection.get_merged_settings")
def test_run_detection_does_not_run_for_project_when_user_has_disabled(
self, mock_get_merged_settings, mock_detect_web_vitals_issues_for_project
):
mock_get_merged_settings.return_value = {
"web_vitals_detection_enabled": False,
}
project = self.create_project()
with (
self.mock_seer_ack(),
self.mock_code_mapping(),
self.options(
{
"issue-detection.web-vitals-detection.enabled": True,
"issue-detection.web-vitals-detection.projects-allowlist": [project.id],
}
),
self.feature("organizations:gen-ai-features"),
TaskRunner(),
):
run_web_vitals_issue_detection()
assert not mock_detect_web_vitals_issues_for_project.called
| WebVitalsIssueDetectionDataTest |
python | eth-brownie__brownie | brownie/network/rpc/__init__.py | {
"start": 1194,
"end": 8598
} | class ____(metaclass=_Singleton):
def __init__(self) -> None:
self.process: Union[psutil.Popen, psutil.Process] = None
self.backend: Any = ganache
atexit.register(self._at_exit)
def _at_exit(self) -> None:
if not self.is_active():
return
if self.process.parent() == psutil.Process():
if getattr(self.process, "stdout", None) is not None:
self.process.stdout.close()
if getattr(self.process, "stderr", None) is not None:
self.process.stderr.close()
self.kill(False)
def launch(self, cmd: str, **kwargs: Dict) -> None:
if self.is_active():
raise SystemError("RPC is already active.")
for key, module in LAUNCH_BACKENDS.items():
if cmd.lower().startswith(key):
self.backend = module
break
self.process = self.backend.launch(cmd, **kwargs)
# check that web3 can connect
if not web3.provider:
chain._network_disconnected()
return
uri = web3.provider.endpoint_uri if web3.provider else None
for i in range(100):
if web3.isConnected():
web3.reset_middlewares()
self.backend.on_connection()
chain._network_connected()
return
time.sleep(0.1)
if isinstance(self.process, psutil.Popen):
self.process.poll()
if not self.process.is_running():
self.kill(False)
raise RPCProcessError(cmd, uri)
self.kill(False)
raise RPCConnectionError(cmd, self.process, uri)
def attach(self, laddr: Union[str, Tuple]) -> None:
"""Attaches to an already running RPC client subprocess.
Args:
laddr: Address that the client is listening at. Can be supplied as a
string "http://127.0.0.1:8545" or tuple ("127.0.0.1", 8545)"""
if self.is_active():
raise SystemError("RPC is already active.")
if isinstance(laddr, str):
o = urlparse(laddr)
if not o.port:
raise ValueError("No RPC port given")
laddr = (o.hostname, o.port)
ip = socket.gethostbyname(laddr[0])
resolved_addr = (ip, laddr[1])
pid = self._find_rpc_process_pid(resolved_addr)
print(f"Attached to local RPC client listening at '{laddr[0]}:{laddr[1]}'...")
self.process = psutil.Process(pid)
for key, module in ATTACH_BACKENDS.items():
if web3.client_version.lower().startswith(key):
self.backend = module
break
web3.reset_middlewares()
self.backend.on_connection()
chain._network_connected()
def kill(self, exc: bool = True) -> None:
"""Terminates the RPC process and all children with SIGKILL.
Args:
exc: if True, raises SystemError if subprocess is not active."""
if not self.is_active():
if not exc:
return
raise SystemError("RPC is not active.")
try:
print("Terminating local RPC client...")
except ValueError:
pass
for child in self.process.children(recursive=True):
try:
child.kill()
except psutil.NoSuchProcess:
pass
self.process.kill()
self.process.wait()
chain._network_disconnected()
def is_active(self) -> bool:
"""Returns True if Rpc client is currently active."""
if not self.process:
return False
if isinstance(self.process, psutil.Popen):
self.process.poll()
return self.process.is_running()
def is_child(self) -> bool:
"""Returns True if the Rpc client is active and was launched by Brownie."""
if not self.is_active():
return False
return self.process.parent() == psutil.Process()
@internal
def sleep(self, seconds: int) -> int:
return self.backend.sleep(seconds)
@internal
def mine(self, timestamp: int = None) -> int:
self.backend.mine(timestamp)
return web3.eth.block_number
@internal
def snapshot(self) -> int:
return self.backend.snapshot()
@internal
def revert(self, snapshot_id: int) -> int:
self.backend.revert(snapshot_id)
return web3.eth.block_number
def unlock_account(self, address: str) -> None:
self.backend.unlock_account(address)
def _find_rpc_process_pid(self, laddr: Tuple) -> int:
try:
# default case with an already running local RPC process
return self._get_pid_from_connections(laddr)
except ProcessLookupError:
# if no local RPC process could be found we can try to find a dockerized one
if platform.system() == "Darwin":
return self._get_pid_from_docker_backend()
else:
return self._get_pid_from_net_connections(laddr)
def _check_proc_connections(self, proc: psutil.Process, laddr: Tuple) -> bool:
try:
return laddr in [i.laddr for i in proc.connections()]
except psutil.AccessDenied:
return False
except psutil.ZombieProcess:
return False
except psutil.NoSuchProcess:
return False
def _check_net_connections(self, connection: Any, laddr: Tuple) -> bool:
if connection.pid is None:
return False
if connection.laddr == laddr:
return True
elif connection.raddr == laddr:
return True
else:
return False
def _get_pid_from_connections(self, laddr: Tuple) -> int:
try:
proc = next(i for i in psutil.process_iter() if self._check_proc_connections(i, laddr))
return self._get_proc_pid(proc)
except StopIteration:
raise ProcessLookupError(
"Could not attach to RPC process by querying 'proc.connections()'"
) from None
def _get_pid_from_net_connections(self, laddr: Tuple) -> int:
try:
proc = next(
i
for i in psutil.net_connections(kind="tcp")
if self._check_net_connections(i, laddr)
)
return self._get_proc_pid(proc)
except StopIteration:
raise ProcessLookupError(
"Could not attach to RPC process by querying 'proc.net_connections()'"
) from None
def _get_pid_from_docker_backend(self) -> int:
# OSX workaround for https://github.com/giampaolo/psutil/issues/1219
proc = self._find_proc_by_name("com.docker.backend")
return self._get_proc_pid(proc)
def _get_proc_pid(self, proc: psutil.Process) -> int:
if proc:
return proc.pid
else:
raise ProcessLookupError(
"Could not attach to RPC process. If this issue persists, try killing "
"the RPC and let Brownie launch it as a child process."
) from None
def _find_proc_by_name(self, process_name: str) -> psutil.Process:
for proc in psutil.process_iter():
if process_name.lower() in proc.name().lower():
return proc
| Rpc |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 56999,
"end": 57354
} | class ____(BaseModel):
usage: Optional["Usage"] = Field(default=None, description="")
time: Optional[float] = Field(default=None, description="Time spent to process this request")
status: Optional[str] = Field(default=None, description="")
result: Optional[List[List["ScoredPoint"]]] = Field(default=None, description="")
| InlineResponse20017 |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 26210,
"end": 26802
} | class ____(PrefectBaseModel):
"""Filter by `WorkQueue.name`."""
any_: Optional[List[str]] = Field(
default=None,
description="A list of work queue names to include",
examples=[["wq-1", "wq-2"]],
)
startswith_: Optional[List[str]] = Field(
default=None,
description=(
"A list of case-insensitive starts-with matches. For example, "
" passing 'marvin' will match "
"'marvin', and 'Marvin-robot', but not 'sad-marvin'."
),
examples=[["marvin", "Marvin-robot"]],
)
| WorkQueueFilterName |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 69902,
"end": 70181
} | class ____(sgqlc.types.Input):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("direction",)
direction = sgqlc.types.Field(
sgqlc.types.non_null(OrderDirection), graphql_name="direction"
)
| ContributionOrder |
python | Farama-Foundation__Gymnasium | gymnasium/envs/classic_control/cartpole.py | {
"start": 491,
"end": 13958
} | class ____(gym.Env[np.ndarray, Union[int, np.ndarray]]):
"""
## Description
This environment corresponds to the version of the cart-pole problem described by Barto, Sutton, and Anderson in
["Neuronlike Adaptive Elements That Can Solve Difficult Learning Control Problem"](https://ieeexplore.ieee.org/document/6313077).
A pole is attached by an un-actuated joint to a cart, which moves along a frictionless track.
The pendulum is placed upright on the cart and the goal is to balance the pole by applying forces
in the left and right direction on the cart.
## Action Space
The action is a `ndarray` with shape `(1,)` which can take values `{0, 1}` indicating the direction
of the fixed force the cart is pushed with.
- 0: Push cart to the left
- 1: Push cart to the right
**Note**: The velocity that is reduced or increased by the applied force is not fixed and it depends on the angle
the pole is pointing. The center of gravity of the pole varies the amount of energy needed to move the cart underneath it
## Observation Space
The observation is a `ndarray` with shape `(4,)` with the values corresponding to the following positions and velocities:
| Num | Observation | Min | Max |
|-----|-----------------------|---------------------|-------------------|
| 0 | Cart Position | -4.8 | 4.8 |
| 1 | Cart Velocity | -Inf | Inf |
| 2 | Pole Angle | ~ -0.418 rad (-24°) | ~ 0.418 rad (24°) |
| 3 | Pole Angular Velocity | -Inf | Inf |
**Note:** While the ranges above denote the possible values for observation space of each element,
it is not reflective of the allowed values of the state space in an unterminated episode. Particularly:
- The cart x-position (index 0) can be take values between `(-4.8, 4.8)`, but the episode terminates
if the cart leaves the `(-2.4, 2.4)` range.
- The pole angle can be observed between `(-.418, .418)` radians (or **±24°**), but the episode terminates
if the pole angle is not in the range `(-.2095, .2095)` (or **±12°**)
## Rewards
Since the goal is to keep the pole upright for as long as possible, by default, a reward of `+1` is given for every step taken, including the termination step. The default reward threshold is 500 for v1 and 200 for v0 due to the time limit on the environment.
If `sutton_barto_reward=True`, then a reward of `0` is awarded for every non-terminating step and `-1` for the terminating step. As a result, the reward threshold is 0 for v0 and v1.
## Starting State
All observations are assigned a uniformly random value in `(-0.05, 0.05)`
## Episode End
The episode ends if any one of the following occurs:
1. Termination: Pole Angle is greater than ±12°
2. Termination: Cart Position is greater than ±2.4 (center of the cart reaches the edge of the display)
3. Truncation: Episode length is greater than 500 (200 for v0)
## Arguments
Cartpole only has `render_mode` as a keyword for `gymnasium.make`.
On reset, the `options` parameter allows the user to change the bounds used to determine the new random state.
```python
>>> import gymnasium as gym
>>> env = gym.make("CartPole-v1", render_mode="rgb_array")
>>> env
<TimeLimit<OrderEnforcing<PassiveEnvChecker<CartPoleEnv<CartPole-v1>>>>>
>>> env.reset(seed=123, options={"low": -0.1, "high": 0.1}) # default low=-0.05, high=0.05
(array([ 0.03647037, -0.0892358 , -0.05592803, -0.06312564], dtype=float32), {})
```
| Parameter | Type | Default | Description |
|-------------------------|------------|-------------------------|-----------------------------------------------------------------------------------------------|
| `sutton_barto_reward` | **bool** | `False` | If `True` the reward function matches the original sutton barto implementation |
## Vectorized environment
To increase steps per seconds, users can use a custom vector environment or with an environment vectorizor.
```python
>>> import gymnasium as gym
>>> envs = gym.make_vec("CartPole-v1", num_envs=3, vectorization_mode="vector_entry_point")
>>> envs
CartPoleVectorEnv(CartPole-v1, num_envs=3)
>>> envs = gym.make_vec("CartPole-v1", num_envs=3, vectorization_mode="sync")
>>> envs
SyncVectorEnv(CartPole-v1, num_envs=3)
```
## Version History
* v1: `max_time_steps` raised to 500.
- In Gymnasium `1.0.0a2` the `sutton_barto_reward` argument was added (related [GitHub issue](https://github.com/Farama-Foundation/Gymnasium/issues/790))
* v0: Initial versions release.
"""
metadata = {
"render_modes": ["human", "rgb_array"],
"render_fps": 50,
}
def __init__(
self, sutton_barto_reward: bool = False, render_mode: str | None = None
):
self._sutton_barto_reward = sutton_barto_reward
self.gravity = 9.8
self.masscart = 1.0
self.masspole = 0.1
self.total_mass = self.masspole + self.masscart
self.length = 0.5 # actually half the pole's length
self.polemass_length = self.masspole * self.length
self.force_mag = 10.0
self.tau = 0.02 # seconds between state updates
self.kinematics_integrator = "euler"
# Angle at which to fail the episode
self.theta_threshold_radians = 12 * 2 * math.pi / 360
self.x_threshold = 2.4
# Angle limit set to 2 * theta_threshold_radians so failing observation
# is still within bounds.
high = np.array(
[
self.x_threshold * 2,
np.inf,
self.theta_threshold_radians * 2,
np.inf,
],
dtype=np.float32,
)
self.action_space = spaces.Discrete(2)
self.observation_space = spaces.Box(-high, high, dtype=np.float32)
self.render_mode = render_mode
self.screen_width = 600
self.screen_height = 400
self.screen = None
self.clock = None
self.isopen = True
self.state: np.ndarray | None = None
self.steps_beyond_terminated = None
def step(self, action):
assert self.action_space.contains(
action
), f"{action!r} ({type(action)}) invalid"
assert self.state is not None, "Call reset before using step method."
x, x_dot, theta, theta_dot = self.state
force = self.force_mag if action == 1 else -self.force_mag
costheta = np.cos(theta)
sintheta = np.sin(theta)
# For the interested reader:
# https://coneural.org/florian/papers/05_cart_pole.pdf
temp = (
force + self.polemass_length * np.square(theta_dot) * sintheta
) / self.total_mass
thetaacc = (self.gravity * sintheta - costheta * temp) / (
self.length
* (4.0 / 3.0 - self.masspole * np.square(costheta) / self.total_mass)
)
xacc = temp - self.polemass_length * thetaacc * costheta / self.total_mass
if self.kinematics_integrator == "euler":
x = x + self.tau * x_dot
x_dot = x_dot + self.tau * xacc
theta = theta + self.tau * theta_dot
theta_dot = theta_dot + self.tau * thetaacc
else: # semi-implicit euler
x_dot = x_dot + self.tau * xacc
x = x + self.tau * x_dot
theta_dot = theta_dot + self.tau * thetaacc
theta = theta + self.tau * theta_dot
self.state = np.array((x, x_dot, theta, theta_dot), dtype=np.float64)
terminated = bool(
x < -self.x_threshold
or x > self.x_threshold
or theta < -self.theta_threshold_radians
or theta > self.theta_threshold_radians
)
if not terminated:
reward = 0.0 if self._sutton_barto_reward else 1.0
elif self.steps_beyond_terminated is None:
# Pole just fell!
self.steps_beyond_terminated = 0
reward = -1.0 if self._sutton_barto_reward else 1.0
else:
if self.steps_beyond_terminated == 0:
logger.warn(
"You are calling 'step()' even though this environment has already returned terminated = True. "
"You should always call 'reset()' once you receive 'terminated = True' -- any further steps are undefined behavior."
)
self.steps_beyond_terminated += 1
reward = -1.0 if self._sutton_barto_reward else 0.0
if self.render_mode == "human":
self.render()
# truncation=False as the time limit is handled by the `TimeLimit` wrapper added during `make`
return np.array(self.state, dtype=np.float32), reward, terminated, False, {}
def reset(
self,
*,
seed: int | None = None,
options: dict | None = None,
):
super().reset(seed=seed)
# Note that if you use custom reset bounds, it may lead to out-of-bound
# state/observations.
low, high = utils.maybe_parse_reset_bounds(
options, -0.05, 0.05 # default low
) # default high
self.state = self.np_random.uniform(low=low, high=high, size=(4,))
self.steps_beyond_terminated = None
if self.render_mode == "human":
self.render()
return np.array(self.state, dtype=np.float32), {}
def render(self):
if self.render_mode is None:
assert self.spec is not None
gym.logger.warn(
"You are calling render method without specifying any render mode. "
"You can specify the render_mode at initialization, "
f'e.g. gym.make("{self.spec.id}", render_mode="rgb_array")'
)
return
try:
import pygame
from pygame import gfxdraw
except ImportError as e:
raise DependencyNotInstalled(
'pygame is not installed, run `pip install "gymnasium[classic-control]"`'
) from e
if self.screen is None:
pygame.init()
if self.render_mode == "human":
pygame.display.init()
self.screen = pygame.display.set_mode(
(self.screen_width, self.screen_height)
)
else: # mode == "rgb_array"
self.screen = pygame.Surface((self.screen_width, self.screen_height))
if self.clock is None:
self.clock = pygame.time.Clock()
world_width = self.x_threshold * 2
scale = self.screen_width / world_width
polewidth = 10.0
polelen = scale * (2 * self.length)
cartwidth = 50.0
cartheight = 30.0
if self.state is None:
return None
x = self.state
self.surf = pygame.Surface((self.screen_width, self.screen_height))
self.surf.fill((255, 255, 255))
l, r, t, b = -cartwidth / 2, cartwidth / 2, cartheight / 2, -cartheight / 2
axleoffset = cartheight / 4.0
cartx = x[0] * scale + self.screen_width / 2.0 # MIDDLE OF CART
carty = 100 # TOP OF CART
cart_coords = [(l, b), (l, t), (r, t), (r, b)]
cart_coords = [(c[0] + cartx, c[1] + carty) for c in cart_coords]
gfxdraw.aapolygon(self.surf, cart_coords, (0, 0, 0))
gfxdraw.filled_polygon(self.surf, cart_coords, (0, 0, 0))
l, r, t, b = (
-polewidth / 2,
polewidth / 2,
polelen - polewidth / 2,
-polewidth / 2,
)
pole_coords = []
for coord in [(l, b), (l, t), (r, t), (r, b)]:
coord = pygame.math.Vector2(coord).rotate_rad(-x[2])
coord = (coord[0] + cartx, coord[1] + carty + axleoffset)
pole_coords.append(coord)
gfxdraw.aapolygon(self.surf, pole_coords, (202, 152, 101))
gfxdraw.filled_polygon(self.surf, pole_coords, (202, 152, 101))
gfxdraw.aacircle(
self.surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.filled_circle(
self.surf,
int(cartx),
int(carty + axleoffset),
int(polewidth / 2),
(129, 132, 203),
)
gfxdraw.hline(self.surf, 0, self.screen_width, carty, (0, 0, 0))
self.surf = pygame.transform.flip(self.surf, False, True)
self.screen.blit(self.surf, (0, 0))
if self.render_mode == "human":
pygame.event.pump()
self.clock.tick(self.metadata["render_fps"])
pygame.display.flip()
elif self.render_mode == "rgb_array":
return np.transpose(
np.array(pygame.surfarray.pixels3d(self.screen)), axes=(1, 0, 2)
)
def close(self):
if self.screen is not None:
import pygame
pygame.display.quit()
pygame.quit()
self.isopen = False
| CartPoleEnv |
python | pola-rs__polars | py-polars/src/polars/io/partition.py | {
"start": 17067,
"end": 17495
} | class ____:
"""
Holds sink options that are generic over file / target type.
For internal use. Most of the options will parse into `UnifiedSinkArgs`.
"""
mkdir: bool
maintain_order: bool
sync_on_close: SyncOnCloseMethod | None = None
# Cloud
storage_options: list[tuple[str, str]] | None = None
credential_provider: CredentialProviderBuilder | None = None
retries: int = 2
| _SinkOptions |
python | pyca__cryptography | src/cryptography/hazmat/primitives/serialization/ssh.py | {
"start": 38374,
"end": 53700
} | class ____:
def __init__(
self,
_public_key: SSHCertPublicKeyTypes | None = None,
_serial: int | None = None,
_type: SSHCertificateType | None = None,
_key_id: bytes | None = None,
_valid_principals: list[bytes] = [],
_valid_for_all_principals: bool = False,
_valid_before: int | None = None,
_valid_after: int | None = None,
_critical_options: list[tuple[bytes, bytes]] = [],
_extensions: list[tuple[bytes, bytes]] = [],
):
self._public_key = _public_key
self._serial = _serial
self._type = _type
self._key_id = _key_id
self._valid_principals = _valid_principals
self._valid_for_all_principals = _valid_for_all_principals
self._valid_before = _valid_before
self._valid_after = _valid_after
self._critical_options = _critical_options
self._extensions = _extensions
def public_key(
self, public_key: SSHCertPublicKeyTypes
) -> SSHCertificateBuilder:
if not isinstance(
public_key,
(
ec.EllipticCurvePublicKey,
rsa.RSAPublicKey,
ed25519.Ed25519PublicKey,
),
):
raise TypeError("Unsupported key type")
if self._public_key is not None:
raise ValueError("public_key already set")
return SSHCertificateBuilder(
_public_key=public_key,
_serial=self._serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def serial(self, serial: int) -> SSHCertificateBuilder:
if not isinstance(serial, int):
raise TypeError("serial must be an integer")
if not 0 <= serial < 2**64:
raise ValueError("serial must be between 0 and 2**64")
if self._serial is not None:
raise ValueError("serial already set")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def type(self, type: SSHCertificateType) -> SSHCertificateBuilder:
if not isinstance(type, SSHCertificateType):
raise TypeError("type must be an SSHCertificateType")
if self._type is not None:
raise ValueError("type already set")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def key_id(self, key_id: bytes) -> SSHCertificateBuilder:
if not isinstance(key_id, bytes):
raise TypeError("key_id must be bytes")
if self._key_id is not None:
raise ValueError("key_id already set")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=self._type,
_key_id=key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def valid_principals(
self, valid_principals: list[bytes]
) -> SSHCertificateBuilder:
if self._valid_for_all_principals:
raise ValueError(
"Principals can't be set because the cert is valid "
"for all principals"
)
if (
not all(isinstance(x, bytes) for x in valid_principals)
or not valid_principals
):
raise TypeError(
"principals must be a list of bytes and can't be empty"
)
if self._valid_principals:
raise ValueError("valid_principals already set")
if len(valid_principals) > _SSHKEY_CERT_MAX_PRINCIPALS:
raise ValueError(
"Reached or exceeded the maximum number of valid_principals"
)
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def valid_for_all_principals(self):
if self._valid_principals:
raise ValueError(
"valid_principals already set, can't set "
"valid_for_all_principals"
)
if self._valid_for_all_principals:
raise ValueError("valid_for_all_principals already set")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=True,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def valid_before(self, valid_before: int | float) -> SSHCertificateBuilder:
if not isinstance(valid_before, (int, float)):
raise TypeError("valid_before must be an int or float")
valid_before = int(valid_before)
if valid_before < 0 or valid_before >= 2**64:
raise ValueError("valid_before must [0, 2**64)")
if self._valid_before is not None:
raise ValueError("valid_before already set")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def valid_after(self, valid_after: int | float) -> SSHCertificateBuilder:
if not isinstance(valid_after, (int, float)):
raise TypeError("valid_after must be an int or float")
valid_after = int(valid_after)
if valid_after < 0 or valid_after >= 2**64:
raise ValueError("valid_after must [0, 2**64)")
if self._valid_after is not None:
raise ValueError("valid_after already set")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=valid_after,
_critical_options=self._critical_options,
_extensions=self._extensions,
)
def add_critical_option(
self, name: bytes, value: bytes
) -> SSHCertificateBuilder:
if not isinstance(name, bytes) or not isinstance(value, bytes):
raise TypeError("name and value must be bytes")
# This is O(n**2)
if name in [name for name, _ in self._critical_options]:
raise ValueError("Duplicate critical option name")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=[*self._critical_options, (name, value)],
_extensions=self._extensions,
)
def add_extension(
self, name: bytes, value: bytes
) -> SSHCertificateBuilder:
if not isinstance(name, bytes) or not isinstance(value, bytes):
raise TypeError("name and value must be bytes")
# This is O(n**2)
if name in [name for name, _ in self._extensions]:
raise ValueError("Duplicate extension name")
return SSHCertificateBuilder(
_public_key=self._public_key,
_serial=self._serial,
_type=self._type,
_key_id=self._key_id,
_valid_principals=self._valid_principals,
_valid_for_all_principals=self._valid_for_all_principals,
_valid_before=self._valid_before,
_valid_after=self._valid_after,
_critical_options=self._critical_options,
_extensions=[*self._extensions, (name, value)],
)
def sign(self, private_key: SSHCertPrivateKeyTypes) -> SSHCertificate:
if not isinstance(
private_key,
(
ec.EllipticCurvePrivateKey,
rsa.RSAPrivateKey,
ed25519.Ed25519PrivateKey,
),
):
raise TypeError("Unsupported private key type")
if self._public_key is None:
raise ValueError("public_key must be set")
# Not required
serial = 0 if self._serial is None else self._serial
if self._type is None:
raise ValueError("type must be set")
# Not required
key_id = b"" if self._key_id is None else self._key_id
# A zero length list is valid, but means the certificate
# is valid for any principal of the specified type. We require
# the user to explicitly set valid_for_all_principals to get
# that behavior.
if not self._valid_principals and not self._valid_for_all_principals:
raise ValueError(
"valid_principals must be set if valid_for_all_principals "
"is False"
)
if self._valid_before is None:
raise ValueError("valid_before must be set")
if self._valid_after is None:
raise ValueError("valid_after must be set")
if self._valid_after > self._valid_before:
raise ValueError("valid_after must be earlier than valid_before")
# lexically sort our byte strings
self._critical_options.sort(key=lambda x: x[0])
self._extensions.sort(key=lambda x: x[0])
key_type = _get_ssh_key_type(self._public_key)
cert_prefix = key_type + _CERT_SUFFIX
# Marshal the bytes to be signed
nonce = os.urandom(32)
kformat = _lookup_kformat(key_type)
f = _FragList()
f.put_sshstr(cert_prefix)
f.put_sshstr(nonce)
kformat.encode_public(self._public_key, f)
f.put_u64(serial)
f.put_u32(self._type.value)
f.put_sshstr(key_id)
fprincipals = _FragList()
for p in self._valid_principals:
fprincipals.put_sshstr(p)
f.put_sshstr(fprincipals.tobytes())
f.put_u64(self._valid_after)
f.put_u64(self._valid_before)
fcrit = _FragList()
for name, value in self._critical_options:
fcrit.put_sshstr(name)
if len(value) > 0:
foptval = _FragList()
foptval.put_sshstr(value)
fcrit.put_sshstr(foptval.tobytes())
else:
fcrit.put_sshstr(value)
f.put_sshstr(fcrit.tobytes())
fext = _FragList()
for name, value in self._extensions:
fext.put_sshstr(name)
if len(value) > 0:
fextval = _FragList()
fextval.put_sshstr(value)
fext.put_sshstr(fextval.tobytes())
else:
fext.put_sshstr(value)
f.put_sshstr(fext.tobytes())
f.put_sshstr(b"") # RESERVED FIELD
# encode CA public key
ca_type = _get_ssh_key_type(private_key)
caformat = _lookup_kformat(ca_type)
caf = _FragList()
caf.put_sshstr(ca_type)
caformat.encode_public(private_key.public_key(), caf)
f.put_sshstr(caf.tobytes())
# Sigs according to the rules defined for the CA's public key
# (RFC4253 section 6.6 for ssh-rsa, RFC5656 for ECDSA,
# and RFC8032 for Ed25519).
if isinstance(private_key, ed25519.Ed25519PrivateKey):
signature = private_key.sign(f.tobytes())
fsig = _FragList()
fsig.put_sshstr(ca_type)
fsig.put_sshstr(signature)
f.put_sshstr(fsig.tobytes())
elif isinstance(private_key, ec.EllipticCurvePrivateKey):
hash_alg = _get_ec_hash_alg(private_key.curve)
signature = private_key.sign(f.tobytes(), ec.ECDSA(hash_alg))
r, s = asym_utils.decode_dss_signature(signature)
fsig = _FragList()
fsig.put_sshstr(ca_type)
fsigblob = _FragList()
fsigblob.put_mpint(r)
fsigblob.put_mpint(s)
fsig.put_sshstr(fsigblob.tobytes())
f.put_sshstr(fsig.tobytes())
else:
assert isinstance(private_key, rsa.RSAPrivateKey)
# Just like Golang, we're going to use SHA512 for RSA
# https://cs.opensource.google/go/x/crypto/+/refs/tags/
# v0.4.0:ssh/certs.go;l=445
# RFC 8332 defines SHA256 and 512 as options
fsig = _FragList()
fsig.put_sshstr(_SSH_RSA_SHA512)
signature = private_key.sign(
f.tobytes(), padding.PKCS1v15(), hashes.SHA512()
)
fsig.put_sshstr(signature)
f.put_sshstr(fsig.tobytes())
cert_data = binascii.b2a_base64(f.tobytes()).strip()
# load_ssh_public_identity returns a union, but this is
# guaranteed to be an SSHCertificate, so we cast to make
# mypy happy.
return typing.cast(
SSHCertificate,
load_ssh_public_identity(b"".join([cert_prefix, b" ", cert_data])),
)
| SSHCertificateBuilder |
python | HypothesisWorks__hypothesis | hypothesis-python/src/hypothesis/stateful.py | {
"start": 23344,
"end": 25950
} | class ____(SearchStrategy[Ex]):
"""A collection of values for use in stateful testing.
Bundles are a kind of strategy where values can be added by rules,
and (like any strategy) used as inputs to future rules.
The ``name`` argument they are passed is the they are referred to
internally by the state machine; no two bundles may have
the same name. It is idiomatic to use the attribute
being assigned to as the name of the Bundle::
class MyStateMachine(RuleBasedStateMachine):
keys = Bundle("keys")
Bundles can contain the same value more than once; this becomes
relevant when using :func:`~hypothesis.stateful.consumes` to remove
values again.
If the ``consume`` argument is set to True, then all values that are
drawn from this bundle will be consumed (as above) when requested.
"""
def __init__(
self, name: str, *, consume: bool = False, draw_references: bool = True
) -> None:
super().__init__()
self.name = name
self.__reference_strategy = BundleReferenceStrategy(name, consume=consume)
self.draw_references = draw_references
def do_draw(self, data):
machine = data.draw(self_strategy)
reference = data.draw(self.__reference_strategy)
return machine.names_to_values[reference.name]
def __repr__(self):
consume = self.__reference_strategy.consume
if consume is False:
return f"Bundle(name={self.name!r})"
return f"Bundle(name={self.name!r}, {consume=})"
def calc_is_empty(self, recur):
# We assume that a bundle will grow over time
return False
def is_currently_empty(self, data):
# ``self_strategy`` is an instance of the ``st.runner()`` strategy.
# Hence drawing from it only returns the current state machine without
# modifying the underlying choice sequence.
machine = data.draw(self_strategy)
return not bool(machine.bundle(self.name))
def flatmap(self, expand):
if self.draw_references:
return type(self)(
self.name,
consume=self.__reference_strategy.consume,
draw_references=False,
).flatmap(expand)
return super().flatmap(expand)
def __hash__(self):
# Making this hashable means we hit the fast path of "everything is
# hashable" in st.sampled_from label calculation when sampling which rule
# to invoke next.
# Mix in "Bundle" for collision resistance
return hash(("Bundle", self.name))
| Bundle |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_numerictypes.py | {
"start": 4638,
"end": 5779
} | class ____(TestCase):
# gh-9799
numeric_types = [
np.byte,
np.short,
np.intc,
np.int_, # , np.longlong, NB: torch does not properly have longlong
np.ubyte,
np.half,
np.single,
np.double,
np.csingle,
np.cdouble,
]
def test_names_are_unique(self):
# none of the above may be aliases for each other
assert len(set(self.numeric_types)) == len(self.numeric_types)
# names must be unique
names = [t.__name__ for t in self.numeric_types]
assert len(set(names)) == len(names)
@parametrize("t", numeric_types)
def test_names_reflect_attributes(self, t):
"""Test that names correspond to where the type is under ``np.``"""
assert getattr(np, t.__name__) is t
@skipIfTorchDynamo() # XXX: weird, some names are not OK
@parametrize("t", numeric_types)
def test_names_are_undersood_by_dtype(self, t):
"""Test the dtype constructor maps names back to the type"""
assert np.dtype(t.__name__).type is t
if __name__ == "__main__":
run_tests()
| TestScalarTypeNames |
python | kamyu104__LeetCode-Solutions | Python/lexicographically-smallest-string-after-substring-operation.py | {
"start": 38,
"end": 505
} | class ____(object):
def smallestString(self, s):
"""
:type s: str
:rtype: str
"""
result = list(s)
i = next((i for i in xrange(len(s)) if s[i] != 'a'), len(s))
if i == len(s):
result[-1] = 'z'
else:
for i in xrange(i, len(s)):
if result[i] == 'a':
break
result[i] = chr(ord(result[i])-1)
return "".join(result)
| Solution |
python | getsentry__sentry | src/sentry/hybridcloud/models/outbox.py | {
"start": 19781,
"end": 20894
} | class ____(threading.local):
flushing_enabled: bool | None = None
_outbox_context = OutboxContext()
@contextlib.contextmanager
def outbox_context(
inner: Atomic | None = None, flush: bool | None = None
) -> Generator[Atomic | None]:
# If we don't specify our flush, use the outer specified override
if flush is None:
flush = _outbox_context.flushing_enabled
# But if there is no outer override, default to True
if flush is None:
flush = True
assert not flush or inner, "Must either set a transaction or flush=False"
original = _outbox_context.flushing_enabled
if inner:
assert inner.using is not None
with unguarded_write(using=inner.using), enforce_constraints(inner):
_outbox_context.flushing_enabled = flush
try:
yield inner
finally:
_outbox_context.flushing_enabled = original
else:
_outbox_context.flushing_enabled = flush
try:
yield None
finally:
_outbox_context.flushing_enabled = original
| OutboxContext |
python | tiangolo__fastapi | docs_src/security/tutorial002_py310.py | {
"start": 193,
"end": 711
} | class ____(BaseModel):
username: str
email: str | None = None
full_name: str | None = None
disabled: bool | None = None
def fake_decode_token(token):
return User(
username=token + "fakedecoded", email="john@example.com", full_name="John Doe"
)
async def get_current_user(token: str = Depends(oauth2_scheme)):
user = fake_decode_token(token)
return user
@app.get("/users/me")
async def read_users_me(current_user: User = Depends(get_current_user)):
return current_user
| User |
python | kamyu104__LeetCode-Solutions | Python/isomorphic-strings.py | {
"start": 86,
"end": 607
} | class ____(object):
def isIsomorphic(self, s, t):
"""
:type s: str
:type t: str
:rtype: bool
"""
if len(s) != len(t):
return False
s2t, t2s = {}, {}
for p, w in izip(s, t):
if w not in s2t and p not in t2s:
s2t[w] = p
t2s[p] = w
elif w not in s2t or s2t[w] != p:
# Contradict mapping.
return False
return True
# Time: O(n)
# Space: O(1)
| Solution |
python | python-excel__xlwt | xlwt/BIFFRecords.py | {
"start": 92846,
"end": 94406
} | class ____(BiffRecord):
"""
In BIFF8 the record stores a list with indexes to SUPBOOK
records (list of REF structures, 6.100). See 5.10.3 for
details about external references in BIFF8.
Record EXTERNSHEET, BIFF8:
Offset Size Contents
0 2 Number of following REF structures (nm)
2 6nm List of nm REF structures. Each REF contains the following data:
Offset Size Contents
0 2 Index to SUPBOOK record
2 2 Index to first SUPBOOK sheet
4 2 Index to last SUPBOOK sheet
"""
_REC_ID = 0x0017
def __init__(self, refs):
# do we always need this ref? or only if there are no refs?
# (I believe that if there are no refs then we should not generate the link table - Ruben)
#refs.insert(0, (0,0,0))
self.refs = refs
def get(self):
res = []
nrefs = len(self.refs)
for idx in range(0, nrefs, _maxRefPerRecord):
chunk = self.refs[idx:idx+_maxRefPerRecord]
krefs = len(chunk)
if idx: # CONTINUE record
header = pack("<HH", 0x003C, 6 * krefs)
else: # ExternSheetRecord
header = pack("<HHH", self._REC_ID, 6 * krefs + 2, nrefs)
res.append(header)
res.extend(pack("<HHH", *r) for r in chunk)
return b''.join(res)
| ExternSheetRecord |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config_vectorizers.py | {
"start": 14218,
"end": 14519
} | class ____(_VectorizerConfigCreate):
vectorizer: Union[Vectorizers, _EnumLikeStr] = Field(
default=Vectorizers.TEXT2VEC_NVIDIA, frozen=True, exclude=True
)
model: Optional[str]
baseURL: Optional[str]
truncate: Optional[bool]
vectorizeClassName: bool
| _Text2VecNvidiaConfig |
python | getsentry__sentry | src/sentry/relay/config/experimental.py | {
"start": 1261,
"end": 3006
} | class ____(Protocol):
def __call__(self, timeout: TimeChecker, *args, **kwargs) -> Any:
pass
#: Timeout for an experimental feature build.
_FEATURE_BUILD_TIMEOUT = timedelta(seconds=20)
def add_experimental_config(
config: MutableMapping[str, Any],
key: str,
function: ExperimentalConfigBuilder,
*args: Any,
**kwargs: Any,
) -> None:
"""Try to set `config[key] = function(*args, **kwargs)`.
If the result of the function call is None, the key is not set.
If the function call raises an exception, we log it to sentry and the key remains unset.
NOTE: Only use this function if you expect Relay to behave reasonably
if ``key`` is missing from the config.
"""
if subconfig := build_safe_config(key, function, *args, **kwargs):
config[key] = subconfig
R = TypeVar("R")
P = ParamSpec("P")
def build_safe_config(
key: str,
function: Callable[Concatenate[TimeChecker, P], R],
*args: P.args,
**kwargs: P.kwargs,
) -> R | None:
"""
Runs a config builder function with a timeout.
If the function call raises an exception, we log it to sentry and return None
"""
timeout = TimeChecker(_FEATURE_BUILD_TIMEOUT)
with sentry_sdk.start_span(op=f"project_config.build_safe_config.{key}"):
try:
return function(timeout, *args, **kwargs)
except TimeoutException as e:
logger.exception(
"Project config feature build timed out: %s",
key,
extra={"hard_timeout": e._timeout, "elapsed": e._elapsed},
)
except Exception:
logger.exception("Exception while building Relay project config field")
return None
| ExperimentalConfigBuilder |
python | Lightning-AI__lightning | src/lightning/pytorch/callbacks/model_summary.py | {
"start": 1309,
"end": 4052
} | class ____(Callback):
r"""Generates a summary of all layers in a :class:`~lightning.pytorch.core.LightningModule`.
Args:
max_depth: The maximum depth of layer nesting that the summary will include. A value of 0 turns the
layer summary off.
**summarize_kwargs: Additional arguments to pass to the `summarize` method.
Example::
>>> from lightning.pytorch import Trainer
>>> from lightning.pytorch.callbacks import ModelSummary
>>> trainer = Trainer(callbacks=[ModelSummary(max_depth=1)])
"""
def __init__(self, max_depth: int = 1, **summarize_kwargs: Any) -> None:
self._max_depth: int = max_depth
self._summarize_kwargs: dict[str, Any] = summarize_kwargs
@override
def on_fit_start(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> None:
if not self._max_depth:
return
model_summary = self._summary(trainer, pl_module)
summary_data = model_summary._get_summary_data()
total_parameters = model_summary.total_parameters
trainable_parameters = model_summary.trainable_parameters
model_size = model_summary.model_size
total_training_modes = model_summary.total_training_modes
# todo Add `total_flops` in DeepSpeedSummary.
total_flops = model_summary.total_flops if hasattr(model_summary, "total_flops") else 0
if trainer.is_global_zero:
self.summarize(
summary_data,
total_parameters,
trainable_parameters,
model_size,
total_training_modes,
total_flops=total_flops,
**self._summarize_kwargs,
)
def _summary(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule") -> Union[DeepSpeedSummary, Summary]:
from lightning.pytorch.strategies.deepspeed import DeepSpeedStrategy
if isinstance(trainer.strategy, DeepSpeedStrategy) and trainer.strategy.zero_stage_3:
return DeepSpeedSummary(pl_module, max_depth=self._max_depth)
return summarize(pl_module, max_depth=self._max_depth)
@staticmethod
def summarize(
summary_data: list[tuple[str, list[str]]],
total_parameters: int,
trainable_parameters: int,
model_size: float,
total_training_modes: dict[str, int],
total_flops: int,
**summarize_kwargs: Any,
) -> None:
summary_table = _format_summary_table(
total_parameters,
trainable_parameters,
model_size,
total_training_modes,
total_flops,
*summary_data,
)
log.info("\n" + summary_table)
| ModelSummary |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pylint/invalid_return_type_length.py | {
"start": 390,
"end": 561
} | class ____:
def __len__(self):
return -42 # [invalid-length-return]
# TODO: Once Ruff has better type checking
def return_int():
return "3"
| LengthNegative |
python | sqlalchemy__sqlalchemy | test/base/test_utils.py | {
"start": 19457,
"end": 25011
} | class ____(testing.AssertsCompiledSQL):
def _assert_collection_integrity(self, coll):
eq_(coll._colset, {c for k, c, _ in coll._collection})
d = {}
for k, col, _ in coll._collection:
d.setdefault(k, (k, col))
d.update(
{idx: (k, col) for idx, (k, col, _) in enumerate(coll._collection)}
)
eq_(coll._index, d)
if not coll._proxy_index:
coll._init_proxy_index()
all_metrics = {
metrics for mm in coll._proxy_index.values() for metrics in mm
}
eq_(
all_metrics,
{m for (_, _, m) in coll._collection},
)
for mm in all_metrics:
for eps_col in mm.get_expanded_proxy_set():
assert mm in coll._proxy_index[eps_col]
for mm_ in coll._proxy_index[eps_col]:
assert eps_col in mm_.get_expanded_proxy_set()
def test_keys(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
c2.key = "foo"
cc = self._column_collection(
columns=[("c1", c1), ("foo", c2), ("c3", c3)]
)
keys = cc.keys()
eq_(keys, ["c1", "foo", "c3"])
ne_(id(keys), id(cc.keys()))
ci = cc.as_readonly()
eq_(ci.keys(), ["c1", "foo", "c3"])
def test_values(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
c2.key = "foo"
cc = self._column_collection(
columns=[("c1", c1), ("foo", c2), ("c3", c3)]
)
val = cc.values()
eq_(val, [c1, c2, c3])
ne_(id(val), id(cc.values()))
ci = cc.as_readonly()
eq_(ci.values(), [c1, c2, c3])
def test_items(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
c2.key = "foo"
cc = self._column_collection(
columns=[("c1", c1), ("foo", c2), ("c3", c3)]
)
items = cc.items()
eq_(items, [("c1", c1), ("foo", c2), ("c3", c3)])
ne_(id(items), id(cc.items()))
ci = cc.as_readonly()
eq_(ci.items(), [("c1", c1), ("foo", c2), ("c3", c3)])
def test_getitem_tuple_str(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
c2.key = "foo"
cc = self._column_collection(
columns=[("c1", c1), ("foo", c2), ("c3", c3)]
)
sub_cc = cc["c3", "foo"]
is_(sub_cc.c3, c3)
eq_(list(sub_cc), [c3, c2])
def test_getitem_tuple_int(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
c2.key = "foo"
cc = self._column_collection(
columns=[("c1", c1), ("foo", c2), ("c3", c3)]
)
sub_cc = cc[2, 1]
is_(sub_cc.c3, c3)
eq_(list(sub_cc), [c3, c2])
def test_key_index_error(self):
cc = self._column_collection(
columns=[
("col1", sql.column("col1")),
("col2", sql.column("col2")),
]
)
assert_raises(KeyError, lambda: cc["foo"])
assert_raises(KeyError, lambda: cc[object()])
assert_raises(IndexError, lambda: cc[5])
def test_contains_column(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
cc = self._column_collection(columns=[("c1", c1), ("c2", c2)])
is_true(cc.contains_column(c1))
is_false(cc.contains_column(c3))
def test_contains_column_not_column(self):
c1, c2, c3 = sql.column("c1"), sql.column("c2"), sql.column("c3")
cc = self._column_collection(columns=[("c1", c1), ("c2", c2)])
is_false(cc.contains_column(c3 == 2))
with testing.expect_raises_message(
exc.ArgumentError,
"contains_column cannot be used with string arguments",
):
cc.contains_column("c1")
with testing.expect_raises_message(
exc.ArgumentError,
"contains_column cannot be used with string arguments",
):
cc.contains_column("foo")
def test_in(self):
col1 = sql.column("col1")
cc = self._column_collection(
columns=[
("col1", col1),
("col2", sql.column("col2")),
("col3", sql.column("col3")),
]
)
assert "col1" in cc
assert "col2" in cc
assert_raises_message(
exc.ArgumentError,
"__contains__ requires a string argument",
lambda: col1 in cc,
)
def test_compare(self):
c1 = sql.column("col1")
c2 = c1.label("col2")
c3 = sql.column("col3")
is_true(
self._column_collection(
[("col1", c1), ("col2", c2), ("col3", c3)]
).compare(
self._column_collection(
[("col1", c1), ("col2", c2), ("col3", c3)]
)
)
)
is_false(
self._column_collection(
[("col1", c1), ("col2", c2), ("col3", c3)]
).compare(self._column_collection([("col1", c1), ("col2", c2)]))
)
def test_str(self):
c1 = sql.column("col1")
c2 = c1.label("col2")
c3 = sql.column("col3")
cc = self._column_collection(
[("col1", c1), ("col2", c2), ("col3", c3)]
)
eq_(str(cc), "%s(%s, %s, %s)" % (type(cc).__name__, c1, c2, c3))
eq_(repr(cc), object.__repr__(cc))
| ColumnCollectionCommon |
python | django__django | django/contrib/gis/forms/fields.py | {
"start": 4472,
"end": 4535
} | class ____(GeometryField):
geom_type = "POLYGON"
| PolygonField |
python | apache__airflow | providers/celery/src/airflow/providers/celery/executors/celery_executor_utils.py | {
"start": 11692,
"end": 16257
} | class ____(LoggingMixin):
"""
Gets status for many Celery tasks using the best method available.
If BaseKeyValueStoreBackend is used as result backend, the mget method is used.
If DatabaseBackend is used as result backend, the SELECT ...WHERE task_id IN (...) query is used
Otherwise, multiprocessing.Pool will be used. Each task status will be downloaded individually.
"""
def __init__(self, sync_parallelism: int):
super().__init__()
self._sync_parallelism = sync_parallelism
def _tasks_list_to_task_ids(self, async_tasks: Collection[AsyncResult]) -> set[str]:
return {a.task_id for a in async_tasks}
def get_many(self, async_results: Collection[AsyncResult]) -> Mapping[str, EventBufferValueType]:
"""Get status for many Celery tasks using the best method available."""
if isinstance(app.backend, BaseKeyValueStoreBackend):
result = self._get_many_from_kv_backend(async_results)
elif isinstance(app.backend, DatabaseBackend):
result = self._get_many_from_db_backend(async_results)
else:
result = self._get_many_using_multiprocessing(async_results)
self.log.debug("Fetched %d state(s) for %d task(s)", len(result), len(async_results))
return result
def _get_many_from_kv_backend(
self, async_tasks: Collection[AsyncResult]
) -> Mapping[str, EventBufferValueType]:
task_ids = self._tasks_list_to_task_ids(async_tasks)
keys = [app.backend.get_key_for_task(k) for k in task_ids]
values = app.backend.mget(keys)
task_results = [app.backend.decode_result(v) for v in values if v]
task_results_by_task_id = {task_result["task_id"]: task_result for task_result in task_results}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
@retry
def _query_task_cls_from_db_backend(self, task_ids: set[str], **kwargs):
session = app.backend.ResultSession()
task_cls = getattr(app.backend, "task_cls", TaskDb)
with session_cleanup(session):
return session.scalars(select(task_cls).where(task_cls.task_id.in_(task_ids))).all()
def _get_many_from_db_backend(
self, async_tasks: Collection[AsyncResult]
) -> Mapping[str, EventBufferValueType]:
task_ids = self._tasks_list_to_task_ids(async_tasks)
tasks = self._query_task_cls_from_db_backend(task_ids)
task_results = [app.backend.meta_from_decoded(task.to_dict()) for task in tasks]
task_results_by_task_id = {task_result["task_id"]: task_result for task_result in task_results}
return self._prepare_state_and_info_by_task_dict(task_ids, task_results_by_task_id)
@staticmethod
def _prepare_state_and_info_by_task_dict(
task_ids: set[str], task_results_by_task_id: dict[str, dict[str, Any]]
) -> Mapping[str, EventBufferValueType]:
state_info: MutableMapping[str, EventBufferValueType] = {}
for task_id in task_ids:
task_result = task_results_by_task_id.get(task_id)
if task_result:
state = task_result["status"]
info = task_result.get("info")
else:
state = celery_states.PENDING
info = None
state_info[task_id] = state, info
return state_info
def _get_many_using_multiprocessing(
self, async_results: Collection[AsyncResult]
) -> Mapping[str, EventBufferValueType]:
num_process = min(len(async_results), self._sync_parallelism)
with ProcessPoolExecutor(max_workers=num_process) as sync_pool:
chunksize = max(1, math.ceil(len(async_results) / self._sync_parallelism))
task_id_to_states_and_info = list(
sync_pool.map(fetch_celery_task_state, async_results, chunksize=chunksize)
)
states_and_info_by_task_id: MutableMapping[str, EventBufferValueType] = {}
for task_id, state_or_exception, info in task_id_to_states_and_info:
if isinstance(state_or_exception, ExceptionWithTraceback):
self.log.error(
"%s:%s\n%s\n",
CELERY_FETCH_ERR_MSG_HEADER,
state_or_exception.exception,
state_or_exception.traceback,
)
else:
states_and_info_by_task_id[task_id] = state_or_exception, info
return states_and_info_by_task_id
| BulkStateFetcher |
python | pandas-dev__pandas | pandas/tests/indexes/base_class/test_constructors.py | {
"start": 147,
"end": 2397
} | class ____:
# Tests for the Index constructor, specifically for cases that do
# not return a subclass
@pytest.mark.parametrize("value", [1, np.int64(1)])
def test_constructor_corner(self, value):
# corner case
msg = (
r"Index\(\.\.\.\) must be called with a collection of some "
f"kind, {value} was passed"
)
with pytest.raises(TypeError, match=msg):
Index(value)
@pytest.mark.parametrize("index_vals", [[("A", 1), "B"], ["B", ("A", 1)]])
def test_construction_list_mixed_tuples(self, index_vals):
# see gh-10697: if we are constructing from a mixed list of tuples,
# make sure that we are independent of the sorting order.
index = Index(index_vals)
assert isinstance(index, Index)
assert not isinstance(index, MultiIndex)
def test_constructor_cast(self):
msg = "could not convert string to float"
with pytest.raises(ValueError, match=msg):
Index(["a", "b", "c"], dtype=float)
@pytest.mark.parametrize("tuple_list", [[()], [(), ()]])
def test_construct_empty_tuples(self, tuple_list):
# GH #45608
result = Index(tuple_list)
expected = MultiIndex.from_tuples(tuple_list)
tm.assert_index_equal(result, expected)
def test_index_string_inference(self):
# GH#54430
expected = Index(["a", "b"], dtype=pd.StringDtype(na_value=np.nan))
with pd.option_context("future.infer_string", True):
ser = Index(["a", "b"])
tm.assert_index_equal(ser, expected)
expected = Index(["a", 1], dtype="object")
with pd.option_context("future.infer_string", True):
ser = Index(["a", 1])
tm.assert_index_equal(ser, expected)
@pytest.mark.parametrize("klass", [Series, Index])
def test_inference_on_pandas_objects(self, klass):
# GH#56012
obj = klass([pd.Timestamp("2019-12-31")], dtype=object)
result = Index(obj)
assert result.dtype == np.object_
def test_constructor_not_read_only(self):
# GH#57130
ser = Series([1, 2], dtype=object)
idx = Index(ser)
assert idx._values.flags.writeable
| TestIndexConstructor |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/typedDictClosed3.py | {
"start": 2703,
"end": 2839
} | class ____(TypedDict, extra_items=str):
pass
# This should generate an error because added fields
# cannot be ReadOnly.
| ParentNonOpen7 |
python | mlflow__mlflow | mlflow/utils/search_logged_model_utils.py | {
"start": 860,
"end": 2076
} | class ____:
type: EntityType
key: str
IDENTIFIER_RE = re.compile(r"^([a-z]+)\.(.+)$")
def __repr__(self) -> str:
return f"{self.type.value}.{self.key}"
@classmethod
def from_str(cls, s: str) -> "Entity":
if m := Entity.IDENTIFIER_RE.match(s):
return cls(
type=EntityType.from_str(m.group(1)),
key=m.group(2).strip("`"),
)
return cls(type=EntityType.ATTRIBUTE, key=SqlLoggedModel.ALIASES.get(s, s).strip("`"))
def is_numeric(self) -> bool:
"""
Does this entity represent a numeric column?
"""
return self.type == EntityType.METRIC or (
self.type == EntityType.ATTRIBUTE and SqlLoggedModel.is_numeric(self.key)
)
def validate_op(self, op: str) -> None:
numeric_ops = ("<", "<=", ">", ">=", "=", "!=")
string_ops = ("=", "!=", "LIKE", "ILIKE", "IN", "NOT IN")
ops = numeric_ops if self.is_numeric() else string_ops
if op not in ops:
raise MlflowException.invalid_parameter_value(
f"Invalid comparison operator for {self}: {op!r}. Expected one of {string_ops}."
)
@dataclass
| Entity |
python | EpistasisLab__tpot | tpot/search_spaces/pipelines/wrapper.py | {
"start": 1758,
"end": 5024
} | class ____(SklearnIndividual):
def __init__(
self,
method: type,
space: ConfigurationSpace,
estimator_search_space: SearchSpace,
hyperparameter_parser: callable = None,
wrapped_param_name: str = None,
rng=None) -> None:
super().__init__()
self.method = method
self.space = space
self.estimator_search_space = estimator_search_space
self.hyperparameters_parser = hyperparameter_parser
self.wrapped_param_name = wrapped_param_name
rng = np.random.default_rng(rng)
self.node = self.estimator_search_space.generate(rng)
if isinstance(space, dict):
self.hyperparameters = space
else:
rng = np.random.default_rng(rng)
self.space.seed(rng.integers(0, 2**32))
self.hyperparameters = dict(self.space.sample_configuration())
def mutate(self, rng=None):
rng = np.random.default_rng(rng)
if rng.choice([True, False]):
return self._mutate_hyperparameters(rng)
else:
return self._mutate_node(rng)
def _mutate_hyperparameters(self, rng=None):
if isinstance(self.space, dict):
return False
rng = np.random.default_rng(rng)
self.space.seed(rng.integers(0, 2**32))
self.hyperparameters = dict(self.space.sample_configuration())
return True
def _mutate_node(self, rng=None):
return self.node.mutate(rng)
def crossover(self, other, rng=None):
rng = np.random.default_rng(rng)
if rng.choice([True, False]):
return self._crossover_hyperparameters(other, rng)
else:
self.node.crossover(other.estimator_search_space, rng)
def _crossover_hyperparameters(self, other, rng=None):
if isinstance(self.space, dict):
return False
rng = np.random.default_rng(rng)
if self.method != other.method:
return False
#loop through hyperparameters, randomly swap items in self.hyperparameters with items in other.hyperparameters
for hyperparameter in self.space:
if rng.choice([True, False]):
if hyperparameter in other.hyperparameters:
self.hyperparameters[hyperparameter] = other.hyperparameters[hyperparameter]
return True
def export_pipeline(self, **kwargs):
if self.hyperparameters_parser is not None:
final_params = self.hyperparameters_parser(self.hyperparameters)
else:
final_params = self.hyperparameters
est = self.node.export_pipeline(**kwargs)
wrapped_est = self.method(est, **final_params)
return wrapped_est
def unique_id(self):
#return a dictionary of the method and the hyperparameters
method_str = self.method.__name__
params = list(self.hyperparameters.keys())
params = sorted(params)
id_str = f"{method_str}({', '.join([f'{param}={self.hyperparameters[param]}' for param in params])})"
return TupleIndex(("WrapperPipeline", id_str, self.node.unique_id()))
| WrapperPipelineIndividual |
python | graphql-python__graphene | examples/complex_example.py | {
"start": 290,
"end": 478
} | class ____(graphene.ObjectType):
address = graphene.Field(Address, geo=GeoInput(required=True))
def resolve_address(root, info, geo):
return Address(latlng=geo.latlng)
| Query |
python | kamyu104__LeetCode-Solutions | Python/finding-3-digit-even-numbers.py | {
"start": 901,
"end": 1446
} | class ____(object):
def findEvenNumbers(self, digits):
"""
:type digits: List[int]
:rtype: List[int]
"""
result, cnt = [], collections.Counter(digits)
for i in xrange(1, 10):
for j in xrange(10):
for k in xrange(0, 10, 2):
if cnt[i] > 0 and cnt[j] > (j == i) and cnt[k] > (k == i) + (k == j):
result.append(i*100 + j*10 + k)
return result
# Time: O(1) ~ O(n), n is 10^3
# Space: O(1)
import collections
| Solution2 |
python | doocs__leetcode | solution/1500-1599/1545.Find Kth Bit in Nth Binary String/Solution.py | {
"start": 0,
"end": 375
} | class ____:
def findKthBit(self, n: int, k: int) -> str:
def dfs(n: int, k: int) -> int:
if k == 1:
return 0
if (k & (k - 1)) == 0:
return 1
m = 1 << n
if k * 2 < m - 1:
return dfs(n - 1, k)
return dfs(n - 1, m - k) ^ 1
return str(dfs(n, k))
| Solution |
python | pytorch__pytorch | test/onnx/test_op_consistency.py | {
"start": 10573,
"end": 13374
} | class ____(onnx_test_common._TestONNXRuntime):
"""Test output consistency between exported ONNX models and PyTorch eager mode.
This is a parameterized test suite.
"""
opset_version = -1
@common_device_type.ops(
[op for op in OPS_DB if op.name in TESTED_OPS],
allowed_dtypes=onnx_test_common.INT_TYPES
+ onnx_test_common.FLOAT_TYPES
+ onnx_test_common.BOOL_TYPES,
)
def test_output_match(self, device: str, dtype: torch.dtype, op):
"""Test the ONNX exporter."""
# device is provided by instantiate_device_type_tests, but we only want to run in cpu.
assert device == "cpu"
samples = op.sample_inputs(
device,
dtype,
requires_grad=False,
)
for i, cpu_sample in enumerate(samples):
inputs = (cpu_sample.input, *cpu_sample.args)
# Provide the repr to subtest because tensors are not serializable in parallel test runs
with self.subTest(
opset=self.opset_version,
sample_num=i,
inputs=repr(inputs),
kwargs=repr(cpu_sample.kwargs),
):
test_behavior, reason = _should_skip_xfail_test_sample(
op.name, cpu_sample
)
with onnx_test_common.normal_xfail_skip_test_behaviors(
test_behavior, reason
):
model = SingleOpModel(op, cpu_sample.kwargs)
model.eval()
if dtype == torch.float32:
# Relax atol and rtol for float32 based on empirical results
# The current most relaxed values are for aten::stft
rtol = 1e-5
atol = 2e-5
elif dtype == torch.float64:
# The current most relaxed values are for aten::stft
rtol = 1e-5
atol = 2e-5
else:
rtol = None
atol = None
# Run the test
self.run_test(model, inputs, rtol=rtol, atol=atol)
for opset in onnx_test_common.TESTED_OPSETS:
# The name needs to match the parameterized_class name.
test_class_name = f"TestOnnxModelOutputConsistency_opset{opset}"
onnx_test_common.add_decorate_info(
OPS_DB,
test_class_name,
"test_output_match",
opset=opset,
skip_or_xfails=EXPECTED_SKIPS_OR_FAILS,
)
common_device_type.instantiate_device_type_tests(
globals()[test_class_name], globals(), only_for="cpu"
)
if __name__ == "__main__":
common_utils.run_tests()
| TestOnnxModelOutputConsistency |
python | pytorch__pytorch | test/distributed/_composable/fsdp/test_fully_shard_training.py | {
"start": 8616,
"end": 10913
} | class ____(FSDPTestMultiThread):
@property
def world_size(self) -> int:
return 2
@skip_if_lt_x_gpu(1)
@wrapSwapTensorsTest(True)
def test_to_float64_after_init(self):
"""Tests that the user can cast the module to float64 after init."""
# NOTE: Test fp64 instead of a lower precision dtype like bf16 for
# better numerics. The important part is changing the dtype.
torch.manual_seed(42)
mlp_dim, device, dtype = 4, device_type, torch.float64
model = MLP(mlp_dim, device=device)
for param in model.parameters():
dist.broadcast(param, src=0)
ref_model = copy.deepcopy(model).to(dtype)
replicate(ref_model)
ref_optim = torch.optim.Adam(ref_model.parameters(), lr=1e-2)
for module in (model.in_proj, model.out_proj, model):
fully_shard(module)
model.to(dtype)
for param in model.parameters():
self.assertEqual(param.dtype, dtype)
self.assertEqual(param.to_local().dtype, dtype)
self.assertEqual(param._spec.tensor_meta.dtype, dtype)
optim = torch.optim.Adam(model.parameters(), lr=1e-2, foreach=True)
check_sharded_parity(self, ref_model, model)
torch.manual_seed(42 + self.rank + 1)
inp = torch.randn((2, mlp_dim), device=device_type.type, dtype=dtype)
for iter_idx in range(10):
losses: list[torch.Tensor] = []
for _model in (ref_model, model):
losses.append(_model(inp).sum())
losses[-1].backward()
self.assertEqual(losses[0], losses[1])
check_sharded_parity(self, ref_model, model)
for param in model.parameters():
self.assertEqual(param.dtype, dtype)
self.assertEqual(param.to_local().dtype, dtype)
self.assertEqual(param._spec.tensor_meta.dtype, dtype)
self.assertEqual(param.grad.dtype, dtype)
self.assertEqual(param.grad.to_local().dtype, dtype)
self.assertEqual(param.grad._spec.tensor_meta.dtype, dtype)
for _optim in (ref_optim, optim):
_optim.step()
_optim.zero_grad(set_to_none=(iter_idx % 2 == 0))
| TestFullyShardCastAfterInit |
python | numpy__numpy | numpy/testing/tests/test_utils.py | {
"start": 16307,
"end": 25777
} | class ____(_GenericTest):
def _assert_func(self, *args, **kwargs):
assert_array_almost_equal(*args, **kwargs)
def test_closeness(self):
# Note that in the course of time we ended up with
# `abs(x - y) < 1.5 * 10**(-decimal)`
# instead of the previously documented
# `abs(x - y) < 0.5 * 10**(-decimal)`
# so this check serves to preserve the wrongness.
# test scalars
expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
'Max absolute difference among violations: 1.5\n'
'Max relative difference among violations: inf')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(1.5, 0.0, decimal=0)
# test arrays
self._assert_func([1.499999], [0.0], decimal=0)
expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
'Mismatch at index:\n'
' [0]: 1.5 (ACTUAL), 0.0 (DESIRED)\n'
'Max absolute difference among violations: 1.5\n'
'Max relative difference among violations: inf')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func([1.5], [0.0], decimal=0)
a = [1.4999999, 0.00003]
b = [1.49999991, 0]
expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
'Mismatch at index:\n'
' [1]: 3e-05 (ACTUAL), 0.0 (DESIRED)\n'
'Max absolute difference among violations: 3.e-05\n'
'Max relative difference among violations: inf')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(a, b, decimal=7)
expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
'Mismatch at index:\n'
' [1]: 0.0 (ACTUAL), 3e-05 (DESIRED)\n'
'Max absolute difference among violations: 3.e-05\n'
'Max relative difference among violations: 1.')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(b, a, decimal=7)
def test_simple(self):
x = np.array([1234.2222])
y = np.array([1234.2223])
self._assert_func(x, y, decimal=3)
self._assert_func(x, y, decimal=4)
expected_msg = ('Mismatched elements: 1 / 1 (100%)\n'
'Mismatch at index:\n'
' [0]: 1234.2222 (ACTUAL), 1234.2223 (DESIRED)\n'
'Max absolute difference among violations: '
'1.e-04\n'
'Max relative difference among violations: '
'8.10226812e-08')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(x, y, decimal=5)
def test_array_vs_scalar(self):
a = [5498.42354, 849.54345, 0.00]
b = 5498.42354
expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
'Mismatch at indices:\n'
' [1]: 849.54345 (ACTUAL), 5498.42354 (DESIRED)\n'
' [2]: 0.0 (ACTUAL), 5498.42354 (DESIRED)\n'
'Max absolute difference among violations: '
'5498.42354\n'
'Max relative difference among violations: 1.')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(a, b, decimal=9)
expected_msg = ('Mismatched elements: 2 / 3 (66.7%)\n'
'Mismatch at indices:\n'
' [1]: 5498.42354 (ACTUAL), 849.54345 (DESIRED)\n'
' [2]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n'
'Max absolute difference among violations: '
'5498.42354\n'
'Max relative difference among violations: 5.4722099')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(b, a, decimal=9)
a = [5498.42354, 0.00]
expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
'Mismatch at index:\n'
' [1]: 5498.42354 (ACTUAL), 0.0 (DESIRED)\n'
'Max absolute difference among violations: '
'5498.42354\n'
'Max relative difference among violations: inf')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(b, a, decimal=7)
b = 0
expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
'Mismatch at index:\n'
' [0]: 5498.42354 (ACTUAL), 0 (DESIRED)\n'
'Max absolute difference among violations: '
'5498.42354\n'
'Max relative difference among violations: inf')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(a, b, decimal=7)
def test_nan(self):
anan = np.array([np.nan])
aone = np.array([1])
ainf = np.array([np.inf])
self._assert_func(anan, anan)
assert_raises(AssertionError,
lambda: self._assert_func(anan, aone))
assert_raises(AssertionError,
lambda: self._assert_func(anan, ainf))
assert_raises(AssertionError,
lambda: self._assert_func(ainf, anan))
def test_inf(self):
a = np.array([[1., 2.], [3., 4.]])
b = a.copy()
a[0, 0] = np.inf
assert_raises(AssertionError,
lambda: self._assert_func(a, b))
b[0, 0] = -np.inf
assert_raises(AssertionError,
lambda: self._assert_func(a, b))
def test_complex_inf(self):
a = np.array([np.inf + 1.j, 2. + 1.j, 3. + 1.j])
b = a.copy()
self._assert_func(a, b)
b[1] = 3. + 1.j
expected_msg = ('Mismatched elements: 1 / 3 (33.3%)\n'
'Mismatch at index:\n'
' [1]: (2+1j) (ACTUAL), (3+1j) (DESIRED)\n'
'Max absolute difference among violations: 1.\n')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(a, b)
def test_subclass(self):
a = np.array([[1., 2.], [3., 4.]])
b = np.ma.masked_array([[1., 2.], [0., 4.]],
[[False, False], [True, False]])
self._assert_func(a, b)
self._assert_func(b, a)
self._assert_func(b, b)
# Test fully masked as well (see gh-11123).
a = np.ma.MaskedArray(3.5, mask=True)
b = np.array([3., 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.masked
b = np.array([3., 4., 6.5])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
b = np.array([1., 2., 3.])
self._test_equal(a, b)
self._test_equal(b, a)
a = np.ma.MaskedArray([3., 4., 6.5], mask=[True, True, True])
b = np.array(1.)
self._test_equal(a, b)
self._test_equal(b, a)
def test_subclass_2(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return super().__eq__(other).view(np.ndarray)
def __lt__(self, other):
return super().__lt__(other).view(np.ndarray)
def all(self, *args, **kwargs):
return all(self)
a = np.array([1., 2.]).view(MyArray)
self._assert_func(a, a)
z = np.array([True, True]).view(MyArray)
all(z)
b = np.array([1., 202]).view(MyArray)
expected_msg = ('Mismatched elements: 1 / 2 (50%)\n'
'Mismatch at index:\n'
' [1]: 2.0 (ACTUAL), 202.0 (DESIRED)\n'
'Max absolute difference among violations: 200.\n'
'Max relative difference among violations: 0.99009')
with pytest.raises(AssertionError, match=re.escape(expected_msg)):
self._assert_func(a, b)
def test_subclass_that_cannot_be_bool(self):
# While we cannot guarantee testing functions will always work for
# subclasses, the tests should ideally rely only on subclasses having
# comparison operators, not on them being able to store booleans
# (which, e.g., astropy Quantity cannot usefully do). See gh-8452.
class MyArray(np.ndarray):
def __eq__(self, other):
return super().__eq__(other).view(np.ndarray)
def __lt__(self, other):
return super().__lt__(other).view(np.ndarray)
def all(self, *args, **kwargs):
raise NotImplementedError
a = np.array([1., 2.]).view(MyArray)
self._assert_func(a, a)
| TestArrayAlmostEqual |
python | google__pytype | pytype/file_utils_test.py | {
"start": 3143,
"end": 3897
} | class ____(unittest.TestCase):
"""Tests for file_utils.expand_path(s?)."""
def test_expand_one_path(self):
full_path = path_utils.join(path_utils.getcwd(), "foo.py")
self.assertEqual(file_utils.expand_path("foo.py"), full_path)
def test_expand_two_paths(self):
full_path1 = path_utils.join(path_utils.getcwd(), "foo.py")
full_path2 = path_utils.join(path_utils.getcwd(), "bar.py")
self.assertEqual(
file_utils.expand_paths(["foo.py", "bar.py"]), [full_path1, full_path2]
)
def test_expand_with_cwd(self):
with test_utils.Tempdir() as d:
f = d.create_file("foo.py")
self.assertEqual(
file_utils.expand_path("foo.py", d.path),
file_utils.expand_path(f),
)
| TestPathExpansion |
python | hynek__structlog | tests/processors/test_renderers.py | {
"start": 10666,
"end": 14688
} | class ____:
def test_disallows_non_utc_unix_timestamps(self):
"""
A asking for a UNIX timestamp with a timezone that's not UTC raises a
ValueError.
"""
with pytest.raises(ValueError, match="UNIX timestamps are always UTC"):
TimeStamper(utc=False)
def test_inserts_utc_unix_timestamp_by_default(self):
"""
Per default a float UNIX timestamp is used.
"""
ts = TimeStamper()
d = ts(None, None, {})
assert isinstance(d["timestamp"], float)
@time_machine.travel("1980-03-25 16:00:00")
def test_local(self):
"""
Timestamp in local timezone work. Due to historic reasons, the default
format does not include a timezone.
"""
ts = TimeStamper(fmt="iso", utc=False)
d = ts(None, None, {})
assert "1980-03-25T16:00:00" == d["timestamp"]
@time_machine.travel("1980-03-25 16:00:00")
def test_formats(self):
"""
The fmt string is respected.
"""
ts = TimeStamper(fmt="%Y")
d = ts(None, None, {})
assert "1980" == d["timestamp"]
@time_machine.travel(
datetime.datetime(1980, 3, 25, 16, 0, 0, tzinfo=datetime.timezone.utc)
)
def test_inserts_formatted_utc(self):
"""
The fmt string in UTC timezone works.
"""
ts = TimeStamper(fmt="%Y-%m-%d %H:%M:%S %Z")
d = ts(None, None, {})
assert "1980-03-25 16:00:00 UTC" == d["timestamp"]
@time_machine.travel("1980-03-25 16:00:00")
def test_inserts_formatted_local(self):
"""
The fmt string in local timezone works.
"""
local_tz = datetime.datetime.now().astimezone().tzname()
ts = TimeStamper(fmt="%Y-%m-%d %H:%M:%S %Z", utc=False)
d = ts(None, None, {})
assert f"1980-03-25 16:00:00 {local_tz}" == d["timestamp"]
@time_machine.travel("1980-03-25 16:00:00")
def test_tz_aware(self):
"""
The timestamp that is used for formatting is timezone-aware.
"""
ts = TimeStamper(fmt="%z")
d = ts(None, None, {})
assert "" == datetime.datetime.now().strftime("%z") # noqa: DTZ005
assert "" != d["timestamp"]
@time_machine.travel(
datetime.datetime(1980, 3, 25, 16, 0, 0, tzinfo=datetime.timezone.utc)
)
def test_adds_Z_to_iso(self):
"""
stdlib's isoformat is buggy, so we fix it.
"""
ts = TimeStamper(fmt="iso", utc=True)
d = ts(None, None, {})
assert "1980-03-25T16:00:00Z" == d["timestamp"]
@time_machine.travel("1980-03-25 16:00:00")
def test_key_can_be_specified(self):
"""
Timestamp is stored with the specified key.
"""
ts = TimeStamper(fmt="%m", key="month")
d = ts(None, None, {})
assert "03" == d["month"]
@time_machine.travel("1980-03-25 16:00:00", tick=False)
@pytest.mark.parametrize("fmt", [None, "%Y"])
@pytest.mark.parametrize("utc", [True, False])
@pytest.mark.parametrize("key", [None, "other-key"])
@pytest.mark.parametrize("proto", range(pickle.HIGHEST_PROTOCOL + 1))
def test_pickle(self, fmt, utc, key, proto):
"""
TimeStamper is serializable.
"""
# UNIX timestamps must be UTC.
if fmt is None and not utc:
pytest.skip()
ts = TimeStamper()
assert ts(None, None, {}) == pickle.loads(pickle.dumps(ts, proto))(
None, None, {}
)
def test_apply_time_machine_after_instantiation(self):
"""
Freezing time after instantiation of TimeStamper works.
"""
ts = TimeStamper(fmt="iso", utc=False)
# Simulate a different local time by traveling to a different timestamp
# after the stamper was created.
with time_machine.travel("1980-03-25 17:00:00"):
d = ts(None, None, {})
assert "1980-03-25T17:00:00" == d["timestamp"]
| TestTimeStamper |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 1596904,
"end": 1597099
} | class ____(VegaLiteSchema):
"""WindowOnlyOp schema wrapper."""
_schema = {"$ref": "#/definitions/WindowOnlyOp"}
def __init__(self, *args):
super().__init__(*args)
| WindowOnlyOp |
python | great-expectations__great_expectations | great_expectations/datasource/fluent/data_asset/path/file_asset.py | {
"start": 1730,
"end": 2062
} | class ____(ValueError):
def __init__(self, unknown_groups: set[str]):
message = (
"Regex has the following group(s) which do not match "
f"batch parameters: {', '.join(unknown_groups)}"
)
super().__init__(message)
self.unknown_groups = unknown_groups
| RegexUnknownGroupsError |
python | huggingface__transformers | src/transformers/models/evolla/modeling_evolla.py | {
"start": 7954,
"end": 10803
} | class ____(nn.Module):
"""
Rotary position embeddings based on those in
[RoFormer](https://huggingface.co/docs/transformers/model_doc/roformer). Query and keys are transformed by rotation
matrices which depend on their relative positions.
"""
inv_freq: torch.Tensor # fix linting for `register_buffer`
def __init__(self, dim: int):
super().__init__()
# Generate and save the inverse frequency buffer (non trainable)
inv_freq = 1.0 / (10000 ** (torch.arange(0, dim, 2, dtype=torch.int64).float() / dim))
self.register_buffer("inv_freq", inv_freq)
self._seq_len_cached = None
self._cos_cached = None
self._sin_cached = None
def _update_cos_sin_tables(self, x, seq_dimension=2):
seq_len = x.shape[seq_dimension]
# Reset the tables if the sequence length has changed,
# or if we're on a new device (possibly due to tracing for instance)
if seq_len != self._seq_len_cached or self._cos_cached.device != x.device:
self._seq_len_cached = seq_len
t = torch.arange(x.shape[seq_dimension], device=x.device).type_as(self.inv_freq)
freqs = torch.outer(t, self.inv_freq)
emb = torch.cat((freqs, freqs), dim=-1).to(x.device)
self._cos_cached = emb.cos()[None, None, :, :]
self._sin_cached = emb.sin()[None, None, :, :]
return self._cos_cached, self._sin_cached
def forward(self, q: torch.Tensor, k: torch.Tensor) -> tuple[torch.Tensor, torch.Tensor]:
self._cos_cached, self._sin_cached = self._update_cos_sin_tables(k, seq_dimension=-2)
return (
apply_rotary_pos_emb_esm(q, self._cos_cached, self._sin_cached).to(dtype=q.dtype),
apply_rotary_pos_emb_esm(k, self._cos_cached, self._sin_cached).to(dtype=k.dtype),
)
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: Optional[float] = None,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
if scaling is None:
scaling = query.size(-1) ** -0.5
# Take the dot product between "query" and "key" to get the raw attention scores.
attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling
if attention_mask is not None:
attention_mask = attention_mask[:, :, :, : key.shape[-2]]
attn_weights = attn_weights + attention_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
| EvollaSaProtRotaryEmbedding |
python | pytorch__pytorch | torch/_inductor/autoheuristic/autoheuristic_utils.py | {
"start": 186,
"end": 607
} | class ____:
"""
The context, that AutoHeuristic stores, is a list of features. AutoHeuristic needs to know whether a feature is
categorical (i.e., not a continuous variable) to learn a machine learning model.
"""
def __init__(self, name: str, value: Value, is_categorical: bool = False) -> None:
self.name = name
self.value = value
self.is_categorical = is_categorical
| AHFeature |
python | sqlalchemy__sqlalchemy | test/sql/test_defaults.py | {
"start": 4132,
"end": 11689
} | class ____(fixtures.TestBase):
def test_bad_arg_signature(self):
ex_msg = (
"ColumnDefault Python function takes zero "
"or one positional arguments"
)
def fn1(x, y):
pass
def fn2(x, y, z=3):
pass
class fn3:
def __init__(self, x, y):
pass
class FN4:
def __call__(self, x, y):
pass
fn4 = FN4()
for fn in fn1, fn2, fn3, fn4:
assert_raises_message(
sa.exc.ArgumentError, ex_msg, sa.ColumnDefault, fn
)
def test_arg_signature(self):
def fn1():
pass
def fn2():
pass
def fn3(x=1):
eq_(x, 1)
def fn4(x=1, y=2, z=3):
eq_(x, 1)
fn5 = list
class fn6a:
def __init__(self, x):
eq_(x, "context")
class fn6b:
def __init__(self, x, y=3):
eq_(x, "context")
class FN7:
def __call__(self, x):
eq_(x, "context")
fn7 = FN7()
class FN8:
def __call__(self, x, y=3):
eq_(x, "context")
fn8 = FN8()
for fn in fn1, fn2, fn3, fn4, fn5, fn6a, fn6b, fn7, fn8:
c = sa.ColumnDefault(fn)
c.arg("context")
def _check_default_slots(self, tbl, name, *wanted):
slots = [
"default",
"onupdate",
"server_default",
"server_onupdate",
]
col = tbl.c[name]
for slot in wanted:
slots.remove(slot)
assert getattr(col, slot) is not None, getattr(col, slot)
for slot in slots:
assert getattr(col, slot) is None, getattr(col, slot)
def test_py_vs_server_default_detection_one(self):
has_ = self._check_default_slots
metadata = MetaData()
tbl = Table(
"default_test",
metadata,
# python function
Column("col1", Integer, primary_key=True, default="1"),
# python literal
Column(
"col2",
String(20),
default="imthedefault",
onupdate="im the update",
),
# preexecute expression
Column(
"col3",
Integer,
default=func.length("abcdef"),
onupdate=func.length("abcdefghijk"),
),
# SQL-side default from sql expression
Column("col4", Integer, server_default="1"),
# SQL-side default from literal expression
Column("col5", Integer, server_default="1"),
# preexecute + update timestamp
Column(
"col6",
sa.Date,
default=datetime.datetime.today,
onupdate=datetime.datetime.today,
),
Column("boolcol1", sa.Boolean, default=True),
Column("boolcol2", sa.Boolean, default=False),
# python function which uses ExecutionContext
Column(
"col7",
Integer,
default=lambda: 5,
onupdate=lambda: 10,
),
# python builtin
Column(
"col8",
sa.Date,
default=datetime.date.today,
onupdate=datetime.date.today,
),
Column("col9", String(20), default="py", server_default="ddl"),
)
has_(tbl, "col1", "default")
has_(tbl, "col2", "default", "onupdate")
has_(tbl, "col3", "default", "onupdate")
has_(tbl, "col4", "server_default")
has_(tbl, "col5", "server_default")
has_(tbl, "col6", "default", "onupdate")
has_(tbl, "boolcol1", "default")
has_(tbl, "boolcol2", "default")
has_(tbl, "col7", "default", "onupdate")
has_(tbl, "col8", "default", "onupdate")
has_(tbl, "col9", "default", "server_default")
def test_py_vs_server_default_detection_two(self):
has_ = self._check_default_slots
metadata = MetaData()
ColumnDefault, DefaultClause = sa.ColumnDefault, sa.DefaultClause
tbl = Table(
"t2",
metadata,
Column("col1", Integer, Sequence("foo")),
Column(
"col2", Integer, default=Sequence("foo"), server_default="y"
),
Column("col3", Integer, Sequence("foo"), server_default="x"),
Column("col4", Integer, ColumnDefault("x"), DefaultClause("y")),
Column(
"col5",
Integer,
ColumnDefault("x"),
DefaultClause("y"),
onupdate="z",
),
Column(
"col6",
Integer,
ColumnDefault("x"),
server_default="y",
onupdate="z",
),
Column(
"col7", Integer, default="x", server_default="y", onupdate="z"
),
Column(
"col8",
Integer,
server_onupdate="u",
default="x",
server_default="y",
onupdate="z",
),
)
tbl.append_column(
Column(
"col4",
Integer,
ColumnDefault("x"),
DefaultClause("y"),
DefaultClause("y", for_update=True),
),
replace_existing=True,
)
has_(tbl, "col1", "default")
has_(tbl, "col2", "default", "server_default")
has_(tbl, "col3", "default", "server_default")
has_(tbl, "col4", "default", "server_default", "server_onupdate")
has_(tbl, "col5", "default", "server_default", "onupdate")
has_(tbl, "col6", "default", "server_default", "onupdate")
has_(tbl, "col7", "default", "server_default", "onupdate")
has_(
tbl,
"col8",
"default",
"server_default",
"onupdate",
"server_onupdate",
)
def test_no_embed_in_sql(self):
"""Using a DefaultGenerator, Sequence, DefaultClause
in the columns, where clause of a select, or in the values
clause of insert, update, raises an informative error"""
t = Table("some_table", MetaData(), Column("id", Integer))
for const in (
sa.Sequence("y"),
sa.ColumnDefault("y"),
sa.DefaultClause("y"),
):
assert_raises_message(
sa.exc.ArgumentError,
r"SQL expression for WHERE/HAVING role expected, "
r"got (?:Sequence|(?:ScalarElement)ColumnDefault|"
r"DefaultClause)\('y'.*\)",
t.select().where,
const,
)
assert_raises_message(
sa.exc.ArgumentError,
"SQL expression element expected, got %s"
% const.__class__.__name__,
t.insert().values,
col4=const,
)
assert_raises_message(
sa.exc.ArgumentError,
"SQL expression element expected, got %s"
% const.__class__.__name__,
t.update().values,
col4=const,
)
| DefaultObjectTest |
python | python__mypy | mypy/test/testdiff.py | {
"start": 576,
"end": 2510
} | class ____(DataSuite):
files = ["diff.test"]
def run_case(self, testcase: DataDrivenTestCase) -> None:
first_src = "\n".join(testcase.input)
files_dict = dict(testcase.files)
second_src = files_dict["tmp/next.py"]
options = parse_options(first_src, testcase, 1)
if options.python_version > sys.version_info:
pytest.skip("Test case requires a newer Python version")
messages1, files1 = self.build(first_src, options)
messages2, files2 = self.build(second_src, options)
a = []
if messages1:
a.extend(messages1)
if messages2:
a.append("== next ==")
a.extend(messages2)
assert (
files1 is not None and files2 is not None
), "cases where CompileError occurred should not be run"
prefix = "__main__"
snapshot1 = snapshot_symbol_table(prefix, files1["__main__"].names)
snapshot2 = snapshot_symbol_table(prefix, files2["__main__"].names)
diff = compare_symbol_table_snapshots(prefix, snapshot1, snapshot2)
for trigger in sorted(diff):
a.append(trigger)
assert_string_arrays_equal(
testcase.output, a, f"Invalid output ({testcase.file}, line {testcase.line})"
)
def build(self, source: str, options: Options) -> tuple[list[str], dict[str, MypyFile] | None]:
    """Type-check *source* and return ``(error_messages, module_map)``.

    On a ``CompileError`` the error's messages are returned with ``None``
    in place of the module map.
    """
    # Force fixture builtins and disable on-disk caching for test isolation.
    options.use_builtins_fixtures = True
    options.show_traceback = True
    options.cache_dir = os.devnull
    options.allow_empty_bodies = True
    sources = [BuildSource("main", None, source)]
    try:
        result = build.build(
            sources=sources,
            options=options,
            alt_lib_path=test_temp_dir,
        )
    except CompileError as err:
        # TODO: Is it okay to return None?
        return err.messages, None
    return result.errors, result.files
| ASTDiffSuite |
python | altair-viz__altair | altair/vegalite/v6/schema/core.py | {
"start": 819348,
"end": 857902
} | class ____(VegaLiteSchema):
"""
OverlayMarkDef schema wrapper.
Parameters
----------
align : dict, :class:`Align`, :class:`ExprRef`, Literal['left', 'center', 'right']
The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule).
One of ``"left"``, ``"right"``, ``"center"``.
**Note:** Expression reference is *not* supported for range marks.
angle : dict, float, :class:`ExprRef`
The rotation angle of the text, in degrees.
aria : bool, dict, :class:`ExprRef`
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG element, removing the mark item from the ARIA accessibility tree.
ariaRole : str, dict, :class:`ExprRef`
Sets the type of user interface element of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "role" attribute. Warning: this
property is experimental and may be changed in the future.
ariaRoleDescription : str, dict, :class:`ExprRef`
A human-readable, author-localized description for the role of the mark item for
`ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "aria-roledescription" attribute.
Warning: this property is experimental and may be changed in the future.
aspect : bool, dict, :class:`ExprRef`
Whether to keep aspect ratio of image marks.
baseline : dict, :class:`ExprRef`, :class:`Baseline`, :class:`TextBaseline`, Literal['alphabetic', 'line-bottom', 'line-top', 'top', 'middle', 'bottom']
For text marks, the vertical text baseline. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an
expression reference that provides one of the valid values. The ``"line-top"`` and
``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are
calculated relative to the ``lineHeight`` rather than ``fontSize`` alone.
For range marks, the vertical alignment of the marks. One of ``"top"``,
``"middle"``, ``"bottom"``.
**Note:** Expression reference is *not* supported for range marks.
blend : dict, :class:`Blend`, :class:`ExprRef`, Literal[None, 'multiply', 'screen', 'overlay', 'darken', 'lighten', 'color-dodge', 'color-burn', 'hard-light', 'soft-light', 'difference', 'exclusion', 'hue', 'saturation', 'color', 'luminosity']
The color blend mode for drawing an item on its current background. Any valid `CSS
mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__
value can be used.
**Default value:** ``"source-over"``
clip : bool, dict, :class:`ExprRef`
Whether a mark be clipped to the enclosing group's width and height.
color : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple']
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
cornerRadius : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cornerRadiusBottomLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom left corner.
**Default value:** ``0``
cornerRadiusBottomRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' bottom right corner.
**Default value:** ``0``
cornerRadiusTopLeft : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top right corner.
**Default value:** ``0``
cornerRadiusTopRight : dict, float, :class:`ExprRef`
The radius in pixels of rounded rectangles' top left corner.
**Default value:** ``0``
cursor : dict, :class:`Cursor`, :class:`ExprRef`, Literal['auto', 'default', 'none', 'context-menu', 'help', 'pointer', 'progress', 'wait', 'cell', 'crosshair', 'text', 'vertical-text', 'alias', 'copy', 'move', 'no-drop', 'not-allowed', 'e-resize', 'n-resize', 'ne-resize', 'nw-resize', 's-resize', 'se-resize', 'sw-resize', 'w-resize', 'ew-resize', 'ns-resize', 'nesw-resize', 'nwse-resize', 'col-resize', 'row-resize', 'all-scroll', 'zoom-in', 'zoom-out', 'grab', 'grabbing']
The mouse cursor used over the mark. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
description : str, dict, :class:`ExprRef`
A text description of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__.
dir : dict, :class:`ExprRef`, :class:`TextDirection`, Literal['ltr', 'rtl']
The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"``
(right-to-left). This property determines on which side is truncated in response to
the limit parameter.
**Default value:** ``"ltr"``
dx : dict, float, :class:`ExprRef`
The horizontal offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
dy : dict, float, :class:`ExprRef`
The vertical offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
ellipsis : str, dict, :class:`ExprRef`
The ellipsis string for text truncated in response to the limit parameter.
**Default value:** ``"…"``
endAngle : dict, float, :class:`ExprRef`
The end angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
fill : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default fill color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove fill.
**Default value:** (None)
fillOpacity : dict, float, :class:`ExprRef`
The fill opacity (value between [0,1]).
**Default value:** ``1``
filled : bool
Whether the mark's color should be used as fill color instead of stroke color.
**Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well
as ``geoshape`` marks for `graticule
<https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources;
otherwise, ``true``.
**Note:** This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
font : str, dict, :class:`ExprRef`
The typeface to set the text in (e.g., ``"Helvetica Neue"``).
fontSize : dict, float, :class:`ExprRef`
The font size, in pixels.
**Default value:** ``11``
fontStyle : str, dict, :class:`ExprRef`, :class:`FontStyle`
The font style (e.g., ``"italic"``).
fontWeight : dict, :class:`ExprRef`, :class:`FontWeight`, Literal['normal', 'bold', 'lighter', 'bolder', 100, 200, 300, 400, 500, 600, 700, 800, 900]
The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a
number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and
``"bold"`` = ``700``).
height : dict, float, :class:`ExprRef`
Height of the marks.
href : str, dict, :class:`URI`, :class:`ExprRef`
A URL to load upon mouse click. If defined, the mark acts as a hyperlink.
innerRadius : dict, float, :class:`ExprRef`
The inner radius in pixels of arc marks. ``innerRadius`` is an alias for
``radius2``.
**Default value:** ``0``
interpolate : dict, :class:`ExprRef`, :class:`Interpolate`, Literal['basis', 'basis-open', 'basis-closed', 'bundle', 'cardinal', 'cardinal-open', 'cardinal-closed', 'catmull-rom', 'linear', 'linear-closed', 'monotone', 'natural', 'step', 'step-before', 'step-after']
The line interpolation method to use for line and area marks. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"step-before"``: alternate between vertical and horizontal segments, as in a
step function.
* ``"step-after"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
invalid : :class:`MarkInvalidDataMode`, Literal['filter', 'break-paths-filter-domains', 'break-paths-show-domains', 'break-paths-show-path-domains', 'show'], None
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
limit : dict, float, :class:`ExprRef`
The maximum length of the text mark in pixels. The text value will be automatically
truncated if the rendered size exceeds the limit.
**Default value:** ``0`` -- indicating no limit
lineBreak : str, dict, :class:`ExprRef`
A delimiter, such as a newline character, upon which to break text strings into
multiple lines. This property is ignored if the text is array-valued.
lineHeight : dict, float, :class:`ExprRef`
The line height in pixels (the spacing between subsequent lines of text) for
multi-line text marks.
opacity : dict, float, :class:`ExprRef`
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
order : bool, None
For line and trail marks, this ``order`` property can be set to ``null`` or
``false`` to make the lines use the original order in the data sources.
orient : :class:`Orientation`, Literal['horizontal', 'vertical']
The orientation of a non-stacked bar, tick, area, and line charts. The value is
either horizontal (default) or vertical.
* For bar, rule and tick, this determines whether the size of the bar and tick
should be applied to x or y dimension.
* For area, this property determines the orient property of the Vega output.
* For line and trail marks, this property determines the sort order of the points in
the line if ``config.sortLineBy`` is not specified. For stacked charts, this is
always determined by the orientation of the stack; therefore explicitly specified
value will be ignored.
outerRadius : dict, float, :class:`ExprRef`
The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``.
**Default value:** ``0``
padAngle : dict, float, :class:`ExprRef`
The angular padding applied to sides of the arc, in radians.
radius : dict, float, :class:`ExprRef`
For arc mark, the primary (outer) radius in pixels.
For text marks, polar coordinate radial offset, in pixels, of the text from the
origin determined by the ``x`` and ``y`` properties.
**Default value:** ``min(plot_width, plot_height)/2``
radius2 : dict, float, :class:`ExprRef`
The secondary (inner) radius in pixels of arc marks.
**Default value:** ``0``
radius2Offset : dict, float, :class:`ExprRef`
Offset for radius2.
radiusOffset : dict, float, :class:`ExprRef`
Offset for radius.
shape : str, dict, :class:`ExprRef`, :class:`SymbolShape`
Shape of the point marks. Supported values include:
* plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``,
``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or
``"triangle-left"``.
* the line symbol ``"stroke"``
* centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"``
* a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
**Default value:** ``"circle"``
size : dict, float, :class:`ExprRef`
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
smooth : bool, dict, :class:`ExprRef`
A boolean flag (default true) indicating if the image should be smoothed when
resized. If false, individual pixels should be scaled directly rather than
interpolated with smoothing. For SVG rendering, this option may not work in some
browsers due to lack of standardization.
startAngle : dict, float, :class:`ExprRef`
The start angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
stroke : str, dict, :class:`Color`, :class:`ExprRef`, :class:`Gradient`, :class:`HexColor`, :class:`ColorName`, :class:`LinearGradient`, :class:`RadialGradient`, Literal['black', 'silver', 'gray', 'white', 'maroon', 'red', 'purple', 'fuchsia', 'green', 'lime', 'olive', 'yellow', 'navy', 'blue', 'teal', 'aqua', 'orange', 'aliceblue', 'antiquewhite', 'aquamarine', 'azure', 'beige', 'bisque', 'blanchedalmond', 'blueviolet', 'brown', 'burlywood', 'cadetblue', 'chartreuse', 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', 'crimson', 'cyan', 'darkblue', 'darkcyan', 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', 'forestgreen', 'gainsboro', 'ghostwhite', 'gold', 'goldenrod', 'greenyellow', 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', 'lightgoldenrodyellow', 'lightgray', 'lightgreen', 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', 'lightskyblue', 'lightslategray', 'lightslategrey', 'lightsteelblue', 'lightyellow', 'limegreen', 'linen', 'magenta', 'mediumaquamarine', 'mediumblue', 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', 'midnightblue', 'mintcream', 'mistyrose', 'moccasin', 'navajowhite', 'oldlace', 'olivedrab', 'orangered', 'orchid', 'palegoldenrod', 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', 'rosybrown', 'royalblue', 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', 'seashell', 'sienna', 'skyblue', 'slateblue', 'slategray', 'slategrey', 'snow', 'springgreen', 'steelblue', 'tan', 'thistle', 
'tomato', 'turquoise', 'violet', 'wheat', 'whitesmoke', 'yellowgreen', 'rebeccapurple'], None
Default stroke color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove stroke.
**Default value:** (None)
strokeCap : dict, :class:`ExprRef`, :class:`StrokeCap`, Literal['butt', 'round', 'square']
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash : dict, Sequence[float], :class:`ExprRef`
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset : dict, float, :class:`ExprRef`
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin : dict, :class:`ExprRef`, :class:`StrokeJoin`, Literal['miter', 'round', 'bevel']
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit : dict, float, :class:`ExprRef`
The miter limit at which to bevel a line join.
strokeOffset : dict, float, :class:`ExprRef`
The offset in pixels at which to draw the group stroke and fill. If unspecified, the
default behavior is to dynamically offset stroked groups such that 1 pixel stroke
widths align with the pixel grid.
strokeOpacity : dict, float, :class:`ExprRef`
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth : dict, float, :class:`ExprRef`
The stroke width, in pixels.
style : str, Sequence[str]
A string or array of strings indicating the name of custom styles to apply to the
mark. A style is a named collection of mark property defaults defined within the
`style configuration
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__. If style is an
array, later styles will override earlier styles. Any `mark properties
<https://vega.github.io/vega-lite/docs/encoding.html#mark-prop>`__ explicitly
defined within the ``encoding`` will override a style default.
**Default value:** The mark's name. For example, a bar mark will have style
``"bar"`` by default. **Note:** Any specified style will augment the default style.
For example, a bar mark with ``"style": "foo"`` will receive from
``config.style.bar`` and ``config.style.foo`` (the specified style ``"foo"`` has
higher precedence).
tension : dict, float, :class:`ExprRef`
Depending on the interpolation type, sets the tension parameter (for line and area
marks).
text : str, dict, :class:`Text`, Sequence[str], :class:`ExprRef`
Placeholder text if the ``text`` channel is not specified
theta : dict, float, :class:`ExprRef`
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2 : dict, float, :class:`ExprRef`
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
theta2Offset : dict, float, :class:`ExprRef`
Offset for theta2.
thetaOffset : dict, float, :class:`ExprRef`
Offset for theta.
time : dict, float, :class:`ExprRef`
timeUnitBandPosition : float
Default relative band position for a time unit. If set to ``0``, the marks will be
positioned at the beginning of the time unit band step. If set to ``0.5``, the marks
will be positioned in the middle of the time unit band step.
timeUnitBandSize : float
Default relative band size for a time unit. If set to ``1``, the bandwidth of the
marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the
marks will be half of the time unit band step.
tooltip : str, bool, dict, float, :class:`ExprRef`, :class:`TooltipContent`, None
The tooltip text string to show upon mouse hover or an object defining which fields
should the tooltip be derived from.
* If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from
``encoding`` will be used.
* If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the
highlighted data point will be used.
* If set to ``null`` or ``false``, then no tooltip will be used.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
**Default value:** ``null``
url : str, dict, :class:`URI`, :class:`ExprRef`
The URL of the image file for image marks.
width : dict, float, :class:`ExprRef`
Width of the marks.
x : dict, float, :class:`ExprRef`, Literal['width']
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2 : dict, float, :class:`ExprRef`, Literal['width']
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2Offset : dict, float, :class:`ExprRef`
Offset for x2-position.
xOffset : dict, float, :class:`ExprRef`
Offset for x-position.
y : dict, float, :class:`ExprRef`, Literal['height']
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2 : dict, float, :class:`ExprRef`, Literal['height']
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2Offset : dict, float, :class:`ExprRef`
Offset for y2-position.
yOffset : dict, float, :class:`ExprRef`
Offset for y-position.
"""
_schema = {"$ref": "#/definitions/OverlayMarkDef"}
def __init__(
self,
align: Optional[Parameter | SchemaBase | Map | Align_T] = Undefined,
angle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
aria: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
ariaRole: Optional[str | Parameter | SchemaBase | Map] = Undefined,
ariaRoleDescription: Optional[str | Parameter | SchemaBase | Map] = Undefined,
aspect: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
baseline: Optional[Parameter | SchemaBase | Map | TextBaseline_T] = Undefined,
blend: Optional[Parameter | SchemaBase | Map | Blend_T] = Undefined,
clip: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
color: Optional[str | Parameter | SchemaBase | Map | ColorName_T] = Undefined,
cornerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusBottomLeft: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusBottomRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cornerRadiusTopLeft: Optional[float | Parameter | SchemaBase | Map] = Undefined,
cornerRadiusTopRight: Optional[
float | Parameter | SchemaBase | Map
] = Undefined,
cursor: Optional[Parameter | SchemaBase | Map | Cursor_T] = Undefined,
description: Optional[str | Parameter | SchemaBase | Map] = Undefined,
dir: Optional[Parameter | SchemaBase | Map | TextDirection_T] = Undefined,
dx: Optional[float | Parameter | SchemaBase | Map] = Undefined,
dy: Optional[float | Parameter | SchemaBase | Map] = Undefined,
ellipsis: Optional[str | Parameter | SchemaBase | Map] = Undefined,
endAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fill: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
fillOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
filled: Optional[bool] = Undefined,
font: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontSize: Optional[float | Parameter | SchemaBase | Map] = Undefined,
fontStyle: Optional[str | Parameter | SchemaBase | Map] = Undefined,
fontWeight: Optional[Parameter | SchemaBase | Map | FontWeight_T] = Undefined,
height: Optional[float | Parameter | SchemaBase | Map] = Undefined,
href: Optional[str | Parameter | SchemaBase | Map] = Undefined,
innerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
interpolate: Optional[Parameter | SchemaBase | Map | Interpolate_T] = Undefined,
invalid: Optional[SchemaBase | MarkInvalidDataMode_T | None] = Undefined,
limit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
lineBreak: Optional[str | Parameter | SchemaBase | Map] = Undefined,
lineHeight: Optional[float | Parameter | SchemaBase | Map] = Undefined,
opacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
order: Optional[bool | None] = Undefined,
orient: Optional[SchemaBase | Orientation_T] = Undefined,
outerRadius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
padAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radius2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
radiusOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
shape: Optional[str | Parameter | SchemaBase | Map] = Undefined,
size: Optional[float | Parameter | SchemaBase | Map] = Undefined,
smooth: Optional[bool | Parameter | SchemaBase | Map] = Undefined,
startAngle: Optional[float | Parameter | SchemaBase | Map] = Undefined,
stroke: Optional[
str | Parameter | SchemaBase | Map | ColorName_T | None
] = Undefined,
strokeCap: Optional[Parameter | SchemaBase | Map | StrokeCap_T] = Undefined,
strokeDash: Optional[
Parameter | SchemaBase | Sequence[float] | Map
] = Undefined,
strokeDashOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeJoin: Optional[Parameter | SchemaBase | Map | StrokeJoin_T] = Undefined,
strokeMiterLimit: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeOpacity: Optional[float | Parameter | SchemaBase | Map] = Undefined,
strokeWidth: Optional[float | Parameter | SchemaBase | Map] = Undefined,
style: Optional[str | Sequence[str]] = Undefined,
tension: Optional[float | Parameter | SchemaBase | Map] = Undefined,
text: Optional[str | Parameter | SchemaBase | Sequence[str] | Map] = Undefined,
theta: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2: Optional[float | Parameter | SchemaBase | Map] = Undefined,
theta2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
thetaOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
time: Optional[float | Parameter | SchemaBase | Map] = Undefined,
timeUnitBandPosition: Optional[float] = Undefined,
timeUnitBandSize: Optional[float] = Undefined,
tooltip: Optional[
str | bool | float | Parameter | SchemaBase | Map | None
] = Undefined,
url: Optional[str | Parameter | SchemaBase | Map] = Undefined,
width: Optional[float | Parameter | SchemaBase | Map] = Undefined,
x: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2: Optional[
float | Parameter | SchemaBase | Literal["width"] | Map
] = Undefined,
x2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
xOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
y: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2: Optional[
float | Parameter | SchemaBase | Literal["height"] | Map
] = Undefined,
y2Offset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
yOffset: Optional[float | Parameter | SchemaBase | Map] = Undefined,
**kwds,
):
super().__init__(
align=align,
angle=angle,
aria=aria,
ariaRole=ariaRole,
ariaRoleDescription=ariaRoleDescription,
aspect=aspect,
baseline=baseline,
blend=blend,
clip=clip,
color=color,
cornerRadius=cornerRadius,
cornerRadiusBottomLeft=cornerRadiusBottomLeft,
cornerRadiusBottomRight=cornerRadiusBottomRight,
cornerRadiusTopLeft=cornerRadiusTopLeft,
cornerRadiusTopRight=cornerRadiusTopRight,
cursor=cursor,
description=description,
dir=dir,
dx=dx,
dy=dy,
ellipsis=ellipsis,
endAngle=endAngle,
fill=fill,
fillOpacity=fillOpacity,
filled=filled,
font=font,
fontSize=fontSize,
fontStyle=fontStyle,
fontWeight=fontWeight,
height=height,
href=href,
innerRadius=innerRadius,
interpolate=interpolate,
invalid=invalid,
limit=limit,
lineBreak=lineBreak,
lineHeight=lineHeight,
opacity=opacity,
order=order,
orient=orient,
outerRadius=outerRadius,
padAngle=padAngle,
radius=radius,
radius2=radius2,
radius2Offset=radius2Offset,
radiusOffset=radiusOffset,
shape=shape,
size=size,
smooth=smooth,
startAngle=startAngle,
stroke=stroke,
strokeCap=strokeCap,
strokeDash=strokeDash,
strokeDashOffset=strokeDashOffset,
strokeJoin=strokeJoin,
strokeMiterLimit=strokeMiterLimit,
strokeOffset=strokeOffset,
strokeOpacity=strokeOpacity,
strokeWidth=strokeWidth,
style=style,
tension=tension,
text=text,
theta=theta,
theta2=theta2,
theta2Offset=theta2Offset,
thetaOffset=thetaOffset,
time=time,
timeUnitBandPosition=timeUnitBandPosition,
timeUnitBandSize=timeUnitBandSize,
tooltip=tooltip,
url=url,
width=width,
x=x,
x2=x2,
x2Offset=x2Offset,
xOffset=xOffset,
y=y,
y2=y2,
y2Offset=y2Offset,
yOffset=yOffset,
**kwds,
)
| OverlayMarkDef |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.