language
stringclasses 1
value | repo
stringclasses 346
values | path
stringlengths 6
201
| class_span
dict | source
stringlengths 21
2.38M
| target
stringlengths 1
96
|
|---|---|---|---|---|---|
python
|
sphinx-doc__sphinx
|
sphinx/ext/autodoc/_dynamic/_member_finder.py
|
{
"start": 1658,
"end": 31515
}
|
class ____:
"""A member of object.
This is used for the result of `_get_members_to_document()` to
represent each member of the object.
"""
__slots__ = '__name__', 'object', 'docstring', 'class_'
__name__: str
object: Any
docstring: Sequence[str] | None
class_: Any
skipped: bool
def __init__(
self,
name: str,
obj: INSTANCE_ATTR_T | SLOTS_ATTR_T | Any,
*,
docstring: Sequence[str] | None = None,
class_: Any = None,
) -> None:
self.__name__ = name
self.object = obj
self.docstring = docstring
self.class_ = class_
def __repr__(self) -> str:
return (
f'ObjectMember('
f'name={self.__name__!r}, '
f'obj={self.object!r}, '
f'docstring={self.docstring!r}, '
f'class_={self.class_!r}'
f')'
)
def _gather_members(
    *,
    want_all: bool,
    indent: str,
    analyzer_order: dict[str, int],
    attr_docs: dict[tuple[str, str], list[str]],
    config: _AutodocConfig,
    current_document: _CurrentDocument,
    events: EventManager,
    get_attr: _AttrGetter,
    options: _AutoDocumenterOptions,
    parent_modname: str,
    props: _ItemProperties,
    ref_context: Mapping[str, str | None],
    reread_always: MutableSet[str],
) -> list[tuple[_ItemProperties, bool, str]]:
    """Generate reST for member documentation.

    If *want_all* is True, document all members, else those given by
    *self.options.members*.

    Returns ``(member_props, is_attr, indent)`` triples for every member
    that survived filtering and could be imported, sorted according to
    the configured member order.
    """
    # Only modules and class-like objects can own members.
    if props.obj_type not in {'module', 'class', 'exception'}:
        msg = 'must be implemented in subclasses'
        raise NotImplementedError(msg)
    assert isinstance(props, (_ModuleProperties, _ClassDefProperties))

    # Class members get one extra space of indentation relative to the
    # class directive; module members do not.
    indent += ' ' * (props.obj_type != 'module')

    # set current namespace for finding members
    current_document.autodoc_module = props.module_name
    if props.parts:
        current_document.autodoc_class = props.parts[0]

    inherited_members = frozenset(options.inherited_members or ())

    # Step 1: discover every candidate member of the object.
    found_members = _get_members_to_document(
        want_all=want_all,
        get_attr=get_attr,
        class_signature=config.autodoc_class_signature,
        inherit_docstrings=config.autodoc_inherit_docstrings,
        props=props,
        opt_members=options.members or (),
        inherited_members=inherited_members,
        opt_private_members=options.private_members,
        opt_special_members=options.special_members,
        ignore_module_all=bool(options.ignore_module_all),
        attr_docs=attr_docs,
    )
    # Step 2: drop members that must be skipped (private/special/
    # undocumented/excluded), honouring the autodoc-skip-member event.
    filtered_members = _filter_members(
        found_members,
        want_all=want_all,
        events=events,
        get_attr=get_attr,
        class_signature=config.autodoc_class_signature,
        inherit_docstrings=config.autodoc_inherit_docstrings,
        options=options,
        props=props,
        inherited_members=inherited_members,
        exclude_members=options.exclude_members,
        special_members=options.special_members,
        private_members=options.private_members,
        undoc_members=options.undoc_members,
        attr_docs=attr_docs,
    )

    # document non-skipped members
    member_documenters: list[tuple[_ItemProperties, bool, str]] = []
    for member_name, member, is_attr in filtered_members:
        # prefer the object type with the highest priority
        obj_type = _best_object_type_for_member(
            member=member,
            member_name=member_name,
            is_attr=is_attr,
            parent_obj_type=props.obj_type,
            parent_props=props,
        )
        if not obj_type:
            # don't know how to document this member
            continue

        # give explicitly separated module name, so that members
        # of inner classes can be documented
        dotted_parts = '.'.join((*props.parts, member_name))
        full_name = f'{props.module_name}::{dotted_parts}'

        # We now try to import all objects before ordering them. This is to
        # avoid possible circular imports if we were to import objects after
        # their associated documenters have been sorted.
        member_props = _load_object_by_name(
            name=full_name,
            objtype=obj_type,
            current_document=current_document,
            config=config,
            events=events,
            get_attr=get_attr,
            options=options,
            parent_modname=parent_modname,
            ref_context=ref_context,
            reread_always=reread_always,
        )
        if member_props is None:
            # The object could not be loaded; skip it silently here
            # (loading has already reported any problem).
            continue
        member_documenters.append((member_props, is_attr, indent))

    member_order = options.member_order or config.autodoc_member_order
    member_documenters = _sort_members(
        member_documenters,
        member_order,
        ignore_module_all=bool(options.ignore_module_all),
        analyzer_order=analyzer_order,
        props=props,
    )

    # reset current objects
    current_document.autodoc_module = ''
    current_document.autodoc_class = ''
    return member_documenters
def _get_members_to_document(
    *,
    want_all: bool,
    get_attr: _AttrGetter,
    class_signature: Literal['mixed', 'separated'],
    inherit_docstrings: bool,
    props: _ModuleProperties | _ClassDefProperties,
    opt_members: ALL_T | Sequence[str],
    inherited_members: Set[str],
    opt_private_members: ALL_T | Sequence[str] | None,
    opt_special_members: ALL_T | Sequence[str] | None,
    ignore_module_all: bool,
    attr_docs: dict[tuple[str, str], list[str]],
) -> list[ObjectMember]:
    """Find out which members are documentable

    If *want_all* is True, return all members. Else, only return those
    members given by *self.options.members* (which may also be None).

    Filter the given member list.

    Members are skipped if

    - they are private (except if given explicitly or the private-members
      option is set)
    - they are special methods (except if given explicitly or the
      special-members option is set)
    - they are undocumented (except if the undoc-members option is set)

    The user can override the skipping decision by connecting to the
    ``autodoc-skip-member`` event.
    """
    # Step 1: determine the set of member names we are interested in.
    wanted_members: ALL_T | Set[str]
    if want_all:
        if (
            props.obj_type == 'module'
            and not ignore_module_all
            and props.all is not None
        ):
            # An explicit __all__ restricts a module's documented members.
            wanted_members = frozenset(props.all)
        else:
            wanted_members = ALL
    else:
        # specific members given
        assert opt_members is not ALL
        # Merge :private-members: and :special-members: into :members:
        combined_members = set(opt_members)
        if opt_private_members is not None and opt_private_members is not ALL:
            combined_members.update(opt_private_members)
        if opt_special_members is not None and opt_special_members is not ALL:
            combined_members.update(opt_special_members)
        if class_signature == 'separated' and props.obj_type in {'class', 'exception'}:
            combined_members |= {'__new__', '__init__'}  # show __init__() method
        wanted_members = frozenset(combined_members)

    # Step 2: collect candidate members, keyed by their (unmangled) name.
    object_members_map: dict[str, ObjectMember] = {}
    if props.obj_type == 'module':
        for name in dir(props._obj):
            try:
                value = safe_getattr(props._obj, name, None)
                if ismock(value):
                    value = undecorate(value)
                if name in wanted_members:
                    object_members_map[name] = ObjectMember(
                        name, value, docstring=attr_docs.get(('', name), [])
                    )
            except AttributeError:
                continue

        # annotation only member (e.g. attr: int)
        for name in inspect.getannotations(props._obj):
            if name not in object_members_map and name in wanted_members:
                object_members_map[name] = ObjectMember(
                    name, INSTANCE_ATTR, docstring=attr_docs.get(('', name), [])
                )

        obj_members_seq = list(object_members_map.values())
    elif props.obj_type in {'class', 'exception'}:
        # the members directly defined in the class
        obj_dict = get_attr(props._obj, '__dict__', {})

        # enum members
        if isenumclass(props._obj):
            for name, defining_class, value in _filter_enum_dict(
                props._obj, get_attr, obj_dict
            ):
                # the order of occurrence of *name* matches obj's MRO,
                # allowing inherited attributes to be shadowed correctly
                if unmangled := unmangle(defining_class, name):
                    if unmangled in wanted_members:
                        object_members_map[unmangled] = ObjectMember(
                            unmangled, value, class_=defining_class
                        )

        # members in __slots__
        try:
            subject___slots__ = getslots(props._obj)
            if subject___slots__:
                for name, subject_docstring in subject___slots__.items():
                    if name not in wanted_members:
                        continue
                    # __slots__ may map names to docstrings (dict form).
                    if isinstance(subject_docstring, str):
                        subject_doclines = subject_docstring.splitlines()
                    else:
                        subject_doclines = None
                    object_members_map[name] = ObjectMember(
                        name,
                        SLOTS_ATTR,
                        class_=props._obj,
                        docstring=subject_doclines,
                    )
        except (TypeError, ValueError):
            # getslots() failed; the object has no usable __slots__.
            pass

        # other members
        for name in dir(props._obj):
            try:
                value = get_attr(props._obj, name)
                if ismock(value):
                    value = undecorate(value)
                unmangled = unmangle(props._obj, name)
                if (
                    unmangled
                    and unmangled not in object_members_map
                    and unmangled in wanted_members
                ):
                    # Record the defining class only when the member is
                    # defined directly on this class.
                    if name in obj_dict:
                        object_members_map[unmangled] = ObjectMember(
                            unmangled, value, class_=props._obj
                        )
                    else:
                        object_members_map[unmangled] = ObjectMember(unmangled, value)
            except AttributeError:
                continue

        # Walk the MRO to pick up annotation-only members and source
        # comment docstrings recorded by the module analyzer.
        try:
            for cls in getmro(props._obj):
                try:
                    modname = safe_getattr(cls, '__module__')
                    qualname = safe_getattr(cls, '__qualname__')
                except AttributeError:
                    qualname = None
                    analyzer = None
                else:
                    try:
                        analyzer = ModuleAnalyzer.for_module(modname)
                        analyzer.analyze()
                    except PycodeError:
                        analyzer = None

                # annotation only member (ex. attr: int)
                for name in getannotations(cls):
                    unmangled = unmangle(cls, name)
                    if (
                        unmangled
                        and unmangled not in object_members_map
                        and unmangled in wanted_members
                    ):
                        if analyzer and (qualname, unmangled) in analyzer.attr_docs:
                            attr_docstring = analyzer.attr_docs[qualname, unmangled]
                        else:
                            attr_docstring = None
                        object_members_map[unmangled] = ObjectMember(
                            unmangled,
                            INSTANCE_ATTR,
                            class_=cls,
                            docstring=attr_docstring,
                        )

                # append or complete instance attributes (cf. self.attr1) if analyzer knows
                if analyzer:
                    for (ns, name), attr_docstring in analyzer.attr_docs.items():
                        if ns == qualname and name not in object_members_map:
                            # otherwise unknown instance attribute
                            if name in wanted_members:
                                object_members_map[name] = ObjectMember(
                                    name,
                                    INSTANCE_ATTR,
                                    class_=cls,
                                    docstring=attr_docstring,
                                )
                        elif (
                            ns == qualname
                            and attr_docstring
                            and not object_members_map[name].docstring
                        ):
                            if cls != props._obj and not inherit_docstrings:
                                # If we are in the MRO of the class and not the class itself,
                                # and we do not want to inherit docstrings, then skip setting
                                # the docstring below
                                continue
                            # attribute is already known, because dir(props._obj)
                            # enumerates it. But it has no docstring yet
                            object_members_map[name].docstring = attr_docstring
        except AttributeError:
            pass

        if want_all and not inherited_members:
            # Without :inherited-members:, keep only members defined
            # directly on the class itself.
            obj_members_seq = [
                m for m in object_members_map.values() if m.class_ == props._obj
            ]
        else:
            obj_members_seq = list(object_members_map.values())
    else:
        # _gather_members() guarantees module/class/exception only.
        raise ValueError

    # Warn about explicitly requested members that were not found.
    if not want_all and opt_members is not ALL:
        for name in opt_members:
            if name in object_members_map:
                continue
            msg = __(
                'attribute %s is listed in :members: but is missing '
                'as it was not found in object %r'
            )
            logger.warning(msg, name, props._obj, type='autodoc')
    return obj_members_seq
def _filter_members(
    obj_members_seq: Iterable[ObjectMember],
    *,
    want_all: bool,
    events: EventManager,
    get_attr: _AttrGetter,
    options: _AutoDocumenterOptions,
    props: _ModuleProperties | _ClassDefProperties,
    class_signature: Literal['mixed', 'separated'],
    inherit_docstrings: bool,
    inherited_members: Set[str],
    exclude_members: EMPTY_T | Set[str] | None,
    special_members: ALL_T | Sequence[str] | None,
    private_members: ALL_T | Sequence[str] | None,
    undoc_members: Literal[True] | None,
    attr_docs: dict[tuple[str, str], list[str]],
) -> Iterator[tuple[str, Any, bool]]:
    """Yield ``(name, object, is_attr)`` for each member to be documented.

    ``is_attr`` is True when the member must be documented as an
    attribute (annotation-only members and members whose docstring comes
    from a source comment).
    """
    # search for members in source code too
    namespace = props.dotted_parts  # will be empty for modules

    # process members and determine which to skip
    for obj in obj_members_seq:
        member_name = obj.__name__
        member_obj = obj.object
        # True when the source analyzer recorded a comment docstring
        # (e.g. ``#: doc``) for this member.
        has_attr_doc = (namespace, member_name) in attr_docs
        try:
            keep = _should_keep_member(
                member_name=member_name,
                member_obj=member_obj,
                member_docstring=obj.docstring,
                member_cls=obj.class_,
                get_attr=get_attr,
                has_attr_doc=has_attr_doc,
                class_signature=class_signature,
                inherit_docstrings=inherit_docstrings,
                inherited_members=inherited_members,
                parent=props._obj,
                want_all=want_all,
                exclude_members=exclude_members,
                special_members=special_members,
                private_members=private_members,
                undoc_members=undoc_members,
            )
        except Exception as exc:
            # A misbehaving descriptor or property must not abort the
            # whole documentation run; warn and skip the member instead.
            logger.warning(
                __(
                    'autodoc: failed to determine %s.%s (%r) to be documented, '
                    'the following exception was raised:\n%s'
                ),
                props.full_name,
                member_name,
                member_obj,
                exc,
                type='autodoc',
            )
            keep = False

        # give the user a chance to decide whether this member
        # should be skipped
        if events is not None:
            # let extensions preprocess docstrings
            skip_user = events.emit_firstresult(
                'autodoc-skip-member',
                props.obj_type,
                member_name,
                member_obj,
                not keep,
                options,
            )
            # None means "no opinion": keep the decision made above.
            if skip_user is not None:
                keep = not skip_user

        if keep:
            # if is_attr is True, the member is documented as an attribute
            is_attr = member_obj is INSTANCE_ATTR or has_attr_doc
            yield member_name, member_obj, is_attr
def _best_object_type_for_member(
    member: Any,
    member_name: str,
    is_attr: bool,
    *,
    parent_obj_type: str,
    parent_props: _ItemProperties | None,
) -> _AutodocObjType | None:
    """Return the best object type that supports documenting *member*."""
    # Candidate (priority, object type) pairs; the highest priority wins.
    # Submodules are never documented automatically, so 'module' is
    # deliberately never a candidate.
    candidates: list[tuple[int, str]] = []
    try:
        is_exception_class = isinstance(member, type) and issubclass(
            member, BaseException
        )
    except TypeError as exc:
        # It's possible for a member to be considered a type, but fail
        # issubclass checks due to not being a class. For example:
        # https://github.com/sphinx-doc/sphinx/issues/11654#issuecomment-1696790436
        msg = f'Failed to discern if member {member} is a BaseException subclass.'
        raise ValueError(msg) from exc
    if is_exception_class:
        # must outrank 'class'
        candidates.append((20, 'exception'))
    if isinstance(member, type) or (is_attr and isinstance(member, (NewType, TypeVar))):
        # Must outrank 'function', 'class', and 'attribute' because a
        # NewType can be an attribute and is a class after Python 3.10.
        candidates.append((15, 'class'))
    if parent_obj_type in {'class', 'exception'}:
        if inspect.isproperty(member):
            # must outrank 'attribute'
            candidates.append((11, 'property'))
        elif parent_props is not None:
            # autosummary's _get_documenter() may pass parent_props=None.
            # Detect class properties: a classmethod wrapping a property.
            parent_dict = safe_getattr(parent_props._obj, '__dict__', {})
            raw = parent_dict.get(member_name)
            if isinstance(raw, classmethod) and inspect.isproperty(raw.__func__):
                # must outrank 'attribute'
                candidates.append((11, 'property'))
    if parent_obj_type != 'module':
        not_callable_or_class = not (
            inspect.isroutine(member) or isinstance(member, type)
        )
        if inspect.isattributedescriptor(member) or not_callable_or_class:
            # Must outrank 'method', else some non-data descriptors would
            # be recognised as methods.
            candidates.append((10, 'attribute'))
        if inspect.isroutine(member):
            # must outrank 'function'
            candidates.append((1, 'method'))
    if (
        inspect.isfunction(member)
        or inspect.isbuiltin(member)
        or (inspect.isroutine(member) and parent_obj_type == 'module')
    ):
        # supports functions, builtins and bound methods exported
        # at the module level
        candidates.extend(((0, 'function'), (-1, 'decorator')))
    if isinstance(member, AnyTypeAliasType):
        candidates.append((0, 'type'))
    if parent_obj_type == 'module' and is_attr:
        candidates.append((-10, 'data'))
    if not candidates:
        return None
    # max() keeps the first candidate among equal priorities.
    return max(candidates, key=lambda pair: pair[0])[1]  # type: ignore[return-value]
def _sort_members(
documenters: list[tuple[_ItemProperties, bool, str]],
order: Literal['alphabetical', 'bysource', 'groupwise'],
*,
ignore_module_all: bool,
analyzer_order: dict[str, int],
props: _ItemProperties,
) -> list[tuple[_ItemProperties, bool, str]]:
"""Sort the given member list."""
if order == 'groupwise':
# sort by group; alphabetically within groups
def group_order(entry: tuple[_ItemProperties, bool, str]) -> tuple[int, str]:
return entry[0]._groupwise_order_key, entry[0].full_name
documenters.sort(key=group_order)
elif order == 'bysource':
if (
isinstance(props, _ModuleProperties)
and not ignore_module_all
and (module_all := props.all)
):
# Sort by __all__
module_all_idx = {name: idx for idx, name in enumerate(module_all)}
module_all_len = len(module_all)
def source_order(entry: tuple[_ItemProperties, bool, str]) -> int:
fullname = entry[0].dotted_parts
return module_all_idx.get(fullname, module_all_len)
documenters.sort(key=source_order)
# By default, member discovery order matches source order,
# as dicts are insertion-ordered from Python 3.7.
elif analyzer_order:
# sort by source order, by virtue of the module analyzer
order_len = len(analyzer_order)
def source_order(entry: tuple[_ItemProperties, bool, str]) -> int:
fullname = entry[0].dotted_parts
return analyzer_order.get(fullname, order_len)
documenters.sort(key=source_order)
else: # alphabetical
documenters.sort(key=lambda entry: entry[0].full_name)
return documenters
def unmangle(subject: Any, name: str) -> str | None:
    """Undo Python name mangling of *name* relative to *subject*.

    Returns the unmangled name when *name* was mangled for *subject*
    itself, ``None`` when it was mangled for one of *subject*'s parent
    classes (so it is not *subject*'s own member), and *name* unchanged
    otherwise.
    """
    try:
        if not isclass(subject) or name.endswith('__'):
            return name
        own_prefix = f'_{subject.__name__}__'
        if name.startswith(own_prefix):
            # mangled for *subject* itself: restore the double underscore
            return name.replace(own_prefix, '__', 1)
        for ancestor in subject.__mro__:
            if name.startswith(f'_{ancestor.__name__}__'):
                # mangled attribute defined in parent class
                return None
    except AttributeError:
        pass
    return name
def _filter_enum_dict(
    enum_class: type[Enum],
    attrgetter: _AttrGetter,
    enum_class_dict: Mapping[str, object],
) -> Iterator[tuple[str, type, Any]]:
    """Find the attributes to document of an enumeration class.

    The output consists of triplets ``(attribute name, defining class, value)``
    where the attribute name can appear more than once during the iteration
    but with different defining class. The order of occurrence is guided by
    the MRO of *enum_class*.
    """
    # attributes that were found on a mixin type or the data type
    candidate_in_mro: set[str] = set()
    # sunder names that were picked up (and thereby allowed to be redefined)
    # see: https://docs.python.org/3/howto/enum.html#supported-dunder-names
    sunder_names = {
        '_name_',
        '_value_',
        '_missing_',
        '_order_',
        '_generate_next_value_',
    }
    # attributes that can be picked up on a mixin type or the enum's data type
    public_names = {'name', 'value', *object.__dict__, *sunder_names}
    # names that are ignored by default
    ignore_names = Enum.__dict__.keys() - public_names

    def should_ignore(name: str, value: Any) -> bool:
        # A sunder name is ignored only when it is Enum's own native
        # implementation; a user redefinition stays documentable.
        if name in sunder_names:
            return _is_native_enum_api(value, name)
        return name in ignore_names

    sentinel = object()

    def query(name: str, defining_class: type) -> tuple[str, type, Any] | None:
        # Resolve *name* through *attrgetter* on the enum class itself so
        # descriptors are evaluated; None when the lookup fails.
        value = attrgetter(enum_class, name, sentinel)
        if value is not sentinel:
            return name, defining_class, value
        return None

    # attributes defined on a parent type, possibly shadowed later by
    # the attributes defined directly inside the enumeration class
    for parent in enum_class.__mro__:
        if parent in {enum_class, Enum, object}:
            continue
        parent_dict = attrgetter(parent, '__dict__', {})
        for name, value in parent_dict.items():
            if should_ignore(name, value):
                continue
            candidate_in_mro.add(name)
            if (item := query(name, parent)) is not None:
                yield item

    # exclude members coming from the native Enum unless
    # they were redefined on a mixin type or the data type
    excluded_members = Enum.__dict__.keys() - candidate_in_mro
    yield from filter(
        None,
        (
            query(name, enum_class)
            for name in enum_class_dict
            if name not in excluded_members
        ),
    )

    # check if allowed members from ``Enum`` were redefined at the enum level
    special_names = sunder_names | public_names
    special_names &= enum_class_dict.keys()
    special_names &= Enum.__dict__.keys()
    for name in special_names:
        if (
            not _is_native_enum_api(enum_class_dict[name], name)
            and (item := query(name, enum_class)) is not None
        ):
            yield item
def _is_native_enum_api(obj: object, name: str) -> bool:
    """Check whether *obj* is the same as ``Enum.__dict__[name]``."""
    native_attr = Enum.__dict__[name]
    # Compare the fully unwrapped objects so decorated/wrapped forms of
    # the same underlying function still compare identical.
    return unwrap_all(obj) is unwrap_all(native_attr)
def _should_keep_member(
    *,
    member_name: str,
    member_obj: Any,
    member_docstring: Sequence[str] | None,
    member_cls: Any,
    get_attr: _AttrGetter,
    has_attr_doc: bool,
    class_signature: Literal['mixed', 'separated'],
    inherit_docstrings: bool,
    inherited_members: Set[str],
    parent: Any,
    want_all: bool,
    exclude_members: EMPTY_T | Set[str] | None,
    special_members: ALL_T | Sequence[str] | None,
    private_members: ALL_T | Sequence[str] | None,
    undoc_members: Literal[True] | None,
) -> bool:
    """Decide whether a single member should be documented.

    Applies the private/special/undocumented/excluded skipping rules
    described in ``_get_members_to_document()``.
    """
    # --- Resolve the member's effective docstring.
    if member_docstring:
        # hack for ClassDocumenter to inject docstring
        doclines: Sequence[str] | None = member_docstring
    else:
        doc = getdoc(
            member_obj,
            get_attr,
            inherit_docstrings,
            parent,
            member_name,
        )
        # Ignore non-string __doc__
        doclines = doc.splitlines() if isinstance(doc, str) else None
        # if the member __doc__ is the same as self's __doc__, it's just
        # inherited and therefore not the member's doc
        cls = get_attr(member_obj, '__class__', None)
        if cls:
            cls_doc = get_attr(cls, '__doc__', None)
            if cls_doc == doc:
                doclines = None
    # Split off :meta: fields (e.g. ``:meta private:``) from the docstring.
    if doclines is not None:
        doc, metadata = separate_metadata('\n'.join(doclines))
    else:
        doc = ''
        metadata = {}
    # :undoc-members: makes every member count as documented.
    has_doc = bool(doc or undoc_members)

    if 'private' in metadata:
        # consider a member private if docstring has "private" metadata
        is_private = True
    elif 'public' in metadata:
        # consider a member public if docstring has "public" metadata
        is_private = False
    else:
        # Fall back to the leading-underscore naming convention.
        is_private = member_name.startswith('_')

    if ismock(member_obj) and not has_attr_doc:
        # mocked module or object
        return False
    if exclude_members and member_name in exclude_members:
        # remove members given by exclude-members
        return False
    if not want_all:
        # An explicit :members: list was given.
        # keep documented attributes
        return has_doc or has_attr_doc

    is_filtered_inherited_member = _is_filtered_inherited_member(
        member_name,
        member_cls=member_cls,
        parent=parent,
        inherited_members=inherited_members,
        get_attr=get_attr,
    )
    if special_member_re.match(member_name):
        # special __methods__
        if member_name == '__doc__' or is_filtered_inherited_member:
            return False
        if special_members and member_name in special_members:
            return has_doc
        if (
            class_signature == 'separated'
            and member_name in {'__new__', '__init__'}
            and inspect.isclass(parent)
        ):
            return has_doc  # show __init__() method
        return False
    if is_private:
        if has_attr_doc or has_doc:
            if private_members is None:  # NoQA: SIM114
                # :private-members: was not given at all
                return False
            elif has_doc and is_filtered_inherited_member:
                return False
            return member_name in private_members
        return False
    if has_attr_doc:
        # keep documented attributes
        return True
    if is_filtered_inherited_member:
        return False
    # ignore undocumented members if :undoc-members: is not given
    return has_doc
def _is_filtered_inherited_member(
member_name: str,
*,
member_cls: Any,
parent: Any,
inherited_members: Set[str],
get_attr: _AttrGetter,
) -> bool:
if not inspect.isclass(parent):
return False
seen = set()
for cls in parent.__mro__:
if member_name in cls.__dict__:
seen.add(cls)
if (
cls.__name__ in inherited_members
and cls != parent
and any(issubclass(potential_child, cls) for potential_child in seen)
):
# given member is a member of specified *super class*
return True
if member_cls is cls:
return False
if member_name in cls.__dict__:
return False
if member_name in get_attr(cls, '__annotations__', {}):
return False
return False
|
ObjectMember
|
python
|
doocs__leetcode
|
solution/0000-0099/0052.N-Queens II/Solution.py
|
{
"start": 0,
"end": 587
}
|
class ____:
def totalNQueens(self, n: int) -> int:
def dfs(i: int):
if i == n:
nonlocal ans
ans += 1
return
for j in range(n):
a, b = i + j, i - j + n
if cols[j] or dg[a] or udg[b]:
continue
cols[j] = dg[a] = udg[b] = True
dfs(i + 1)
cols[j] = dg[a] = udg[b] = False
cols = [False] * 10
dg = [False] * 20
udg = [False] * 20
ans = 0
dfs(0)
return ans
|
Solution
|
python
|
doocs__leetcode
|
solution/0500-0599/0562.Longest Line of Consecutive One in Matrix/Solution.py
|
{
"start": 0,
"end": 717
}
|
class ____:
def longestLine(self, mat: List[List[int]]) -> int:
m, n = len(mat), len(mat[0])
a = [[0] * (n + 2) for _ in range(m + 2)]
b = [[0] * (n + 2) for _ in range(m + 2)]
c = [[0] * (n + 2) for _ in range(m + 2)]
d = [[0] * (n + 2) for _ in range(m + 2)]
ans = 0
for i in range(1, m + 1):
for j in range(1, n + 1):
if mat[i - 1][j - 1]:
a[i][j] = a[i - 1][j] + 1
b[i][j] = b[i][j - 1] + 1
c[i][j] = c[i - 1][j - 1] + 1
d[i][j] = d[i - 1][j + 1] + 1
ans = max(ans, a[i][j], b[i][j], c[i][j], d[i][j])
return ans
|
Solution
|
python
|
great-expectations__great_expectations
|
great_expectations/data_context/types/base.py
|
{
"start": 59120,
"end": 60860
}
|
class ____(BaseStoreBackendDefaults):
    """Default store configs for in memory backends.

    This is useful for testing without persistence.
    """

    def __init__(
        self,
        init_temp_docs_sites: bool = False,
    ) -> None:
        # Initialize base defaults
        super().__init__()

        # Every store uses InMemoryStoreBackend, so nothing outlives the
        # data context.
        self.stores = {
            self.expectations_store_name: {
                "class_name": "ExpectationsStore",
                "store_backend": {
                    "class_name": "InMemoryStoreBackend",
                },
            },
            self.validation_results_store_name: {
                "class_name": "ValidationResultsStore",
                "store_backend": {
                    "class_name": "InMemoryStoreBackend",
                },
            },
            self.checkpoint_store_name: {
                "class_name": "CheckpointStore",
                "store_backend": {
                    "class_name": "InMemoryStoreBackend",
                },
            },
            self.validation_definition_store_name: {
                "class_name": "ValidationDefinitionStore",
                "store_backend": {
                    "class_name": "InMemoryStoreBackend",
                },
            },
        }

        if init_temp_docs_sites:
            # BUG FIX: the previous code created tempfile.TemporaryDirectory()
            # and let the object go out of scope at the end of __init__;
            # its finalizer then removes the directory as soon as the
            # object is garbage collected, leaving the docs site pointing
            # at a deleted path.  mkdtemp() creates the directory without
            # an auto-cleanup finalizer, matching the intended lifetime.
            path = tempfile.mkdtemp()
            logger.info(f"Created temporary directory '{path}' for ephemeral docs site")
            self.data_docs_sites[DataContextConfigDefaults.DEFAULT_DATA_DOCS_SITE_NAME.value][  # type: ignore[index] # FIXME CoP
                "store_backend"
            ]["base_directory"] = path
        else:
            self.data_docs_sites = {}
|
InMemoryStoreBackendDefaults
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/mssql/base.py
|
{
"start": 41518,
"end": 41717
}
|
class ____(_DateTimeBase, sqltypes.DateTime):
    """SQL Server ``DATETIME2`` type."""

    __visit_name__ = "DATETIME2"

    def __init__(self, precision=None, **kw):
        # NOTE(review): *precision* is stored for the DDL compiler;
        # presumably it is the fractional-seconds precision rendered as
        # DATETIME2(n), with None meaning no precision clause — confirm
        # against the dialect's type compiler.
        super().__init__(**kw)
        self.precision = precision
|
DATETIME2
|
python
|
dagster-io__dagster
|
python_modules/dagster-pipes/dagster_pipes/__init__.py
|
{
"start": 14459,
"end": 15408
}
|
class ____(logging.Handler):
def __init__(self, context: "PipesContext") -> None:
super().__init__()
self._context = context
def emit(self, record: logging.LogRecord) -> None:
self._context._write_message( # noqa: SLF001
"log", {"message": record.getMessage(), "level": record.levelname}
)
def _pipes_exc_from_tb(tb: TracebackException):
    """Recursively convert a TracebackException into a PipesException,
    following the __cause__/__context__ chains.
    """
    # Python 3.13 replaced TracebackException.exc_type with exc_type_str.
    if sys.version_info >= (3, 13):
        name = tb.exc_type_str.split(".")[-1]
    elif tb.exc_type is not None:
        name = tb.exc_type.__name__
    else:
        name = None
    cause = _pipes_exc_from_tb(tb.__cause__) if tb.__cause__ else None
    context = _pipes_exc_from_tb(tb.__context__) if tb.__context__ else None
    return PipesException(
        message="".join(tb.format_exception_only()),
        stack=tb.stack.format(),
        name=name,
        cause=cause,
        context=context,
    )
# ########################
# ##### IO - BASE
# ########################
|
_PipesLoggerHandler
|
python
|
getsentry__sentry
|
src/sentry/issue_detection/detectors/sql_injection_detector.py
|
{
"start": 1666,
"end": 11111
}
|
class ____(PerformanceDetector):
type = DetectorType.SQL_INJECTION
settings_key = DetectorType.SQL_INJECTION
def __init__(self, settings: dict[DetectorType, Any], event: dict[str, Any]) -> None:
    super().__init__(settings, event)
    # fingerprint -> PerformanceProblem, populated by visit_span()
    self.stored_problems = {}
    # (key, value) request pairs considered as potential injection inputs
    self.request_parameters: list[Sequence[Any]] = []
    self.extract_request_data(event)
def extract_request_data(self, event: dict[str, Any]) -> None:
    """Collect the (key, value) request pairs worth scanning for injection.

    Pairs come from the event's query string and request body. A pair is
    discarded when the key equals the value (we would otherwise match on
    the key in the span description), when the value is a SQL keyword
    such as "SELECT", or when the value is not a string / is shorter
    than the configured ``query_value_length_threshold``.
    """
    request = event.get("request", {})
    self.query_string = request.get("query_string", None)
    self.request_body = request.get("data", None)
    self.request_url = request.get("url", None)
    if not self.query_string and not self.request_body:
        return
    candidates: list[Sequence[Any]] = []
    if self.query_string and isinstance(self.query_string, list):
        candidates.extend(self.query_string)
    if self.request_body and isinstance(self.request_body, dict):
        candidates.extend(self.request_body.items())
    accepted = []
    for pair in candidates:
        # Skip None entries and anything without at least (key, value).
        if not pair or not isinstance(pair, Sequence) or len(pair) < 2:
            continue
        key, value = pair[0], pair[1]
        # Only non-empty string values above the length threshold matter.
        if (
            not isinstance(value, str)
            or not isinstance(key, str)
            or not value
            or len(value) < self.settings["query_value_length_threshold"]
        ):
            continue
        if key == value:
            continue
        if value.upper() in EXCLUDED_KEYWORDS or key.upper() in EXCLUDED_KEYWORDS:
            continue
        accepted.append(pair)
    self.request_parameters = accepted
def visit_span(self, span: Span) -> None:
    """Inspect one span and record a PerformanceProblem when a request
    parameter appears verbatim inside the span's SQL description.
    """
    if not self._is_span_eligible(span) or not self.request_parameters:
        return
    description = span.get("description") or ""
    op = span.get("op") or ""
    spans_involved = [span["span_id"]]
    vulnerable_parameters = []
    for key, value in self.request_parameters:
        # Match key/value as standalone tokens (optionally quoted), not
        # as substrings of longer identifiers.
        regex_key = rf'(?<![\w.$])"?{re.escape(key)}"?(?![\w.$"])'
        regex_value = rf"(?<![\w.$])(['\"]?){re.escape(value)}\1(?![\w.$'\"])"
        # Recomputed every iteration because *description* is rewritten
        # below whenever a match is found.
        where_index = description.upper().find("WHERE")
        # Search for comments only in the portion after WHERE clause
        description_after_where = description[where_index:]
        comment_index = description_after_where.find("--")
        if comment_index != -1:
            # Exclude a trailing SQL line comment from the search.
            description_to_search = description_after_where[:comment_index]
            description_after_comment = description_after_where[comment_index:]
        else:
            description_to_search = description_after_where
            description_after_comment = ""
        if re.search(regex_key, description_to_search) and re.search(
            regex_value, description_to_search
        ):
            # Redact the untrusted value in the description that will be
            # shown as evidence.
            description = (
                description[:where_index]
                + re.sub(regex_value, "[UNTRUSTED_INPUT]", description_to_search)
                + description_after_comment
            )
            vulnerable_parameters.append((key, value))
    if len(vulnerable_parameters) == 0:
        return
    parameterized_description = span.get("sentry_tags", {}).get("description")
    # If the query description is not parameterized, use the original description with replacements
    if not parameterized_description:
        parameterized_description = description
    vulnerable_keys = [key for key, _ in vulnerable_parameters]
    # Fingerprint over the vulnerable keys plus the (parameterized) query
    # so the same query/parameter combination groups into one issue.
    fingerprint_description = f"{'-'.join(vulnerable_keys)}-{parameterized_description}"
    fingerprint = self._fingerprint(fingerprint_description)
    issue_description = (
        f"Untrusted Inputs [{', '.join(vulnerable_keys)}] in `{parameterized_description}`"
    )
    self.stored_problems[fingerprint] = PerformanceProblem(
        type=QueryInjectionVulnerabilityGroupType,
        fingerprint=fingerprint,
        op=op,
        desc=issue_description[:MAX_EVIDENCE_VALUE_LENGTH],
        cause_span_ids=[],
        parent_span_ids=[],
        offender_span_ids=spans_involved,
        evidence_data={
            "op": op,
            "cause_span_ids": [],
            "parent_span_ids": [],
            "offender_span_ids": spans_involved,
            "transaction_name": self._event.get("transaction", ""),
            "vulnerable_parameters": vulnerable_parameters,
            "request_url": self.request_url,
        },
        evidence_display=[
            IssueEvidence(
                name="Offending Spans",
                value=get_notification_attachment_body(
                    op,
                    description,
                )[:MAX_EVIDENCE_VALUE_LENGTH],
                # Has to be marked important to be displayed in the notifications
                important=True,
            )
        ],
    )
def is_creation_allowed_for_organization(self, organization: Organization) -> bool:
    # Enabled for every organization; the per-project check below is the
    # actual gate.
    return True
def is_creation_allowed_for_project(self, project: Project | None) -> bool:
    # Gated by this detector's "detection_enabled" setting.
    return self.settings["detection_enabled"]
def _is_span_eligible(self, span: Span) -> bool:
if not span.get("span_id"):
return False
op = span.get("op", None)
# If the span is not a database span, we can skip the detection. `db.sql.active_record` is known to cause false positives so it is excluded.
if (
not op
or not op.startswith("db")
or op.startswith("db.redis")
or op == "db.sql.active_record"
):
return False
# Auto-generated rails queries can contain interpolated values
origin = span.get("origin", "")
if origin == "auto.db.rails" or (
isinstance(origin, str) and origin.startswith("auto.db.otel.")
):
return False
# If bindings are present, we can assume the query is safe
span_data = span.get("data", {})
if span_data and span_data.get("db.sql.bindings"):
return False
description = span.get("description", None)
if not description:
return False
# Only look at SELECT queries that have a WHERE clause and don't have any parameterized keywords
description = description.strip()
if (
description[:6].upper() != "SELECT"
or "WHERE" not in description.upper()
or any(keyword in description for keyword in PARAMETERIZED_KEYWORDS)
or re.search(r"&[A-Za-z_][A-Za-z0-9_]*", description)
):
return False
# If the description contains multiple occurrences of alias chaining, likely coming from an ORM
if len(re.findall(r"\w+(->\w+)+", description)) > 3:
return False
# If the description contains multiple deleted_at IS NULL clauses, likely coming from an ORM
if len(re.findall(r'"?deleted[_aA]+t"?\s+IS\s+NULL', description)) > 3:
return False
# Laravel queries with this pattern can contain interpolated values
if span.get("sentry_tags", {}).get("sdk.name") == "sentry.php.laravel" and re.search(
r"IN\s*\(\s*(\d+\s*,\s*)*\d+\s*\)", description.upper()
):
return False
# Zend1 can cause false positives
if span.get("sentry_tags", {}).get("platform") == "php":
span_data = span.get("data", {})
event_traces = span_data.get("event.trace", []) if span_data else []
if isinstance(event_traces, list) and any(
[trace.get("function", "").startswith("Zend_") for trace in event_traces]
):
return False
return True
@classmethod
def is_event_eligible(cls, event: dict[str, Any], project: Project | None = None) -> bool:
packages = event.get("modules", {})
if not packages or not isinstance(packages, dict):
return True
# Filter out events with packages known to internally escape inputs
for package_name in packages.keys():
if package_name in EXCLUDED_PACKAGES:
return False
return True
def _fingerprint(self, description: str) -> str:
signature = description.encode("utf-8")
full_fingerprint = hashlib.sha1(signature).hexdigest()
return f"1-{QueryInjectionVulnerabilityGroupType.type_id}-{full_fingerprint}"
|
SQLInjectionDetector
|
python
|
Pylons__pyramid
|
tests/test_config/test_predicates.py
|
{
"start": 22297,
"end": 22488
}
|
class ____:
package = 'dummy package'
registry = 'dummy registry'
def get_settings(self):
return {}
def maybe_dotted(self, thing):
return thing
|
DummyConfigurator
|
python
|
patrick-kidger__equinox
|
equinox/nn/_embedding.py
|
{
"start": 4012,
"end": 10617
}
|
class ____(Module):
"""A rotary positional encoding module, as described in the paper
"RoFormer: Enhanced Transformer with Rotary Position Embedding". While this module
can be used in any context, it is particularly useful for providing positional
information to transformer models.
!!! Example
The following example demonstrates how to use `RotaryPositionalEmbedding` in
a simple transformer model.
```python
class TransformerBlock(eqx.Module):
rope_embeddings: RotaryPositionalEmbedding
def __init__(...):
self.rope_embeddings = RotaryPositionalEmbedding(...)
def __call__(...):
def process_heads(
query_heads: Float[Array, "seq_length num_heads qk_size"],
key_heads: Float[Array, "seq_length num_heads qk_size"],
value_heads: Float[Array, "seq_length num_heads vo_size"]
) -> tuple[
Float[Array, "seq_length num_heads qk_size"],
Float[Array, "seq_length num_heads qk_size"],
Float[Array, "seq_length num_heads vo_size"]
]:
query_heads = jax.vmap(self.rope_embeddings,
in_axes=1,
out_axes=1)(query_heads)
key_heads = jax.vmap(self.rope_embeddings,
in_axes=1,
out_axes=1)(key_heads)
return query_heads, key_heads, value_heads
x = self.mha_attention(... process_heads=process_heads)
...
```
??? cite
[RoFormer: Enhanced Transformer with Rotary Position Embedding](https://arxiv.org/pdf/2104.09864)
```bibtex
@misc{su2023roformer,
title={RoFormer: Enhanced Transformer with Rotary Position Embedding},
author={Jianlin Su and Yu Lu and Shengfeng Pan and Ahmed Murtadha and
Bo Wen and Yunfeng Liu},
year={2023},
eprint={arXiv:2104.09864},
}
```
"""
embedding_size: int = field(static=True)
theta: float = field(static=True, default=10_000.0)
dtype: Any = field(static=True, default_factory=default_floating_dtype)
def __check_init__(self):
if self.embedding_size < 0:
raise ValueError("`embedding_size` must not be negative.")
if (self.embedding_size % 2) != 0:
raise ValueError("`embedding_size` must be even.")
@staticmethod
def rotate_half(x: Float[Array, "seq_length embedding_size"]):
d_2 = x.shape[-1] // 2
return jnp.concatenate([-x[..., d_2:], x[..., :d_2]], axis=-1)
@staticmethod
def precompute_freqs_cis(
embedding_size: int, end: int, theta: float, dtype: Any
) -> tuple[Float[Array, "end half_emb_size"], Float[Array, "end half_emb_size"]]:
freqs = 1.0 / (
theta
** (jnp.arange(0.0, embedding_size, 2)[jnp.newaxis, :] / embedding_size)
)
t = jnp.arange(float(end))
freqs_outer = jnp.outer(t, freqs)
# we assign the type at the very end to minimize the loss of precision
return jnp.cos(freqs_outer).astype(dtype), jnp.sin(freqs_outer).astype(dtype)
@named_scope("eqx.nn.RotaryPositionalEmbedding")
def __call__(
self,
x: Float[Array, "seq_length embedding_size"],
*,
key: PRNGKeyArray | None = None,
) -> Float[Array, "seq_length embedding_size"]:
"""**Arguments:**
- `x`: A JAX array of shape `(seq_length, embedding_size)`.
- `key`: Ignored; provided for compatibility with the rest of the Equinox API.
(Keyword only argument.)
**Returns:**
A JAX array of shape `(seq_length, embedding_size)`, with the rotary positional
encoding applied to the input.
"""
seq_len, embedding_size = x.shape
if embedding_size != self.embedding_size:
raise ValueError(
f"x.shape[-1] must match self.embedding_size, "
f"but {x.shape[-1]} != {self.embedding_size}"
)
with jax.ensure_compile_time_eval():
cache_key = (embedding_size, self.dtype)
if cache_key not in internal_rope_embedding_cache:
internal_rope_embedding_cache[cache_key] = self.precompute_freqs_cis(
embedding_size, seq_len, self.theta, self.dtype
)
freqs_cos, freqs_sin = internal_rope_embedding_cache[cache_key]
freqs_seq_len, _ = freqs_cos.shape
if seq_len > freqs_seq_len:
internal_rope_embedding_cache[cache_key] = self.precompute_freqs_cis(
embedding_size, seq_len, self.theta, self.dtype
)
freqs_cos, freqs_sin = internal_rope_embedding_cache[cache_key]
freqs_cos = freqs_cos[:seq_len]
freqs_sin = freqs_sin[:seq_len]
freqs_cos = jnp.tile(freqs_cos, (1, 2))
freqs_sin = jnp.tile(freqs_sin, (1, 2))
rotate_x = self.rotate_half(x)
try:
x_rope = (x * freqs_cos) + (rotate_x * freqs_sin)
except TypePromotionError as e:
inp_dtype = jnp.dtype(x.dtype)
rope_dtype = jnp.dtype(self.dtype)
raise TypePromotionError(
f"The type of the passed value differs from the type "
f"of the rotary embeddings ({inp_dtype} != {rope_dtype}), thus leading "
"to a conflict when numpy_dtype_promotion is set to strict. To avoid "
f"this error, either initialiaze RoPE module with {inp_dtype} "
f"dtype, or explicitly cast the input argument to {rope_dtype}."
) from e
return x_rope.astype(x.dtype)
RotaryPositionalEmbedding.__init__.__doc__ = """**Arguments:**
- `embedding_size`: Size of each embedding vector. Must be non-negative and even.
- `theta`: The base frequency for the sinusoidal functions used in positional encoding.
Specifies how quickly the inner-product will decay with relative distance between
tokens. Larger values of theta will result in slower oscillations. Default is
10_000, as per the original paper.
- `dtype`: The dtype to use for the precomputed frequencies. Defaults to either
`jax.numpy.float32` or `jax.numpy.float64` depending on whether JAX is in
64-bit mode.
"""
|
RotaryPositionalEmbedding
|
python
|
fastai__fastai
|
fastai/torch_core.py
|
{
"start": 21652,
"end": 23304
}
|
class ____:
"Slice and int indexing into a list of lists"
def __init__(self, chunks, lens=None):
self.chunks = chunks
self.lens = L(map(len,self.chunks) if lens is None else lens)
self.cumlens = np.cumsum(0+self.lens)
self.totlen = self.cumlens[-1]
def __getitem__(self,i):
if isinstance(i,slice): return retain_type(self.getslice(i), old=self.chunks[0])
di,idx = self.doc_idx(i)
return retain_type(self.chunks[di][idx], old=self.chunks[0])
def getslice(self, i):
st_d,st_i = self.doc_idx(ifnone(i.start,0))
en_d,en_i = self.doc_idx(ifnone(i.stop,self.totlen+1))
res = [self.chunks[st_d][st_i:(en_i if st_d==en_d else sys.maxsize)]]
for b in range(st_d+1,en_d): res.append(self.chunks[b])
if st_d!=en_d and en_d<len(self.chunks): res.append(self.chunks[en_d][:en_i])
return concat(*res)
def doc_idx(self, i):
if i<0: i=self.totlen+i # count from end
docidx = np.searchsorted(self.cumlens, i+1)-1
cl = self.cumlens[docidx]
return docidx,i-cl
# %% ../nbs/00_torch_core.ipynb 140
def show_title(o, ax=None, ctx=None, label=None, color='black', **kwargs):
"Set title of `ax` to `o`, or print `o` if `ax` is `None`"
ax = ifnone(ax,ctx)
if ax is None: print(o)
elif hasattr(ax, 'set_title'):
t = ax.title.get_text()
if len(t) > 0: o = t+'\n'+str(o)
ax.set_title(o, color=color)
elif isinstance(ax, pd.Series):
while label in ax: label += '_'
ax = pd.concat([ax,pd.Series({label: o})])
return ax
# %% ../nbs/00_torch_core.ipynb 142
|
Chunks
|
python
|
great-expectations__great_expectations
|
contrib/great_expectations_zipcode_expectations/great_expectations_zipcode_expectations/expectations/expect_column_values_to_be_valid_georgia_zip.py
|
{
"start": 742,
"end": 1743
}
|
class ____(ColumnMapMetricProvider):
# This is the id string that will be used to reference your metric.
condition_metric_name = "column_values.valid_georgia_zip"
# This method implements the core logic for the PandasExecutionEngine
@column_condition_partial(engine=PandasExecutionEngine)
def _pandas(cls, column, **kwargs):
return column.apply(lambda x: is_valid_georgia_zip(x))
# This method defines the business logic for evaluating your metric when using a SqlAlchemyExecutionEngine
# @column_condition_partial(engine=SqlAlchemyExecutionEngine)
# def _sqlalchemy(cls, column, _dialect, **kwargs):
# raise NotImplementedError
# This method defines the business logic for evaluating your metric when using a SparkDFExecutionEngine
# @column_condition_partial(engine=SparkDFExecutionEngine)
# def _spark(cls, column, **kwargs):
# raise NotImplementedError
# This class defines the Expectation itself
|
ColumnValuesToBeValidGeorgiaZip
|
python
|
gawel__pyquery
|
pyquery/pyquery.py
|
{
"start": 3852,
"end": 4976
}
|
class ____(object):
"""property to allow a flexible api"""
def __init__(self, pget, pset=no_default, pdel=no_default):
self.pget = pget
self.pset = pset
self.pdel = pdel
def __get__(self, instance, klass):
class _element(object):
"""real element to support set/get/del attr and item and js call
style"""
def __call__(prop, *args, **kwargs):
return self.pget(instance, *args, **kwargs)
__getattr__ = __getitem__ = __setattr__ = __setitem__ = __call__
def __delitem__(prop, name):
if self.pdel is not no_default:
return self.pdel(instance, name)
else:
raise NotImplementedError()
__delattr__ = __delitem__
def __repr__(prop):
return '<flexible_element %s>' % self.pget.__name__
return _element()
def __set__(self, instance, value):
if self.pset is not no_default:
self.pset(instance, value)
else:
raise NotImplementedError()
|
FlexibleElement
|
python
|
ansible__ansible
|
test/lib/ansible_test/_internal/commands/integration/filters.py
|
{
"start": 9528,
"end": 9654
}
|
class ____(RemoteTargetFilter[NetworkRemoteConfig]):
"""Target filter for remote network hosts."""
|
NetworkRemoteTargetFilter
|
python
|
kamyu104__LeetCode-Solutions
|
Python/longest-common-prefix-between-adjacent-strings-after-removals.py
|
{
"start": 46,
"end": 891
}
|
class ____(object):
def longestCommonPrefix(self, words):
"""
:type words: List[str]
:rtype: List[int]
"""
def lcp(i, j):
if i < 0 or j >= len(words):
return 0
s1, s2 = words[i], words[j]
for k in xrange(min(len(s1), len(s2))):
if s1[k] != s2[k]:
return k
return k+1
lcps = [lcp(i, i+1) for i in xrange(len(words)-1)]
right = [0]*(len(lcps)+2)
for i in reversed(xrange(len(lcps))):
right[i] = max(right[i+1], lcps[i])
result = [0]*len(words)
left = 0
for i in xrange(len(words)):
if i-2 >= 0:
left = max(left, lcps[i-2])
result[i] = max(left, right[i+1], lcp(i-1, i+1))
return result
|
Solution
|
python
|
py-pdf__pypdf
|
pypdf/generic/_base.py
|
{
"start": 2327,
"end": 7021
}
|
class ____(PdfObjectProtocol):
# function for calculating a hash value
hash_func: Callable[..., "hashlib._Hash"] = hashlib.sha1
indirect_reference: Optional["IndirectObject"]
def hash_bin(self) -> int:
"""
Used to detect modified object.
Returns:
Hash considering type and value.
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement .hash_bin() so far"
)
def hash_value_data(self) -> bytes:
return f"{self}".encode()
def hash_value(self) -> bytes:
return (
f"{self.__class__.__name__}:"
f"{self.hash_func(self.hash_value_data()).hexdigest()}"
).encode()
def replicate(
self,
pdf_dest: PdfWriterProtocol,
) -> "PdfObject":
"""
Clone object into pdf_dest (PdfWriterProtocol which is an interface for PdfWriter)
without ensuring links. This is used in clone_document_from_root with incremental = True.
Args:
pdf_dest: Target to clone to.
Returns:
The cloned PdfObject
"""
return self.clone(pdf_dest)
def clone(
self,
pdf_dest: PdfWriterProtocol,
force_duplicate: bool = False,
ignore_fields: Optional[Sequence[Union[str, int]]] = (),
) -> "PdfObject":
"""
Clone object into pdf_dest (PdfWriterProtocol which is an interface for PdfWriter).
By default, this method will call ``_reference_clone`` (see ``_reference``).
Args:
pdf_dest: Target to clone to.
force_duplicate: By default, if the object has already been cloned and referenced,
the copy will be returned; when ``True``, a new copy will be created.
(Default value = ``False``)
ignore_fields: List/tuple of field names (for dictionaries) that will be ignored
during cloning (applies to children duplication as well). If fields are to be
considered for a limited number of levels, you have to add it as integer, for
example ``[1,"/B","/TOTO"]`` means that ``"/B"`` will be ignored at the first
level only but ``"/TOTO"`` on all levels.
Returns:
The cloned PdfObject
"""
raise NotImplementedError(
f"{self.__class__.__name__} does not implement .clone so far"
)
def _reference_clone(
self, clone: Any, pdf_dest: PdfWriterProtocol, force_duplicate: bool = False
) -> PdfObjectProtocol:
"""
Reference the object within the _objects of pdf_dest only if
indirect_reference attribute exists (which means the objects was
already identified in xref/xobjstm) if object has been already
referenced do nothing.
Args:
clone:
pdf_dest:
Returns:
The clone
"""
try:
if not force_duplicate and clone.indirect_reference.pdf == pdf_dest:
return clone
except Exception:
pass
# if hasattr(clone, "indirect_reference"):
try:
ind = self.indirect_reference
except AttributeError:
return clone
if (
pdf_dest.incremental
and ind is not None
and ind.pdf == pdf_dest._reader
and ind.idnum <= len(pdf_dest._objects)
):
i = ind.idnum
else:
i = len(pdf_dest._objects) + 1
if ind is not None:
if id(ind.pdf) not in pdf_dest._id_translated:
pdf_dest._id_translated[id(ind.pdf)] = {}
pdf_dest._id_translated[id(ind.pdf)]["PreventGC"] = ind.pdf # type: ignore[index]
if (
not force_duplicate
and ind.idnum in pdf_dest._id_translated[id(ind.pdf)]
):
obj = pdf_dest.get_object(
pdf_dest._id_translated[id(ind.pdf)][ind.idnum]
)
assert obj is not None
return obj
pdf_dest._id_translated[id(ind.pdf)][ind.idnum] = i
try:
pdf_dest._objects[i - 1] = clone
except IndexError:
pdf_dest._objects.append(clone)
i = len(pdf_dest._objects)
clone.indirect_reference = IndirectObject(i, 0, pdf_dest)
return clone
def get_object(self) -> Optional["PdfObject"]:
"""Resolve indirect references."""
return self
def write_to_stream(
self, stream: StreamType, encryption_key: Union[None, str, bytes] = None
) -> None:
raise NotImplementedError
|
PdfObject
|
python
|
doocs__leetcode
|
lcci/16.02.Words Frequency/Solution.py
|
{
"start": 0,
"end": 291
}
|
class ____:
def __init__(self, book: List[str]):
self.cnt = Counter(book)
def get(self, word: str) -> int:
return self.cnt[word]
# Your WordsFrequency object will be instantiated and called as such:
# obj = WordsFrequency(book)
# param_1 = obj.get(word)
|
WordsFrequency
|
python
|
pexpect__pexpect
|
tests/test_missing_command.py
|
{
"start": 1023,
"end": 1407
}
|
class ____ (PexpectTestCase.PexpectTestCase):
def testMissingCommand(self):
try:
i = pexpect.spawn ('ZXQYQZX')
except Exception:
pass
else:
self.fail('Expected an Exception.')
if __name__ == '__main__':
unittest.main()
suite = unittest.TestLoader().loadTestsFromTestCase(MissingCommandTestCase)
|
MissingCommandTestCase
|
python
|
ray-project__ray
|
python/ray/tests/test_gcs_fault_tolerance.py
|
{
"start": 36152,
"end": 40841
}
|
class ____(RuntimeEnvPlugin):
name = MyPlugin
async def create(
self,
uri,
runtime_env,
ctx,
logger, # noqa: F821
) -> float:
signal_path = runtime_env[self.name].get("signal_path")
if signal_path is not None:
with open(signal_path, "w") as f:
f.write("hello world!")
f.flush()
await asyncio.time.sleep(1000)
@staticmethod
def validate(runtime_env_dict: dict) -> str:
return 1
@pytest.mark.parametrize(
"ray_start_regular_with_external_redis",
[
generate_system_config_map(
testing_asio_delay_us="NodeManagerService.grpc_server.CancelResourceReserve=500000000:500000000", # noqa: E501
),
],
indirect=True,
)
@pytest.mark.parametrize(
"set_runtime_env_plugins",
[
'[{"class":"' + MY_PLUGIN_CLASS_PATH + '"}]',
],
indirect=True,
)
def test_pg_removal_after_gcs_restarts(
set_runtime_env_plugins, ray_start_regular_with_external_redis
):
@ray.remote
def task():
pass
# Use a temporary file to deterministically wait for the runtime_env setup to start.
with tempfile.TemporaryDirectory() as tmpdir:
signal_path = os.path.join(tmpdir, "signal")
pg = ray.util.placement_group(bundles=[{"CPU": 1}])
_ = task.options(
max_retries=0,
num_cpus=1,
scheduling_strategy=PlacementGroupSchedulingStrategy(
placement_group=pg,
),
runtime_env={
MyPlugin: {"signal_path": signal_path},
"config": {"setup_timeout_seconds": -1},
},
).remote()
# Wait until the runtime_env is setting up, which means we are in the process of
# popping a worker in the raylet.
wait_for_condition(lambda: os.path.exists(signal_path))
ray.util.remove_placement_group(pg)
# The PG is marked as REMOVED in redis but not removed yet from raylet
# due to the injected delay of CancelResourceReserve rpc
wait_for_condition(lambda: list_placement_groups()[0].state == "REMOVED")
ray._private.worker._global_node.kill_gcs_server()
# After GCS restarts, it will try to remove the PG resources
# again via ReleaseUnusedBundles rpc
ray._private.worker._global_node.start_gcs_server()
def verify_pg_resources_cleaned():
r_keys = ray.available_resources().keys()
return all("group" not in k for k in r_keys)
wait_for_condition(verify_pg_resources_cleaned, timeout=30)
def test_mark_job_finished_rpc_retry_and_idempotency(shutdown_only, monkeypatch):
"""
Test that MarkJobFinished RPC retries work correctly and are idempotent
when network failures occur.
This test verifies the fix for issue #53645 where duplicate MarkJobFinished
calls would crash the GCS due to non-idempotent RemoveJobReference().
Uses RPC failure injection to simulate network retry scenarios.
"""
# Inject RPC failures for MarkJobFinished - simulate network failures
# We inject request failures to force retries and test idempotency
monkeypatch.setenv(
"RAY_testing_rpc_failure",
"ray::rpc::JobInfoGcsService.grpc_client.MarkJobFinished=3:50:0:0",
)
ray.init(num_cpus=1)
@ray.remote
def test_task(i):
return i * 2
# Submit several tasks to ensure job has some work
futures = [test_task.remote(i) for i in range(5)]
results = ray.get(futures)
assert results == [0, 2, 4, 6, 8]
# Get job ID for verification
job_id = ray.get_runtime_context().get_job_id()
assert job_id is not None
# Shutdown Ray - this will trigger MarkJobFinished with potential retries
# The RPC failure injection will cause some calls to fail, forcing retries
# The fix ensures that multiple calls to RemoveJobReference are handled gracefully
ray.shutdown()
# If we reach here without crashing, the test passes
assert True
def test_concurrent_mark_job_finished(shutdown_only):
"""
Test that concurrent or rapid successive calls to job finish operations
don't cause issues.
"""
ray.init(num_cpus=2)
@ray.remote
def concurrent_task(task_id):
_ = sum(i * i for i in range(100))
return f"task_{task_id}_completed"
# Submit multiple tasks
futures = [concurrent_task.remote(i) for i in range(10)]
results = ray.get(futures)
# Verify all tasks completed
expected = [f"task_{i}_completed" for i in range(10)]
assert results == expected
if __name__ == "__main__":
sys.exit(pytest.main(["-sv", __file__]))
|
HangPlugin
|
python
|
marshmallow-code__apispec
|
src/apispec/exceptions.py
|
{
"start": 381,
"end": 508
}
|
class ____(APISpecError):
"""Raised when registering a parameter already existing in a given scope"""
|
DuplicateParameterError
|
python
|
huggingface__transformers
|
src/transformers/models/mlcd/modeling_mlcd.py
|
{
"start": 4096,
"end": 9607
}
|
class ____(nn.Module):
def __init__(self, config: MLCDVisionConfig):
super().__init__()
self.config = config
self.embed_dim = config.hidden_size
self.image_size = config.image_size
self.patch_size = config.patch_size
self.class_embedding = nn.Parameter(torch.randn(self.embed_dim))
self.patch_embedding = nn.Conv2d(
in_channels=config.num_channels,
out_channels=self.embed_dim,
kernel_size=self.patch_size,
stride=self.patch_size,
bias=False,
)
self.num_patches = (self.image_size // self.patch_size) ** 2
self.num_positions = self.num_patches + 1
self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)
def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
"""
This method allows to interpolate the pre-trained position encodings, to be able to use the model on higher resolution
images. This method is also adapted to support torch.jit tracing.
Adapted from:
- https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
- https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
"""
num_patches = embeddings.shape[1] - 1
position_embedding = self.position_embedding.weight.unsqueeze(0)
num_positions = position_embedding.shape[1] - 1
# always interpolate when tracing to ensure the exported model works for dynamic input shapes
if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
return self.position_embedding(self.position_ids)
class_pos_embed = position_embedding[:, :1]
patch_pos_embed = position_embedding[:, 1:]
dim = embeddings.shape[-1]
new_height = height // self.patch_size
new_width = width // self.patch_size
sqrt_num_positions = torch_int(num_positions**0.5)
patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)
patch_pos_embed = nn.functional.interpolate(
patch_pos_embed,
size=(new_height, new_width),
mode="bicubic",
align_corners=False,
)
patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
return torch.cat((class_pos_embed, patch_pos_embed), dim=1)
def forward(self, pixel_values: torch.FloatTensor) -> torch.Tensor:
batch_size = pixel_values.shape[0]
target_dtype = self.patch_embedding.weight.dtype
# patch_embeds -> shape = [batch, width, grid, grid]
patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))
patch_embeds = patch_embeds.flatten(2).transpose(1, 2)
class_embeds = self.class_embedding.expand(batch_size, 1, -1)
embeddings = torch.cat([class_embeds, patch_embeds], dim=1)
return embeddings
def eager_attention_forward(
module: nn.Module,
query: torch.Tensor,
key: torch.Tensor,
value: torch.Tensor,
attention_mask: Optional[torch.Tensor],
scaling: float,
dropout: float = 0.0,
**kwargs: Unpack[TransformersKwargs],
):
key_states = repeat_kv(key, module.num_key_value_groups)
value_states = repeat_kv(value, module.num_key_value_groups)
attn_weights = torch.matmul(query, key_states.transpose(2, 3)) * scaling
if attention_mask is not None:
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)
attn_output = torch.matmul(attn_weights, value_states)
attn_output = attn_output.transpose(1, 2).contiguous()
return attn_output, attn_weights
def rotate_half(x):
"""Rotates half the hidden dims of the input."""
x1 = x[..., : x.shape[-1] // 2]
x2 = x[..., x.shape[-1] // 2 :]
return torch.cat((-x2, x1), dim=-1)
def repeat_kv(hidden_states: torch.Tensor, n_rep: int) -> torch.Tensor:
"""
This is the equivalent of torch.repeat_interleave(x, dim=1, repeats=n_rep). The hidden states go from (batch,
num_key_value_heads, seqlen, head_dim) to (batch, num_attention_heads, seqlen, head_dim)
"""
batch, num_key_value_heads, slen, head_dim = hidden_states.shape
if n_rep == 1:
return hidden_states
hidden_states = hidden_states[:, :, None, :, :].expand(batch, num_key_value_heads, n_rep, slen, head_dim)
return hidden_states.reshape(batch, num_key_value_heads * n_rep, slen, head_dim)
def apply_rotary_pos_emb_vision(
q: torch.Tensor, k: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor]:
orig_q_dtype = q.dtype
orig_k_dtype = k.dtype
q, k = q.float(), k.float()
cos, sin = cos.unsqueeze(-2).float(), sin.unsqueeze(-2).float()
q_embed = (q * cos) + (rotate_half(q) * sin)
k_embed = (k * cos) + (rotate_half(k) * sin)
q_embed = q_embed.to(orig_q_dtype)
k_embed = k_embed.to(orig_k_dtype)
return q_embed, k_embed
|
MLCDVisionEmbeddings
|
python
|
conda__conda
|
conda/api.py
|
{
"start": 10278,
"end": 14511
}
|
class ____:
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
High-level management and usage of package caches.
"""
def __init__(self, pkgs_dir):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Args:
pkgs_dir (str):
"""
self._internal = _PackageCacheData(pkgs_dir)
def get(self, package_ref, default=NULL):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Args:
package_ref (PackageRef):
A :obj:`PackageRef` instance representing the key for the
:obj:`PackageCacheRecord` being sought.
default: The default value to return if the record does not exist. If not
specified and no record exists, :exc:`KeyError` is raised.
Returns:
PackageCacheRecord
"""
return self._internal.get(package_ref, default)
def query(self, package_ref_or_match_spec):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Run a query against this specific package cache instance.
Args:
package_ref_or_match_spec (PackageRef or MatchSpec or str):
Either an exact :obj:`PackageRef` to match against, or a :obj:`MatchSpec`
query object. A :obj:`str` will be turned into a :obj:`MatchSpec` automatically.
Returns:
tuple[PackageCacheRecord]
"""
return tuple(self._internal.query(package_ref_or_match_spec))
@staticmethod
def query_all(package_ref_or_match_spec, pkgs_dirs=None):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Run a query against all package caches.
Args:
package_ref_or_match_spec (PackageRef or MatchSpec or str):
Either an exact :obj:`PackageRef` to match against, or a :obj:`MatchSpec`
query object. A :obj:`str` will be turned into a :obj:`MatchSpec` automatically.
pkgs_dirs (Iterable[str] or None):
If None, will fall back to context.pkgs_dirs.
Returns:
tuple[PackageCacheRecord]
"""
return tuple(_PackageCacheData.query_all(package_ref_or_match_spec, pkgs_dirs))
def iter_records(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Returns:
Iterable[PackageCacheRecord]: A generator over all records contained in the package
cache instance. Warning: this is a generator that is exhausted on first use.
"""
return self._internal.iter_records()
@property
def is_writable(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Indicates if the package cache location is writable or read-only.
Returns:
bool
"""
return self._internal.is_writable
@staticmethod
def first_writable(pkgs_dirs=None):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Get an instance object for the first writable package cache.
Args:
pkgs_dirs (Iterable[str]):
If None, will fall back to context.pkgs_dirs.
Returns:
PackageCacheData:
An instance for the first writable package cache.
"""
return PackageCacheData(_PackageCacheData.first_writable(pkgs_dirs).pkgs_dir)
def reload(self):
"""
**Beta** While in beta, expect both major and minor changes across minor releases.
Update the instance with new information. Backing information (i.e. contents of
the pkgs_dir) is lazily loaded on first use by the other methods of this class. You
should only use this method if you are *sure* you have outdated data.
Returns:
PackageCacheData
"""
self._internal = self._internal.reload()
return self
|
PackageCacheData
|
python
|
sympy__sympy
|
sympy/stats/crv.py
|
{
"start": 10463,
"end": 17094
}
|
class ____(PSpace):
""" Continuous Probability Space
Represents the likelihood of an event space defined over a continuum.
Represented with a ContinuousDomain and a PDF (Lambda-Like)
"""
is_Continuous = True
is_real = True
@property
def pdf(self):
return self.density(*self.domain.symbols)
def compute_expectation(self, expr, rvs=None, evaluate=False, **kwargs):
if rvs is None:
rvs = self.values
else:
rvs = frozenset(rvs)
expr = expr.xreplace({rv: rv.symbol for rv in rvs})
domain_symbols = frozenset(rv.symbol for rv in rvs)
return self.domain.compute_expectation(self.pdf * expr,
domain_symbols, **kwargs)
def compute_density(self, expr, **kwargs):
# Common case Density(X) where X in self.values
if expr in self.values:
# Marginalize all other random symbols out of the density
randomsymbols = tuple(set(self.values) - frozenset([expr]))
symbols = tuple(rs.symbol for rs in randomsymbols)
pdf = self.domain.compute_expectation(self.pdf, symbols, **kwargs)
return Lambda(expr.symbol, pdf)
z = Dummy('z', real=True)
return Lambda(z, self.compute_expectation(DiracDelta(expr - z), **kwargs))
@cacheit
def compute_cdf(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise ValueError(
"CDF not well defined on multivariate expressions")
d = self.compute_density(expr, **kwargs)
x, z = symbols('x, z', real=True, cls=Dummy)
left_bound = self.domain.set.start
# CDF is integral of PDF from left bound to z
cdf = integrate(d(x), (x, left_bound, z), **kwargs)
# CDF Ensure that CDF left of left_bound is zero
cdf = Piecewise((cdf, z >= left_bound), (0, True))
return Lambda(z, cdf)
@cacheit
def compute_characteristic_function(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise NotImplementedError("Characteristic function of multivariate expressions not implemented")
d = self.compute_density(expr, **kwargs)
x, t = symbols('x, t', real=True, cls=Dummy)
cf = integrate(exp(I*t*x)*d(x), (x, -oo, oo), **kwargs)
return Lambda(t, cf)
@cacheit
def compute_moment_generating_function(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise NotImplementedError("Moment generating function of multivariate expressions not implemented")
d = self.compute_density(expr, **kwargs)
x, t = symbols('x, t', real=True, cls=Dummy)
mgf = integrate(exp(t * x) * d(x), (x, -oo, oo), **kwargs)
return Lambda(t, mgf)
@cacheit
def compute_quantile(self, expr, **kwargs):
if not self.domain.set.is_Interval:
raise ValueError(
"Quantile not well defined on multivariate expressions")
d = self.compute_cdf(expr, **kwargs)
x = Dummy('x', real=True)
p = Dummy('p', positive=True)
quantile = solveset(d(x) - p, x, self.set)
return Lambda(p, quantile)
def probability(self, condition, **kwargs):
z = Dummy('z', real=True)
cond_inv = False
if isinstance(condition, Ne):
condition = Eq(condition.args[0], condition.args[1])
cond_inv = True
# Univariate case can be handled by where
try:
domain = self.where(condition)
rv = [rv for rv in self.values if rv.symbol == domain.symbol][0]
# Integrate out all other random variables
pdf = self.compute_density(rv, **kwargs)
# return S.Zero if `domain` is empty set
if domain.set is S.EmptySet or isinstance(domain.set, FiniteSet):
return S.Zero if not cond_inv else S.One
if isinstance(domain.set, Union):
return sum(
Integral(pdf(z), (z, subset), **kwargs) for subset in
domain.set.args if isinstance(subset, Interval))
# Integrate out the last variable over the special domain
return Integral(pdf(z), (z, domain.set), **kwargs)
# Other cases can be turned into univariate case
# by computing a density handled by density computation
except NotImplementedError:
from sympy.stats.rv import density
expr = condition.lhs - condition.rhs
if not is_random(expr):
dens = self.density
comp = condition.rhs
else:
dens = density(expr, **kwargs)
comp = 0
if not isinstance(dens, ContinuousDistribution):
from sympy.stats.crv_types import ContinuousDistributionHandmade
dens = ContinuousDistributionHandmade(dens, set=self.domain.set)
# Turn problem into univariate case
space = SingleContinuousPSpace(z, dens)
result = space.probability(condition.__class__(space.value, comp))
return result if not cond_inv else S.One - result
def where(self, condition):
rvs = frozenset(random_symbols(condition))
if not (len(rvs) == 1 and rvs.issubset(self.values)):
raise NotImplementedError(
"Multiple continuous random variables not supported")
rv = tuple(rvs)[0]
interval = reduce_rational_inequalities_wrap(condition, rv)
interval = interval.intersect(self.domain.set)
return SingleContinuousDomain(rv.symbol, interval)
def conditional_space(self, condition, normalize=True, **kwargs):
condition = condition.xreplace({rv: rv.symbol for rv in self.values})
domain = ConditionalContinuousDomain(self.domain, condition)
if normalize:
# create a clone of the variable to
# make sure that variables in nested integrals are different
# from the variables outside the integral
# this makes sure that they are evaluated separately
# and in the correct order
replacement = {rv: Dummy(str(rv)) for rv in self.symbols}
norm = domain.compute_expectation(self.pdf, **kwargs)
pdf = self.pdf / norm.xreplace(replacement)
# XXX: Converting set to tuple. The order matters to Lambda though
# so we shouldn't be starting with a set here...
density = Lambda(tuple(domain.symbols), pdf)
return ContinuousPSpace(domain, density)
|
ContinuousPSpace
|
python
|
jmcnamara__XlsxWriter
|
xlsxwriter/test/comparison/test_excel2003_style05.py
|
{
"start": 315,
"end": 1117
}
|
class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("excel2003_style05.xlsx")
self.ignore_elements = {
"xl/drawings/drawing1.xml": [
"<xdr:cNvPr",
"<a:picLocks",
"<a:srcRect/>",
"<xdr:spPr",
"<a:noFill/>",
]
}
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename, {"excel2003_style": True})
worksheet = workbook.add_worksheet()
worksheet.insert_image("B3", self.image_dir + "red.jpg")
workbook.close()
self.assertExcelEqual()
|
TestCompareXLSXFiles
|
python
|
kamyu104__LeetCode-Solutions
|
Python/string-without-aaa-or-bbb.py
|
{
"start": 33,
"end": 583
}
|
class ____(object):
def strWithout3a3b(self, A, B):
"""
:type A: int
:type B: int
:rtype: str
"""
result = []
put_A = None
while A or B:
if len(result) >= 2 and result[-1] == result[-2]:
put_A = result[-1] == 'b'
else:
put_A = A >= B
if put_A:
A -= 1
result.append('a')
else:
B -= 1
result.append('b')
return "".join(result)
|
Solution
|
python
|
django__django
|
tests/bulk_create/models.py
|
{
"start": 668,
"end": 746
}
|
class ____(ProxyCountry):
class Meta:
proxy = True
|
ProxyProxyCountry
|
python
|
django__django
|
tests/signals/models.py
|
{
"start": 701,
"end": 818
}
|
class ____(models.Model):
book = models.ForeignKey(Book, on_delete=models.CASCADE)
text = models.TextField()
|
Page
|
python
|
python-pillow__Pillow
|
src/PIL/ImageFile.py
|
{
"start": 23616,
"end": 23897
}
|
class ____:
def __init__(self) -> None:
self.xsize = 0
self.ysize = 0
self.xoff = 0
self.yoff = 0
def extents(self) -> tuple[int, int, int, int]:
return self.xoff, self.yoff, self.xoff + self.xsize, self.yoff + self.ysize
|
PyCodecState
|
python
|
doocs__leetcode
|
solution/2100-2199/2140.Solving Questions With Brainpower/Solution2.py
|
{
"start": 0,
"end": 308
}
|
class ____:
def mostPoints(self, questions: List[List[int]]) -> int:
n = len(questions)
f = [0] * (n + 1)
for i in range(n - 1, -1, -1):
p, b = questions[i]
j = i + b + 1
f[i] = max(f[i + 1], p + (0 if j > n else f[j]))
return f[0]
|
Solution
|
python
|
eriklindernoren__ML-From-Scratch
|
mlfromscratch/supervised_learning/particle_swarm_optimization.py
|
{
"start": 80,
"end": 5985
}
|
class ____():
""" Particle Swarm Optimization of Neural Network.
Parameters:
-----------
n_individuals: int
The number of neural networks that are allowed in the population at a time.
model_builder: method
A method which returns a user specified NeuralNetwork instance.
inertia_weight: float [0,1)
cognitive_weight: float [0,1)
social_weight: float [0,1)
max_velocity: float
The maximum allowed value for the velocity.
Reference:
Neural Network Training Using Particle Swarm Optimization
https://visualstudiomagazine.com/articles/2013/12/01/neural-network-training-using-particle-swarm-optimization.aspx
"""
def __init__(self, population_size,
model_builder,
inertia_weight=0.8,
cognitive_weight=2,
social_weight=2,
max_velocity=20):
self.population_size = population_size
self.model_builder = model_builder
self.best_individual = None
# Parameters used to update velocity
self.cognitive_w = cognitive_weight
self.inertia_w = inertia_weight
self.social_w = social_weight
self.min_v = -max_velocity
self.max_v = max_velocity
def _build_model(self, id):
""" Returns a new individual """
model = self.model_builder(n_inputs=self.X.shape[1], n_outputs=self.y.shape[1])
model.id = id
model.fitness = 0
model.highest_fitness = 0
model.accuracy = 0
# Set intial best as the current initialization
model.best_layers = copy.copy(model.layers)
# Set initial velocity to zero
model.velocity = []
for layer in model.layers:
velocity = {"W": 0, "w0": 0}
if hasattr(layer, 'W'):
velocity = {"W": np.zeros_like(layer.W), "w0": np.zeros_like(layer.w0)}
model.velocity.append(velocity)
return model
def _initialize_population(self):
""" Initialization of the neural networks forming the population"""
self.population = []
for i in range(self.population_size):
model = self._build_model(id=i)
self.population.append(model)
def _update_weights(self, individual):
""" Calculate the new velocity and update weights for each layer """
# Two random parameters used to update the velocity
r1 = np.random.uniform()
r2 = np.random.uniform()
for i, layer in enumerate(individual.layers):
if hasattr(layer, 'W'):
# Layer weights velocity
first_term_W = self.inertia_w * individual.velocity[i]["W"]
second_term_W = self.cognitive_w * r1 * (individual.best_layers[i].W - layer.W)
third_term_W = self.social_w * r2 * (self.best_individual.layers[i].W - layer.W)
new_velocity = first_term_W + second_term_W + third_term_W
individual.velocity[i]["W"] = np.clip(new_velocity, self.min_v, self.max_v)
# Bias weight velocity
first_term_w0 = self.inertia_w * individual.velocity[i]["w0"]
second_term_w0 = self.cognitive_w * r1 * (individual.best_layers[i].w0 - layer.w0)
third_term_w0 = self.social_w * r2 * (self.best_individual.layers[i].w0 - layer.w0)
new_velocity = first_term_w0 + second_term_w0 + third_term_w0
individual.velocity[i]["w0"] = np.clip(new_velocity, self.min_v, self.max_v)
# Update layer weights with velocity
individual.layers[i].W += individual.velocity[i]["W"]
individual.layers[i].w0 += individual.velocity[i]["w0"]
def _calculate_fitness(self, individual):
""" Evaluate the individual on the test set to get fitness scores """
loss, acc = individual.test_on_batch(self.X, self.y)
individual.fitness = 1 / (loss + 1e-8)
individual.accuracy = acc
def evolve(self, X, y, n_generations):
""" Will evolve the population for n_generations based on dataset X and labels y"""
self.X, self.y = X, y
self._initialize_population()
# The best individual of the population is initialized as population's first ind.
self.best_individual = copy.copy(self.population[0])
for epoch in range(n_generations):
for individual in self.population:
# Calculate new velocity and update the NN weights
self._update_weights(individual)
# Calculate the fitness of the updated individual
self._calculate_fitness(individual)
# If the current fitness is higher than the individual's previous highest
# => update the individual's best layer setup
if individual.fitness > individual.highest_fitness:
individual.best_layers = copy.copy(individual.layers)
individual.highest_fitness = individual.fitness
# If the individual's fitness is higher than the highest recorded fitness for the
# whole population => update the best individual
if individual.fitness > self.best_individual.fitness:
self.best_individual = copy.copy(individual)
print ("[%d Best Individual - ID: %d Fitness: %.5f, Accuracy: %.1f%%]" % (epoch,
self.best_individual.id,
self.best_individual.fitness,
100*float(self.best_individual.accuracy)))
return self.best_individual
|
ParticleSwarmOptimizedNN
|
python
|
django__django
|
django/db/backends/ddl_references.py
|
{
"start": 7355,
"end": 8619
}
|
class ____(TableColumns):
def __init__(self, table, expressions, compiler, quote_value):
self.compiler = compiler
self.expressions = expressions
self.quote_value = quote_value
columns = [
col.target.column
for col in self.compiler.query._gen_cols([self.expressions])
]
super().__init__(table, columns)
def rename_table_references(self, old_table, new_table):
if self.table != old_table:
return
self.expressions = self.expressions.relabeled_clone({old_table: new_table})
super().rename_table_references(old_table, new_table)
def rename_column_references(self, table, old_column, new_column):
if self.table != table:
return
expressions = deepcopy(self.expressions)
self.columns = []
for col in self.compiler.query._gen_cols([expressions]):
if col.target.column == old_column:
col.target.column = new_column
self.columns.append(col.target.column)
self.expressions = expressions
def __str__(self):
sql, params = self.compiler.compile(self.expressions)
params = map(self.quote_value, params)
return sql % tuple(params)
|
Expressions
|
python
|
ipython__ipython
|
tests/test_interactiveshell.py
|
{
"start": 30347,
"end": 30730
}
|
class ____(ast.NodeTransformer):
"""Throws an InputRejected when it sees a string literal.
Used to verify that NodeTransformers can signal that a piece of code should
not be executed by throwing an InputRejected.
"""
def visit_Constant(self, node):
if isinstance(node.value, str):
raise InputRejected("test")
return node
|
StringRejector
|
python
|
eventlet__eventlet
|
tests/mock.py
|
{
"start": 9801,
"end": 9988
}
|
class ____:
"A unique, named, sentinel object."
def __init__(self, name):
self.name = name
def __repr__(self):
return 'sentinel.%s' % self.name
|
_SentinelObject
|
python
|
getsentry__sentry
|
src/sentry/digests/types.py
|
{
"start": 348,
"end": 424
}
|
class ____(StrEnum):
RULE = "rule"
WORKFLOW = "workflow"
|
IdentifierKey
|
python
|
getsentry__sentry
|
src/sentry/models/dashboard_widget.py
|
{
"start": 2324,
"end": 4496
}
|
class ____(Enum):
"""
Ambiguous queries that haven't been or couldn't be categorized into a
specific dataset.
"""
UNKNOWN = 0
"""
Dataset inferred by either running the query or using heuristics.
"""
INFERRED = 1
"""
Canonical dataset, user explicitly selected it.
"""
USER = 2
"""
Was an ambiguous dataset forced to split (i.e. we picked a default)
"""
FORCED = 3
"""
Dataset inferred by split script, version 1
"""
SPLIT_VERSION_1 = 4
"""
Dataset inferred by split script, version 2
"""
SPLIT_VERSION_2 = 5
"""
Dataset modified by transaction -> span migration
"""
SPAN_MIGRATION_VERSION_1 = 6
"""
Dataset modified by using the widget snapshot to restore the original transaction query
"""
RESTORED_SPAN_MIGRATION_VERSION_1 = 7
"""
Dataset modified by the transaction -> span migration version 2
"""
SPAN_MIGRATION_VERSION_2 = 8
"""
Dataset modified by the transaction -> span migration version 3
"""
SPAN_MIGRATION_VERSION_3 = 9
"""
Dataset modified by the transaction -> span migration version 4 (fixing boolean bug)
"""
SPAN_MIGRATION_VERSION_4 = 10
"""
Dataset modified by the transaction -> span migration version 5 (fixing boolean bug again)
"""
SPAN_MIGRATION_VERSION_5 = 11
@classmethod
def as_choices(cls):
return tuple((source.value, source.name.lower()) for source in cls)
@classmethod
def as_text_choices(cls):
return tuple((source.name.lower(), source.value) for source in cls)
# TODO: Can eventually be replaced solely with TRANSACTION_MULTI once no more dashboards use Discover.
TransactionWidgetType = [DashboardWidgetTypes.DISCOVER, DashboardWidgetTypes.TRANSACTION_LIKE]
# TODO: Can be replaced once conditions are replaced at all callsite to split transaction and error behaviour, and once dashboard no longer have saved Discover dataset.
DiscoverFullFallbackWidgetType = [
DashboardWidgetTypes.DISCOVER,
DashboardWidgetTypes.ERROR_EVENTS,
DashboardWidgetTypes.TRANSACTION_LIKE,
]
|
DatasetSourcesTypes
|
python
|
openai__openai-python
|
src/openai/resources/responses/input_tokens.py
|
{
"start": 1002,
"end": 7228
}
|
class ____(SyncAPIResource):
@cached_property
def with_raw_response(self) -> InputTokensWithRawResponse:
"""
This property can be used as a prefix for any HTTP method call to return
the raw response object instead of the parsed content.
For more information, see https://www.github.com/openai/openai-python#accessing-raw-response-data-eg-headers
"""
return InputTokensWithRawResponse(self)
@cached_property
def with_streaming_response(self) -> InputTokensWithStreamingResponse:
"""
An alternative to `.with_raw_response` that doesn't eagerly read the response body.
For more information, see https://www.github.com/openai/openai-python#with_streaming_response
"""
return InputTokensWithStreamingResponse(self)
def count(
self,
*,
conversation: Optional[input_token_count_params.Conversation] | Omit = omit,
input: Union[str, Iterable[ResponseInputItemParam], None] | Omit = omit,
instructions: Optional[str] | Omit = omit,
model: Optional[str] | Omit = omit,
parallel_tool_calls: Optional[bool] | Omit = omit,
previous_response_id: Optional[str] | Omit = omit,
reasoning: Optional[Reasoning] | Omit = omit,
text: Optional[input_token_count_params.Text] | Omit = omit,
tool_choice: Optional[input_token_count_params.ToolChoice] | Omit = omit,
tools: Optional[Iterable[ToolParam]] | Omit = omit,
truncation: Literal["auto", "disabled"] | Omit = omit,
# Use the following arguments if you need to pass additional parameters to the API that aren't available via kwargs.
# The extra values given here take precedence over values defined on the client or passed to this method.
extra_headers: Headers | None = None,
extra_query: Query | None = None,
extra_body: Body | None = None,
timeout: float | httpx.Timeout | None | NotGiven = not_given,
) -> InputTokenCountResponse:
"""
Get input token counts
Args:
conversation: The conversation that this response belongs to. Items from this conversation are
prepended to `input_items` for this response request. Input items and output
items from this response are automatically added to this conversation after this
response completes.
input: Text, image, or file inputs to the model, used to generate a response
instructions: A system (or developer) message inserted into the model's context. When used
along with `previous_response_id`, the instructions from a previous response
will not be carried over to the next response. This makes it simple to swap out
system (or developer) messages in new responses.
model: Model ID used to generate the response, like `gpt-4o` or `o3`. OpenAI offers a
wide range of models with different capabilities, performance characteristics,
and price points. Refer to the
[model guide](https://platform.openai.com/docs/models) to browse and compare
available models.
parallel_tool_calls: Whether to allow the model to run tool calls in parallel.
previous_response_id: The unique ID of the previous response to the model. Use this to create
multi-turn conversations. Learn more about
[conversation state](https://platform.openai.com/docs/guides/conversation-state).
Cannot be used in conjunction with `conversation`.
reasoning: **gpt-5 and o-series models only** Configuration options for
[reasoning models](https://platform.openai.com/docs/guides/reasoning).
text: Configuration options for a text response from the model. Can be plain text or
structured JSON data. Learn more:
- [Text inputs and outputs](https://platform.openai.com/docs/guides/text)
- [Structured Outputs](https://platform.openai.com/docs/guides/structured-outputs)
tool_choice: How the model should select which tool (or tools) to use when generating a
response. See the `tools` parameter to see how to specify which tools the model
can call.
tools: An array of tools the model may call while generating a response. You can
specify which tool to use by setting the `tool_choice` parameter.
truncation: The truncation strategy to use for the model response. - `auto`: If the input to
this Response exceeds the model's context window size, the model will truncate
the response to fit the context window by dropping items from the beginning of
the conversation. - `disabled` (default): If the input size will exceed the
context window size for a model, the request will fail with a 400 error.
extra_headers: Send extra headers
extra_query: Add additional query parameters to the request
extra_body: Add additional JSON properties to the request
timeout: Override the client-level default timeout for this request, in seconds
"""
return self._post(
"/responses/input_tokens",
body=maybe_transform(
{
"conversation": conversation,
"input": input,
"instructions": instructions,
"model": model,
"parallel_tool_calls": parallel_tool_calls,
"previous_response_id": previous_response_id,
"reasoning": reasoning,
"text": text,
"tool_choice": tool_choice,
"tools": tools,
"truncation": truncation,
},
input_token_count_params.InputTokenCountParams,
),
options=make_request_options(
extra_headers=extra_headers, extra_query=extra_query, extra_body=extra_body, timeout=timeout
),
cast_to=InputTokenCountResponse,
)
|
InputTokens
|
python
|
spack__spack
|
lib/spack/spack/test/binary_distribution.py
|
{
"start": 40470,
"end": 50359
}
|
class ____(NamedTuple):
manifest_contents: Dict[str, Any]
index_contents: str
index_hash: str
manifest_path: str
index_path: str
manifest_etag: str
fetched_blob: Callable[[], bool]
@pytest.fixture
def mock_index(tmp_path: pathlib.Path, monkeypatch) -> IndexInformation:
mirror_root = tmp_path / "mymirror"
index_json = '{"Hello": "World"}'
index_json_hash = spack.binary_distribution.compute_hash(index_json)
fetched = False
cache_class = get_url_buildcache_class(
layout_version=spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION
)
index_blob_path = os.path.join(
str(mirror_root),
*cache_class.get_relative_path_components(BuildcacheComponent.BLOB),
"sha256",
index_json_hash[:2],
index_json_hash,
)
os.makedirs(os.path.dirname(index_blob_path))
with open(index_blob_path, "w", encoding="utf-8") as fd:
fd.write(index_json)
index_blob_record = spack.binary_distribution.BlobRecord(
os.stat(index_blob_path).st_size,
cache_class.BUILDCACHE_INDEX_MEDIATYPE,
"none",
"sha256",
index_json_hash,
)
index_manifest = {
"version": cache_class.get_layout_version(),
"data": [index_blob_record.to_dict()],
}
manifest_json_path = cache_class.get_index_url(str(mirror_root))
os.makedirs(os.path.dirname(manifest_json_path))
with open(manifest_json_path, "w", encoding="utf-8") as f:
json.dump(index_manifest, f)
def fetch_patch(stage, mirror_only: bool = False, err_msg: Optional[str] = None):
nonlocal fetched
fetched = True
@property # type: ignore
def save_filename_patch(stage):
return str(index_blob_path)
monkeypatch.setattr(spack.stage.Stage, "fetch", fetch_patch)
monkeypatch.setattr(spack.stage.Stage, "save_filename", save_filename_patch)
def get_did_fetch():
# nonlocal fetched
return fetched
return IndexInformation(
index_manifest,
index_json,
index_json_hash,
manifest_json_path,
index_blob_path,
"59bcc3ad6775562f845953cf01624225",
get_did_fetch,
)
def test_etag_fetching_304():
# Test conditional fetch with etags. If the remote hasn't modified the file
# it returns 304, which is an HTTPError in urllib-land. That should be
# handled as success, since it means the local cache is up-to-date.
def response_304(request: urllib.request.Request):
url = request.get_full_url()
if url.endswith(INDEX_MANIFEST_FILE):
assert request.get_header("If-none-match") == '"112a8bbc1b3f7f185621c1ee335f0502"'
raise urllib.error.HTTPError(
url, 304, "Not Modified", hdrs={}, fp=None # type: ignore[arg-type]
)
assert False, "Unexpected request {}".format(url)
fetcher = spack.binary_distribution.EtagIndexFetcher(
spack.binary_distribution.MirrorURLAndVersion(
"https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION
),
etag="112a8bbc1b3f7f185621c1ee335f0502",
urlopen=response_304,
)
result = fetcher.conditional_fetch()
assert isinstance(result, spack.binary_distribution.FetchIndexResult)
assert result.fresh
def test_etag_fetching_200(mock_index):
# Test conditional fetch with etags. The remote has modified the file.
def response_200(request: urllib.request.Request):
url = request.get_full_url()
if url.endswith(INDEX_MANIFEST_FILE):
assert request.get_header("If-none-match") == '"112a8bbc1b3f7f185621c1ee335f0502"'
return urllib.response.addinfourl(
io.BytesIO(json.dumps(mock_index.manifest_contents).encode()),
headers={"Etag": f'"{mock_index.manifest_etag}"'}, # type: ignore[arg-type]
url=url,
code=200,
)
assert False, "Unexpected request {}".format(url)
fetcher = spack.binary_distribution.EtagIndexFetcher(
spack.binary_distribution.MirrorURLAndVersion(
"https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION
),
etag="112a8bbc1b3f7f185621c1ee335f0502",
urlopen=response_200,
)
result = fetcher.conditional_fetch()
assert isinstance(result, spack.binary_distribution.FetchIndexResult)
assert not result.fresh
assert mock_index.fetched_blob()
assert result.etag == mock_index.manifest_etag
assert result.data == mock_index.index_contents
assert result.hash == mock_index.index_hash
def test_etag_fetching_404():
# Test conditional fetch with etags. The remote has modified the file.
def response_404(request: urllib.request.Request):
raise urllib.error.HTTPError(
request.get_full_url(),
404,
"Not found",
hdrs={"Etag": '"59bcc3ad6775562f845953cf01624225"'}, # type: ignore[arg-type]
fp=None,
)
fetcher = spack.binary_distribution.EtagIndexFetcher(
spack.binary_distribution.MirrorURLAndVersion(
"https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION
),
etag="112a8bbc1b3f7f185621c1ee335f0502",
urlopen=response_404,
)
with pytest.raises(spack.binary_distribution.FetchIndexError):
fetcher.conditional_fetch()
def test_default_index_fetch_200(mock_index):
# We fetch the manifest and then the index blob if the hash is outdated
def urlopen(request: urllib.request.Request):
url = request.get_full_url()
if url.endswith(INDEX_MANIFEST_FILE):
return urllib.response.addinfourl( # type: ignore[arg-type]
io.BytesIO(json.dumps(mock_index.manifest_contents).encode()),
headers={"Etag": f'"{mock_index.manifest_etag}"'}, # type: ignore[arg-type]
url=url,
code=200,
)
assert False, "Unexpected request {}".format(url)
fetcher = spack.binary_distribution.DefaultIndexFetcher(
spack.binary_distribution.MirrorURLAndVersion(
"https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION
),
local_hash="outdated",
urlopen=urlopen,
)
result = fetcher.conditional_fetch()
assert isinstance(result, spack.binary_distribution.FetchIndexResult)
assert not result.fresh
assert mock_index.fetched_blob()
assert result.etag == mock_index.manifest_etag
assert result.data == mock_index.index_contents
assert result.hash == mock_index.index_hash
def test_default_index_404():
# We get a fetch error if the index can't be fetched
def urlopen(request: urllib.request.Request):
raise urllib.error.HTTPError(
request.get_full_url(),
404,
"Not found",
hdrs={"Etag": '"59bcc3ad6775562f845953cf01624225"'}, # type: ignore[arg-type]
fp=None,
)
fetcher = spack.binary_distribution.DefaultIndexFetcher(
spack.binary_distribution.MirrorURLAndVersion(
"https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION
),
local_hash=None,
urlopen=urlopen,
)
with pytest.raises(spack.binary_distribution.FetchIndexError):
fetcher.conditional_fetch()
def test_default_index_not_modified(mock_index):
# We don't fetch the index blob if hash didn't change
def urlopen(request: urllib.request.Request):
url = request.get_full_url()
if url.endswith(INDEX_MANIFEST_FILE):
return urllib.response.addinfourl(
io.BytesIO(json.dumps(mock_index.manifest_contents).encode()),
headers={}, # type: ignore[arg-type]
url=url,
code=200,
)
# No other request should be made.
assert False, "Unexpected request {}".format(url)
fetcher = spack.binary_distribution.DefaultIndexFetcher(
spack.binary_distribution.MirrorURLAndVersion(
"https://www.example.com", spack.binary_distribution.CURRENT_BUILD_CACHE_LAYOUT_VERSION
),
local_hash=mock_index.index_hash,
urlopen=urlopen,
)
assert fetcher.conditional_fetch().fresh
assert not mock_index.fetched_blob()
@pytest.mark.usefixtures("install_mockery", "mock_packages")
def test_get_entries_from_cache_nested_mirrors(monkeypatch, tmp_path: pathlib.Path):
"""Make sure URLBuildcacheEntry behaves as expected"""
# Create a temp mirror directory for buildcache usage
mirror_dir = tmp_path / "mirror_dir"
mirror_url = url_util.path_to_file_url(str(mirror_dir))
# Install and push libdwarf to the root mirror
s = spack.concretize.concretize_one("libdwarf")
install_cmd("--fake", s.name)
buildcache_cmd("push", "-u", str(mirror_dir), s.name)
# Install and push libzlib to the nested mirror
s = spack.concretize.concretize_one("zlib")
install_cmd("--fake", s.name)
buildcache_cmd("push", "-u", str(mirror_dir / "nested"), s.name)
spec_manifests, _ = get_entries_from_cache(
str(mirror_url), str(tmp_path / "stage"), BuildcacheComponent.SPEC
)
nested_mirror_url = url_util.path_to_file_url(str(mirror_dir / "nested"))
spec_manifests_nested, _ = get_entries_from_cache(
str(nested_mirror_url), str(tmp_path / "stage"), BuildcacheComponent.SPEC
)
# Expected specs in root mirror
# - gcc-runtime
# - compiler-wrapper
# - libelf
# - libdwarf
assert len(spec_manifests) == 4
# Expected specs in nested mirror
# - zlib
assert len(spec_manifests_nested) == 1
|
IndexInformation
|
python
|
scrapy__scrapy
|
tests/test_pipeline_files.py
|
{
"start": 22671,
"end": 24834
}
|
class ____:
@inlineCallbacks
def test_persist(self):
uri = os.environ.get("GCS_TEST_FILE_URI")
if not uri:
pytest.skip("No GCS URI available for testing")
data = b"TestGCSFilesStore: \xe2\x98\x83"
buf = BytesIO(data)
meta = {"foo": "bar"}
path = "full/filename"
store = GCSFilesStore(uri)
store.POLICY = "authenticatedRead"
expected_policy = {"role": "READER", "entity": "allAuthenticatedUsers"}
yield store.persist_file(path, buf, info=None, meta=meta, headers=None)
s = yield store.stat_file(path, info=None)
assert "last_modified" in s
assert "checksum" in s
assert s["checksum"] == "cdcda85605e46d0af6110752770dce3c"
u = urlparse(uri)
content, acl, blob = get_gcs_content_and_delete(u.hostname, u.path[1:] + path)
assert content == data
assert blob.metadata == {"foo": "bar"}
assert blob.cache_control == GCSFilesStore.CACHE_CONTROL
assert blob.content_type == "application/octet-stream"
assert expected_policy in acl
@inlineCallbacks
def test_blob_path_consistency(self):
"""Test to make sure that paths used to store files is the same as the one used to get
already uploaded files.
"""
try:
import google.cloud.storage # noqa: F401,PLC0415
except ModuleNotFoundError:
pytest.skip("google-cloud-storage is not installed")
with (
mock.patch("google.cloud.storage"),
mock.patch("scrapy.pipelines.files.time"),
):
uri = "gs://my_bucket/my_prefix/"
store = GCSFilesStore(uri)
store.bucket = mock.Mock()
path = "full/my_data.txt"
yield store.persist_file(
path, mock.Mock(), info=None, meta=None, headers=None
)
yield store.stat_file(path, info=None)
expected_blob_path = store.prefix + path
store.bucket.blob.assert_called_with(expected_blob_path)
store.bucket.get_blob.assert_called_with(expected_blob_path)
|
TestGCSFilesStore
|
python
|
HypothesisWorks__hypothesis
|
hypothesis-python/tests/cover/test_pretty.py
|
{
"start": 16498,
"end": 16617
}
|
class ____(Flag):
A = 1
B = 2
def __repr__(self):
return "can't parse this nonsense"
|
EvilReprOptions
|
python
|
huggingface__transformers
|
src/transformers/models/timesfm/modeling_timesfm.py
|
{
"start": 10562,
"end": 11651
}
|
class ____(nn.Module):
"""Transformer layer."""
def __init__(self, config: TimesFmConfig, layer_idx: int):
super().__init__()
self.self_attn = TimesFmAttention(config, layer_idx=layer_idx)
self.mlp = TimesFmMLP(config)
self.input_layernorm = TimesFmRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: torch.Tensor,
paddings: torch.Tensor,
output_attentions: bool = False,
) -> tuple[Optional[torch.Tensor], torch.Tensor]:
# Self Attention
residual = hidden_states
hidden_states = self.input_layernorm(hidden_states)
hidden_states, scores = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
output_attentions=output_attentions,
)
hidden_states = residual + hidden_states
# MLP
hidden_states = self.mlp(hidden_states, paddings=paddings)
return scores, hidden_states
@auto_docstring
|
TimesFmDecoderLayer
|
python
|
kubernetes-client__python
|
kubernetes/client/models/v1_photon_persistent_disk_volume_source.py
|
{
"start": 383,
"end": 4940
}
|
class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'fs_type': 'str',
'pd_id': 'str'
}
attribute_map = {
'fs_type': 'fsType',
'pd_id': 'pdID'
}
def __init__(self, fs_type=None, pd_id=None, local_vars_configuration=None): # noqa: E501
"""V1PhotonPersistentDiskVolumeSource - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._fs_type = None
self._pd_id = None
self.discriminator = None
if fs_type is not None:
self.fs_type = fs_type
self.pd_id = pd_id
@property
def fs_type(self):
"""Gets the fs_type of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:return: The fs_type of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._fs_type
@fs_type.setter
def fs_type(self, fs_type):
"""Sets the fs_type of this V1PhotonPersistentDiskVolumeSource.
fsType is the filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified. # noqa: E501
:param fs_type: The fs_type of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
:type: str
"""
self._fs_type = fs_type
@property
def pd_id(self):
"""Gets the pd_id of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
pdID is the ID that identifies Photon Controller persistent disk # noqa: E501
:return: The pd_id of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
:rtype: str
"""
return self._pd_id
@pd_id.setter
def pd_id(self, pd_id):
"""Sets the pd_id of this V1PhotonPersistentDiskVolumeSource.
pdID is the ID that identifies Photon Controller persistent disk # noqa: E501
:param pd_id: The pd_id of this V1PhotonPersistentDiskVolumeSource. # noqa: E501
:type: str
"""
if self.local_vars_configuration.client_side_validation and pd_id is None: # noqa: E501
raise ValueError("Invalid value for `pd_id`, must not be `None`") # noqa: E501
self._pd_id = pd_id
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PhotonPersistentDiskVolumeSource):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PhotonPersistentDiskVolumeSource):
return True
return self.to_dict() != other.to_dict()
|
V1PhotonPersistentDiskVolumeSource
|
python
|
numba__numba
|
numba/core/types/npytypes.py
|
{
"start": 18284,
"end": 19667
}
|
class ____(Array):
"""
A NestedArray is an array nested within a structured type (which are "void"
type in NumPy parlance). Unlike an Array, the shape, and not just the number
of dimensions is part of the type of a NestedArray.
"""
def __init__(self, dtype, shape):
if isinstance(dtype, NestedArray):
tmp = Array(dtype.dtype, dtype.ndim, 'C')
shape += dtype.shape
dtype = tmp.dtype
assert dtype.bitwidth % 8 == 0, \
"Dtype bitwidth must be a multiple of bytes"
self._shape = shape
name = "nestedarray(%s, %s)" % (dtype, shape)
ndim = len(shape)
super(NestedArray, self).__init__(dtype, ndim, 'C', name=name)
@property
def shape(self):
return self._shape
@property
def nitems(self):
l = 1
for s in self.shape:
l = l * s
return l
@property
def size(self):
return self.dtype.bitwidth // 8
@property
def strides(self):
stride = self.size
strides = []
for i in reversed(self._shape):
strides.append(stride)
stride *= i
return tuple(reversed(strides))
@property
def key(self):
return self.dtype, self.shape
def __repr__(self):
return f"NestedArray({repr(self.dtype)}, {self.shape})"
|
NestedArray
|
python
|
django__django
|
tests/fixtures/models.py
|
{
"start": 1377,
"end": 1860
}
|
class ____(models.Model):
name = models.CharField(max_length=100)
tagged_type = models.ForeignKey(
ContentType, models.CASCADE, related_name="fixtures_tag_set"
)
tagged_id = models.PositiveIntegerField(default=0)
tagged = GenericForeignKey(ct_field="tagged_type", fk_field="tagged_id")
def __str__(self):
return '<%s: %s> tagged "%s"' % (
self.tagged.__class__.__name__,
self.tagged,
self.name,
)
|
Tag
|
python
|
wandb__wandb
|
wandb/vendor/pygments/lexers/robotframework.py
|
{
"start": 2486,
"end": 3419
}
|
class ____(object):
def tokenize(self, string, token):
var = VariableSplitter(string, identifiers='$@%&')
if var.start < 0 or token in (COMMENT, ERROR):
yield string, token
return
for value, token in self._tokenize(var, string, token):
if value:
yield value, token
def _tokenize(self, var, string, orig_token):
before = string[:var.start]
yield before, orig_token
yield var.identifier + '{', SYNTAX
for value, token in self.tokenize(var.base, VARIABLE):
yield value, token
yield '}', SYNTAX
if var.index:
yield '[', SYNTAX
for value, token in self.tokenize(var.index, VARIABLE):
yield value, token
yield ']', SYNTAX
for value, token in self.tokenize(string[var.end:], orig_token):
yield value, token
|
VariableTokenizer
|
python
|
dask__distributed
|
distributed/worker_state_machine.py
|
{
"start": 23981,
"end": 25379
}
|
class ____(ExecuteDoneEvent):
run_id: int # FIXME: Utilize the run ID in all ExecuteDoneEvents
value: object
start: float
stop: float
nbytes: int
type: type | None
__slots__ = tuple(__annotations__)
def to_loggable(self, *, handled: float) -> StateMachineEvent:
out = copy(self)
out.handled = handled
out.value = None
return out
def _to_dict(self, *, exclude: Container[str] = ()) -> dict:
d = super()._to_dict(exclude=exclude)
# This is excluded by the parent class as it is a callable
if "type" not in exclude:
d["type"] = str(self.type)
return d
def _after_from_dict(self) -> None:
self.value = None
self.type = None
@staticmethod
def dummy(
key: Key,
value: object = None,
*,
run_id: int = 1,
nbytes: int = 1,
stimulus_id: str,
) -> ExecuteSuccessEvent:
"""Build a dummy event, with most attributes set to a reasonable default.
This is a convenience method to be used in unit testing only.
"""
return ExecuteSuccessEvent(
key=key,
run_id=run_id,
value=value,
start=0.0,
stop=1.0,
nbytes=nbytes,
type=None,
stimulus_id=stimulus_id,
)
@dataclass
|
ExecuteSuccessEvent
|
python
|
kamyu104__LeetCode-Solutions
|
Python/simplified-fractions.py
|
{
"start": 58,
"end": 420
}
|
class ____(object):
def simplifiedFractions(self, n):
"""
:type n: int
:rtype: List[str]
"""
lookup = set()
for b in xrange(1, n+1):
for a in xrange(1, b):
g = fractions.gcd(a, b)
lookup.add((a//g, b//g))
return map(lambda x: "{}/{}".format(*x), lookup)
|
Solution
|
python
|
scikit-learn__scikit-learn
|
sklearn/compose/tests/test_target.py
|
{
"start": 9658,
"end": 10002
}
|
class ____(TransformerMixin, BaseEstimator):
def fit(self, X, y=None):
assert isinstance(X, np.ndarray)
return self
def transform(self, X):
assert isinstance(X, np.ndarray)
return X
def inverse_transform(self, X):
assert isinstance(X, np.ndarray)
return X
|
DummyCheckerArrayTransformer
|
python
|
keras-team__keras
|
keras/src/callbacks/remote_monitor.py
|
{
"start": 260,
"end": 2727
}
|
class ____(Callback):
"""Callback used to stream events to a server.
Requires the `requests` library.
Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
HTTP POST, with a `data` argument which is a
JSON-encoded dictionary of event data.
If `send_as_json=True`, the content type of the request will be
`"application/json"`.
Otherwise the serialized JSON will be sent within a form.
Args:
root: String; root url of the target server.
path: String; path relative to `root` to which the events will be sent.
field: String; JSON field under which the data will be stored.
The field is used only if the payload is sent within a form
(i.e. when `send_as_json=False`).
headers: Dictionary; optional custom HTTP headers.
send_as_json: Boolean; whether the request should be
sent as `"application/json"`.
"""
def __init__(
self,
root="http://localhost:9000",
path="/publish/epoch/end/",
field="data",
headers=None,
send_as_json=False,
):
super().__init__()
self.root = root
self.path = path
self.field = field
self.headers = headers
self.send_as_json = send_as_json
def on_epoch_end(self, epoch, logs=None):
if requests is None:
raise ImportError("RemoteMonitor requires the `requests` library.")
logs = logs or {}
send = {}
send["epoch"] = epoch
for k, v in logs.items():
# np.ndarray and np.generic are not scalar types
# therefore we must unwrap their scalar values and
# pass to the json-serializable dict 'send'
if isinstance(v, (np.ndarray, np.generic)):
send[k] = v.item()
else:
send[k] = v
try:
if self.send_as_json:
requests.post(
self.root + self.path, json=send, headers=self.headers
)
else:
requests.post(
self.root + self.path,
{self.field: json.dumps(send)},
headers=self.headers,
)
except requests.exceptions.RequestException:
warnings.warn(
f"Could not reach RemoteMonitor root server at {self.root}",
stacklevel=2,
)
|
RemoteMonitor
|
python
|
doocs__leetcode
|
solution/3000-3099/3032.Count Numbers With Unique Digits II/Solution2.py
|
{
"start": 0,
"end": 149
}
|
class ____:
def numberCount(self, a: int, b: int) -> int:
return sum(len(set(str(num))) == len(str(num)) for num in range(a, b + 1))
|
Solution
|
python
|
altair-viz__altair
|
altair/vegalite/v6/schema/core.py
|
{
"start": 224168,
"end": 224494
}
|
class ____(ConditionalMarkPropFieldOrDatumDef):
"""ConditionalParameterMarkPropFieldOrDatumDef schema wrapper."""
_schema = {"$ref": "#/definitions/ConditionalParameter<MarkPropFieldOrDatumDef>"}
def __init__(self, *args, **kwds):
super().__init__(*args, **kwds)
|
ConditionalParameterMarkPropFieldOrDatumDef
|
python
|
getsentry__sentry
|
src/sentry/apidocs/examples/issue_alert_examples.py
|
{
"start": 51,
"end": 7378
}
|
class ____:
GET_PROJECT_RULE = [
OpenApiExample(
"Get detailed view about an issue alert rule",
value={
"id": "7",
"conditions": [
{
"id": "sentry.rules.conditions.regression_event.RegressionEventCondition",
}
],
"filters": [
{
"id": "sentry.rules.filters.age_comparison.AgeComparisonFilter",
"comparison_type": "older",
"value": 4,
"time": "week",
},
{
"id": "sentry.rules.filters.issue_occurrences.IssueOccurrencesFilter",
"value": 1000,
},
],
"actions": [
{
"id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
"workspace": 976462356,
"channel": "#fatal",
"tags": "browser,release",
}
],
"actionMatch": "all",
"filterMatch": "all",
"frequency": 60,
"name": "Many Old Regressions!",
"dateCreated": "2023-02-17T18:31:14.246012Z",
"owner": "user:635623",
"createdBy": {"id": 635623, "name": "John Doe", "email": "john.doe@email.com"},
"environment": None,
"projects": ["javascript"],
"status": "active",
"snooze": False,
},
status_codes=["200"],
response_only=True,
)
]
UPDATE_PROJECT_RULE = [
OpenApiExample(
"Get detailed view about an issue alert rule",
value={
"id": "7",
"conditions": [
{
"id": "sentry.rules.conditions.regression_event.RegressionEventCondition",
}
],
"filters": [
{
"id": "sentry.rules.filters.age_comparison.AgeComparisonFilter",
"comparison_type": "older",
"value": 4,
"time": "week",
},
{
"id": "sentry.rules.filters.issue_occurrences.IssueOccurrencesFilter",
"value": 1000,
},
{"id": "sentry.rules.filters.level.LevelFilter", "match": "gte", "level": "40"},
],
"actions": [
{
"id": "sentry.integrations.slack.notify_action.SlackNotifyServiceAction",
"workspace": 976462356,
"channel": "#fatal",
"tags": "browser,release",
}
],
"actionMatch": "all",
"filterMatch": "all",
"frequency": 60,
"name": "Many Old Regressions!",
"dateCreated": "2023-02-17T18:31:14.246012Z",
"owner": "user:635623",
"createdBy": {"id": 635623, "name": "John Doe", "email": "john.doe@email.com"},
"environment": None,
"projects": ["javascript"],
"status": "active",
"snooze": False,
},
status_codes=["200"],
response_only=True,
)
]
LIST_PROJECT_RULES = [
OpenApiExample(
"List issue alert rules for a project",
value=[
{
"id": "3",
"conditions": [
{
"interval": "1h",
"id": "sentry.rules.conditions.event_frequency.EventFrequencyCondition",
"value": 1000,
}
],
"filters": [
{
"value": "1",
"id": "sentry.rules.filters.issue_category.IssueCategoryFilter",
},
{
"value": "2",
"id": "sentry.rules.filters.issue_category.IssueCategoryFilter",
},
],
"actions": [
{
"targetType": "Team",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": 4367234414355,
}
],
"actionMatch": "any",
"filterMatch": "any",
"frequency": 60,
"name": "High Number of Issues with Production",
"dateCreated": "2023-01-15T06:45:34.353346Z",
"owner": "team:63562",
"createdBy": {
"id": 2435786,
"name": "John Doe",
"email": "john.doe@example.com",
},
"environment": "prod",
"projects": ["melody"],
"status": "active",
"lastTriggered": "2023-07-15T00:00:00.351236Z",
"snooze": False,
},
],
status_codes=["200"],
response_only=True,
)
]
CREATE_ISSUE_ALERT_RULE = [
OpenApiExample(
"Issue alert successfully created",
value={
"id": "1",
"conditions": [
{
"id": "sentry.rules.conditions.first_seen_event.FirstSeenEventCondition",
}
],
"filters": [
{
"targetType": "Unassigned",
"id": "sentry.rules.filters.assigned_to.AssignedToFilter",
"targetIdentifier": "",
}
],
"actions": [
{
"targetType": "Member",
"fallthroughType": "ActiveMembers",
"id": "sentry.mail.actions.NotifyEmailAction",
"targetIdentifier": 1523125,
}
],
"actionMatch": "any",
"filterMatch": "all",
"frequency": 1440,
"name": "Owner Alert",
"dateCreated": "2023-09-08T20:00:07.244602Z",
"owner": "team:74234",
"createdBy": {"id": 24601, "name": "Jean Valjean", "email": "jean@example.com"},
"environment": None,
"projects": ["python"],
"status": "active",
"snooze": False,
},
status_codes=["201"],
response_only=True,
)
]
|
IssueAlertExamples
|
python
|
walkccc__LeetCode
|
solutions/1952. Three Divisors/1952.py
|
{
"start": 0,
"end": 320
}
|
class ____:
def isThree(self, n: int) -> bool:
if n == 1:
return False
# The numbers with exactly three divisors are perfect squares of a prime
# number.
root = math.isqrt(n)
return (root**2 == n and
all(root % i != 0
for i in range(2, math.isqrt(root) + 1)))
|
Solution
|
python
|
tensorflow__tensorflow
|
tensorflow/python/distribute/v1/input_lib.py
|
{
"start": 9029,
"end": 11077
}
|
class ____(DistributedIteratorV1):
"""Iterator created from input dataset."""
def __init__(self,
dataset,
input_workers,
strategy,
num_replicas_in_sync=None,
input_context=None):
"""Make an iterator for the dataset on given devices.
If `num_replicas_in_sync` is not None, we split each batch of the dataset
into `num_replicas_in_sync` smaller batches, to be distributed among that
worker's replicas, so that the batch size for a global step (across all
workers and replicas) is as expected.
Args:
dataset: `tf.data.Dataset` that will be used as the input source.
input_workers: an `InputWorkers` object.
strategy: a `tf.distribute.Strategy` object, used to run all-reduce to
handle last partial batch.
num_replicas_in_sync: Optional integer. If this is not None, the value is
used to decide how to rebatch datasets into smaller batches so that the
total batch size for each step (across all workers and replicas) adds up
to `dataset`'s batch size.
input_context: `InputContext` for sharding. Only pass this in for between
graph multi-worker cases where there is only one `input_worker`. In
these cases, we will shard based on the `input_pipeline_id` and
`num_input_pipelines` in the `InputContext`.
"""
dist_dataset = DistributedDatasetV1(
dataset,
input_workers,
strategy,
num_replicas_in_sync=num_replicas_in_sync,
input_context=input_context)
# pylint: disable=protected-access
worker_iterators = _create_iterators_per_worker(
dist_dataset._cloned_datasets, input_workers)
super(DatasetIterator,
self).__init__(input_workers, worker_iterators, strategy,
dist_dataset.cardinality,
dist_dataset._enable_get_next_as_optional)
self._element_spec = dist_dataset.element_spec
# pylint: enable=protected-access
|
DatasetIterator
|
python
|
pydantic__pydantic
|
tests/benchmarks/shared.py
|
{
"start": 1617,
"end": 1695
}
|
class ____(IntEnum):
spanner = 1
wrench = 2
screwdriver = 3
|
ToolEnum
|
python
|
walkccc__LeetCode
|
solutions/3016. Minimum Number of Pushes to Type Word II/3016.py
|
{
"start": 0,
"end": 256
}
|
class ____:
# Same as 3014. Minimum Number of Pushes to Type Word I
def minimumPushes(self, word: str) -> int:
freqs = sorted(collections.Counter(word).values(), reverse=True)
return sum(freq * (i // 8 + 1) for i, freq in enumerate(freqs))
|
Solution
|
python
|
spyder-ide__spyder
|
spyder/plugins/tours/tours.py
|
{
"start": 9161,
"end": 9254
}
|
class ____:
IntroductionTour = "introduction_tour"
TestTour = "test_tour"
|
TourIdentifiers
|
python
|
sqlalchemy__sqlalchemy
|
lib/sqlalchemy/dialects/oracle/oracledb.py
|
{
"start": 23108,
"end": 23209
}
|
class ____(
_cx_oracle.OracleExecutionContext_cx_oracle
):
pass
|
OracleExecutionContext_oracledb
|
python
|
doocs__leetcode
|
solution/3500-3599/3574.Maximize Subarray GCD Score/Solution.py
|
{
"start": 0,
"end": 649
}
|
class ____:
def maxGCDScore(self, nums: List[int], k: int) -> int:
n = len(nums)
cnt = [0] * n
for i, x in enumerate(nums):
while x % 2 == 0:
cnt[i] += 1
x //= 2
ans = 0
for l in range(n):
g = 0
mi = inf
t = 0
for r in range(l, n):
g = gcd(g, nums[r])
if cnt[r] < mi:
mi = cnt[r]
t = 1
elif cnt[r] == mi:
t += 1
ans = max(ans, (g if t > k else g * 2) * (r - l + 1))
return ans
|
Solution
|
python
|
kamyu104__LeetCode-Solutions
|
Python/path-with-maximum-probability.py
|
{
"start": 240,
"end": 1287
}
|
class ____(object):
def maxProbability(self, n, edges, succProb, start, end):
"""
:type n: int
:type edges: List[List[int]]
:type succProb: List[float]
:type start: int
:type end: int
:rtype: float
"""
adj = collections.defaultdict(list)
for (u, v), p in itertools.izip(edges, succProb):
adj[u].append((v, p))
adj[v].append((u, p))
max_heap = [(-1.0, start)]
result, lookup = collections.defaultdict(float), set()
result[start] = 1.0
while max_heap and len(lookup) != len(adj):
curr, u = heapq.heappop(max_heap)
if u in lookup:
continue
lookup.add(u)
for v, w in adj[u]:
if v in lookup:
continue
if v in result and result[v] >= -curr*w:
continue
result[v] = -curr*w
heapq.heappush(max_heap, (-result[v], v))
return result[end]
|
Solution
|
python
|
pyqtgraph__pyqtgraph
|
pyqtgraph/parametertree/parameterTypes/action.py
|
{
"start": 1768,
"end": 2700
}
|
class ____(ParameterItem):
"""ParameterItem displaying a clickable button."""
def __init__(self, param, depth):
ParameterItem.__init__(self, param, depth)
self.layoutWidget = QtWidgets.QWidget()
self.layout = QtWidgets.QHBoxLayout()
self.layout.setContentsMargins(0, 0, 0, 0)
self.layoutWidget.setLayout(self.layout)
self.button = ParameterControlledButton(param, self.layoutWidget)
#self.layout.addSpacing(100)
self.layout.addWidget(self.button)
self.layout.addStretch()
self.titleChanged()
def treeWidgetChanged(self):
ParameterItem.treeWidgetChanged(self)
tree = self.treeWidget()
if tree is None:
return
self.setFirstColumnSpanned(True)
tree.setItemWidget(self, 0, self.layoutWidget)
def titleChanged(self):
self.setSizeHint(0, self.button.sizeHint())
|
ActionParameterItem
|
python
|
facebookresearch__faiss
|
benchs/distributed_ondisk/search_server.py
|
{
"start": 489,
"end": 1820
}
|
class ____(rpc.Server):
""" Assign version that can be exposed via RPC """
def __init__(self, s, index):
rpc.Server.__init__(self, s)
self.index = index
def __getattr__(self, f):
return getattr(self.index, f)
def main():
parser = argparse.ArgumentParser()
def aa(*args, **kwargs):
group.add_argument(*args, **kwargs)
group = parser.add_argument_group('server options')
aa('--port', default=12012, type=int, help='server port')
aa('--when_ready_dir', default=None,
help='store host:port to this file when ready')
aa('--ipv4', default=False, action='store_true', help='force ipv4')
aa('--rank', default=0, type=int,
help='rank used as index in the client table')
args = parser.parse_args()
when_ready = None
if args.when_ready_dir:
when_ready = '%s/%d' % (args.when_ready_dir, args.rank)
print('loading index')
index = combined_index.CombinedIndexDeep1B()
print('starting server')
rpc.run_server(
lambda s: MyServer(s, index),
args.port, report_to_file=when_ready,
v6=not args.ipv4)
if __name__ == '__main__':
main()
############################################################
# Client implementation
############################################################
|
MyServer
|
python
|
pennersr__django-allauth
|
allauth/socialaccount/providers/draugiem/provider.py
|
{
"start": 759,
"end": 1511
}
|
class ____(Provider):
id = "draugiem"
name = "Draugiem"
account_class = DraugiemAccount
def get_login_url(self, request, **kwargs):
url = reverse(self.id + "_login")
if kwargs:
url = url + "?" + urlencode(kwargs)
return url
def extract_uid(self, data):
return str(data["uid"])
def extract_common_fields(self, data):
uid = self.extract_uid(data)
user_data = data["users"][uid]
return dict(
first_name=user_data.get("name"),
last_name=user_data.get("surname"),
)
def extract_extra_data(self, data):
uid = self.extract_uid(data)
return data["users"][uid]
provider_classes = [DraugiemProvider]
|
DraugiemProvider
|
python
|
scrapy__scrapy
|
tests/test_exporters.py
|
{
"start": 21218,
"end": 21943
}
|
class ____(TestBaseItemExporter):
def _get_exporter(self, **kwargs):
kwargs["encoding"] = "latin"
return JsonItemExporter(self.output, **kwargs)
def test_two_items_with_failure_between(self):
i1 = MyItem(name="Joseph", age="22")
i2 = MyItem(name="\u263a", age="11")
i3 = MyItem(name="Jesus", age="44")
self.ie.start_exporting()
self.ie.export_item(i1)
with pytest.raises(UnicodeEncodeError):
self.ie.export_item(i2)
self.ie.export_item(i3)
self.ie.finish_exporting()
exported = json.loads(to_unicode(self.output.getvalue(), encoding="latin"))
assert exported == [dict(i1), dict(i3)]
|
TestJsonItemExporterToBytes
|
python
|
hynek__structlog
|
src/structlog/processors.py
|
{
"start": 16008,
"end": 17567
}
|
class ____:
"""
A timestamper that only adds a timestamp if there is none.
This allows you to overwrite the ``timestamp`` key in the event dict for
example when the event is coming from another system.
It takes the same arguments as `TimeStamper`.
.. versionadded:: 23.2.0
"""
__slots__ = ("stamper",)
def __init__(
self,
fmt: str | None = None,
utc: bool = True,
key: str = "timestamp",
):
self.stamper = TimeStamper(fmt=fmt, utc=utc, key=key)
def __call__(
self, logger: WrappedLogger, name: str, event_dict: EventDict
) -> EventDict:
if self.stamper.key not in event_dict:
return self.stamper(logger, name, event_dict)
return event_dict
def _figure_out_exc_info(v: Any) -> ExcInfo | None:
"""
Try to convert *v* into an ``exc_info`` tuple.
Return ``None`` if *v* does not represent an exception or if there is no
current exception.
"""
if isinstance(v, BaseException):
return (v.__class__, v, v.__traceback__)
if isinstance(v, tuple) and len(v) == 3:
has_type = isinstance(v[0], type) and issubclass(v[0], BaseException)
has_exc = isinstance(v[1], BaseException)
has_tb = v[2] is None or isinstance(v[2], TracebackType)
if has_type and has_exc and has_tb:
return v
if v:
result = sys.exc_info()
if result == (None, None, None):
return None
return cast(ExcInfo, result)
return None
|
MaybeTimeStamper
|
python
|
django__django
|
tests/fixtures/models.py
|
{
"start": 2242,
"end": 2369
}
|
class ____(PersonManager):
def get_queryset(self):
return super().get_queryset().filter(cover_blown=False)
|
SpyManager
|
python
|
numba__numba
|
numba/core/types/containers.py
|
{
"start": 2053,
"end": 2522
}
|
class ____(Type):
"""
Convenience base class for some container payloads.
Derived classes must implement the *container_class* attribute.
"""
def __init__(self, container):
assert isinstance(container, self.container_class)
self.container = container
name = "payload(%s)" % container
super(BaseContainerPayload, self).__init__(name)
@property
def key(self):
return self.container
|
BaseContainerPayload
|
python
|
sympy__sympy
|
sympy/diffgeom/diffgeom.py
|
{
"start": 29826,
"end": 33855
}
|
class ____(Expr):
r"""Base vector field over a manifold for a given coordinate system.
Explanation
===========
A vector field is an operator taking a scalar field and returning a
directional derivative (which is also a scalar field).
A base vector field is the same type of operator, however the derivation is
specifically done with respect to a chosen coordinate.
To define a base vector field you need to choose the coordinate system and
the index of the coordinate.
The use of the vector field after its definition is independent of the
coordinate system in which it was defined, however due to limitations in the
simplification routines you may arrive at more complicated expression if you
use unappropriate coordinate systems.
Parameters
==========
coord_sys : CoordSystem
index : integer
Examples
========
>>> from sympy import Function
>>> from sympy.diffgeom.rn import R2_p, R2_r
>>> from sympy.diffgeom import BaseVectorField
>>> from sympy import pprint
>>> x, y = R2_r.symbols
>>> rho, theta = R2_p.symbols
>>> fx, fy = R2_r.base_scalars()
>>> point_p = R2_p.point([rho, theta])
>>> point_r = R2_r.point([x, y])
>>> g = Function('g')
>>> s_field = g(fx, fy)
>>> v = BaseVectorField(R2_r, 1)
>>> pprint(v(s_field))
/ d \|
|---(g(x, xi))||
\dxi /|xi=y
>>> pprint(v(s_field).rcall(point_r).doit())
d
--(g(x, y))
dy
>>> pprint(v(s_field).rcall(point_p))
/ d \|
|---(g(rho*cos(theta), xi))||
\dxi /|xi=rho*sin(theta)
"""
is_commutative = False
def __new__(cls, coord_sys, index, **kwargs):
index = _sympify(index)
obj = super().__new__(cls, coord_sys, index)
obj._coord_sys = coord_sys
obj._index = index
return obj
@property
def coord_sys(self):
return self.args[0]
@property
def index(self):
return self.args[1]
@property
def patch(self):
return self.coord_sys.patch
@property
def manifold(self):
return self.coord_sys.manifold
@property
def dim(self):
return self.manifold.dim
def __call__(self, scalar_field):
"""Apply on a scalar field.
The action of a vector field on a scalar field is a directional
differentiation.
If the argument is not a scalar field an error is raised.
"""
if covariant_order(scalar_field) or contravariant_order(scalar_field):
raise ValueError('Only scalar fields can be supplied as arguments to vector fields.')
if scalar_field is None:
return self
base_scalars = list(scalar_field.atoms(BaseScalarField))
# First step: e_x(x+r**2) -> e_x(x) + 2*r*e_x(r)
d_var = self._coord_sys._dummy
# TODO: you need a real dummy function for the next line
d_funcs = [Function('_#_%s' % i)(d_var) for i,
b in enumerate(base_scalars)]
d_result = scalar_field.subs(list(zip(base_scalars, d_funcs)))
d_result = d_result.diff(d_var)
# Second step: e_x(x) -> 1 and e_x(r) -> cos(atan2(x, y))
coords = self._coord_sys.symbols
d_funcs_deriv = [f.diff(d_var) for f in d_funcs]
d_funcs_deriv_sub = []
for b in base_scalars:
jac = self._coord_sys.jacobian(b._coord_sys, coords)
d_funcs_deriv_sub.append(jac[b._index, self._index])
d_result = d_result.subs(list(zip(d_funcs_deriv, d_funcs_deriv_sub)))
# Remove the dummies
result = d_result.subs(list(zip(d_funcs, base_scalars)))
result = result.subs(list(zip(coords, self._coord_sys.coord_functions())))
return result.doit()
def _find_coords(expr):
# Finds CoordinateSystems existing in expr
fields = expr.atoms(BaseScalarField, BaseVectorField)
return {f._coord_sys for f in fields}
|
BaseVectorField
|
python
|
pypa__warehouse
|
tests/common/db/accounts.py
|
{
"start": 3858,
"end": 4027
}
|
class ____(WarehouseFactory):
class Meta:
model = UserUniqueLogin
user = factory.SubFactory(UserFactory)
ip_address = REMOTE_ADDR
|
UserUniqueLoginFactory
|
python
|
prompt-toolkit__python-prompt-toolkit
|
src/prompt_toolkit/contrib/telnet/server.py
|
{
"start": 7848,
"end": 13461
}
|
class ____:
"""
Telnet server implementation.
Example::
async def interact(connection):
connection.send("Welcome")
session = PromptSession()
result = await session.prompt_async(message="Say something: ")
connection.send(f"You said: {result}\n")
async def main():
server = TelnetServer(interact=interact, port=2323)
await server.run()
"""
def __init__(
self,
host: str = "127.0.0.1",
port: int = 23,
interact: Callable[
[TelnetConnection], Coroutine[Any, Any, None]
] = _dummy_interact,
encoding: str = "utf-8",
style: BaseStyle | None = None,
enable_cpr: bool = True,
) -> None:
self.host = host
self.port = port
self.interact = interact
self.encoding = encoding
self.style = style
self.enable_cpr = enable_cpr
self._run_task: asyncio.Task[None] | None = None
self._application_tasks: list[asyncio.Task[None]] = []
self.connections: set[TelnetConnection] = set()
@classmethod
def _create_socket(cls, host: str, port: int) -> socket.socket:
# Create and bind socket
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
s.bind((host, port))
s.listen(4)
return s
async def run(self, ready_cb: Callable[[], None] | None = None) -> None:
"""
Run the telnet server, until this gets cancelled.
:param ready_cb: Callback that will be called at the point that we're
actually listening.
"""
socket = self._create_socket(self.host, self.port)
logger.info(
"Listening for telnet connections on %s port %r", self.host, self.port
)
get_running_loop().add_reader(socket, lambda: self._accept(socket))
if ready_cb:
ready_cb()
try:
# Run forever, until cancelled.
await asyncio.Future()
finally:
get_running_loop().remove_reader(socket)
socket.close()
# Wait for all applications to finish.
for t in self._application_tasks:
t.cancel()
# (This is similar to
# `Application.cancel_and_wait_for_background_tasks`. We wait for the
# background tasks to complete, but don't propagate exceptions, because
# we can't use `ExceptionGroup` yet.)
if len(self._application_tasks) > 0:
await asyncio.wait(
self._application_tasks,
timeout=None,
return_when=asyncio.ALL_COMPLETED,
)
def start(self) -> None:
"""
Deprecated: Use `.run()` instead.
Start the telnet server (stop by calling and awaiting `stop()`).
"""
if self._run_task is not None:
# Already running.
return
self._run_task = get_running_loop().create_task(self.run())
async def stop(self) -> None:
"""
Deprecated: Use `.run()` instead.
Stop a telnet server that was started using `.start()` and wait for the
cancellation to complete.
"""
if self._run_task is not None:
self._run_task.cancel()
try:
await self._run_task
except asyncio.CancelledError:
pass
def _accept(self, listen_socket: socket.socket) -> None:
"""
Accept new incoming connection.
"""
conn, addr = listen_socket.accept()
logger.info("New connection %r %r", *addr)
# Run application for this connection.
async def run() -> None:
try:
with create_pipe_input() as vt100_input:
connection = TelnetConnection(
conn,
addr,
self.interact,
self,
encoding=self.encoding,
style=self.style,
vt100_input=vt100_input,
enable_cpr=self.enable_cpr,
)
self.connections.add(connection)
logger.info("Starting interaction %r %r", *addr)
try:
await connection.run_application()
finally:
self.connections.remove(connection)
logger.info("Stopping interaction %r %r", *addr)
except EOFError:
# Happens either when the connection is closed by the client
# (e.g., when the user types 'control-]', then 'quit' in the
# telnet client) or when the user types control-d in a prompt
# and this is not handled by the interact function.
logger.info("Unhandled EOFError in telnet application.")
except KeyboardInterrupt:
# Unhandled control-c propagated by a prompt.
logger.info("Unhandled KeyboardInterrupt in telnet application.")
except BaseException as e:
print(f"Got {type(e).__name__}", e)
import traceback
traceback.print_exc()
finally:
self._application_tasks.remove(task)
task = get_running_loop().create_task(run())
self._application_tasks.append(task)
|
TelnetServer
|
python
|
ray-project__ray
|
python/ray/data/exceptions.py
|
{
"start": 873,
"end": 3926
}
|
class ____(Exception):
"""Represents an Exception originating from Ray Data internal code
or Ray Core private code paths, as opposed to user code. When
Exceptions of this form are raised, it likely indicates a bug
in Ray Data or Ray Core."""
pass
@DeveloperAPI
def omit_traceback_stdout(fn: Callable) -> Callable:
"""Decorator which runs the function, and if there is an exception raised,
drops the stack trace before re-raising the exception. The original exception,
including the full unmodified stack trace, is always written to the Ray Data
log file at `data_exception_logger._log_path`.
This is useful for stripping long stack traces of internal Ray Data code,
which can otherwise obfuscate user code errors."""
def handle_trace(*args, **kwargs):
try:
return fn(*args, **kwargs)
except Exception as e:
# Only log the full internal stack trace to stdout when configured
# via DataContext, or when the Ray Debugger is enabled.
# The full stack trace will always be emitted to the Ray Data log file.
log_to_stdout = DataContext.get_current().log_internal_stack_trace_to_stdout
if _is_ray_debugger_post_mortem_enabled():
logger.exception("Full stack trace:")
raise e
is_user_code_exception = isinstance(e, UserCodeException)
if is_user_code_exception:
# Exception has occurred in user code.
if not log_to_stdout and log_once("ray_data_exception_internal_hidden"):
logger.error(
"Exception occurred in user code, with the abbreviated stack "
"trace below. By default, the Ray Data internal stack trace "
"is omitted from stdout, and only written to the Ray Data log "
f"files at `{get_log_directory()}`. To "
"output the full stack trace to stdout, set "
"`DataContext.log_internal_stack_trace_to_stdout` to True."
)
else:
# Exception has occurred in internal Ray Data / Ray Core code.
logger.error(
"Exception occurred in Ray Data or Ray Core internal code. "
"If you continue to see this error, please open an issue on "
"the Ray project GitHub page with the full stack trace below: "
"https://github.com/ray-project/ray/issues/new/choose"
)
should_hide_traceback = is_user_code_exception and not log_to_stdout
logger.exception(
"Full stack trace:",
exc_info=True,
extra={"hide": should_hide_traceback},
)
if is_user_code_exception:
raise e.with_traceback(None)
else:
raise e.with_traceback(None) from SystemException()
return handle_trace
|
SystemException
|
python
|
pytorch__pytorch
|
torch/fx/passes/infra/partitioner.py
|
{
"start": 2031,
"end": 17472
}
|
class ____:
def __init__(
self,
graph_module: GraphModule,
operator_support: OperatorSupportBase,
allows_single_node_partition: bool = False,
non_compute_ops: Optional[Sequence[str]] = None,
allowed_single_node_partition_ops: Optional[Sequence[str]] = None,
) -> None:
self.graph_module = graph_module
self.operator_support = operator_support
self.allows_single_node_partition = allows_single_node_partition
self.non_compute_ops = non_compute_ops if non_compute_ops is not None else []
self.allowed_single_node_partition_ops = (
allowed_single_node_partition_ops
if allowed_single_node_partition_ops is not None
else []
)
self.dependency_viewer = _DependencyViewer(graph_module)
def _is_node_supported(self, node: Node) -> bool:
return self.operator_support.is_node_supported(
dict(self.graph_module.named_modules()), node
)
def propose_partitions(self) -> list[Partition]:
# partition_map is a mapping from partition id to a set of partition id's.
# The value set contains all the partition ids that can be reached by doing a
# DFS starting from the partition id in the key.
partition_map: dict[int, set] = collections.defaultdict(set)
# assumptions: nodes in candidate list is sorted in topological order
assignment: dict[Node, int] = {} # mapping from node to partition_id
partitions_by_id: dict[
int, Partition
] = {} # mapping from partition_id to partition
nodes_order: dict[
Node, int
] = {} # mapping from nodes to reversed topological order
partitions_order: dict[
int, int
] = {} # mapping from partition_id to minimum topo order of nodes in partition
partition_users: dict[
int, set
] = {} # mapping from partition_id to partition users
new_partition_id = itertools.count()
# try to merge partition other_id into partition self_id
# merge only happens if the end graph doesn't contain cyclic dependency
# returns `True` when merge happens, `False` otherwise.
def maybe_merge_partition(self_id: int, other_id: int):
# merged_nodes is the union of nodes in two partition to-be-merged
self_nodes = partitions_by_id[self_id].nodes
other_nodes = partitions_by_id[other_id].nodes
def dfs_iter_find_cycle(all_user_nodes: set[Node]):
for user_node in all_user_nodes:
visited_partition_ids = set()
for path_node in self.dependency_viewer.downstreams_of(user_node):
# If any of the nodes in the dfs path of this node are in the merged_nodes
# list then there is a cycle in the graph.
if path_node in self_nodes or path_node in other_nodes:
return True
# If any of the nodes in the dfs path of this node are in the assignment
# map then we have to make sure that the partitions that these nodes belong
# to do not form a cycle with the current partitions being merged. This means
# iterating through all the nodes in all the parititons that are traversed in
# the dfs path and checking if they are in the merged_nodes list.
if path_node in assignment:
partition_id = assignment[path_node]
# If the partition id has already been visited then we know that it doesn't
# form a cycle with the current partitions being merged.
if partition_id in visited_partition_ids:
continue
p_map = partition_map[partition_id]
if self_id in p_map or other_id in p_map:
return True
visited_partition_ids.add(partition_id)
return False
# find new partition users if merge.
all_user_nodes = partition_users[self_id] | partition_users[other_id]
all_user_nodes.difference_update(other_nodes, self_nodes)
# check if merge would create cyclic dependency.
if dfs_iter_find_cycle(all_user_nodes):
# return false indicating cyclic dependency found and
# merge is aborted
return self_id, False
# merge the smaller partition into the larger.
merge_id, removed_id = self_id, other_id
if len(self_nodes) < len(other_nodes):
merge_id, removed_id = removed_id, merge_id
# no cyclic dependency found, move forward with the merge
# updating partition nodes
partitions_by_id[merge_id].nodes.update(partitions_by_id[removed_id].nodes)
# updating assignment map
for node in partitions_by_id[removed_id].nodes:
assignment[node] = merge_id
# delete other partition
del partitions_by_id[removed_id]
partitions_order[merge_id] = min(
partitions_order[merge_id], partitions_order[removed_id]
)
del partitions_order[removed_id]
partition_map[merge_id] = partition_map[merge_id].union(
partition_map[removed_id]
)
del partition_map[removed_id]
partition_users[merge_id] = all_user_nodes
del partition_users[removed_id]
return merge_id, True
def merge_single_node(node: Node, node_order: Optional[int], id: Optional[int]):
def _update_partition_map(node: Node, id: int):
# Iterate through all the users of this node and update the partition map to indicate
# that there is a path from the partition id of this node to the target partition id.
for user_node in node.users:
target_id = assignment.get(user_node)
if target_id is not None:
partition_map[id].add(target_id)
partition_map[id].update(partition_map[target_id])
if node in assignment:
partitions_by_id[assignment[node]].remove_node(node)
if id is None:
assignment.pop(node)
elif id not in partitions_by_id:
assignment[node] = id
assert node_order is not None
partitions_by_id[id] = Partition(
id=id, nodes=[node], node_orders=[node_order]
)
partition_users[id] = set(node.users)
_update_partition_map(node, id)
else:
assignment[node] = id
partitions_by_id[id].add_node(node, node_order)
logger.debug("Proposing partitions...")
for node_order, node in enumerate(reversed(self.graph_module.graph.nodes)):
# use Dict as an ordered set to ensure deterministic partitioning result, don't care value
merge_candidates: dict[int, None] = {}
# Note a limited horizontal fusion is enabled:
# when `node` is not supported, the code below attempts to fuse consumer of `node`.
#
# I don't see a need to add a knob to disable horizontal fusion yet, we can short-cut
# the fusion by adding an `else` block here to skip horizontal fusion.
if self._is_node_supported(node) and node not in assignment:
partition_id = next(new_partition_id)
nodes_order[node] = partition_id
partitions_order[partition_id] = partition_id
merge_single_node(node, node_order, partition_id)
merge_candidates[partition_id] = None
# merge all possible partitions
for partition_id, _ in sorted(
partitions_order.items(), key=operator.itemgetter(1)
):
merge_candidates[partition_id] = None
merge_candidates_list = list(merge_candidates.keys())
if len(merge_candidates_list) > 1:
self_id = merge_candidates_list[0]
for other_id in merge_candidates_list[1:]:
# note: merge partitions if it doesn't create cyclic dependency
# in the graph, otherwise, this is a no-op
self_id, _ = maybe_merge_partition(self_id, other_id)
# sort partition nodes based on descending node order
for partition in partitions_by_id.values():
partition.nodes = dict(
sorted(
partition.nodes.items(), key=operator.itemgetter(1), reverse=True
)
)
# post processing to re-assign "getitem" nodes into upstream partition
logger.debug("Reassigning getitem nodes to its producer node's partition...")
nodes_reassignment: dict[Node, int] = {}
for node in self.graph_module.graph.nodes:
is_tuple_output = True
for user in node.users:
if (
user.op != "call_function"
or _get_qualified_name(user.target) != "_operator.getitem"
): # type: ignore[arg-type]
is_tuple_output = False
break
# node has tuple outputs, re-assign all following getitem node into node's partition
if is_tuple_output:
id = assignment.get(node) # type: ignore[arg-type]
for user in node.users:
if assignment.get(user) != id: # type: ignore[arg-type]
nodes_reassignment[user] = id # type: ignore[assignment]
for node, id in nodes_reassignment.items():
merge_single_node(node, None, id)
# filter out single node partitions
if not self.allows_single_node_partition:
logger.debug("Filtering out single node partitions...")
default_non_compute_ops = {"torch.ops.aten.view", "_operator.getitem"}
non_compute_ops = default_non_compute_ops.union(set(self.non_compute_ops))
partitions_to_remove: list[int] = []
for id, partition in partitions_by_id.items():
compute_node_count = 0
for node in partition.nodes:
if node.op == "call_function":
assert callable(node.target)
if _get_qualified_name(node.target) not in non_compute_ops:
compute_node_count += 1
if (
_get_qualified_name(node.target)
in self.allowed_single_node_partition_ops
):
compute_node_count += 1
if compute_node_count <= 1:
partitions_to_remove.append(id)
for id in partitions_to_remove:
del partitions_by_id[id]
logger.debug("Partitions proposed:")
for id, partition in partitions_by_id.items():
logger.debug(
"partition #%s: %s", id, [node.name for node in partition.nodes]
)
return [
partition for partition in partitions_by_id.values() if partition.size() > 0
]
def fuse_partitions(
self, partitions: list[Partition], prefix: str = "fused_"
) -> GraphModule:
logger.debug("Fusing partitions...")
# fuse_by_partitions expects partitions in List[Dict[Node, None]]: [ {node0 : None}, {node1 : None} ]
return fuse_by_partitions(
self.graph_module,
[partition.nodes for partition in partitions],
prefix=prefix,
)
# remove non-compute-ops that sits at the boundary of a partition.
def remove_bookend_non_compute_ops(self, partitions: list[Partition]):
non_compute_ops = set(self.non_compute_ops)
def is_non_compute_node(node: Node):
return (
node.op == "call_function"
and _get_qualified_name(node.target) in non_compute_ops # type: ignore[arg-type]
)
# cache transparent nodes
transparent_input_nodes: dict[Node, bool] = {}
transparent_output_nodes: dict[Node, bool] = {}
def is_transparent_input_node(
node: Node, partition: set[Node], removed_nodes: set[Node]
):
if (
node.op == "placeholder"
or (node not in partition)
or (node in removed_nodes)
):
return True
if node in transparent_input_nodes:
return transparent_input_nodes[node]
if is_non_compute_node(node):
for input_n in node.all_input_nodes:
if not is_transparent_input_node(input_n, partition, removed_nodes):
transparent_input_nodes[node] = False
return False
transparent_input_nodes[node] = True
return True
transparent_input_nodes[node] = False
return False
def is_transparent_output_node(
node: Node, partition: set[Node], removed_nodes: set[Node]
):
if (
node.op == "placeholder"
or (node not in partition)
or (node in removed_nodes)
):
return True
if node in transparent_output_nodes:
return transparent_output_nodes[node]
if is_non_compute_node(node):
for output_n in node.users:
if not is_transparent_output_node(
output_n, partition, removed_nodes
):
transparent_output_nodes[node] = False
return False
transparent_output_nodes[node] = True
return True
transparent_output_nodes[node] = False
return False
for partition in partitions:
# Note it's ok to use `set` here, since we are only query if a node
# has been removed. We are NEVER going to iterate on nodes inside
# the set.
remove_node: set[Node] = set()
for node in partition.nodes:
if is_non_compute_node(node) and (
is_transparent_input_node(node, set(partition.nodes), remove_node)
or is_transparent_output_node(
node, set(partition.nodes), remove_node
)
):
remove_node.add(node)
if len(remove_node) != 0:
for node in remove_node:
partition.nodes.pop(node, None)
def partition_and_fuse(self, prefix: str = "fused_") -> GraphModule:
partitions = self.propose_partitions()
fused_gm = self.fuse_partitions(partitions, prefix=prefix)
return fused_gm
|
CapabilityBasedPartitioner
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 655377,
"end": 655807
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("actor", "client_mutation_id", "unlocked_record")
actor = sgqlc.types.Field(Actor, graphql_name="actor")
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
unlocked_record = sgqlc.types.Field(Lockable, graphql_name="unlockedRecord")
|
UnlockLockablePayload
|
python
|
pytorch__pytorch
|
torch/testing/_internal/distributed/nn/api/remote_module_test.py
|
{
"start": 3949,
"end": 18550
}
|
class ____(CommonRemoteModuleTest):
@dist_utils.dist_init
def test_bad_module(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
remote_device = f"{dst_worker_name}/cpu"
args = (1,)
kwargs = dict(first_kwarg=2)
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
with self.assertRaisesRegex(
ValueError,
r"Expect `module_cls\(\*args, \*\*kwargs\)` returns an instance of <class nn.Module>,",
):
RemoteModule(remote_device, BadModule, args, kwargs).forward()
@dist_utils.dist_init
def test_forward_async(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret_fut = remote_module.forward_async(*args)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_async_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward_async(scripted_remote_module: RemoteMyModuleInterface):
ret_fut = scripted_remote_module.forward_async(torch.ones(1), 2, "3")
ret = ret_fut.wait()
return ret
ret = run_forward_async(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_sync(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2, "3")
for remote_module in self._create_remote_module_iter(dst_worker_name):
ret = remote_module.forward(*args)
self.assertEqual(ret, tuple(reversed(args)))
@dist_utils.dist_init
def test_forward_sync_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
scripted_remote_module = next(
self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
)
)
@torch.jit.script
def run_forward(scripted_remote_module: MyModuleInterface):
ret = scripted_remote_module.forward(torch.ones(1), 2, "3")
return ret
ret = run_forward(scripted_remote_module)
self.assertEqual(ret, ("3", 2, torch.ones(1)))
@dist_utils.dist_init
def test_forward_with_kwargs(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
args = (torch.ones(1), 2)
kwargs = dict(word="3")
# Only test Python nn.Module, because script module methods don't support taking kwargs.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
ret_fut = remote_module.forward_async(*args, **kwargs)
ret = ret_fut.wait()
self.assertEqual(ret, tuple(reversed(args + ("3",))))
ret = remote_module.forward(*args, **kwargs)
self.assertEqual(ret, tuple(reversed(args + ("3",))))
@dist_utils.dist_init
def test_remote_parameters(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``remote_parameters``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
param_rrefs = remote_module.remote_parameters()
self.assertEqual(len(param_rrefs), 1)
self.assertTrue(torch.equal(param_rrefs[0].to_here(), _PARAM_VAL))
@dist_utils.dist_init
def test_get_module_rref(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# Only test Python nn.Module, because script module methods don't support ``get_module_rref``.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
rref = remote_module.get_module_rref()
self.assertEqual(rref, remote_module.module_rref)
for param in rref.to_here().parameters():
self.assertTrue(torch.equal(param, _PARAM_VAL))
@dist_utils.dist_init
def test_train_eval(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
remote_module.train()
ret1 = rpc.rpc_sync(
dst_worker_name,
get_remote_training_arg,
args=(remote_module.get_module_rref(),),
)
self.assertEqual(ret1, True)
remote_module.eval()
ret2 = rpc.rpc_sync(
dst_worker_name,
get_remote_training_arg,
args=(remote_module.get_module_rref(),),
)
self.assertEqual(ret2, False)
@dist_utils.dist_init
def test_unsupported_methods(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with self.assertRaisesRegex(
ValueError, r"Method ``register_buffer`` not supported for RemoteModule"
):
remote_module.register_buffer("buffer", torch.ones(5))
with self.assertRaisesRegex(
ValueError,
r"Method ``register_parameter`` not supported for RemoteModule",
):
remote_module.register_parameter(
"param", torch.nn.Parameter(torch.ones(1))
)
with self.assertRaisesRegex(
ValueError, r"Method ``add_module`` not supported for RemoteModule"
):
remote_module.add_module("empty", None)
with self.assertRaisesRegex(
ValueError, r"Method ``apply`` not supported for RemoteModule"
):
fn = torch.rand((3, 3), requires_grad=False)
remote_module.apply(fn)
with self.assertRaisesRegex(
ValueError, r"Method ``cuda`` not supported for RemoteModule"
):
remote_module.cuda()
with self.assertRaisesRegex(
ValueError, r"Method ``cpu`` not supported for RemoteModule"
):
remote_module.cpu()
with self.assertRaisesRegex(
ValueError, r"Method ``type`` not supported for RemoteModule"
):
remote_module.type(torch.FloatTensor)
with self.assertRaisesRegex(
ValueError, r"Method ``float`` not supported for RemoteModule"
):
remote_module.float()
with self.assertRaisesRegex(
ValueError, r"Method ``double`` not supported for RemoteModule"
):
remote_module.double()
with self.assertRaisesRegex(
ValueError, r"Method ``bfloat16`` not supported for RemoteModule"
):
remote_module.bfloat16()
with self.assertRaisesRegex(
ValueError, r"Method ``to`` not supported for RemoteModule"
):
remote_module.to("cpu", dtype=torch.int32)
def hook(module, grad_input, grad_output):
pass
with self.assertRaisesRegex(
ValueError,
r"Method ``register_backward_hook`` not supported for RemoteModule",
):
remote_module.register_backward_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_pre_hook`` not supported for RemoteModule",
):
remote_module.register_forward_pre_hook(hook)
with self.assertRaisesRegex(
ValueError,
r"Method ``register_forward_hook`` not supported for RemoteModule",
):
remote_module.register_forward_hook(hook)
with self.assertRaisesRegex(
ValueError, r"Method ``state_dict`` not supported for RemoteModule"
):
remote_module.state_dict()
with self.assertRaisesRegex(
ValueError, r"Method ``load_state_dict`` not supported for RemoteModule"
):
remote_module.load_state_dict({})
with self.assertRaisesRegex(
ValueError,
r"Method ``parameters`` not supported for RemoteModule. Please use ``remote_parameters`` instead.",
):
remote_module.parameters()
with self.assertRaisesRegex(
ValueError,
r"Method ``named_parameters`` not supported for RemoteModule",
):
remote_module.named_parameters()
with self.assertRaisesRegex(
ValueError, r"Method ``buffers`` not supported for RemoteModule"
):
remote_module.buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``named_buffers`` not supported for RemoteModule"
):
remote_module.named_buffers()
with self.assertRaisesRegex(
ValueError, r"Method ``children`` not supported for RemoteModule"
):
remote_module.children()
with self.assertRaisesRegex(
ValueError, r"Method ``named_children`` not supported for RemoteModule"
):
remote_module.named_children()
with self.assertRaisesRegex(
ValueError, r"Method ``modules`` not supported for RemoteModule"
):
remote_module.modules()
with self.assertRaisesRegex(
ValueError, r"Method ``named_modules`` not supported for RemoteModule"
):
remote_module.named_modules()
with self.assertRaisesRegex(
ValueError, r"Method ``requires_grad_`` not supported for RemoteModule"
):
remote_module.requires_grad_()
with self.assertRaisesRegex(
ValueError, r"Method ``zero_grad`` not supported for RemoteModule"
):
remote_module.zero_grad()
with self.assertRaisesRegex(
ValueError, r"Method ``share_memory`` not supported for RemoteModule"
):
remote_module.share_memory()
with self.assertRaisesRegex(
ValueError, r"Method ``extra_repr`` not supported for RemoteModule"
):
remote_module.extra_repr()
@dist_utils.dist_init
def test_send_remote_module_with_a_new_attribute_not_pickled_over_the_wire(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
# If a new attribute is added to this RemoteModule after the initialization,
# and it will be sent over the wire by RPC,
# this new field will not be pickled, because it's not specified in _REMOTE_MODULE_PICKLED_ATTRIBUTES.
# Note that adding a new attribute out of constructor should rarely happen.
# If a new attribute is added to RemoteModule constructor,
# there is a sanity check to enforce developers to add this attribute to either
# _REMOTE_MODULE_PICKLED_ATTRIBUTES or _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
new_attr_name = "new_attr"
setattr(remote_module, new_attr_name, 1)
attrs = rpc.rpc_sync(
dst_worker_name, remote_module_attributes, (remote_module,)
)
self.assertNotIn(new_attr_name, attrs)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR]
):
with TemporaryFileName() as fname:
with self.assertRaisesRegex(
RuntimeError,
"Cannot pickle RemoteModule in python pickler. RemoteModule can only be pickled when using RPC",
):
torch.save(remote_module, fname)
@dist_utils.dist_init
def test_remote_module_py_pickle_not_supported_script(self):
if self.rank != 0:
return
dst_worker_name = dist_utils.worker_name((self.rank + 1) % self.world_size)
for remote_module in self._create_remote_module_iter(
dst_worker_name, modes=[ModuleCreationMode.MODULE_CTOR_WITH_INTERFACE]
):
with (
TemporaryFileName() as fname,
self.assertRaisesRegex(
torch.jit.Error, "can only be pickled when using RPC"
),
):
torch.save(remote_module, fname)
|
RemoteModuleTest
|
python
|
huggingface__transformers
|
tests/models/bart/test_modeling_bart.py
|
{
"start": 87684,
"end": 95530
}
|
class ____:
def __init__(
self,
parent,
vocab_size=99,
batch_size=13,
d_model=16,
decoder_seq_length=7,
is_training=True,
is_decoder=True,
use_attention_mask=True,
use_cache=False,
use_labels=True,
decoder_start_token_id=2,
decoder_ffn_dim=32,
decoder_layers=2,
encoder_attention_heads=4,
decoder_attention_heads=4,
max_position_embeddings=50,
is_encoder_decoder=False,
pad_token_id=0,
bos_token_id=1,
eos_token_id=2,
scope=None,
):
self.parent = parent
self.batch_size = batch_size
self.decoder_seq_length = decoder_seq_length
# For common tests
self.seq_length = self.decoder_seq_length
self.is_training = is_training
self.use_attention_mask = use_attention_mask
self.use_labels = use_labels
self.vocab_size = vocab_size
self.d_model = d_model
self.hidden_size = d_model
self.num_hidden_layers = decoder_layers
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.decoder_attention_heads = decoder_attention_heads
self.num_attention_heads = decoder_attention_heads
self.eos_token_id = eos_token_id
self.bos_token_id = bos_token_id
self.pad_token_id = pad_token_id
self.decoder_start_token_id = decoder_start_token_id
self.use_cache = use_cache
self.max_position_embeddings = max_position_embeddings
self.is_encoder_decoder = is_encoder_decoder
self.scope = None
self.decoder_key_length = decoder_seq_length
self.base_model_out_len = 2
self.decoder_attention_idx = 1
def prepare_config_and_inputs(self):
input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
attention_mask = None
if self.use_attention_mask:
attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
lm_labels = None
if self.use_labels:
lm_labels = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
config = BartConfig(
vocab_size=self.vocab_size,
d_model=self.d_model,
encoder_layers=self.decoder_layers,
decoder_layers=self.decoder_layers,
decoder_ffn_dim=self.decoder_ffn_dim,
encoder_attention_heads=self.encoder_attention_heads,
decoder_attention_heads=self.decoder_attention_heads,
eos_token_id=self.eos_token_id,
bos_token_id=self.bos_token_id,
use_cache=self.use_cache,
pad_token_id=self.pad_token_id,
decoder_start_token_id=self.decoder_start_token_id,
max_position_embeddings=self.max_position_embeddings,
is_encoder_decoder=self.is_encoder_decoder,
forced_eos_token_id=None,
)
return (
config,
input_ids,
attention_mask,
lm_labels,
)
def prepare_config_and_inputs_for_decoder(self):
(
config,
input_ids,
attention_mask,
lm_labels,
) = self.prepare_config_and_inputs()
encoder_hidden_states = floats_tensor([self.batch_size, self.decoder_seq_length, self.hidden_size])
encoder_attention_mask = ids_tensor([self.batch_size, self.decoder_seq_length], vocab_size=2)
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
lm_labels,
)
def create_and_check_decoder_model_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
config.use_cache = True
model = BartDecoder(config=config).to(torch_device).eval()
# first forward pass
outputs = model(input_ids, use_cache=True)
outputs_use_cache_conf = model(input_ids)
outputs_no_past = model(input_ids, use_cache=False)
self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)
past_key_values = outputs["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# append to next input_ids and
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
output_from_no_past = model(next_input_ids)["last_hidden_state"]
output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def create_and_check_decoder_model_attention_mask_past(
self,
config,
input_ids,
attention_mask,
lm_labels,
):
model = BartDecoder(config=config).to(torch_device).eval()
# create attention mask
attn_mask = torch.ones(input_ids.shape, dtype=torch.long, device=torch_device)
half_seq_length = input_ids.shape[-1] // 2
attn_mask[:, half_seq_length:] = 0
# first forward pass
past_key_values = model(input_ids, attention_mask=attn_mask, use_cache=True)["past_key_values"]
# create hypothetical next token and extent to next_input_ids
next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)
# change a random masked slice from input_ids
random_seq_idx_to_change = ids_tensor((1,), half_seq_length).item() + 1
random_other_next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size).squeeze(-1)
input_ids[:, -random_seq_idx_to_change] = random_other_next_tokens
# append to next input_ids and attn_mask
next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
attn_mask = torch.cat(
[attn_mask, torch.ones((attn_mask.shape[0], 1), dtype=torch.long, device=torch_device)],
dim=1,
)
# get two different outputs
output_from_no_past = model(next_input_ids, attention_mask=attn_mask)["last_hidden_state"]
output_from_past = model(
next_tokens, attention_mask=attn_mask, past_key_values=past_key_values, use_cache=True
)["last_hidden_state"]
# select random slice
random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
output_from_no_past_slice = output_from_no_past[:, next_input_ids.shape[-1] - 1, random_slice_idx].detach()
output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
assert torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
(
config,
input_ids,
attention_mask,
lm_labels,
) = config_and_inputs
inputs_dict = {
"input_ids": input_ids,
"attention_mask": attention_mask,
}
return config, inputs_dict
@require_torch
|
BartStandaloneDecoderModelTester
|
python
|
astropy__astropy
|
astropy/coordinates/calculation.py
|
{
"start": 394,
"end": 7138
}
|
class ____(ValueError):
pass
def get_sign(dt):
""" """
if (int(dt.month) == 12 and int(dt.day) >= 22) or (
int(dt.month) == 1 and int(dt.day) <= 19
):
zodiac_sign = "capricorn"
elif (int(dt.month) == 1 and int(dt.day) >= 20) or (
int(dt.month) == 2 and int(dt.day) <= 17
):
zodiac_sign = "aquarius"
elif (int(dt.month) == 2 and int(dt.day) >= 18) or (
int(dt.month) == 3 and int(dt.day) <= 19
):
zodiac_sign = "pisces"
elif (int(dt.month) == 3 and int(dt.day) >= 20) or (
int(dt.month) == 4 and int(dt.day) <= 19
):
zodiac_sign = "aries"
elif (int(dt.month) == 4 and int(dt.day) >= 20) or (
int(dt.month) == 5 and int(dt.day) <= 20
):
zodiac_sign = "taurus"
elif (int(dt.month) == 5 and int(dt.day) >= 21) or (
int(dt.month) == 6 and int(dt.day) <= 20
):
zodiac_sign = "gemini"
elif (int(dt.month) == 6 and int(dt.day) >= 21) or (
int(dt.month) == 7 and int(dt.day) <= 22
):
zodiac_sign = "cancer"
elif (int(dt.month) == 7 and int(dt.day) >= 23) or (
int(dt.month) == 8 and int(dt.day) <= 22
):
zodiac_sign = "leo"
elif (int(dt.month) == 8 and int(dt.day) >= 23) or (
int(dt.month) == 9 and int(dt.day) <= 22
):
zodiac_sign = "virgo"
elif (int(dt.month) == 9 and int(dt.day) >= 23) or (
int(dt.month) == 10 and int(dt.day) <= 22
):
zodiac_sign = "libra"
elif (int(dt.month) == 10 and int(dt.day) >= 23) or (
int(dt.month) == 11 and int(dt.day) <= 21
):
zodiac_sign = "scorpio"
elif (int(dt.month) == 11 and int(dt.day) >= 22) or (
int(dt.month) == 12 and int(dt.day) <= 21
):
zodiac_sign = "sagittarius"
return zodiac_sign
_VALID_SIGNS = [
"capricorn",
"aquarius",
"pisces",
"aries",
"taurus",
"gemini",
"cancer",
"leo",
"virgo",
"libra",
"scorpio",
"sagittarius",
]
# Some of the constellation names map to different astrological "sign names".
# Astrologers really needs to talk to the IAU...
_CONST_TO_SIGNS = {"capricornus": "capricorn", "scorpius": "scorpio"}
_ZODIAC = (
(1900, "rat"),
(1901, "ox"),
(1902, "tiger"),
(1903, "rabbit"),
(1904, "dragon"),
(1905, "snake"),
(1906, "horse"),
(1907, "goat"),
(1908, "monkey"),
(1909, "rooster"),
(1910, "dog"),
(1911, "pig"),
)
# https://stackoverflow.com/questions/12791871/chinese-zodiac-python-program
def _get_zodiac(yr):
return _ZODIAC[(yr - _ZODIAC[0][0]) % 12][1]
def horoscope(birthday, corrected=True, chinese=False):
"""
Enter your birthday as an `astropy.time.Time` object and
receive a mystical horoscope about things to come.
Parameters
----------
birthday : `astropy.time.Time` or str
Your birthday as a `datetime.datetime` or `astropy.time.Time` object
or "YYYY-MM-DD"string.
corrected : bool
Whether to account for the precession of the Earth instead of using the
ancient Greek dates for the signs. After all, you do want your *real*
horoscope, not a cheap inaccurate approximation, right?
chinese : bool
Chinese annual zodiac wisdom instead of Western one.
Returns
-------
Infinite wisdom, condensed into astrologically precise prose.
Notes
-----
This function was implemented on April 1. Take note of that date.
"""
from bs4 import BeautifulSoup
today = datetime.now()
err_msg = "Invalid response from celestial gods (failed to load horoscope)."
headers = {"User-Agent": "foo/bar"}
special_words = {
"([sS]tar[s^ ]*)": "yellow",
"([yY]ou[^ ]*)": "magenta",
"([pP]lay[^ ]*)": "blue",
"([hH]eart)": "red",
"([fF]ate)": "lightgreen",
}
if isinstance(birthday, str):
birthday = datetime.strptime(birthday, "%Y-%m-%d")
if chinese:
# TODO: Make this more accurate by using the actual date, not just year
# Might need third-party tool like https://pypi.org/project/lunardate
zodiac_sign = _get_zodiac(birthday.year)
url = (
"https://www.horoscope.com/us/horoscopes/yearly/"
f"{today.year}-chinese-horoscope-{zodiac_sign}.aspx"
)
summ_title_sfx = f"in {today.year}"
try:
res = Request(url, headers=headers)
with urlopen(res) as f:
try:
doc = BeautifulSoup(f, "html.parser")
# TODO: Also include Love, Family & Friends, Work, Money, More?
item = doc.find(id="overview")
desc = item.getText()
except Exception:
raise CelestialError(err_msg)
except Exception:
raise CelestialError(err_msg)
else:
birthday = atime.Time(birthday)
if corrected:
with warnings.catch_warnings():
warnings.simplefilter("ignore") # Ignore ErfaWarning
zodiac_sign = get_sun(birthday).get_constellation().lower()
zodiac_sign = _CONST_TO_SIGNS.get(zodiac_sign, zodiac_sign)
if zodiac_sign not in _VALID_SIGNS:
raise HumanError(
f"On your birthday the sun was in {zodiac_sign.title()}, which is"
" not a sign of the zodiac. You must not exist. Or maybe you can"
" settle for corrected=False."
)
else:
zodiac_sign = get_sign(birthday.to_datetime())
url = f"https://astrology.com/horoscope/daily/{zodiac_sign}.html"
summ_title_sfx = f"on {today.strftime('%Y-%m-%d')}"
res = Request(url, headers=headers)
with urlopen(res) as f:
try:
doc = BeautifulSoup(f, "html.parser")
item = doc.find("div", {"id": "content"})
desc = item.getText()
except Exception:
raise CelestialError(err_msg)
print("*" * 79)
color_print(f"Horoscope for {zodiac_sign.capitalize()} {summ_title_sfx}:", "green")
print("*" * 79)
for block in textwrap.wrap(desc, 79):
split_block = block.split()
for i, word in enumerate(split_block):
for re_word, color in special_words.items():
match = re.search(re_word, word)
if match is None:
continue
split_block[i] = _color_text(match.groups()[0], color)
print(" ".join(split_block))
def inject_horoscope():
import astropy
astropy._yourfuture = horoscope
inject_horoscope()
|
CelestialError
|
python
|
huggingface__transformers
|
src/transformers/feature_extraction_utils.py
|
{
"start": 8252,
"end": 28367
}
|
class ____(PushToHubMixin):
"""
This is a feature extraction mixin used to provide saving/loading functionality for sequential and audio feature
extractors.
"""
_auto_class = None
def __init__(self, **kwargs):
"""Set elements of `kwargs` as attributes."""
# Pop "processor_class" as it should be saved as private attribute
self._processor_class = kwargs.pop("processor_class", None)
# Additional attributes without default values
for key, value in kwargs.items():
try:
setattr(self, key, value)
except AttributeError as err:
logger.error(f"Can't set {key} with value {value} for {self}")
raise err
def _set_processor_class(self, processor_class: str):
"""Sets processor class as an attribute."""
self._processor_class = processor_class
@classmethod
def from_pretrained(
cls: type[SpecificFeatureExtractorType],
pretrained_model_name_or_path: Union[str, os.PathLike],
cache_dir: Optional[Union[str, os.PathLike]] = None,
force_download: bool = False,
local_files_only: bool = False,
token: Optional[Union[str, bool]] = None,
revision: str = "main",
**kwargs,
) -> SpecificFeatureExtractorType:
r"""
Instantiate a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a feature extractor, *e.g.* a
derived class of [`SequenceFeatureExtractor`].
Args:
pretrained_model_name_or_path (`str` or `os.PathLike`):
This can be either:
- a string, the *model id* of a pretrained feature_extractor hosted inside a model repo on
huggingface.co.
- a path to a *directory* containing a feature extractor file saved using the
[`~feature_extraction_utils.FeatureExtractionMixin.save_pretrained`] method, e.g.,
`./my_model_directory/`.
- a path or url to a saved feature extractor JSON *file*, e.g.,
`./my_model_directory/preprocessor_config.json`.
cache_dir (`str` or `os.PathLike`, *optional*):
Path to a directory in which a downloaded pretrained model feature extractor should be cached if the
standard cache should not be used.
force_download (`bool`, *optional*, defaults to `False`):
Whether or not to force to (re-)download the feature extractor files and override the cached versions
if they exist.
proxies (`dict[str, str]`, *optional*):
A dictionary of proxy servers to use by protocol or endpoint, e.g., `{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}.` The proxies are used on each request.
token (`str` or `bool`, *optional*):
The token to use as HTTP bearer authorization for remote files. If `True`, or not specified, will use
the token generated when running `hf auth login` (stored in `~/.huggingface`).
revision (`str`, *optional*, defaults to `"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so `revision` can be any
identifier allowed by git.
<Tip>
To test a pull request you made on the Hub, you can pass `revision="refs/pr/<pr_number>"`.
</Tip>
return_unused_kwargs (`bool`, *optional*, defaults to `False`):
If `False`, then this function returns just the final feature extractor object. If `True`, then this
functions returns a `Tuple(feature_extractor, unused_kwargs)` where *unused_kwargs* is a dictionary
consisting of the key/value pairs whose keys are not feature extractor attributes: i.e., the part of
`kwargs` which has not been used to update `feature_extractor` and is otherwise ignored.
kwargs (`dict[str, Any]`, *optional*):
The values in kwargs of any keys which are feature extractor attributes will be used to override the
loaded values. Behavior concerning key/value pairs whose keys are *not* feature extractor attributes is
controlled by the `return_unused_kwargs` keyword parameter.
Returns:
A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`].
Examples:
```python
# We can't instantiate directly the base class *FeatureExtractionMixin* nor *SequenceFeatureExtractor* so let's show the examples on a
# derived class: *Wav2Vec2FeatureExtractor*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h"
) # Download feature_extraction_config from huggingface.co and cache.
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"./test/saved_model/"
) # E.g. feature_extractor (or model) was saved using *save_pretrained('./test/saved_model/')*
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained("./test/saved_model/preprocessor_config.json")
feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False
)
assert feature_extractor.return_attention_mask is False
feature_extractor, unused_kwargs = Wav2Vec2FeatureExtractor.from_pretrained(
"facebook/wav2vec2-base-960h", return_attention_mask=False, foo=False, return_unused_kwargs=True
)
assert feature_extractor.return_attention_mask is False
assert unused_kwargs == {"foo": False}
```"""
kwargs["cache_dir"] = cache_dir
kwargs["force_download"] = force_download
kwargs["local_files_only"] = local_files_only
kwargs["revision"] = revision
if token is not None:
kwargs["token"] = token
feature_extractor_dict, kwargs = cls.get_feature_extractor_dict(pretrained_model_name_or_path, **kwargs)
return cls.from_dict(feature_extractor_dict, **kwargs)
def save_pretrained(self, save_directory: Union[str, os.PathLike], push_to_hub: bool = False, **kwargs):
"""
Save a feature_extractor object to the directory `save_directory`, so that it can be re-loaded using the
[`~feature_extraction_utils.FeatureExtractionMixin.from_pretrained`] class method.
Args:
save_directory (`str` or `os.PathLike`):
Directory where the feature extractor JSON file will be saved (will be created if it does not exist).
push_to_hub (`bool`, *optional*, defaults to `False`):
Whether or not to push your model to the Hugging Face model hub after saving it. You can specify the
repository you want to push to with `repo_id` (will default to the name of `save_directory` in your
namespace).
kwargs (`dict[str, Any]`, *optional*):
Additional key word arguments passed along to the [`~utils.PushToHubMixin.push_to_hub`] method.
"""
if os.path.isfile(save_directory):
raise AssertionError(f"Provided path ({save_directory}) should be a directory, not a file")
os.makedirs(save_directory, exist_ok=True)
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo_id = kwargs.pop("repo_id", save_directory.split(os.path.sep)[-1])
repo_id = create_repo(repo_id, exist_ok=True, **kwargs).repo_id
files_timestamps = self._get_files_timestamps(save_directory)
# If we have a custom config, we copy the file defining it in the folder and set the attributes so it can be
# loaded from the Hub.
if self._auto_class is not None:
custom_object_save(self, save_directory, config=self)
# If we save using the predefined names, we can load using `from_pretrained`
output_feature_extractor_file = os.path.join(save_directory, FEATURE_EXTRACTOR_NAME)
self.to_json_file(output_feature_extractor_file)
logger.info(f"Feature extractor saved in {output_feature_extractor_file}")
if push_to_hub:
self._upload_modified_files(
save_directory,
repo_id,
files_timestamps,
commit_message=commit_message,
token=kwargs.get("token"),
)
return [output_feature_extractor_file]
@classmethod
def get_feature_extractor_dict(
cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs
) -> tuple[dict[str, Any], dict[str, Any]]:
"""
From a `pretrained_model_name_or_path`, resolve to a dictionary of parameters, to be used for instantiating a
feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] using `from_dict`.
Parameters:
pretrained_model_name_or_path (`str` or `os.PathLike`):
The identifier of the pre-trained checkpoint from which we want the dictionary of parameters.
Returns:
`tuple[Dict, Dict]`: The dictionary(ies) that will be used to instantiate the feature extractor object.
"""
cache_dir = kwargs.pop("cache_dir", None)
force_download = kwargs.pop("force_download", False)
proxies = kwargs.pop("proxies", None)
subfolder = kwargs.pop("subfolder", None)
token = kwargs.pop("token", None)
local_files_only = kwargs.pop("local_files_only", False)
revision = kwargs.pop("revision", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "feature extractor", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
pretrained_model_name_or_path = str(pretrained_model_name_or_path)
is_local = os.path.isdir(pretrained_model_name_or_path)
if os.path.isdir(pretrained_model_name_or_path):
feature_extractor_file = os.path.join(pretrained_model_name_or_path, FEATURE_EXTRACTOR_NAME)
if os.path.isfile(pretrained_model_name_or_path):
resolved_feature_extractor_file = pretrained_model_name_or_path
resolved_processor_file = None
is_local = True
else:
feature_extractor_file = FEATURE_EXTRACTOR_NAME
try:
# Load from local folder or from cache or download from model Hub and cache
resolved_processor_file = cached_file(
pretrained_model_name_or_path,
filename=PROCESSOR_NAME,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
user_agent=user_agent,
revision=revision,
subfolder=subfolder,
_raise_exceptions_for_missing_entries=False,
)
resolved_feature_extractor_file = cached_file(
pretrained_model_name_or_path,
filename=feature_extractor_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
local_files_only=local_files_only,
token=token,
user_agent=user_agent,
revision=revision,
subfolder=subfolder,
_raise_exceptions_for_missing_entries=False,
)
except OSError:
# Raise any environment error raise by `cached_file`. It will have a helpful error message adapted to
# the original exception.
raise
except Exception:
# For any other exception, we throw a generic error.
raise OSError(
f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load"
" it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
f" directory containing a {FEATURE_EXTRACTOR_NAME} file"
)
# Load feature_extractor dict. Priority goes as (nested config if found -> image processor config)
# We are downloading both configs because almost all models have a `processor_config.json` but
# not all of these are nested. We need to check if it was saved recebtly as nested or if it is legacy style
feature_extractor_dict = None
if resolved_processor_file is not None:
processor_dict = safe_load_json_file(resolved_processor_file)
if "feature_extractor" in processor_dict or "audio_processor" in processor_dict:
feature_extractor_dict = processor_dict.get("feature_extractor", processor_dict.get("audio_processor"))
if resolved_feature_extractor_file is not None and feature_extractor_dict is None:
feature_extractor_dict = safe_load_json_file(resolved_feature_extractor_file)
if feature_extractor_dict is None:
raise OSError(
f"Can't load feature extractor for '{pretrained_model_name_or_path}'. If you were trying to load"
" it from 'https://huggingface.co/models', make sure you don't have a local directory with the"
f" same name. Otherwise, make sure '{pretrained_model_name_or_path}' is the correct path to a"
f" directory containing a {feature_extractor_file} file"
)
if is_local:
logger.info(f"loading configuration file {resolved_feature_extractor_file}")
else:
logger.info(
f"loading configuration file {feature_extractor_file} from cache at {resolved_feature_extractor_file}"
)
return feature_extractor_dict, kwargs
@classmethod
def from_dict(
cls, feature_extractor_dict: dict[str, Any], **kwargs
) -> Union["FeatureExtractionMixin", tuple["FeatureExtractionMixin", dict[str, Any]]]:
"""
Instantiates a type of [`~feature_extraction_utils.FeatureExtractionMixin`] from a Python dictionary of
parameters.
Args:
feature_extractor_dict (`dict[str, Any]`):
Dictionary that will be used to instantiate the feature extractor object. Such a dictionary can be
retrieved from a pretrained checkpoint by leveraging the
[`~feature_extraction_utils.FeatureExtractionMixin.to_dict`] method.
kwargs (`dict[str, Any]`):
Additional parameters from which to initialize the feature extractor object.
Returns:
[`~feature_extraction_utils.FeatureExtractionMixin`]: The feature extractor object instantiated from those
parameters.
"""
return_unused_kwargs = kwargs.pop("return_unused_kwargs", False)
# Update feature_extractor with kwargs if needed
to_remove = []
for key, value in kwargs.items():
if key in feature_extractor_dict:
feature_extractor_dict[key] = value
to_remove.append(key)
for key in to_remove:
kwargs.pop(key, None)
feature_extractor = cls(**feature_extractor_dict)
logger.info(f"Feature extractor {feature_extractor}")
if return_unused_kwargs:
return feature_extractor, kwargs
else:
return feature_extractor
def to_dict(self) -> dict[str, Any]:
"""
Serializes this instance to a Python dictionary. Returns:
`dict[str, Any]`: Dictionary of all the attributes that make up this configuration instance.
"""
output = copy.deepcopy(self.__dict__)
output["feature_extractor_type"] = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "window" in output:
del output["window"]
return output
@classmethod
def from_json_file(cls, json_file: Union[str, os.PathLike]) -> "FeatureExtractionMixin":
"""
Instantiates a feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`] from the path to
a JSON file of parameters.
Args:
json_file (`str` or `os.PathLike`):
Path to the JSON file containing the parameters.
Returns:
A feature extractor of type [`~feature_extraction_utils.FeatureExtractionMixin`]: The feature_extractor
object instantiated from that JSON file.
"""
with open(json_file, encoding="utf-8") as reader:
text = reader.read()
feature_extractor_dict = json.loads(text)
return cls(**feature_extractor_dict)
def to_json_string(self) -> str:
"""
Serializes this instance to a JSON string.
Returns:
`str`: String containing all the attributes that make up this feature_extractor instance in JSON format.
"""
dictionary = self.to_dict()
for key, value in dictionary.items():
if isinstance(value, np.ndarray):
dictionary[key] = value.tolist()
# make sure private name "_processor_class" is correctly
# saved as "processor_class"
_processor_class = dictionary.pop("_processor_class", None)
if _processor_class is not None:
dictionary["processor_class"] = _processor_class
return json.dumps(dictionary, indent=2, sort_keys=True) + "\n"
def to_json_file(self, json_file_path: Union[str, os.PathLike]):
"""
Save this instance to a JSON file.
Args:
json_file_path (`str` or `os.PathLike`):
Path to the JSON file in which this feature_extractor instance's parameters will be saved.
"""
with open(json_file_path, "w", encoding="utf-8") as writer:
writer.write(self.to_json_string())
def __repr__(self):
return f"{self.__class__.__name__} {self.to_json_string()}"
@classmethod
def register_for_auto_class(cls, auto_class="AutoFeatureExtractor"):
"""
Register this class with a given auto class. This should only be used for custom feature extractors as the ones
in the library are already mapped with `AutoFeatureExtractor`.
Args:
auto_class (`str` or `type`, *optional*, defaults to `"AutoFeatureExtractor"`):
The auto class to register this new feature extractor with.
"""
if not isinstance(auto_class, str):
auto_class = auto_class.__name__
import transformers.models.auto as auto_module
if not hasattr(auto_module, auto_class):
raise ValueError(f"{auto_class} is not a valid auto class.")
cls._auto_class = auto_class
FeatureExtractionMixin.push_to_hub = copy_func(FeatureExtractionMixin.push_to_hub)
if FeatureExtractionMixin.push_to_hub.__doc__ is not None:
FeatureExtractionMixin.push_to_hub.__doc__ = FeatureExtractionMixin.push_to_hub.__doc__.format(
object="feature extractor", object_class="AutoFeatureExtractor", object_files="feature extractor file"
)
|
FeatureExtractionMixin
|
python
|
doocs__leetcode
|
solution/2500-2599/2576.Find the Maximum Number of Marked Indices/Solution.py
|
{
"start": 0,
"end": 241
}
|
class ____:
def maxNumOfMarkedIndices(self, nums: List[int]) -> int:
nums.sort()
i, n = 0, len(nums)
for x in nums[(n + 1) // 2 :]:
if nums[i] * 2 <= x:
i += 1
return i * 2
|
Solution
|
python
|
numpy__numpy
|
numpy/testing/tests/test_utils.py
|
{
"start": 45280,
"end": 47686
}
|
class ____:
def test_warn(self):
def f():
warnings.warn("yo")
return 3
before_filters = sys.modules['warnings'].filters[:]
assert_equal(assert_warns(UserWarning, f), 3)
after_filters = sys.modules['warnings'].filters
assert_raises(AssertionError, assert_no_warnings, f)
assert_equal(assert_no_warnings(lambda x: x, 1), 1)
# Check that the warnings state is unchanged
assert_equal(before_filters, after_filters,
"assert_warns does not preserver warnings state")
def test_context_manager(self):
before_filters = sys.modules['warnings'].filters[:]
with assert_warns(UserWarning):
warnings.warn("yo")
after_filters = sys.modules['warnings'].filters
def no_warnings():
with assert_no_warnings():
warnings.warn("yo")
assert_raises(AssertionError, no_warnings)
assert_equal(before_filters, after_filters,
"assert_warns does not preserver warnings state")
def test_args(self):
def f(a=0, b=1):
warnings.warn("yo")
return a + b
assert assert_warns(UserWarning, f, b=20) == 20
with pytest.raises(RuntimeError) as exc:
# assert_warns cannot do regexp matching, use pytest.warns
with assert_warns(UserWarning, match="A"):
warnings.warn("B", UserWarning)
assert "assert_warns" in str(exc)
assert "pytest.warns" in str(exc)
with pytest.raises(RuntimeError) as exc:
# assert_warns cannot do regexp matching, use pytest.warns
with assert_warns(UserWarning, wrong="A"):
warnings.warn("B", UserWarning)
assert "assert_warns" in str(exc)
assert "pytest.warns" not in str(exc)
def test_warn_wrong_warning(self):
def f():
warnings.warn("yo", DeprecationWarning)
failed = False
with warnings.catch_warnings():
warnings.simplefilter("error", DeprecationWarning)
try:
# Should raise a DeprecationWarning
assert_warns(UserWarning, f)
failed = True
except DeprecationWarning:
pass
if failed:
raise AssertionError("wrong warning caught by assert_warn")
|
TestWarns
|
python
|
Lightning-AI__lightning
|
tests/tests_pytorch/checkpointing/test_model_checkpoint.py
|
{
"start": 42709,
"end": 42896
}
|
class ____(Callback):
def on_validation_end(self, trainer, pl_module):
if not trainer.sanity_checking:
raise RuntimeError("Trouble!")
|
TroubledCallbackOnValidationEnd
|
python
|
tornadoweb__tornado
|
tornado/test/web_test.py
|
{
"start": 115754,
"end": 116980
}
|
class ____(WebTestCase):
def get_handlers(self):
class RemoveSlashHandler(RequestHandler):
@removeslash
def get(self):
pass
class AddSlashHandler(RequestHandler):
@addslash
def get(self):
pass
return [("/removeslash/", RemoveSlashHandler), ("/addslash", AddSlashHandler)]
def test_removeslash(self):
response = self.fetch("/removeslash/", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers["Location"], "/removeslash")
response = self.fetch("/removeslash/?foo=bar", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers["Location"], "/removeslash?foo=bar")
def test_addslash(self):
response = self.fetch("/addslash", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers["Location"], "/addslash/")
response = self.fetch("/addslash?foo=bar", follow_redirects=False)
self.assertEqual(response.code, 301)
self.assertEqual(response.headers["Location"], "/addslash/?foo=bar")
|
DecoratorTest
|
python
|
getsentry__sentry
|
src/sentry/integrations/cursor/models.py
|
{
"start": 341,
"end": 436
}
|
class ____(BaseModel):
url: str
secret: str | None = None
|
CursorAgentLaunchRequestWebhook
|
python
|
ansible__ansible
|
lib/ansible/module_utils/facts/network/generic_bsd.py
|
{
"start": 790,
"end": 12595
}
|
class ____(Network):
"""
This is a generic BSD subclass of Network using the ifconfig command.
It defines
- interfaces (a list of interface names)
- interface_<name> dictionary of ipv4, ipv6, and mac address information.
- all_ipv4_addresses and all_ipv6_addresses: lists of all configured addresses.
"""
platform = 'Generic_BSD_Ifconfig'
def populate(self, collected_facts=None):
network_facts = {}
ifconfig_path = self.module.get_bin_path('ifconfig')
if ifconfig_path is None:
return network_facts
route_path = self.module.get_bin_path('route')
if route_path is None:
return network_facts
default_ipv4, default_ipv6 = self.get_default_interfaces(route_path)
interfaces, ips = self.get_interfaces_info(ifconfig_path)
interfaces = self.detect_type_media(interfaces)
self.merge_default_interface(default_ipv4, interfaces, 'ipv4')
self.merge_default_interface(default_ipv6, interfaces, 'ipv6')
network_facts['interfaces'] = sorted(list(interfaces.keys()))
for iface in interfaces:
network_facts[iface] = interfaces[iface]
network_facts['default_ipv4'] = default_ipv4
network_facts['default_ipv6'] = default_ipv6
network_facts['all_ipv4_addresses'] = ips['all_ipv4_addresses']
network_facts['all_ipv6_addresses'] = ips['all_ipv6_addresses']
return network_facts
def detect_type_media(self, interfaces):
for iface in interfaces:
if 'media' in interfaces[iface]:
if 'ether' in interfaces[iface]['media'].lower():
interfaces[iface]['type'] = 'ether'
return interfaces
def get_default_interfaces(self, route_path):
# Use the commands:
# route -n get default
# route -n get -inet6 default
# to find out the default outgoing interface, address, and gateway
command = dict(v4=[route_path, '-n', 'get', 'default'],
v6=[route_path, '-n', 'get', '-inet6', 'default'])
interface = dict(v4={}, v6={})
for v in 'v4', 'v6':
if v == 'v6' and not socket.has_ipv6:
continue
rc, out, err = self.module.run_command(command[v])
if not out:
# v6 routing may result in
# RTNETLINK answers: Invalid argument
continue
for line in out.splitlines():
words = line.strip().split(': ')
# Collect output from route command
if len(words) > 1:
if words[0] == 'interface':
interface[v]['interface'] = words[1]
if words[0] == 'gateway':
interface[v]['gateway'] = words[1]
# help pick the right interface address on OpenBSD
if words[0] == 'if address':
interface[v]['address'] = words[1]
# help pick the right interface address on NetBSD
if words[0] == 'local addr':
interface[v]['address'] = words[1]
return interface['v4'], interface['v6']
def get_interfaces_info(self, ifconfig_path, ifconfig_options='-a'):
interfaces = {}
current_if = {}
ips = dict(
all_ipv4_addresses=[],
all_ipv6_addresses=[],
)
# FreeBSD, DragonflyBSD, NetBSD, OpenBSD and macOS all implicitly add '-a'
# when running the command 'ifconfig'.
# Solaris must explicitly run the command 'ifconfig -a'.
rc, out, err = self.module.run_command([ifconfig_path, ifconfig_options])
for line in out.splitlines():
if line:
words = line.split()
if words[0] == 'pass':
continue
elif re.match(r'^\S', line) and len(words) > 3:
current_if = self.parse_interface_line(words)
interfaces[current_if['device']] = current_if
elif words[0].startswith('options='):
self.parse_options_line(words, current_if, ips)
elif words[0] == 'nd6':
self.parse_nd6_line(words, current_if, ips)
elif words[0] == 'ether':
self.parse_ether_line(words, current_if, ips)
elif words[0] == 'media:':
self.parse_media_line(words, current_if, ips)
elif words[0] == 'status:':
self.parse_status_line(words, current_if, ips)
elif words[0] == 'lladdr':
self.parse_lladdr_line(words, current_if, ips)
elif words[0] == 'inet':
self.parse_inet_line(words, current_if, ips)
elif words[0] == 'inet6':
self.parse_inet6_line(words, current_if, ips)
elif words[0] == 'tunnel':
self.parse_tunnel_line(words, current_if, ips)
else:
self.parse_unknown_line(words, current_if, ips)
return interfaces, ips
def parse_interface_line(self, words):
device = words[0][0:-1]
current_if = {'device': device, 'ipv4': [], 'ipv6': [], 'type': 'unknown'}
current_if['flags'] = self.get_options(words[1])
if 'LOOPBACK' in current_if['flags']:
current_if['type'] = 'loopback'
current_if['macaddress'] = 'unknown' # will be overwritten later
if len(words) >= 5: # Newer FreeBSD versions
current_if['metric'] = words[3]
current_if['mtu'] = words[5]
else:
current_if['mtu'] = words[3]
return current_if
def parse_options_line(self, words, current_if, ips):
# Mac has options like this...
current_if['options'] = self.get_options(words[0])
def parse_nd6_line(self, words, current_if, ips):
# FreeBSD has options like this...
current_if['options'] = self.get_options(words[1])
def parse_ether_line(self, words, current_if, ips):
current_if['macaddress'] = words[1]
current_if['type'] = 'ether'
def parse_media_line(self, words, current_if, ips):
# not sure if this is useful - we also drop information
current_if['media'] = words[1]
if len(words) > 2:
current_if['media_select'] = words[2]
if len(words) > 3:
current_if['media_type'] = words[3][1:]
if len(words) > 4:
current_if['media_options'] = self.get_options(words[4])
def parse_status_line(self, words, current_if, ips):
current_if['status'] = words[1]
def parse_lladdr_line(self, words, current_if, ips):
current_if['lladdr'] = words[1]
def parse_inet_line(self, words, current_if, ips):
# netbsd show aliases like this
# lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 33184
# inet 127.0.0.1 netmask 0xff000000
# inet alias 127.1.1.1 netmask 0xff000000
if words[1] == 'alias':
del words[1]
address = {'address': words[1]}
# cidr style ip address (eg, 127.0.0.1/24) in inet line
# used in netbsd ifconfig -e output after 7.1
if '/' in address['address']:
ip_address, cidr_mask = address['address'].split('/')
address['address'] = ip_address
netmask_length = int(cidr_mask)
netmask_bin = (1 << 32) - (1 << 32 >> int(netmask_length))
address['netmask'] = socket.inet_ntoa(struct.pack('!L', netmask_bin))
if len(words) > 5:
address['broadcast'] = words[3]
else:
# Don't just assume columns, use "netmask" as the index for the prior column
try:
netmask_idx = words.index('netmask') + 1
except ValueError:
netmask_idx = 3
# deal with hex netmask
if re.match('([0-9a-f]){8}$', words[netmask_idx]):
netmask = '0x' + words[netmask_idx]
else:
netmask = words[netmask_idx]
if netmask.startswith('0x'):
address['netmask'] = socket.inet_ntoa(struct.pack('!L', int(netmask, base=16)))
else:
# otherwise assume this is a dotted quad
address['netmask'] = netmask
# calculate the network
address_bin = struct.unpack('!L', socket.inet_aton(address['address']))[0]
netmask_bin = struct.unpack('!L', socket.inet_aton(address['netmask']))[0]
address['network'] = socket.inet_ntoa(struct.pack('!L', address_bin & netmask_bin))
if 'broadcast' not in address:
# broadcast may be given or we need to calculate
try:
broadcast_idx = words.index('broadcast') + 1
except ValueError:
address['broadcast'] = socket.inet_ntoa(struct.pack('!L', address_bin | (~netmask_bin & 0xffffffff)))
else:
address['broadcast'] = words[broadcast_idx]
# add to our list of addresses
if not words[1].startswith('127.'):
ips['all_ipv4_addresses'].append(address['address'])
current_if['ipv4'].append(address)
def parse_inet6_line(self, words, current_if, ips):
    """Parse one `inet6` line and record the IPv6 address on the interface.

    Handles both the CIDR form (``addr/prefix``, NetBSD >= 7.1) and the
    classic ``prefixlen``/``scopeid`` column form.  Mutates *current_if*
    and *ips* in place; link-local/loopback addresses are kept on the
    interface but excluded from the global address list.
    """
    entry = {'address': words[1]}
    if '/' in entry['address']:
        # CIDR style: split "addr/prefix" into its two parts.
        addr_part, prefix_part = entry['address'].split('/')
        entry['address'] = addr_part
        entry['prefix'] = prefix_part
        if len(words) > 5:
            entry['scope'] = words[5]
    else:
        # Column style: "... prefixlen N [... scopeid S]".
        if len(words) >= 4 and words[2] == 'prefixlen':
            entry['prefix'] = words[3]
        if len(words) >= 6 and words[4] == 'scopeid':
            entry['scope'] = words[5]
    if entry['address'] not in ('::1', '::1/128', 'fe80::1%lo0'):
        ips['all_ipv6_addresses'].append(entry['address'])
    current_if['ipv6'].append(entry)
def parse_tunnel_line(self, words, current_if, ips):
    """Mark the current interface as a tunnel device."""
    current_if['type'] = 'tunnel'
def parse_unknown_line(self, words, current_if, ips):
    """Ignore an unrecognised ifconfig line.

    Deliberately a no-op — skipping unknown output may lose information,
    but subclasses can override this to handle platform-specific lines.
    """
    pass
# TODO: these are module scope static function candidates
# (most of the class is really...)
def get_options(self, option_string):
    """Extract the comma-separated flag names between '<' and '>'.

    For input like ``flags=8049<UP,LOOPBACK,RUNNING>`` returns
    ``['UP', 'LOOPBACK', 'RUNNING']``; returns ``[]`` when the brackets
    are missing or enclose fewer than two characters.
    """
    start = option_string.find('<') + 1
    end = option_string.rfind('>')
    # Guard clauses: no '<', no '>', or nothing meaningful between them.
    if start <= 0 or end <= 0 or end <= start + 1:
        return []
    return option_string[start:end].split(',')
def merge_default_interface(self, defaults, interfaces, ip_type):
    """Fold details of the default interface into *defaults*, in place.

    Copies every non-address fact from the matching entry of *interfaces*,
    then merges one address record of the given *ip_type* ('ipv4'/'ipv6'),
    preferring the record whose address equals ``defaults['address']``.
    Returns None; no-op if the default interface is unknown.
    """
    if 'interface' not in defaults or defaults['interface'] not in interfaces:
        return
    ifinfo = interfaces[defaults['interface']]
    # Copy every interface-level fact except the address lists themselves.
    for key, value in ifinfo.items():
        if key not in ('ipv4', 'ipv6'):
            defaults[key] = value
    # Prefer the address record matching the known default address, if any.
    candidates = []
    if 'address' in defaults:
        candidates = [rec for rec in ifinfo[ip_type]
                      if rec['address'] == defaults['address']]
    if not candidates:
        candidates = ifinfo[ip_type]
    if candidates:
        defaults.update(candidates[0])
|
GenericBsdIfconfigNetwork
|
python
|
dagster-io__dagster
|
python_modules/libraries/dagster-msteams/dagster_msteams/resources.py
|
{
"start": 243,
"end": 3771
}
|
class ____(ConfigurableResource):
    """This resource is for connecting to Microsoft Teams.

    Provides a `dagster_msteams.TeamsClient` which can be used to
    interface with the MS Teams API.

    By configuring this resource, you can post messages to MS Teams from any Dagster op,
    asset, schedule, or sensor:

    Examples:
        .. code-block:: python

            import os

            from dagster import op, job, Definitions, EnvVar
            from dagster_msteams import Card, MSTeamsResource


            @op
            def teams_op(msteams: MSTeamsResource):
                card = Card()
                card.add_attachment(text_message="Hello There !!")
                msteams.get_client().post_message(payload=card.payload)


            @job
            def teams_job():
                teams_op()

            Definitions(
                jobs=[teams_job],
                resources={
                    "msteams": MSTeamsResource(
                        hook_url=EnvVar("TEAMS_WEBHOOK_URL")
                    )
                }
            )
    """

    # Required incoming-webhook URL; all other fields are optional knobs
    # forwarded verbatim to TeamsClient in get_client().
    hook_url: str = Field(
        description=(
            "To send messages to MS Teams channel, an incoming webhook has to be created. The"
            " incoming webhook url must be given as a part of the resource config to the"
            " MSTeamsResource in Dagster. For more information on how to create an incoming"
            " webhook, see"
            " https://docs.microsoft.com/en-us/microsoftteams/platform/webhooks-and-connectors/how-to/add-incoming-webhook"
        ),
    )
    http_proxy: Optional[str] = Field(default=None, description="HTTP proxy URL")
    https_proxy: Optional[str] = Field(default=None, description="HTTPS proxy URL")
    timeout: float = Field(default=60, description="Timeout for requests to MS Teams")
    verify: bool = Field(
        default=True, description="Whether to verify SSL certificates, defaults to True"
    )

    @classmethod
    def _is_dagster_maintained(cls) -> bool:
        # Marks this resource as maintained by the Dagster core team.
        return True

    def get_client(self) -> TeamsClient:
        """Build a ``TeamsClient`` configured from this resource's fields."""
        return TeamsClient(
            hook_url=self.hook_url,
            http_proxy=self.http_proxy,
            https_proxy=self.https_proxy,
            timeout=self.timeout,
            verify=self.verify,
        )
@dagster_maintained_resource
@resource(
    config_schema=MSTeamsResource.to_config_schema(),
    description="This resource is for connecting to MS Teams",
)
def msteams_resource(context) -> TeamsClient:
    """This resource is for connecting to Microsoft Teams.

    The resource object is a `dagster_msteams.TeamsClient`.

    By configuring this resource, you can post messages to MS Teams from any Dagster solid:

    Examples:
        .. code-block:: python

            import os

            from dagster import op, job
            from dagster_msteams import Card, msteams_resource


            @op(required_resource_keys={"msteams"})
            def teams_op(context):
                card = Card()
                card.add_attachment(text_message="Hello There !!")
                context.resources.msteams.post_message(payload=card.payload)


            @job(resource_defs={"msteams": msteams_resource})
            def teams_job():
                teams_op()

            teams_job.execute_in_process(
                {"resources": {"msteams": {"config": {"hook_url": os.getenv("TEAMS_WEBHOOK_URL")}}}}
            )
    """
    # Function-style resource shim: delegate to the Pythonic MSTeamsResource.
    return MSTeamsResource.from_resource_context(context).get_client()
|
MSTeamsResource
|
python
|
kamyu104__LeetCode-Solutions
|
Python/all-ancestors-of-a-node-in-a-directed-acyclic-graph.py
|
{
"start": 911,
"end": 1877
}
|
class ____(object):
def getAncestors(self, n, edges):
"""
:type n: int
:type edges: List[List[int]]
:rtype: List[List[int]]
"""
def bfs(adj, i, result):
lookup = [False]*len(adj)
q = [i]
lookup[i] = True
while q:
new_q = []
for u in q:
for v in adj[u]:
if lookup[v]:
continue
lookup[v] = True
new_q.append(v)
result[i].append(v)
q = new_q
result[i].sort()
adj = [[] for _ in xrange(n)]
for u, v in edges:
adj[v].append(u)
result = [[] for _ in xrange(n)]
for u in xrange(n):
bfs(adj, u, result)
return result
# Time: O(|V| * |E| * log(|V| * |E|))
# Space: O(|V| + |E|)
# topological sort
|
Solution2
|
python
|
google__jax
|
jax/_src/api.py
|
{
"start": 103901,
"end": 109463
}
|
class ____:
    # Residual specifier: tells the backward pass where one residual lives.
    idx: int      # index into the primal inputs (primal=True) or the opaque residual list
    primal: bool  # True -> residual is a primal input; False -> stored opaque residual


# Backwards-compatible short alias for saved_input_vjp.
si_vjp = saved_input_vjp
def vjp3(f, *primals, **kwargs) if False else None  # placeholder removed
def _is_ref(x):
    """Return True if *x*'s JAX type is a mutable array reference (AbstractRef)."""
    from jax._src.state.types import AbstractRef
    try:
        return isinstance(typeof(x), AbstractRef)
    # Fix: a bare `except:` also swallowed KeyboardInterrupt/SystemExit.
    # typeof() raising for a value with no JAX type just means "not a ref".
    except Exception:
        return False
def _vjp3_callable(spec, out_known, jaxpr, out_primal_avals, in_tree, out_tree,
                   args_res, opaque_res, *maybe_ct_refs):
    """Reassemble residuals per *spec* and return the bound backward function.

    *maybe_ct_refs*, when given, must mirror the primal input tree; each leaf
    selects value- vs ref-style cotangent accumulation for that input.
    """
    if not maybe_ct_refs:
        # No explicit cotangent refs: accumulate plain gradient values.
        maybe_ct_refs_flat = [GradValue()] * in_tree.num_leaves
    else:
        maybe_ct_refs_flat, in_tree_ = tree_flatten(maybe_ct_refs)
        if in_tree != in_tree_: raise Exception  # TODO accept isomorph tuple tree
    args_res_ = tree_leaves(args_res, is_leaf=lambda x: isinstance(x, NotNeeded))
    # Each RSpec says whether the residual comes from the saved primal inputs
    # or from the opaque residual list.
    residuals = [args_res_[i.idx] if i.primal else opaque_res[i.idx] for i in spec]
    maybe_refs = [ad.RefAccum(v.aval, x) if _is_ref(x) else ad.ValAccum(v.aval)
                  for v, x in zip(jaxpr.invars, maybe_ct_refs_flat)]
    return Partial(partial(_vjp3_bwd, in_tree, out_tree, out_known, jaxpr,
                           out_primal_avals), residuals, maybe_refs)


def _vjp3_bwd(in_tree, out_tree, out_known, jaxpr, out_primal_avals, residuals,
              maybe_refs, out_ct):
    """Run the backward pass on *out_ct* and return input cotangents."""
    cts_flat, out_tree_ = tree_flatten(out_ct)
    # Validate the caller-supplied cotangent structure/types before running.
    if out_tree != out_tree_: _vjp_ct_tree_error(jaxpr, out_tree, out_tree_)
    _vjp_check_ct_avals(cts_flat, out_primal_avals)
    # Known (constant) outputs contribute no cotangent.
    cts_flat = [ct for ct, k in zip(cts_flat, out_known) if not k]
    ad.backward_pass3(jaxpr, True, residuals, maybe_refs, cts_flat)
    # Ref-style accumulators were written in place; value accumulators are frozen.
    arg_cts = [x.freeze() if isinstance(x, ad.ValAccum) else GradRef()
               for x in maybe_refs]
    arg_cts = map(ad.instantiate_zeros, arg_cts)
    return tree_unflatten(in_tree, arg_cts)


# Error template (note the trailing .format: this name is a bound method).
_vjp_too_many_args = """
The function returned by `jax.vjp` applied to {} was called with {} arguments,
but functions returned by `jax.vjp` must be called with a single argument
corresponding to the single value returned by the function being differentiated
(even if that returned value is a tuple or other container).

For example, if we have:

  def f(x):
    return (x, x)
  _, f_vjp = jax.vjp(f, 1.0)

the function `f` returns a single tuple as output, and so we call `f_vjp` with a
single tuple as its argument:

  x_bar, = f_vjp((2.0, 2.0))

If we instead call `f_vjp(2.0, 2.0)`, with the values 'splatted out' as
arguments rather than in a tuple, this error can arise.
""".format


def _vjp_ct_tree_error(jaxpr, out_tree, ct_tree):
    """Raise a descriptive ValueError for a mismatched cotangent pytree."""
    msg = f"""unexpected tree structure.

The argument to a VJP function returned by `jax.vjp` must match the pytree
structure of the differentiated function {jaxpr.debug_info.func_src_info}.
But the tree structures differ:
"""
    msg += '\n'.join(f"  * out{keystr(path)} was a {thing1} in the original "
                     f" output, but a {thing2} here, so {explanation}."
                     for path, thing1, thing2, explanation
                     in equality_errors_pytreedef(out_tree, ct_tree))
    raise ValueError(msg)


def _vjp_check_ct_avals(cts, primal_avals):
    """Check each cotangent's JAX type against the expected cotangent aval."""
    # TODO(mattjj): improve this error by flattening with keys in the first place
    for ct, aval in zip(cts, primal_avals):
        ct_aval = typeof(ct)
        ct_aval_expected = (
            aval.to_cotangent_aval() if hasattr(aval, 'to_cotangent_aval') else
            aval.to_tangent_aval())
        if (not core.typecompat(ct_aval, ct_aval_expected) and
            not _temporary_dtype_exception(ct_aval, ct_aval_expected)):
            raise ValueError(
                "unexpected JAX type (e.g. shape/dtype) for argument to VJP function: "
                f"got {ct_aval.str_short()}, but expected {ct_aval_expected.str_short()} "
                "because the corresponding output of the differentiated function had JAX type "
                f"{aval.str_short()}")
@register_dataclass
@dataclasses.dataclass(frozen=True)
|
RSpec
|
python
|
PrefectHQ__prefect
|
tests/cli/cloud/test_cloud.py
|
{
"start": 36912,
"end": 45420
}
|
class ____:
    """Tests for `prefect cloud workspace ls` against a mocked Cloud API."""

    @pytest.fixture
    def workspaces(self, respx_mock: respx.MockRouter):
        # Two workspaces in two accounts; the saved profile points at `foo`,
        # making it the "active" workspace for the ls output.
        foo_workspace = gen_test_workspace(
            account_handle="test1", workspace_handle="foo"
        )
        bar_workspace = gen_test_workspace(
            account_handle="test2", workspace_handle="bar"
        )
        respx_mock.get(PREFECT_CLOUD_API_URL.value() + "/me/workspaces").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK,
                json=[
                    foo_workspace.model_dump(mode="json"),
                    bar_workspace.model_dump(mode="json"),
                ],
            )
        )
        cloud_profile = "cloud-foo"
        save_profiles(
            ProfilesCollection(
                [
                    Profile(
                        name=cloud_profile,
                        settings={
                            PREFECT_API_URL: foo_workspace.api_url(),
                            PREFECT_API_KEY: "fake-key",
                        },
                    )
                ],
                active=None,
            )
        )
        with use_profile(cloud_profile):
            yield foo_workspace, bar_workspace

    def test_ls(self, workspaces: tuple[Workspace, Workspace]):
        # The active workspace (foo) is marked with a leading '*'.
        _, _ = workspaces
        invoke_and_assert(
            ["cloud", "workspace", "ls"],
            expected_code=0,
            expected_output_contains=[
                "* test1/foo",
                "test2/bar",
            ],
        )

    def test_ls_without_active_workspace(self, workspaces: tuple[Workspace, Workspace]):
        """
        Regression test for https://github.com/PrefectHQ/prefect/issues/16098
        """
        _, _ = workspaces
        # A profile whose API URL matches no known workspace: nothing starred.
        wonky_profile = "wonky-profile"
        save_profiles(
            ProfilesCollection(
                [
                    Profile(
                        name=wonky_profile,
                        settings={
                            PREFECT_API_URL: "http://something-else.com/api",
                            PREFECT_API_KEY: "fake-key",
                        },
                    )
                ],
                active=None,
            )
        )
        with use_profile(wonky_profile):
            invoke_and_assert(
                ["cloud", "workspace", "ls"],
                expected_code=0,
                expected_output_contains=[
                    "test1/foo",
                    "test2/bar",
                ],
                expected_output_does_not_contain=[
                    "* test1/foo",
                    "* test2/bar",
                ],
            )
@pytest.mark.usefixtures("interactive_console")
def test_set_workspace_with_go_back_to_account_selection():
    """`workspace set` supports returning to account selection and then
    picking a workspace in a different account."""
    # Create workspaces in different accounts - need more than 10 total to trigger account selection
    account1_id = uuid.uuid4()
    account2_id = uuid.uuid4()
    # Create 6 workspaces for account1
    account1_workspaces: list[Workspace] = []
    for i in range(1, 7):
        workspace = gen_test_workspace(
            account_handle="account1",
            workspace_handle=f"workspace{i}",
            account_id=account1_id,
        )
        account1_workspaces.append(workspace)
    # Create 6 workspaces for account2
    account2_workspaces: list[Workspace] = []
    for i in range(1, 7):
        workspace = gen_test_workspace(
            account_handle="account2",
            workspace_handle=f"workspace{i}",
            account_id=account2_id,
        )
        account2_workspaces.append(workspace)
    # Combine all workspaces
    all_workspaces = account1_workspaces + account2_workspaces
    # We'll target selecting the second workspace in account2
    target_workspace = account2_workspaces[1]  # workspace2 in account2
    with respx.mock(
        using="httpx", base_url=PREFECT_CLOUD_API_URL.value()
    ) as respx_mock:
        respx_mock.get("/me/workspaces").mock(
            return_value=httpx.Response(
                status.HTTP_200_OK,
                json=[
                    workspace.model_dump(mode="json") for workspace in all_workspaces
                ],
            )
        )
        cloud_profile = "cloud-foo"
        save_profiles(
            ProfilesCollection(
                [
                    Profile(
                        name=cloud_profile,
                        settings={
                            PREFECT_API_URL: account1_workspaces[0].api_url(),
                            PREFECT_API_KEY: "fake-key",
                        },
                    )
                ],
                active=None,
            )
        )
        with use_profile(cloud_profile):
            invoke_and_assert(
                ["cloud", "workspace", "set"],
                expected_code=0,
                user_input=(
                    # First select account1
                    readchar.key.ENTER
                    # Then select "Go back to account selection" option (last option) - using UP once
                    + readchar.key.UP
                    + readchar.key.ENTER
                    # Now select account2
                    + readchar.key.DOWN
                    + readchar.key.ENTER
                    # Select workspace2 in account2
                    + readchar.key.DOWN
                    + readchar.key.ENTER
                ),
                expected_output_contains=[
                    "Which account would you like to use?",
                    "Which workspace would you like to use?",
                    "Go back to account selection",
                    f"Successfully set workspace to {target_workspace.handle!r} in profile {cloud_profile!r}.",
                ],
            )
        # The profile must now point at the newly selected workspace.
        profiles = load_profiles()
        assert profiles[cloud_profile].settings == {
            PREFECT_API_URL: target_workspace.api_url(),
            PREFECT_API_KEY: "fake-key",
        }
@pytest.mark.usefixtures("interactive_console")
def test_login_with_go_back_to_account_selection(respx_mock: respx.MockRouter):
    """`cloud login` supports returning to account selection before choosing
    a workspace in a different account."""
    # Create workspaces in different accounts - need more than 10 total to trigger account selection
    account1_id = uuid.uuid4()
    account2_id = uuid.uuid4()
    # Create 6 workspaces for account1
    account1_workspaces: list[Workspace] = []
    for i in range(1, 7):
        workspace = gen_test_workspace(
            account_handle="account1",
            workspace_handle=f"workspace{i}",
            account_id=account1_id,
        )
        account1_workspaces.append(workspace)
    # Create 6 workspaces for account2
    account2_workspaces: list[Workspace] = []
    for i in range(1, 7):
        workspace = gen_test_workspace(
            account_handle="account2",
            workspace_handle=f"workspace{i}",
            account_id=account2_id,
        )
        account2_workspaces.append(workspace)
    all_workspaces = account1_workspaces + account2_workspaces
    target_workspace = account2_workspaces[1]  # workspace2 in account2
    respx_mock.get(PREFECT_CLOUD_API_URL.value() + "/me/workspaces").mock(
        return_value=httpx.Response(
            status.HTTP_200_OK,
            json=[workspace.model_dump(mode="json") for workspace in all_workspaces],
        )
    )
    invoke_and_assert(
        ["cloud", "login"],
        expected_code=0,
        user_input=(
            # Select paste a key
            readchar.key.DOWN
            + readchar.key.ENTER
            # Send a key
            + "foo"
            + readchar.key.ENTER
            # First select account1
            + readchar.key.ENTER
            # Then select "Go back to account selection" option (last option) - using UP once
            + readchar.key.UP
            + readchar.key.ENTER
            # Now select account2
            + readchar.key.DOWN
            + readchar.key.ENTER
            # Select workspace2 in account2
            + readchar.key.DOWN
            + readchar.key.ENTER
        ),
        expected_output_contains=[
            "Paste your API key:",
            "Which account would you like to use?",
            "Which workspace would you like to use?",
            "Go back to account selection",
            f"Authenticated with Prefect Cloud! Using workspace {target_workspace.handle!r}.",
        ],
    )
    # Login must persist both the pasted key and the chosen workspace URL.
    settings = load_current_profile().settings
    assert settings[PREFECT_API_KEY] == "foo"
    assert settings[PREFECT_API_URL] == target_workspace.api_url()
|
TestCloudWorkspaceLs
|
python
|
sympy__sympy
|
sympy/physics/quantum/circuitplot.py
|
{
"start": 1163,
"end": 10742
}
|
class ____:
    """A class for managing a circuit plot."""

    # Plot geometry/appearance knobs; all can be overridden via **kwargs.
    scale = 1.0           # grid spacing between wires/gates, in data units
    fontsize = 20.0
    linewidth = 1.0
    control_radius = 0.05  # radius of filled control dots
    not_radius = 0.15      # radius of the circled-plus NOT symbol
    swap_delta = 0.05      # half-size of the 'x' drawn for swap points
    # NOTE(review): mutable class-level defaults shared by all instances
    # unless overridden per-instance via kwargs — confirm intentional.
    labels: list[str] = []
    inits: dict[str, str] = {}
    label_buffer = 0.5     # horizontal offset of wire labels from the wires

    def __init__(self, c, nqubits, **kwargs):
        if not np or not matplotlib:
            raise ImportError('numpy or matplotlib not available.')
        self.circuit = c
        self.ngates = len(self.circuit.args)
        self.nqubits = nqubits
        self.update(kwargs)
        # Build the full figure eagerly: grid -> figure -> wires -> gates.
        self._create_grid()
        self._create_figure()
        self._plot_wires()
        self._plot_gates()
        self._finish()

    def update(self, kwargs):
        """Load the kwargs into the instance dict."""
        self.__dict__.update(kwargs)

    def _create_grid(self):
        """Create the grid of wires."""
        scale = self.scale
        wire_grid = np.arange(0.0, self.nqubits*scale, scale, dtype=float)
        gate_grid = np.arange(0.0, self.ngates*scale, scale, dtype=float)
        self._wire_grid = wire_grid
        self._gate_grid = gate_grid

    def _create_figure(self):
        """Create the main matplotlib figure."""
        self._figure = pyplot.figure(
            figsize=(self.ngates*self.scale, self.nqubits*self.scale),
            facecolor='w',
            edgecolor='w'
        )
        ax = self._figure.add_subplot(
            1, 1, 1,
            frameon=True
        )
        ax.set_axis_off()
        offset = 0.5*self.scale
        ax.set_xlim(self._gate_grid[0] - offset, self._gate_grid[-1] + offset)
        ax.set_ylim(self._wire_grid[0] - offset, self._wire_grid[-1] + offset)
        ax.set_aspect('equal')
        self._axes = ax

    def _plot_wires(self):
        """Plot the wires of the circuit diagram."""
        xstart = self._gate_grid[0]
        xstop = self._gate_grid[-1]
        xdata = (xstart - self.scale, xstop + self.scale)
        for i in range(self.nqubits):
            ydata = (self._wire_grid[i], self._wire_grid[i])
            line = Line2D(
                xdata, ydata,
                color='k',
                lw=self.linewidth
            )
            self._axes.add_line(line)
            if self.labels:
                # Shift labels further left when an initial-value ket is shown.
                init_label_buffer = 0
                if self.inits.get(self.labels[i]): init_label_buffer = 0.25
                self._axes.text(
                    xdata[0]-self.label_buffer-init_label_buffer,ydata[0],
                    render_label(self.labels[i],self.inits),
                    size=self.fontsize,
                    color='k',ha='center',va='center')
        self._plot_measured_wires()

    def _plot_measured_wires(self):
        """Double (classical) wires after the gate where they are measured."""
        ismeasured = self._measurements()
        xstop = self._gate_grid[-1]
        dy = 0.04  # amount to shift wires when doubled
        # Plot doubled wires after they are measured
        for im in ismeasured:
            xdata = (self._gate_grid[ismeasured[im]],xstop+self.scale)
            ydata = (self._wire_grid[im]+dy,self._wire_grid[im]+dy)
            line = Line2D(
                xdata, ydata,
                color='k',
                lw=self.linewidth
            )
            self._axes.add_line(line)
        # Also double any controlled lines off these wires
        for i,g in enumerate(self._gates()):
            if isinstance(g, (CGate, CGateS)):
                wires = g.controls + g.targets
                for wire in wires:
                    if wire in ismeasured and \
                       self._gate_grid[i] > self._gate_grid[ismeasured[wire]]:
                        # Vertical control line shifted by dy, spanning the gate's wires.
                        ydata = min(wires), max(wires)
                        xdata = self._gate_grid[i]-dy, self._gate_grid[i]-dy
                        line = Line2D(
                            xdata, ydata,
                            color='k',
                            lw=self.linewidth
                        )
                        self._axes.add_line(line)

    def _gates(self):
        """Create a list of all gates in the circuit plot."""
        gates = []
        # Mul args are in application (right-to-left) order; reverse so gates
        # are returned in time order.
        if isinstance(self.circuit, Mul):
            for g in reversed(self.circuit.args):
                if isinstance(g, Gate):
                    gates.append(g)
        elif isinstance(self.circuit, Gate):
            gates.append(self.circuit)
        return gates

    def _plot_gates(self):
        """Iterate through the gates and plot each of them."""
        for i, gate in enumerate(self._gates()):
            gate.plot_gate(self, i)

    def _measurements(self):
        """Return a dict ``{i:j}`` where i is the index of the wire that has
        been measured, and j is the gate where the wire is measured.
        """
        ismeasured = {}
        for i,g in enumerate(self._gates()):
            if getattr(g,'measurement',False):
                for target in g.targets:
                    # Keep the earliest gate index at which the wire is measured.
                    if target in ismeasured:
                        if ismeasured[target] > i:
                            ismeasured[target] = i
                    else:
                        ismeasured[target] = i
        return ismeasured

    def _finish(self):
        # Disable clipping to make panning work well for large circuits.
        for o in self._figure.findobj():
            o.set_clip_on(False)

    def one_qubit_box(self, t, gate_idx, wire_idx):
        """Draw a box for a single qubit gate."""
        x = self._gate_grid[gate_idx]
        y = self._wire_grid[wire_idx]
        self._axes.text(
            x, y, t,
            color='k',
            ha='center',
            va='center',
            bbox={"ec": 'k', "fc": 'w', "fill": True, "lw": self.linewidth},
            size=self.fontsize
        )

    def two_qubit_box(self, t, gate_idx, wire_idx):
        """Draw a box for a two qubit gate. Does not work yet.
        """
        # x = self._gate_grid[gate_idx]
        # y = self._wire_grid[wire_idx]+0.5
        print(self._gate_grid)
        print(self._wire_grid)
        # unused:
        # obj = self._axes.text(
        #     x, y, t,
        #     color='k',
        #     ha='center',
        #     va='center',
        #     bbox=dict(ec='k', fc='w', fill=True, lw=self.linewidth),
        #     size=self.fontsize
        # )

    def control_line(self, gate_idx, min_wire, max_wire):
        """Draw a vertical control line."""
        xdata = (self._gate_grid[gate_idx], self._gate_grid[gate_idx])
        ydata = (self._wire_grid[min_wire], self._wire_grid[max_wire])
        line = Line2D(
            xdata, ydata,
            color='k',
            lw=self.linewidth
        )
        self._axes.add_line(line)

    def control_point(self, gate_idx, wire_idx):
        """Draw a control point."""
        x = self._gate_grid[gate_idx]
        y = self._wire_grid[wire_idx]
        radius = self.control_radius
        c = Circle(
            (x, y),
            radius*self.scale,
            ec='k',
            fc='k',
            fill=True,
            lw=self.linewidth
        )
        self._axes.add_patch(c)

    def not_point(self, gate_idx, wire_idx):
        """Draw a NOT gates as the circle with plus in the middle."""
        x = self._gate_grid[gate_idx]
        y = self._wire_grid[wire_idx]
        radius = self.not_radius
        c = Circle(
            (x, y),
            radius,
            ec='k',
            fc='w',
            fill=False,
            lw=self.linewidth
        )
        self._axes.add_patch(c)
        # Vertical bar of the plus sign inside the circle.
        l = Line2D(
            (x, x), (y - radius, y + radius),
            color='k',
            lw=self.linewidth
        )
        self._axes.add_line(l)

    def swap_point(self, gate_idx, wire_idx):
        """Draw a swap point as a cross."""
        x = self._gate_grid[gate_idx]
        y = self._wire_grid[wire_idx]
        d = self.swap_delta
        l1 = Line2D(
            (x - d, x + d),
            (y - d, y + d),
            color='k',
            lw=self.linewidth
        )
        l2 = Line2D(
            (x - d, x + d),
            (y + d, y - d),
            color='k',
            lw=self.linewidth
        )
        self._axes.add_line(l1)
        self._axes.add_line(l2)
def circuit_plot(c, nqubits, **kwargs):
    """Draw the circuit diagram for the circuit with nqubits.

    Parameters
    ==========

    c : circuit
        The circuit to plot. Should be a product of Gate instances.
    nqubits : int
        The number of qubits to include in the circuit. Must be at least
        as big as the largest ``min_qubits`` of the gates.
    """
    # Thin functional wrapper; extra kwargs override CircuitPlot attributes.
    return CircuitPlot(c, nqubits, **kwargs)
def render_label(label, inits=None):
    """Slightly more flexible way to render labels.

    >>> from sympy.physics.quantum.circuitplot import render_label
    >>> render_label('q0')
    '$\\\\left|q0\\\\right\\\\rangle$'
    >>> render_label('q0', {'q0':'0'})
    '$\\\\left|q0\\\\right\\\\rangle=\\\\left|0\\\\right\\\\rangle$'
    """
    # Fix: avoid a mutable default argument; None means "no initial values".
    # Behavior for all callers is unchanged.
    inits = {} if inits is None else inits
    init = inits.get(label)
    if init:
        return r'$\left|%s\right\rangle=\left|%s\right\rangle$' % (label, init)
    return r'$\left|%s\right\rangle$' % label
def labeller(n, symbol='q'):
    """Autogenerate labels for wires of quantum circuits.

    Parameters
    ==========

    n : int
        number of qubits in the circuit.
    symbol : string
        A character string to precede all gate labels. E.g. 'q_0', 'q_1', etc.

    >>> from sympy.physics.quantum.circuitplot import labeller
    >>> labeller(2)
    ['q_1', 'q_0']
    >>> labeller(3,'j')
    ['j_2', 'j_1', 'j_0']
    """
    # Wire labels run from the highest index down to zero.
    return ['%s_%d' % (symbol, idx) for idx in reversed(range(n))]
|
CircuitPlot
|
python
|
numpy__numpy
|
numpy/ma/tests/test_core.py
|
{
"start": 200761,
"end": 202359
}
|
class ____:
    """Tests for masked arrays with object dtype holding array elements."""

    def test_getitem(self):
        # Object arrays can hold ndarrays as single elements; masking one
        # element must not disturb the others, and scalar indexing must
        # return the stored object by identity.
        arr = np.ma.array([None, None])
        for dt in [float, object]:
            a0 = np.eye(2).astype(dt)
            a1 = np.eye(3).astype(dt)
            arr[0] = a0
            arr[1] = a1
            assert_(arr[0] is a0)
            assert_(arr[1] is a1)
            assert_(isinstance(arr[0, ...], MaskedArray))
            assert_(isinstance(arr[1, ...], MaskedArray))
            assert_(arr[0, ...][()] is a0)
            assert_(arr[1, ...][()] is a1)

            arr[0] = np.ma.masked
            assert_(arr[1] is a1)
            assert_(isinstance(arr[0, ...], MaskedArray))
            assert_(isinstance(arr[1, ...], MaskedArray))
            assert_equal(arr[0, ...].mask, True)
            assert_(arr[1, ...][()] is a1)

            # gh-5962 - object arrays of arrays do something special
            assert_equal(arr[0].data, a0)
            assert_equal(arr[0].mask, True)
            assert_equal(arr[0, ...][()].data, a0)
            assert_equal(arr[0, ...][()].mask, True)

    def test_nested_ma(self):
        arr = np.ma.array([None, None])
        # set the first object to be an unmasked masked constant. A little fiddly
        arr[0, ...] = np.array([np.ma.masked], object)[0, ...]
        # check the above line did what we were aiming for
        assert_(arr.data[0] is np.ma.masked)
        # test that getitem returned the value by identity
        assert_(arr[0] is np.ma.masked)
        # now mask the masked value!
        arr[0] = np.ma.masked
        assert_(arr[0] is np.ma.masked)
|
TestMaskedObjectArray
|
python
|
tensorflow__tensorflow
|
tensorflow/python/ops/numpy_ops/np_random_test.py
|
{
"start": 4597,
"end": 4849
}
|
class ____(RandomTestBase):
    # Compares np_random.rand against the NumPy reference onp.random.rand
    # via RandomTestBase._test. NOTE(review): the super() call below
    # expects this class to be named RandTest.

    def setUp(self):
        self.np_func = np_random.rand
        self.onp_func = onp.random.rand
        super(RandTest, self).setUp()

    @parameterized.parameters((), (1,), (1, 2))
    def test(self, *size):
        # Exercise 0-d, 1-d and 2-d output shapes.
        self._test(*size)
RandTest
|
python
|
walkccc__LeetCode
|
solutions/1114. Print in Order/1114.py
|
{
"start": 29,
"end": 536
}
|
class ____:
def __init__(self):
self.firstDone = Lock()
self.secondDone = Lock()
self.firstDone.acquire()
self.secondDone.acquire()
def first(self, printFirst: 'Callable[[], None]') -> None:
printFirst()
self.firstDone.release()
def second(self, printSecond: 'Callable[[], None]') -> None:
self.firstDone.acquire()
printSecond()
self.secondDone.release()
def third(self, printThird: 'Callable[[], None]') -> None:
self.secondDone.acquire()
printThird()
|
Foo
|
python
|
apache__airflow
|
providers/amazon/src/airflow/providers/amazon/aws/sensors/eks.py
|
{
"start": 8574,
"end": 10795
}
|
class ____(EksBaseSensor):
    """
    Check the state of an EKS managed node group until it reaches the target state or another terminal state.

    .. seealso::
        For more information on how to use this sensor, take a look at the guide:
        :ref:`howto/sensor:EksNodegroupStateSensor`

    :param cluster_name: The name of the Cluster which the Nodegroup is attached to. (templated)
    :param nodegroup_name: The name of the Nodegroup to watch. (templated)
    :param target_state: Target state of the Nodegroup. (templated)
    :param aws_conn_id: The Airflow connection used for AWS credentials.
        If this is ``None`` or empty then the default boto3 behaviour is used. If
        running Airflow in a distributed manner and aws_conn_id is None or
        empty, then default boto3 configuration would be used (and must be
        maintained on each worker node).
    :param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
    :param verify: Whether or not to verify SSL certificates. See:
        https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
    """

    template_fields: Sequence[str] = aws_template_fields("cluster_name", "nodegroup_name", "target_state")
    ui_color = "#ff9900"
    ui_fgcolor = "#232F3E"

    def __init__(
        self,
        *,
        nodegroup_name: str,
        target_state: NodegroupStates = NodegroupStates.ACTIVE,
        region: str | None = None,
        **kwargs,
    ):
        # Deprecation shim: `region` is remapped to `region_name` with a warning.
        if region is not None:
            warnings.warn(
                message="Parameter `region` is deprecated. Use the parameter `region_name` instead",
                category=AirflowProviderDeprecationWarning,
                stacklevel=2,
            )
            kwargs["region_name"] = region
        super().__init__(target_state=target_state, target_state_type=NodegroupStates, **kwargs)
        self.nodegroup_name = nodegroup_name

    def get_state(self) -> NodegroupStates:
        """Fetch the nodegroup's current state from the EKS API."""
        return self.hook.get_nodegroup_state(clusterName=self.cluster_name, nodegroupName=self.nodegroup_name)

    def get_terminal_states(self) -> frozenset:
        """States at which polling stops even if the target was not reached."""
        return NODEGROUP_TERMINAL_STATES
|
EksNodegroupStateSensor
|
python
|
spyder-ide__spyder
|
external-deps/qtconsole/qtconsole/ansi_code_processor.py
|
{
"start": 12261,
"end": 15692
}
|
class ____(AnsiCodeProcessor):
    """ Translates ANSI escape codes into QTextCharFormats.
    """

    # A map from ANSI color codes to SVG color names or RGB(A) tuples.
    darkbg_color_map = {
        0  : 'black',       # black
        1  : 'darkred',     # red
        2  : 'darkgreen',   # green
        3  : 'gold',        # yellow
        4  : 'darkblue',    # blue
        5  : 'darkviolet',  # magenta
        6  : 'steelblue',   # cyan
        7  : 'grey',        # white
        8  : 'grey',        # black (bright)
        9  : 'red',         # red (bright)
        10 : 'lime',        # green (bright)
        11 : 'yellow',      # yellow (bright)
        12 : 'deepskyblue', # blue (bright)
        13 : 'magenta',     # magenta (bright)
        14 : 'cyan',        # cyan (bright)
        15 : 'white' }      # white (bright)

    # Set the default color map for super class.
    default_color_map = darkbg_color_map.copy()

    def get_color(self, color, intensity=0):
        """ Returns a QColor for a given color code or rgb list, or None if one
            cannot be constructed.
        """
        # An int is an ANSI code; a tuple/list is treated as raw RGB(A).
        if isinstance(color, int):
            constructor = self._parse_ansi_color(color, intensity)
        elif isinstance(color, (tuple, list)):
            constructor = color
        else:
            return None

        if isinstance(constructor, str):
            # If this is an X11 color name, we just hope there is a close SVG
            # color name. We could use QColor's static method
            # 'setAllowX11ColorNames()', but this is global and only available
            # on X11. It seems cleaner to aim for uniformity of behavior.
            return QtGui.QColor(constructor)

        elif isinstance(constructor, (tuple, list)):
            return QtGui.QColor(*constructor)

        return None

    def get_format(self):
        """ Returns a QTextCharFormat that encodes the current style attributes.
        """
        format = QtGui.QTextCharFormat()

        # Set foreground color
        qcolor = self.get_color(self.foreground_color, self.intensity)
        if qcolor is not None:
            format.setForeground(qcolor)

        # Set background color
        qcolor = self.get_color(self.background_color, self.intensity)
        if qcolor is not None:
            format.setBackground(qcolor)

        # Set font weight/style options
        if self.bold:
            format.setFontWeight(QtGui.QFont.Bold)
        else:
            format.setFontWeight(QtGui.QFont.Normal)
        format.setFontItalic(self.italic)
        format.setFontUnderline(self.underline)

        return format

    def set_background_color(self, style):
        """
        Given a syntax style, attempt to set a color map that will be
        aesthetically pleasing.
        """
        # Set a new default color map.
        self.default_color_map = self.darkbg_color_map.copy()

        if not dark_style(style):
            # Colors appropriate for a terminal with a light background. For
            # now, only use non-bright colors...
            for i in range(8):
                self.default_color_map[i + 8] = self.default_color_map[i]

            # ...and replace white with black.
            self.default_color_map[7] = self.default_color_map[15] = 'black'

        # Update the current color map with the new defaults.
        self.color_map.update(self.default_color_map)
|
QtAnsiCodeProcessor
|
python
|
bokeh__bokeh
|
src/bokeh/command/subcommands/serve.py
|
{
"start": 17966,
"end": 37848
}
|
class ____(Subcommand):
''' Subcommand to launch the Bokeh server.
'''
#: name for this subcommand
name = "serve"
help = "Run a Bokeh server hosting one or more applications"
args = (
*base_serve_args,
('files', Argument(
metavar = 'DIRECTORY-OR-SCRIPT',
nargs = '*',
help = "The app directories or scripts to serve (serve empty document if not specified)",
default = None,
)),
('--args', Argument(
metavar = 'COMMAND-LINE-ARGS',
nargs = "...",
help = "Command line arguments remaining to passed on to the application handler. "
"NOTE: if this argument precedes DIRECTORY-OR-SCRIPT then some other argument, e.g. "
"--show, must be placed before the directory or script. ",
)),
('--dev', Argument(
metavar ='FILES-TO-WATCH',
action ='store',
default = None,
type = str,
nargs = '*',
help = "Enable live reloading during app development. "
"By default it watches all *.py *.html *.css *.yaml files "
"in the app directory tree. Additional files can be passed "
"as arguments. "
"NOTE: if this argument precedes DIRECTORY-OR-SCRIPT then some other argument, e.g "
"--show, must be placed before the directory or script. "
"NOTE: This setting only works with a single app. "
"It also restricts the number of processes to 1. "
"NOTE FOR WINDOWS USERS : this option must be invoked using "
"'python -m bokeh'. If not Tornado will fail to restart the "
"server",
)),
('--show', Argument(
action = 'store_true',
help = "Open server app(s) in a browser",
)),
('--allow-websocket-origin', Argument(
metavar = 'HOST[:PORT]',
action = 'append',
type = str,
help = "Public hostnames which may connect to the Bokeh websocket "
"With unix socket, the websocket origin restrictions should be enforced by the proxy.",
)),
('--prefix', Argument(
metavar = 'PREFIX',
type = str,
help = "URL prefix for Bokeh server URLs",
default = None,
)),
('--ico-path', Argument(
metavar = "ICO_PATH",
type = str,
help = "Path to a .ico file to use as the favicon.ico, or 'none' to "
"disable favicon.ico support. If unset, a default Bokeh .ico "
"file will be used",
default = None,
)),
('--keep-alive', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to send a keep-alive ping to clients, 0 to disable.",
default = None,
)),
('--check-unused-sessions', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to check for unused sessions",
default = None,
)),
('--unused-session-lifetime', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How long unused sessions last",
default = None,
)),
('--stats-log-frequency', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to log stats",
default = None,
)),
('--mem-log-frequency', Argument(
metavar = 'MILLISECONDS',
type = int,
help = "How often to log memory usage information",
default = None,
)),
('--use-xheaders', Argument(
action = 'store_true',
help = "Prefer X-headers for IP/protocol information",
)),
('--ssl-certfile', Argument(
metavar = 'CERTFILE',
action = 'store',
default = None,
help = 'Absolute path to a certificate file for SSL termination',
)),
('--ssl-keyfile', Argument(
metavar = 'KEYFILE',
action = 'store',
default = None,
help = 'Absolute path to a private key file for SSL termination',
)),
('--session-ids', Argument(
metavar = 'MODE',
action = 'store',
default = None,
choices = SESSION_ID_MODES,
help = f"One of: {nice_join(SESSION_ID_MODES)}",
)),
('--auth-module', Argument(
metavar = 'AUTH_MODULE',
action = 'store',
default = None,
help = 'Absolute path to a Python module that implements auth hooks',
)),
('--enable-xsrf-cookies', Argument(
action = 'store_true',
default = False,
help = 'Whether to enable Tornado support for XSRF cookies. All '
'PUT, POST, or DELETE handlers must be properly instrumented '
'when this setting is enabled.',
)),
('--exclude-headers', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request headers to exclude from the session '
'context (by default all headers are included).',
)),
('--exclude-cookies', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request cookies to exclude from the session '
'context (by default all cookies are included).',
)),
('--include-headers', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request headers to make available in the session '
'context (by default all headers are included).',
)),
('--include-cookies', Argument(
action = 'store',
default = None,
nargs='+',
help = 'A list of request cookies to make available in the session '
'context (by default all cookies are included).',
)),
('--cookie-secret', Argument(
metavar = 'COOKIE_SECRET',
action = 'store',
default = None,
help = 'Configure to enable getting/setting secure cookies',
)),
('--index', Argument(
metavar = 'INDEX',
action = 'store',
default = None,
help = 'Path to a template to use for the site index',
)),
('--disable-index', Argument(
action = 'store_true',
help = 'Do not use the default index on the root path',
)),
('--disable-index-redirect', Argument(
action = 'store_true',
help = 'Do not redirect to running app from root path',
)),
('--num-procs', Argument(
metavar = 'N',
action = 'store',
help = "Number of worker processes for an app. Using "
"0 will autodetect number of cores (defaults to 1)",
default = 1,
type = int,
)),
('--session-token-expiration', Argument(
metavar = 'N',
action = 'store',
help = "Duration in seconds that a new session token "
"is valid for session creation. After the expiry "
"time has elapsed, the token will not be able "
"create a new session (defaults to seconds).",
default = DEFAULT_SESSION_TOKEN_EXPIRATION,
type = int,
)),
('--websocket-max-message-size', Argument(
metavar = 'BYTES',
action = 'store',
help = "Set the Tornado websocket_max_message_size value "
"(default: 20MB)",
default = DEFAULT_WEBSOCKET_MAX_MESSAGE_SIZE_BYTES,
type = int,
)),
('--websocket-compression-level', Argument(
metavar = 'LEVEL',
action = 'store',
help = "Set the Tornado WebSocket compression_level",
default = None,
type = int,
)),
('--websocket-compression-mem-level', Argument(
metavar = 'LEVEL',
action = 'store',
help = "Set the Tornado WebSocket compression mem_level",
default = None,
type = int,
)),
('--glob', Argument(
action='store_true',
help='Process all filename arguments as globs',
)),
)
def customize_applications(self, args: argparse.Namespace, applications: dict[str, Any]) -> dict[str, Any]:
'''Allows subclasses to customize ``applications``.
Should modify and return a copy of the ``applications`` dictionary.
'''
return dict(applications)
def customize_kwargs(self, args: argparse.Namespace, server_kwargs: dict[str, Any]) -> dict[str, Any]:
'''Allows subclasses to customize ``server_kwargs``.
Should modify and return a copy of the ``server_kwargs`` dictionary.
'''
return dict(server_kwargs)
def customize_server(self, server: Server) -> Server:
'''Allows subclasses to customize the ``server``.
Should apply modifications to the server and wrap it or return the same instance.
'''
return server
def invoke(self, args: argparse.Namespace) -> None:
'''
'''
basicConfig(format=args.log_format, filename=args.log_file)
# This is a bit of a fudge. We want the default log level for non-server
# cases to be None, i.e. we don't set a log level. But for the server we
# do want to set the log level to INFO if nothing else overrides that.
log_level = settings.py_log_level(args.log_level)
if log_level is None:
log_level = logging.INFO
logging.getLogger('bokeh').setLevel(log_level)
if args.use_config is not None:
log.info(f"Using override config file: {args.use_config}")
settings.load_config(args.use_config)
# protect this import inside a function so that "bokeh info" can work
# even if Tornado is not installed
from bokeh.server.server import Server
files: list[str] = []
for f in args.files:
if args.glob:
files.extend(glob(f))
else:
files.append(f)
argvs = {f: args.args for f in files}
applications = build_single_handler_applications(files, argvs)
if len(applications) == 0:
# create an empty application by default
applications['/'] = Application()
# rename args to be compatible with Server
if args.keep_alive is not None:
args.keep_alive_milliseconds = args.keep_alive
if args.check_unused_sessions is not None:
args.check_unused_sessions_milliseconds = args.check_unused_sessions
if args.unused_session_lifetime is not None:
args.unused_session_lifetime_milliseconds = args.unused_session_lifetime
if args.stats_log_frequency is not None:
args.stats_log_frequency_milliseconds = args.stats_log_frequency
if args.mem_log_frequency is not None:
args.mem_log_frequency_milliseconds = args.mem_log_frequency
server_kwargs = { key: getattr(args, key) for key in ['port',
'address',
'unix_socket',
'allow_websocket_origin',
'num_procs',
'prefix',
'index',
'keep_alive_milliseconds',
'check_unused_sessions_milliseconds',
'unused_session_lifetime_milliseconds',
'stats_log_frequency_milliseconds',
'mem_log_frequency_milliseconds',
'use_xheaders',
'websocket_max_message_size',
'websocket_compression_level',
'websocket_compression_mem_level',
'include_cookies',
'include_headers',
'exclude_cookies',
'exclude_headers',
'session_token_expiration',
]
if getattr(args, key, None) is not None }
server_kwargs['sign_sessions'] = settings.sign_sessions()
server_kwargs['secret_key'] = settings.secret_key_bytes()
server_kwargs['ssl_certfile'] = settings.ssl_certfile(getattr(args, 'ssl_certfile', None))
server_kwargs['ssl_keyfile'] = settings.ssl_keyfile(getattr(args, 'ssl_keyfile', None))
server_kwargs['ssl_password'] = settings.ssl_password()
server_kwargs['generate_session_ids'] = True
if args.session_ids is None:
# no --session-ids means use the env vars
pass
elif args.session_ids == 'unsigned':
server_kwargs['sign_sessions'] = False
elif args.session_ids == 'signed':
server_kwargs['sign_sessions'] = True
elif args.session_ids == 'external-signed':
server_kwargs['sign_sessions'] = True
server_kwargs['generate_session_ids'] = False
else:
raise RuntimeError("argparse should have filtered out --session-ids mode " +
args.session_ids)
if server_kwargs['sign_sessions'] and not server_kwargs['secret_key']:
die("To sign sessions, the BOKEH_SECRET_KEY environment variable must be set; " +
"the `bokeh secret` command can be used to generate a new key.")
if 'unix_socket' in server_kwargs:
if server_kwargs['port'] != DEFAULT_SERVER_PORT:
die("--port arg is not supported with a unix socket")
invalid_args = ['address', 'ssl_certfile', 'ssl_keyfile']
if any(server_kwargs.get(x) for x in invalid_args):
die(f"{[*invalid_args, 'port']} args are not supported with a unix socket")
auth_module_path = settings.auth_module(getattr(args, 'auth_module', None))
if auth_module_path:
server_kwargs['auth_provider'] = AuthModule(auth_module_path)
else:
server_kwargs['auth_provider'] = NullAuth()
server_kwargs['xsrf_cookies'] = settings.xsrf_cookies(getattr(args, 'enable_xsrf_cookies', False))
server_kwargs['cookie_secret'] = settings.cookie_secret(getattr(args, 'cookie_secret', None))
server_kwargs['use_index'] = not args.disable_index
server_kwargs['redirect_root'] = not args.disable_index_redirect
server_kwargs['autoreload'] = args.dev is not None
server_kwargs['ico_path'] = settings.ico_path(getattr(args, 'ico_path', None))
def find_autoreload_targets(app_path: str) -> None:
path = os.path.abspath(app_path)
if not os.path.isdir(path):
return
for path, _, files in os.walk(path):
for name in files:
if (fnmatch(name, '*.html') or
fnmatch(name, '*.css') or
fnmatch(name, '*.yaml')):
log.info("Watching: " + os.path.join(path, name))
watch(os.path.join(path, name))
def add_optional_autoreload_files(file_list: list[str]) -> None:
for filen in file_list:
if os.path.isdir(filen):
log.warning("Cannot watch directory " + filen)
continue
log.info("Watching: " + filen)
watch(filen)
if server_kwargs['autoreload']:
if len(applications.keys()) != 1:
die("Bokeh server --dev option can only support a single app")
if len(args.files) == 0:
die("Bokeh server --dev option requires an app script or directory be provided")
if server_kwargs['num_procs'] != 1:
log.info("Running in --dev mode. --num-procs is limited to 1.")
server_kwargs['num_procs'] = 1
find_autoreload_targets(args.files[0])
add_optional_autoreload_files(args.dev)
applications = self.customize_applications(args, applications)
server_kwargs = self.customize_kwargs(args, server_kwargs)
with report_server_init_errors(**server_kwargs):
server = Server(applications, **server_kwargs)
if args.show:
# we have to defer opening in browser until we start up the server
def show_callback() -> None:
for route in applications.keys():
server.show(route)
server.io_loop.add_callback(show_callback)
# Server may not have a port when bound to a unix socket
if server.port:
address_string = 'localhost'
if server.address is not None and server.address != '':
address_string = server.address
if server_kwargs['ssl_certfile'] and (server_kwargs['ssl_certfile'].endswith('.pem') or server_kwargs['ssl_keyfile']):
protocol = 'https'
else:
protocol = 'http'
for route in sorted(applications.keys()):
url = f"{protocol}://{address_string}:{server.port}{server.prefix}{route}"
log.info(f"Bokeh app running at: {url}")
log.info(f"Starting Bokeh server with process id: {os.getpid()}")
server = self.customize_server(server)
server.run_until_shutdown()
#-----------------------------------------------------------------------------
# Dev API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Private API
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
# Code
#-----------------------------------------------------------------------------
__doc__ = format_docstring(__doc__,
DEFAULT_PORT=DEFAULT_SERVER_PORT,
LOGLEVELS=nice_join(LOGLEVELS),
SESSION_ID_MODES=nice_join(SESSION_ID_MODES),
DEFAULT_LOG_FORMAT=DEFAULT_LOG_FORMAT,
)
|
Serve
|
python
|
huggingface__transformers
|
src/transformers/models/lightglue/image_processing_lightglue.py
|
{
"start": 2059,
"end": 5381
}
|
class ____(ImagesKwargs, total=False):
r"""
do_grayscale (`bool`, *optional*, defaults to `True`):
Whether to convert the image to grayscale. Can be overridden by `do_grayscale` in the `preprocess` method.
"""
do_grayscale: bool
def is_grayscale(
image: np.ndarray,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
):
if input_data_format == ChannelDimension.FIRST:
if image.shape[0] == 1:
return True
return np.all(image[0, ...] == image[1, ...]) and np.all(image[1, ...] == image[2, ...])
elif input_data_format == ChannelDimension.LAST:
if image.shape[-1] == 1:
return True
return np.all(image[..., 0] == image[..., 1]) and np.all(image[..., 1] == image[..., 2])
def convert_to_grayscale(
image: ImageInput,
input_data_format: Optional[Union[str, ChannelDimension]] = None,
) -> ImageInput:
"""
Converts an image to grayscale format using the NTSC formula. Only support numpy and PIL Image.
This function is supposed to return a 1-channel image, but it returns a 3-channel image with the same value in each
channel, because of an issue that is discussed in :
https://github.com/huggingface/transformers/pull/25786#issuecomment-1730176446
Args:
image (Image):
The image to convert.
input_data_format (`ChannelDimension` or `str`, *optional*):
The channel dimension format for the input image.
"""
requires_backends(convert_to_grayscale, ["vision"])
if isinstance(image, np.ndarray):
if is_grayscale(image, input_data_format=input_data_format):
return image
if input_data_format == ChannelDimension.FIRST:
gray_image = image[0, ...] * 0.2989 + image[1, ...] * 0.5870 + image[2, ...] * 0.1140
gray_image = np.stack([gray_image] * 3, axis=0)
elif input_data_format == ChannelDimension.LAST:
gray_image = image[..., 0] * 0.2989 + image[..., 1] * 0.5870 + image[..., 2] * 0.1140
gray_image = np.stack([gray_image] * 3, axis=-1)
return gray_image
if not isinstance(image, PIL.Image.Image):
return image
image = image.convert("L")
return image
def validate_and_format_image_pairs(images: ImageInput):
error_message = (
"Input images must be a one of the following :",
" - A pair of PIL images.",
" - A pair of 3D arrays.",
" - A list of pairs of PIL images.",
" - A list of pairs of 3D arrays.",
)
def _is_valid_image(image):
"""images is a PIL Image or a 3D array."""
return is_pil_image(image) or (
is_valid_image(image) and get_image_type(image) != ImageType.PIL and len(image.shape) == 3
)
if isinstance(images, list):
if len(images) == 2 and all((_is_valid_image(image)) for image in images):
return images
if all(
isinstance(image_pair, list)
and len(image_pair) == 2
and all(_is_valid_image(image) for image in image_pair)
for image_pair in images
):
return [image for image_pair in images for image in image_pair]
raise ValueError(error_message)
@requires(backends=("torch",))
|
LightGlueImageProcessorKwargs
|
python
|
PrefectHQ__prefect
|
src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py
|
{
"start": 527607,
"end": 528046
}
|
class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("id", "name", "name_html")
id = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="id")
name = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="name")
name_html = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="nameHTML")
|
ProjectV2SingleSelectFieldOption
|
python
|
Lightning-AI__lightning
|
src/lightning/pytorch/loops/fetchers.py
|
{
"start": 5105,
"end": 6469
}
|
class ____(_DataFetcher):
"""This class is used to return directly the `dataloader_iter` to the ``LightningModule`` training_step for users
to implement their own pre-fetching logic. This feature can be activated as follows:
Example::
Class MyModel(LightningModule):
def training_step(self, dataloader_iter: Iterator) -> None:
# it is the user responsibility to fetch and move the batch to the right device.
batch, batch_idx, dataloader_idx = next(dataloader_iter)
batch = batch.to(self.device)
...
"""
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self._batch: Any = None
self._batch_idx: int = 0
self._dataloader_idx: int = 0
@override
def __iter__(self) -> "_DataLoaderIterDataFetcher":
super().__iter__()
self.iterator_wrapper = iter(_DataFetcherWrapper(self))
return self
@override
def __next__(self) -> Iterator["_DataFetcherWrapper"]: # type: ignore[override]
if self.done:
raise StopIteration
return self.iterator_wrapper
@override
def reset(self) -> None:
super().reset()
self._batch = None
self._batch_idx = 0
self._dataloader_idx = 0
|
_DataLoaderIterDataFetcher
|
python
|
spyder-ide__spyder
|
spyder/utils/environ.py
|
{
"start": 9577,
"end": 11333
}
|
class ____(RemoteEnvDialog):
"""User Environment Variables Viewer/Editor"""
def __init__(self, parent=None):
title = _("User environment variables")
readonly = True
if os.name == 'nt':
title = _(r"User environment variables in Windows registry")
readonly = False
super().__init__(parent=parent, title=title, readonly=readonly)
if os.name == 'nt':
if parent is None:
parent = self
QMessageBox.warning(
parent, _("Warning"),
_("If you accept changes, "
"this will modify the current user environment "
"variables directly <b>in Windows registry</b>. "
"Use it with precautions, at your own risks.<br>"
"<br>Note that for changes to take effect, you will "
"need to restart the parent process of this applica"
"tion (simply restart Spyder if you have executed it "
"from a Windows shortcut, otherwise restart any "
"application from which you may have executed it, "
"like <i>Python(x,y) Home</i> for example)")
)
def accept(self):
"""Reimplement Qt method"""
if os.name == 'nt':
set_user_env(listdict2envdict(self.get_value()), parent=self)
super().accept()
def test():
"""Run Windows environment variable editor"""
import sys
from spyder.utils.qthelpers import qapplication
_ = qapplication()
dlg = UserEnvDialog()
dlg.show()
sys.exit(dlg.exec())
if __name__ == "__main__":
import logging
logging.basicConfig()
logger.setLevel(10)
test()
|
UserEnvDialog
|
python
|
readthedocs__readthedocs.org
|
readthedocs/config/models.py
|
{
"start": 2673,
"end": 2828
}
|
class ____(ConfigBaseModel):
include: list[str] | Literal["all"] = []
exclude: list[str] | Literal["all"] = []
recursive: bool = False
|
Submodules
|
python
|
getsentry__sentry
|
src/sentry/analytics/events/sentry_app_uninstalled.py
|
{
"start": 79,
"end": 239
}
|
class ____(analytics.Event):
user_id: int
organization_id: int
sentry_app: str
analytics.register(SentryAppUninstalledEvent)
|
SentryAppUninstalledEvent
|
python
|
sqlalchemy__sqlalchemy
|
test/orm/test_relationships.py
|
{
"start": 171111,
"end": 182179
}
|
class ____(_fixtures.FixtureTest):
run_inserts = None
@testing.flag_combinations(
dict(
detached=False,
raiseload=False,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=False,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=False,
raiseload=True,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=True,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=False,
raiseload=False,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=False,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=False,
raiseload=True,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=True,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=True,
),
#####
dict(
detached=False,
raiseload=False,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=True,
raiseload=False,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=False,
raiseload=True,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=True,
raiseload=True,
backref=False,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=False,
raiseload=False,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=True,
raiseload=False,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=False,
raiseload=True,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=True,
raiseload=True,
backref=True,
delete=False,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=False,
raiseload=False,
backref=False,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=False,
backref=False,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
dict(
detached=False,
raiseload=True,
backref=False,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=True,
backref=False,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
dict(
detached=False,
raiseload=False,
backref=True,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=False,
backref=True,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
dict(
detached=False,
raiseload=True,
backref=True,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=True,
backref=True,
delete=False,
active_history=True,
legacy_inactive_history_style=True,
),
####
dict(
detached=False,
raiseload=False,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=False,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=False,
raiseload=True,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=True,
),
dict(
detached=True,
raiseload=True,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=True,
),
###
dict(
detached=False,
raiseload=False,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=True,
raiseload=False,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=False,
raiseload=True,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=False,
),
dict(
detached=True,
raiseload=True,
backref=False,
delete=True,
active_history=False,
legacy_inactive_history_style=False,
),
#
dict(
detached=False,
raiseload=False,
backref=False,
delete=True,
active_history=True,
),
dict(
detached=True,
raiseload=False,
backref=False,
delete=True,
active_history=True,
),
dict(
detached=False,
raiseload=True,
backref=False,
delete=True,
active_history=True,
),
dict(
detached=True,
raiseload=True,
backref=False,
delete=True,
active_history=True,
),
)
def test_m2o(
self,
detached,
raiseload,
backref,
active_history,
delete,
legacy_inactive_history_style,
):
if delete:
assert not backref, "delete and backref are mutually exclusive"
Address, addresses, users, User = (
self.classes.Address,
self.tables.addresses,
self.tables.users,
self.classes.User,
)
opts = {}
if active_history:
opts["active_history"] = True
if raiseload:
opts["lazy"] = "raise"
opts["_legacy_inactive_history_style"] = legacy_inactive_history_style
self.mapper_registry.map_imperatively(
Address,
addresses,
properties={
"user": relationship(User, back_populates="addresses", **opts)
},
)
self.mapper_registry.map_imperatively(
User,
users,
properties={
"addresses": relationship(Address, back_populates="user")
},
)
s = fixture_session()
a1 = Address(email_address="a1")
u1 = User(name="u1", addresses=[a1])
s.add_all([a1, u1])
s.commit()
if backref:
u1.addresses
if detached:
s.expunge(a1)
def go():
u1.addresses = []
if active_history:
if raiseload:
assert_raises_message(
exc.InvalidRequestError,
"'Address.user' is not available due to lazy='raise'",
go,
)
return
elif detached:
assert_raises_message(
orm_exc.DetachedInstanceError,
"lazy load operation of attribute 'user' "
"cannot proceed",
go,
)
return
go()
else:
if detached:
s.expunge(a1)
if delete:
def go():
del a1.user
else:
def go():
a1.user = None
if active_history:
if raiseload:
assert_raises_message(
exc.InvalidRequestError,
"'Address.user' is not available due to lazy='raise'",
go,
)
return
elif detached:
assert_raises_message(
orm_exc.DetachedInstanceError,
"lazy load operation of attribute 'user' "
"cannot proceed",
go,
)
return
go()
if detached:
s.add(a1)
s.commit()
eq_(s.query(Address).count(), 1)
eq_(s.query(User).count(), 1)
# test for issue #4997
# delete of Address should proceed, as User object does not
# need to be loaded
s.delete(a1)
s.commit()
eq_(s.query(Address).count(), 0)
eq_(s.query(User).count(), 1)
|
InactiveHistoryNoRaiseTest
|
python
|
wandb__wandb
|
wandb/sdk/artifacts/_generated/artifact_collection_membership_files.py
|
{
"start": 992,
"end": 1212
}
|
class ____(
GQLResult
):
files: Optional[
ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembershipFiles
]
|
ArtifactCollectionMembershipFilesProjectArtifactCollectionArtifactMembership
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.