language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | zarr-developers__zarr-python | src/zarr/core/dtype/common.py | {
"start": 5907,
"end": 6331
} | class ____:
"""
A mix-in class for data types with a length attribute, such as fixed-size collections
of unicode strings, or bytes.
Attributes
----------
length : int
The length of the scalars belonging to this data type. Note that this class does not assign
a unit to the length. Child classes may assign units.
"""
length: int
@dataclass(frozen=True, kw_only=True)
| HasLength |
python | lxml__lxml | src/lxml/html/_difflib.py | {
"start": 69233,
"end": 84954
} | class ____(object):
"""For producing HTML side by side comparison with change highlights.
This class can be used to create an HTML table (or a complete HTML file
containing the table) showing a side by side, line by line comparison
of text with inter-line and intra-line change highlights. The table can
be generated in either full or contextual difference mode.
The following methods are provided for HTML generation:
make_table -- generates HTML for a single side by side table
make_file -- generates complete HTML file with a single side by side table
See Doc/includes/diff.py for an example usage of this class.
"""
_file_template = _file_template
_styles = _styles
_table_template = _table_template
_legend = _legend
_default_prefix = 0
def __init__(self,tabsize=8,wrapcolumn=None,linejunk=None,
charjunk=IS_CHARACTER_JUNK):
"""HtmlDiff instance initializer
Arguments:
tabsize -- tab stop spacing, defaults to 8.
wrapcolumn -- column number where lines are broken and wrapped,
defaults to None where lines are not wrapped.
linejunk,charjunk -- keyword arguments passed into ndiff() (used by
HtmlDiff() to generate the side by side HTML differences). See
ndiff() documentation for argument default values and descriptions.
"""
self._tabsize = tabsize
self._wrapcolumn = wrapcolumn
self._linejunk = linejunk
self._charjunk = charjunk
def make_file(self, fromlines, tolines, fromdesc='', todesc='',
context=False, numlines=5, *, charset='utf-8'):
"""Returns HTML file of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
charset -- charset of the HTML document
"""
return (self._file_template % dict(
styles=self._styles,
legend=self._legend,
table=self.make_table(fromlines, tolines, fromdesc, todesc,
context=context, numlines=numlines),
charset=charset
)).encode(charset, 'xmlcharrefreplace').decode(charset)
def _tab_newline_replace(self,fromlines,tolines):
"""Returns from/to line lists with tabs expanded and newlines removed.
Instead of tab characters being replaced by the number of spaces
needed to fill in to the next tab stop, this function will fill
the space with tab characters. This is done so that the difference
algorithms can identify changes in a file when tabs are replaced by
spaces and vice versa. At the end of the HTML generation, the tab
characters will be replaced with a nonbreakable space.
"""
def expand_tabs(line):
# hide real spaces
line = line.replace(' ','\0')
# expand tabs into spaces
line = line.expandtabs(self._tabsize)
# replace spaces from expanded tabs back into tab characters
# (we'll replace them with markup after we do differencing)
line = line.replace(' ','\t')
return line.replace('\0',' ').rstrip('\n')
fromlines = [expand_tabs(line) for line in fromlines]
tolines = [expand_tabs(line) for line in tolines]
return fromlines,tolines
def _split_line(self,data_list,line_num,text):
"""Builds list of text lines by splitting text lines at wrap point
This function will determine if the input text line needs to be
wrapped (split) into separate lines. If so, the first wrap point
will be determined and the first line appended to the output
text line list. This function is used recursively to handle
the second part of the split line to further split it.
"""
# if blank line or context separator, just add it to the output list
if not line_num:
data_list.append((line_num,text))
return
# if line text doesn't need wrapping, just add it to the output list
size = len(text)
max = self._wrapcolumn
if (size <= max) or ((size -(text.count('\0')*3)) <= max):
data_list.append((line_num,text))
return
# scan text looking for the wrap point, keeping track if the wrap
# point is inside markers
i = 0
n = 0
mark = ''
while n < max and i < size:
if text[i] == '\0':
i += 1
mark = text[i]
i += 1
elif text[i] == '\1':
i += 1
mark = ''
else:
i += 1
n += 1
# wrap point is inside text, break it up into separate lines
line1 = text[:i]
line2 = text[i:]
# if wrap point is inside markers, place end marker at end of first
# line and start marker at beginning of second line because each
# line will have its own table tag markup around it.
if mark:
line1 = line1 + '\1'
line2 = '\0' + mark + line2
# tack on first line onto the output list
data_list.append((line_num,line1))
# use this routine again to wrap the remaining text
self._split_line(data_list,'>',line2)
def _line_wrapper(self,diffs):
"""Returns iterator that splits (wraps) mdiff text lines"""
# pull from/to data and flags from mdiff iterator
for fromdata,todata,flag in diffs:
# check for context separators and pass them through
if flag is None:
yield fromdata,todata,flag
continue
(fromline,fromtext),(toline,totext) = fromdata,todata
# for each from/to line split it at the wrap column to form
# list of text lines.
fromlist,tolist = [],[]
self._split_line(fromlist,fromline,fromtext)
self._split_line(tolist,toline,totext)
# yield from/to line in pairs inserting blank lines as
# necessary when one side has more wrapped lines
while fromlist or tolist:
if fromlist:
fromdata = fromlist.pop(0)
else:
fromdata = ('',' ')
if tolist:
todata = tolist.pop(0)
else:
todata = ('',' ')
yield fromdata,todata,flag
def _collect_lines(self,diffs):
"""Collects mdiff output into separate lists
Before storing the mdiff from/to data into a list, it is converted
into a single line of text with HTML markup.
"""
fromlist,tolist,flaglist = [],[],[]
# pull from/to data and flags from mdiff style iterator
for fromdata,todata,flag in diffs:
try:
# store HTML markup of the lines into the lists
fromlist.append(self._format_line(0,flag,*fromdata))
tolist.append(self._format_line(1,flag,*todata))
except TypeError:
# exceptions occur for lines where context separators go
fromlist.append(None)
tolist.append(None)
flaglist.append(flag)
return fromlist,tolist,flaglist
def _format_line(self,side,flag,linenum,text):
"""Returns HTML markup of "from" / "to" text lines
side -- 0 or 1 indicating "from" or "to" text
flag -- indicates if difference on line
linenum -- line number (used for line number column)
text -- line text to be marked up
"""
try:
linenum = '%d' % linenum
id = ' id="%s%s"' % (self._prefix[side],linenum)
except TypeError:
# handle blank lines where linenum is '>' or ''
id = ''
# replace those things that would get confused with HTML symbols
text=text.replace("&","&").replace(">",">").replace("<","<")
# make space non-breakable so they don't get compressed or line wrapped
text = text.replace(' ',' ').rstrip()
return '<td class="diff_header"%s>%s</td><td nowrap="nowrap">%s</td>' \
% (id,linenum,text)
def _make_prefix(self):
"""Create unique anchor prefixes"""
# Generate a unique anchor prefix so multiple tables
# can exist on the same HTML page without conflicts.
fromprefix = "from%d_" % HtmlDiff._default_prefix
toprefix = "to%d_" % HtmlDiff._default_prefix
HtmlDiff._default_prefix += 1
# store prefixes so line format method has access
self._prefix = [fromprefix,toprefix]
def _convert_flags(self,fromlist,tolist,flaglist,context,numlines):
"""Makes list of "next" links"""
# all anchor names will be generated using the unique "to" prefix
toprefix = self._prefix[1]
# process change flags, generating middle column of next anchors/links
next_id = ['']*len(flaglist)
next_href = ['']*len(flaglist)
num_chg, in_change = 0, False
last = 0
for i,flag in enumerate(flaglist):
if flag:
if not in_change:
in_change = True
last = i
# at the beginning of a change, drop an anchor a few lines
# (the context lines) before the change for the previous
# link
i = max([0,i-numlines])
next_id[i] = ' id="difflib_chg_%s_%d"' % (toprefix,num_chg)
# at the beginning of a change, drop a link to the next
# change
num_chg += 1
next_href[last] = '<a href="#difflib_chg_%s_%d">n</a>' % (
toprefix,num_chg)
else:
in_change = False
# check for cases where there is no content to avoid exceptions
if not flaglist:
flaglist = [False]
next_id = ['']
next_href = ['']
last = 0
if context:
fromlist = ['<td></td><td> No Differences Found </td>']
tolist = fromlist
else:
fromlist = tolist = ['<td></td><td> Empty File </td>']
# if not a change on first line, drop a link
if not flaglist[0]:
next_href[0] = '<a href="#difflib_chg_%s_0">f</a>' % toprefix
# redo the last link to link to the top
next_href[last] = '<a href="#difflib_chg_%s_top">t</a>' % (toprefix)
return fromlist,tolist,flaglist,next_href,next_id
def make_table(self,fromlines,tolines,fromdesc='',todesc='',context=False,
numlines=5):
"""Returns HTML table of side by side comparison with change highlights
Arguments:
fromlines -- list of "from" lines
tolines -- list of "to" lines
fromdesc -- "from" file column header string
todesc -- "to" file column header string
context -- set to True for contextual differences (defaults to False
which shows full differences).
numlines -- number of context lines. When context is set True,
controls number of lines displayed before and after the change.
When context is False, controls the number of lines to place
the "next" link anchors before the next change (so click of
"next" link jumps to just before the change).
"""
# make unique anchor prefixes so that multiple tables may exist
# on the same page without conflict.
self._make_prefix()
# change tabs to spaces before it gets more difficult after we insert
# markup
fromlines,tolines = self._tab_newline_replace(fromlines,tolines)
# create diffs iterator which generates side by side from/to data
if context:
context_lines = numlines
else:
context_lines = None
diffs = _mdiff(fromlines,tolines,context_lines,linejunk=self._linejunk,
charjunk=self._charjunk)
# set up iterator to wrap lines that exceed desired width
if self._wrapcolumn:
diffs = self._line_wrapper(diffs)
# collect up from/to lines and flags into lists (also format the lines)
fromlist,tolist,flaglist = self._collect_lines(diffs)
# process change flags, generating middle column of next anchors/links
fromlist,tolist,flaglist,next_href,next_id = self._convert_flags(
fromlist,tolist,flaglist,context,numlines)
s = []
fmt = ' <tr><td class="diff_next"%s>%s</td>%s' + \
'<td class="diff_next">%s</td>%s</tr>\n'
for i in range(len(flaglist)):
if flaglist[i] is None:
# mdiff yields None on separator lines skip the bogus ones
# generated for the first line
if i > 0:
s.append(' </tbody> \n <tbody>\n')
else:
s.append( fmt % (next_id[i],next_href[i],fromlist[i],
next_href[i],tolist[i]))
if fromdesc or todesc:
header_row = '<thead><tr>%s%s%s%s</tr></thead>' % (
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % fromdesc,
'<th class="diff_next"><br /></th>',
'<th colspan="2" class="diff_header">%s</th>' % todesc)
else:
header_row = ''
table = self._table_template % dict(
data_rows=''.join(s),
header_row=header_row,
prefix=self._prefix[1])
return table.replace('\0+','<span class="diff_add">'). \
replace('\0-','<span class="diff_sub">'). \
replace('\0^','<span class="diff_chg">'). \
replace('\1','</span>'). \
replace('\t',' ')
def restore(delta, which):
r"""
Generate one of the two sequences that generated a delta.
Given a `delta` produced by `Differ.compare()` or `ndiff()`, extract
lines originating from file 1 or 2 (parameter `which`), stripping off line
prefixes.
Examples:
>>> diff = ndiff('one\ntwo\nthree\n'.splitlines(keepends=True),
... 'ore\ntree\nemu\n'.splitlines(keepends=True))
>>> diff = list(diff)
>>> print(''.join(restore(diff, 1)), end="")
one
two
three
>>> print(''.join(restore(diff, 2)), end="")
ore
tree
emu
"""
try:
tag = {1: "- ", 2: "+ "}[int(which)]
except KeyError:
raise ValueError('unknown delta choice (must be 1 or 2): %r'
% which) from None
prefixes = (" ", tag)
for line in delta:
if line[:2] in prefixes:
yield line[2:]
| HtmlDiff |
python | sqlalchemy__sqlalchemy | examples/versioned_rows/versioned_rows_w_versionid.py | {
"start": 3263,
"end": 3653
} | class ____(Base):
__tablename__ = "parent"
id = Column(Integer, primary_key=True)
child_id = Column(Integer)
child_version_id = Column(Integer)
child = relationship("Child", backref=backref("parent", uselist=False))
__table_args__ = (
ForeignKeyConstraint(
["child_id", "child_version_id"], ["child.id", "child.version_id"]
),
)
| Parent |
python | doocs__leetcode | lcci/02.07.Intersection of Two Linked Lists/Solution.py | {
"start": 136,
"end": 378
} | class ____:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
a, b = headA, headB
while a != b:
a = a.next if a else headB
b = b.next if b else headA
return a
| Solution |
python | getsentry__sentry | src/sentry/utils/patch_set.py | {
"start": 1039,
"end": 2881
} | class ____:
added: list[FileModification]
removed: list[FileModification]
modified: list[FileModification]
def patch_to_file_modifications(patch: str) -> FileModifications:
try:
patch_set = unidiff.PatchSet.from_string(patch)
except UnidiffParseError:
raise PatchParseError("Failed to parse file modifications from patch")
return FileModifications(
added=_patched_files_to_file_modifications(patch_set.added_files),
removed=_patched_files_to_file_modifications(patch_set.removed_files),
modified=_patched_files_to_file_modifications(patch_set.modified_files),
)
def _patched_files_to_file_modifications(
patched_files: list[unidiff.PatchedFile],
) -> list[FileModification]:
result: list[FileModification] = []
for patched_file in patched_files:
lines_added = 0
lines_removed = 0
lines_modified = 0
for hunk in patched_file:
lines = list(hunk)
i = 0
while i < len(lines):
line = lines[i]
if line.is_removed:
# Check if next line is an adjacent addition (potential modification)
if i + 1 < len(lines) and lines[i + 1].is_added:
lines_modified += 1
i += 2 # Skip the added line as well
continue
else:
lines_removed += 1
elif line.is_added:
lines_added += 1
i += 1
result.append(
FileModification(
path=patched_file.path,
lines_added=lines_added,
lines_removed=lines_removed,
lines_modified=lines_modified,
)
)
return result
| FileModifications |
python | PrefectHQ__prefect | src/integrations/prefect-azure/prefect_azure/container_instance.py | {
"start": 3074,
"end": 3701
} | class ____(BaseModel):
"""
Use a Managed Identity to access Azure Container registry. Requires the
user-assigned managed identity be available to the ACI container group.
"""
registry_url: str = Field(
default=...,
title="Registry URL",
description=(
"The URL to the registry, such as myregistry.azurecr.io. Generally, 'http' "
"or 'https' can be omitted."
),
)
identity: str = Field(
default=...,
description=(
"The user-assigned Azure managed identity for the private registry."
),
)
| ACRManagedIdentity |
python | sympy__sympy | sympy/geometry/line.py | {
"start": 70496,
"end": 75378
} | class ____(LinearEntity3D, Line):
"""An infinite 3D line in space.
A line is declared with two distinct points or a point and direction_ratio
as defined using keyword `direction_ratio`.
Parameters
==========
p1 : Point3D
pt : Point3D
direction_ratio : list
See Also
========
sympy.geometry.point.Point3D
sympy.geometry.line.Line
sympy.geometry.line.Line2D
Examples
========
>>> from sympy import Line3D, Point3D
>>> L = Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L
Line3D(Point3D(2, 3, 4), Point3D(3, 5, 1))
>>> L.points
(Point3D(2, 3, 4), Point3D(3, 5, 1))
"""
def __new__(cls, p1, pt=None, direction_ratio=(), **kwargs):
if isinstance(p1, LinearEntity3D):
if pt is not None:
raise ValueError('if p1 is a LinearEntity, pt must be None.')
p1, pt = p1.args
else:
p1 = Point(p1, dim=3)
if pt is not None and len(direction_ratio) == 0:
pt = Point(pt, dim=3)
elif len(direction_ratio) == 3 and pt is None:
pt = Point3D(p1.x + direction_ratio[0], p1.y + direction_ratio[1],
p1.z + direction_ratio[2])
else:
raise ValueError('A 2nd Point or keyword "direction_ratio" must '
'be used.')
return LinearEntity3D.__new__(cls, p1, pt, **kwargs)
def equation(self, x='x', y='y', z='z'):
"""Return the equations that define the line in 3D.
Parameters
==========
x : str, optional
The name to use for the x-axis, default value is 'x'.
y : str, optional
The name to use for the y-axis, default value is 'y'.
z : str, optional
The name to use for the z-axis, default value is 'z'.
Returns
=======
equation : Tuple of simultaneous equations
Examples
========
>>> from sympy import Point3D, Line3D, solve
>>> from sympy.abc import x, y, z
>>> p1, p2 = Point3D(1, 0, 0), Point3D(5, 3, 0)
>>> l1 = Line3D(p1, p2)
>>> eq = l1.equation(x, y, z); eq
(-3*x + 4*y + 3, z)
>>> solve(eq.subs(z, 0), (x, y, z))
{x: 4*y/3 + 1}
"""
x, y, z, k = [_symbol(i, real=True) for i in (x, y, z, 'k')]
p1, p2 = self.points
d1, d2, d3 = p1.direction_ratio(p2)
x1, y1, z1 = p1
eqs = [-d1*k + x - x1, -d2*k + y - y1, -d3*k + z - z1]
# eliminate k from equations by solving first eq with k for k
for i, e in enumerate(eqs):
if e.has(k):
kk = solve(e, k)[0]
eqs.pop(i)
break
return Tuple(*[i.subs(k, kk).as_numer_denom()[0] for i in eqs])
def distance(self, other):
"""
Finds the shortest distance between a line and another object.
Parameters
==========
Point3D, Line3D, Plane, tuple, list
Returns
=======
distance
Notes
=====
This method accepts only 3D entities as it's parameter
Tuples and lists are converted to Point3D and therefore must be of
length 3, 2 or 1.
NotImplementedError is raised if `other` is not an instance of one
of the specified classes: Point3D, Line3D, or Plane.
Examples
========
>>> from sympy.geometry import Line3D
>>> l1 = Line3D((0, 0, 0), (0, 0, 1))
>>> l2 = Line3D((0, 1, 0), (1, 1, 1))
>>> l1.distance(l2)
1
The computed distance may be symbolic, too:
>>> from sympy.abc import x, y
>>> l1 = Line3D((0, 0, 0), (0, 0, 1))
>>> l2 = Line3D((0, x, 0), (y, x, 1))
>>> l1.distance(l2)
Abs(x*y)/Abs(sqrt(y**2))
"""
from .plane import Plane # Avoid circular import
if isinstance(other, (tuple, list)):
try:
other = Point3D(other)
except ValueError:
pass
if isinstance(other, Point3D):
return super().distance(other)
if isinstance(other, Line3D):
if self == other:
return S.Zero
if self.is_parallel(other):
return super().distance(other.p1)
# Skew lines
self_direction = Matrix(self.direction_ratio)
other_direction = Matrix(other.direction_ratio)
normal = self_direction.cross(other_direction)
plane_through_self = Plane(p1=self.p1, normal_vector=normal)
return other.p1.distance(plane_through_self)
if isinstance(other, Plane):
return other.distance(self)
msg = f"{other} has type {type(other)}, which is unsupported"
raise NotImplementedError(msg)
| Line3D |
python | sqlalchemy__sqlalchemy | examples/versioned_history/test_versioning.py | {
"start": 1288,
"end": 29673
} | class ____(AssertsCompiledSQL):
__dialect__ = "default"
def setUp(self):
self.engine = engine = create_engine("sqlite://")
self.session = Session(engine)
self.make_base()
versioned_session(self.session)
def tearDown(self):
self.session.close()
clear_mappers()
self.Base.metadata.drop_all(self.engine)
def make_base(self):
self.Base = declarative_base()
def create_tables(self):
self.Base.metadata.create_all(self.engine)
def test_plain(self):
class SomeClass(Versioned, self.Base, ComparableEntity):
__tablename__ = "sometable"
id = Column(Integer, primary_key=True)
name = Column(String(50))
self.create_tables()
sess = self.session
sc = SomeClass(name="sc1")
sess.add(sc)
sess.commit()
sc.name = "sc1modified"
sess.commit()
assert sc.version == 2
SomeClassHistory = SomeClass.__history_mapper__.class_
eq_(
sess.query(SomeClassHistory)
.filter(SomeClassHistory.version == 1)
.all(),
[SomeClassHistory(version=1, name="sc1")],
)
sc.name = "sc1modified2"
eq_(
sess.query(SomeClassHistory)
.order_by(SomeClassHistory.version)
.all(),
[
SomeClassHistory(version=1, name="sc1"),
SomeClassHistory(version=2, name="sc1modified"),
],
)
assert sc.version == 3
sess.commit()
sc.name = "temp"
sc.name = "sc1modified2"
sess.commit()
eq_(
sess.query(SomeClassHistory)
.order_by(SomeClassHistory.version)
.all(),
[
SomeClassHistory(version=1, name="sc1"),
SomeClassHistory(version=2, name="sc1modified"),
],
)
sess.delete(sc)
sess.commit()
eq_(
sess.query(SomeClassHistory)
.order_by(SomeClassHistory.version)
.all(),
[
SomeClassHistory(version=1, name="sc1"),
SomeClassHistory(version=2, name="sc1modified"),
SomeClassHistory(version=3, name="sc1modified2"),
],
)
@testing.variation(
"constraint_type",
[
"index_single_col",
"composite_index",
"explicit_name_index",
"unique_constraint",
"unique_constraint_naming_conv",
"unique_constraint_explicit_name",
"fk_constraint",
"fk_constraint_naming_conv",
"fk_constraint_explicit_name",
],
)
def test_index_naming(self, constraint_type):
"""test #10920"""
if (
constraint_type.unique_constraint_naming_conv
or constraint_type.fk_constraint_naming_conv
):
self.Base.metadata.naming_convention = {
"ix": "ix_%(column_0_label)s",
"uq": "uq_%(table_name)s_%(column_0_name)s",
"fk": (
"fk_%(table_name)s_%(column_0_name)s"
"_%(referred_table_name)s"
),
}
if (
constraint_type.fk_constraint
or constraint_type.fk_constraint_naming_conv
or constraint_type.fk_constraint_explicit_name
):
class Related(self.Base):
__tablename__ = "related"
id = Column(Integer, primary_key=True)
class SomeClass(Versioned, self.Base):
__tablename__ = "sometable"
id = Column(Integer, primary_key=True)
x = Column(Integer)
y = Column(Integer)
# Index objects are copied and these have to have a new name
if constraint_type.index_single_col:
__table_args__ = (
Index(
None,
x,
),
)
elif constraint_type.composite_index:
__table_args__ = (Index(None, x, y),)
elif constraint_type.explicit_name_index:
__table_args__ = (Index("my_index", x, y),)
# unique constraint objects are discarded.
elif (
constraint_type.unique_constraint
or constraint_type.unique_constraint_naming_conv
):
__table_args__ = (UniqueConstraint(x, y),)
elif constraint_type.unique_constraint_explicit_name:
__table_args__ = (UniqueConstraint(x, y, name="my_uq"),)
# foreign key constraint objects are copied and have the same
# name, but no database in Core has any problem with this as the
# names are local to the parent table.
elif (
constraint_type.fk_constraint
or constraint_type.fk_constraint_naming_conv
):
__table_args__ = (ForeignKeyConstraint([x], [Related.id]),)
elif constraint_type.fk_constraint_explicit_name:
__table_args__ = (
ForeignKeyConstraint([x], [Related.id], name="my_fk"),
)
else:
constraint_type.fail()
eq_(
set(idx.name + "_history" for idx in SomeClass.__table__.indexes),
set(
idx.name
for idx in SomeClass.__history_mapper__.local_table.indexes
),
)
self.create_tables()
def test_discussion_9546(self):
class ThingExternal(Versioned, self.Base):
__tablename__ = "things_external"
id = Column(Integer, primary_key=True)
external_attribute = Column(String)
class ThingLocal(Versioned, self.Base):
__tablename__ = "things_local"
id = Column(
Integer, ForeignKey(ThingExternal.id), primary_key=True
)
internal_attribute = Column(String)
is_(ThingExternal.__table__, inspect(ThingExternal).local_table)
class Thing(self.Base):
__table__ = join(ThingExternal, ThingLocal)
id = column_property(ThingExternal.id, ThingLocal.id)
version = column_property(
ThingExternal.version, ThingLocal.version
)
eq_ignore_whitespace(
str(select(Thing)),
"SELECT things_external.id, things_local.id AS id_1, "
"things_external.external_attribute, things_external.version, "
"things_local.version AS version_1, "
"things_local.internal_attribute FROM things_external "
"JOIN things_local ON things_external.id = things_local.id",
)
def test_w_mapper_versioning(self):
class SomeClass(Versioned, self.Base, ComparableEntity):
__tablename__ = "sometable"
use_mapper_versioning = True
id = Column(Integer, primary_key=True)
name = Column(String(50))
self.create_tables()
sess = self.session
sc = SomeClass(name="sc1")
sess.add(sc)
sess.commit()
s2 = Session(sess.bind)
sc2 = s2.query(SomeClass).first()
sc2.name = "sc1modified"
sc.name = "sc1modified_again"
sess.commit()
eq_(sc.version, 2)
assert_raises(orm_exc.StaleDataError, s2.flush)
def test_from_null(self):
class SomeClass(Versioned, self.Base, ComparableEntity):
__tablename__ = "sometable"
id = Column(Integer, primary_key=True)
name = Column(String(50))
self.create_tables()
sess = self.session
sc = SomeClass()
sess.add(sc)
sess.commit()
sc.name = "sc1"
sess.commit()
assert sc.version == 2
def test_insert_null(self):
class SomeClass(Versioned, self.Base, ComparableEntity):
__tablename__ = "sometable"
id = Column(Integer, primary_key=True)
boole = Column(Boolean, default=False)
self.create_tables()
sess = self.session
sc = SomeClass(boole=True)
sess.add(sc)
sess.commit()
sc.boole = None
sess.commit()
sc.boole = False
sess.commit()
SomeClassHistory = SomeClass.__history_mapper__.class_
eq_(
sess.query(SomeClassHistory.boole)
.order_by(SomeClassHistory.id)
.all(),
[(True,), (None,)],
)
eq_(sc.version, 3)
def test_deferred(self):
"""test versioning of unloaded, deferred columns."""
class SomeClass(Versioned, self.Base, ComparableEntity):
__tablename__ = "sometable"
id = Column(Integer, primary_key=True)
name = Column(String(50))
data = deferred(Column(String(25)))
self.create_tables()
sess = self.session
sc = SomeClass(name="sc1", data="somedata")
sess.add(sc)
sess.commit()
sess.close()
sc = sess.query(SomeClass).first()
assert "data" not in sc.__dict__
sc.name = "sc1modified"
sess.commit()
assert sc.version == 2
SomeClassHistory = SomeClass.__history_mapper__.class_
eq_(
sess.query(SomeClassHistory)
.filter(SomeClassHistory.version == 1)
.all(),
[SomeClassHistory(version=1, name="sc1", data="somedata")],
)
def test_joined_inheritance(self):
class BaseClass(Versioned, self.Base, ComparableEntity):
__tablename__ = "basetable"
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(20))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "base",
}
class SubClassSeparatePk(BaseClass):
__tablename__ = "subtable1"
id = column_property(
Column(Integer, primary_key=True), BaseClass.id
)
base_id = Column(Integer, ForeignKey("basetable.id"))
subdata1 = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "sep"}
class SubClassSamePk(BaseClass):
__tablename__ = "subtable2"
id = Column(Integer, ForeignKey("basetable.id"), primary_key=True)
subdata2 = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "same"}
self.create_tables()
sess = self.session
sep1 = SubClassSeparatePk(name="sep1", subdata1="sep1subdata")
base1 = BaseClass(name="base1")
same1 = SubClassSamePk(name="same1", subdata2="same1subdata")
sess.add_all([sep1, base1, same1])
sess.commit()
base1.name = "base1mod"
same1.subdata2 = "same1subdatamod"
sep1.name = "sep1mod"
sess.commit()
BaseClassHistory = BaseClass.__history_mapper__.class_
SubClassSeparatePkHistory = (
SubClassSeparatePk.__history_mapper__.class_
)
SubClassSamePkHistory = SubClassSamePk.__history_mapper__.class_
eq_(
sess.query(BaseClassHistory).order_by(BaseClassHistory.id).all(),
[
SubClassSeparatePkHistory(
id=1, name="sep1", type="sep", version=1
),
BaseClassHistory(id=2, name="base1", type="base", version=1),
SubClassSamePkHistory(
id=3, name="same1", type="same", version=1
),
],
)
same1.subdata2 = "same1subdatamod2"
eq_(
sess.query(BaseClassHistory)
.order_by(BaseClassHistory.id, BaseClassHistory.version)
.all(),
[
SubClassSeparatePkHistory(
id=1, name="sep1", type="sep", version=1
),
BaseClassHistory(id=2, name="base1", type="base", version=1),
SubClassSamePkHistory(
id=3, name="same1", type="same", version=1
),
SubClassSamePkHistory(
id=3, name="same1", type="same", version=2
),
],
)
base1.name = "base1mod2"
eq_(
sess.query(BaseClassHistory)
.order_by(BaseClassHistory.id, BaseClassHistory.version)
.all(),
[
SubClassSeparatePkHistory(
id=1, name="sep1", type="sep", version=1
),
BaseClassHistory(id=2, name="base1", type="base", version=1),
BaseClassHistory(
id=2, name="base1mod", type="base", version=2
),
SubClassSamePkHistory(
id=3, name="same1", type="same", version=1
),
SubClassSamePkHistory(
id=3, name="same1", type="same", version=2
),
],
)
def test_joined_inheritance_multilevel(self):
class BaseClass(Versioned, self.Base, ComparableEntity):
__tablename__ = "basetable"
id = Column(Integer, primary_key=True)
name = Column(String(50))
type = Column(String(20))
__mapper_args__ = {
"polymorphic_on": type,
"polymorphic_identity": "base",
}
class SubClass(BaseClass):
__tablename__ = "subtable"
id = column_property(
Column(Integer, primary_key=True), BaseClass.id
)
base_id = Column(Integer, ForeignKey("basetable.id"))
subdata1 = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "sub"}
class SubSubClass(SubClass):
__tablename__ = "subsubtable"
id = Column(Integer, ForeignKey("subtable.id"), primary_key=True)
subdata2 = Column(String(50))
__mapper_args__ = {"polymorphic_identity": "subsub"}
self.create_tables()
SubSubHistory = SubSubClass.__history_mapper__.class_
sess = self.session
q = sess.query(SubSubHistory)
self.assert_compile(
q,
"SELECT "
"basetable_history.name AS basetable_history_name, "
"basetable_history.type AS basetable_history_type, "
"subsubtable_history.version AS subsubtable_history_version, "
"subtable_history.version AS subtable_history_version, "
"basetable_history.version AS basetable_history_version, "
"subtable_history.base_id AS subtable_history_base_id, "
"subtable_history.subdata1 AS subtable_history_subdata1, "
"subsubtable_history.id AS subsubtable_history_id, "
"subtable_history.id AS subtable_history_id, "
"basetable_history.id AS basetable_history_id, "
"subsubtable_history.changed AS subsubtable_history_changed, "
"subtable_history.changed AS subtable_history_changed, "
"basetable_history.changed AS basetable_history_changed, "
"subsubtable_history.subdata2 AS subsubtable_history_subdata2 "
"FROM basetable_history "
"JOIN subtable_history "
"ON basetable_history.id = subtable_history.base_id "
"AND basetable_history.version = subtable_history.version "
"JOIN subsubtable_history ON subtable_history.id = "
"subsubtable_history.id AND subtable_history.version = "
"subsubtable_history.version",
)
ssc = SubSubClass(name="ss1", subdata1="sd1", subdata2="sd2")
sess.add(ssc)
sess.commit()
eq_(sess.query(SubSubHistory).all(), [])
ssc.subdata1 = "sd11"
ssc.subdata2 = "sd22"
sess.commit()
eq_(
sess.query(SubSubHistory).all(),
[
SubSubHistory(
name="ss1",
subdata1="sd1",
subdata2="sd2",
type="subsub",
version=1,
)
],
)
eq_(
ssc,
SubSubClass(
name="ss1", subdata1="sd11", subdata2="sd22", version=2
),
)
    def test_joined_inheritance_changed(self):
        """History rows written in one flush must share a single 'changed'
        timestamp across the base and sub tables of a joined hierarchy."""
        class BaseClass(Versioned, self.Base, ComparableEntity):
            __tablename__ = "basetable"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))
            type = Column(String(20))
            __mapper_args__ = {
                "polymorphic_on": type,
                "polymorphic_identity": "base",
            }
        class SubClass(BaseClass):
            __tablename__ = "subtable"
            id = Column(Integer, ForeignKey("basetable.id"), primary_key=True)
            __mapper_args__ = {"polymorphic_identity": "sep"}
        self.create_tables()
        BaseClassHistory = BaseClass.__history_mapper__.class_
        SubClassHistory = SubClass.__history_mapper__.class_
        sess = self.session
        s1 = SubClass(name="s1")
        sess.add(s1)
        sess.commit()
        s1.name = "s2"  # mutate so the next commit writes a history row
        sess.commit()
        # Read the raw 'changed' values straight from both history tables.
        actual_changed_base = sess.scalar(
            select(BaseClass.__history_mapper__.local_table.c.changed)
        )
        actual_changed_sub = sess.scalar(
            select(SubClass.__history_mapper__.local_table.c.changed)
        )
        # Whichever history entity is queried, 'changed' must agree with both.
        h1 = sess.query(BaseClassHistory).first()
        eq_(h1.changed, actual_changed_base)
        eq_(h1.changed, actual_changed_sub)
        h1 = sess.query(SubClassHistory).first()
        eq_(h1.changed, actual_changed_base)
        eq_(h1.changed, actual_changed_sub)
    def test_single_inheritance(self):
        """Versioning with single-table inheritance: history rows keep the
        polymorphic type, and unique constraints on subclass columns are not
        copied into the history table."""
        class BaseClass(Versioned, self.Base, ComparableEntity):
            __tablename__ = "basetable"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))
            type = Column(String(50))
            __mapper_args__ = {
                "polymorphic_on": type,
                "polymorphic_identity": "base",
            }
        class SubClass(BaseClass):
            subname = Column(String(50), unique=True)
            __mapper_args__ = {"polymorphic_identity": "sub"}
        self.create_tables()
        sess = self.session
        b1 = BaseClass(name="b1")
        sc = SubClass(name="s1", subname="sc1")
        sess.add_all([b1, sc])
        sess.commit()
        b1.name = "b1modified"
        BaseClassHistory = BaseClass.__history_mapper__.class_
        SubClassHistory = SubClass.__history_mapper__.class_
        # Only b1 was modified, so exactly one history row exists so far.
        eq_(
            sess.query(BaseClassHistory)
            .order_by(BaseClassHistory.id, BaseClassHistory.version)
            .all(),
            [BaseClassHistory(id=1, name="b1", type="base", version=1)],
        )
        sc.name = "s1modified"
        b1.name = "b1modified2"
        # Subclass history rows come back as SubClassHistory instances.
        eq_(
            sess.query(BaseClassHistory)
            .order_by(BaseClassHistory.id, BaseClassHistory.version)
            .all(),
            [
                BaseClassHistory(id=1, name="b1", type="base", version=1),
                BaseClassHistory(
                    id=1, name="b1modified", type="base", version=2
                ),
                SubClassHistory(id=2, name="s1", type="sub", version=1),
            ],
        )
        # test the unique constraint on the subclass
        # column
        sc.name = "modifyagain"
        sess.flush()
    def test_unique(self):
        """Repeated updates of a row carrying a unique column must keep
        incrementing the version without tripping the unique constraint."""
        class SomeClass(Versioned, self.Base, ComparableEntity):
            __tablename__ = "sometable"
            id = Column(Integer, primary_key=True)
            name = Column(String(50), unique=True)
            data = Column(String(50))
        self.create_tables()
        sess = self.session
        sc = SomeClass(name="sc1", data="sc1")
        sess.add(sc)
        sess.commit()
        sc.data = "sc1modified"
        sess.commit()
        assert sc.version == 2
        sc.data = "sc1modified2"
        sess.commit()
        assert sc.version == 3
    def test_relationship(self):
        """Changing a many-to-one foreign key bumps the version and records
        the prior related_id in the history table."""
        class SomeRelated(self.Base, ComparableEntity):
            __tablename__ = "somerelated"
            id = Column(Integer, primary_key=True)
        class SomeClass(Versioned, self.Base, ComparableEntity):
            __tablename__ = "sometable"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))
            related_id = Column(Integer, ForeignKey("somerelated.id"))
            related = relationship("SomeRelated", backref="classes")
        SomeClassHistory = SomeClass.__history_mapper__.class_
        self.create_tables()
        sess = self.session
        sc = SomeClass(name="sc1")
        sess.add(sc)
        sess.commit()
        assert sc.version == 1
        sr1 = SomeRelated()
        sc.related = sr1  # FK change: version 1 -> 2
        sess.commit()
        assert sc.version == 2
        eq_(
            sess.query(SomeClassHistory)
            .filter(SomeClassHistory.version == 1)
            .all(),
            [SomeClassHistory(version=1, name="sc1", related_id=None)],
        )
        sc.related = None  # FK cleared: version 2 -> 3 on next flush
        eq_(
            sess.query(SomeClassHistory)
            .order_by(SomeClassHistory.version)
            .all(),
            [
                SomeClassHistory(version=1, name="sc1", related_id=None),
                SomeClassHistory(version=2, name="sc1", related_id=sr1.id),
            ],
        )
        assert sc.version == 3
    def test_backref_relationship(self):
        """Changes on the *related* (non-versioned) side of a backref must
        never bump the versioned object's version."""
        class SomeRelated(self.Base, ComparableEntity):
            __tablename__ = "somerelated"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))
            related_id = Column(Integer, ForeignKey("sometable.id"))
            related = relationship("SomeClass", backref="related")
        class SomeClass(Versioned, self.Base, ComparableEntity):
            __tablename__ = "sometable"
            id = Column(Integer, primary_key=True)
        self.create_tables()
        sess = self.session
        sc = SomeClass()
        sess.add(sc)
        sess.commit()
        assert sc.version == 1
        sr = SomeRelated(name="sr", related=sc)
        sess.add(sr)
        sess.commit()
        assert sc.version == 1  # adding a child leaves the parent untouched
        sr.name = "sr2"
        sess.commit()
        assert sc.version == 1  # modifying the child: still untouched
        sess.delete(sr)
        sess.commit()
        assert sc.version == 1  # deleting the child: still untouched
    def test_create_double_flush(self):
        """A modification flushed after the initial INSERT flush (same
        transaction) still produces a second version."""
        class SomeClass(Versioned, self.Base, ComparableEntity):
            __tablename__ = "sometable"
            id = Column(Integer, primary_key=True)
            name = Column(String(30))
            other = Column(String(30))
        self.create_tables()
        sc = SomeClass()
        self.session.add(sc)
        self.session.flush()
        sc.name = "Foo"
        self.session.flush()
        assert sc.version == 2
    def test_mutate_plain_column(self):
        """Mutating an ordinarily-named column archives the old value."""
        class Document(self.Base, Versioned):
            __tablename__ = "document"
            id = Column(Integer, primary_key=True, autoincrement=True)
            name = Column(String, nullable=True)
            # attribute name differs from the DB column name ("description")
            description_ = Column("description", String, nullable=True)
        self.create_tables()
        document = Document()
        self.session.add(document)
        document.name = "Foo"
        self.session.commit()
        document.name = "Bar"
        self.session.commit()
        DocumentHistory = Document.__history_mapper__.class_
        v2 = self.session.query(Document).one()
        v1 = self.session.query(DocumentHistory).one()
        eq_(v1.id, v2.id)
        eq_(v2.name, "Bar")  # live row has the new value
        eq_(v1.name, "Foo")  # history row preserves the old one
    def test_mutate_named_column(self):
        """Mutating a column whose attribute name differs from its DB name
        ("description_" -> "description") archives the old value too."""
        class Document(self.Base, Versioned):
            __tablename__ = "document"
            id = Column(Integer, primary_key=True, autoincrement=True)
            name = Column(String, nullable=True)
            description_ = Column("description", String, nullable=True)
        self.create_tables()
        document = Document()
        self.session.add(document)
        document.description_ = "Foo"
        self.session.commit()
        document.description_ = "Bar"
        self.session.commit()
        DocumentHistory = Document.__history_mapper__.class_
        v2 = self.session.query(Document).one()
        v1 = self.session.query(DocumentHistory).one()
        eq_(v1.id, v2.id)
        eq_(v2.description_, "Bar")
        eq_(v1.description_, "Foo")
    def test_unique_identifiers_across_deletes(self):
        """Ensure unique integer values are used for the primary table.
        Checks whether the database assigns the same identifier twice
        within the span of a table. SQLite will do this if
        sqlite_autoincrement is not set (e.g. SQLite's AUTOINCREMENT flag).
        """
        class SomeClass(Versioned, self.Base, ComparableEntity):
            __tablename__ = "sometable"
            id = Column(Integer, primary_key=True)
            name = Column(String(50))
        self.create_tables()
        sess = self.session
        sc = SomeClass(name="sc1")
        sess.add(sc)
        sess.commit()
        sess.delete(sc)  # deletion writes sc1 into the history table
        sess.commit()
        sc2 = SomeClass(name="sc2")
        sess.add(sc2)
        sess.commit()
        SomeClassHistory = SomeClass.__history_mapper__.class_
        # only one entry should exist in the history table; one()
        # ensures that
        scdeleted = sess.query(SomeClassHistory).one()
        # If sc2 has the same id that deleted sc1 had,
        # it will fail when modified or deleted
        # because of the violation of the uniqueness of the primary key on
        # sometable_history
        ne_(sc2.id, scdeleted.id)
        # If previous assertion fails, this will also fail:
        sc2.name = "sc2 modified"
        sess.commit()
    def test_external_id(self):
        """Versioning with a composite, externally-assigned (string) primary
        key: updates, deletion, and re-creation under the same key all keep
        the version counter increasing and the history intact."""
        class ObjectExternal(Versioned, self.Base, ComparableEntity):
            __tablename__ = "externalobjects"
            id1 = Column(String(3), primary_key=True)
            id2 = Column(String(3), primary_key=True)
            name = Column(String(50))
        self.create_tables()
        sess = self.session
        sc = ObjectExternal(id1="aaa", id2="bbb", name="sc1")
        sess.add(sc)
        sess.commit()
        sc.name = "sc1modified"
        sess.commit()
        assert sc.version == 2
        ObjectExternalHistory = ObjectExternal.__history_mapper__.class_
        eq_(
            sess.query(ObjectExternalHistory).all(),
            [
                ObjectExternalHistory(
                    version=1, id1="aaa", id2="bbb", name="sc1"
                ),
            ],
        )
        sess.delete(sc)
        sess.commit()
        assert sess.query(ObjectExternal).count() == 0
        # Deletion archives version 2 alongside version 1.
        eq_(
            sess.query(ObjectExternalHistory).all(),
            [
                ObjectExternalHistory(
                    version=1, id1="aaa", id2="bbb", name="sc1"
                ),
                ObjectExternalHistory(
                    version=2, id1="aaa", id2="bbb", name="sc1modified"
                ),
            ],
        )
        # Re-create the same composite key: versioning resumes at 3 rather
        # than restarting, so history primary keys never collide.
        sc = ObjectExternal(id1="aaa", id2="bbb", name="sc1reappeared")
        sess.add(sc)
        sess.commit()
        assert sc.version == 3
        sc.name = "sc1reappearedmodified"
        sess.commit()
        assert sc.version == 4
        eq_(
            sess.query(ObjectExternalHistory).all(),
            [
                ObjectExternalHistory(
                    version=1, id1="aaa", id2="bbb", name="sc1"
                ),
                ObjectExternalHistory(
                    version=2, id1="aaa", id2="bbb", name="sc1modified"
                ),
                ObjectExternalHistory(
                    version=3, id1="aaa", id2="bbb", name="sc1reappeared"
                ),
            ],
        )
| TestVersioning |
python | ipython__ipython | tests/test_completer.py | {
"start": 7117,
"end": 95922
} | class ____(unittest.TestCase):
def setUp(self):
"""
We want to silence all PendingDeprecationWarning when testing the completer
"""
self._assertwarns = self.assertWarns(PendingDeprecationWarning)
self._assertwarns.__enter__()
def tearDown(self):
try:
self._assertwarns.__exit__(None, None, None)
except AssertionError:
pass
    def test_custom_completion_error(self):
        """Test that errors from custom attribute completers are silenced."""
        ip = get_ipython()
        class A:
            pass
        ip.user_ns["x"] = A()
        @complete_object.register(A)
        def complete_A(a, existing_completions):
            raise TypeError("this should be silenced")
        # Completing "x." dispatches to complete_A; the raised TypeError
        # must not propagate out of ip.complete.
        ip.complete("x.")
    def test_custom_completion_ordering(self):
        """Completion results must preserve the order completers return,
        not be re-sorted alphabetically."""
        ip = get_ipython()
        _, matches = ip.complete("in")
        # built-in ordering: "input" is offered before "int"
        assert matches.index("input") < matches.index("int")
        def complete_example(a):
            return ["example2", "example1"]
        # NOTE: this registration persists in the completer for the session.
        ip.Completer.custom_completers.add_re("ex*", complete_example)
        _, matches = ip.complete("ex")
        assert matches.index("example2") < matches.index("example1")
    def test_unicode_completions(self):
        """Completing assorted inputs must never raise and must always
        return a (str, list) pair."""
        ip = get_ipython()
        # Some strings that trigger different types of completion. Check them both
        # in str and unicode forms
        s = ["ru", "%ru", "cd /", "floa", "float(x)/"]
        for t in s + list(map(str, s)):
            # We don't need to check exact completion values (they may change
            # depending on the state of the namespace, but at least no exceptions
            # should be thrown and the return value should be a pair of text, list
            # values.
            text, matches = ip.complete(t)
            self.assertIsInstance(text, str)
            self.assertIsInstance(matches, list)
    def test_latex_completions(self):
        """Latex escape sequences complete to their unicode characters,
        both standalone and embedded in a longer line."""
        ip = get_ipython()
        # Test some random unicode symbols
        keys = random.sample(sorted(latex_symbols), 10)
        for k in keys:
            text, matches = ip.complete(k)
            self.assertEqual(text, k)
            self.assertEqual(matches, [latex_symbols[k]])
        # Test a more complex line
        text, matches = ip.complete("print(\\alpha")
        self.assertEqual(text, "\\alpha")
        self.assertEqual(matches[0], latex_symbols["\\alpha"])
        # Test multiple matching latex symbols
        text, matches = ip.complete("\\al")
        self.assertIn("\\alpha", matches)
        self.assertIn("\\aleph", matches)
def test_latex_no_results(self):
"""
forward latex should really return nothing in either field if nothing is found.
"""
ip = get_ipython()
text, matches = ip.Completer.latex_matches("\\really_i_should_match_nothing")
self.assertEqual(text, "")
self.assertEqual(matches, ())
def test_back_latex_completion(self):
ip = get_ipython()
# do not return more than 1 matches for \beta, only the latex one.
name, matches = ip.complete("\\β")
self.assertEqual(matches, ["\\beta"])
def test_back_unicode_completion(self):
ip = get_ipython()
name, matches = ip.complete("\\Ⅴ")
self.assertEqual(matches, ["\\ROMAN NUMERAL FIVE"])
def test_forward_unicode_completion(self):
ip = get_ipython()
name, matches = ip.complete("\\ROMAN NUMERAL FIVE")
self.assertEqual(matches, ["Ⅴ"]) # This is not a V
self.assertEqual(matches, ["\u2164"]) # same as above but explicit.
def test_delim_setting(self):
sp = completer.CompletionSplitter()
sp.delims = " "
self.assertEqual(sp.delims, " ")
self.assertEqual(sp._delim_expr, r"[\ ]")
def test_spaces(self):
"""Test with only spaces as split chars."""
sp = completer.CompletionSplitter()
sp.delims = " "
t = [("foo", "", "foo"), ("run foo", "", "foo"), ("run foo", "bar", "foo")]
check_line_split(sp, t)
def test_has_open_quotes1(self):
for s in ["'", "'''", "'hi' '"]:
self.assertEqual(completer.has_open_quotes(s), "'")
def test_has_open_quotes2(self):
for s in ['"', '"""', '"hi" "']:
self.assertEqual(completer.has_open_quotes(s), '"')
def test_has_open_quotes3(self):
for s in ["''", "''' '''", "'hi' 'ipython'"]:
self.assertFalse(completer.has_open_quotes(s))
def test_has_open_quotes4(self):
for s in ['""', '""" """', '"hi" "ipython"']:
self.assertFalse(completer.has_open_quotes(s))
    @pytest.mark.xfail(
        sys.platform == "win32", reason="abspath completions fail on Windows"
    )
    def test_abspath_file_completions(self):
        """Absolute filesystem paths complete, bare and inside a call."""
        ip = get_ipython()
        with TemporaryDirectory() as tmpdir:
            prefix = os.path.join(tmpdir, "foo")
            suffixes = ["1", "2"]
            names = [prefix + s for s in suffixes]
            for n in names:
                open(n, "w", encoding="utf-8").close()
            # Check simple completion
            c = ip.complete(prefix)[1]
            self.assertEqual(c, names)
            # Now check with a function call
            cmd = 'a = f("%s' % prefix
            c = ip.complete(prefix, cmd)[1]
            comp = [prefix + s for s in suffixes]
            self.assertEqual(c, comp)
    def test_local_file_completions(self):
        """Relative ("./") paths complete bare and after calls, shell bangs,
        magics, and aliases."""
        ip = get_ipython()
        with TemporaryWorkingDirectory():
            prefix = "./foo"
            suffixes = ["1", "2"]
            names = [prefix + s for s in suffixes]
            for n in names:
                open(n, "w", encoding="utf-8").close()
            # Check simple completion
            c = ip.complete(prefix)[1]
            self.assertEqual(c, names)
            test_cases = {
                "function call": 'a = f("',
                "shell bang": "!ls ",
                "ls magic": r"%ls ",
                "alias ls": "ls ",
            }
            for name, code in test_cases.items():
                cmd = f"{code}{prefix}"
                c = ip.complete(prefix, cmd)[1]
                comp = {prefix + s for s in suffixes}
                self.assertTrue(comp.issubset(set(c)), msg=f"completes in {name}")
    def test_quoted_file_completions(self):
        """Filenames containing quotes are escaped (or not) depending on the
        surrounding quoting context."""
        ip = get_ipython()
        def _(text):
            return ip.Completer._complete(
                cursor_line=0, cursor_pos=len(text), full_text=text
            )["IPCompleter.file_matcher"]["completions"]
        with TemporaryWorkingDirectory():
            name = "foo'bar"
            open(name, "w", encoding="utf-8").close()
            # Don't escape Windows
            escaped = name if sys.platform == "win32" else "foo\\'bar"
            # Single quote matches embedded single quote
            c = _("open('foo")[0]
            self.assertEqual(c.text, escaped)
            # Double quote requires no escape
            c = _('open("foo')[0]
            self.assertEqual(c.text, name)
            # No quote requires an escape
            c = _("%ls foo")[0]
            self.assertEqual(c.text, escaped)
    @pytest.mark.xfail(
        sys.version_info.releaselevel in ("alpha",),
        reason="Parso does not yet parse 3.13",
    )
    def test_all_completions_dups(self):
        """
        Make sure the output of `IPCompleter.all_completions` does not have
        duplicated prefixes.
        """
        ip = get_ipython()
        c = ip.Completer
        ip.ex("class TestClass():\n\ta=1\n\ta1=2")
        # Exercise both the jedi and non-jedi code paths.
        for jedi_status in [True, False]:
            with provisionalcompleter():
                ip.Completer.use_jedi = jedi_status
                matches = c.all_completions("TestCl")
                assert matches == ["TestClass"], (jedi_status, matches)
                matches = c.all_completions("TestClass.")
                assert len(matches) > 2, (jedi_status, matches)
                matches = c.all_completions("TestClass.a")
                # jedi returns full dotted names; the fallback returns suffixes
                if jedi_status:
                    assert matches == ["TestClass.a", "TestClass.a1"], jedi_status
                else:
                    assert matches == [".a", ".a1"], jedi_status
    @pytest.mark.xfail(
        sys.version_info.releaselevel in ("alpha",),
        reason="Parso does not yet parse 3.13",
    )
    def test_jedi(self):
        """
        A couple of issue we had with Jedi
        """
        ip = get_ipython()
        # Assert that `comp` is offered for source `s` at [start, end).
        def _test_complete(reason, s, comp, start=None, end=None):
            l = len(s)
            start = start if start is not None else l
            end = end if end is not None else l
            with provisionalcompleter():
                ip.Completer.use_jedi = True
                completions = set(ip.Completer.completions(s, l))
                ip.Completer.use_jedi = False
            assert Completion(start, end, comp) in completions, reason
        # Assert that `comp` is NOT offered for source `s`.
        def _test_not_complete(reason, s, comp):
            l = len(s)
            with provisionalcompleter():
                ip.Completer.use_jedi = True
                completions = set(ip.Completer.completions(s, l))
                ip.Completer.use_jedi = False
            assert Completion(l, l, comp) not in completions, reason
        import jedi
        jedi_version = tuple(int(i) for i in jedi.__version__.split(".")[:3])
        if jedi_version > (0, 10):
            _test_complete("jedi >0.9 should complete and not crash", "a=1;a.", "real")
        _test_complete("can infer first argument", 'a=(1,"foo");a[0].', "real")
        _test_complete("can infer second argument", 'a=(1,"foo");a[1].', "capitalize")
        _test_complete("cover duplicate completions", "im", "import", 0, 2)
        _test_not_complete("does not mix types", 'a=(1,"foo");a[0].', "capitalize")
    @pytest.mark.xfail(
        sys.version_info.releaselevel in ("alpha",),
        reason="Parso does not yet parse 3.13",
    )
    def test_completion_have_signature(self):
        """
        Lets make sure jedi is capable of pulling out the signature of the function we are completing.
        """
        ip = get_ipython()
        with provisionalcompleter():
            ip.Completer.use_jedi = True
            completions = ip.Completer.completions("ope", 3)
            c = next(completions)  # should be `open`
            ip.Completer.use_jedi = False
        # open()'s signature mentions both 'file' and 'encoding'.
        assert "file" in c.signature, "Signature of function was not found by completer"
        assert (
            "encoding" in c.signature
        ), "Signature of function was not found by completer"
    @pytest.mark.xfail(
        sys.version_info.releaselevel in ("alpha",),
        reason="Parso does not yet parse 3.13",
    )
    def test_completions_have_type(self):
        """
        Lets make sure matchers provide completion type.
        """
        ip = get_ipython()
        with provisionalcompleter():
            ip.Completer.use_jedi = False
            completions = ip.Completer.completions("%tim", 3)
            c = next(completions)  # should be `%time` or similar
        assert c.type == "magic", "Type of magic was not assigned by completer"
    @pytest.mark.xfail(
        parse(version("jedi")) <= parse("0.18.0"),
        reason="Known failure on jedi<=0.18.0",
        strict=True,
    )
    def test_deduplicate_completions(self):
        """
        Test that completions are correctly deduplicated (even if ranges are not the same)
        """
        ip = get_ipython()
        ip.ex(
            textwrap.dedent(
                """
            class Z:
                zoo = 1
            """
            )
        )
        with provisionalcompleter():
            ip.Completer.use_jedi = True
            l = list(
                _deduplicate_completions("Z.z", ip.Completer.completions("Z.z", 3))
            )
            ip.Completer.use_jedi = False
        # Exactly one completion must survive deduplication.
        assert len(l) == 1, "Completions (Z.z<tab>) correctly deduplicate: %s " % l
        assert l[0].text == "zoo"  # and not `it.accumulate`
    @pytest.mark.xfail(
        sys.version_info.releaselevel in ("alpha",),
        reason="Parso does not yet parse 3.13",
    )
    def test_greedy_completions(self):
        """
        Test the capability of the Greedy completer.
        Most of the test here does not really show off the greedy completer, for proof
        each of the text below now pass with Jedi. The greedy completer is capable of more.
        See the :any:`test_dict_key_completion_contexts`
        """
        ip = get_ipython()
        ip.ex("a=list(range(5))")
        ip.ex("b,c = 1, 1.2")
        ip.ex("d = {'a b': str}")
        ip.ex("x=y='a'")
        # Without greedy mode, subscript attribute access does not complete.
        _, c = ip.complete(".", line="a[0].")
        self.assertFalse(".real" in c, "Shouldn't have completed on a[0]: %s" % c)
        # Helper: under greedy mode, check both the legacy complete() API and
        # the provisional completions() API agree on `expect`/`completion`.
        def _(line, cursor_pos, expect, message, completion):
            with greedy_completion(), provisionalcompleter():
                ip.Completer.use_jedi = False
                _, c = ip.complete(".", line=line, cursor_pos=cursor_pos)
                self.assertIn(expect, c, message % c)
                ip.Completer.use_jedi = True
                with provisionalcompleter():
                    completions = ip.Completer.completions(line, cursor_pos)
                self.assertIn(completion, list(completions))
        with provisionalcompleter():
            _(
                "a[0].",
                5,
                ".real",
                "Should have completed on a[0].: %s",
                Completion(5, 5, "real"),
            )
            _(
                "a[0].r",
                6,
                ".real",
                "Should have completed on a[0].r: %s",
                Completion(5, 6, "real"),
            )
            _(
                "a[0].from_",
                10,
                ".from_bytes",
                "Should have completed on a[0].from_: %s",
                Completion(5, 10, "from_bytes"),
            )
            _(
                "assert str.star",
                14,
                ".startswith",
                "Should have completed on `assert str.star`: %s",
                Completion(11, 14, "startswith"),
            )
            _(
                "d['a b'].str",
                12,
                ".strip",
                "Should have completed on `d['a b'].str`: %s",
                Completion(9, 12, "strip"),
            )
            _(
                "a.app",
                4,
                ".append",
                "Should have completed on `a.app`: %s",
                Completion(2, 4, "append"),
            )
            _(
                "x.upper() == y.",
                15,
                ".upper",
                "Should have completed on `x.upper() == y.`: %s",
                Completion(15, 15, "upper"),
            )
            _(
                "(x.upper() == y.",
                16,
                ".upper",
                "Should have completed on `(x.upper() == y.`: %s",
                Completion(16, 16, "upper"),
            )
            _(
                "(x.upper() == y).",
                17,
                ".bit_length",
                "Should have completed on `(x.upper() == y).`: %s",
                Completion(17, 17, "bit_length"),
            )
            _(
                "{'==', 'abc'}.",
                14,
                ".add",
                "Should have completed on `{'==', 'abc'}.`: %s",
                Completion(14, 14, "add"),
            )
            _(
                "b + c.",
                6,
                ".hex",
                "Should have completed on `b + c.`: %s",
                Completion(6, 6, "hex"),
            )
    def test_omit__names(self):
        """omit__names levels: 0 shows dunders and _private, 1 hides dunders
        only, 2 hides both; dict attributes like .keys stay visible."""
        # also happens to test IPCompleter as a configurable
        ip = get_ipython()
        ip._hidden_attr = 1
        ip._x = {}
        c = ip.Completer
        ip.ex("ip=get_ipython()")
        cfg = Config()
        cfg.IPCompleter.omit__names = 0
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            self.assertIn(".__str__", matches)
            self.assertIn("._hidden_attr", matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # self.assertIn(Completion(3, 3, '__str__'), completions)
            # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
        cfg = Config()
        cfg.IPCompleter.omit__names = 1
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            self.assertNotIn(".__str__", matches)
            # self.assertIn('ip._hidden_attr', matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # self.assertNotIn(Completion(3,3,'__str__'), completions)
            # self.assertIn(Completion(3,3, "_hidden_attr"), completions)
        cfg = Config()
        cfg.IPCompleter.omit__names = 2
        c.update_config(cfg)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip.")
            self.assertNotIn(".__str__", matches)
            self.assertNotIn("._hidden_attr", matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip.', 3))
            # self.assertNotIn(Completion(3,3,'__str__'), completions)
            # self.assertNotIn(Completion(3,3, "_hidden_attr"), completions)
        with provisionalcompleter():
            c.use_jedi = False
            s, matches = c.complete("ip._x.")
            self.assertIn(".keys", matches)
            # c.use_jedi = True
            # completions = set(c.completions('ip._x.', 6))
            # self.assertIn(Completion(6,6, "keys"), completions)
        # clean up the attributes injected on the shell object above
        del ip._hidden_attr
        del ip._x
    def test_limit_to__all__False_ok(self):
        """
        Limit to all is deprecated, once we remove it this test can go away.
        """
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        ip.ex("class D: x=24")
        ip.ex("d=D()")
        cfg = Config()
        cfg.IPCompleter.limit_to__all__ = False
        c.update_config(cfg)
        # with the limit disabled, plain attributes must still complete
        s, matches = c.complete("d.")
        self.assertIn(".x", matches)
def test_get__all__entries_ok(self):
class A:
__all__ = ["x", 1]
words = completer.get__all__entries(A())
self.assertEqual(words, ["x"])
def test_get__all__entries_no__all__ok(self):
class A:
pass
words = completer.get__all__entries(A())
self.assertEqual(words, [])
def test_completes_globals_as_args_of_methods(self):
ip = get_ipython()
c = ip.Completer
c.use_jedi = False
ip.ex("long_variable_name = 1")
ip.ex("a = []")
s, matches = c.complete(None, "a.sort(lo")
self.assertIn("long_variable_name", matches)
    def test_completes_attributes_in_fstring_expressions(self):
        """Attribute access inside an f-string expression must complete."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        class CustomClass:
            def method_one(self):
                pass
        ip.user_ns["custom_obj"] = CustomClass()
        # Test completion inside f-string expressions
        s, matches = c.complete(None, "f'{custom_obj.meth")
        self.assertIn(".method_one", matches)
    def test_completes_in_dict_expressions(self):
        """Attribute and global completion must both work inside dict
        literals, even when keys contain dots."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        ip.ex("class Test: pass")
        ip.ex("test_obj = Test()")
        ip.ex("test_obj.attribute = 'value'")
        # Test completion in dictionary expressions
        s, matches = c.complete(None, "d = {'key': test_obj.attr")
        self.assertIn(".attribute", matches)
        # Test global completion in dictionary expressions with dots
        s, matches = c.complete(None, "d = {'k.e.y': Te")
        self.assertIn("Test", matches)
    def test_func_kw_completions(self):
        """Keyword-argument names complete inside call parentheses, for both
        user functions and builtins."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        ip.ex("def myfunc(a=1,b=2): return a+b")
        s, matches = c.complete(None, "myfunc(1,b")
        self.assertIn("b=", matches)
        # Simulate completing with cursor right after b (pos==10):
        s, matches = c.complete(None, "myfunc(1,b)", 10)
        self.assertIn("b=", matches)
        # escaped quotes inside an argument string must not confuse the parser
        s, matches = c.complete(None, 'myfunc(a="escaped\\")string",b')
        self.assertIn("b=", matches)
        # builtin function
        s, matches = c.complete(None, "min(k, k")
        self.assertIn("key=", matches)
    def test_default_arguments_from_docstring(self):
        """Keyword names are extracted from docstring signature lines,
        including Cython-style typed parameters and leading whitespace."""
        ip = get_ipython()
        c = ip.Completer
        kwd = c._default_arguments_from_docstring("min(iterable[, key=func]) -> value")
        self.assertEqual(kwd, ["key"])
        # with cython type etc
        kwd = c._default_arguments_from_docstring(
            "Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
        )
        self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
        # white spaces
        kwd = c._default_arguments_from_docstring(
            "\n Minuit.migrad(self, int ncall=10000, resume=True, int nsplit=1)\n"
        )
        self.assertEqual(kwd, ["ncall", "resume", "nsplit"])
def test_line_magics(self):
ip = get_ipython()
c = ip.Completer
s, matches = c.complete(None, "lsmag")
self.assertIn("%lsmagic", matches)
s, matches = c.complete(None, "%lsmag")
self.assertIn("%lsmagic", matches)
    def test_cell_magics(self):
        """A freshly registered cell magic completes with and without %%."""
        from IPython.core.magic import register_cell_magic
        @register_cell_magic
        def _foo_cellm(line, cell):
            pass
        ip = get_ipython()
        c = ip.Completer
        s, matches = c.complete(None, "_foo_ce")
        self.assertIn("%%_foo_cellm", matches)
        s, matches = c.complete(None, "%%_foo_ce")
        self.assertIn("%%_foo_cellm", matches)
    def test_line_cell_magics(self):
        """A combined line+cell magic offers both forms, except that an
        explicit %% prefix excludes the line form."""
        from IPython.core.magic import register_line_cell_magic
        @register_line_cell_magic
        def _bar_cellm(line, cell):
            pass
        ip = get_ipython()
        c = ip.Completer
        # The policy here is trickier, see comments in completion code. The
        # returned values depend on whether the user passes %% or not explicitly,
        # and this will show a difference if the same name is both a line and cell
        # magic.
        s, matches = c.complete(None, "_bar_ce")
        self.assertIn("%_bar_cellm", matches)
        self.assertIn("%%_bar_cellm", matches)
        s, matches = c.complete(None, "%_bar_ce")
        self.assertIn("%_bar_cellm", matches)
        self.assertIn("%%_bar_cellm", matches)
        s, matches = c.complete(None, "%%_bar_ce")
        self.assertNotIn("%_bar_cellm", matches)
        self.assertIn("%%_bar_cellm", matches)
    def test_line_magics_with_code_argument(self):
        """Code arguments of line magics (%timeit, %debug, ...) complete like
        ordinary code; unknown magic options disable completion gracefully."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        # attribute completion
        text, matches = c.complete("%timeit -n 2 -r 1 float.as_integer")
        self.assertEqual(matches, [".as_integer_ratio"])
        text, matches = c.complete("%debug --breakpoint test float.as_integer")
        self.assertEqual(matches, [".as_integer_ratio"])
        text, matches = c.complete("%time --no-raise-error float.as_integer")
        self.assertEqual(matches, [".as_integer_ratio"])
        text, matches = c.complete("%prun -l 0.5 -r float.as_integer")
        self.assertEqual(matches, [".as_integer_ratio"])
        # implicit magics
        text, matches = c.complete("timeit -n 2 -r 1 float.as_integer")
        self.assertEqual(matches, [".as_integer_ratio"])
        # built-ins completion
        text, matches = c.complete("%timeit -n 2 -r 1 flo")
        self.assertEqual(matches, ["float"])
        # dict completion
        text, matches = c.complete("%timeit -n 2 -r 1 {'my_key': 1}['my")
        self.assertEqual(matches, ["my_key"])
        # invalid arguments - should not throw
        text, matches = c.complete("%timeit -n 2 -r 1 -invalid float.as_integer")
        self.assertEqual(matches, [])
        text, matches = c.complete("%debug --invalid float.as_integer")
        self.assertEqual(matches, [])
    def test_line_magics_with_code_argument_shadowing(self):
        """A user variable shadowing a magic name disables implicit-magic
        argument completion but not the explicit % form."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        # shadow
        ip.run_cell("timeit = 1")
        # should not suggest on implict magic when shadowed
        text, matches = c.complete("timeit -n 2 -r 1 flo")
        self.assertEqual(matches, [])
        # should suggest on explicit magic
        text, matches = c.complete("%timeit -n 2 -r 1 flo")
        self.assertEqual(matches, ["float"])
        # remove shadow
        del ip.user_ns["timeit"]
        # should suggest on implicit magic after shadow removal
        text, matches = c.complete("timeit -n 2 -r 1 flo")
        self.assertEqual(matches, ["float"])
def test_magic_completion_order(self):
ip = get_ipython()
c = ip.Completer
# Test ordering of line and cell magics.
text, matches = c.complete("timeit")
self.assertEqual(matches, ["%timeit", "%%timeit"])
    def test_magic_completion_shadowing(self):
        """A namespace variable shadows the same-named magic in completions,
        and the magic reappears once the variable is deleted."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = False
        # Before importing matplotlib, %matplotlib magic should be the only option.
        text, matches = c.complete("mat")
        self.assertEqual(matches, ["%matplotlib"])
        # The newly introduced name should shadow the magic.
        ip.run_cell("matplotlib = 1")
        text, matches = c.complete("mat")
        self.assertEqual(matches, ["matplotlib"])
        # After removing matplotlib from namespace, the magic should again be
        # the only option.
        del ip.user_ns["matplotlib"]
        text, matches = c.complete("mat")
        self.assertEqual(matches, ["%matplotlib"])
    def test_magic_completion_shadowing_explicit(self):
        """
        If the user try to complete a shadowed magic, and explicit % start should
        still return the completions.
        """
        ip = get_ipython()
        c = ip.Completer
        # Before importing matplotlib, %matplotlib magic should be the only option.
        text, matches = c.complete("%mat")
        self.assertEqual(matches, ["%matplotlib"])
        ip.run_cell("matplotlib = 1")
        # After removing matplotlib from namespace, the magic should still be
        # the only option.
        text, matches = c.complete("%mat")
        self.assertEqual(matches, ["%matplotlib"])
def test_magic_config(self):
ip = get_ipython()
c = ip.Completer
s, matches = c.complete(None, "conf")
self.assertIn("%config", matches)
s, matches = c.complete(None, "conf")
self.assertNotIn("AliasManager", matches)
s, matches = c.complete(None, "config ")
self.assertIn("AliasManager", matches)
s, matches = c.complete(None, "%config ")
self.assertIn("AliasManager", matches)
s, matches = c.complete(None, "config Ali")
self.assertListEqual(["AliasManager"], matches)
s, matches = c.complete(None, "%config Ali")
self.assertListEqual(["AliasManager"], matches)
s, matches = c.complete(None, "config AliasManager")
self.assertListEqual(["AliasManager"], matches)
s, matches = c.complete(None, "%config AliasManager")
self.assertListEqual(["AliasManager"], matches)
s, matches = c.complete(None, "config AliasManager.")
self.assertIn("AliasManager.default_aliases", matches)
s, matches = c.complete(None, "%config AliasManager.")
self.assertIn("AliasManager.default_aliases", matches)
s, matches = c.complete(None, "config AliasManager.de")
self.assertListEqual(["AliasManager.default_aliases"], matches)
s, matches = c.complete(None, "config AliasManager.de")
self.assertListEqual(["AliasManager.default_aliases"], matches)
    def test_magic_color(self):
        """%colors completion: the magic name completes, and its argument
        values (e.g. 'nocolor') appear only after a trailing space."""
        ip = get_ipython()
        c = ip.Completer
        s, matches = c.complete(None, "colo")
        assert "%colors" in matches
        s, matches = c.complete(None, "colo")
        assert "nocolor" not in matches
        s, matches = c.complete(None, "%colors")  # No trailing space
        assert "nocolor" not in matches
        s, matches = c.complete(None, "colors ")
        assert "nocolor" in matches
        s, matches = c.complete(None, "%colors ")
        assert "nocolor" in matches
        s, matches = c.complete(None, "colors noco")
        assert ["nocolor"] == matches
        s, matches = c.complete(None, "%colors noco")
        assert ["nocolor"] == matches
    def test_match_dict_keys(self):
        """
        Test that match_dict_keys works on a couple of use case does return what
        expected, and does not crash
        """
        delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
        # Thin wrapper that materializes the matches iterable for comparison.
        def match(*args, **kwargs):
            quote, offset, matches = match_dict_keys(*args, delims=delims, **kwargs)
            return quote, offset, list(matches)
        keys = ["foo", b"far"]
        # bytes keys match only behind a b'...' / b"..." prefix
        assert match(keys, "b'") == ("'", 2, ["far"])
        assert match(keys, "b'f") == ("'", 2, ["far"])
        assert match(keys, 'b"') == ('"', 2, ["far"])
        assert match(keys, 'b"f') == ('"', 2, ["far"])
        assert match(keys, "'") == ("'", 1, ["foo"])
        assert match(keys, "'f") == ("'", 1, ["foo"])
        assert match(keys, '"') == ('"', 1, ["foo"])
        assert match(keys, '"f') == ('"', 1, ["foo"])
        # Completion on first item of tuple
        keys = [("foo", 1111), ("foo", 2222), (3333, "bar"), (3333, "test")]
        assert match(keys, "'f") == ("'", 1, ["foo"])
        assert match(keys, "33") == ("", 0, ["3333"])
        # Completion on numbers
        keys = [
            0xDEADBEEF,
            1111,
            1234,
            "1999",
            0b10101,
            22,
        ]  # 0xDEADBEEF = 3735928559; 0b10101 = 21
        assert match(keys, "0xdead") == ("", 0, ["0xdeadbeef"])
        assert match(keys, "1") == ("", 0, ["1111", "1234"])
        assert match(keys, "2") == ("", 0, ["21", "22"])
        assert match(keys, "0b101") == ("", 0, ["0b10101", "0b10110"])
        # Should yield on variables
        assert match(keys, "a_variable") == ("", 0, [])
        # Should pass over invalid literals
        assert match(keys, "'' ''") == ("", 0, [])
    def test_match_dict_keys_tuple(self):
        """
        Test that match_dict_keys called with an extra prefix works on a
        couple of use cases, returns what is expected, and does not crash.
        """
        delims = " \t\n`!@#$^&*()=+[{]}\\|;:'\",<>?"
        keys = [("foo", "bar"), ("foo", "oof"), ("foo", b"bar"), ("other", "test")]
        def match(*args, extra=None, **kwargs):
            # extra is the tuple of already-typed keys preceding the cursor
            quote, offset, matches = match_dict_keys(
                *args, delims=delims, extra_prefix=extra, **kwargs
            )
            return quote, offset, list(matches)
        # Completion on first key == "foo"
        assert match(keys, "'", extra=("foo",)) == ("'", 1, ["bar", "oof"])
        assert match(keys, '"', extra=("foo",)) == ('"', 1, ["bar", "oof"])
        assert match(keys, "'o", extra=("foo",)) == ("'", 1, ["oof"])
        assert match(keys, '"o', extra=("foo",)) == ('"', 1, ["oof"])
        assert match(keys, "b'", extra=("foo",)) == ("'", 2, ["bar"])
        assert match(keys, 'b"', extra=("foo",)) == ('"', 2, ["bar"])
        assert match(keys, "b'b", extra=("foo",)) == ("'", 2, ["bar"])
        assert match(keys, 'b"b', extra=("foo",)) == ('"', 2, ["bar"])
        # No Completion
        assert match(keys, "'", extra=("no_foo",)) == ("'", 1, [])
        assert match(keys, "'", extra=("fo",)) == ("'", 1, [])
        # Longer tuples: each extra element advances one position deeper
        keys = [("foo1", "foo2", "foo3", "foo4"), ("foo1", "foo2", "bar", "foo4")]
        assert match(keys, "'foo", extra=("foo1",)) == ("'", 1, ["foo2"])
        assert match(keys, "'foo", extra=("foo1", "foo2")) == ("'", 1, ["foo3"])
        assert match(keys, "'foo", extra=("foo1", "foo2", "foo3")) == ("'", 1, ["foo4"])
        assert match(keys, "'foo", extra=("foo1", "foo2", "foo3", "foo4")) == (
            "'",
            1,
            [],
        )
        # Mixed str/int second elements: quoting filters by type
        keys = [("foo", 1111), ("foo", "2222"), (3333, "bar"), (3333, 4444)]
        assert match(keys, "'", extra=("foo",)) == ("'", 1, ["2222"])
        assert match(keys, "", extra=("foo",)) == ("", 0, ["1111", "'2222'"])
        assert match(keys, "'", extra=(3333,)) == ("'", 1, ["bar"])
        assert match(keys, "", extra=(3333,)) == ("", 0, ["'bar'", "4444"])
        assert match(keys, "'", extra=("3333",)) == ("'", 1, [])
        assert match(keys, "33") == ("", 0, ["3333"])
    def test_dict_key_completion_closures(self):
        """With auto_close_dict_keys enabled, completions get a suffix.

        A key that only appears as the first element of tuple keys is
        followed by ``, ``; a key that is only a plain key is closed with
        ``]``; a key that is both gets no suffix.
        """
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.Completer.auto_close_dict_keys = True
        ip.user_ns["d"] = {
            # tuple only
            ("aa", 11): None,
            # tuple and non-tuple
            ("bb", 22): None,
            "bb": None,
            # non-tuple only
            "cc": None,
            # numeric tuple only
            (77, "x"): None,
            # numeric tuple and non-tuple
            (88, "y"): None,
            88: None,
            # numeric non-tuple only
            99: None,
        }
        _, matches = complete(line_buffer="d[")
        # should append `, ` if matches a tuple only
        self.assertIn("'aa', ", matches)
        # should not append anything if matches a tuple and an item
        self.assertIn("'bb'", matches)
        # should append `]` if matches and item only
        self.assertIn("'cc']", matches)
        # should append `, ` if matches a tuple only
        self.assertIn("77, ", matches)
        # should not append anything if matches a tuple and an item
        self.assertIn("88", matches)
        # should append `]` if matches and item only
        self.assertIn("99]", matches)
        _, matches = complete(line_buffer="d['aa', ")
        # should restrict matches to those matching tuple prefix
        self.assertIn("11]", matches)
        self.assertNotIn("'bb'", matches)
        self.assertNotIn("'bb', ", matches)
        self.assertNotIn("'bb']", matches)
        self.assertNotIn("'cc'", matches)
        self.assertNotIn("'cc', ", matches)
        self.assertNotIn("'cc']", matches)
        # restore the default so later tests are unaffected
        ip.Completer.auto_close_dict_keys = False
    def test_dict_key_completion_string(self):
        """Test dictionary key completion for string keys.

        Covers quoting styles, cursor position sensitivity, escaping and
        whitespace in keys, and tuple-of-string keys at several depths.
        """
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = {"abc": None}
        # check completion at different stages
        _, matches = complete(line_buffer="d[")
        self.assertIn("'abc'", matches)
        self.assertNotIn("'abc']", matches)
        _, matches = complete(line_buffer="d['")
        self.assertIn("abc", matches)
        self.assertNotIn("abc']", matches)
        _, matches = complete(line_buffer="d['a")
        self.assertIn("abc", matches)
        self.assertNotIn("abc']", matches)
        # check use of different quoting
        _, matches = complete(line_buffer='d["')
        self.assertIn("abc", matches)
        self.assertNotIn('abc"]', matches)
        _, matches = complete(line_buffer='d["a')
        self.assertIn("abc", matches)
        self.assertNotIn('abc"]', matches)
        # check sensitivity to following context
        _, matches = complete(line_buffer="d[]", cursor_pos=2)
        self.assertIn("'abc'", matches)
        _, matches = complete(line_buffer="d['']", cursor_pos=3)
        self.assertIn("abc", matches)
        self.assertNotIn("abc'", matches)
        self.assertNotIn("abc']", matches)
        # check multiple solutions are correctly returned and that noise is not
        ip.user_ns["d"] = {
            "abc": None,
            "abd": None,
            "bad": None,
            object(): None,
            5: None,
            ("abe", None): None,
            (None, "abf"): None,
        }
        _, matches = complete(line_buffer="d['a")
        self.assertIn("abc", matches)
        self.assertIn("abd", matches)
        self.assertNotIn("bad", matches)
        self.assertNotIn("abe", matches)
        self.assertNotIn("abf", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
        # check escaping and whitespace
        ip.user_ns["d"] = {"a\nb": None, "a'b": None, 'a"b': None, "a word": None}
        _, matches = complete(line_buffer="d['a")
        self.assertIn("a\\nb", matches)
        self.assertIn("a\\'b", matches)
        self.assertIn('a"b', matches)
        self.assertIn("a word", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
        # - can complete on non-initial word of the string
        _, matches = complete(line_buffer="d['a w")
        self.assertIn("word", matches)
        # - understands quote escaping
        _, matches = complete(line_buffer="d['a\\'")
        self.assertIn("b", matches)
        # - default quoting should work like repr
        _, matches = complete(line_buffer="d[")
        self.assertIn('"a\'b"', matches)
        # - when opening quote with ", possible to match with unescaped apostrophe
        _, matches = complete(line_buffer="d[\"a'")
        self.assertIn("b", matches)
        # need to not split at delims that readline won't split at
        if "-" not in ip.Completer.splitter.delims:
            ip.user_ns["d"] = {"before-after": None}
            _, matches = complete(line_buffer="d['before-af")
            self.assertIn("before-after", matches)
        # check completion on tuple-of-string keys at different stage - on first key
        ip.user_ns["d"] = {("foo", "bar"): None}
        _, matches = complete(line_buffer="d[")
        self.assertIn("'foo'", matches)
        self.assertNotIn("'foo']", matches)
        self.assertNotIn("'bar'", matches)
        self.assertNotIn("foo", matches)
        self.assertNotIn("bar", matches)
        # - match the prefix
        _, matches = complete(line_buffer="d['f")
        self.assertIn("foo", matches)
        self.assertNotIn("foo']", matches)
        self.assertNotIn('foo"]', matches)
        _, matches = complete(line_buffer="d['foo")
        self.assertIn("foo", matches)
        # - can complete on second key
        _, matches = complete(line_buffer="d['foo', ")
        self.assertIn("'bar'", matches)
        _, matches = complete(line_buffer="d['foo', 'b")
        self.assertIn("bar", matches)
        self.assertNotIn("foo", matches)
        # - does not propose missing keys
        _, matches = complete(line_buffer="d['foo', 'f")
        self.assertNotIn("bar", matches)
        self.assertNotIn("foo", matches)
        # check sensitivity to following context
        _, matches = complete(line_buffer="d['foo',]", cursor_pos=8)
        self.assertIn("'bar'", matches)
        self.assertNotIn("bar", matches)
        self.assertNotIn("'foo'", matches)
        self.assertNotIn("foo", matches)
        _, matches = complete(line_buffer="d['']", cursor_pos=3)
        self.assertIn("foo", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
        _, matches = complete(line_buffer='d[""]', cursor_pos=3)
        self.assertIn("foo", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
        _, matches = complete(line_buffer='d["foo","]', cursor_pos=9)
        self.assertIn("bar", matches)
        assert not any(m.endswith(("]", '"', "'")) for m in matches), matches
        _, matches = complete(line_buffer='d["foo",]', cursor_pos=8)
        self.assertIn("'bar'", matches)
        self.assertNotIn("bar", matches)
        # Can complete with longer tuple keys
        ip.user_ns["d"] = {("foo", "bar", "foobar"): None}
        # - can complete second key
        _, matches = complete(line_buffer="d['foo', 'b")
        self.assertIn("bar", matches)
        self.assertNotIn("foo", matches)
        self.assertNotIn("foobar", matches)
        # - can complete third key
        _, matches = complete(line_buffer="d['foo', 'bar', 'fo")
        self.assertIn("foobar", matches)
        self.assertNotIn("foo", matches)
        self.assertNotIn("bar", matches)
    def test_dict_key_completion_numbers(self):
        """Numeric dict keys complete in the radix the user is typing.

        Hex and binary prefixes match keys by value regardless of how the
        key literal was written; string keys like "1999" are not offered
        for a numeric prefix.
        """
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = {
            0xDEADBEEF: None,  # 3735928559
            1111: None,
            1234: None,
            "1999": None,
            0b10101: None,  # 21
            22: None,
        }
        _, matches = complete(line_buffer="d[1")
        self.assertIn("1111", matches)
        self.assertIn("1234", matches)
        self.assertNotIn("1999", matches)
        self.assertNotIn("'1999'", matches)
        _, matches = complete(line_buffer="d[0xdead")
        self.assertIn("0xdeadbeef", matches)
        _, matches = complete(line_buffer="d[2")
        self.assertIn("21", matches)
        self.assertIn("22", matches)
        # binary prefix: 22 == 0b10110, so it matches "0b101" too
        _, matches = complete(line_buffer="d[0b101")
        self.assertIn("0b10101", matches)
        self.assertIn("0b10110", matches)
    def test_dict_key_completion_contexts(self):
        """Test expression contexts in which dict key completion occurs"""
        ip = get_ipython()
        complete = ip.Completer.complete
        d = {"abc": None}
        ip.user_ns["d"] = d
        class C:
            data = d
        ip.user_ns["C"] = C
        ip.user_ns["get"] = lambda: d
        ip.user_ns["nested"] = {"x": d}
        def assert_no_completion(**kwargs):
            # key must not be offered in any quoted/closed form
            _, matches = complete(**kwargs)
            self.assertNotIn("abc", matches)
            self.assertNotIn("abc'", matches)
            self.assertNotIn("abc']", matches)
            self.assertNotIn("'abc'", matches)
            self.assertNotIn("'abc']", matches)
        def assert_completion(**kwargs):
            _, matches = complete(**kwargs)
            self.assertIn("'abc'", matches)
            self.assertNotIn("'abc']", matches)
        # no completion after string closed, even if reopened
        assert_no_completion(line_buffer="d['a'")
        assert_no_completion(line_buffer='d["a"')
        assert_no_completion(line_buffer="d['a' + ")
        assert_no_completion(line_buffer="d['a' + '")
        # completion in non-trivial expressions
        assert_completion(line_buffer="+ d[")
        assert_completion(line_buffer="(d[")
        assert_completion(line_buffer="C.data[")
        # nested dict completion
        assert_completion(line_buffer="nested['x'][")
        # minimal evaluation policy must refuse to evaluate the subscript
        with evaluation_policy("minimal"):
            with pytest.raises(AssertionError):
                assert_completion(line_buffer="nested['x'][")
        # greedy flag
        # NOTE: deliberately shadows the earlier assert_completion with a
        # version that expects the full greedy-style match text.
        def assert_completion(**kwargs):
            _, matches = complete(**kwargs)
            self.assertIn("get()['abc']", matches)
        assert_no_completion(line_buffer="get()[")
        with greedy_completion():
            assert_completion(line_buffer="get()[")
            assert_completion(line_buffer="get()['")
            assert_completion(line_buffer="get()['a")
            assert_completion(line_buffer="get()['ab")
            assert_completion(line_buffer="get()['abc")
    def test_completion_autoimport(self):
        """With allow_auto_import=True, attribute completion on an
        un-imported module name (``math.``) yields its members."""
        ip = get_ipython()
        complete = ip.Completer.complete
        with (
            evaluation_policy("limited", allow_auto_import=True),
            jedi_status(False),
        ):
            _, matches = complete(line_buffer="math.")
            self.assertIn(".pi", matches)
    def test_completion_no_autoimport(self):
        """With allow_auto_import=False, un-imported module attributes
        are not completed."""
        ip = get_ipython()
        complete = ip.Completer.complete
        with (
            evaluation_policy("limited", allow_auto_import=False),
            jedi_status(False),
        ):
            _, matches = complete(line_buffer="math.")
            self.assertNotIn(".pi", matches)
    def test_completion_allow_custom_getattr_per_module(self):
        """allowed_getattr_external trusts ``__getattr__`` per module.

        Three modules with identical ``ListFactory`` classes are installed;
        only objects whose class originates in an allow-listed module (or a
        submodule of one) should have their custom ``__getattr__`` evaluated
        during completion.
        """
        factory_code = textwrap.dedent(
            """
            class ListFactory:
                def __getattr__(self, attr):
                    return []
            """
        )
        safe_lib = types.ModuleType("my.safe.lib")
        sys.modules["my.safe.lib"] = safe_lib
        exec(factory_code, safe_lib.__dict__)
        unsafe_lib = types.ModuleType("my.unsafe.lib")
        sys.modules["my.unsafe.lib"] = unsafe_lib
        exec(factory_code, unsafe_lib.__dict__)
        fake_safe_lib = types.ModuleType("my_fake_lib")
        sys.modules["my_fake_lib"] = fake_safe_lib
        exec(factory_code, fake_safe_lib.__dict__)
        ip = get_ipython()
        ip.user_ns["safe_list_factory"] = safe_lib.ListFactory()
        ip.user_ns["unsafe_list_factory"] = unsafe_lib.ListFactory()
        ip.user_ns["fake_safe_factory"] = fake_safe_lib.ListFactory()
        complete = ip.Completer.complete
        with (
            evaluation_policy("limited", allowed_getattr_external={"my.safe.lib"}),
            jedi_status(False),
        ):
            _, matches = complete(line_buffer="safe_list_factory.example.")
            self.assertIn(".append", matches)
            # this also checks against https://github.com/ipython/ipython/issues/14916
            # because removing "un" would cause this test to incorrectly pass
            _, matches = complete(line_buffer="unsafe_list_factory.example.")
            self.assertNotIn(".append", matches)
        # allow-listing the parent package covers both dotted submodules,
        # but not the similarly-named "my_fake_lib" top-level module
        sys.modules["my"] = types.ModuleType("my")
        with (
            evaluation_policy("limited", allowed_getattr_external={"my"}),
            jedi_status(False),
        ):
            _, matches = complete(line_buffer="safe_list_factory.example.")
            self.assertIn(".append", matches)
            _, matches = complete(line_buffer="unsafe_list_factory.example.")
            self.assertIn(".append", matches)
            _, matches = complete(line_buffer="fake_safe_factory.example.")
            self.assertNotIn(".append", matches)
        # with no allow-list, no custom __getattr__ is evaluated
        with (
            evaluation_policy("limited"),
            jedi_status(False),
        ):
            _, matches = complete(line_buffer="safe_list_factory.example.")
            self.assertNotIn(".append", matches)
            _, matches = complete(line_buffer="unsafe_list_factory.example.")
            self.assertNotIn(".append", matches)
    def test_completion_allow_subclass_of_trusted_module(self):
        """Subclasses of a trusted class inherit trust for ``__getattr__``,
        unless the subclass overrides ``__getattr__`` in an untrusted
        namespace."""
        factory_code = textwrap.dedent(
            """
            class ListFactory:
                def __getattr__(self, attr):
                    return []
            """
        )
        trusted_lib = types.ModuleType("my.trusted.lib")
        sys.modules["my.trusted.lib"] = trusted_lib
        exec(factory_code, trusted_lib.__dict__)
        ip = get_ipython()
        # Create a subclass in __main__ (untrusted namespace)
        subclass_code = textwrap.dedent(
            """
            class SubclassFactory(trusted_lib.ListFactory):
                pass
            """
        )
        ip.user_ns["trusted_lib"] = trusted_lib
        exec(subclass_code, ip.user_ns)
        ip.user_ns["subclass_factory"] = ip.user_ns["SubclassFactory"]()
        complete = ip.Completer.complete
        with (
            evaluation_policy("limited", allowed_getattr_external={"my.trusted.lib"}),
            jedi_status(False),
        ):
            # inherited trusted __getattr__ returns [], so list attrs complete
            _, matches = complete(line_buffer="subclass_factory.example.")
            self.assertIn(".append", matches)
        # Test that overriding __getattr__ in subclass in untrusted namespace prevents completion
        overriding_subclass_code = textwrap.dedent(
            """
            class OverridingSubclass(trusted_lib.ListFactory):
                def __getattr__(self, attr):
                    return {}
            """
        )
        exec(overriding_subclass_code, ip.user_ns)
        ip.user_ns["overriding_factory"] = ip.user_ns["OverridingSubclass"]()
        with (
            evaluation_policy("limited", allowed_getattr_external={"my.trusted.lib"}),
            jedi_status(False),
        ):
            _, matches = complete(line_buffer="overriding_factory.example.")
            self.assertNotIn(".append", matches)
            self.assertNotIn(".keys", matches)
    def test_completion_fallback_to_annotation_for_attribute(self):
        """When ``__getattr__`` cannot be evaluated safely, completion
        falls back to the class-level attribute annotation."""
        code = textwrap.dedent(
            """
            class StringMethods:
                def a():
                    pass
            class Test:
                str: StringMethods
                def __init__(self):
                    self.str = StringMethods()
                def __getattr__(self, name):
                    raise AttributeError(f"{name} not found")
            """
        )
        repro = types.ModuleType("repro")
        sys.modules["repro"] = repro
        exec(code, repro.__dict__)
        ip = get_ipython()
        ip.user_ns["repro"] = repro
        exec("r = repro.Test()", ip.user_ns)
        complete = ip.Completer.complete
        try:
            with evaluation_policy("limited"), jedi_status(False):
                _, matches = complete(line_buffer="r.str.")
                self.assertIn(".a", matches)
        finally:
            # clean up the fake module and user namespace entry
            sys.modules.pop("repro", None)
            ip.user_ns.pop("r", None)
    def test_policy_warnings(self):
        """Passing overrides that a policy does not support warns the user."""
        with self.assertWarns(
            UserWarning,
            msg="Override 'allowed_getattr_external' is not valid with 'unsafe' evaluation policy",
        ):
            with evaluation_policy("unsafe", allowed_getattr_external=[]):
                pass
        with self.assertWarns(
            UserWarning,
            msg="Override 'test' is not valid with 'limited' evaluation policy",
        ):
            with evaluation_policy("limited", test=[]):
                pass
    def test_dict_key_completion_bytes(self):
        """Test handling of bytes in dict key completion"""
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = {"abc": None, b"abd": None}
        _, matches = complete(line_buffer="d[")
        self.assertIn("'abc'", matches)
        self.assertIn("b'abd'", matches)
        # Discrimination between str and bytes keys by the typed prefix is
        # kept here as a spec for behaviour that is not implemented yet.
        if False:  # not currently implemented
            _, matches = complete(line_buffer="d[b")
            self.assertIn("b'abd'", matches)
            self.assertNotIn("b'abc'", matches)
            _, matches = complete(line_buffer="d[b'")
            self.assertIn("abd", matches)
            self.assertNotIn("abc", matches)
            _, matches = complete(line_buffer="d[B'")
            self.assertIn("abd", matches)
            self.assertNotIn("abc", matches)
            _, matches = complete(line_buffer="d['")
            self.assertIn("abc", matches)
            self.assertNotIn("abd", matches)
    def test_dict_key_completion_unicode_py3(self):
        """Test handling of unicode in dict key completion"""
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = {"a\u05d0": None}
        # query using escape
        if sys.platform != "win32":
            # Known failure on Windows
            _, matches = complete(line_buffer="d['a\\u05d0")
            self.assertIn("u05d0", matches)  # tokenized after \\
        # query using character
        _, matches = complete(line_buffer="d['a\u05d0")
        self.assertIn("a\u05d0", matches)
        with greedy_completion():
            # greedy mode returns the full subscript expression
            # query using escape
            _, matches = complete(line_buffer="d['a\\u05d0")
            self.assertIn("d['a\\u05d0']", matches)  # tokenized after \\
            # query using character
            _, matches = complete(line_buffer="d['a\u05d0")
            self.assertIn("d['a\u05d0']", matches)
    @dec.skip_without("numpy")
    def test_struct_array_key_completion(self):
        """Test dict key completion applies to numpy struct arrays"""
        import numpy
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = numpy.array([], dtype=[("hello", "f"), ("world", "f")])
        _, matches = complete(line_buffer="d['")
        self.assertIn("hello", matches)
        self.assertIn("world", matches)
        # complete on the numpy struct itself
        dt = numpy.dtype(
            [("my_head", [("my_dt", ">u4"), ("my_df", ">u4")]), ("my_data", ">f4", 5)]
        )
        x = numpy.zeros(2, dtype=dt)
        ip.user_ns["d"] = x[1]
        _, matches = complete(line_buffer="d['")
        self.assertIn("my_head", matches)
        self.assertIn("my_data", matches)
        def completes_on_nested():
            # nested field access requires evaluating the subscript chain
            ip.user_ns["d"] = numpy.zeros(2, dtype=dt)
            _, matches = complete(line_buffer="d[1]['my_head']['")
            self.assertTrue(any(["my_dt" in m for m in matches]))
            self.assertTrue(any(["my_df" in m for m in matches]))
        # complete on a nested level
        with greedy_completion():
            completes_on_nested()
        with evaluation_policy("limited"):
            completes_on_nested()
        # minimal policy must refuse to evaluate, so nested completion fails
        with evaluation_policy("minimal"):
            with pytest.raises(AssertionError):
                completes_on_nested()
    @dec.skip_without("pandas")
    def test_dataframe_key_completion(self):
        """Test dict key completion applies to pandas DataFrames"""
        import pandas
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["d"] = pandas.DataFrame({"hello": [1], "world": [2]})
        _, matches = complete(line_buffer="d['")
        self.assertIn("hello", matches)
        self.assertIn("world", matches)
        # .loc with various row-slice shapes before the column key
        _, matches = complete(line_buffer="d.loc[:, '")
        self.assertIn("hello", matches)
        self.assertIn("world", matches)
        _, matches = complete(line_buffer="d.loc[1:, '")
        self.assertIn("hello", matches)
        _, matches = complete(line_buffer="d.loc[1:1, '")
        self.assertIn("hello", matches)
        _, matches = complete(line_buffer="d.loc[1:1:-1, '")
        self.assertIn("hello", matches)
        _, matches = complete(line_buffer="d.loc[::, '")
        self.assertIn("hello", matches)
    def test_dict_key_completion_invalids(self):
        """Smoke test cases dict key completion can't handle.

        Results are deliberately ignored; the test only asserts that none
        of these inputs raise.
        """
        ip = get_ipython()
        complete = ip.Completer.complete
        ip.user_ns["no_getitem"] = None
        ip.user_ns["no_keys"] = []
        ip.user_ns["cant_call_keys"] = dict
        ip.user_ns["empty"] = {}
        ip.user_ns["d"] = {"abc": 5}
        _, matches = complete(line_buffer="no_getitem['")
        _, matches = complete(line_buffer="no_keys['")
        _, matches = complete(line_buffer="cant_call_keys['")
        _, matches = complete(line_buffer="empty['")
        _, matches = complete(line_buffer="name_error['")
        _, matches = complete(line_buffer="d['\\")  # incomplete escape
def test_object_key_completion(self):
ip = get_ipython()
ip.user_ns["key_completable"] = KeyCompletable(["qwerty", "qwick"])
_, matches = ip.Completer.complete(line_buffer="key_completable['qw")
self.assertIn("qwerty", matches)
self.assertIn("qwick", matches)
def test_class_key_completion(self):
ip = get_ipython()
NamedInstanceClass("qwerty")
NamedInstanceClass("qwick")
ip.user_ns["named_instance_class"] = NamedInstanceClass
_, matches = ip.Completer.complete(line_buffer="named_instance_class['qw")
self.assertIn("qwerty", matches)
self.assertIn("qwick", matches)
def test_tryimport(self):
"""
Test that try-import don't crash on trailing dot, and import modules before
"""
from IPython.core.completerlib import try_import
assert try_import("IPython.")
def test_aimport_module_completer(self):
ip = get_ipython()
_, matches = ip.complete("i", "%aimport i")
self.assertIn("io", matches)
self.assertNotIn("int", matches)
def test_nested_import_module_completer(self):
ip = get_ipython()
_, matches = ip.complete(None, "import IPython.co", 17)
self.assertIn("IPython.core", matches)
self.assertNotIn("import IPython.core", matches)
self.assertNotIn("IPython.display", matches)
def test_import_module_completer(self):
ip = get_ipython()
_, matches = ip.complete("i", "import i")
self.assertIn("io", matches)
self.assertNotIn("int", matches)
def test_from_module_completer(self):
ip = get_ipython()
_, matches = ip.complete("B", "from io import B", 16)
self.assertIn("BytesIO", matches)
self.assertNotIn("BaseException", matches)
def test_snake_case_completion(self):
ip = get_ipython()
ip.Completer.use_jedi = False
ip.user_ns["some_three"] = 3
ip.user_ns["some_four"] = 4
_, matches = ip.complete("s_", "print(s_f")
self.assertIn("some_three", matches)
self.assertIn("some_four", matches)
    def test_mix_terms(self):
        """Keyword-argument completion after ``meth(`` offers only that
        method's parameters, not those of similarly named siblings."""
        ip = get_ipython()
        from textwrap import dedent
        ip.Completer.use_jedi = False
        ip.ex(
            dedent(
                """
                class Test:
                    def meth(self, meth_arg1):
                        print("meth")
                    def meth_1(self, meth1_arg1, meth1_arg2):
                        print("meth1")
                    def meth_2(self, meth2_arg1, meth2_arg2):
                        print("meth2")
                test = Test()
                """
            )
        )
        _, matches = ip.complete(None, "test.meth(")
        self.assertIn("meth_arg1=", matches)
        self.assertNotIn("meth2_arg1=", matches)
    def test_percent_symbol_restrict_to_magic_completions(self):
        """A leading ``%`` restricts completions to magics only."""
        ip = get_ipython()
        completer = ip.Completer
        text = "%a"
        with provisionalcompleter():
            completer.use_jedi = True
            completions = completer.completions(text, len(text))
            # every completion must itself start with '%'
            for c in completions:
                self.assertEqual(c.text[0], "%")
    def test_fwd_unicode_restricts(self):
        """A unicode-name escape completes to exactly that character."""
        ip = get_ipython()
        completer = ip.Completer
        text = "\\ROMAN NUMERAL FIVE"
        with provisionalcompleter():
            completer.use_jedi = True
            completions = [
                completion.text for completion in completer.completions(text, len(text))
            ]
            # U+2164 is ROMAN NUMERAL FIVE
            self.assertEqual(completions, ["\u2164"])
    def test_dict_key_restrict_to_dicts(self):
        """Test that dict key suppresses non-dict completion items"""
        ip = get_ipython()
        c = ip.Completer
        d = {"abc": None}
        ip.user_ns["d"] = d
        text = 'd["a'
        def _():
            with provisionalcompleter():
                c.use_jedi = True
                return [
                    completion.text for completion in c.completions(text, len(text))
                ]
        completions = _()
        # dict-key matcher suppresses everything else
        self.assertEqual(completions, ["abc"])
        # check that it can be disabled in granular manner:
        cfg = Config()
        cfg.IPCompleter.suppress_competing_matchers = {
            "IPCompleter.dict_key_matcher": False
        }
        c.update_config(cfg)
        completions = _()
        # with suppression off, other matchers contribute too
        self.assertIn("abc", completions)
        self.assertGreater(len(completions), 1)
    def test_matcher_suppression(self):
        """Matchers can suppress each other at run time, and configuration
        (``suppress_competing_matchers``) overrides run-time decisions."""
        @completion_matcher(identifier="a_matcher")
        def a_matcher(text):
            return ["completion_a"]
        @completion_matcher(identifier="b_matcher", api_version=2)
        def b_matcher(context: CompletionContext):
            # b_matcher decides, based on the typed text, which other
            # matchers to suppress or spare
            text = context.token
            result = {"completions": [SimpleCompletion("completion_b")]}
            if text == "suppress c":
                result["suppress"] = {"c_matcher"}
            if text.startswith("suppress all"):
                result["suppress"] = True
            if text == "suppress all but c":
                result["do_not_suppress"] = {"c_matcher"}
            if text == "suppress all but a":
                result["do_not_suppress"] = {"a_matcher"}
            return result
        @completion_matcher(identifier="c_matcher")
        def c_matcher(text):
            return ["completion_c"]
        with custom_matchers([a_matcher, b_matcher, c_matcher]):
            ip = get_ipython()
            c = ip.Completer
            def _(text, expected):
                c.use_jedi = False
                s, matches = c.complete(text)
                self.assertEqual(expected, matches)
            _("do not suppress", ["completion_a", "completion_b", "completion_c"])
            _("suppress all", ["completion_b"])
            _("suppress all but a", ["completion_a", "completion_b"])
            _("suppress all but c", ["completion_b", "completion_c"])
            def configure(suppression_config):
                cfg = Config()
                cfg.IPCompleter.suppress_competing_matchers = suppression_config
                c.update_config(cfg)
            # test that configuration takes priority over the run-time decisions
            configure(False)
            _("suppress all", ["completion_a", "completion_b", "completion_c"])
            configure({"b_matcher": False})
            _("suppress all", ["completion_a", "completion_b", "completion_c"])
            configure({"a_matcher": False})
            _("suppress all", ["completion_b"])
            configure({"b_matcher": True})
            _("do not suppress", ["completion_b"])
            configure(True)
            _("do not suppress", ["completion_a"])
    def test_matcher_suppression_with_iterator(self):
        """Suppression works even when a matcher returns a lazy iterator
        rather than a list."""
        @completion_matcher(identifier="matcher_returning_iterator")
        def matcher_returning_iterator(text):
            return iter(["completion_iter"])
        @completion_matcher(identifier="matcher_returning_list")
        def matcher_returning_list(text):
            return ["completion_list"]
        with custom_matchers([matcher_returning_iterator, matcher_returning_list]):
            ip = get_ipython()
            c = ip.Completer
            def _(text, expected):
                c.use_jedi = False
                s, matches = c.complete(text)
                self.assertEqual(expected, matches)
            def configure(suppression_config):
                cfg = Config()
                cfg.IPCompleter.suppress_competing_matchers = suppression_config
                c.update_config(cfg)
            configure(False)
            _("---", ["completion_iter", "completion_list"])
            configure(True)
            _("---", ["completion_iter"])
            configure(None)
            _("--", ["completion_iter", "completion_list"])
    @pytest.mark.xfail(
        sys.version_info.releaselevel in ("alpha",),
        reason="Parso does not yet parse 3.13",
    )
    def test_matcher_suppression_with_jedi(self):
        """Jedi-based attribute completion survives every value of the
        ``suppress_competing_matchers`` configuration."""
        ip = get_ipython()
        c = ip.Completer
        c.use_jedi = True
        def configure(suppression_config):
            cfg = Config()
            cfg.IPCompleter.suppress_competing_matchers = suppression_config
            c.update_config(cfg)
        def _():
            with provisionalcompleter():
                matches = [completion.text for completion in c.completions("dict.", 5)]
                self.assertIn("keys", matches)
        configure(False)
        _()
        configure(True)
        _()
        configure(None)
        _()
    def test_matcher_disabling(self):
        """``disable_matchers`` removes a matcher's results entirely."""
        @completion_matcher(identifier="a_matcher")
        def a_matcher(text):
            return ["completion_a"]
        @completion_matcher(identifier="b_matcher")
        def b_matcher(text):
            return ["completion_b"]
        def _(expected):
            s, matches = c.complete("completion_")
            self.assertEqual(expected, matches)
        with custom_matchers([a_matcher, b_matcher]):
            ip = get_ipython()
            c = ip.Completer
            _(["completion_a", "completion_b"])
            cfg = Config()
            cfg.IPCompleter.disable_matchers = ["b_matcher"]
            c.update_config(cfg)
            _(["completion_a"])
            # restore so later tests see both matchers again
            cfg.IPCompleter.disable_matchers = []
            c.update_config(cfg)
    def test_matcher_priority(self):
        """The highest-priority suppressing matcher wins; priority can be
        changed after registration via ``matcher_priority``."""
        @completion_matcher(identifier="a_matcher", priority=0, api_version=2)
        def a_matcher(text):
            return {"completions": [SimpleCompletion("completion_a")], "suppress": True}
        @completion_matcher(identifier="b_matcher", priority=2, api_version=2)
        def b_matcher(text):
            return {"completions": [SimpleCompletion("completion_b")], "suppress": True}
        def _(expected):
            s, matches = c.complete("completion_")
            self.assertEqual(expected, matches)
        with custom_matchers([a_matcher, b_matcher]):
            ip = get_ipython()
            c = ip.Completer
            _(["completion_b"])
            # raising a's priority above b's (2) flips the winner
            a_matcher.matcher_priority = 3
            _(["completion_a"])
# Each case pairs a multi-line source snippet (ending mid-completion) with the
# completion text(s) that static/limited analysis must still be able to offer,
# even though the snippet was never executed.
@pytest.mark.parametrize(
    "use_jedi,evaluation",
    [
        [True, "minimal"],
        [False, "limited"],
    ],
)
@pytest.mark.parametrize(
    "code,insert_text",
    [
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def my_method(self) -> str:",
                    "        return 1",
                    "my_instance = NotYetDefined()",
                    "my_insta",
                ]
            ),
            "my_instance",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def my_method(self) -> str:",
                    "        return 1",
                    "instance = NotYetDefined()",
                    "instance.",
                ]
            ),
            "my_method",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def my_method(self) -> str:",
                    "        return 1",
                    "my_instance = NotYetDefined()",
                    "my_instance.my_method().",
                ]
            ),
            "capitalize",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def my_method(self):",
                    "        return []",
                    "my_instance = NotYetDefined()",
                    "my_instance.my_method().",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    @property",
                    "    def my_property(self):",
                    "        return 1.1",
                    "my_instance = NotYetDefined()",
                    "my_instance.my_property.",
                ]
            ),
            "as_integer_ratio",
        ],
        [
            "\n".join(
                [
                    "my_instance = 1.1",
                    "assert my_instance.",
                ]
            ),
            "as_integer_ratio",
        ],
        [
            "\n".join(
                [
                    "def my_test() -> float:",
                    "    pass",
                    "my_test().",
                ]
            ),
            "as_integer_ratio",
        ],
        [
            "\n".join(
                [
                    "def my_test():",
                    "    return {}",
                    "my_test().",
                ]
            ),
            "keys",
        ],
        [
            "\n".join(
                [
                    "l = []",
                    "def my_test():",
                    "    return l",
                    "my_test().",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "num = {1: 'one'}",
                    "num[2] = 'two'",
                    "num.",
                ]
            ),
            "keys",
        ],
        [
            "\n".join(
                [
                    "num = {1: 'one'}",
                    "num[2] = ['two']",
                    "num[2].",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "l = []",
                    "class NotYetDefined:",
                    "    def my_method(self):",
                    "        return l",
                    "my_instance = NotYetDefined()",
                    "my_instance.my_method().",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "def string_or_int(flag):",
                    "    if flag:",
                    "        return 'test'",
                    "    return 1",
                    "string_or_int().",
                ]
            ),
            ["capitalize", "as_integer_ratio"],
        ],
        [
            "\n".join(
                [
                    "def foo():",
                    "    l = []",
                    "    return l",
                    "foo().",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def __init__(self):",
                    "        self.test = []",
                    "instance = NotYetDefined()",
                    "instance.",
                ]
            ),
            "test",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def __init__(instance):",
                    "        instance.test = []",
                    "instance = NotYetDefined()",
                    "instance.test.",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def __init__(this):",
                    "        this.test:str = []",
                    "instance = NotYetDefined()",
                    "instance.test.",
                ]
            ),
            "capitalize",
        ],
        [
            "\n".join(
                [
                    "l = []",
                    "class NotYetDefined:",
                    "    def __init__(me):",
                    "        me.test = l",
                    "instance = NotYetDefined()",
                    "instance.test.",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def test(self):",
                    "        self.l = []",
                    "        return self.l",
                    "instance = NotYetDefined()",
                    "instance.test().",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "class NotYetDefined:",
                    "    def test():",
                    "        return []",
                    "instance = NotYetDefined()",
                    "instance.test().",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "def foo():",
                    "    if some_condition:",
                    "        return {'top':{'mid':{'leaf': 2}}}",
                    "    return {'top': {'mid':[]}}",
                    "foo()['top']['mid'].",
                ]
            ),
            ["keys", "append"],
        ],
        [
            "\n".join(
                [
                    "def foo():",
                    "    if some_condition:",
                    "        return {'top':{'mid':{'leaf': 2}}}",
                    "    return {'top': {'mid':[]}}",
                    "foo()['top']['mid']['leaf'].",
                ]
            ),
            "as_integer_ratio",
        ],
        [
            "\n".join(
                [
                    "async def async_func():",
                    "    return []",
                    "async_func().",
                ]
            ),
            "cr_await",
        ],
        [
            "\n".join(
                [
                    "async def async_func():",
                    "    return []",
                    "(await async_func()).",
                ]
            ),
            "append",
        ],
        [
            "\n".join(["t = []", "if some_condition:", "    t."]),
            "append",
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "if some_condition:",
                    "    t = 'string'",
                    "t.",
                ]
            ),
            ["append", "capitalize"],
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "if some_condition:",
                    "    t = 'string'",
                    "else:",
                    "    t.",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "if some_condition:",
                    "    t = 'string'",
                    "else:",
                    "    t = 1",
                    "t.",
                ]
            ),
            ["append", "capitalize", "as_integer_ratio"],
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "if condition_1:",
                    "    t = 'string'",
                    "elif condition_2:",
                    "    t = 1",
                    "elif condition_3:",
                    "    t.",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "if condition_1:",
                    "    t = 'string'",
                    "elif condition_2:",
                    "    t = 1",
                    "elif condition_3:",
                    "    t = {}",
                    "t.",
                ]
            ),
            ["append", "capitalize", "as_integer_ratio", "keys"],
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "if condition_1:",
                    "    if condition_2:",
                    "        t = 'nested'",
                    "t.",
                ]
            ),
            ["append", "capitalize"],
        ],
        [
            "\n".join(
                [
                    "a = []",
                    "while condition:",
                    "    a.",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "while condition:",
                    "    t = 'str'",
                    "t.",
                ]
            ),
            ["append", "capitalize"],
        ],
        [
            "\n".join(
                [
                    "t = []",
                    "while condition_1:",
                    "    while condition_2:",
                    "        t = 'str'",
                    "t.",
                ]
            ),
            ["append", "capitalize"],
        ],
        [
            "\n".join(
                [
                    "for i in range(10):",
                    "    i.",
                ]
            ),
            "bit_length",
        ],
        [
            "\n".join(
                [
                    "for i in range(10):",
                    "    if i % 2 == 0:",
                    "        i.",
                ]
            ),
            "bit_length",
        ],
        [
            "\n".join(
                [
                    "for item in ['a', 'b', 'c']:",
                    "    item.",
                ]
            ),
            "capitalize",
        ],
        [
            "\n".join(
                [
                    "for key, value in {'a': 1, 'b': 2}.items():",
                    "    key.",
                ]
            ),
            "capitalize",
        ],
        [
            "\n".join(
                [
                    "for key, value in {'a': 1, 'b': 2}.items():",
                    "    value.",
                ]
            ),
            "bit_length",
        ],
        [
            "\n".join(
                [
                    "for sublist in [[1, 2], [3, 4]]:",
                    "    sublist.",
                ]
            ),
            "append",
        ],
        [
            "\n".join(
                [
                    "for sublist in [[1, 2], [3, 4]]:",
                    "    for item in sublist:",
                    "        item.",
                ]
            ),
            "bit_length",
        ],
        [
            "\n".join(
                [
                    "t: list[str]",
                    "t[0].",
                ]
            ),
            ["capitalize"],
        ],
    ],
)
def test_undefined_variables(use_jedi, evaluation, code, insert_text):
    """Each expected insertion text must appear exactly once among the
    completions produced at the end of ``code``."""
    offset = len(code)
    ip.Completer.use_jedi = use_jedi
    ip.Completer.evaluation = evaluation
    with provisionalcompleter():
        completions = list(ip.Completer.completions(text=code, offset=offset))
    insert_texts = insert_text if isinstance(insert_text, list) else [insert_text]
    for text in insert_texts:
        # completions may carry a leading dot for attribute access
        match = [c for c in completions if c.text.lstrip(".") == text]
        message_on_fail = f"{text} not found among {[c.text for c in completions]}"
        assert len(match) == 1, message_on_fail
@pytest.mark.parametrize(
"code,insert_text",
[
[
"\n".join(
[
"t: dict = {'a': []}",
"t['a'].",
]
),
["append"],
],
[
"\n".join(
[
"t: int | dict = {'a': []}",
"t.",
]
),
["keys", "bit_length"],
],
[
"\n".join(
[
"t: int | dict = {'a': []}",
"t['a'].",
]
),
"append",
],
# Test union types
[
"\n".join(
[
"t: int | str",
"t.",
]
),
["bit_length", "capitalize"],
],
[
"\n".join(
[
"def func() -> int | str: pass",
"func().",
]
),
["bit_length", "capitalize"],
],
[
"\n".join(
[
"t: list = ['test']",
"t[0].",
]
),
["capitalize"],
],
[
"\n".join(
[
"class T:",
" @property",
" def p(self) -> int | str: pass",
"t = T()",
"t.p.",
]
),
["bit_length", "capitalize"],
],
],
)
def test_undefined_variables_without_jedi(code, insert_text):
offset = len(code)
ip.Completer.use_jedi = False
ip.Completer.evaluation = "limited"
with provisionalcompleter():
completions = list(ip.Completer.completions(text=code, offset=offset))
insert_texts = insert_text if isinstance(insert_text, list) else [insert_text]
for text in insert_texts:
match = [c for c in completions if c.text.lstrip(".") == text]
message_on_fail = f"{text} not found among {[c.text for c in completions]}"
assert len(match) == 1, message_on_fail
@pytest.mark.parametrize(
"code",
[
"\n".join(
[
"def my_test() -> float:",
" return 1.1",
"my_test().",
]
),
"\n".join(
[
"class MyClass():",
" b: list[str]",
"x = MyClass()",
"x.b[0].",
]
),
"\n".join(
[
"class MyClass():",
" b: list[str]",
"x = MyClass()",
"x.fake_attr().",
]
),
],
)
def test_no_file_completions_in_attr_access(code):
"""Test that files are not suggested during attribute/method completion."""
with TemporaryWorkingDirectory():
open(".hidden", "w", encoding="utf-8").close()
offset = len(code)
for use_jedi in (True, False):
with provisionalcompleter(), jedi_status(use_jedi):
completions = list(ip.Completer.completions(text=code, offset=offset))
matches = [c for c in completions if c.text.lstrip(".") == "hidden"]
assert (
len(matches) == 0
), f"File '.hidden' should not appear in attribute completion"
@pytest.mark.parametrize(
"line,expected",
[
# Basic test cases
("np.", "attribute"),
("np.ran", "attribute"),
("np.random.rand(np.random.ran", "attribute"),
("np.random.rand(n", "global"),
("d['k.e.y.'](ran", "global"),
("d[0].k", "attribute"),
("a = { 'a': np.ran", "attribute"),
("n", "global"),
("", "global"),
# Dots in string literals
('some_var = "this is a string with a dot.', "global"),
("text = 'another string with a dot.", "global"),
('f"greeting {user.na', "attribute"), # Cursor in f-string expression
('t"welcome {guest.na', "attribute"), # Cursor in t-string expression
('f"hello {name} worl', "global"), # Cursor in f-string outside expression
('f"hello {{a.', "global"),
('f"hello {{{a.', "attribute"),
# Backslash escapes in strings
('var = "string with \\"escaped quote and a dot.', "global"),
("escaped = 'single \\'quote\\' with a dot.", "global"),
# Multi-line strings
('multi = """This is line one\nwith a dot.', "global"),
("multi_single = '''Another\nmulti-line\nwith a dot.", "global"),
# Inline comments
("x = 5 # This is a comment", "global"),
("y = obj.method() # Comment after dot.method", "global"),
# Hash symbol within string literals should not be treated as comments
("d['#'] = np.", "attribute"),
# Nested parentheses with dots
("complex_expr = (func((obj.method(param.attr", "attribute"),
("multiple_nesting = {key: [value.attr", "attribute"),
# Numbers
("3.", "global"),
("3.14", "global"),
("-42.14", "global"),
("x = func(3.14", "global"),
("x = func(a3.", "attribute"),
("x = func(a3.12", "global"),
("3.1.", "attribute"),
("-3.1.", "attribute"),
("(3).", "attribute"),
# Additional cases
("", "global"),
('str_with_code = "x.attr', "global"),
('f"formatted {obj.attr', "attribute"),
('f"formatted {obj.attr}', "global"),
("dict_with_dots = {'key.with.dots': value.attr", "attribute"),
("d[f'{a}']['{a.", "global"),
("ls .", "global"),
],
)
def test_completion_context(line, expected):
"""Test completion context"""
ip = get_ipython()
get_context = ip.Completer._determine_completion_context
result = get_context(line)
assert result.value == expected, f"Failed on input: '{line}'"
@pytest.mark.parametrize(
"line,expected,expected_after_assignment",
[
("test_alias file", True, False), # overshadowed by variable
("test_alias .", True, False),
("test_alias file.", True, False),
("%test_alias .file.ext", True, True), # magic, not affected by variable
("!test_alias file.", True, True), # bang, not affected by variable
],
)
def test_completion_in_cli_context(line, expected, expected_after_assignment):
"""Test completion context with and without variable overshadowing"""
ip = get_ipython()
ip.run_cell("alias test_alias echo test_alias")
get_context = ip.Completer._is_completing_in_cli_context
# Normal case
result = get_context(line)
assert result == expected, f"Failed on input: '{line}'"
# Test with alias assigned as a variable
try:
ip.user_ns["test_alias"] = "some_value"
result_after_assignment = get_context(line)
assert (
result_after_assignment == expected_after_assignment
), f"Failed after assigning 'ls' as a variable for input: '{line}'"
finally:
ip.user_ns.pop("test_alias", None)
@pytest.mark.xfail(reason="Completion context not yet supported")
@pytest.mark.parametrize(
"line, expected",
[
("f'{f'a.", "global"), # Nested f-string
("3a.", "global"), # names starting with numbers or other symbols
("$).", "global"), # random things with dot at end
],
)
def test_unsupported_completion_context(line, expected):
"""Test unsupported completion context"""
ip = get_ipython()
get_context = ip.Completer._determine_completion_context
result = get_context(line)
assert result.value == expected, f"Failed on input: '{line}'"
@pytest.mark.parametrize(
"setup,code,expected,not_expected",
[
('a="str"; b=1', "(a, b.", [".bit_count", ".conjugate"], [".count"]),
('a="str"; b=1', "(a, b).", [".count"], [".bit_count", ".capitalize"]),
('x="str"; y=1', "x = {1, y.", [".bit_count"], [".count"]),
('x="str"; y=1', "x = [1, y.", [".bit_count"], [".count"]),
('x="str"; y=1; fun=lambda x:x', "x = fun(1, y.", [".bit_count"], [".count"]),
],
)
def test_misc_no_jedi_completions(setup, code, expected, not_expected):
ip = get_ipython()
c = ip.Completer
ip.ex(setup)
with provisionalcompleter(), jedi_status(False):
matches = c.all_completions(code)
assert set(expected) - set(matches) == set(), set(matches)
assert set(matches).intersection(set(not_expected)) == set()
@pytest.mark.parametrize(
"code,expected",
[
(" (a, b", "b"),
("(a, b", "b"),
("(a, b)", ""), # trim always start by trimming
(" (a, b)", "(a, b)"),
(" [a, b]", "[a, b]"),
(" a, b", "b"),
("x = {1, y", "y"),
("x = [1, y", "y"),
("x = fun(1, y", "y"),
(" assert a", "a"),
],
)
def test_trim_expr(code, expected):
c = get_ipython().Completer
assert c._trim_expr(code) == expected
@pytest.mark.parametrize(
"input, expected",
[
["1.234", "1.234"],
# should match signed numbers
["+1", "+1"],
["-1", "-1"],
["-1.0", "-1.0"],
["-1.", "-1."],
["+1.", "+1."],
[".1", ".1"],
# should not match non-numbers
["1..", None],
["..", None],
[".1.", None],
# should match after comma
[",1", "1"],
[", 1", "1"],
[", .1", ".1"],
[", +.1", "+.1"],
# should not match after trailing spaces
[".1 ", None],
# some complex cases
["0b_0011_1111_0100_1110", "0b_0011_1111_0100_1110"],
["0xdeadbeef", "0xdeadbeef"],
["0b_1110_0101", "0b_1110_0101"],
# should not match if in an operation
["1 + 1", None],
[", 1 + 1", None],
],
)
def test_match_numeric_literal_for_dict_key(input, expected):
assert _match_number_in_dict_key_prefix(input) == expected
| TestCompleter |
python | pennersr__django-allauth | allauth/socialaccount/providers/xing/views.py | {
"start": 353,
"end": 1029
} | class ____(OAuthAdapter):
provider_id = "xing"
request_token_url = "https://api.xing.com/v1/request_token" # nosec
access_token_url = "https://api.xing.com/v1/access_token" # nosec
authorize_url = "https://www.xing.com/v1/authorize"
def complete_login(self, request, app, token, response):
client = XingAPI(request, app.client_id, app.secret, self.request_token_url)
extra_data = client.get_user_info()["users"][0]
return self.get_provider().sociallogin_from_response(request, extra_data)
oauth_login = OAuthLoginView.adapter_view(XingOAuthAdapter)
oauth_callback = OAuthCallbackView.adapter_view(XingOAuthAdapter)
| XingOAuthAdapter |
python | django__django | tests/generic_views/forms.py | {
"start": 224,
"end": 339
} | class ____(forms.Form):
name = forms.CharField()
message = forms.CharField(widget=forms.Textarea)
| ContactForm |
python | pytorch__pytorch | tools/testing/target_determination/heuristics/edited_by_pr.py | {
"start": 985,
"end": 2038
} | class ____(HeuristicInterface):
def __init__(self, **kwargs: dict[str, Any]) -> None:
super().__init__(**kwargs)
def get_prediction_confidence(self, tests: list[str]) -> TestPrioritizations:
critical_tests = _get_modified_tests()
return TestPrioritizations(
tests, {TestRun(test): 1 for test in critical_tests if test in tests}
)
def _get_modified_tests() -> set[str]:
try:
changed_files = query_changed_files()
should_run = python_test_file_to_test_name(set(changed_files))
for test_file, regexes in ADDITIONAL_MAPPINGS.items():
if any(
re.search(regex, changed_file) is not None
for regex in regexes
for changed_file in changed_files
):
should_run.add(test_file)
return should_run
except Exception as e:
warn(f"Can't query changed test files due to {e}")
# If unable to get changed files from git, quit without doing any sorting
return set()
| EditedByPR |
python | streamlit__streamlit | lib/streamlit/elements/deck_gl_json_chart.py | {
"start": 3730,
"end": 7000
} | class ____(TypedDict, total=False):
r"""
The schema for the PyDeck chart selection state.
The selection state is stored in a dictionary-like object that supports
both key and attribute notation. Selection states cannot be
programmatically changed or set through Session State.
You must define ``id`` in ``pydeck.Layer`` to ensure statefulness when
using selections with ``st.pydeck_chart``.
Attributes
----------
indices : dict[str, list[int]]
A dictionary of selected objects by layer. Each key in the dictionary
is a layer id, and each value is a list of object indices within that
layer.
objects : dict[str, list[dict[str, Any]]]
A dictionary of object attributes by layer. Each key in the dictionary
is a layer id, and each value is a list of metadata dictionaries for
the selected objects in that layer.
Examples
--------
The following example has multi-object selection enabled. The chart
displays US state capitals by population (2023 US Census estimate). You
can access this `data
<https://github.com/streamlit/docs/blob/main/python/api-examples-source/data/capitals.csv>`_
from GitHub.
>>> import streamlit as st
>>> import pydeck
>>> import pandas as pd
>>>
>>> capitals = pd.read_csv(
... "capitals.csv",
... header=0,
... names=[
... "Capital",
... "State",
... "Abbreviation",
... "Latitude",
... "Longitude",
... "Population",
... ],
... )
>>> capitals["size"] = capitals.Population / 10
>>>
>>> point_layer = pydeck.Layer(
... "ScatterplotLayer",
... data=capitals,
... id="capital-cities",
... get_position=["Longitude", "Latitude"],
... get_color="[255, 75, 75]",
... pickable=True,
... auto_highlight=True,
... get_radius="size",
... )
>>>
>>> view_state = pydeck.ViewState(
... latitude=40, longitude=-117, controller=True, zoom=2.4, pitch=30
... )
>>>
>>> chart = pydeck.Deck(
... point_layer,
... initial_view_state=view_state,
... tooltip={"text": "{Capital}, {Abbreviation}\nPopulation: {Population}"},
... )
>>>
>>> event = st.pydeck_chart(chart, on_select="rerun", selection_mode="multi-object")
>>>
>>> event.selection
.. output ::
https://doc-pydeck-event-state-selections.streamlit.app/
height: 700px
This is an example of the selection state when selecting a single object
from a layer with id, ``"captial-cities"``:
>>> {
>>> "indices":{
>>> "capital-cities":[
>>> 2
>>> ]
>>> },
>>> "objects":{
>>> "capital-cities":[
>>> {
>>> "Abbreviation":" AZ"
>>> "Capital":"Phoenix"
>>> "Latitude":33.448457
>>> "Longitude":-112.073844
>>> "Population":1650070
>>> "State":" Arizona"
>>> "size":165007.0
>>> }
>>> ]
>>> }
>>> }
"""
indices: dict[str, list[int]]
objects: dict[str, list[dict[str, Any]]]
| PydeckSelectionState |
python | xlwings__xlwings | xlwings/pro/reports/markdown.py | {
"start": 1380,
"end": 3180
} | class ____:
"""
``MarkdownStyle`` defines how ``Markdown`` objects are being rendered in Excel cells
or shapes. Start by instantiating a ``MarkdownStyle`` object. Printing it will show
you the current (default) style:
>>> style = MarkdownStyle()
>>> style
<MarkdownStyle>
h1.font: .bold: True
h1.blank_lines_after: 1
paragraph.blank_lines_after: 1
unordered_list.bullet_character: •
unordered_list.blank_lines_after: 1
strong.bold: True
emphasis.italic: True
You can override the defaults, e.g., to make ``**strong**`` text red instead of
bold, do this:
>>> style.strong.bold = False
>>> style.strong.color = (255, 0, 0)
>>> style.strong
strong.color: (255, 0, 0)
.. versionadded:: 0.23.0
"""
class __Heading1(Style):
def __init__(self):
super().__init__(display_name="h1")
self.font = FontStyle(bold=True)
self.blank_lines_after = 0
class __Paragraph(Style):
def __init__(self):
super().__init__(display_name="paragraph")
self.blank_lines_after = 1
class __UnorderedList(Style):
def __init__(self):
super().__init__(display_name="unordered_list")
self.bullet_character = "\u2022"
self.blank_lines_after = 1
def __init__(self):
self.h1 = self.__Heading1()
self.paragraph = self.__Paragraph()
self.unordered_list = self.__UnorderedList()
self.strong = FontStyle(display_name="strong", bold=True)
self.emphasis = FontStyle(display_name="emphasis", italic=True)
def __repr__(self):
s = "<MarkdownStyle>\n"
for attribute in vars(self):
s += f"{getattr(self, attribute)}"
return s
| MarkdownStyle |
python | jazzband__django-formtools | formtools/wizard/views.py | {
"start": 28912,
"end": 29114
} | class ____(NamedUrlWizardView):
"""
A NamedUrlFormWizard with pre-configured CookieStorageBackend.
"""
storage_name = 'formtools.wizard.storage.cookie.CookieStorage'
| NamedUrlCookieWizardView |
python | scipy__scipy | benchmarks/benchmarks/go_benchmark_functions/go_funcs_P.py | {
"start": 13559,
"end": 14542
} | class ____(Benchmark):
r"""
Plateau objective function.
This class defines the Plateau [1]_ global optimization problem. This
is a multimodal minimization problem defined as follows:
.. math::
f_{\text{Plateau}}(x) = 30 + \sum_{i=1}^n \lfloor \lvert x_i
\rvert\rfloor
Here, :math:`n` represents the number of dimensions and
:math:`x_i \in [-5.12, 5.12]` for :math:`i = 1, ..., n`.
*Global optimum*: :math:`f(x) = 30` for :math:`x_i = 0` for
:math:`i = 1, ..., n`
.. [1] Gavana, A. Global Optimization Benchmarks and AMPGO retrieved 2015
"""
change_dimensionality = True
def __init__(self, dimensions=2):
Benchmark.__init__(self, dimensions)
self._bounds = list(zip([-5.12] * self.N, [5.12] * self.N))
self.global_optimum = [[0.0 for _ in range(self.N)]]
self.fglob = 30.0
def fun(self, x, *args):
self.nfev += 1
return 30.0 + sum(floor(abs(x)))
| Plateau |
python | dask__dask | dask/array/_array_expr/_creation.py | {
"start": 4584,
"end": 18375
} | class ____(BroadcastTrick):
func = staticmethod(np.full_like)
def wrap_func_shape_as_first_arg(*args, klass, **kwargs):
"""
Transform np creation function into blocked version
"""
if "shape" not in kwargs:
shape, args = args[0], args[1:]
else:
shape = kwargs.pop("shape")
if isinstance(shape, ArrayExpr):
raise TypeError(
"Dask array input not supported. "
"Please use tuple, list, or a 1D numpy array instead."
)
parsed = _parse_wrap_args(klass.func, args, kwargs, shape)
return new_collection(
klass(
parsed["shape"],
parsed["dtype"],
parsed["chunks"],
kwargs.get("meta"),
kwargs,
)
)
def wrap(func, **kwargs):
return partial(func, **kwargs)
ones = wrap(wrap_func_shape_as_first_arg, klass=Ones, dtype="f8")
zeros = wrap(wrap_func_shape_as_first_arg, klass=Zeros, dtype="f8")
empty = wrap(wrap_func_shape_as_first_arg, klass=Empty, dtype="f8")
_full = wrap(wrap_func_shape_as_first_arg, klass=Full, dtype="f8")
def arange(start=0, stop=None, step=1, *, chunks="auto", like=None, dtype=None):
"""
Return evenly spaced values from `start` to `stop` with step size `step`.
The values are half-open [start, stop), so including start and excluding
stop. This is basically the same as python's range function but for dask
arrays.
When using a non-integer step, such as 0.1, the results will often not be
consistent. It is better to use linspace for these cases.
Parameters
----------
start : int, optional
The starting value of the sequence. The default is 0.
stop : int
The end of the interval, this value is excluded from the interval.
step : int, optional
The spacing between the values. The default is 1 when not specified.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
Defaults to "auto" which will automatically determine chunk sizes.
dtype : numpy.dtype
Output dtype. Omit to infer it from start, stop, step
Defaults to ``None``.
like : array type or ``None``
Array to extract meta from. Defaults to ``None``.
Returns
-------
samples : dask array
See Also
--------
dask.array.linspace
"""
if stop is None:
stop = start
start = 0
return new_collection(Arange(start, stop, step, chunks, like, dtype))
def linspace(
start, stop, num=50, endpoint=True, retstep=False, chunks="auto", dtype=None
):
"""
Return `num` evenly spaced values over the closed interval [`start`,
`stop`].
Parameters
----------
start : scalar
The starting value of the sequence.
stop : scalar
The last value of the sequence.
num : int, optional
Number of samples to include in the returned dask array, including the
endpoints. Default is 50.
endpoint : bool, optional
If True, ``stop`` is the last sample. Otherwise, it is not included.
Default is True.
retstep : bool, optional
If True, return (samples, step), where step is the spacing between
samples. Default is False.
chunks : int
The number of samples on each block. Note that the last block will have
fewer samples if `num % blocksize != 0`
dtype : dtype, optional
The type of the output array.
Returns
-------
samples : dask array
step : float, optional
Only returned if ``retstep`` is True. Size of spacing between samples.
See Also
--------
dask.array.arange
"""
num = int(num)
result = new_collection(Linspace(start, stop, num, endpoint, chunks, dtype))
if retstep:
return result, result.expr.step
else:
return result
def empty_like(a, dtype=None, order="C", chunks=None, name=None, shape=None):
"""
Return a new array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of the
returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
chunks : sequence of ints
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
name : str, optional
An optional keyname for the array. Defaults to hashing the input
keyword arguments.
shape : int or sequence of ints, optional.
Overrides the shape of the result.
Returns
-------
out : ndarray
Array of uninitialized (arbitrary) data with the same
shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
zeros_like : Return an array of zeros with shape and type of input.
empty : Return a new uninitialized array.
ones : Return a new array setting values to one.
zeros : Return a new array setting values to zero.
Notes
-----
This function does *not* initialize the returned array; to do that use
`zeros_like` or `ones_like` instead. It may be marginally faster than
the functions that do set the array values.
"""
a = asarray(a, name=False)
shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)
# if shape is nan we cannot rely on regular empty function, we use
# generic map_blocks.
if np.isnan(shape).any():
return a.map_blocks(partial(np.empty_like, dtype=(dtype or a.dtype)))
return empty(
shape,
dtype=(dtype or a.dtype),
order=order,
chunks=chunks,
name=name,
meta=a._meta,
)
def ones_like(a, dtype=None, order="C", chunks=None, name=None, shape=None):
"""
Return an array of ones with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
chunks : sequence of ints
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
name : str, optional
An optional keyname for the array. Defaults to hashing the input
keyword arguments.
shape : int or sequence of ints, optional.
Overrides the shape of the result.
Returns
-------
out : ndarray
Array of ones with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
"""
a = asarray(a, name=False)
shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)
# if shape is nan we cannot rely on regular ones function, we use
# generic map_blocks.
if np.isnan(shape).any():
return a.map_blocks(partial(np.ones_like, dtype=(dtype or a.dtype)))
return ones(
shape,
dtype=(dtype or a.dtype),
order=order,
chunks=chunks,
name=name,
meta=a._meta,
)
def zeros_like(a, dtype=None, order="C", chunks=None, name=None, shape=None):
"""
Return an array of zeros with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
chunks : sequence of ints
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
name : str, optional
An optional keyname for the array. Defaults to hashing the input
keyword arguments.
shape : int or sequence of ints, optional.
Overrides the shape of the result.
Returns
-------
out : ndarray
Array of zeros with the same shape and type as `a`.
See Also
--------
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
"""
a = asarray(a, name=False)
shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)
# if shape is nan we cannot rely on regular zeros function, we use
# generic map_blocks.
if np.isnan(shape).any():
return a.map_blocks(partial(np.zeros_like, dtype=(dtype or a.dtype)))
return zeros(
shape,
dtype=(dtype or a.dtype),
order=order,
chunks=chunks,
name=name,
meta=a._meta,
)
def full(shape, fill_value, *args, **kwargs):
# np.isscalar has somewhat strange behavior:
# https://docs.scipy.org/doc/numpy/reference/generated/numpy.isscalar.html
if np.ndim(fill_value) != 0:
raise ValueError(
f"fill_value must be scalar. Received {type(fill_value).__name__} instead."
)
if kwargs.get("dtype") is None:
if hasattr(fill_value, "dtype"):
kwargs["dtype"] = fill_value.dtype
else:
kwargs["dtype"] = type(fill_value)
return _full(*args, shape=shape, fill_value=fill_value, **kwargs)
def full_like(a, fill_value, order="C", dtype=None, chunks=None, name=None, shape=None):
"""
Return a full array with the same shape and type as a given array.
Parameters
----------
a : array_like
The shape and data-type of `a` define these same attributes of
the returned array.
fill_value : scalar
Fill value.
dtype : data-type, optional
Overrides the data type of the result.
order : {'C', 'F'}, optional
Whether to store multidimensional data in C- or Fortran-contiguous
(row- or column-wise) order in memory.
chunks : sequence of ints
The number of samples on each block. Note that the last block will have
fewer samples if ``len(array) % chunks != 0``.
name : str, optional
An optional keyname for the array. Defaults to hashing the input
keyword arguments.
shape : int or sequence of ints, optional.
Overrides the shape of the result.
Returns
-------
out : ndarray
Array of `fill_value` with the same shape and type as `a`.
See Also
--------
zeros_like : Return an array of zeros with shape and type of input.
ones_like : Return an array of ones with shape and type of input.
empty_like : Return an empty array with shape and type of input.
zeros : Return a new array setting values to zero.
ones : Return a new array setting values to one.
empty : Return a new uninitialized array.
full : Fill a new array.
"""
a = asarray(a, name=False)
shape, chunks = _get_like_function_shapes_chunks(a, chunks, shape)
# if shape is nan we cannot rely on regular full function, we use
# generic map_blocks.
if np.isnan(shape).any():
return a.map_blocks(partial(np.full_like, dtype=(dtype or a.dtype)), fill_value)
return full(
shape,
fill_value,
dtype=(dtype or a.dtype),
order=order,
chunks=chunks,
name=name,
meta=a._meta,
)
@derived_from(np)
def repeat(a, repeats, axis=None):
if axis is None:
if a.ndim == 1:
axis = 0
else:
raise NotImplementedError("Must supply an integer axis value")
if not isinstance(repeats, Integral):
raise NotImplementedError("Only integer valued repeats supported")
if -a.ndim <= axis < 0:
axis += a.ndim
elif not 0 <= axis <= a.ndim - 1:
raise ValueError(f"axis(={axis}) out of bounds")
if repeats == 0:
return a[tuple(slice(None) if d != axis else slice(0) for d in range(a.ndim))]
elif repeats == 1:
return a
cchunks = cached_cumsum(a.chunks[axis], initial_zero=True)
slices = []
for c_start, c_stop in sliding_window(2, cchunks):
ls = np.linspace(c_start, c_stop, repeats).round(0)
for ls_start, ls_stop in sliding_window(2, ls):
if ls_start != ls_stop:
slices.append(slice(ls_start, ls_stop))
all_slice = slice(None, None, None)
slices = [
(all_slice,) * axis + (s,) + (all_slice,) * (a.ndim - axis - 1) for s in slices
]
slabs = [a[slc] for slc in slices]
out = []
for slab in slabs:
chunks = list(slab.chunks)
assert len(chunks[axis]) == 1
chunks[axis] = (chunks[axis][0] * repeats,)
chunks = tuple(chunks)
result = slab.map_blocks(
np.repeat, repeats, axis=axis, chunks=chunks, dtype=slab.dtype
)
out.append(result)
return concatenate(out, axis=axis)
| Full |
python | Pylons__pyramid | tests/test_config/test_actions.py | {
"start": 32635,
"end": 32827
} | class ____:
def __init__(self):
self.registered = []
def register(self, introspector, action_info):
self.registered.append((introspector, action_info))
| DummyIntrospectable |
python | getsentry__sentry | src/sentry/monitors/consumers/monitor_consumer.py | {
"start": 43284,
"end": 45445
} | class ____(ProcessingStrategyFactory[KafkaPayload]):
parallel_executor: ThreadPoolExecutor | None = None
batched_parallel = False
"""
Does the consumer process unrelated check-ins in parallel?
"""
max_batch_size = 500
"""
How many messages will be batched at once when in parallel mode.
"""
max_batch_time = 10
"""
The maximum time in seconds to accumulate a bach of check-ins.
"""
def __init__(
self,
mode: Literal["batched-parallel", "serial"] | None = None,
max_batch_size: int | None = None,
max_batch_time: int | None = None,
max_workers: int | None = None,
) -> None:
if mode == "batched-parallel":
self.batched_parallel = True
self.parallel_executor = ThreadPoolExecutor(max_workers=max_workers)
if max_batch_size is not None:
self.max_batch_size = max_batch_size
if max_batch_time is not None:
self.max_batch_time = max_batch_time
def shutdown(self) -> None:
if self.parallel_executor:
self.parallel_executor.shutdown()
def create_parallel_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]:
assert self.parallel_executor is not None
batch_processor = RunTask(
function=partial(process_batch, self.parallel_executor),
next_step=CommitOffsets(commit),
)
return BatchStep(
max_batch_size=self.max_batch_size,
max_batch_time=self.max_batch_time,
next_step=batch_processor,
)
def create_synchronous_worker(self, commit: Commit) -> ProcessingStrategy[KafkaPayload]:
return RunTask(
function=process_single,
next_step=CommitOffsets(commit),
)
def create_with_partitions(
self,
commit: Commit,
partitions: Mapping[Partition, int],
) -> ProcessingStrategy[KafkaPayload]:
if self.batched_parallel:
return self.create_parallel_worker(commit)
else:
return self.create_synchronous_worker(commit)
| StoreMonitorCheckInStrategyFactory |
python | pypa__pipenv | pipenv/patched/pip/_internal/models/search_scope.py | {
"start": 503,
"end": 5075
} | class ____:
"""
Encapsulates the locations that pip is configured to search.
"""
find_links: List[str]
index_urls: List[str]
no_index: bool
index_lookup: Optional[Dict[str, str]] = None
index_restricted: Optional[bool] = None
@classmethod
def create(
cls,
find_links: List[str],
index_urls: List[str],
no_index: bool,
index_lookup: Optional[Dict[str, List[str]]] = None,
index_restricted: bool = False,
) -> "SearchScope":
"""
Create a SearchScope object after normalizing the `find_links`.
"""
# Build find_links. If an argument starts with ~, it may be
# a local file relative to a home directory. So try normalizing
# it and if it exists, use the normalized version.
# This is deliberately conservative - it might be fine just to
# blindly normalize anything starting with a ~...
built_find_links: List[str] = []
for link in find_links:
if link.startswith("~"):
new_link = normalize_path(link)
if os.path.exists(new_link):
link = new_link
built_find_links.append(link)
# If we don't have TLS enabled, then WARN if anyplace we're looking
# relies on TLS.
if not has_tls():
for link in itertools.chain(index_urls, built_find_links):
parsed = urllib.parse.urlparse(link)
if parsed.scheme == "https":
logger.warning(
"pip is configured with locations that require "
"TLS/SSL, however the ssl module in Python is not "
"available."
)
break
return cls(
find_links=built_find_links,
index_urls=index_urls,
no_index=no_index,
index_lookup=index_lookup or {},
index_restricted=index_restricted,
)
def get_formatted_locations(self) -> str:
lines = []
redacted_index_urls = []
if self.index_urls and self.index_urls != [PyPI.simple_url]:
for url in self.index_urls:
redacted_index_url = redact_auth_from_url(url)
# Parse the URL
purl = urllib.parse.urlsplit(redacted_index_url)
# URL is generally invalid if scheme and netloc is missing
# there are issues with Python and URL parsing, so this test
# is a bit crude. See bpo-20271, bpo-23505. Python doesn't
# always parse invalid URLs correctly - it should raise
# exceptions for malformed URLs
if not purl.scheme and not purl.netloc:
logger.warning(
'The index url "%s" seems invalid, please provide a scheme.',
redacted_index_url,
)
redacted_index_urls.append(redacted_index_url)
lines.append(
"Looking in indexes: {}".format(", ".join(redacted_index_urls))
)
if self.find_links:
lines.append(
"Looking in links: {}".format(
", ".join(redact_auth_from_url(url) for url in self.find_links)
)
)
return "\n".join(lines)
def get_index_urls_locations(self, project_name: str) -> List[str]:
"""Returns the locations found via self.index_urls
Checks the url_name on the main (first in the list) index and
use this url_name to produce all locations
"""
def mkurl_pypi_url(url: str) -> str:
loc = posixpath.join(
url, urllib.parse.quote(canonicalize_name(project_name))
)
# For maximum compatibility with easy_install, ensure the path
# ends in a trailing slash. Although this isn't in the spec
# (and PyPI can handle it without the slash) some other index
# implementations might break if they relied on easy_install's
# behavior.
if not loc.endswith("/"):
loc = loc + "/"
return loc
index_urls = self.index_urls
if project_name in self.index_lookup:
index_urls = [self.index_lookup[project_name]]
elif self.index_restricted and self.index_urls:
index_urls = [self.index_urls[0]]
return [mkurl_pypi_url(url) for url in index_urls]
| SearchScope |
python | pytorch__pytorch | test/inductor/test_move_constructors_to_gpu.py | {
"start": 543,
"end": 3317
} | class ____(TestCase):
def _check_fn(self, func, expect_cpu, *args):
out_eager = func(*args)
out_compiled, code = run_and_get_code(torch.compile(func), *args)
self.assertEqual(out_eager, out_compiled)
assert len(code) == 1
if expect_cpu:
FileCheck().check("cpp_fused").run(code[0])
else:
FileCheck().check_not("cpp_fused").run(code[0])
def test_simple(self):
def foo(x):
return x[torch.arange(x.shape[0])]
inp = torch.rand(32, 77, 512, device=GPU_TYPE)
self._check_fn(foo, False, inp)
def test_output_failure(self):
def foo(x):
tmp1 = torch.arange(x.shape[0])
return tmp1, x[tmp1]
inp = torch.rand(32, 77, 512, device=GPU_TYPE)
self._check_fn(foo, True, inp)
def test_non_convertable_op_failure(self):
def foo(x):
y = torch.arange(x.shape[0])
return x + y, torch.ones([4], device=GPU_TYPE)
inp = torch.rand([100])
self._check_fn(foo, True, inp)
def test_multiple_constructors(self):
def foo(x):
tmp1 = torch.arange(x.shape[0])
o1 = x[tmp1]
tmp2 = torch.arange(x.shape[1]).view([1, x.shape[1]])
o2 = x[tmp2]
return o1, o2, o1 + o2
inp = torch.rand([200, 200])
self._check_fn(foo, True, inp)
def test_sets_equiv(self):
@torch.compile()
def foo(x):
c1 = torch.ones([4], dtype=torch.long)
c2 = torch.arange(-1, 3)
return x[c1 + c2], c2 - 4 * 2
inp = torch.rand([4]).to(GPU_TYPE)
_, code = run_and_get_code(foo, inp)
FileCheck().check_not("triton.jit").run(code[0])
@torch.compile()
def foo(x):
c2 = torch.arange(-1, 3)
c1 = torch.ones([4], dtype=torch.long)
return x[c1 + c2], c2 - 4 * 2
_, code = run_and_get_code(foo, inp)
FileCheck().check_not("triton.jit").run(code[0])
@requires_multigpu()
@unittest.skip("https://github.com/pytorch/pytorch/issues/139520")
def test_multi_gpu(self):
def foo(x):
return (
x[torch.arange(x.shape[0])],
torch.ones([4], device=f"{GPU_TYPE}:0"),
torch.ones([4], device=f"{GPU_TYPE}:1"),
)
# nyi, multi-gpu
inp = torch.rand([100], device=GPU_TYPE)
self._check_fn(foo, True, inp)
def test_no_gpu(self):
def foo(x):
return x[torch.arange(x.shape[0])]
inp = torch.rand([100])
self._check_fn(foo, True, inp)
if __name__ == "__main__":
if IS_LINUX and HAS_GPU_AND_TRITON:
run_tests()
| TestMoveConstructorsToGpu |
python | davidhalter__jedi | jedi/inference/value/iterable.py | {
"start": 10864,
"end": 14850
} | class ____(Sequence):
_TUPLE_LIKE = 'testlist_star_expr', 'testlist', 'subscriptlist'
mapping = {'(': 'tuple',
'[': 'list',
'{': 'set'}
def __init__(self, inference_state, defining_context, atom):
super().__init__(inference_state)
self.atom = atom
self._defining_context = defining_context
if self.atom.type in self._TUPLE_LIKE:
self.array_type = 'tuple'
else:
self.array_type = SequenceLiteralValue.mapping[atom.children[0]]
"""The builtin name of the array (list, set, tuple or dict)."""
def _get_generics(self):
if self.array_type == 'tuple':
return tuple(x.infer().py__class__() for x in self.py__iter__())
return super()._get_generics()
def py__simple_getitem__(self, index):
"""Here the index is an int/str. Raises IndexError/KeyError."""
if isinstance(index, slice):
return ValueSet([self])
else:
with reraise_getitem_errors(TypeError, KeyError, IndexError):
node = self.get_tree_entries()[index]
if node == ':' or node.type == 'subscript':
return NO_VALUES
return self._defining_context.infer_node(node)
def py__iter__(self, contextualized_node=None):
"""
While values returns the possible values for any array field, this
function returns the value for a certain index.
"""
for node in self.get_tree_entries():
if node == ':' or node.type == 'subscript':
# TODO this should probably use at least part of the code
# of infer_subscript_list.
yield LazyKnownValue(Slice(self._defining_context, None, None, None))
else:
yield LazyTreeValue(self._defining_context, node)
yield from check_array_additions(self._defining_context, self)
def py__len__(self):
# This function is not really used often. It's more of a try.
return len(self.get_tree_entries())
def get_tree_entries(self):
c = self.atom.children
if self.atom.type in self._TUPLE_LIKE:
return c[::2]
array_node = c[1]
if array_node in (']', '}', ')'):
return [] # Direct closing bracket, doesn't contain items.
if array_node.type == 'testlist_comp':
# filter out (for now) pep 448 single-star unpacking
return [value for value in array_node.children[::2]
if value.type != "star_expr"]
elif array_node.type == 'dictorsetmaker':
kv = []
iterator = iter(array_node.children)
for key in iterator:
if key == "**":
# dict with pep 448 double-star unpacking
# for now ignoring the values imported by **
next(iterator)
next(iterator, None) # Possible comma.
else:
op = next(iterator, None)
if op is None or op == ',':
if key.type == "star_expr":
# pep 448 single-star unpacking
# for now ignoring values imported by *
pass
else:
kv.append(key) # A set.
else:
assert op == ':' # A dict.
kv.append((key, next(iterator)))
next(iterator, None) # Possible comma.
return kv
else:
if array_node.type == "star_expr":
# pep 448 single-star unpacking
# for now ignoring values imported by *
return []
else:
return [array_node]
def __repr__(self):
return "<%s of %s>" % (self.__class__.__name__, self.atom)
| SequenceLiteralValue |
python | explosion__spaCy | spacy/lang/af/__init__.py | {
"start": 153,
"end": 255
} | class ____(Language):
lang = "af"
Defaults = AfrikaansDefaults
__all__ = ["Afrikaans"]
| Afrikaans |
python | walkccc__LeetCode | solutions/1357. Apply Discount Every n Orders/1357.py | {
"start": 0,
"end": 538
} | class ____:
def __init__(
self,
n: int,
discount: int,
products: list[int],
prices: list[int],
):
self.n = n
self.discount = discount
self.productToPrice = dict(zip(products, prices))
self.count = 0
def getBill(self, product: list[int], amount: list[int]) -> float:
self.count += 1
total = sum(self.productToPrice[p] * amount[i]
for i, p in enumerate(product))
if self.count % self.n == 0:
return total * (1 - self.discount / 100)
return total
| Cashier |
python | PyCQA__pydocstyle | src/tests/test_cases/test.py | {
"start": 11888,
"end": 12157
} | class ____: # noqa: D203,D213
"""A Blah.
Parameters
----------
x : int
"""
def __init__(self, x):
pass
expect(os.path.normcase(__file__ if __file__[-1] != 'c' else __file__[:-1]),
'D100: Missing docstring in public module')
| Blah |
python | pydantic__pydantic | pydantic/v1/types.py | {
"start": 25916,
"end": 27114
} | class ____(SecretField):
min_length: OptionalInt = None
max_length: OptionalInt = None
@classmethod
def __modify_schema__(cls, field_schema: Dict[str, Any]) -> None:
update_not_none(
field_schema,
type='string',
writeOnly=True,
format='password',
minLength=cls.min_length,
maxLength=cls.max_length,
)
@classmethod
def __get_validators__(cls) -> 'CallableGenerator':
yield cls.validate
yield constr_length_validator
@classmethod
def validate(cls, value: Any) -> 'SecretStr':
if isinstance(value, cls):
return value
value = str_validator(value)
return cls(value)
def __init__(self, value: str):
self._secret_value = value
def __repr__(self) -> str:
return f"SecretStr('{self}')"
def __len__(self) -> int:
return len(self._secret_value)
def display(self) -> str:
warnings.warn('`secret_str.display()` is deprecated, use `str(secret_str)` instead', DeprecationWarning)
return str(self)
def get_secret_value(self) -> str:
return self._secret_value
| SecretStr |
python | apache__airflow | airflow-ctl/src/airflowctl/api/operations.py | {
"start": 13468,
"end": 14202
} | class ____(BaseOperations):
"""Config operations."""
def get(self, section: str, option: str) -> Config | ServerResponseError:
"""Get a config from the API server."""
try:
self.response = self.client.get(f"/config/section/{section}/option/{option}")
return Config.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
def list(self) -> Config | ServerResponseError:
"""List all configs from the API server."""
try:
self.response = self.client.get("/config")
return Config.model_validate_json(self.response.content)
except ServerResponseError as e:
raise e
| ConfigOperations |
python | realpython__materials | inheritance-and-composition/inheritance/employees.py | {
"start": 841,
"end": 1056
} | class ____(Employee, FactoryRole, HourlyPolicy):
def __init__(self, id, name, hours_worked, hour_rate):
HourlyPolicy.__init__(self, hours_worked, hour_rate)
super().__init__(id, name)
| FactoryWorker |
python | getsentry__sentry | tests/sentry/issues/test_group.py | {
"start": 729,
"end": 4622
} | class ____(OccurrenceTestMixin, TestCase):
def test_simple_fingerprint(self) -> None:
group = self.create_group(project=self.project)
fingerprint = "test-fingerprint-1"
hashed_fingerprint = hash_fingerprint([fingerprint])
GroupHash.objects.create(
project=self.project,
group=group,
hash=hashed_fingerprint[0],
)
result = get_group_by_occurrence_fingerprint(self.project.id, fingerprint)
assert result.id == group.id
def test_multiple_part_fingerprint(self) -> None:
group = self.create_group(project=self.project)
fingerprints = ["error", "type", "location"]
hashed_fingerprint = hash_fingerprint(fingerprints)
for part in hashed_fingerprint:
GroupHash.objects.create(
project=self.project,
group=group,
hash=part,
)
for part in fingerprints:
result = get_group_by_occurrence_fingerprint(self.project.id, part)
assert result.id == group.id
def test_group_not_found(self) -> None:
fingerprint = "non-existent-fingerprint"
with pytest.raises(Group.DoesNotExist):
get_group_by_occurrence_fingerprint(self.project.id, fingerprint)
def test_empty_fingerprint(self) -> None:
group = self.create_group(project=self.project)
hashed_empty = md5(b"").hexdigest()
GroupHash.objects.create(
project=self.project,
group=group,
hash=hashed_empty,
)
result = get_group_by_occurrence_fingerprint(self.project.id, "")
assert result.id == group.id
@with_feature("organizations:profile-file-io-main-thread-ingest")
@requires_snuba
def test_group_created_via_issue_platform(self) -> None:
fingerprint = "issue-platform-fingerprint"
event = self.store_event(data=load_data("transaction"), project_id=self.project.id)
occurrence = self.build_occurrence(
event_id=event.event_id,
project_id=self.project.id,
fingerprint=[fingerprint],
type=ProfileFileIOGroupType.type_id,
issue_title="File I/O Issue",
subtitle="High file I/O detected",
)
# Override the fingerprint to be unhashed since produce_occurrence_to_kafka expects
# unhashed fingerprints (it will hash them during processing)
occurrence = replace(occurrence, fingerprint=[fingerprint])
produce_occurrence_to_kafka(
payload_type=PayloadType.OCCURRENCE,
occurrence=occurrence,
)
stored_occurrence = IssueOccurrence.fetch(occurrence.id, occurrence.project_id)
assert stored_occurrence is not None
result = get_group_by_occurrence_fingerprint(self.project.id, fingerprint)
assert result.title == "File I/O Issue"
def test_same_fingerprint_different_projects(self) -> None:
project1 = self.project
project2 = self.create_project(organization=self.organization)
group1 = self.create_group(project=project1, message="Group 1")
group2 = self.create_group(project=project2, message="Group 2")
fingerprint = "shared-fingerprint"
hashed_fingerprint = hash_fingerprint([fingerprint])[0]
GroupHash.objects.create(
project=project1,
group=group1,
hash=hashed_fingerprint,
)
GroupHash.objects.create(
project=project2,
group=group2,
hash=hashed_fingerprint,
)
result1 = get_group_by_occurrence_fingerprint(project1.id, fingerprint)
result2 = get_group_by_occurrence_fingerprint(project2.id, fingerprint)
assert result1.id == group1.id
assert result2.id == group2.id
| GetGroupByOccurrenceFingerprintTest |
python | great-expectations__great_expectations | contrib/cli/great_expectations_contrib/package.py | {
"start": 1524,
"end": 1680
} | class ____(str, Enum):
CONCEPT_ONLY = "CONCEPT_ONLY"
EXPERIMENTAL = "EXPERIMENTAL"
BETA = "BETA"
PRODUCTION = "PRODUCTION"
@dataclass
| Maturity |
python | pypa__warehouse | warehouse/oidc/models/_core.py | {
"start": 13500,
"end": 14011
} | class ____(OIDCPublisherMixin, db.Model):
__tablename__ = "oidc_publishers"
projects: Mapped[list[Project]] = orm.relationship(
secondary=OIDCPublisherProjectAssociation.__table__,
back_populates="oidc_publishers",
)
macaroons: Mapped[list[Macaroon]] = orm.relationship(
cascade="all, delete-orphan", lazy=True
)
__mapper_args__ = {
"polymorphic_identity": "oidc_publishers",
"polymorphic_on": OIDCPublisherMixin.discriminator,
}
| OIDCPublisher |
python | numpy__numpy | tools/swig/test/testMatrix.py | {
"start": 12041,
"end": 12312
} | class ____(MatrixTestCase):
def __init__(self, methodName="runTest"):
MatrixTestCase.__init__(self, methodName)
self.typeStr = "longLong"
self.typeCode = "q"
######################################################################
| longLongTestCase |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/super13.py | {
"start": 116,
"end": 155
} | class ____:
pass
func1(ClassA)
| ClassA |
python | python__mypy | mypy/checker.py | {
"start": 6572,
"end": 6866
} | class ____(NamedTuple):
node: DeferredNodeType
# And its TypeInfo (for semantic analysis self type handling)
active_typeinfo: TypeInfo | None
# Same as above, but for fine-grained mode targets. Only top-level functions/methods
# and module top levels are allowed as such.
| DeferredNode |
python | pytorch__pytorch | torch/onnx/_internal/fx/passes/type_promotion.py | {
"start": 3477,
"end": 7576
} | class ____(TypePromotionRule):
"""Defines how to perform elementwise type promotion for 'torch.ops.{namespace}.{op_name}'."""
_USE_OPMATH: bool = False
"""Whether to use opmath to compute the promoted input dtype.
If used, upcasts will be inserted everywhere for lower precision models.
Set to False and have torchlib handle upcasts in op implementation internally.
"""
def __init__(
self,
namespace: str,
op_name: str,
promote_args_positions: Sequence[int],
promote_kwargs_names: Sequence[str],
promotion_kind: _prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND,
) -> None:
"""Constructs a TypePromotionRule for elementwise operators.
Args:
namespace: Namespace of the op. E.g. 'aten' in 'torch.ops.aten.add'.
op_name: Name of the op. E.g. 'add' in 'torch.ops.aten.add'.
promote_args_positions: Positions of args to promote.
promote_kwargs_names: Names of kwargs to promote.
promotion_kind: Type promotion kind. Refer to [_prims_common.elementwise_dtypes](https://github.com/pytorch/pytorch/blob/main/torch/_prims_common/__init__.py) for detail. # noqa: B950
"""
super().__init__(namespace, op_name)
self.promote_args_positions = promote_args_positions
self.promote_kwargs_names = promote_kwargs_names
self.promotion_kind = promotion_kind
def __repr__(self) -> str:
return (
f"ElementwiseTypePromotionRule('{self.namespace}', '{self.op_name}', "
f"{self.promote_args_positions}, {self.promote_kwargs_names}, {self.promotion_kind})"
)
# pyrefly: ignore [bad-override]
def __eq__(self, other: object, /) -> bool:
if not isinstance(other, ElementwiseTypePromotionRule):
return False
return (
self.namespace == other.namespace
and self.op_name == other.op_name
and self.promote_args_positions == other.promote_args_positions
and self.promote_kwargs_names == other.promote_kwargs_names
and self.promotion_kind == other.promotion_kind
)
def __hash__(self) -> int:
return f"{type(self)}:{self.namespace}.{self.op_name}".__hash__()
def _consolidate_input_dtype(
self, computed_dtype: torch.dtype, result_dtype: torch.dtype
) -> torch.dtype:
"""
Although opmath is the right thing to do to retain on-par precision, it inserts
upcasts everywhere in the graph. This is particularly hard for backend to optimize
since there is no way to differentiate between inserted upcasts and model code
casts. Hence we consolidate the input dtype to the result dtype to avoid this.
"""
if not self._USE_OPMATH and self.promotion_kind in (
_prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.DEFAULT,
_prims_common.ELEMENTWISE_TYPE_PROMOTION_KIND.INT_TO_FLOAT,
):
return result_dtype
return computed_dtype
def preview_type_promotion(
self, args: tuple, kwargs: dict
) -> TypePromotionSnapshot:
candidate_args = {
i: args[i]
for i in self.promote_args_positions
if i < len(args) and args[i] is not None
}
candidate_kwargs = {
name: kwargs[name]
for name in self.promote_kwargs_names
if name in kwargs and kwargs[name] is not None
}
computed_dtype, result_dtype = _prims_common.elementwise_dtypes(
*_pytree.arg_tree_leaves(*candidate_args.values(), **candidate_kwargs),
type_promotion_kind=self.promotion_kind,
)
consolidated_input_dtype = self._consolidate_input_dtype(
computed_dtype, result_dtype
)
return TypePromotionSnapshot(
dict.fromkeys(candidate_args.keys(), consolidated_input_dtype),
dict.fromkeys(candidate_kwargs.keys(), consolidated_input_dtype),
result_dtype,
)
| ElementwiseTypePromotionRule |
python | sympy__sympy | sympy/physics/units/unitsystem.py | {
"start": 447,
"end": 7593
} | class ____(_QuantityMapper):
"""
UnitSystem represents a coherent set of units.
A unit system is basically a dimension system with notions of scales. Many
of the methods are defined in the same way.
It is much better if all base units have a symbol.
"""
_unit_systems: dict[str, UnitSystem] = {}
def __init__(self, base_units, units=(), name="", descr="", dimension_system=None, derived_units: dict[Dimension, Quantity]={}):
UnitSystem._unit_systems[name] = self
self.name = name
self.descr = descr
self._base_units = base_units
self._dimension_system = dimension_system
self._units = tuple(set(base_units) | set(units))
self._base_units = tuple(base_units)
self._derived_units = derived_units
super().__init__()
def __str__(self):
"""
Return the name of the system.
If it does not exist, then it makes a list of symbols (or names) of
the base dimensions.
"""
if self.name != "":
return self.name
else:
return "UnitSystem((%s))" % ", ".join(
str(d) for d in self._base_units)
def __repr__(self):
return '<UnitSystem: %s>' % repr(self._base_units)
def extend(self, base, units=(), name="", description="", dimension_system=None, derived_units: dict[Dimension, Quantity]={}):
"""Extend the current system into a new one.
Take the base and normal units of the current system to merge
them to the base and normal units given in argument.
If not provided, name and description are overridden by empty strings.
"""
base = self._base_units + tuple(base)
units = self._units + tuple(units)
return UnitSystem(base, units, name, description, dimension_system, {**self._derived_units, **derived_units})
def get_dimension_system(self):
return self._dimension_system
def get_quantity_dimension(self, unit):
qdm = self.get_dimension_system()._quantity_dimension_map
if unit in qdm:
return qdm[unit]
return super().get_quantity_dimension(unit)
def get_quantity_scale_factor(self, unit):
qsfm = self.get_dimension_system()._quantity_scale_factors
if unit in qsfm:
return qsfm[unit]
return super().get_quantity_scale_factor(unit)
@staticmethod
def get_unit_system(unit_system):
if isinstance(unit_system, UnitSystem):
return unit_system
if unit_system not in UnitSystem._unit_systems:
raise ValueError(
"Unit system is not supported. Currently"
"supported unit systems are {}".format(
", ".join(sorted(UnitSystem._unit_systems))
)
)
return UnitSystem._unit_systems[unit_system]
@staticmethod
def get_default_unit_system():
return UnitSystem._unit_systems["SI"]
@property
def dim(self):
"""
Give the dimension of the system.
That is return the number of units forming the basis.
"""
return len(self._base_units)
@property
def is_consistent(self):
"""
Check if the underlying dimension system is consistent.
"""
# test is performed in DimensionSystem
return self.get_dimension_system().is_consistent
@property
def derived_units(self) -> dict[Dimension, Quantity]:
return self._derived_units
def get_dimensional_expr(self, expr):
from sympy.physics.units import Quantity
if isinstance(expr, Mul):
return Mul(*[self.get_dimensional_expr(i) for i in expr.args])
elif isinstance(expr, Pow):
return self.get_dimensional_expr(expr.base) ** expr.exp
elif isinstance(expr, Add):
return self.get_dimensional_expr(expr.args[0])
elif isinstance(expr, Derivative):
dim = self.get_dimensional_expr(expr.expr)
for independent, count in expr.variable_count:
dim /= self.get_dimensional_expr(independent)**count
return dim
elif isinstance(expr, Function):
args = [self.get_dimensional_expr(arg) for arg in expr.args]
if all(i == 1 for i in args):
return S.One
return expr.func(*args)
elif isinstance(expr, Quantity):
return self.get_quantity_dimension(expr).name
return S.One
def _collect_factor_and_dimension(self, expr):
"""
Return tuple with scale factor expression and dimension expression.
"""
from sympy.physics.units import Quantity
if isinstance(expr, Quantity):
return expr.scale_factor, expr.dimension
elif isinstance(expr, Mul):
factor = 1
dimension = Dimension(1)
for arg in expr.args:
arg_factor, arg_dim = self._collect_factor_and_dimension(arg)
factor *= arg_factor
dimension *= arg_dim
return factor, dimension
elif isinstance(expr, Pow):
factor, dim = self._collect_factor_and_dimension(expr.base)
exp_factor, exp_dim = self._collect_factor_and_dimension(expr.exp)
if self.get_dimension_system().is_dimensionless(exp_dim):
exp_dim = 1
return factor ** exp_factor, dim ** (exp_factor * exp_dim)
elif isinstance(expr, Add):
factor, dim = self._collect_factor_and_dimension(expr.args[0])
for addend in expr.args[1:]:
addend_factor, addend_dim = \
self._collect_factor_and_dimension(addend)
if not self.get_dimension_system().equivalent_dims(dim, addend_dim):
raise ValueError(
'Dimension of "{}" is {}, '
'but it should be {}'.format(
addend, addend_dim, dim))
factor += addend_factor
return factor, dim
elif isinstance(expr, Derivative):
factor, dim = self._collect_factor_and_dimension(expr.args[0])
for independent, count in expr.variable_count:
ifactor, idim = self._collect_factor_and_dimension(independent)
factor /= ifactor**count
dim /= idim**count
return factor, dim
elif isinstance(expr, Function):
fds = [self._collect_factor_and_dimension(arg) for arg in expr.args]
dims = [Dimension(1) if self.get_dimension_system().is_dimensionless(d[1]) else d[1] for d in fds]
return (expr.func(*(f[0] for f in fds)), *dims)
elif isinstance(expr, Dimension):
return S.One, expr
else:
return expr, Dimension(1)
def get_units_non_prefixed(self) -> set[Quantity]:
"""
Return the units of the system that do not have a prefix.
"""
return set(filter(lambda u: not u.is_prefixed and not u.is_physical_constant, self._units))
| UnitSystem |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_table15.py | {
"start": 315,
"end": 1053
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("table15.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file with tables."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
data = [
["Foo", 1234, 2000, 4321],
["Bar", 1256, 0, 4320],
["Baz", 2234, 3000, 4332],
["Bop", 1324, 1000, 4333],
]
worksheet.set_column("C:F", 10.288)
worksheet.add_table("C2:F6", {"data": data})
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | huggingface__transformers | tests/models/speecht5/test_feature_extraction_speecht5.py | {
"start": 1476,
"end": 4567
} | class ____:
def __init__(
self,
parent,
batch_size=7,
min_seq_length=400,
max_seq_length=2000,
feature_size=1,
padding_value=0.0,
sampling_rate=16000,
do_normalize=True,
num_mel_bins=80,
hop_length=16,
win_length=64,
win_function="hann_window",
fmin=80,
fmax=7600,
mel_floor=1e-10,
return_attention_mask=True,
):
self.parent = parent
self.batch_size = batch_size
self.min_seq_length = min_seq_length
self.max_seq_length = max_seq_length
self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
self.feature_size = feature_size
self.padding_value = padding_value
self.sampling_rate = sampling_rate
self.do_normalize = do_normalize
self.num_mel_bins = num_mel_bins
self.hop_length = hop_length
self.win_length = win_length
self.win_function = win_function
self.fmin = fmin
self.fmax = fmax
self.mel_floor = mel_floor
self.return_attention_mask = return_attention_mask
def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"do_normalize": self.do_normalize,
"num_mel_bins": self.num_mel_bins,
"hop_length": self.hop_length,
"win_length": self.win_length,
"win_function": self.win_function,
"fmin": self.fmin,
"fmax": self.fmax,
"mel_floor": self.mel_floor,
"return_attention_mask": self.return_attention_mask,
}
def prepare_inputs_for_common(self, equal_length=False, numpify=False):
def _flatten(list_of_lists):
return list(itertools.chain(*list_of_lists))
if equal_length:
speech_inputs = floats_list((self.batch_size, self.max_seq_length))
else:
# make sure that inputs increase in size
speech_inputs = [
_flatten(floats_list((x, self.feature_size)))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
def prepare_inputs_for_target(self, equal_length=False, numpify=False):
if equal_length:
speech_inputs = [floats_list((self.max_seq_length, self.num_mel_bins)) for _ in range(self.batch_size)]
else:
# make sure that inputs increase in size
speech_inputs = [
floats_list((x, self.num_mel_bins))
for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
]
if numpify:
speech_inputs = [np.asarray(x) for x in speech_inputs]
return speech_inputs
@require_torch
| SpeechT5FeatureExtractionTester |
python | pikepdf__pikepdf | src/pikepdf/canvas.py | {
"start": 2607,
"end": 3448
} | class ____(Font):
"""Base class for fonts that have dimensional information.
Specifically, these fonts can provide leading and ascent/descent values, and
encode strings to the encoding used by the font.
.. versionadded:: 9.8.1
"""
@property
@abstractmethod
def leading(self) -> Decimal | int:
"""Default leading (line spacing) value for this font; 0 if not applicable."""
@property
@abstractmethod
def ascent(self) -> Decimal | int | None:
"""The max height of the font above the baseline."""
@property
@abstractmethod
def descent(self) -> Decimal | int | None:
"""The max height of the font below the baseline."""
@abstractmethod
def encode(self, text: str) -> bytes:
"""Encode a string in the encoding used by this font."""
| DimensionedFont |
python | django__django | tests/template_tests/filter_tests/test_timesince.py | {
"start": 265,
"end": 6101
} | class ____(TimezoneTestCase):
"""
#20246 - \xa0 in output avoids line-breaks between value and unit
"""
# Default compare with datetime.now()
@setup({"timesince01": "{{ a|timesince }}"})
def test_timesince01(self):
output = self.engine.render_to_string(
"timesince01", {"a": datetime.now() + timedelta(minutes=-1, seconds=-10)}
)
self.assertEqual(output, "1\xa0minute")
@setup({"timesince02": "{{ a|timesince }}"})
def test_timesince02(self):
output = self.engine.render_to_string(
"timesince02", {"a": datetime.now() - timedelta(days=1, minutes=1)}
)
self.assertEqual(output, "1\xa0day")
@setup({"timesince03": "{{ a|timesince }}"})
def test_timesince03(self):
output = self.engine.render_to_string(
"timesince03",
{"a": datetime.now() - timedelta(hours=1, minutes=25, seconds=10)},
)
self.assertEqual(output, "1\xa0hour, 25\xa0minutes")
# Compare to a given parameter
@setup({"timesince04": "{{ a|timesince:b }}"})
def test_timesince04(self):
output = self.engine.render_to_string(
"timesince04",
{"a": self.now - timedelta(days=2), "b": self.now - timedelta(days=1)},
)
self.assertEqual(output, "1\xa0day")
@setup({"timesince05": "{{ a|timesince:b }}"})
def test_timesince05(self):
output = self.engine.render_to_string(
"timesince05",
{
"a": self.now - timedelta(days=2, minutes=1),
"b": self.now - timedelta(days=2),
},
)
self.assertEqual(output, "1\xa0minute")
# Timezone is respected
@setup({"timesince06": "{{ a|timesince:b }}"})
def test_timesince06(self):
output = self.engine.render_to_string(
"timesince06", {"a": self.now_tz - timedelta(hours=8), "b": self.now_tz}
)
self.assertEqual(output, "8\xa0hours")
# Tests for #7443
@setup({"timesince07": "{{ earlier|timesince }}"})
def test_timesince07(self):
output = self.engine.render_to_string(
"timesince07", {"earlier": self.now - timedelta(days=7)}
)
self.assertEqual(output, "1\xa0week")
@setup({"timesince08": "{{ earlier|timesince:now }}"})
def test_timesince08(self):
output = self.engine.render_to_string(
"timesince08", {"now": self.now, "earlier": self.now - timedelta(days=7)}
)
self.assertEqual(output, "1\xa0week")
@setup({"timesince09": "{{ later|timesince }}"})
def test_timesince09(self):
output = self.engine.render_to_string(
"timesince09", {"later": self.now + timedelta(days=7)}
)
self.assertEqual(output, "0\xa0minutes")
@setup({"timesince10": "{{ later|timesince:now }}"})
def test_timesince10(self):
output = self.engine.render_to_string(
"timesince10", {"now": self.now, "later": self.now + timedelta(days=7)}
)
self.assertEqual(output, "0\xa0minutes")
# Differing timezones are calculated correctly.
@setup({"timesince11": "{{ a|timesince }}"})
def test_timesince11(self):
output = self.engine.render_to_string("timesince11", {"a": self.now})
self.assertEqual(output, "0\xa0minutes")
@requires_tz_support
@setup({"timesince12": "{{ a|timesince }}"})
def test_timesince12(self):
output = self.engine.render_to_string("timesince12", {"a": self.now_tz})
self.assertEqual(output, "0\xa0minutes")
@requires_tz_support
@setup({"timesince13": "{{ a|timesince }}"})
def test_timesince13(self):
output = self.engine.render_to_string("timesince13", {"a": self.now_tz_i})
self.assertEqual(output, "0\xa0minutes")
@setup({"timesince14": "{{ a|timesince:b }}"})
def test_timesince14(self):
output = self.engine.render_to_string(
"timesince14", {"a": self.now_tz, "b": self.now_tz_i}
)
self.assertEqual(output, "0\xa0minutes")
@setup({"timesince15": "{{ a|timesince:b }}"})
def test_timesince15(self):
output = self.engine.render_to_string(
"timesince15", {"a": self.now, "b": self.now_tz_i}
)
self.assertEqual(output, "")
@setup({"timesince16": "{{ a|timesince:b }}"})
def test_timesince16(self):
output = self.engine.render_to_string(
"timesince16", {"a": self.now_tz_i, "b": self.now}
)
self.assertEqual(output, "")
# Tests for #9065 (two date objects).
@setup({"timesince17": "{{ a|timesince:b }}"})
def test_timesince17(self):
output = self.engine.render_to_string(
"timesince17", {"a": self.today, "b": self.today}
)
self.assertEqual(output, "0\xa0minutes")
@setup({"timesince18": "{{ a|timesince:b }}"})
def test_timesince18(self):
output = self.engine.render_to_string(
"timesince18", {"a": self.today, "b": self.today + timedelta(hours=24)}
)
self.assertEqual(output, "1\xa0day")
# Tests for #33879 (wrong results for 11 months + several weeks).
@setup({"timesince19": "{{ earlier|timesince }}"})
def test_timesince19(self):
output = self.engine.render_to_string(
"timesince19", {"earlier": self.today - timedelta(days=358)}
)
self.assertEqual(output, "11\xa0months, 3\xa0weeks")
@setup({"timesince20": "{{ a|timesince:b }}"})
def test_timesince20(self):
now = datetime(2018, 5, 9)
output = self.engine.render_to_string(
"timesince20",
{"a": now, "b": now + timedelta(days=365) + timedelta(days=364)},
)
self.assertEqual(output, "1\xa0year, 11\xa0months")
| TimesinceTests |
python | mlflow__mlflow | mlflow/store/_unity_catalog/registry/prompt_info.py | {
"start": 196,
"end": 2198
} | class ____:
"""
Internal entity for prompt information from Unity Catalog. This represents
prompt metadata without version-specific details like template.
This maps to the Unity Catalog PromptInfo protobuf message.
Note: This is an internal implementation detail and not part of the public API.
"""
def __init__(
self,
name: str,
description: str | None = None,
creation_timestamp: int | None = None,
tags: dict[str, str] | None = None,
):
"""
Construct a PromptInfo entity.
Args:
name: Name of the prompt.
description: Description of the prompt.
creation_timestamp: Timestamp when the prompt was created.
tags: Prompt-level metadata as key-value pairs.
"""
self._name = name
self._description = description
self._creation_timestamp = creation_timestamp
self._tags = tags or {}
@property
def name(self) -> str:
"""The name of the prompt."""
return self._name
@property
def description(self) -> str | None:
"""The description of the prompt."""
return self._description
@property
def creation_timestamp(self) -> int | None:
"""The creation timestamp of the prompt."""
return self._creation_timestamp
@property
def tags(self) -> dict[str, str]:
"""Prompt-level metadata as key-value pairs."""
return self._tags.copy()
def __eq__(self, other) -> bool:
if not isinstance(other, PromptInfo):
return False
return (
self.name == other.name
and self.description == other.description
and self.creation_timestamp == other.creation_timestamp
and self.tags == other.tags
)
def __repr__(self) -> str:
return (
f"<PromptInfo: name='{self.name}', description='{self.description}', tags={self.tags}>"
)
| PromptInfo |
python | sqlalchemy__sqlalchemy | examples/sharding/separate_databases.py | {
"start": 1224,
"end": 2415
} | class ____(DeclarativeBase):
pass
# we need a way to create identifiers which are unique across all databases.
# one easy way would be to just use a composite primary key, where one value
# is the shard id. but here, we'll show something more "generic", an id
# generation function. we'll use a simplistic "id table" stored in database
# #1. Any other method will do just as well; UUID, hilo, application-specific,
# etc.
ids = Table("ids", Base.metadata, Column("nextid", Integer, nullable=False))
def id_generator(ctx):
# in reality, might want to use a separate transaction for this.
with db1.begin() as conn:
nextid = conn.scalar(ids.select().with_for_update())
conn.execute(ids.update().values({ids.c.nextid: ids.c.nextid + 1}))
return nextid
# table setup. we'll store a lead table of continents/cities, and a secondary
# table storing locations. a particular row will be placed in the database
# whose shard id corresponds to the 'continent'. in this setup, secondary rows
# in 'weather_reports' will be placed in the same DB as that of the parent, but
# this can be changed if you're willing to write more complex sharding
# functions.
| Base |
python | pypa__setuptools | setuptools/_vendor/platformdirs/android.py | {
"start": 190,
"end": 9016
} | class ____(PlatformDirsABC):
"""
Follows the guidance `from here <https://android.stackexchange.com/a/216132>`_.
Makes use of the `appname <platformdirs.api.PlatformDirsABC.appname>`, `version
<platformdirs.api.PlatformDirsABC.version>`, `ensure_exists <platformdirs.api.PlatformDirsABC.ensure_exists>`.
"""
@property
def user_data_dir(self) -> str:
""":return: data directory tied to the user, e.g. ``/data/user/<userid>/<packagename>/files/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "files")
@property
def site_data_dir(self) -> str:
""":return: data directory shared by users, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_config_dir(self) -> str:
"""
:return: config directory tied to the user, e.g. \
``/data/user/<userid>/<packagename>/shared_prefs/<AppName>``
"""
return self._append_app_name_and_version(cast(str, _android_folder()), "shared_prefs")
@property
def site_config_dir(self) -> str:
""":return: config directory shared by the users, same as `user_config_dir`"""
return self.user_config_dir
@property
def user_cache_dir(self) -> str:
""":return: cache directory tied to the user, e.g.,``/data/user/<userid>/<packagename>/cache/<AppName>``"""
return self._append_app_name_and_version(cast(str, _android_folder()), "cache")
@property
def site_cache_dir(self) -> str:
""":return: cache directory shared by users, same as `user_cache_dir`"""
return self.user_cache_dir
@property
def user_state_dir(self) -> str:
""":return: state directory tied to the user, same as `user_data_dir`"""
return self.user_data_dir
@property
def user_log_dir(self) -> str:
"""
:return: log directory tied to the user, same as `user_cache_dir` if not opinionated else ``log`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/log``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "log") # noqa: PTH118
return path
@property
def user_documents_dir(self) -> str:
""":return: documents directory tied to the user e.g. ``/storage/emulated/0/Documents``"""
return _android_documents_folder()
@property
def user_downloads_dir(self) -> str:
""":return: downloads directory tied to the user e.g. ``/storage/emulated/0/Downloads``"""
return _android_downloads_folder()
@property
def user_pictures_dir(self) -> str:
""":return: pictures directory tied to the user e.g. ``/storage/emulated/0/Pictures``"""
return _android_pictures_folder()
@property
def user_videos_dir(self) -> str:
""":return: videos directory tied to the user e.g. ``/storage/emulated/0/DCIM/Camera``"""
return _android_videos_folder()
@property
def user_music_dir(self) -> str:
""":return: music directory tied to the user e.g. ``/storage/emulated/0/Music``"""
return _android_music_folder()
@property
def user_desktop_dir(self) -> str:
""":return: desktop directory tied to the user e.g. ``/storage/emulated/0/Desktop``"""
return "/storage/emulated/0/Desktop"
@property
def user_runtime_dir(self) -> str:
"""
:return: runtime directory tied to the user, same as `user_cache_dir` if not opinionated else ``tmp`` in it,
e.g. ``/data/user/<userid>/<packagename>/cache/<AppName>/tmp``
"""
path = self.user_cache_dir
if self.opinion:
path = os.path.join(path, "tmp") # noqa: PTH118
return path
@property
def site_runtime_dir(self) -> str:
""":return: runtime directory shared by users, same as `user_runtime_dir`"""
return self.user_runtime_dir
@lru_cache(maxsize=1)
def _android_folder() -> str | None: # noqa: C901, PLR0912
""":return: base folder for the Android OS or None if it cannot be found"""
result: str | None = None
# type checker isn't happy with our "import android", just don't do this when type checking see
# https://stackoverflow.com/a/61394121
if not TYPE_CHECKING:
try:
# First try to get a path to android app using python4android (if available)...
from android import mActivity # noqa: PLC0415
context = cast("android.content.Context", mActivity.getApplicationContext()) # noqa: F821
result = context.getFilesDir().getParentFile().getAbsolutePath()
except Exception: # noqa: BLE001
result = None
if result is None:
try:
# ...and fall back to using plain pyjnius, if python4android isn't available or doesn't deliver any useful
# result...
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
result = context.getFilesDir().getParentFile().getAbsolutePath()
except Exception: # noqa: BLE001
result = None
if result is None:
# and if that fails, too, find an android folder looking at path on the sys.path
# warning: only works for apps installed under /data, not adopted storage etc.
pattern = re.compile(r"/data/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
if result is None:
# one last try: find an android folder looking at path on the sys.path taking adopted storage paths into
# account
pattern = re.compile(r"/mnt/expand/[a-fA-F0-9-]{36}/(data|user/\d+)/(.+)/files")
for path in sys.path:
if pattern.match(path):
result = path.split("/files")[0]
break
else:
result = None
return result
@lru_cache(maxsize=1)
def _android_documents_folder() -> str:
""":return: documents folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
documents_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOCUMENTS).getAbsolutePath()
except Exception: # noqa: BLE001
documents_dir = "/storage/emulated/0/Documents"
return documents_dir
@lru_cache(maxsize=1)
def _android_downloads_folder() -> str:
""":return: downloads folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
downloads_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DOWNLOADS).getAbsolutePath()
except Exception: # noqa: BLE001
downloads_dir = "/storage/emulated/0/Downloads"
return downloads_dir
@lru_cache(maxsize=1)
def _android_pictures_folder() -> str:
""":return: pictures folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
pictures_dir: str = context.getExternalFilesDir(environment.DIRECTORY_PICTURES).getAbsolutePath()
except Exception: # noqa: BLE001
pictures_dir = "/storage/emulated/0/Pictures"
return pictures_dir
@lru_cache(maxsize=1)
def _android_videos_folder() -> str:
""":return: videos folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
videos_dir: str = context.getExternalFilesDir(environment.DIRECTORY_DCIM).getAbsolutePath()
except Exception: # noqa: BLE001
videos_dir = "/storage/emulated/0/DCIM/Camera"
return videos_dir
@lru_cache(maxsize=1)
def _android_music_folder() -> str:
""":return: music folder for the Android OS"""
# Get directories with pyjnius
try:
from jnius import autoclass # noqa: PLC0415
context = autoclass("android.content.Context")
environment = autoclass("android.os.Environment")
music_dir: str = context.getExternalFilesDir(environment.DIRECTORY_MUSIC).getAbsolutePath()
except Exception: # noqa: BLE001
music_dir = "/storage/emulated/0/Music"
return music_dir
__all__ = [
"Android",
]
| Android |
python | pytest-dev__pytest | src/_pytest/reports.py | {
"start": 1699,
"end": 9330
} | class ____:
when: str | None
location: tuple[str, int | None, str] | None
longrepr: (
None | ExceptionInfo[BaseException] | tuple[str, int, str] | str | TerminalRepr
)
sections: list[tuple[str, str]]
nodeid: str
outcome: Literal["passed", "failed", "skipped"]
def __init__(self, **kw: Any) -> None:
self.__dict__.update(kw)
if TYPE_CHECKING:
# Can have arbitrary fields given to __init__().
def __getattr__(self, key: str) -> Any: ...
def toterminal(self, out: TerminalWriter) -> None:
if hasattr(self, "node"):
worker_info = getworkerinfoline(self.node)
if worker_info:
out.line(worker_info)
longrepr = self.longrepr
if longrepr is None:
return
if hasattr(longrepr, "toterminal"):
longrepr_terminal = cast(TerminalRepr, longrepr)
longrepr_terminal.toterminal(out)
else:
try:
s = str(longrepr)
except UnicodeEncodeError:
s = "<unprintable longrepr>"
out.line(s)
def get_sections(self, prefix: str) -> Iterator[tuple[str, str]]:
for name, content in self.sections:
if name.startswith(prefix):
yield prefix, content
@property
def longreprtext(self) -> str:
"""Read-only property that returns the full string representation of
``longrepr``.
.. versionadded:: 3.0
"""
file = StringIO()
tw = TerminalWriter(file)
tw.hasmarkup = False
self.toterminal(tw)
exc = file.getvalue()
return exc.strip()
@property
def caplog(self) -> str:
"""Return captured log lines, if log capturing is enabled.
.. versionadded:: 3.5
"""
return "\n".join(
content for (prefix, content) in self.get_sections("Captured log")
)
@property
def capstdout(self) -> str:
"""Return captured text from stdout, if capturing is enabled.
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stdout")
)
@property
def capstderr(self) -> str:
"""Return captured text from stderr, if capturing is enabled.
.. versionadded:: 3.0
"""
return "".join(
content for (prefix, content) in self.get_sections("Captured stderr")
)
@property
def passed(self) -> bool:
"""Whether the outcome is passed."""
return self.outcome == "passed"
@property
def failed(self) -> bool:
"""Whether the outcome is failed."""
return self.outcome == "failed"
@property
def skipped(self) -> bool:
"""Whether the outcome is skipped."""
return self.outcome == "skipped"
@property
def fspath(self) -> str:
"""The path portion of the reported node, as a string."""
return self.nodeid.split("::")[0]
@property
def count_towards_summary(self) -> bool:
"""**Experimental** Whether this report should be counted towards the
totals shown at the end of the test session: "1 passed, 1 failure, etc".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
return True
@property
def head_line(self) -> str | None:
"""**Experimental** The head line shown with longrepr output for this
report, more commonly during traceback representation during
failures::
________ Test.foo ________
In the example above, the head_line is "Test.foo".
.. note::
This function is considered **experimental**, so beware that it is subject to changes
even in patch releases.
"""
if self.location is not None:
_fspath, _lineno, domain = self.location
return domain
return None
def _get_verbose_word_with_markup(
self, config: Config, default_markup: Mapping[str, bool]
) -> tuple[str, Mapping[str, bool]]:
_category, _short, verbose = config.hook.pytest_report_teststatus(
report=self, config=config
)
if isinstance(verbose, str):
return verbose, default_markup
if isinstance(verbose, Sequence) and len(verbose) == 2:
word, markup = verbose
if isinstance(word, str) and isinstance(markup, Mapping):
return word, markup
fail( # pragma: no cover
"pytest_report_teststatus() hook (from a plugin) returned "
f"an invalid verbose value: {verbose!r}.\nExpected either a string "
"or a tuple of (word, markup)."
)
def _to_json(self) -> dict[str, Any]:
"""Return the contents of this report as a dict of builtin entries,
suitable for serialization.
This was originally the serialize_report() function from xdist (ca03269).
Experimental method.
"""
return _report_to_json(self)
@classmethod
def _from_json(cls, reportdict: dict[str, object]) -> Self:
"""Create either a TestReport or CollectReport, depending on the calling class.
It is the callers responsibility to know which class to pass here.
This was originally the serialize_report() function from xdist (ca03269).
Experimental method.
"""
kwargs = _report_kwargs_from_json(reportdict)
return cls(**kwargs)
def _report_unserialization_failure(
type_name: str, report_class: type[BaseReport], reportdict
) -> NoReturn:
url = "https://github.com/pytest-dev/pytest/issues"
stream = StringIO()
pprint("-" * 100, stream=stream)
pprint(f"INTERNALERROR: Unknown entry type returned: {type_name}", stream=stream)
pprint(f"report_name: {report_class}", stream=stream)
pprint(reportdict, stream=stream)
pprint(f"Please report this bug at {url}", stream=stream)
pprint("-" * 100, stream=stream)
raise RuntimeError(stream.getvalue())
def _format_failed_longrepr(
item: Item, call: CallInfo[None], excinfo: ExceptionInfo[BaseException]
):
if call.when == "call":
longrepr = item.repr_failure(excinfo)
else:
# Exception in setup or teardown.
longrepr = item._repr_failure_py(
excinfo, style=item.config.getoption("tbstyle", "auto")
)
return longrepr
def _format_exception_group_all_skipped_longrepr(
item: Item,
excinfo: ExceptionInfo[BaseExceptionGroup[BaseException | BaseExceptionGroup]],
) -> tuple[str, int, str]:
r = excinfo._getreprcrash()
assert r is not None, (
"There should always be a traceback entry for skipping a test."
)
if all(
getattr(skip, "_use_item_location", False) for skip in excinfo.value.exceptions
):
path, line = item.reportinfo()[:2]
assert line is not None
loc = (os.fspath(path), line + 1)
default_msg = "skipped"
else:
loc = (str(r.path), r.lineno)
default_msg = r.message
# Get all unique skip messages.
msgs: list[str] = []
for exception in excinfo.value.exceptions:
m = getattr(exception, "msg", None) or (
exception.args[0] if exception.args else None
)
if m and m not in msgs:
msgs.append(m)
reason = "; ".join(msgs) if msgs else default_msg
longrepr = (*loc, reason)
return longrepr
| BaseReport |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/solver31.py | {
"start": 218,
"end": 438
} | class ____(Generic[T]):
def __init__(self, i: Iterable[T]): ...
def func1(i: Iterable[T]) -> T: ...
reveal_type(func1([0] + [""]), expected_text="str | int")
reveal_type(A([0] + [""]), expected_text="A[str | int]")
| A |
python | wandb__wandb | wandb/sdk/artifacts/_generated/fetch_registries.py | {
"start": 643,
"end": 833
} | class ____(GQLResult):
page_info: PageInfoFragment = Field(alias="pageInfo")
edges: List[FetchRegistriesOrganizationOrgEntityProjectsEdges]
| FetchRegistriesOrganizationOrgEntityProjects |
python | doocs__leetcode | solution/3300-3399/3301.Maximize the Total Height of Unique Towers/Solution.py | {
"start": 0,
"end": 311
} | class ____:
def maximumTotalSum(self, maximumHeight: List[int]) -> int:
maximumHeight.sort()
ans, mx = 0, inf
for x in maximumHeight[::-1]:
x = min(x, mx - 1)
if x <= 0:
return -1
ans += x
mx = x
return ans
| Solution |
python | ApeWorX__ape | src/ape/managers/converters.py | {
"start": 2061,
"end": 2454
} | class ____(ConverterAPI):
"""
Convert list of hex values to single concatenated ``HexBytes`` value.
"""
def is_convertible(self, value: Any) -> bool:
return isinstance(value, Iterable) and all(isinstance(v, bytes) or is_hex(v) for v in value)
def convert(self, value: Any) -> bytes:
return HexBytes(b"".join(HexBytes(v) for v in value))
| HexIterableConverter |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 184967,
"end": 186570
} | class ____(GeneratedAirbyteSource):
@public
def __init__(
self,
name: str,
client_id: str,
client_secret: str,
refresh_token: str,
dc_region: str,
environment: str,
edition: str,
start_datetime: Optional[str] = None,
):
"""Airbyte Source for Zoho Crm.
Documentation can be found at https://docs.airbyte.com/integrations/sources/zoho-crm
Args:
name (str): The name of the destination.
client_id (str): OAuth2.0 Client ID
client_secret (str): OAuth2.0 Client Secret
refresh_token (str): OAuth2.0 Refresh Token
dc_region (str): Please choose the region of your Data Center location. More info by this Link
environment (str): Please choose the environment
start_datetime (Optional[str]): ISO 8601, for instance: `YYYY-MM-DD`, `YYYY-MM-DD HH:MM:SS+HH:MM`
edition (str): Choose your Edition of Zoho CRM to determine API Concurrency Limits
"""
self.client_id = check.str_param(client_id, "client_id")
self.client_secret = check.str_param(client_secret, "client_secret")
self.refresh_token = check.str_param(refresh_token, "refresh_token")
self.dc_region = check.str_param(dc_region, "dc_region")
self.environment = check.str_param(environment, "environment")
self.start_datetime = check.opt_str_param(start_datetime, "start_datetime")
self.edition = check.str_param(edition, "edition")
super().__init__("Zoho Crm", name)
| ZohoCrmSource |
python | run-llama__llama_index | llama-index-packs/llama-index-packs-cohere-citation-chat/llama_index/packs/cohere_citation_chat/citations_context_chat_engine.py | {
"start": 1354,
"end": 6259
} | class ____(StreamingAgentChatResponse):
"""Streaming chat response to user and writing to chat history."""
citations: List[Citation] = field(default_factory=list)
documents: List[Document] = field(default_factory=list)
citations_settings: CitationsSettings = field(
default_factory=lambda: CitationsSettings(
documents_response_field="documents",
documents_request_param="documents",
documents_stream_event_type="search-results",
citations_response_field="citations",
citations_stream_event_type="citation-generation",
)
)
def write_response_to_history(
self, memory: BaseMemory, raise_error: bool = False
) -> None:
if self.chat_stream is None:
raise ValueError(
"chat_stream is None. Cannot write to history without chat_stream."
)
# try/except to prevent hanging on error
try:
final_text = ""
for chat in self.chat_stream:
# LLM response queue
self.is_function = is_function(chat.message)
self.put_in_queue(chat.delta)
final_text += chat.delta or ""
if chat.raw is not None:
# Citations stream event
if (
chat.raw.get("event_type", "")
== self.citations_settings.citations_stream_event_type
):
self.citations += convert_chat_response_to_citations(
chat, self.citations_settings
)
# Documents stream event
if (
chat.raw.get("event_type", "")
== self.citations_settings.documents_stream_event_type
):
self.documents += convert_chat_response_to_documents(
chat, self.citations_settings
)
if self.is_function is not None: # if loop has gone through iteration
# NOTE: this is to handle the special case where we consume some of the
# chat stream, but not all of it (e.g. in react agent)
chat.message.content = final_text.strip() # final message
memory.put(chat.message)
except Exception as e:
if not raise_error:
logger.warning(
f"Encountered exception writing response to history: {e}"
)
else:
raise
self.is_done = True
# This act as is_done events for any consumers waiting
self.is_function_not_none_thread_event.set()
async def awrite_response_to_history(
self,
memory: BaseMemory,
) -> None:
if self.achat_stream is None:
raise ValueError(
"achat_stream is None. Cannot asynchronously write to "
"history without achat_stream."
)
# try/except to prevent hanging on error
try:
final_text = ""
async for chat in self.achat_stream:
# Chat response queue
self.is_function = is_function(chat.message)
self.aput_in_queue(chat.delta)
final_text += chat.delta or ""
if self.is_function is False:
self.is_function_false_event.set()
if chat.raw is not None:
# Citations stream event
if (
chat.raw.get("event_type", "")
== self.citations_settings.citations_stream_event_type
):
self.citations += convert_chat_response_to_citations(
chat, self.citations_settings
)
# Documents stream event
if (
chat.raw.get("event_type", "")
== self.citations_settings.documents_stream_event_type
):
self.documents += convert_chat_response_to_documents(
chat, self.citations_settings
)
self.new_item_event.set()
if self.is_function is not None: # if loop has gone through iteration
# NOTE: this is to handle the special case where we consume some of the
# chat stream, but not all of it (e.g. in react agent)
chat.message.content = final_text.strip() # final message
memory.put(chat.message)
except Exception as e:
logger.warning(f"Encountered exception writing response to history: {e}")
self.is_done = True
| StreamingAgentCitationsChatResponse |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 281181,
"end": 281829
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("DeploymentStatusEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(
sgqlc.types.list_of("DeploymentStatus"), graphql_name="nodes"
)
page_info = sgqlc.types.Field(
sgqlc.types.non_null("PageInfo"), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| DeploymentStatusConnection |
python | openai__openai-python | src/openai/lib/streaming/responses/_events.py | {
"start": 2636,
"end": 2713
} | class ____(RawResponseTextDeltaEvent):
snapshot: str
| ResponseTextDeltaEvent |
python | doocs__leetcode | solution/2400-2499/2447.Number of Subarrays With GCD Equal to K/Solution.py | {
"start": 0,
"end": 254
} | class ____:
def subarrayGCD(self, nums: List[int], k: int) -> int:
ans = 0
for i in range(len(nums)):
g = 0
for x in nums[i:]:
g = gcd(g, x)
ans += g == k
return ans
| Solution |
python | django-haystack__django-haystack | test_haystack/elasticsearch_tests/test_elasticsearch_query.py | {
"start": 324,
"end": 8719
} | class ____(TestCase):
fixtures = ["base_data"]
def setUp(self):
super().setUp()
self.sq = connections["elasticsearch"].get_query()
def test_build_query_all(self):
self.assertEqual(self.sq.build_query(), "*:*")
def test_build_query_single_word(self):
self.sq.add_filter(SQ(content="hello"))
self.assertEqual(self.sq.build_query(), "(hello)")
def test_build_query_boolean(self):
self.sq.add_filter(SQ(content=True))
self.assertEqual(self.sq.build_query(), "(True)")
def test_regression_slash_search(self):
self.sq.add_filter(SQ(content="hello/"))
self.assertEqual(self.sq.build_query(), "(hello\\/)")
def test_build_query_datetime(self):
self.sq.add_filter(SQ(content=datetime.datetime(2009, 5, 8, 11, 28)))
self.assertEqual(self.sq.build_query(), "(2009-05-08T11:28:00)")
def test_build_query_multiple_words_and(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_filter(SQ(content="world"))
self.assertEqual(self.sq.build_query(), "((hello) AND (world))")
def test_build_query_multiple_words_not(self):
self.sq.add_filter(~SQ(content="hello"))
self.sq.add_filter(~SQ(content="world"))
self.assertEqual(self.sq.build_query(), "(NOT ((hello)) AND NOT ((world)))")
def test_build_query_multiple_words_or(self):
self.sq.add_filter(~SQ(content="hello"))
self.sq.add_filter(SQ(content="hello"), use_or=True)
self.assertEqual(self.sq.build_query(), "(NOT ((hello)) OR (hello))")
def test_build_query_multiple_words_mixed(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(content="hello"), use_or=True)
self.sq.add_filter(~SQ(content="world"))
self.assertEqual(
self.sq.build_query(), "(((why) OR (hello)) AND NOT ((world)))"
)
def test_build_query_phrase(self):
self.sq.add_filter(SQ(content="hello world"))
self.assertEqual(self.sq.build_query(), "(hello AND world)")
self.sq.add_filter(SQ(content__exact="hello world"))
self.assertEqual(
self.sq.build_query(), '((hello AND world) AND ("hello world"))'
)
def test_build_query_boost(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_boost("world", 5)
self.assertEqual(self.sq.build_query(), "(hello) world^5")
def test_build_query_multiple_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__lte=Exact("2009-02-10 01:59:00")))
self.sq.add_filter(SQ(author__gt="daniel"))
self.sq.add_filter(SQ(created__lt=Exact("2009-02-12 12:13:00")))
self.sq.add_filter(SQ(title__gte="B"))
self.sq.add_filter(SQ(id__in=[1, 2, 3]))
self.sq.add_filter(SQ(rating__range=[3, 5]))
self.assertEqual(
self.sq.build_query(),
'((why) AND pub_date:([* TO "2009-02-10 01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12 12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))',
)
def test_build_query_multiple_filter_types_with_datetimes(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__lte=datetime.datetime(2009, 2, 10, 1, 59, 0)))
self.sq.add_filter(SQ(author__gt="daniel"))
self.sq.add_filter(SQ(created__lt=datetime.datetime(2009, 2, 12, 12, 13, 0)))
self.sq.add_filter(SQ(title__gte="B"))
self.sq.add_filter(SQ(id__in=[1, 2, 3]))
self.sq.add_filter(SQ(rating__range=[3, 5]))
self.assertEqual(
self.sq.build_query(),
'((why) AND pub_date:([* TO "2009-02-10T01:59:00"]) AND author:({"daniel" TO *}) AND created:({* TO "2009-02-12T12:13:00"}) AND title:(["B" TO *]) AND id:("1" OR "2" OR "3") AND rating:(["3" TO "5"]))',
)
def test_build_query_in_filter_multiple_words(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=["A Famous Paper", "An Infamous Article"]))
self.assertEqual(
self.sq.build_query(),
'((why) AND title:("A Famous Paper" OR "An Infamous Article"))',
)
def test_build_query_in_filter_datetime(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(pub_date__in=[datetime.datetime(2009, 7, 6, 1, 56, 21)]))
self.assertEqual(
self.sq.build_query(), '((why) AND pub_date:("2009-07-06T01:56:21"))'
)
def test_build_query_in_with_set(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=set(["A Famous Paper", "An Infamous Article"])))
self.assertTrue("((why) AND title:(" in self.sq.build_query())
self.assertTrue('"A Famous Paper"' in self.sq.build_query())
self.assertTrue('"An Infamous Article"' in self.sq.build_query())
def test_build_query_wildcard_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__startswith="haystack"))
self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack*))")
def test_build_query_fuzzy_filter_types(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__fuzzy="haystack"))
self.assertEqual(self.sq.build_query(), "((why) AND title:(haystack~))")
def test_build_query_with_contains(self):
self.sq.add_filter(SQ(content="circular"))
self.sq.add_filter(SQ(title__contains="haystack"))
self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack*))")
def test_build_query_with_endswith(self):
self.sq.add_filter(SQ(content="circular"))
self.sq.add_filter(SQ(title__endswith="haystack"))
self.assertEqual(self.sq.build_query(), "((circular) AND title:(*haystack))")
def test_clean(self):
self.assertEqual(self.sq.clean("hello world"), "hello world")
self.assertEqual(self.sq.clean("hello AND world"), "hello and world")
self.assertEqual(
self.sq.clean(
r'hello AND OR NOT TO + - && || ! ( ) { } [ ] ^ " ~ * ? : \ / world'
),
'hello and or not to \\+ \\- \\&& \\|| \\! \\( \\) \\{ \\} \\[ \\] \\^ \\" \\~ \\* \\? \\: \\\\ \\/ world',
)
self.assertEqual(
self.sq.clean("so please NOTe i am in a bAND and bORed"),
"so please NOTe i am in a bAND and bORed",
)
def test_build_query_with_models(self):
self.sq.add_filter(SQ(content="hello"))
self.sq.add_model(MockModel)
self.assertEqual(self.sq.build_query(), "(hello)")
self.sq.add_model(AnotherMockModel)
self.assertEqual(self.sq.build_query(), "(hello)")
def test_set_result_class(self):
# Assert that we're defaulting to ``SearchResult``.
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
# Custom class.
class IttyBittyResult:
pass
self.sq.set_result_class(IttyBittyResult)
self.assertTrue(issubclass(self.sq.result_class, IttyBittyResult))
# Reset to default.
self.sq.set_result_class(None)
self.assertTrue(issubclass(self.sq.result_class, SearchResult))
def test_in_filter_values_list(self):
self.sq.add_filter(SQ(content="why"))
self.sq.add_filter(SQ(title__in=MockModel.objects.values_list("id", flat=True)))
self.assertEqual(self.sq.build_query(), '((why) AND title:("1" OR "2" OR "3"))')
def test_narrow_sq(self):
sqs = SearchQuerySet(using="elasticsearch").narrow(SQ(foo="moof"))
self.assertTrue(isinstance(sqs, SearchQuerySet))
self.assertEqual(len(sqs.query.narrow_queries), 1)
self.assertEqual(sqs.query.narrow_queries.pop(), "foo:(moof)")
def test_query__in(self):
sqs = SearchQuerySet(using="elasticsearch").filter(id__in=[1, 2, 3])
self.assertEqual(sqs.query.build_query(), 'id:("1" OR "2" OR "3")')
def test_query__in_empty_list(self):
"""Confirm that an empty list avoids a Elasticsearch exception"""
sqs = SearchQuerySet(using="elasticsearch").filter(id__in=[])
self.assertEqual(sqs.query.build_query(), "id:(!*:*)")
| ElasticsearchSearchQueryTestCase |
python | getsentry__sentry | src/sentry/analytics/events/codeowners_assignment.py | {
"start": 78,
"end": 258
} | class ____(analytics.Event):
organization_id: int
project_id: int
group_id: int
updated_assignment: bool
analytics.register(CodeOwnersAssignment)
| CodeOwnersAssignment |
python | getsentry__sentry | src/sentry/replays/usecases/query/conditions/selector.py | {
"start": 6887,
"end": 7322
} | class ____(ComputedBase):
"""Rage selector composite condition class."""
@staticmethod
def visit_eq(value: list[QueryType]) -> Condition:
return is_rage_click(ClickSelectorComposite.visit_eq(value))
@staticmethod
def visit_neq(value: list[QueryType]) -> Condition:
return is_rage_click(ClickSelectorComposite.visit_neq(value))
#
# Streaming selector condition classes.
#
| RageClickSelectorComposite |
python | huggingface__transformers | src/transformers/models/moshi/modeling_moshi.py | {
"start": 21468,
"end": 26950
} | class ____(nn.Module):
"""Multi-headed attention from 'Attention Is All You Need' paper"""
def __init__(self, config: MoshiConfig, layer_idx: Optional[int] = None, use_flexible_linear=False, use_rope=True):
super().__init__()
self.config = config
self.layer_idx = layer_idx
if layer_idx is None:
logger.warning_once(
f"Instantiating {self.__class__.__name__} without passing a `layer_idx` is not recommended and will "
"lead to errors during the forward call if caching is used. Please make sure to provide a `layer_idx` "
"when creating this class."
)
self.attention_dropout = config.attention_dropout
self.hidden_size = config.hidden_size
self.num_heads = config.num_attention_heads
self.head_dim = config.head_dim
self.num_key_value_heads = config.num_key_value_heads
self.num_key_value_groups = self.num_heads // self.num_key_value_heads
self.max_position_embeddings = config.max_position_embeddings
self.is_causal = True
self.scaling = 1 / math.sqrt(self.head_dim)
if self.hidden_size % self.num_heads != 0:
raise ValueError(
f"hidden_size must be divisible by num_heads (got `hidden_size`: {self.hidden_size}"
f" and `num_heads`: {self.num_heads})."
)
self.q_proj = MoshiLinear(
self.hidden_size, self.num_heads * self.head_dim, config.num_codebooks, use_flexible_linear
)
self.k_proj = MoshiLinear(
self.hidden_size, self.num_key_value_heads * self.head_dim, config.num_codebooks, use_flexible_linear
)
self.v_proj = MoshiLinear(
self.hidden_size, self.num_key_value_heads * self.head_dim, config.num_codebooks, use_flexible_linear
)
self.o_proj = MoshiLinear(
self.num_heads * self.head_dim, self.hidden_size, config.num_codebooks, use_flexible_linear
)
# rotary embeddings are not used in the depth decoder
self.rotary_emb = None
if use_rope:
self.rotary_emb = MoshiRotaryEmbedding(config)
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
output_attentions: bool = False,
use_cache: bool = False,
cache_position: Optional[torch.LongTensor] = None,
) -> tuple[torch.Tensor, Optional[torch.Tensor], Optional[tuple[torch.Tensor]]]:
bsz, q_len, _ = hidden_states.size()
query_states = self.q_proj(hidden_states, cache_position) # Ignore copy
key_states = self.k_proj(hidden_states, cache_position) # Ignore copy
value_states = self.v_proj(hidden_states, cache_position) # Ignore copy
query_states = query_states.view(bsz, q_len, self.num_heads, self.head_dim).transpose(1, 2)
key_states = key_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
value_states = value_states.view(bsz, q_len, self.num_key_value_heads, self.head_dim).transpose(1, 2)
if self.rotary_emb is not None: # Ignore copy
cos, sin = self.rotary_emb(value_states, position_ids) # Ignore copy
query_states, key_states = apply_rotary_pos_emb(query_states, key_states, cos, sin) # Ignore copy
if past_key_values is not None:
# sin and cos are specific to RoPE models; cache_position needed for the static cache
cache_kwargs = (
{"sin": sin, "cos": cos, "cache_position": cache_position}
if self.rotary_emb is not None
else {"cache_position": cache_position}
) # Ignore copy
key_states, value_states = past_key_values.update(key_states, value_states, self.layer_idx, cache_kwargs)
key_states = repeat_kv(key_states, self.num_key_value_groups)
value_states = repeat_kv(value_states, self.num_key_value_groups)
attn_weights = torch.matmul(query_states, key_states.transpose(2, 3)) * self.scaling
if attention_mask is not None: # no matter the length, we just slice it
causal_mask = attention_mask[:, :, :, : key_states.shape[-2]]
attn_weights = attn_weights + causal_mask
# upcast attention to fp32
attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query_states.dtype)
attn_weights = nn.functional.dropout(attn_weights, p=self.attention_dropout, training=self.training)
attn_output = torch.matmul(attn_weights, value_states)
if attn_output.size() != (bsz, self.num_heads, q_len, self.head_dim):
raise ValueError(
f"`attn_output` should be of size {(bsz, self.num_heads, q_len, self.head_dim)}, but is"
f" {attn_output.size()}"
)
attn_output = attn_output.transpose(1, 2).contiguous()
attn_output = attn_output.view(bsz, q_len, -1)
attn_output = self.o_proj(attn_output, cache_position) # Ignore copy
if not output_attentions:
attn_weights = None
return attn_output, attn_weights
# NO LONGER EXIST Copied from transformers.models.gemma.modeling_gemma.GemmaFlashAttention2 with Gemma->Moshi
# TODO cyril: modular
| MoshiAttention |
python | tensorflow__tensorflow | tensorflow/python/ops/ragged/dynamic_ragged_shape.py | {
"start": 75115,
"end": 79685
} | class ____(abc.ABC):
"""A broadcaster of a single layer.
Although this class does not literally contain a gather_index, the reference
implementation is defined through a gather_index. Thus, any subclasses should
first define the gather_index property. Other functions can be overridden
for optimization, but it should not change the behavior.
"""
@property
@abc.abstractmethod
def gather_index(self):
"""Returns a 1D tensor.
The size of the 1D tensor is equal to the destination size.
The ith element of the result is the index of the source of the ith element.
"""
pass
@property
def dtype(self):
"""Returns the dtype of the broadcast."""
return self.gather_index.dtype
@abc.abstractmethod
def with_dtype(self, dtype):
"""Returns an identical _LayerBroadcaster with a different dtype."""
pass
def __repr__(self):
return str(self.gather_index)
@classmethod
def from_gather_index(cls, gather_index):
"""Create a broadcaster from a gather_index."""
return _GatherLayerBroadcaster(gather_index)
@classmethod
def first_layer(cls, nrows_source, nrows_target):
"""Create a broadcaster from a gather_index."""
gather_index = _first_layer_gather_index(nrows_source, nrows_target)
return _LayerBroadcaster.from_gather_index(gather_index)
@classmethod
def get_singleton_broadcaster(cls, target_size):
"""Broadcast from 1 element to target_size elements."""
return _LayerBroadcaster.from_gather_index(
array_ops.zeros(target_size, dtype=target_size.dtype))
@abc.abstractmethod
def with_dependencies(self, checks):
"""Add dependencies to a _LayerBroadcaster.
Args:
checks: a list of ops that need to be run before any tensors from the
Broadcaster are used.
Returns:
a copy of this _LayerBroadcaster with dependencies added.
"""
pass
@classmethod
def get_identity_broadcaster(cls, nvals, dtype=None):
"""Create an identity broadcaster.
TODO(martinz): an identity broadcaster can be far more efficient than a
generic broadcaster. Add an optimized implementation.
Args:
nvals: the number of values for the broadcaster.
dtype: the dtype of the broadcaster, or None to use the dtype of nvals.
Returns:
an identity broadcaster from [0....nvals-1] to [0...nvals-1]
"""
return _GatherLayerBroadcaster(math_ops.range(nvals, dtype=dtype))
def broadcast_tensor(self, tensor):
"""Broadcast from a dense tensor.
It is assumed that the first axis of the dense tensor is indexed by the
source shape, and at the end, the first axis of the dense tensor is
indexed by the destination shape.
Args:
tensor: a dense tensor.
Returns:
A dense tensor.
"""
return array_ops.gather(tensor, self.gather_index)
def dest_nrows(self):
"""Return the number of rows in the resulting gather, or None if tiling."""
return math_ops.cast(
array_ops.shape(self.gather_index)[0], dtype=self.dtype)
def broadcast_row_partition(self, rp):
"""Return a new shape where the rows are broadcasted.
*--self--->*
| |
rp result
| |
V V
*--------->*
This is equivalent to:
return RowPartition.from_row_lengths(self.broadcast(rp.row_lengths()))
However, if the shape has uniform row length, then that property is
maintained.
Args:
rp: a row partition.
Returns:
a RowPartition representing a broadcast version of this row partition.
"""
if not rp.is_uniform():
return RowPartition.from_row_lengths(
self.broadcast_tensor(rp.row_lengths()))
else:
return RowPartition.from_uniform_row_length(
rp.uniform_row_length(),
nvals=rp.uniform_row_length() * self.dest_nrows(),
nrows=self.dest_nrows())
def next_layer(self, original_rp, broadcast_rp):
r"""Create the next layer gather_index whether or not a broadcast happens.
*---------self------->*
| |
original_rp broadcast_rp
| |
\|/ \|/
*--next_broadcaster-->*
Args:
original_rp: the original row partition.
broadcast_rp: the target row partition.
Returns:
the gather_index for next_broadcaster.
"""
gather_index = _next_layer_gather_index(self, original_rp, broadcast_rp)
return _LayerBroadcaster.from_gather_index(gather_index)
| _LayerBroadcaster |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/partition_sets.py | {
"start": 6126,
"end": 6295
} | class ____(graphene.Union):
class Meta:
types = (GraphenePartitionTags, GraphenePythonError)
name = "PartitionTagsOrError"
| GraphenePartitionTagsOrError |
python | huggingface__transformers | src/transformers/models/sam3_tracker_video/modular_sam3_tracker_video.py | {
"start": 17844,
"end": 17918
} | class ____(Sam2VideoInferenceCache):
pass
| Sam3TrackerVideoInferenceCache |
python | PrefectHQ__prefect | src/integrations/prefect-databricks/prefect_databricks/models/jobs.py | {
"start": 21708,
"end": 22384
} | class ____(BaseModel):
"""
See source code for the fields' description.
Read-only state of the remote repository at the time the job was run. This field is only included on job runs.
"""
model_config = ConfigDict(extra="allow", frozen=True)
used_commit: Optional[str] = Field(
None,
description=(
"Commit that was used to execute the run. If git_branch was specified, this"
" points to the HEAD of the branch at the time of the run; if git_tag was"
" specified, this points to the commit the tag points to."
),
examples=["4506fdf41e9fa98090570a34df7a5bce163ff15f"],
)
| GitSnapshot |
python | huggingface__transformers | src/transformers/models/autoformer/modeling_autoformer.py | {
"start": 9828,
"end": 12407
} | class ____(nn.Module):
"""
Computes a scaling factor as the weighted average absolute value along the first dimension, and scales the data
accordingly.
"""
def __init__(self, config: AutoformerConfig):
super().__init__()
self.dim = config.scaling_dim if hasattr(config, "scaling_dim") else 1
self.keepdim = config.keepdim if hasattr(config, "keepdim") else True
self.minimum_scale = config.minimum_scale if hasattr(config, "minimum_scale") else 1e-10
self.default_scale = config.default_scale if hasattr(config, "default_scale") else None
def forward(
self, data: torch.Tensor, observed_indicator: torch.Tensor
) -> tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
"""
Parameters:
data (`torch.Tensor` of shape `(batch_size, sequence_length, num_input_channels)`):
input for Batch norm calculation
observed_indicator (`torch.BoolTensor` of shape `(batch_size, sequence_length, num_input_channels)`):
Calculating the scale on the observed indicator.
Returns:
tuple of `torch.Tensor` of shapes
(`(batch_size, sequence_length, num_input_channels)`,`(batch_size, 1, num_input_channels)`,
`(batch_size, 1, num_input_channels)`)
"""
ts_sum = (data * observed_indicator).abs().sum(self.dim, keepdim=True)
num_observed = observed_indicator.sum(self.dim, keepdim=True)
scale = ts_sum / torch.clamp(num_observed, min=1)
# If `default_scale` is provided, we use it, otherwise we use the scale
# of the batch.
if self.default_scale is None:
batch_sum = ts_sum.sum(dim=0)
batch_observations = torch.clamp(num_observed.sum(0), min=1)
default_scale = torch.squeeze(batch_sum / batch_observations)
else:
default_scale = self.default_scale * torch.ones_like(scale)
# apply default scale where there are no observations
scale = torch.where(num_observed > 0, scale, default_scale)
# ensure the scale is at least `self.minimum_scale`
scale = torch.clamp(scale, min=self.minimum_scale)
scaled_data = data / scale
if not self.keepdim:
scale = scale.squeeze(dim=self.dim)
return scaled_data, torch.zeros_like(scale), scale
# Copied from transformers.models.time_series_transformer.modeling_time_series_transformer.TimeSeriesNOPScaler with TimeSeriesTransformer->Autoformer,TimeSeries->Autoformer
| AutoformerMeanScaler |
python | dagster-io__dagster | python_modules/libraries/dagster-pandas/dagster_pandas/constraints.py | {
"start": 4607,
"end": 5288
} | class ____:
"""Base constraint object that all constraints inherit from.
Args:
error_description (Optional[str]): The plain string description that is output in the terminal if the constraint fails.
markdown_description (Optional[str]): A markdown supported description that is shown in the Dagster UI if the constraint fails.
"""
def __init__(self, error_description=None, markdown_description=None):
self.name = self.__class__.__name__
self.markdown_description = check.str_param(markdown_description, "markdown_description")
self.error_description = check.str_param(error_description, "error_description")
@beta
| Constraint |
python | pytorch__pytorch | torch/autograd/function.py | {
"start": 13167,
"end": 13811
} | class ____(type):
"""Function metaclass.
This metaclass sets up the following properties:
_backward_cls: The Function class corresponding to the differentiated
version of this function (which is generated on the fly by this
metaclass).
"""
def __init__(cls, name, bases, attrs):
backward_fn = type(
name + "Backward", (BackwardCFunction,), {"_forward_cls": cls}
)
backward_fn._autograd_function_id = next(AUTOGRAD_FUNCTION_COUNTER) # type: ignore[attr-defined]
cls._backward_cls = backward_fn
super().__init__(name, bases, attrs)
| FunctionMeta |
python | getsentry__sentry | tests/sentry/issues/auto_source_code_config/test_process_event.py | {
"start": 25859,
"end": 27014
} | class ____(LanguageSpecificDeriveCodeMappings):
platform = "csharp"
def test_auto_source_code_config_csharp_trivial(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["sentry/p/kanga.cs"]},
frames=[self.frame("/sentry/p/kanga.cs", True)],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("/", "")],
)
def test_auto_source_code_config_different_roots_csharp(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["src/sentry/p/kanga.cs"]},
frames=[self.frame("/sentry/p/kanga.cs", True)],
platform=self.platform,
expected_new_code_mappings=[self.code_mapping("/sentry/", "src/sentry/")],
)
def test_auto_source_code_config_non_in_app_frame(self) -> None:
self._process_and_assert_configuration_changes(
repo_trees={REPO1: ["sentry/src/functions.cs"]},
frames=[self.frame("/sentry/p/vendor/sentry/src/functions.cs", False)],
platform=self.platform,
)
| TestCSharpDeriveCodeMappings |
python | celery__celery | celery/contrib/sphinx.py | {
"start": 1061,
"end": 2265
} | class ____(FunctionDocumenter):
"""Document task definitions."""
objtype = 'task'
member_order = 11
@classmethod
def can_document_member(cls, member, membername, isattr, parent):
return isinstance(member, BaseTask) and getattr(member, '__wrapped__')
def format_args(self):
wrapped = getattr(self.object, '__wrapped__', None)
if wrapped is not None:
sig = signature(wrapped)
if "self" in sig.parameters or "cls" in sig.parameters:
sig = sig.replace(parameters=list(sig.parameters.values())[1:])
return str(sig)
return ''
def document_members(self, all_members=False):
pass
def check_module(self):
# Normally checks if *self.object* is really defined in the module
# given by *self.modname*. But since functions decorated with the @task
# decorator are instances living in the celery.local, we have to check
# the wrapped function instead.
wrapped = getattr(self.object, '__wrapped__', None)
if wrapped and getattr(wrapped, '__module__') == self.modname:
return True
return super().check_module()
| TaskDocumenter |
python | numba__numba | numba/tests/test_lists.py | {
"start": 37584,
"end": 39263
} | class ____(ManagedListTestCase):
def make_jitclass_element(self):
spec = [
('many', types.float64[:]),
('scalar', types.float64),
]
JCItem = jitclass(spec)(Item)
return JCItem
def make_jitclass_container(self):
spec = {
'data': types.List(dtype=types.List(types.float64[::1])),
}
JCContainer = jitclass(spec)(Container)
return JCContainer
def assert_list_element_with_tester(self, tester, expect, got):
for x, y in zip(expect, got):
tester(x, y)
def test_jitclass_instance_elements(self):
JCItem = self.make_jitclass_element()
def pyfunc(xs):
xs[1], xs[0] = xs[0], xs[1]
return xs
def eq(x, y):
self.assertPreciseEqual(x.many, y.many)
self.assertPreciseEqual(x.scalar, y.scalar)
cfunc = jit(nopython=True)(pyfunc)
arg = [JCItem(many=np.random.random(n + 1), scalar=n * 1.2)
for n in range(5)]
expect_arg = list(arg)
got_arg = list(arg)
expect_res = pyfunc(expect_arg)
got_res = cfunc(got_arg)
self.assert_list_element_with_tester(eq, expect_arg, got_arg)
self.assert_list_element_with_tester(eq, expect_res, got_res)
def test_jitclass_containing_list(self):
JCContainer = self.make_jitclass_container()
expect = Container(n=4)
got = JCContainer(n=4)
self.assert_list_element_precise_equal(got.data, expect.data)
expect.more(3)
got.more(3)
self.assert_list_element_precise_equal(got.data, expect.data)
| TestListAndJitClasses |
python | getsentry__sentry | src/sentry/grouping/strategies/base.py | {
"start": 2947,
"end": 7907
} | class ____:
"""
A key-value store used for passing state between strategy functions and other helpers used
during grouping.
Has a dictionary-like interface, along with a context manager which allows values to be
temporarily overwritten:
context = GroupingContext()
context["some_key"] = "original_value"
value_at_some_key = context["some_key"] # will be "original_value"
value_at_some_key = context.get("some_key") # will be "original_value"
value_at_another_key = context["another_key"] # will raise a KeyError
value_at_another_key = context.get("another_key") # will be None
value_at_another_key = context.get("another_key", "some_default") # will be "some_default"
with context:
context["some_key"] = "some_other_value"
value_at_some_key = context["some_key"] # will be "some_other_value"
value_at_some_key = context["some_key"] # will be "original_value"
"""
def __init__(self, strategy_config: StrategyConfiguration, event: Event):
# The initial context is essentially the grouping config options
self._stack = [strategy_config.initial_context]
self.config = strategy_config
self.event = event
self._push_context_layer()
def __setitem__(self, key: str, value: ContextValue) -> None:
# Add the key-value pair to the context layer at the top of the stack
self._stack[-1][key] = value
def __getitem__(self, key: str) -> ContextValue:
# Walk down the stack from the top and return the first instance of `key` found
for d in reversed(self._stack):
if key in d:
return d[key]
raise KeyError(key)
def get(self, key: str, default: ContextValue | None = None) -> ContextValue | None:
try:
return self[key]
except KeyError:
return default
def __enter__(self) -> Self:
self._push_context_layer()
return self
def __exit__(self, exc_type: type[Exception], exc_value: Exception, tb: Any) -> None:
self._pop_context_layer()
def _push_context_layer(self) -> None:
self._stack.append({})
def _pop_context_layer(self) -> None:
self._stack.pop()
def get_grouping_components_by_variant(
self, interface: Interface, *, event: Event, **kwargs: Any
) -> ComponentsByVariant:
"""
Called by a strategy to invoke delegates on its child interfaces.
For example, the chained exception strategy calls this on the exceptions in the chain, and
the exception strategy calls this on each exception's stacktrace.
"""
return self._get_grouping_components_for_interface(interface, event=event, **kwargs)
@overload
def get_single_grouping_component(
self, interface: Frame, *, event: Event, **kwargs: Any
) -> FrameGroupingComponent: ...
@overload
def get_single_grouping_component(
self, interface: SingleException, *, event: Event, **kwargs: Any
) -> ExceptionGroupingComponent: ...
@overload
def get_single_grouping_component(
self, interface: Stacktrace, *, event: Event, **kwargs: Any
) -> StacktraceGroupingComponent: ...
def get_single_grouping_component(
self, interface: Interface, *, event: Event, **kwargs: Any
) -> FrameGroupingComponent | ExceptionGroupingComponent | StacktraceGroupingComponent:
"""
Invoke the delegate grouping strategy corresponding to the given interface, returning the
grouping component for the variant set on the context.
"""
variant_name = self["variant_name"]
assert variant_name is not None
components_by_variant = self._get_grouping_components_for_interface(
interface, event=event, **kwargs
)
assert len(components_by_variant) == 1
return components_by_variant[variant_name]
def _get_grouping_components_for_interface(
self, interface: Interface, *, event: Event, **kwargs: Any
) -> ComponentsByVariant:
"""
Apply a delegate strategy to the given interface to get a dictionary of grouping components
keyed by variant name.
"""
interface_id = interface.path
strategy = self.config.delegates.get(interface_id)
if strategy is None:
raise RuntimeError(f"No delegate strategy found for interface {interface_id}")
kwargs["context"] = self
kwargs["event"] = event
components_by_variant = strategy(interface, **kwargs)
assert isinstance(components_by_variant, dict)
return components_by_variant
def lookup_strategy(strategy_id: str) -> Strategy[Any]:
"""Looks up a strategy by id."""
try:
return STRATEGIES[strategy_id]
except KeyError:
raise LookupError("Unknown strategy %r" % strategy_id)
| GroupingContext |
python | sympy__sympy | sympy/polys/numberfields/galoisgroups.py | {
"start": 1041,
"end": 20671
} | class ____(GaloisGroupException):
...
def tschirnhausen_transformation(T, max_coeff=10, max_tries=30, history=None,
fixed_order=True):
r"""
Given a univariate, monic, irreducible polynomial over the integers, find
another such polynomial defining the same number field.
Explanation
===========
See Alg 6.3.4 of [1].
Parameters
==========
T : Poly
The given polynomial
max_coeff : int
When choosing a transformation as part of the process,
keep the coeffs between plus and minus this.
max_tries : int
Consider at most this many transformations.
history : set, None, optional (default=None)
Pass a set of ``Poly.rep``'s in order to prevent any of these
polynomials from being returned as the polynomial ``U`` i.e. the
transformation of the given polynomial *T*. The given poly *T* will
automatically be added to this set, before we try to find a new one.
fixed_order : bool, default True
If ``True``, work through candidate transformations A(x) in a fixed
order, from small coeffs to large, resulting in deterministic behavior.
If ``False``, the A(x) are chosen randomly, while still working our way
up from small coefficients to larger ones.
Returns
=======
Pair ``(A, U)``
``A`` and ``U`` are ``Poly``, ``A`` is the
transformation, and ``U`` is the transformed polynomial that defines
the same number field as *T*. The polynomial ``A`` maps the roots of
*T* to the roots of ``U``.
Raises
======
MaxTriesException
if could not find a polynomial before exceeding *max_tries*.
"""
X = Dummy('X')
n = T.degree()
if history is None:
history = set()
history.add(T.rep)
if fixed_order:
coeff_generators = {}
deg_coeff_sum = 3
current_degree = 2
def get_coeff_generator(degree):
gen = coeff_generators.get(degree, coeff_search(degree, 1))
coeff_generators[degree] = gen
return gen
for i in range(max_tries):
# We never use linear A(x), since applying a fixed linear transformation
# to all roots will only multiply the discriminant of T by a square
# integer. This will change nothing important. In particular, if disc(T)
# was zero before, it will still be zero now, and typically we apply
# the transformation in hopes of replacing T by a squarefree poly.
if fixed_order:
# If d is degree and c max coeff, we move through the dc-space
# along lines of constant sum. First d + c = 3 with (d, c) = (2, 1).
# Then d + c = 4 with (d, c) = (3, 1), (2, 2). Then d + c = 5 with
# (d, c) = (4, 1), (3, 2), (2, 3), and so forth. For a given (d, c)
# we go though all sets of coeffs where max = c, before moving on.
gen = get_coeff_generator(current_degree)
coeffs = next(gen)
m = max(abs(c) for c in coeffs)
if current_degree + m > deg_coeff_sum:
if current_degree == 2:
deg_coeff_sum += 1
current_degree = deg_coeff_sum - 1
else:
current_degree -= 1
gen = get_coeff_generator(current_degree)
coeffs = next(gen)
a = [ZZ(1)] + [ZZ(c) for c in coeffs]
else:
# We use a progressive coeff bound, up to the max specified, since it
# is preferable to succeed with smaller coeffs.
# Give each coeff bound five tries, before incrementing.
C = min(i//5 + 1, max_coeff)
d = random.randint(2, n - 1)
a = dup_random(d, -C, C, ZZ)
A = Poly(a, T.gen)
U = Poly(T.resultant(X - A), X)
if U.rep not in history and dup_sqf_p(U.rep.to_list(), ZZ):
return A, U
raise MaxTriesException
def has_square_disc(T):
"""Convenience to check if a Poly or dup has square discriminant. """
d = T.discriminant() if isinstance(T, Poly) else dup_discriminant(T, ZZ)
return is_square(d)
def _galois_group_degree_3(T, max_tries=30, randomize=False):
r"""
Compute the Galois group of a polynomial of degree 3.
Explanation
===========
Uses Prop 6.3.5 of [1].
"""
from sympy.combinatorics.galois import S3TransitiveSubgroups
return ((S3TransitiveSubgroups.A3, True) if has_square_disc(T)
else (S3TransitiveSubgroups.S3, False))
def _galois_group_degree_4_root_approx(T, max_tries=30, randomize=False):
r"""
Compute the Galois group of a polynomial of degree 4.
Explanation
===========
Follows Alg 6.3.7 of [1], using a pure root approximation approach.
"""
from sympy.combinatorics.permutations import Permutation
from sympy.combinatorics.galois import S4TransitiveSubgroups
X = symbols('X0 X1 X2 X3')
# We start by considering the resolvent for the form
# F = X0*X2 + X1*X3
# and the group G = S4. In this case, the stabilizer H is D4 = < (0123), (02) >,
# and a set of representatives of G/H is {I, (01), (03)}
F1 = X[0]*X[2] + X[1]*X[3]
s1 = [
Permutation(3),
Permutation(3)(0, 1),
Permutation(3)(0, 3)
]
R1 = Resolvent(F1, X, s1)
# In the second half of the algorithm (if we reach it), we use another
# form and set of coset representatives. However, we may need to permute
# them first, so cannot form their resolvent now.
F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[0]**2
s2_pre = [
Permutation(3),
Permutation(3)(0, 2)
]
history = set()
for i in range(max_tries):
if i > 0:
# If we're retrying, need a new polynomial T.
_, T = tschirnhausen_transformation(T, max_tries=max_tries,
history=history,
fixed_order=not randomize)
R_dup, _, i0 = R1.eval_for_poly(T, find_integer_root=True)
# If R is not squarefree, must retry.
if not dup_sqf_p(R_dup, ZZ):
continue
# By Prop 6.3.1 of [1], Gal(T) is contained in A4 iff disc(T) is square.
sq_disc = has_square_disc(T)
if i0 is None:
# By Thm 6.3.3 of [1], Gal(T) is not conjugate to any subgroup of the
# stabilizer H = D4 that we chose. This means Gal(T) is either A4 or S4.
return ((S4TransitiveSubgroups.A4, True) if sq_disc
else (S4TransitiveSubgroups.S4, False))
# Gal(T) is conjugate to a subgroup of H = D4, so it is either V, C4
# or D4 itself.
if sq_disc:
# Neither C4 nor D4 is contained in A4, so Gal(T) must be V.
return (S4TransitiveSubgroups.V, True)
# Gal(T) can only be D4 or C4.
# We will now use our second resolvent, with G being that conjugate of D4 that
# Gal(T) is contained in. To determine the right conjugate, we will need
# the permutation corresponding to the integer root we found.
sigma = s1[i0]
# Applying sigma means permuting the args of F, and
# conjugating the set of coset representatives.
F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True)
s2 = [sigma*tau*sigma for tau in s2_pre]
R2 = Resolvent(F2, X, s2)
R_dup, _, _ = R2.eval_for_poly(T)
d = dup_discriminant(R_dup, ZZ)
# If d is zero (R has a repeated root), must retry.
if d == 0:
continue
if is_square(d):
return (S4TransitiveSubgroups.C4, False)
else:
return (S4TransitiveSubgroups.D4, False)
raise MaxTriesException
def _galois_group_degree_4_lookup(T, max_tries=30, randomize=False):
r"""
Compute the Galois group of a polynomial of degree 4.
Explanation
===========
Based on Alg 6.3.6 of [1], but uses resolvent coeff lookup.
"""
from sympy.combinatorics.galois import S4TransitiveSubgroups
history = set()
for i in range(max_tries):
R_dup = get_resolvent_by_lookup(T, 0)
if dup_sqf_p(R_dup, ZZ):
break
_, T = tschirnhausen_transformation(T, max_tries=max_tries,
history=history,
fixed_order=not randomize)
else:
raise MaxTriesException
# Compute list L of degrees of irreducible factors of R, in increasing order:
fl = dup_factor_list(R_dup, ZZ)
L = sorted(sum([
[len(r) - 1] * e for r, e in fl[1]
], []))
if L == [6]:
return ((S4TransitiveSubgroups.A4, True) if has_square_disc(T)
else (S4TransitiveSubgroups.S4, False))
if L == [1, 1, 4]:
return (S4TransitiveSubgroups.C4, False)
if L == [2, 2, 2]:
return (S4TransitiveSubgroups.V, True)
assert L == [2, 4]
return (S4TransitiveSubgroups.D4, False)
def _galois_group_degree_5_hybrid(T, max_tries=30, randomize=False):
r"""
Compute the Galois group of a polynomial of degree 5.
Explanation
===========
Based on Alg 6.3.9 of [1], but uses a hybrid approach, combining resolvent
coeff lookup, with root approximation.
"""
from sympy.combinatorics.galois import S5TransitiveSubgroups
from sympy.combinatorics.permutations import Permutation
X5 = symbols("X0,X1,X2,X3,X4")
res = define_resolvents()
F51, _, s51 = res[(5, 1)]
F51 = F51.as_expr(*X5)
R51 = Resolvent(F51, X5, s51)
history = set()
reached_second_stage = False
for i in range(max_tries):
if i > 0:
_, T = tschirnhausen_transformation(T, max_tries=max_tries,
history=history,
fixed_order=not randomize)
R51_dup = get_resolvent_by_lookup(T, 1)
if not dup_sqf_p(R51_dup, ZZ):
continue
# First stage
# If we have not yet reached the second stage, then the group still
# might be S5, A5, or M20, so must test for that.
if not reached_second_stage:
sq_disc = has_square_disc(T)
if dup_irreducible_p(R51_dup, ZZ):
return ((S5TransitiveSubgroups.A5, True) if sq_disc
else (S5TransitiveSubgroups.S5, False))
if not sq_disc:
return (S5TransitiveSubgroups.M20, False)
# Second stage
reached_second_stage = True
# R51 must have an integer root for T.
# To choose our second resolvent, we need to know which conjugate of
# F51 is a root.
rounded_roots = R51.round_roots_to_integers_for_poly(T)
# These are integers, and candidates to be roots of R51.
# We find the first one that actually is a root.
for permutation_index, candidate_root in rounded_roots.items():
if not dup_eval(R51_dup, candidate_root, ZZ):
break
X = X5
F2_pre = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[4]**2 + X[4]*X[0]**2
s2_pre = [
Permutation(4),
Permutation(4)(0, 1)(2, 4)
]
i0 = permutation_index
sigma = s51[i0]
F2 = F2_pre.subs(zip(X, sigma(X)), simultaneous=True)
s2 = [sigma*tau*sigma for tau in s2_pre]
R2 = Resolvent(F2, X, s2)
R_dup, _, _ = R2.eval_for_poly(T)
d = dup_discriminant(R_dup, ZZ)
if d == 0:
continue
if is_square(d):
return (S5TransitiveSubgroups.C5, True)
else:
return (S5TransitiveSubgroups.D5, True)
raise MaxTriesException
def _galois_group_degree_5_lookup_ext_factor(T, max_tries=30, randomize=False):
r"""
Compute the Galois group of a polynomial of degree 5.
Explanation
===========
Based on Alg 6.3.9 of [1], but uses resolvent coeff lookup, plus
factorization over an algebraic extension.
"""
from sympy.combinatorics.galois import S5TransitiveSubgroups
_T = T
history = set()
for i in range(max_tries):
R_dup = get_resolvent_by_lookup(T, 1)
if dup_sqf_p(R_dup, ZZ):
break
_, T = tschirnhausen_transformation(T, max_tries=max_tries,
history=history,
fixed_order=not randomize)
else:
raise MaxTriesException
sq_disc = has_square_disc(T)
if dup_irreducible_p(R_dup, ZZ):
return ((S5TransitiveSubgroups.A5, True) if sq_disc
else (S5TransitiveSubgroups.S5, False))
if not sq_disc:
return (S5TransitiveSubgroups.M20, False)
# If we get this far, Gal(T) can only be D5 or C5.
# But for Gal(T) to have order 5, T must already split completely in
# the extension field obtained by adjoining a single one of its roots.
fl = Poly(_T, domain=ZZ.alg_field_from_poly(_T)).factor_list()[1]
if len(fl) == 5:
return (S5TransitiveSubgroups.C5, True)
else:
return (S5TransitiveSubgroups.D5, True)
def _galois_group_degree_6_lookup(T, max_tries=30, randomize=False):
r"""
Compute the Galois group of a polynomial of degree 6.
Explanation
===========
Based on Alg 6.3.10 of [1], but uses resolvent coeff lookup.
"""
from sympy.combinatorics.galois import S6TransitiveSubgroups
# First resolvent:
history = set()
for i in range(max_tries):
R_dup = get_resolvent_by_lookup(T, 1)
if dup_sqf_p(R_dup, ZZ):
break
_, T = tschirnhausen_transformation(T, max_tries=max_tries,
history=history,
fixed_order=not randomize)
else:
raise MaxTriesException
fl = dup_factor_list(R_dup, ZZ)
# Group the factors by degree.
factors_by_deg = defaultdict(list)
for r, _ in fl[1]:
factors_by_deg[len(r) - 1].append(r)
L = sorted(sum([
[d] * len(ff) for d, ff in factors_by_deg.items()
], []))
T_has_sq_disc = has_square_disc(T)
if L == [1, 2, 3]:
f1 = factors_by_deg[3][0]
return ((S6TransitiveSubgroups.C6, False) if has_square_disc(f1)
else (S6TransitiveSubgroups.D6, False))
elif L == [3, 3]:
f1, f2 = factors_by_deg[3]
any_square = has_square_disc(f1) or has_square_disc(f2)
return ((S6TransitiveSubgroups.G18, False) if any_square
else (S6TransitiveSubgroups.G36m, False))
elif L == [2, 4]:
if T_has_sq_disc:
return (S6TransitiveSubgroups.S4p, True)
else:
f1 = factors_by_deg[4][0]
return ((S6TransitiveSubgroups.A4xC2, False) if has_square_disc(f1)
else (S6TransitiveSubgroups.S4xC2, False))
elif L == [1, 1, 4]:
return ((S6TransitiveSubgroups.A4, True) if T_has_sq_disc
else (S6TransitiveSubgroups.S4m, False))
elif L == [1, 5]:
return ((S6TransitiveSubgroups.PSL2F5, True) if T_has_sq_disc
else (S6TransitiveSubgroups.PGL2F5, False))
elif L == [1, 1, 1, 3]:
return (S6TransitiveSubgroups.S3, False)
assert L == [6]
# Second resolvent:
history = set()
for i in range(max_tries):
R_dup = get_resolvent_by_lookup(T, 2)
if dup_sqf_p(R_dup, ZZ):
break
_, T = tschirnhausen_transformation(T, max_tries=max_tries,
history=history,
fixed_order=not randomize)
else:
raise MaxTriesException
T_has_sq_disc = has_square_disc(T)
if dup_irreducible_p(R_dup, ZZ):
return ((S6TransitiveSubgroups.A6, True) if T_has_sq_disc
else (S6TransitiveSubgroups.S6, False))
else:
return ((S6TransitiveSubgroups.G36p, True) if T_has_sq_disc
else (S6TransitiveSubgroups.G72, False))
@public
def galois_group(f, *gens, by_name=False, max_tries=30, randomize=False, **args):
r"""
Compute the Galois group for polynomials *f* up to degree 6.
Examples
========
>>> from sympy import galois_group
>>> from sympy.abc import x
>>> f = x**4 + 1
>>> G, alt = galois_group(f)
>>> print(G)
PermutationGroup([
(0 1)(2 3),
(0 2)(1 3)])
The group is returned along with a boolean, indicating whether it is
contained in the alternating group $A_n$, where $n$ is the degree of *T*.
Along with other group properties, this can help determine which group it
is:
>>> alt
True
>>> G.order()
4
Alternatively, the group can be returned by name:
>>> G_name, _ = galois_group(f, by_name=True)
>>> print(G_name)
S4TransitiveSubgroups.V
The group itself can then be obtained by calling the name's
``get_perm_group()`` method:
>>> G_name.get_perm_group()
PermutationGroup([
(0 1)(2 3),
(0 2)(1 3)])
Group names are values of the enum classes
:py:class:`sympy.combinatorics.galois.S1TransitiveSubgroups`,
:py:class:`sympy.combinatorics.galois.S2TransitiveSubgroups`,
etc.
Parameters
==========
f : Expr
Irreducible polynomial over :ref:`ZZ` or :ref:`QQ`, whose Galois group
is to be determined.
gens : optional list of symbols
For converting *f* to Poly, and will be passed on to the
:py:func:`~.poly_from_expr` function.
by_name : bool, default False
If ``True``, the Galois group will be returned by name.
Otherwise it will be returned as a :py:class:`~.PermutationGroup`.
max_tries : int, default 30
Make at most this many attempts in those steps that involve
generating Tschirnhausen transformations.
randomize : bool, default False
If ``True``, then use random coefficients when generating Tschirnhausen
transformations. Otherwise try transformations in a fixed order. Both
approaches start with small coefficients and degrees and work upward.
args : optional
For converting *f* to Poly, and will be passed on to the
:py:func:`~.poly_from_expr` function.
Returns
=======
Pair ``(G, alt)``
The first element ``G`` indicates the Galois group. It is an instance
of one of the :py:class:`sympy.combinatorics.galois.S1TransitiveSubgroups`
:py:class:`sympy.combinatorics.galois.S2TransitiveSubgroups`, etc. enum
classes if *by_name* was ``True``, and a :py:class:`~.PermutationGroup`
if ``False``.
The second element is a boolean, saying whether the group is contained
in the alternating group $A_n$ ($n$ the degree of *T*).
Raises
======
ValueError
if *f* is of an unsupported degree.
MaxTriesException
if could not complete before exceeding *max_tries* in those steps
that involve generating Tschirnhausen transformations.
See Also
========
.Poly.galois_group
"""
gens = gens or []
args = args or {}
try:
F, opt = poly_from_expr(f, *gens, **args)
except PolificationFailed as exc:
raise ComputationFailed('galois_group', 1, exc)
return F.galois_group(by_name=by_name, max_tries=max_tries,
randomize=randomize)
| MaxTriesException |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/logs/events.py | {
"start": 4677,
"end": 4866
} | class ____(graphene.Interface):
stepKey = graphene.Field(graphene.String)
solidHandleID = graphene.Field(graphene.String)
class Meta:
name = "StepEvent"
| GrapheneStepEvent |
python | doocs__leetcode | solution/2400-2499/2485.Find the Pivot Integer/Solution2.py | {
"start": 0,
"end": 152
} | class ____:
def pivotInteger(self, n: int) -> int:
y = n * (n + 1) // 2
x = int(sqrt(y))
return x if x * x == y else -1
| Solution |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/metaclass7.py | {
"start": 1195,
"end": 1387
} | class ____(metaclass=MetaClass4):
def __new__(cls, *args, **kwargs) -> Self:
return super().__new__(cls, *args, **kwargs)
v4 = Class4()
reveal_type(v4, expected_text="Class4")
| Class4 |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_finetuning_callback.py | {
"start": 15127,
"end": 16659
} | class ____(BoringModel):
def __init__(self):
super().__init__()
self.layer = nn.Linear(32, 2)
self.backbone = nn.Linear(32, 32)
def forward(self, x):
return self.layer(self.backbone(x))
def test_callbacks_restore_backbone(tmp_path):
"""Test callbacks restore is called after optimizers have been re-created but before optimizer states reload."""
ckpt = ModelCheckpoint(dirpath=tmp_path, save_last=True)
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=2,
enable_progress_bar=False,
callbacks=[ckpt, BackboneFinetuning(unfreeze_backbone_at_epoch=1)],
)
trainer.fit(BackboneBoringModel())
# initialize a trainer that continues the previous training
trainer = Trainer(
default_root_dir=tmp_path,
limit_train_batches=1,
limit_val_batches=1,
max_epochs=3,
enable_progress_bar=False,
callbacks=BackboneFinetuning(unfreeze_backbone_at_epoch=1),
)
trainer.fit(BackboneBoringModel(), ckpt_path=ckpt.last_model_path)
@RunIf(deepspeed=True)
def test_unsupported_strategies(tmp_path):
model = BackboneBoringModel()
callback = BackboneFinetuning()
trainer = Trainer(accelerator="cpu", strategy="deepspeed", callbacks=[callback])
with pytest.raises(NotImplementedError, match="does not support running with the DeepSpeed strategy"):
callback.setup(trainer, model, stage=None)
| BackboneBoringModel |
python | walkccc__LeetCode | solutions/2462. Total Cost to Hire K Workers/2462.py | {
"start": 0,
"end": 766
} | class ____:
def totalCost(self, costs: list[int], k: int, candidates: int) -> int:
ans = 0
i = 0
j = len(costs) - 1
minHeapL = [] # First half
minHeapR = [] # Second half
for _ in range(k):
while len(minHeapL) < candidates and i <= j:
heapq.heappush(minHeapL, costs[i])
i += 1
while len(minHeapR) < candidates and i <= j:
heapq.heappush(minHeapR, costs[j])
j -= 1
if not minHeapL:
ans += heapq.heappop(minHeapR)
elif not minHeapR:
ans += heapq.heappop(minHeapL)
# Both `minHeapL` and `minHeapR` are not empty.
elif minHeapL[0] <= minHeapR[0]:
ans += heapq.heappop(minHeapL)
else:
ans += heapq.heappop(minHeapR)
return ans
| Solution |
python | ray-project__ray | rllib/algorithms/impala/utils.py | {
"start": 65,
"end": 3216
} | class ____:
def __init__(self):
self.L = 0.0
self.H = 0.4
self._recompute_candidates()
# Defaultdict mapping.
self.results = defaultdict(lambda: deque(maxlen=3))
self.iteration = 0
def _recompute_candidates(self):
self.center = (self.L + self.H) / 2
self.low = (self.L + self.center) / 2
self.high = (self.H + self.center) / 2
# Expand a little if range becomes too narrow to avoid
# overoptimization.
if self.H - self.L < 0.00001:
self.L = max(self.center - 0.1, 0.0)
self.H = min(self.center + 0.1, 1.0)
self._recompute_candidates()
# Reduce results, just in case it has grown too much.
c, l, h = (
self.results[self.center],
self.results[self.low],
self.results[self.high],
)
self.results = defaultdict(lambda: deque(maxlen=3))
self.results[self.center] = c
self.results[self.low] = l
self.results[self.high] = h
@property
def current(self):
if len(self.results[self.center]) < 3:
return self.center
elif len(self.results[self.low]) < 3:
return self.low
else:
return self.high
def log_result(self, performance):
self.iteration += 1
# Skip first 2 iterations for ignoring warm-up effect.
if self.iteration < 2:
return
self.results[self.current].append(performance)
# If all candidates have at least 3 results logged, re-evaluate
# and compute new L and H.
center, low, high = self.center, self.low, self.high
if (
len(self.results[center]) == 3
and len(self.results[low]) == 3
and len(self.results[high]) == 3
):
perf_center = np.mean(self.results[center])
perf_low = np.mean(self.results[low])
perf_high = np.mean(self.results[high])
# Case: `center` is best.
if perf_center > perf_low and perf_center > perf_high:
self.L = low
self.H = high
# Erase low/high results: We'll not use these again.
self.results.pop(low, None)
self.results.pop(high, None)
# Case: `low` is best.
elif perf_low > perf_center and perf_low > perf_high:
self.H = center
# Erase center/high results: We'll not use these again.
self.results.pop(center, None)
self.results.pop(high, None)
# Case: `high` is best.
else:
self.L = center
# Erase center/low results: We'll not use these again.
self.results.pop(center, None)
self.results.pop(low, None)
self._recompute_candidates()
if __name__ == "__main__":
controller = _SleepTimeController()
for _ in range(1000):
performance = np.random.random()
controller.log_result(performance)
| _SleepTimeController |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 858337,
"end": 859731
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"column",
"content",
"created_at",
"creator",
"database_id",
"is_archived",
"note",
"project",
"resource_path",
"state",
"updated_at",
"url",
)
column = sgqlc.types.Field("ProjectColumn", graphql_name="column")
content = sgqlc.types.Field("ProjectCardItem", graphql_name="content")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
creator = sgqlc.types.Field(Actor, graphql_name="creator")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
is_archived = sgqlc.types.Field(
sgqlc.types.non_null(Boolean), graphql_name="isArchived"
)
note = sgqlc.types.Field(String, graphql_name="note")
project = sgqlc.types.Field(sgqlc.types.non_null(Project), graphql_name="project")
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
state = sgqlc.types.Field(ProjectCardState, graphql_name="state")
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
| ProjectCard |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 35275,
"end": 36100
} | class ____(Source):
ind: int
def name(self) -> str:
return f"___get_torch_function_mode_stack_at({self._get_index()})"
def _get_index(self) -> int:
from .variables.torch_function import TorchFunctionModeStackVariable
return TorchFunctionModeStackVariable.get_mode_index(self.ind)
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.load_import_from(
utils.__name__, "get_torch_function_mode_stack_at"
)
)
codegen.extend_output([codegen.create_load_const(self._get_index())])
codegen.extend_output(create_call_function(1, False))
def guard_source(self) -> GuardSource:
return GuardSource.GLOBAL
@dataclasses.dataclass(frozen=True)
| TorchFunctionModeStackSource |
python | dask__dask | dask/dataframe/dask_expr/_reductions.py | {
"start": 29327,
"end": 29373
} | class ____(Max):
reduction_chunk = M.min
| Min |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-shopify/source_shopify/streams/streams.py | {
"start": 9044,
"end": 9127
} | class ____(MetafieldShopifySubstream):
parent_stream_class = Pages
| MetafieldPages |
python | mlflow__mlflow | dev/clint/src/clint/rules/missing_docstring_param.py | {
"start": 36,
"end": 247
} | class ____(Rule):
def __init__(self, params: set[str]) -> None:
self.params = params
def _message(self) -> str:
return f"Missing parameters in docstring: {self.params}"
| MissingDocstringParam |
python | pyinstaller__pyinstaller | PyInstaller/lib/modulegraph/modulegraph.py | {
"start": 26740,
"end": 28354
} | class ____(BaseModule):
def __init__(self, *args, **kwds):
warnings.warn(
"This class will be removed in a future version of modulegraph",
DeprecationWarning)
super(FlatPackage, *args, **kwds)
# HTML templates for ModuleGraph generator
header = """\
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>%(TITLE)s</title>
<style>
.node { padding: 0.5em 0 0.5em; border-top: thin grey dotted; }
.moduletype { font: smaller italic }
.node a { text-decoration: none; color: #006699; }
.node a:visited { text-decoration: none; color: #2f0099; }
</style>
</head>
<body>
<h1>%(TITLE)s</h1>"""
entry = """
<div class="node">
<a name="%(NAME)s"></a>
%(CONTENT)s
</div>"""
contpl = """<tt>%(NAME)s</tt> <span class="moduletype">%(TYPE)s</span>"""
contpl_linked = """\
<a target="code" href="%(URL)s" type="text/plain"><tt>%(NAME)s</tt></a>
<span class="moduletype">%(TYPE)s</span>"""
imports = """\
<div class="import">
%(HEAD)s:
%(LINKS)s
</div>
"""
footer = """
</body>
</html>"""
def _ast_names(names):
result = []
for nm in names:
if isinstance(nm, ast.alias):
result.append(nm.name)
else:
result.append(nm)
result = [r for r in result if r != '__main__']
return result
def uniq(seq):
"""Remove duplicates from a list, preserving order"""
# Taken from https://stackoverflow.com/questions/480214
seen = set()
seen_add = seen.add
return [x for x in seq if not (x in seen or seen_add(x))]
DEFAULT_IMPORT_LEVEL = 0
| ArchiveModule |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 210516,
"end": 211161
} | class ____(TestCase):
def test_complex_warning(self):
x = np.array([1, 2])
y = np.array([1 - 2j, 1 + 2j])
# np.ComplexWarning moved to np.exceptions in numpy>=2.0.0
# np.exceptions only available in numpy>=1.25.0
has_exceptions_ns = hasattr(np, "exceptions")
ComplexWarning = (
np.exceptions.ComplexWarning if has_exceptions_ns else np.ComplexWarning
)
with warnings.catch_warnings():
warnings.simplefilter("error", ComplexWarning)
assert_raises(ComplexWarning, x.__setitem__, slice(None), y)
assert_equal(x, [1, 2])
| TestWarnings |
python | cython__cython | Demos/benchmarks/hexiom2.py | {
"start": 6416,
"end": 17136
} | class ____(object):
def __init__(self, hex, tiles, done = None):
self.hex = hex
self.tiles = tiles
self.done = Done(hex.count) if done is None else done
def clone(self):
return Pos(self.hex, self.tiles, self.done.clone())
##################################
@cython.locals(pos=Pos, i=cython.long, v=cython.int,
nid=cython.int, num=cython.int,
empties=cython.int, filled=cython.int,
vmax=cython.int, vmin=cython.int, cell=list, left=cython.int[8])
def constraint_pass(pos, last_move=None):
changed = False
left = pos.tiles[:]
done = pos.done
# Remove impossible values from free cells
free_cells = (range(done.count) if last_move is None
else pos.hex.get_by_id(last_move).links)
for i in free_cells:
if not done.already_done(i):
vmax = 0
vmin = 0
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
for num in range(7):
if (num < vmin) or (num > vmax):
if done.remove(i, num):
changed = True
# Computes how many of each value is still free
for cell in done.cells:
if len(cell) == 1:
left[cell[0]] -= 1
for v in range(8):
# If there is none, remove the possibility from all tiles
if (pos.tiles[v] > 0) and (left[v] == 0):
if done.remove_unfixed(v):
changed = True
else:
possible = sum([(1 if v in cell else 0) for cell in done.cells])
# If the number of possible cells for a value is exactly the number of available tiles
# put a tile in each cell
if pos.tiles[v] == possible:
for i in range(done.count):
cell = done.cells[i]
if (not done.already_done(i)) and (v in cell):
done.set_done(i, v)
changed = True
# Force empty or non-empty around filled cells
filled_cells = (range(done.count) if last_move is None
else [last_move])
for i in filled_cells:
if done.already_done(i):
num = done[i][0]
empties = 0
filled = 0
unknown = []
cells_around = pos.hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] == EMPTY:
empties += 1
else:
filled += 1
else:
unknown.append(nid)
if len(unknown) > 0:
if num == filled:
for u in unknown:
if EMPTY in done[u]:
done.set_done(u, EMPTY)
changed = True
#else:
# raise Exception("Houston, we've got a problem")
elif num == filled + len(unknown):
for u in unknown:
if done.remove(u, EMPTY):
changed = True
return changed
ASCENDING = 1
DESCENDING = -1
def find_moves(pos, strategy, order):
done = pos.done
cell_id = done.next_cell(pos, strategy)
if cell_id < 0:
return []
if order == ASCENDING:
return [(cell_id, v) for v in done[cell_id]]
else:
# Try higher values first and EMPTY last
moves = list(reversed([(cell_id, v) for v in done[cell_id] if v != EMPTY]))
if EMPTY in done[cell_id]:
moves.append((cell_id, EMPTY))
return moves
def play_move(pos, move):
(cell_id, i) = move
pos.done.set_done(cell_id, i)
@cython.locals(x=cython.int, y=cython.int, ry=cython.int, id=cython.int)
def print_pos(pos, output):
hex = pos.hex
done = pos.done
size = hex.size
for y in range(size):
print(u" " * (size - y - 1), end=u"", file=output)
for x in range(size + y):
pos2 = (x, y)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else u"."
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
for y in range(1, size):
print(u" " * y, end=u"", file=output)
for x in range(y, size * 2 - 1):
ry = size + y - 1
pos2 = (x, ry)
id = hex.get_by_pos(pos2).id
if done.already_done(id):
c = str(done[id][0]) if done[id][0] != EMPTY else (u".")
else:
c = u"?"
print(u"%s " % c, end=u"", file=output)
print(end=u"\n", file=output)
OPEN = 0
SOLVED = 1
IMPOSSIBLE = -1
@cython.locals(i=cython.int, num=cython.int, nid=cython.int,
vmin=cython.int, vmax=cython.int, tiles=cython.int[8])
def solved(pos, output, verbose=False):
hex = pos.hex
tiles = pos.tiles[:]
done = pos.done
exact = True
all_done = True
for i in range(hex.count):
if len(done[i]) == 0:
return IMPOSSIBLE
elif done.already_done(i):
num = done[i][0]
tiles[num] -= 1
if (tiles[num] < 0):
return IMPOSSIBLE
vmax = 0
vmin = 0
if num != EMPTY:
cells_around = hex.get_by_id(i).links
for nid in cells_around:
if done.already_done(nid):
if done[nid][0] != EMPTY:
vmin += 1
vmax += 1
else:
vmax += 1
if (num < vmin) or (num > vmax):
return IMPOSSIBLE
if num != vmin:
exact = False
else:
all_done = False
if (not all_done) or (not exact):
return OPEN
print_pos(pos, output)
return SOLVED
@cython.locals(move=tuple)
def solve_step(prev, strategy, order, output, first=False):
if first:
pos = prev.clone()
while constraint_pass(pos):
pass
else:
pos = prev
moves = find_moves(pos, strategy, order)
if len(moves) == 0:
return solved(pos, output)
else:
for move in moves:
#print("Trying (%d, %d)" % (move[0], move[1]))
ret = OPEN
new_pos = pos.clone()
play_move(new_pos, move)
#print_pos(new_pos)
while constraint_pass(new_pos, move[0]):
pass
cur_status = solved(new_pos, output)
if cur_status != OPEN:
ret = cur_status
else:
ret = solve_step(new_pos, strategy, order, output)
if ret == SOLVED:
return SOLVED
return IMPOSSIBLE
@cython.locals(tot=cython.int, tiles=cython.int[8])
def check_valid(pos):
hex = pos.hex
tiles = pos.tiles
done = pos.done
# fill missing entries in tiles
tot = 0
for i in range(8):
if tiles[i] > 0:
tot += tiles[i]
else:
tiles[i] = 0
# check total
if tot != hex.count:
raise Exception("Invalid input. Expected %d tiles, got %d." % (hex.count, tot))
def solve(pos, strategy, order, output):
check_valid(pos)
return solve_step(pos, strategy, order, output, first=True)
# TODO Write an 'iterator' to go over all x,y positions
@cython.locals(x=cython.int, y=cython.int, p=cython.int, tiles=cython.int[8],
size=cython.int, inctile=cython.int, linei=cython.int)
def read_file(file):
lines = [line.strip("\r\n") for line in file.splitlines()]
size = int(lines[0])
hex = Hex(size)
linei = 1
tiles = 8 * [0]
done = Done(hex.count)
for y in range(size):
line = lines[linei][size - y - 1:]
p = 0
for x in range(size + y):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, y, hex.get_by_pos((x, y)).id))
done.set_done(hex.get_by_pos((x, y)).id, inctile)
linei += 1
for y in range(1, size):
ry = size - 1 + y
line = lines[linei][y:]
p = 0
for x in range(y, size * 2 - 1):
tile = line[p:p + 2]
p += 2
if tile[1] == ".":
inctile = EMPTY
else:
inctile = int(tile)
tiles[inctile] += 1
# Look for locked tiles
if tile[0] == "+":
print("Adding locked tile: %d at pos %d, %d, id=%d" %
(inctile, x, ry, hex.get_by_pos((x, ry)).id))
done.set_done(hex.get_by_pos((x, ry)).id, inctile)
linei += 1
hex.link_nodes()
done.filter_tiles(tiles)
return Pos(hex, tiles, done)
def solve_file(file, strategy, order, output):
pos = read_file(file)
solve(pos, strategy, order, output)
def run_level36():
f = """\
4
2 1 1 2
3 3 3 . .
2 3 3 . 4 .
. 2 . 2 4 3 2
2 2 . . . 2
4 3 4 . .
3 2 3 3
"""
order = DESCENDING
strategy = Done.FIRST_STRATEGY
output = StringIO()
solve_file(f, strategy, order, output)
expected = """\
3 4 3 2
3 4 4 . 3
2 . . 3 4 3
2 . 1 . 3 . 2
3 3 . 2 . 2
3 . 2 . 2
2 2 . 1
"""
if output.getvalue() != expected:
raise AssertionError("got a wrong answer:\n%s" % output.getvalue())
def main(n):
# only run 1/25th of the requested number of iterations.
# with the default n=50 from runner.py, this means twice.
l = []
for i in range(n):
t0 = time.time()
run_level36()
time_elapsed = time.time() - t0
l.append(time_elapsed)
return l
if __name__ == "__main__":
import util, optparse
parser = optparse.OptionParser(
usage="%prog [options]",
description="Test the performance of the hexiom2 benchmark")
util.add_standard_options_to(parser)
options, args = parser.parse_args()
util.run_benchmark(options, options.num_runs, main)
| Pos |
python | facebookresearch__faiss | tests/test_rabitq.py | {
"start": 30995,
"end": 48348
} | class ____(unittest.TestCase):
def do_comparison_vs_ivfrabitq_test(self, metric_type=faiss.METRIC_L2):
"""Test IVFRaBitQFastScan produces similar results to IVFRaBitQ"""
nlist = 64
nprobe = 8
nq = 500
ds = datasets.SyntheticDataset(128, 2048, 2048, nq)
k = 10
d = ds.d
xb = ds.get_database()
xt = ds.get_train()
xq = ds.get_queries()
# Ground truth for evaluation
index_flat = faiss.IndexFlat(d, metric_type)
index_flat.train(xt)
index_flat.add(xb)
_, I_f = index_flat.search(xq, k)
# Test different combinations of centered and qb values
for centered in [False, True]:
for qb in [1, 4, 8]:
# IndexIVFRaBitQ baseline
quantizer = faiss.IndexFlat(d, metric_type)
index_ivf_rbq = faiss.IndexIVFRaBitQ(
quantizer, d, nlist, metric_type
)
index_ivf_rbq.qb = qb
index_ivf_rbq.nprobe = nprobe
index_ivf_rbq.train(xt)
index_ivf_rbq.add(xb)
rbq_params = faiss.IVFRaBitQSearchParameters()
rbq_params.nprobe = nprobe
rbq_params.qb = qb
rbq_params.centered = centered
_, I_ivf_rbq = index_ivf_rbq.search(xq, k, params=rbq_params)
# IndexIVFRaBitQFastScan
quantizer_fs = faiss.IndexFlat(d, metric_type)
index_ivf_rbq_fs = faiss.IndexIVFRaBitQFastScan(
quantizer_fs, d, nlist, metric_type, 32
)
index_ivf_rbq_fs.qb = qb
index_ivf_rbq_fs.centered = centered
index_ivf_rbq_fs.nprobe = nprobe
index_ivf_rbq_fs.train(xt)
index_ivf_rbq_fs.add(xb)
rbq_fs_params = faiss.IVFSearchParameters()
rbq_fs_params.nprobe = nprobe
_, I_ivf_rbq_fs = index_ivf_rbq_fs.search(
xq, k, params=rbq_fs_params
)
# Evaluate against ground truth
eval_ivf_rbq = faiss.eval_intersection(
I_ivf_rbq[:, :k], I_f[:, :k]
)
eval_ivf_rbq /= ds.nq * k
eval_ivf_rbq_fs = faiss.eval_intersection(
I_ivf_rbq_fs[:, :k], I_f[:, :k]
)
eval_ivf_rbq_fs /= ds.nq * k
# Performance gap should be within 1 percent
recall_gap = abs(eval_ivf_rbq - eval_ivf_rbq_fs)
np.testing.assert_(
recall_gap < 0.01,
f"Performance gap too large for centered={centered}, "
f"qb={qb}: {recall_gap:.4f}"
)
def test_comparison_vs_ivfrabitq_L2(self):
self.do_comparison_vs_ivfrabitq_test(faiss.METRIC_L2)
def test_comparison_vs_ivfrabitq_IP(self):
self.do_comparison_vs_ivfrabitq_test(faiss.METRIC_INNER_PRODUCT)
def test_encode_decode_consistency(self):
"""Test that encoding and decoding operations are consistent"""
nlist = 32
ds = datasets.SyntheticDataset(64, 1000, 1000, 0) # No queries needed
d = ds.d
xt = ds.get_train()
xb = ds.get_database()
# Test with IndexIVFRaBitQFastScan
quantizer = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq_fs = faiss.IndexIVFRaBitQFastScan(
quantizer, d, nlist, faiss.METRIC_L2, 32 # bbs=32 for FastScan
)
index_ivf_rbq_fs.qb = 8
index_ivf_rbq_fs.centered = False
index_ivf_rbq_fs.train(xt)
# Add vectors to the index
test_vectors = xb[:100]
index_ivf_rbq_fs.add(test_vectors)
# Reconstruct the vectors using reconstruct_n
decoded_fs = index_ivf_rbq_fs.reconstruct_n(0, len(test_vectors))
# Check reconstruction error for FastScan
distances_fs = np.sum((test_vectors - decoded_fs) ** 2, axis=1)
avg_distance_fs = np.mean(distances_fs)
# Compare with original IndexIVFRaBitQ
quantizer_orig = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq = faiss.IndexIVFRaBitQ(
quantizer_orig, d, nlist, faiss.METRIC_L2
)
index_ivf_rbq.qb = 8
index_ivf_rbq.train(xt)
index_ivf_rbq.add(test_vectors)
# Reconstruct with original IVFRaBitQ
decoded_orig = index_ivf_rbq.reconstruct_n(0, len(test_vectors))
# Check reconstruction error for original
distances_orig = np.sum((test_vectors - decoded_orig) ** 2, axis=1)
avg_distance_orig = np.mean(distances_orig)
# FastScan should have similar reconstruction error to original
np.testing.assert_(
abs(avg_distance_fs - avg_distance_orig) < 0.01
)
def test_nprobe_variations(self):
"""Test different nprobe values comparing with IVFRaBitQ"""
nlist = 32
ds = datasets.SyntheticDataset(64, 1000, 1000, 50)
k = 10
d = ds.d
xb = ds.get_database()
xt = ds.get_train()
xq = ds.get_queries()
# Ground truth
index_flat = faiss.IndexFlat(d, faiss.METRIC_L2)
index_flat.train(xt)
index_flat.add(xb)
_, I_f = index_flat.search(xq, k)
# Test different nprobe values
for nprobe in [1, 4, 8, 16]:
# IndexIVFRaBitQ baseline
quantizer_rbq = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq = faiss.IndexIVFRaBitQ(
quantizer_rbq, d, nlist, faiss.METRIC_L2
)
index_ivf_rbq.qb = 8
index_ivf_rbq.nprobe = nprobe
index_ivf_rbq.train(xt)
index_ivf_rbq.add(xb)
rbq_params = faiss.IVFRaBitQSearchParameters()
rbq_params.nprobe = nprobe
rbq_params.qb = 8
rbq_params.centered = False
_, I_rbq = index_ivf_rbq.search(xq, k, params=rbq_params)
# IndexIVFRaBitQFastScan
quantizer_fs = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq_fs = faiss.IndexIVFRaBitQFastScan(
quantizer_fs, d, nlist, faiss.METRIC_L2, 32
)
index_ivf_rbq_fs.qb = 8
index_ivf_rbq_fs.centered = False
index_ivf_rbq_fs.nprobe = nprobe
index_ivf_rbq_fs.train(xt)
index_ivf_rbq_fs.add(xb)
rbq_fs_params = faiss.IVFSearchParameters()
rbq_fs_params.nprobe = nprobe
_, I_fs = index_ivf_rbq_fs.search(xq, k, params=rbq_fs_params)
# Evaluate against ground truth
eval_rbq = faiss.eval_intersection(I_rbq[:, :k], I_f[:, :k])
eval_rbq /= ds.nq * k
eval_fs = faiss.eval_intersection(I_fs[:, :k], I_f[:, :k])
eval_fs /= ds.nq * k
# Performance gap should be within 1 percent
performance_gap = abs(eval_rbq - eval_fs)
np.testing.assert_(
performance_gap < 0.01,
f"Performance gap too large for nprobe={nprobe}: "
f"{performance_gap:.4f}"
)
def test_serialization(self):
"""Test serialization and deserialization of IVFRaBitQFastScan"""
# Use similar parameters to non-IVF test but with IVF structure
nlist = 4 # Small number of centroids for simpler test
ds = datasets.SyntheticDataset(64, 1000, 100, 20) # Match dataset size
d = ds.d
xb = ds.get_database()
xt = ds.get_train()
xq = ds.get_queries()
# Create index similar to non-IVF but with IVF structure
quantizer = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq_fs = faiss.IndexIVFRaBitQFastScan(
quantizer, d, nlist, faiss.METRIC_L2
)
index_ivf_rbq_fs.train(xt)
index_ivf_rbq_fs.add(xb)
# Set reasonable search parameters
index_ivf_rbq_fs.nprobe = 2 # Use fewer probes for stability
# Test search before serialization
Dref, Iref = index_ivf_rbq_fs.search(xq, 10)
# Serialize and deserialize
b = faiss.serialize_index(index_ivf_rbq_fs)
index2 = faiss.deserialize_index(b)
# Set same search parameters on deserialized index
index2.nprobe = 2
# Test search after deserialization
Dnew, Inew = index2.search(xq, 10)
# Results should be identical
np.testing.assert_array_equal(Dref, Dnew)
np.testing.assert_array_equal(Iref, Inew)
def test_memory_management(self):
"""Test that memory is managed correctly during operations"""
nlist = 16
ds = datasets.SyntheticDataset(64, 1000, 1000, 50)
d = ds.d
xb = ds.get_database()
xt = ds.get_train()
quantizer = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq_fs = faiss.IndexIVFRaBitQFastScan(
quantizer, d, nlist, faiss.METRIC_L2
)
index_ivf_rbq_fs.train(xt)
# Add data in chunks to test memory management
chunk_size = 250
for i in range(0, ds.nb, chunk_size):
end_idx = min(i + chunk_size, ds.nb)
chunk_data = xb[i:end_idx]
index_ivf_rbq_fs.add(chunk_data)
# Verify total count
np.testing.assert_equal(index_ivf_rbq_fs.ntotal, ds.nb)
# Test search still works
search_params = faiss.IVFSearchParameters()
search_params.nprobe = 4
_, I = index_ivf_rbq_fs.search(
ds.get_queries(), 5, params=search_params
)
np.testing.assert_equal(I.shape, (ds.nq, 5))
def test_thread_safety(self):
"""Test parallel operations work correctly via OpenMP
OpenMP parallelization is triggered when n * nprobe > 1000
in compute_LUT (see IndexIVFRaBitQFastScan.cpp line 339).
With nq=300 and nprobe=4: 300 * 4 = 1200 > 1000.
This test verifies:
1. OpenMP threshold is exceeded to trigger parallel execution
2. Thread-safe operations produce correct results
3. No race conditions occur with query_factors_storage
"""
import os
# Verify OpenMP is available
omp_num_threads = os.environ.get("OMP_NUM_THREADS")
if omp_num_threads and int(omp_num_threads) == 1:
# Skip this test if OpenMP is explicitly disabled
return
nlist = 16
ds = datasets.SyntheticDataset(64, 1000, 1000, 300)
quantizer = faiss.IndexFlat(ds.d, faiss.METRIC_L2)
index_ivf_rbq_fs = faiss.IndexIVFRaBitQFastScan(
quantizer, ds.d, nlist, faiss.METRIC_L2
)
index_ivf_rbq_fs.qb = 8
index_ivf_rbq_fs.centered = False
index_ivf_rbq_fs.nprobe = 4
index_ivf_rbq_fs.train(ds.get_train())
index_ivf_rbq_fs.add(ds.get_database())
# Create search parameters
params = faiss.IVFSearchParameters()
params.nprobe = 4
# Search with multiple queries
# n * nprobe = 300 * 4 = 1200 > 1000, triggering OpenMP parallel loop
k = 10
distances, labels = index_ivf_rbq_fs.search(
ds.get_queries(), k, params=params
)
# Basic sanity checks
np.testing.assert_equal(distances.shape, (ds.nq, k))
np.testing.assert_equal(labels.shape, (ds.nq, k))
np.testing.assert_(np.all(distances >= 0))
np.testing.assert_(np.all(labels >= 0))
np.testing.assert_(np.all(labels < ds.nb))
def test_factory_construction(self):
"""Test that IVF index can be constructed via factory method"""
nlist = 16
ds = datasets.SyntheticDataset(64, 500, 500, 20)
# Test IVFRaBitQFastScan factory construction
index = faiss.index_factory(ds.d, f"IVF{nlist},RaBitQfs")
np.testing.assert_(isinstance(index, faiss.IndexIVFRaBitQFastScan))
index.train(ds.get_train())
index.add(ds.get_database())
k = 5
_, I = index.search(ds.get_queries(), k)
np.testing.assert_equal(I.shape, (ds.nq, k))
quantizer = faiss.IndexFlat(ds.d, faiss.METRIC_L2)
index_ivf_rbq = faiss.IndexIVFRaBitQ(
quantizer, ds.d, nlist, faiss.METRIC_L2
)
index_ivf_rbq.train(ds.get_train())
index_ivf_rbq.add(ds.get_database())
_, I_rbq = index_ivf_rbq.search(ds.get_queries(), k)
recall = faiss.eval_intersection(I[:, :k], I_rbq[:, :k])
recall /= (ds.nq * k)
print(f"IVFRaBitQFastScan vs IVFRaBitQ recall: {recall:.3f}")
np.testing.assert_(
recall > 0.95,
f"Recall too low: {recall:.3f} - should be close to baseline"
)
index_custom = faiss.index_factory(
ds.d, f"IVF{nlist},RaBitQfs_64"
)
np.testing.assert_(
isinstance(index_custom, faiss.IndexIVFRaBitQFastScan)
)
np.testing.assert_equal(index_custom.bbs, 64)
def do_test_search_implementation(self, impl):
"""Helper to test a specific search implementation"""
nlist = 32
nprobe = 8
ds = datasets.SyntheticDataset(128, 2048, 2048, 100)
k = 10
d = ds.d
xb = ds.get_database()
xt = ds.get_train()
xq = ds.get_queries()
# Ground truth for evaluation
index_flat = faiss.IndexFlat(d, faiss.METRIC_L2)
index_flat.train(xt)
index_flat.add(xb)
_, I_f = index_flat.search(xq, k)
# Baseline: IndexIVFRaBitQ
quantizer_baseline = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq = faiss.IndexIVFRaBitQ(
quantizer_baseline, d, nlist, faiss.METRIC_L2
)
index_ivf_rbq.qb = 8
index_ivf_rbq.nprobe = nprobe
index_ivf_rbq.train(xt)
index_ivf_rbq.add(xb)
rbq_params = faiss.IVFRaBitQSearchParameters()
rbq_params.nprobe = nprobe
rbq_params.qb = 8
rbq_params.centered = False
_, I_rbq = index_ivf_rbq.search(xq, k, params=rbq_params)
# Evaluate baseline against ground truth
eval_baseline = faiss.eval_intersection(I_rbq[:, :k], I_f[:, :k])
eval_baseline /= ds.nq * k
# Test IndexIVFRaBitQFastScan with specific implementation
quantizer_fs = faiss.IndexFlat(d, faiss.METRIC_L2)
index_ivf_rbq_fs = faiss.IndexIVFRaBitQFastScan(
quantizer_fs, d, nlist, faiss.METRIC_L2, 32
)
index_ivf_rbq_fs.qb = 8
index_ivf_rbq_fs.centered = False
index_ivf_rbq_fs.nprobe = nprobe
index_ivf_rbq_fs.implem = impl
index_ivf_rbq_fs.train(xt)
index_ivf_rbq_fs.add(xb)
# Create search parameters
params = faiss.IVFSearchParameters()
params.nprobe = nprobe
# Perform search
_, I_impl = index_ivf_rbq_fs.search(xq, k, params=params)
# Evaluate against ground truth
eval_impl = faiss.eval_intersection(I_impl[:, :k], I_f[:, :k])
eval_impl /= ds.nq * k
# Basic sanity checks
np.testing.assert_equal(I_impl.shape, (ds.nq, k))
# FastScan should perform similarly to baseline (within 5% gap)
recall_gap = abs(eval_baseline - eval_impl)
np.testing.assert_(
recall_gap < 0.05,
f"Recall gap too large for search_implem_{impl}: "
f"baseline={eval_baseline:.4f}, impl={eval_impl:.4f}, "
f"gap={recall_gap:.4f}"
)
def test_search_implem_10(self):
self.do_test_search_implementation(impl=10)
def test_search_implem_12(self):
self.do_test_search_implementation(impl=12)
def test_search_implem_14(self):
self.do_test_search_implementation(impl=14)
def test_search_with_parameters(self):
"""Test IndexIVFRaBitQFastScan with search_with_parameters
This tests the code path through search_with_parameters which
performs explicit coarse quantization before calling
search_preassigned.
"""
nlist = 64
nprobe = 8
nq = 500
ds = datasets.SyntheticDataset(128, 2048, 2048, nq)
k = 10
d = ds.d
xb = ds.get_database()
xt = ds.get_train()
xq = ds.get_queries()
quantizer = faiss.IndexFlat(d, faiss.METRIC_L2)
index = faiss.IndexIVFRaBitQFastScan(
quantizer, d, nlist, faiss.METRIC_L2, 32
)
index.qb = 8
index.centered = False
index.nprobe = nprobe
index.train(xt)
index.add(xb)
params = faiss.IVFSearchParameters()
params.nprobe = nprobe
distances, labels = faiss.search_with_parameters(index, xq, k, params)
self.assertEqual(distances.shape, (nq, k))
self.assertEqual(labels.shape, (nq, k))
self.assertGreater(np.sum(labels >= 0), 0)
index_flat = faiss.IndexFlat(d, faiss.METRIC_L2)
index_flat.add(xb)
_, gt_labels = index_flat.search(xq, k)
recall = faiss.eval_intersection(labels, gt_labels) / (nq * k)
# With nlist=64 and nprobe=8, recall should be reasonable
self.assertGreater(recall, 0.4)
| TestIVFRaBitQFastScan |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/extra_trace.py | {
"start": 1754,
"end": 1826
} | class ____:
def transform(self, arg):
return arg
| TransformBase |
python | facebook__pyre-check | client/commands/tests/check_test.py | {
"start": 1857,
"end": 11263
} | class ____(testslide.TestCase):
def test_create_check_arguments(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(
root_path, [".pyre", "allows", "blocks", "search", "local/src"]
)
setup.write_configuration_file(
root_path,
{
"only_check_paths": ["allows", "nonexistent"],
"ignore_all_errors": ["blocks", "nonexistent"],
"exclude": ["exclude"],
"extensions": [".ext", "invalid_extension"],
"workers": 42,
"search_path": ["search"],
"optional_search_path": ["nonexistent"],
"strict": True,
},
)
setup.write_configuration_file(
root_path, {"source_directories": ["src"]}, relative="local"
)
check_configuration = frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(
local_configuration="local",
dot_pyre_directory=root_path / ".pyre",
),
root_path,
)
)
self.assertEqual(
check.create_check_arguments(
check_configuration,
command_arguments.CheckArguments(
debug=True,
sequential=False,
show_error_traces=True,
),
),
check.Arguments(
base_arguments=backend_arguments.BaseArguments(
log_path=str(root_path / ".pyre/local"),
global_root=str(root_path),
checked_directory_allowlist=[
str(root_path / "allows"),
str(root_path / "nonexistent"),
],
checked_directory_blocklist=[
str(root_path / "blocks"),
str(root_path / "nonexistent"),
],
debug=True,
excludes=["exclude"],
extensions=[".ext"],
relative_local_root="local",
number_of_workers=42,
parallel=True,
python_version=check_configuration.get_python_version(),
system_platform=check_configuration.get_system_platform(),
search_paths=[
configuration.search_path.SimpleElement(
str(root_path / "search")
)
],
source_paths=backend_arguments.SimpleSourcePath(
[
configuration.search_path.SimpleElement(
str(root_path / "local/src")
)
]
),
),
show_error_traces=True,
strict=True,
),
)
def test_create_check_arguments_artifact_root_no_conflict(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
setup.ensure_directories_exists(root_path, [".pyre", "project"])
setup.ensure_files_exist(root_path, ["project/.buckconfig"])
setup.write_configuration_file(
root_path / "project",
{
"targets": ["//foo:bar"],
},
)
arguments = check.create_check_arguments(
frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(
dot_pyre_directory=root_path / ".pyre"
),
root_path / "project",
)
),
command_arguments.CheckArguments(),
)
# Make sure we are not overwriting the artifact root for server command
self.assertNotEqual(
arguments.base_arguments.source_paths.serialize()["artifact_root"],
root_path / ".pyre" / backend_arguments.SERVER_ARTIFACT_ROOT_NAME,
)
def test_create_check_arguments_logging(self) -> None:
with tempfile.TemporaryDirectory() as root:
root_path = Path(root).resolve()
log_path = root_path / ".pyre"
logger_path = root_path / "logger"
setup.ensure_directories_exists(root_path, [".pyre", "src"])
setup.ensure_files_exist(root_path, ["logger"])
setup.write_configuration_file(
root_path,
{"source_directories": ["src"], "logger": str(logger_path)},
)
arguments = check.create_check_arguments(
frontend_configuration.OpenSource(
configuration.create_configuration(
command_arguments.CommandArguments(dot_pyre_directory=log_path),
root_path,
)
),
command_arguments.CheckArguments(
logging_sections="foo,bar,-baz",
noninteractive=True,
enable_profiling=True,
enable_memory_profiling=True,
log_identifier="derp",
),
)
self.assertListEqual(
list(arguments.additional_logging_sections),
["foo", "bar", "-baz", "-progress"],
)
self.assertEqual(
arguments.base_arguments.profiling_output,
backend_arguments.get_profiling_log_path(log_path),
)
self.assertEqual(
arguments.base_arguments.memory_profiling_output,
backend_arguments.get_profiling_log_path(log_path),
)
self.assertEqual(
arguments.base_arguments.remote_logging,
backend_arguments.RemoteLogging(
logger=str(logger_path), identifier="derp"
),
)
def test_parse_response(self) -> None:
def assert_parsed(response: str, expected: Iterable[error.Error]) -> None:
self.assertListEqual(
check.parse_type_error_response(response), list(expected)
)
def assert_not_parsed(response: str) -> None:
with self.assertRaises(check.InvalidCheckResponse):
check.parse_type_error_response(response)
assert_not_parsed("derp")
assert_not_parsed("[]")
assert_not_parsed('["Error"]')
assert_not_parsed('["TypeErrors"]')
assert_not_parsed('["TypeErrors", "derp"]')
assert_not_parsed('["TypeErrors", {}]')
assert_parsed("{}", [])
assert_parsed('{"errors": []}', [])
assert_parsed('{"derp": 42}', [])
assert_not_parsed('{"errors": ["derp"]}')
assert_not_parsed('{"errors": [{}]}')
assert_parsed(
json.dumps(
{
"errors": [
{
"line": 1,
"column": 1,
"stop_line": 3,
"stop_column": 3,
"path": "test.py",
"code": 42,
"name": "Fake name",
"description": "Fake description",
},
{
"line": 2,
"column": 2,
"stop_line": 4,
"stop_column": 4,
"path": "test.py",
"code": 43,
"name": "Fake name 2",
"description": "Fake description 2",
"concise_description": "Concise description 2",
},
],
}
),
expected=[
error.Error(
line=1,
column=1,
stop_line=3,
stop_column=3,
path=Path("test.py"),
code=42,
name="Fake name",
description="Fake description",
),
error.Error(
line=2,
column=2,
stop_line=4,
stop_column=4,
path=Path("test.py"),
code=43,
name="Fake name 2",
description="Fake description 2",
concise_description="Concise description 2",
),
],
)
| CheckTest |
python | numpy__numpy | numpy/_core/arrayprint.py | {
"start": 52045,
"end": 53036
} | class ____(_TimelikeFormat):
def __init__(self, x, unit=None, timezone=None, casting='same_kind',
legacy=False):
# Get the unit from the dtype
if unit is None:
if x.dtype.kind == 'M':
unit = datetime_data(x.dtype)[0]
else:
unit = 's'
if timezone is None:
timezone = 'naive'
self.timezone = timezone
self.unit = unit
self.casting = casting
self.legacy = legacy
# must be called after the above are configured
super().__init__(x)
def __call__(self, x):
if self.legacy <= 113:
return self._format_non_nat(x)
return super().__call__(x)
def _format_non_nat(self, x):
return "'%s'" % datetime_as_string(x,
unit=self.unit,
timezone=self.timezone,
casting=self.casting)
| DatetimeFormat |
python | getsentry__sentry | tests/sentry/core/endpoints/test_organization_member_team_details.py | {
"start": 10744,
"end": 16120
} | class ____(CreateOrganizationMemberTeamTest):
@cached_property
def org(self):
# rerun create org member tests with closed membership
return self.create_organization(owner=self.user, flags=0)
def test_member_must_request_access_to_join_team(self) -> None:
self.login_as(self.member)
self.get_success_response(
self.org.slug, self.member.id, self.team.slug, status_code=status.HTTP_202_ACCEPTED
)
assert not OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.member
).exists()
assert OrganizationAccessRequest.objects.filter(
team=self.team, member=self.member, requester_id=None
).exists()
def test_admin_must_request_access_to_join_team(self) -> None:
self.login_as(self.admin)
self.get_success_response(
self.org.slug, self.admin.id, self.team.slug, status_code=status.HTTP_202_ACCEPTED
)
assert not OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.admin
).exists()
assert OrganizationAccessRequest.objects.filter(
team=self.team, member=self.admin, requester_id=None
).exists()
def test_member_on_team_must_request_access_to_add_member_to_team(self) -> None:
self.login_as(self.member_on_team)
self.get_success_response(
self.org.slug, self.member.id, self.team.slug, status_code=status.HTTP_202_ACCEPTED
)
assert not OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.member
).exists()
assert OrganizationAccessRequest.objects.filter(
team=self.team, member=self.member, requester_id=self.member_on_team.user_id
).exists()
def test_admin_must_request_access_to_add_member_to_team(self) -> None:
# admin not in the team
self.login_as(self.admin)
self.get_success_response(
self.org.slug, self.member.id, self.team.slug, status_code=status.HTTP_202_ACCEPTED
)
assert not OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.member
).exists()
assert OrganizationAccessRequest.objects.filter(
team=self.team, member=self.member, requester_id=self.admin.user_id
).exists()
@with_feature("organizations:team-roles")
def test_team_admin_can_add_member(self) -> None:
self.login_as(self.team_admin)
self.get_success_response(
self.org.slug, self.member.id, self.team.slug, status_code=status.HTTP_201_CREATED
)
assert OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.member
).exists()
@with_feature("organizations:team-roles")
def test_team_admin_can_add_member_using_user_token(self) -> None:
self.login_as(self.team_admin)
# Team admins needs both org:read and team:write to pass the permissions checks when open
# membership is off
token = self.create_user_auth_token(
user=self.team_admin_user, scope_list=["org:read", "team:write"]
)
self.get_success_response(
self.org.slug,
self.member.id,
self.team.slug,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {token.token}"},
status_code=status.HTTP_201_CREATED,
)
assert OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.member
).exists()
def test_integration_token_needs_elevated_permissions(self) -> None:
internal_integration = self.create_internal_integration(
name="Internal App", organization=self.org, scopes=["org:read"]
)
# Integration tokens with org:read should generate an access request when open membership is off
integration_token = self.create_internal_integration_token(
user=self.user, internal_integration=internal_integration
)
self.get_success_response(
self.org.slug,
self.member.id,
self.team.slug,
extra_headers={"HTTP_AUTHORIZATION": f"Bearer {integration_token.token}"},
status_code=status.HTTP_202_ACCEPTED,
)
assert not OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.member
).exists()
assert OrganizationAccessRequest.objects.filter(
team=self.team,
member=self.member,
).exists()
def test_multiple_of_the_same_access_request(self) -> None:
self.login_as(self.member)
self.get_success_response(
self.org.slug, self.admin.id, self.team.slug, status_code=status.HTTP_202_ACCEPTED
)
self.login_as(self.member_on_team)
self.get_success_response(
self.org.slug, self.admin.id, self.team.slug, status_code=status.HTTP_202_ACCEPTED
)
assert not OrganizationMemberTeam.objects.filter(
team=self.team, organizationmember=self.admin
).exists()
oar = OrganizationAccessRequest.objects.get(team=self.team, member=self.admin)
assert oar.requester_id == self.member.user_id
| CreateWithClosedMembershipTest |
python | joke2k__faker | faker/providers/person/hi_IN/__init__.py | {
"start": 44,
"end": 9815
} | class ____(PersonProvider):
formats_male = (
"{{first_name_male}} {{last_name}}",
"{{prefix_male}} {{first_name_male}} {{last_name}}",
"{{first_name_male}} {{last_name}}{{suffix}}",
"{{prefix}} {{first_name_male}} {{last_name}}",
)
formats_female = (
"{{first_name_female}} {{last_name}}",
"{{prefix_female}} {{first_name_female}} {{last_name}}",
"{{first_name_female}} {{last_name}}{{suffix}}",
"{{prefix}} {{first_name_female}} {{last_name}}",
)
formats = (
"{{first_name}} {{last_name}}",
"{{prefix}} {{first_name}} {{last_name}}",
"{{first_name}} {{last_name}}{{suffix}}",
)
# http://www.20000-names.com/male_hindi_names.htm
first_names_male = (
"अभय",
"आदित्य",
"अजित",
"आकाङ्क्षा",
"अकबर",
"अखिल",
"अमर",
"अमित",
"अमृत",
"आनन्द",
"अनन्त",
"अनिल",
"अनिरुद्ध",
"अनिश",
"अंकुर",
"अनुज",
"अनुपम",
"अरविन्द",
"अर्जुन",
"अरुण",
"अरुणा",
"असीम",
"अशोक",
"बल",
"बलदेव",
"बलराम",
"भारत",
"ब्रह्मा",
"बृजेश",
"चण्ड",
"चन्दना",
"चन्द्रकान्त",
"दामोदर",
"दर्शन",
"दयाराम",
"देवदान",
"दीपक",
"देवदान",
"देवदास",
"देवराज",
"धनञ्जय",
"धवल",
"दिलीप",
"दिनेश",
"दीपक",
"दिलीप",
"गणेश",
"गौतम",
"गोपाल",
"गोतम",
"गोविंदा",
"गुलज़ार",
"हनुमान्",
"हरेन्द्र",
"हरि",
"हरीश",
"हर्श",
"हर्शद",
"हर्शल",
"इला",
"इन्द्र",
"इन्द्रजित",
"ईश",
"जगन्नाथ",
"जगदीश",
"जगजीत",
"जयदेव",
"ज़स्विन्देर्",
"जय",
"जयन्त",
"जयेन्द्र",
"जितेन्द्र",
"जौहर",
"ज्योतिष",
"कैलाश",
"कालिदास",
"काम",
"कमल",
"कम्बोज",
"कपिल",
"कर्ण",
"ख़ान",
"किरण",
"कशोर",
"कृष्ण",
"कुमार",
"कुणाल",
"लक्ष्मण",
"लाल",
"ललित",
"लोचन",
"माधव",
"मधुकर",
"महात्मा",
"महावीर",
"महेन्द्रा",
"मानदीप",
"मनीश",
"मणि",
"मणीन्द्र",
"मनीश",
"मञ्जुनाथ",
"मोहन",
"मुकेश",
"नंद",
"नारायण",
"नरेन्द्र",
"नवीन",
"निखिल",
"नीरव",
"िनशा",
"ओम",
"पद्म",
"पल्लव",
"पीताम्बर",
"प्रभाकर",
"प्रभात",
"प्रभु",
"प्रबोध",
"प्रदीप",
"प्रकाश",
"प्रमोद",
"प्रणव",
"प्रणय",
"प्रसाद",
"प्रसन्न",
"प्रताप",
"प्रेम",
"पुरुषोत्तम",
"रघु",
"राहुल",
"राज",
"राजन",
"रजनीकांत",
"राजीव",
"राजेन्द्र",
"राजेश",
"राजीव",
"राकेश",
"राम",
"रामचन्द्र",
"रामकृष्ण",
"रञ्जित",
"रतन",
"रत्नम",
"रावण",
"रवि",
"ऋषि",
"रोहन",
"सचिन",
"संदीप",
"शनि",
"संजय",
"संजित",
"संजीव",
"शंकर",
"सरल",
"सतीश",
"सवितृ",
"शेखर",
"सेठ",
"शनि",
"शंकर",
"शङ्कर",
"शंतनु",
"शर्म",
"शशि",
"शेखर",
"शेष",
"शिव",
"श्रेष्ठ",
"श्रीपति",
"श्याम",
"श्यामल",
"सिद्धार्थ",
"सिकन्दर",
"सोहेल",
"सुभाष",
"सुदर्शन",
"सुधीर",
"सुमन",
"सुमन्त्र",
"सुन्दर",
"सुनील",
"सुरज",
"सुरेन्द्र",
"सुरेश",
"सूर्य",
"सुशील",
"स्वपन",
"स्वप्निल",
"स्वर्ण",
"उत्तम",
"वसन्त",
"वासिष्ठ",
"भरत",
"विजय",
"विजया",
"विक्रम",
"विमल",
"विनय",
"विपिन",
"विपुल",
"विशाल",
"विष्णु",
"विवेक",
"यश",
)
# http://www.20000-names.com/female_hindi_names.htm
first_names_female = (
"आभा",
"अभिलाषा",
"अदिती",
"ऐश्वर्या",
"आकाङ्क्षा",
"अमला",
"अमिता",
"अमृता",
"आनन्दा",
"अनिला",
"अणिमा",
"अंकिता",
"अनुष्का",
"अनुजा",
"अर्चना",
"अरुंधती",
"आशा",
"अवनी",
"अवन्ती",
"बल",
"भरत",
"चण्डा",
"चन्दना",
"चन्द्रकान्ता",
"चेतना",
"दमयंती",
"दर्शना",
"दीपाली",
"दीप्ति",
"देवी",
"दीपाली",
"दीप्ति",
"दिव्या",
"दुर्गा",
"एषा",
"गौहर",
"गौरी",
"गीता",
"गोपीनाथ",
"गुलज़ार",
"इला",
"इन्दिरा",
"इन्द्रजित",
"इन्दु",
"ज़स्विन्देर्",
"जया",
"जयन्ती",
"ज्योत्सना",
"ज्योत्स्ना",
"कैलाश",
"कला",
"काली",
"कल्पना",
"कमला",
"कान्ता",
"कान्ती",
"करिश्मा",
"काशी",
"कौशल्या",
"िकशोरी",
"क्षितिज",
"कुमारी",
"कुंती",
"लक्ष्मी",
"लता",
"लावण्या",
"लक्ष्मी",
"लीला",
"लीलावती",
"लीला",
"लीला",
"लीलावती",
"लीना",
"माधवी",
"मधु",
"मधुर",
"माला",
"मालती",
"मनीषा",
"मञ्जु",
"मञ्जुला",
"मञ्जूषा",
"माया",
"मीरा",
"मोहना",
"मोहिनी",
"मुक्ता",
"नेहा",
"निखिला",
"निशा",
"नित्य",
"पद्म",
"पद्मावती",
"पद्मिनी",
"पार्वती",
"परवीन",
"पूर्णिमा",
"प्रतिभा",
"प्रतिमा",
"प्रेमा",
"प्रिया",
"पूर्णिमा",
"पुष्पा",
"रचना",
"राधा",
"रजनी",
"राज्य",
"रानी",
"रश्मी",
"रति",
"रत्न",
"रेशमी",
"रीतिका",
"रिया",
"रोहना",
"रुक्मिणी",
"रुपिन्द्र",
"संजना",
"सरला",
"सरस्वती",
"सारिका",
"सती",
"सावित्री",
"सीमा",
"सीता",
"शक्ति",
"शकुन्तला",
"शान्ता",
"शान्ती",
"शर्मिला",
"शशी",
"शीला",
"शिवाली",
"शोभा",
"श्यामा",
"श्यामला",
"सीमा",
"सीता",
"सितारा",
"सोनल",
"श्री",
"सुदर्शना",
"सुलभा",
"सुमना",
"सुमती",
"सुनीता",
"सुनीती",
"सुशीला",
"स्वर्ण",
"तारा",
"तृष्णा",
"उमा",
"उषा",
"वसन्ता",
"विद्या",
"विजया",
"विमला",
)
first_names = first_names_male + first_names_female
# https://blogs.transparent.com/hindi/common-surnames-in-india/
last_names = (
# Common Surnames in North India (Delhi, Haryana, Punjab,etc)
"शर्मा",
"भट",
"वर्मा",
"कुमार",
"गुप्ता",
"मल्होत्रा",
"भटनागर",
"सक्सेना",
"कपूर",
"सिंह",
"महरा",
"चोपरा",
"सरीन",
"मालिक",
"सैनी",
"जैन",
"कौल",
"खत्री",
"गोयल",
"तिवारी",
"भरद्वाज",
"चोपरा",
"प्रसाद",
"आचार्य",
"अगरवाल",
"अहलूवालिया",
"टंडन",
"आहूजा",
"अरोरा",
# Common Surnames in East India: (Bengal, Orrisa, etc.)
"चटर्जी",
"चतुर्वेदी",
"सेन",
"बोस",
"सेनगुप्ता",
"दास",
"दासगुप्ता",
"मुख़र्जी",
"दुत्ता",
"बनर्जी",
"चक्रवर्ती",
"भट्टाचार्य",
"घोष",
"मित्रा",
"गुहा",
"सरकार",
"साहा",
"रॉय",
"चोधरी",
"रॉय चौधरी",
"मजूमदार",
"मंडल",
"मैती",
"कलिता",
"हजारिका",
"नाथ",
"बुरुाह",
"थापा",
"गुरुंग",
"राय",
"प्रधान",
"तमांग",
"छेत्री",
# Common Surnames in South India (Karnataka, Tamil Nadu, Kerala, etc.)
"नायर",
"मेनन",
"पिल्लई",
"वेंकटएसन",
"बलासुब्रमानियम",
"राव",
"जयरामन",
"सुब्रमण्यम",
"रंगन",
"रंगराजन",
"नारायण",
"रेड्डी",
# Common Surnames in Central India (Bihar/ Uttar Pradesh, Madhya Pradesh, etc)
"सिंह",
"द्विवेदी",
"मिश्रा",
"त्रिवेदी",
"झा",
"शुक्ला",
"यादव",
"सिन्हा",
"पाण्डेय",
"झादव",
"जेटली",
"चौहान",
"जोशी",
"मिस्त्री",
"खान",
"श्रीवास्तव",
# Common Surnames in West India (Maharashtra, Gujarat, Goa etc)
"शाह",
"देशपांडे",
"गावडे",
"कदम",
"ताम्बे",
"मेहता",
"पटेल",
"पाटिल",
"पवार",
"चवन",
"डी’सोउज़ा",
"लोबो",
"रोद्रिगुएस",
"डी’कोस्टा",
)
prefixes_male = ("श्री", "श्रीमान")
prefixes_female = ("श्री", "श्रीमती")
prefixes = (
"माननीय",
"आदरसूचक",
"सम्मानसूचक",
"संमानित",
"आदरवाचक",
"सम्मानात्मक",
)
suffixes = ("जी",)
| Provider |
python | bottlepy__bottle | test/test_wsgi.py | {
"start": 7443,
"end": 8534
} | class ____(ServerTestBase):
""" Test that close-able return types are actually closed """
def setUp(self):
super().setUp()
def closeable(self, body=["OK"]):
self.closeable = CloseableBody(body)
def assertClosed(self, body, open_args=None):
closeable = CloseableBody(body)
self.app.route("/close")(lambda: closeable)
try:
self.urlopen("/close", **(open_args or {}))
finally:
self.assertTrue(len(closeable.close_events) > 0, "Response object was not closed")
def test_direct(self):
self.assertClosed(["OK"])
self.assertClosed([b"OK"])
self.assertClosed("OK")
self.assertClosed(b"OK")
self.assertClosed(["OK" for ok in range(10)])
self.assertClosed([b"OK" for ok in range(10)])
self.assertClosed(["OK" for ok in range(0)])
self.assertClosed(5) # Internal server error in Bottle._cast
try:
self.assertClosed(["CRASH"], open_args={'crash': 'start_response'})
except RuntimeError:
pass
| TestCloseable |
python | pandas-dev__pandas | pandas/tests/tslibs/test_conversion.py | {
"start": 3893,
"end": 4696
} | class ____(datetime):
pass
@pytest.mark.parametrize(
"dt, expected",
[
pytest.param(
Timestamp("2000-01-01"),
Timestamp("2000-01-01", tz=timezone.utc),
id="timestamp",
),
pytest.param(
datetime(2000, 1, 1),
datetime(2000, 1, 1, tzinfo=timezone.utc),
id="datetime",
),
pytest.param(
SubDatetime(2000, 1, 1),
SubDatetime(2000, 1, 1, tzinfo=timezone.utc),
id="subclassed_datetime",
),
],
)
def test_localize_pydatetime_dt_types(dt, expected):
# GH 25851
# ensure that subclassed datetime works with
# localize_pydatetime
result = conversion.localize_pydatetime(dt, timezone.utc)
assert result == expected
| SubDatetime |
python | psf__requests | src/requests/exceptions.py | {
"start": 2671,
"end": 2763
} | class ____(RequestException):
"""A valid URL is required to make a request."""
| URLRequired |
python | kubernetes-client__python | kubernetes/client/models/v1alpha1_pod_certificate_request_list.py | {
"start": 383,
"end": 7334
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'api_version': 'str',
'items': 'list[V1alpha1PodCertificateRequest]',
'kind': 'str',
'metadata': 'V1ListMeta'
}
attribute_map = {
'api_version': 'apiVersion',
'items': 'items',
'kind': 'kind',
'metadata': 'metadata'
}
def __init__(self, api_version=None, items=None, kind=None, metadata=None, local_vars_configuration=None): # noqa: E501
"""V1alpha1PodCertificateRequestList - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._api_version = None
self._items = None
self._kind = None
self._metadata = None
self.discriminator = None
if api_version is not None:
self.api_version = api_version
self.items = items
if kind is not None:
self.kind = kind
if metadata is not None:
self.metadata = metadata
@property
def api_version(self):
"""Gets the api_version of this V1alpha1PodCertificateRequestList. # noqa: E501
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:return: The api_version of this V1alpha1PodCertificateRequestList. # noqa: E501
:rtype: str
"""
return self._api_version
@api_version.setter
def api_version(self, api_version):
"""Sets the api_version of this V1alpha1PodCertificateRequestList.
APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources # noqa: E501
:param api_version: The api_version of this V1alpha1PodCertificateRequestList. # noqa: E501
:type: str
"""
self._api_version = api_version
@property
def items(self):
"""Gets the items of this V1alpha1PodCertificateRequestList. # noqa: E501
items is a collection of PodCertificateRequest objects # noqa: E501
:return: The items of this V1alpha1PodCertificateRequestList. # noqa: E501
:rtype: list[V1alpha1PodCertificateRequest]
"""
return self._items
@items.setter
def items(self, items):
"""Sets the items of this V1alpha1PodCertificateRequestList.
items is a collection of PodCertificateRequest objects # noqa: E501
:param items: The items of this V1alpha1PodCertificateRequestList. # noqa: E501
:type: list[V1alpha1PodCertificateRequest]
"""
if self.local_vars_configuration.client_side_validation and items is None: # noqa: E501
raise ValueError("Invalid value for `items`, must not be `None`") # noqa: E501
self._items = items
@property
def kind(self):
"""Gets the kind of this V1alpha1PodCertificateRequestList. # noqa: E501
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:return: The kind of this V1alpha1PodCertificateRequestList. # noqa: E501
:rtype: str
"""
return self._kind
@kind.setter
def kind(self, kind):
"""Sets the kind of this V1alpha1PodCertificateRequestList.
Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds # noqa: E501
:param kind: The kind of this V1alpha1PodCertificateRequestList. # noqa: E501
:type: str
"""
self._kind = kind
@property
def metadata(self):
"""Gets the metadata of this V1alpha1PodCertificateRequestList. # noqa: E501
:return: The metadata of this V1alpha1PodCertificateRequestList. # noqa: E501
:rtype: V1ListMeta
"""
return self._metadata
@metadata.setter
def metadata(self, metadata):
"""Sets the metadata of this V1alpha1PodCertificateRequestList.
:param metadata: The metadata of this V1alpha1PodCertificateRequestList. # noqa: E501
:type: V1ListMeta
"""
self._metadata = metadata
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1alpha1PodCertificateRequestList):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1alpha1PodCertificateRequestList):
return True
return self.to_dict() != other.to_dict()
| V1alpha1PodCertificateRequestList |
python | jazzband__django-simple-history | simple_history/template_utils.py | {
"start": 6325,
"end": 9518
} | class ____:
"""
A class grouping functions and settings related to displaying the textual
difference between two (or more) objects.
``common_shorten_repr()`` is the main method for this.
The code is based on
https://github.com/python/cpython/blob/v3.12.0/Lib/unittest/util.py#L8-L52.
"""
def __init__(
self,
*,
max_length=80,
placeholder_len=12,
min_begin_len=5,
min_end_len=5,
min_common_len=5,
):
self.max_length = max_length
self.placeholder_len = placeholder_len
self.min_begin_len = min_begin_len
self.min_end_len = min_end_len
self.min_common_len = min_common_len
self.min_diff_len = max_length - (
min_begin_len
+ placeholder_len
+ min_common_len
+ placeholder_len
+ min_end_len
)
assert self.min_diff_len >= 0 # nosec
def common_shorten_repr(self, *args: Any) -> tuple[str, ...]:
"""
Returns ``args`` with each element converted into a string representation.
If any of the strings are longer than ``self.max_length``, they're all shortened
so that the first differences between the strings (after a potential common
prefix in all of them) are lined up.
"""
args = tuple(map(conditional_str, args))
max_len = max(map(len, args))
if max_len <= self.max_length:
return args
prefix = commonprefix(args)
prefix_len = len(prefix)
common_len = self.max_length - (
max_len - prefix_len + self.min_begin_len + self.placeholder_len
)
if common_len > self.min_common_len:
assert (
self.min_begin_len
+ self.placeholder_len
+ self.min_common_len
+ (max_len - prefix_len)
< self.max_length
) # nosec
prefix = self.shorten(prefix, self.min_begin_len, common_len)
return tuple(f"{prefix}{s[prefix_len:]}" for s in args)
prefix = self.shorten(prefix, self.min_begin_len, self.min_common_len)
return tuple(
prefix + self.shorten(s[prefix_len:], self.min_diff_len, self.min_end_len)
for s in args
)
def shorten(self, s: str, prefix_len: int, suffix_len: int) -> str:
skip = len(s) - prefix_len - suffix_len
if skip > self.placeholder_len:
suffix_index = len(s) - suffix_len
s = self.shortened_str(s[:prefix_len], skip, s[suffix_index:])
return s
def shortened_str(self, prefix: str, num_skipped_chars: int, suffix: str) -> str:
"""
Return a shortened version of the string representation of one of the args
passed to ``common_shorten_repr()``.
This should be in the format ``f"{prefix}{skip_str}{suffix}"``, where
``skip_str`` is a string indicating how many characters (``num_skipped_chars``)
of the string representation were skipped between ``prefix`` and ``suffix``.
"""
return f"{prefix}[{num_skipped_chars:d} chars]{suffix}"
| ObjDiffDisplay |
python | streamlit__streamlit | lib/tests/streamlit/elements/video_test.py | {
"start": 1147,
"end": 9079
} | class ____(DeltaGeneratorTestCase):
def test_st_video_from_bytes(self):
"""Test st.video using fake bytes data."""
# Make up some bytes to pretend we have a video. The server should not vet
# the video before sending it to the browser.
fake_video_data = b"\x12\x10\x35\x44\x55\x66"
st.video(fake_video_data)
el = self.get_delta_from_queue().new_element
# locate resultant file in InMemoryFileManager and test its properties.
file_id = _calculate_file_id(fake_video_data, "video/mp4")
media_file = self.media_file_storage.get_file(file_id)
assert media_file is not None
assert media_file.mimetype == "video/mp4"
assert self.media_file_storage.get_url(file_id) == el.video.url
def test_st_video_from_url(self):
"""We can pass a URL directly to st.video"""
some_url = "http://www.marmosetcare.com/video/in-the-wild/intro.webm"
st.video(some_url)
el = self.get_delta_from_queue().new_element
assert el.video.url == some_url
def test_youtube_urls_transformed_to_embed_links(self):
"""Youtube URLs should be transformed into embed links."""
yt_urls = (
"https://youtu.be/_T8LGqJtuGc",
"https://www.youtube.com/watch?v=kmfC-i9WgH0",
"https://www.youtube.com/embed/sSn4e1lLVpA",
"https://youtube.com/e/0TSXM-BGqHU",
"https://youtube.com/v/OIQskkX_DK0",
# HTTP should also work correctly
"http://youtu.be/4sPnOqeUDmk",
"http://www.youtube.com/embed/92jUAXBmZyU",
)
yt_embeds = (
"https://www.youtube.com/embed/_T8LGqJtuGc",
"https://www.youtube.com/embed/kmfC-i9WgH0",
"https://www.youtube.com/embed/sSn4e1lLVpA",
"https://www.youtube.com/embed/0TSXM-BGqHU",
"https://www.youtube.com/embed/OIQskkX_DK0",
"https://www.youtube.com/embed/4sPnOqeUDmk",
"https://www.youtube.com/embed/92jUAXBmZyU",
)
# url should be transformed into an embed link (or left alone).
for x in range(len(yt_urls)):
st.video(yt_urls[x])
el = self.get_delta_from_queue().new_element
assert el.video.url == yt_embeds[x]
def test_st_video_raises_on_bad_filename(self):
"""A non-URL string is assumed to be a filename. A file we can't
open will result in an error.
"""
with pytest.raises(MediaFileStorageError):
st.video("not/a/real/file")
def test_st_video_from_none(self):
"""st.video(None) is not an error."""
st.video(None)
el = self.get_delta_from_queue().new_element
assert el.video.url == ""
def test_st_video_other_inputs(self):
"""Test that our other data types don't result in an error."""
st.video(b"bytes_data")
st.video(b"str_data")
st.video(BytesIO(b"bytesio_data"))
st.video(np.array([0, 1, 2, 3]))
def test_st_video_options(self):
"""Test st.video with options."""
fake_video_data = b"\x11\x22\x33\x44\x55\x66"
st.video(
fake_video_data,
format="video/mp4",
start_time=10,
end_time=18,
loop=True,
autoplay=True,
muted=True,
)
el = self.get_delta_from_queue().new_element
assert el.video.start_time == 10
assert el.video.end_time == 18
assert el.video.loop
assert el.video.autoplay
assert el.video.muted
assert el.video.url.startswith(MEDIA_ENDPOINT)
assert _calculate_file_id(fake_video_data, "video/mp4") in el.video.url
def test_st_video_just_data(self):
"""Test st.video with just data specified."""
fake_video_data = b"\x11\x22\x33\x44\x55\x66"
st.video(fake_video_data)
el = self.get_delta_from_queue().new_element
assert el.video.start_time == 0
assert el.video.end_time == 0
assert not el.video.loop
assert not el.video.autoplay
assert not el.video.muted
assert el.video.url.startswith(MEDIA_ENDPOINT)
assert _calculate_file_id(fake_video_data, "video/mp4") in el.video.url
def test_st_video_subtitles(self):
"""Test st.video with subtitles."""
fake_video_data = b"\x11\x22\x33\x44\x55\x66"
fake_subtitle_data = b"WEBVTT\n\n\n1\n00:01:47.250 --> 00:01:50.500\n`hello."
st.video(fake_video_data, subtitles=fake_subtitle_data)
el = self.get_delta_from_queue().new_element
assert el.video.url.startswith(MEDIA_ENDPOINT)
assert _calculate_file_id(fake_video_data, "video/mp4") in el.video.url
expected_subtitle_url = _calculate_file_id(
fake_subtitle_data,
"text/vtt",
filename=f"{calc_md5(b'default')}.vtt",
)
assert expected_subtitle_url in el.video.subtitles[0].url
def test_st_video_empty_subtitles(self):
"""Test st.video with subtitles, empty subtitle label, content allowed."""
fake_video_data = b"\x11\x22\x33\x44\x55\x66"
fake_subtitle_data = b"WEBVTT\n\n\n1\n00:01:47.250 --> 00:01:50.500\n`hello."
st.video(
fake_video_data,
subtitles={
"": "",
"English": fake_subtitle_data,
},
)
el = self.get_delta_from_queue().new_element
assert el.video.url.startswith(MEDIA_ENDPOINT)
assert _calculate_file_id(fake_video_data, "video/mp4") in el.video.url
expected_empty_subtitle_url = _calculate_file_id(
b"",
"text/vtt",
filename=f"{calc_md5(b'')}.vtt",
)
expected_english_subtitle_url = _calculate_file_id(
fake_subtitle_data,
"text/vtt",
filename=f"{calc_md5(b'English')}.vtt",
)
assert expected_empty_subtitle_url in el.video.subtitles[0].url
assert expected_english_subtitle_url in el.video.subtitles[1].url
def test_st_video_subtitles_path(self):
fake_video_data = b"\x11\x22\x33\x44\x55\x66"
fake_sub_content = b"WEBVTT\n\n\n1\n00:01:47.250 --> 00:01:50.500\n`hello."
with NamedTemporaryFile(suffix=".vtt", mode="wb") as tmp_file:
p = Path(tmp_file.name)
tmp_file.write(fake_sub_content)
tmp_file.flush()
st.video(fake_video_data, subtitles=p)
expected_english_subtitle_url = _calculate_file_id(
fake_sub_content,
"text/vtt",
filename=f"{calc_md5(b'default')}.vtt",
)
el = self.get_delta_from_queue().new_element
assert expected_english_subtitle_url in el.video.subtitles[0].url
def test_singe_subtitle_exception(self):
"""Test that an error is raised if invalid subtitles is provided."""
fake_video_data = b"\x11\x22\x33\x44\x55\x66"
with pytest.raises(StreamlitAPIException) as e:
st.video(fake_video_data, subtitles="invalid_subtitles")
assert str(e.value) == "Failed to process the provided subtitle: default"
def test_dict_subtitle_video_exception(self):
"""Test that an error is raised if invalid subtitles in dict is provided."""
fake_video_data = b"\x11\x22\x33\x44\x55\x66"
fake_sub_content = b"WEBVTT\n\n\n1\n00:01:47.250 --> 00:01:50.500\n`hello."
with pytest.raises(StreamlitAPIException) as e:
st.video(
fake_video_data,
subtitles={
"English": fake_sub_content,
"": "", # empty subtitle label and value are also valid
"Martian": "invalid_subtitles",
},
)
assert str(e.value) == "Failed to process the provided subtitle: Martian"
| VideoTest |
python | crytic__slither | slither/detectors/statements/unary.py | {
"start": 1253,
"end": 1766
} | class ____(ExpressionVisitor):
def __init__(self, expression: Expression) -> None:
self.result: bool = False
super().__init__(expression)
def _post_unary_operation(self, expression: UnaryOperation) -> None:
if expression.type == UnaryOperationType.PLUS_PRE:
# This is defined in ExpressionVisitor but pylint
# Seems to think its not
# pylint: disable=attribute-defined-outside-init
self.result = True
| InvalidUnaryStateVariableDetector |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 71524,
"end": 71868
} | class ____(str, Enum):
"""
If used, include weight modification, which will be applied to sparse vectors at query time: None - no modification (default) Idf - inverse document frequency, based on statistics of the collection
"""
def __str__(self) -> str:
return str(self.value)
NONE = "none"
IDF = "idf"
| Modifier |
python | pytorch__pytorch | test/export/test_lift_unlift.py | {
"start": 649,
"end": 4677
} | class ____:
def __init__(self) -> None:
self.graph = torch.fx.Graph()
self.nodes = {}
self.values = {}
self.nn_module_stack_key: dict[str, int] = {}
self.latest_id = 0
self.input_to_kind: dict[torch.fx.Node, InputKind] = {}
def input(self, name: str, value: torch.Tensor, kind: InputKind):
node = self.graph.placeholder(name)
node.meta["val"] = value
self.nodes[name] = node
self.values[name] = value
self.input_to_kind[node] = kind
def add(self, x: str, y: str, out: str, module_fqn: str = ""):
node = self.graph.create_node(
"call_function",
torch.ops.aten.add.Tensor,
(self.nodes[x], self.nodes[y]),
name=out,
)
self.values[out] = self.values[x] + self.values[y]
node.meta["val"] = self.values[out]
node.meta["nn_module_stack"] = self.create_nn_module_stack(module_fqn)
self.nodes[out] = node
def call_function(self, target, args, out: str, module_fqn: str = ""):
arg_nodes = tuple(self.nodes[arg] for arg in args)
arg_values = tuple(self.values[arg] for arg in args)
node = self.graph.create_node(
"call_function",
target,
arg_nodes,
name=out,
)
self.values[out] = target(*arg_values)
node.meta["val"] = self.values[out]
node.meta["nn_module_stack"] = self.create_nn_module_stack(module_fqn)
self.nodes[out] = node
def constant(
self, name: str, value: Any, target: Optional[str] = None, module_fqn: str = ""
):
if target is None:
target = name
node = self.graph.get_attr(target)
node.meta["val"] = value
node.meta["nn_module_stack"] = self.create_nn_module_stack(module_fqn)
self.nodes[name] = node
self.values[name] = value
def output(self, out: str):
self.graph.output(self.nodes[out])
def create_nn_module_stack(
self, module_fqn: str
) -> OrderedDict[int, tuple[str, type]]:
cur_name = ""
nn_module_stack = OrderedDict()
for atom in module_fqn.split("."):
if cur_name == "":
cur_name = atom
else:
cur_name = cur_name + "." + atom
if cur_name not in self.nn_module_stack_key:
id_counter = self.latest_id
self.latest_id += 1
self.nn_module_stack_key[cur_name] = id_counter
else:
id_counter = self.nn_module_stack_key[cur_name]
nn_module_stack[id_counter] = (cur_name, torch.nn.Module)
return nn_module_stack
def create_input_specs(self):
input_specs = []
for node in self.graph.nodes:
if node.op == "placeholder":
input_specs.append(
InputSpec(
kind=self.input_to_kind[node],
arg=TensorArgument(name=node.name),
target=None,
persistent=(
True
if self.input_to_kind[node] == InputKind.BUFFER
else None
),
)
)
return input_specs
# NOTE: does not handle non-user-outputs atm
def gen_graph_signature(self) -> ExportGraphSignature:
output = [n for n in self.graph.nodes if n.op == "output"]
assert len(output) == 1
output = output[0]
assert len(output.args) == 1, "multiple outputs NYI"
return ExportGraphSignature(
input_specs=self.create_input_specs(),
output_specs=[
OutputSpec(
kind=OutputKind.USER_OUTPUT,
arg=TensorArgument(name=n.name),
target=None,
)
for n in output.args
],
)
| GraphBuilder |
python | scikit-learn__scikit-learn | asv_benchmarks/benchmarks/common.py | {
"start": 2270,
"end": 3064
} | class ____(ABC):
"""Abstract base class for all the benchmarks"""
timer = timeit.default_timer # wall time
processes = 1
timeout = 500
(
profile,
n_jobs_vals,
save_estimators,
save_dir,
base_commit,
bench_predict,
bench_transform,
) = get_from_config()
if profile == "fast":
warmup_time = 0
repeat = 1
number = 1
min_run_count = 1
data_size = "small"
elif profile == "regular":
warmup_time = 1
repeat = (3, 100, 30)
data_size = "small"
elif profile == "large_scale":
warmup_time = 1
repeat = 3
number = 1
data_size = "large"
@property
@abstractmethod
def params(self):
pass
| Benchmark |
python | sanic-org__sanic | sanic/logging/formatter.py | {
"start": 7027,
"end": 7846
} | class ____(AutoFormatter):
MESSAGE_FORMAT = (
f"{c.PURPLE}%(host)s "
f"{c.BLUE + c.BOLD}%(request)s{c.END} "
f"%(right)s%(status)s %(byte)s {c.GREY}%(duration)s{c.END}"
)
def format(self, record: logging.LogRecord) -> str:
status = len(str(getattr(record, "status", "")))
byte = len(str(getattr(record, "byte", "")))
duration = len(str(getattr(record, "duration", "")))
record.right = (
CONTROL_LIMIT_END.format(right=status + byte + duration + 1)
if self.ATTY
else ""
)
return super().format(record)
def _set_levelname(self, record: logging.LogRecord) -> None:
if self.ATTY and record.levelno == logging.INFO:
record.levelname = f"{c.SANIC}ACCESS{c.END}"
| AutoAccessFormatter |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.