language (string, 1 class) | repo (string, 346 classes) | path (string, 6-201 chars) | class_span (dict) | source (string, 21-2.38M chars) | target (string, 1-96 chars) |
|---|---|---|---|---|---|
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/protocol48.py | {
"start": 156,
"end": 281
} | class ____(Protocol[T]):
def method1(self) -> T: ...
def apply_method1(__x: SupportsMethod1[T]) -> T: ...
| SupportsMethod1 |
python | spack__spack | lib/spack/spack/util/web.py | {
"start": 30694,
"end": 30963
} | class ____(SpackWebError):
"""Raised when an operation can't get an internet connection."""
def __init__(self, message, url):
super().__init__("No network connection: " + str(message), "URL was: " + str(url))
self.url = url
| NoNetworkConnectionError |
python | pytorch__pytorch | test/jit/test_models.py | {
"start": 886,
"end": 1518
} | class ____(nn.Module):
def __init__(self) -> None:
super().__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.reshape(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)
| MnistNet |
python | sympy__sympy | sympy/polys/domains/pythonintegerring.py | {
"start": 428,
"end": 3007
} | class ____(IntegerRing):
"""Integer ring based on Python's ``int`` type.
This will be used as :ref:`ZZ` if ``gmpy`` and ``gmpy2`` are not
installed. Elements are instances of the standard Python ``int`` type.
"""
dtype = PythonInteger # type: ignore
zero = dtype(0) # type: ignore
one = dtype(1) # type: ignore
alias = 'ZZ_python'
def __init__(self):
"""Allow instantiation of this domain. """
def to_sympy(self, a):
"""Convert ``a`` to a SymPy object. """
return SymPyInteger(a)
def from_sympy(self, a):
"""Convert SymPy's Integer to ``dtype``. """
if a.is_Integer:
return PythonInteger(a.p)
elif int_valued(a):
return PythonInteger(int(a))
else:
raise CoercionFailed("expected an integer, got %s" % a)
def from_FF_python(K1, a, K0):
"""Convert ``ModularInteger(int)`` to Python's ``int``. """
return K0.to_int(a)
def from_ZZ_python(K1, a, K0):
"""Convert Python's ``int`` to Python's ``int``. """
return a
def from_QQ(K1, a, K0):
"""Convert Python's ``Fraction`` to Python's ``int``. """
if a.denominator == 1:
return a.numerator
def from_QQ_python(K1, a, K0):
"""Convert Python's ``Fraction`` to Python's ``int``. """
if a.denominator == 1:
return a.numerator
def from_FF_gmpy(K1, a, K0):
"""Convert ``ModularInteger(mpz)`` to Python's ``int``. """
return PythonInteger(K0.to_int(a))
def from_ZZ_gmpy(K1, a, K0):
"""Convert GMPY's ``mpz`` to Python's ``int``. """
return PythonInteger(a)
def from_QQ_gmpy(K1, a, K0):
"""Convert GMPY's ``mpq`` to Python's ``int``. """
if a.denom() == 1:
return PythonInteger(a.numer())
def from_RealField(K1, a, K0):
"""Convert mpmath's ``mpf`` to Python's ``int``. """
p, q = K0.to_rational(a)
if q == 1:
return PythonInteger(p)
def gcdex(self, a, b):
"""Compute extended GCD of ``a`` and ``b``. """
return python_gcdex(a, b)
def gcd(self, a, b):
"""Compute GCD of ``a`` and ``b``. """
return python_gcd(a, b)
def lcm(self, a, b):
"""Compute LCM of ``a`` and ``b``. """
return python_lcm(a, b)
def sqrt(self, a):
"""Compute square root of ``a``. """
return python_sqrt(a)
def factorial(self, a):
"""Compute factorial of ``a``. """
return python_factorial(a)
| PythonIntegerRing |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 485159,
"end": 485690
} | class ____(sgqlc.types.Type):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("cursor", "has_two_factor_enabled", "node", "role")
cursor = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="cursor")
has_two_factor_enabled = sgqlc.types.Field(
Boolean, graphql_name="hasTwoFactorEnabled"
)
node = sgqlc.types.Field("User", graphql_name="node")
role = sgqlc.types.Field(OrganizationMemberRole, graphql_name="role")
| OrganizationMemberEdge |
python | Pylons__pyramid | tests/test_config/test_testing.py | {
"start": 8321,
"end": 8505
} | class ____(SecurityAPIMixin, AuthenticationAPIMixin):
def __init__(self, environ=None):
if environ is None:
environ = {}
self.environ = environ
| DummyRequest |
python | sqlalchemy__sqlalchemy | test/orm/test_froms.py | {
"start": 4680,
"end": 11278
} | class ____(QueryTest, AssertsCompiledSQL):
__dialect__ = "default"
query_correlated = (
"SELECT users.name AS users_name, "
"(SELECT count(addresses.id) AS count_1 FROM addresses "
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
)
query_not_correlated = (
"SELECT users.name AS users_name, "
"(SELECT count(addresses.id) AS count_1 FROM addresses, users "
"WHERE addresses.user_id = users.id) AS anon_1 FROM users"
)
def test_scalar_subquery_select_auto_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(func.count(addresses.c.id))
.where(addresses.c.user_id == users.c.id)
.scalar_subquery()
)
query = select(users.c.name.label("users_name"), query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_select_explicit_correlate(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(func.count(addresses.c.id))
.where(addresses.c.user_id == users.c.id)
.correlate(users)
.scalar_subquery()
)
query = select(users.c.name.label("users_name"), query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_select_correlate_off(self):
addresses, users = self.tables.addresses, self.tables.users
query = (
select(func.count(addresses.c.id))
.where(addresses.c.user_id == users.c.id)
.correlate(None)
.scalar_subquery()
)
query = select(users.c.name.label("users_name"), query)
self.assert_compile(
query, self.query_not_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_query_auto_correlate(self):
sess = fixture_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
def test_scalar_subquery_query_explicit_correlate(self):
sess = fixture_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.correlate(self.tables.users)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_correlated, dialect=default.DefaultDialect()
)
@testing.combinations(False, None)
def test_scalar_subquery_query_correlate_off(self, value):
sess = fixture_session()
Address, User = self.classes.Address, self.classes.User
query = (
sess.query(func.count(Address.id))
.filter(Address.user_id == User.id)
.correlate(value)
.scalar_subquery()
)
query = sess.query(User.name, query)
self.assert_compile(
query, self.query_not_correlated, dialect=default.DefaultDialect()
)
def test_correlate_to_union(self):
User = self.classes.User
sess = fixture_session()
q = sess.query(User)
q = sess.query(User).union(q)
u_alias = aliased(User)
raw_subq = exists().where(u_alias.id > User.id)
orm_subq = sess.query(u_alias).filter(u_alias.id > User.id).exists()
self.assert_compile(
q.add_columns(raw_subq),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT * FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
# only difference is "1" vs. "*" (not sure why that is)
self.assert_compile(
q.add_columns(orm_subq),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT 1 FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
def test_correlate_to_union_w_labels_newstyle(self):
User = self.classes.User
q = select(User).set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
q = (
select(User)
.set_label_style(LABEL_STYLE_TABLENAME_PLUS_COL)
.union(q)
.subquery()
)
u_alias = aliased(User)
raw_subq = exists().where(u_alias.id > q.c[0])
self.assert_compile(
select(q, raw_subq).set_label_style(
LABEL_STYLE_TABLENAME_PLUS_COL
),
"SELECT anon_1.users_id AS anon_1_users_id, "
"anon_1.users_name AS anon_1_users_name, "
"EXISTS (SELECT * FROM users AS users_1 "
"WHERE users_1.id > anon_1.users_id) AS anon_2 "
"FROM ("
"SELECT users.id AS users_id, users.name AS users_name FROM users "
"UNION SELECT users.id AS users_id, users.name AS users_name "
"FROM users) AS anon_1",
)
def test_correlate_to_union_newstyle(self):
User = self.classes.User
q = select(User)
q = select(User).union(q).subquery()
u_alias = aliased(User)
raw_subq = exists().where(u_alias.id > q.c[0])
self.assert_compile(
select(q, raw_subq),
"SELECT anon_1.id, anon_1.name, EXISTS "
"(SELECT * FROM users AS users_1 WHERE users_1.id > anon_1.id) "
"AS anon_2 FROM (SELECT users.id AS id, users.name AS name "
"FROM users "
"UNION SELECT users.id AS id, users.name AS name FROM users) "
"AS anon_1",
)
| QueryCorrelatesLikeSelect |
python | anthropics__anthropic-sdk-python | src/anthropic/_exceptions.py | {
"start": 3611,
"end": 3751
} | class ____(APIStatusError):
status_code: Literal[503] = 503 # pyright: ignore[reportIncompatibleVariableOverride]
| ServiceUnavailableError |
python | kamyu104__LeetCode-Solutions | Python/number-of-1-bits.py | {
"start": 1976,
"end": 2175
} | class ____(object):
# @param n, an integer
# @return an integer
def hammingWeight(self, n: int) -> int:
b="{0:b}".format(n)
result=b.count("1")
return result
| Solution4 |
python | getsentry__sentry | src/sentry/apidocs/examples/autofix_examples.py | {
"start": 7784,
"end": 7905
} | class ____:
AUTOFIX_POST_RESPONSE = AUTOFIX_POST_RESPONSE
AUTOFIX_GET_RESPONSE = AUTOFIX_GET_RESPONSE
| AutofixExamples |
python | cython__cython | Cython/Compiler/Code.py | {
"start": 139857,
"end": 143761
} | class ____:
"""
Can be used for writing out some Cython code.
"""
def __init__(self, buffer=None, indent_level=0, context=None, encoding='ascii'):
self.buffer = buffer or StringIOTree()
self.level = indent_level
self.original_level = indent_level
self.context = context
self.encoding = encoding
self._insertion_points = {}
def indent(self, levels=1):
self.level += levels
return True
def dedent(self, levels=1):
self.level -= levels
@contextmanager
def indenter(self, line):
"""
with pyx_code.indenter("for i in range(10):"):
pyx_code.putln("print i")
"""
self.putln(line)
self.indent()
yield
self.dedent()
def empty(self):
return self.buffer.empty()
def getvalue(self):
result = self.buffer.getvalue()
if isinstance(result, bytes):
result = result.decode(self.encoding)
return result
def putln(self, line, context=None):
if context is None:
if self.context is not None:
context = self.context
if context is not None:
line = sub_tempita(line, context)
# Avoid indenting empty lines.
self.buffer.write(f"{self.level * ' '}{line}\n" if line else "\n")
def put_chunk(self, chunk, context=None):
if context is None:
if self.context is not None:
context = self.context
if context is not None:
chunk = sub_tempita(chunk, context)
chunk = _indent_chunk(chunk, self.level * 4)
self.buffer.write(chunk)
def insertion_point(self):
return type(self)(self.buffer.insertion_point(), self.level, self.context)
def reset(self):
# resets the buffer so that nothing gets written. Most useful
# for abandoning all work in a specific insertion point
self.buffer.reset()
self.level = self.original_level
def named_insertion_point(self, name):
self._insertion_points[name] = self.insertion_point()
def __getitem__(self, name):
return self._insertion_points[name]
@cython.final
@cython.ccall
def _indent_chunk(chunk: str, indentation_length: cython.int) -> str:
"""Normalise leading space to the intended indentation and strip empty lines.
"""
assert '\t' not in chunk
lines = chunk.splitlines(keepends=True)
if not lines:
return chunk
last_line = lines[-1].rstrip(' ')
if last_line:
lines[-1] = last_line
else:
del lines[-1]
if not lines:
return '\n'
# Count minimal (non-empty) indentation and strip empty lines.
min_indentation: cython.int = len(chunk) + 1
line_indentation: cython.int
line: str
i: cython.int
for i, line in enumerate(lines):
line_indentation = _count_indentation(line)
if line_indentation + 1 == len(line):
lines[i] = '\n'
elif line_indentation < min_indentation:
min_indentation = line_indentation
if min_indentation > len(chunk):
# All empty lines.
min_indentation = 0
if min_indentation < indentation_length:
add_indent = ' ' * (indentation_length - min_indentation)
lines = [
add_indent + line if line != '\n' else '\n'
for line in lines
]
elif min_indentation > indentation_length:
start: cython.int = min_indentation - indentation_length
lines = [
line[start:] if line != '\n' else '\n'
for line in lines
]
return ''.join(lines)
@cython.exceptval(-1)
@cython.cfunc
def _count_indentation(s: str) -> cython.int:
i: cython.int = 0
ch: cython.Py_UCS4
for i, ch in enumerate(s):
if ch != ' ':
break
return i
| PyxCodeWriter |
python | doocs__leetcode | solution/2000-2099/2073.Time Needed to Buy Tickets/Solution.py | {
"start": 0,
"end": 227
} | class ____:
def timeRequiredToBuy(self, tickets: List[int], k: int) -> int:
ans = 0
for i, x in enumerate(tickets):
ans += min(x, tickets[k] if i <= k else tickets[k] - 1)
return ans
| Solution |
python | pyqtgraph__pyqtgraph | pyqtgraph/util/mutex.py | {
"start": 44,
"end": 3266
} | class ____(QtCore.QMutex):
"""
Subclass of QMutex that provides useful debugging information during
deadlocks--tracebacks are printed for both the code location that is
attempting to lock the mutex as well as the location that has already
acquired the lock.
Also provides __enter__ and __exit__ methods for use in "with" statements.
"""
def __init__(self, *args, **kargs):
if kargs.get('recursive', False):
args = (QtCore.QMutex.Recursive,)
QtCore.QMutex.__init__(self, *args)
self.l = QtCore.QMutex() ## for serializing access to self.tb
self.tb = []
self.debug = kargs.pop('debug', False) ## True to enable debugging functions
def tryLock(self, timeout=None, id=None):
if timeout is None:
locked = QtCore.QMutex.tryLock(self)
else:
locked = QtCore.QMutex.tryLock(self, timeout)
if self.debug and locked:
self.l.lock()
try:
if id is None:
self.tb.append(''.join(traceback.format_stack()[:-1]))
else:
self.tb.append(" " + str(id))
#print 'trylock', self, len(self.tb)
finally:
self.l.unlock()
return locked
def lock(self, id=None):
c = 0
waitTime = 5000 # in ms
while True:
if self.tryLock(waitTime, id):
break
c += 1
if self.debug:
self.l.lock()
try:
print("Waiting for mutex lock (%0.1f sec). Traceback follows:"
% (c*waitTime/1000.))
traceback.print_stack()
if len(self.tb) > 0:
print("Mutex is currently locked from:\n")
print(self.tb[-1])
else:
print("Mutex is currently locked from [???]")
finally:
self.l.unlock()
#print 'lock', self, len(self.tb)
def unlock(self):
QtCore.QMutex.unlock(self)
if self.debug:
self.l.lock()
try:
#print 'unlock', self, len(self.tb)
if len(self.tb) > 0:
self.tb.pop()
else:
raise Exception("Attempt to unlock mutex before it has been locked")
finally:
self.l.unlock()
def acquire(self, blocking=True):
"""Mimics threading.Lock.acquire() to allow this class as a drop-in replacement.
"""
return self.tryLock()
def release(self):
"""Mimics threading.Lock.release() to allow this class as a drop-in replacement.
"""
self.unlock()
def depth(self):
self.l.lock()
n = len(self.tb)
self.l.unlock()
return n
def traceback(self):
self.l.lock()
try:
ret = self.tb[:]
finally:
self.l.unlock()
return ret
def __exit__(self, *args):
self.unlock()
def __enter__(self):
self.lock()
return self
| Mutex |
python | django__django | django/core/serializers/base.py | {
"start": 1786,
"end": 6504
} | class ____:
"""
Abstract serializer base class.
"""
# Indicates if the implemented serializer is only available for
# internal Django use.
internal_use_only = False
progress_class = ProgressBar
stream_class = StringIO
def serialize(
self,
queryset,
*,
stream=None,
fields=None,
use_natural_foreign_keys=False,
use_natural_primary_keys=False,
progress_output=None,
object_count=0,
**options,
):
"""
Serialize a queryset.
"""
self.options = options
self.stream = stream if stream is not None else self.stream_class()
self.selected_fields = fields
self.use_natural_foreign_keys = use_natural_foreign_keys
self.use_natural_primary_keys = use_natural_primary_keys
progress_bar = self.progress_class(progress_output, object_count)
self.start_serialization()
self.first = True
for count, obj in enumerate(queryset, start=1):
self.start_object(obj)
# Use the concrete parent class' _meta instead of the object's
# _meta This is to avoid local_fields problems for proxy models.
# Refs #17717.
concrete_model = obj._meta.concrete_model
# When using natural primary keys, retrieve the pk field of the
# parent for multi-table inheritance child models. That field must
# be serialized, otherwise deserialization isn't possible.
if self.use_natural_primary_keys:
pk = concrete_model._meta.pk
pk_parent = (
pk if pk.remote_field and pk.remote_field.parent_link else None
)
else:
pk_parent = None
for field in concrete_model._meta.local_fields:
if field.serialize or field is pk_parent:
if field.remote_field is None:
if (
self.selected_fields is None
or field.attname in self.selected_fields
):
self.handle_field(obj, field)
else:
if (
self.selected_fields is None
or field.attname[:-3] in self.selected_fields
):
self.handle_fk_field(obj, field)
for field in concrete_model._meta.local_many_to_many:
if field.serialize:
if (
self.selected_fields is None
or field.attname in self.selected_fields
):
self.handle_m2m_field(obj, field)
self.end_object(obj)
progress_bar.update(count)
self.first = self.first and False
self.end_serialization()
return self.getvalue()
def start_serialization(self):
"""
Called when serializing of the queryset starts.
"""
raise NotImplementedError(
"subclasses of Serializer must provide a start_serialization() method"
)
def end_serialization(self):
"""
Called when serializing of the queryset ends.
"""
pass
def start_object(self, obj):
"""
Called when serializing of an object starts.
"""
raise NotImplementedError(
"subclasses of Serializer must provide a start_object() method"
)
def end_object(self, obj):
"""
Called when serializing of an object ends.
"""
pass
def handle_field(self, obj, field):
"""
Called to handle each individual (non-relational) field on an object.
"""
raise NotImplementedError(
"subclasses of Serializer must provide a handle_field() method"
)
def handle_fk_field(self, obj, field):
"""
Called to handle a ForeignKey field.
"""
raise NotImplementedError(
"subclasses of Serializer must provide a handle_fk_field() method"
)
def handle_m2m_field(self, obj, field):
"""
Called to handle a ManyToManyField.
"""
raise NotImplementedError(
"subclasses of Serializer must provide a handle_m2m_field() method"
)
def getvalue(self):
"""
Return the fully serialized queryset (or None if the output stream is
not seekable).
"""
if callable(getattr(self.stream, "getvalue", None)):
return self.stream.getvalue()
| Serializer |
python | cython__cython | docs/examples/userguide/extension_types/penguin2.py | {
"start": 50,
"end": 247
} | class ____:
food: object
def __cinit__(self, food):
self.food = food
penguin = Penguin('fish 1')
penguin = None
penguin = Penguin('fish 2') # does not need to allocate memory!
| Penguin |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 709381,
"end": 713505
} | class ____(sgqlc.types.Type, Node):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = (
"app",
"branch",
"check_runs",
"commit",
"conclusion",
"created_at",
"creator",
"database_id",
"matching_pull_requests",
"push",
"repository",
"resource_path",
"status",
"updated_at",
"url",
"workflow_run",
)
app = sgqlc.types.Field(App, graphql_name="app")
branch = sgqlc.types.Field("Ref", graphql_name="branch")
check_runs = sgqlc.types.Field(
CheckRunConnection,
graphql_name="checkRuns",
args=sgqlc.types.ArgDict(
(
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
(
"filter_by",
sgqlc.types.Arg(
CheckRunFilter, graphql_name="filterBy", default=None
),
),
)
),
)
commit = sgqlc.types.Field(sgqlc.types.non_null("Commit"), graphql_name="commit")
conclusion = sgqlc.types.Field(CheckConclusionState, graphql_name="conclusion")
created_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="createdAt"
)
creator = sgqlc.types.Field("User", graphql_name="creator")
database_id = sgqlc.types.Field(Int, graphql_name="databaseId")
matching_pull_requests = sgqlc.types.Field(
PullRequestConnection,
graphql_name="matchingPullRequests",
args=sgqlc.types.ArgDict(
(
(
"states",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(PullRequestState)),
graphql_name="states",
default=None,
),
),
(
"labels",
sgqlc.types.Arg(
sgqlc.types.list_of(sgqlc.types.non_null(String)),
graphql_name="labels",
default=None,
),
),
(
"head_ref_name",
sgqlc.types.Arg(String, graphql_name="headRefName", default=None),
),
(
"base_ref_name",
sgqlc.types.Arg(String, graphql_name="baseRefName", default=None),
),
(
"order_by",
sgqlc.types.Arg(IssueOrder, graphql_name="orderBy", default=None),
),
("after", sgqlc.types.Arg(String, graphql_name="after", default=None)),
(
"before",
sgqlc.types.Arg(String, graphql_name="before", default=None),
),
("first", sgqlc.types.Arg(Int, graphql_name="first", default=None)),
("last", sgqlc.types.Arg(Int, graphql_name="last", default=None)),
)
),
)
push = sgqlc.types.Field("Push", graphql_name="push")
repository = sgqlc.types.Field(
sgqlc.types.non_null("Repository"), graphql_name="repository"
)
resource_path = sgqlc.types.Field(
sgqlc.types.non_null(URI), graphql_name="resourcePath"
)
status = sgqlc.types.Field(
sgqlc.types.non_null(CheckStatusState), graphql_name="status"
)
updated_at = sgqlc.types.Field(
sgqlc.types.non_null(DateTime), graphql_name="updatedAt"
)
url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="url")
workflow_run = sgqlc.types.Field("WorkflowRun", graphql_name="workflowRun")
| CheckSuite |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chartsheet05.py | {
"start": 315,
"end": 1431
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chartsheet05.xlsx")
def test_create_file(self):
"""Test the worksheet properties of an XlsxWriter chartsheet file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chartsheet = workbook.add_chartsheet()
chart = workbook.add_chart({"type": "bar"})
chart.axis_ids = [43695104, 43787008]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chartsheet.set_zoom(75)
chartsheet.set_chart(chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | RaRe-Technologies__gensim | gensim/test/test_miislita.py | {
"start": 1474,
"end": 3675
} | class ____(unittest.TestCase):
def test_textcorpus(self):
"""Make sure TextCorpus can be serialized to disk. """
# construct corpus from file
miislita = CorpusMiislita(datapath('head500.noblanks.cor.bz2'))
# make sure serializing works
ftmp = get_tmpfile('test_textcorpus.mm')
corpora.MmCorpus.save_corpus(ftmp, miislita)
self.assertTrue(os.path.exists(ftmp))
# make sure deserializing gives the same result
miislita2 = corpora.MmCorpus(ftmp)
self.assertEqual(list(miislita), list(miislita2))
def test_save_load_ability(self):
"""
Make sure we can save and load (un/pickle) TextCorpus objects (as long
as the underlying input isn't a file-like object; we cannot pickle those).
"""
# construct corpus from file
corpusname = datapath('miIslita.cor')
miislita = CorpusMiislita(corpusname)
# pickle to disk
tmpf = get_tmpfile('tc_test.cpickle')
miislita.save(tmpf)
miislita2 = CorpusMiislita.load(tmpf)
self.assertEqual(len(miislita), len(miislita2))
self.assertEqual(miislita.dictionary.token2id, miislita2.dictionary.token2id)
def test_miislita_high_level(self):
# construct corpus from file
miislita = CorpusMiislita(datapath('miIslita.cor'))
# initialize tfidf transformation and similarity index
tfidf = models.TfidfModel(miislita, miislita.dictionary, normalize=False)
index = similarities.SparseMatrixSimilarity(tfidf[miislita], num_features=len(miislita.dictionary))
# compare to query
query = 'latent semantic indexing'
vec_bow = miislita.dictionary.doc2bow(query.lower().split())
vec_tfidf = tfidf[vec_bow]
# perform a similarity query against the corpus
sims_tfidf = index[vec_tfidf]
# for the expected results see the article
expected = [0.0, 0.2560, 0.7022, 0.1524, 0.3334]
for i, value in enumerate(expected):
self.assertAlmostEqual(sims_tfidf[i], value, 2)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
| TestMiislita |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-confluence/tests/test_readers_confluence.py | {
"start": 438,
"end": 11481
} | class ____:
def __init__(self, *args, **kwargs) -> None:
pass
@pytest.fixture(autouse=True)
def mock_atlassian_confluence(monkeypatch):
monkeypatch.setattr("atlassian.Confluence", MockConfluence)
def test_confluence_reader_with_oauth2():
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
oauth2={
"client_id": "example_client_id",
"token": {"access_token": "example_token", "token_type": "Bearer"},
},
)
assert reader.confluence is not None
def test_confluence_reader_with_api_token():
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
)
assert reader.confluence is not None
def test_confluence_reader_with_cookies():
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
cookies={"key": "value"},
)
assert reader.confluence is not None
def test_confluence_reader_with_client_args():
with patch("atlassian.Confluence") as MockConstructor:
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
client_args={"backoff_and_retry": True},
)
assert reader.confluence is not None
MockConstructor.assert_called_once_with(
url="https://example.atlassian.net/wiki",
token="example_api_token",
cloud=True,
backoff_and_retry=True,
)
def test_confluence_reader_with_basic_auth():
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
user_name="example_user",
password="example_password",
)
assert reader.confluence is not None
def test_confluence_reader_with_env_api_token(monkeypatch):
monkeypatch.setenv("CONFLUENCE_API_TOKEN", "env_api_token")
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
)
assert reader.confluence is not None
monkeypatch.delenv("CONFLUENCE_API_TOKEN")
def test_confluence_reader_with_env_basic_auth(monkeypatch):
monkeypatch.setenv("CONFLUENCE_USERNAME", "env_user")
monkeypatch.setenv("CONFLUENCE_PASSWORD", "env_password")
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
)
assert reader.confluence is not None
monkeypatch.delenv("CONFLUENCE_USERNAME")
monkeypatch.delenv("CONFLUENCE_PASSWORD")
def test_confluence_reader_without_credentials():
with pytest.raises(ValueError) as excinfo:
ConfluenceReader(base_url="https://example.atlassian.net/wiki")
assert "Must set one of environment variables" in str(excinfo.value)
def test_confluence_reader_with_incomplete_basic_auth():
with pytest.raises(ValueError) as excinfo:
ConfluenceReader(
base_url="https://example.atlassian.net/wiki", user_name="example_user"
)
assert "Must set one of environment variables" in str(excinfo.value)
with pytest.raises(ValueError) as excinfo:
ConfluenceReader(
base_url="https://example.atlassian.net/wiki", password="example_password"
)
assert "Must set one of environment variables" in str(excinfo.value)
# Test new features
def test_confluence_reader_with_custom_folder_without_parsers():
"""Test that custom_folder raises error when used without custom_parsers."""
with pytest.raises(ValueError) as excinfo:
ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
custom_folder="/tmp/test",
)
assert "custom_folder can only be used when custom_parsers are provided" in str(
excinfo.value
)
def test_confluence_reader_with_custom_parsers_and_folder():
"""Test that custom_parsers and custom_folder work together."""
mock_parser = MagicMock(spec=BaseReader)
custom_parsers = {FileType.PDF: mock_parser}
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
custom_parsers=custom_parsers,
custom_folder="/tmp/test",
)
assert reader.custom_parsers == custom_parsers
assert reader.custom_folder == "/tmp/test"
assert reader.custom_parser_manager is not None
def test_confluence_reader_with_custom_parsers_default_folder():
"""Test that custom_parsers uses default folder when custom_folder not specified."""
import os
mock_parser = MagicMock(spec=BaseReader)
custom_parsers = {FileType.PDF: mock_parser}
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
custom_parsers=custom_parsers,
)
assert reader.custom_parsers == custom_parsers
assert reader.custom_folder == os.getcwd()
assert reader.custom_parser_manager is not None
def test_confluence_reader_callbacks():
"""Test that callbacks are properly stored and can be used."""
def attachment_callback(
media_type: str, file_size: int, title: str
) -> tuple[bool, str]:
if file_size > 1000000: # 1MB
return False, "File too large"
return True, ""
def document_callback(page_id: str) -> bool:
return page_id != "excluded_page"
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
process_attachment_callback=attachment_callback,
process_document_callback=document_callback,
)
assert reader.process_attachment_callback == attachment_callback
assert reader.process_document_callback == document_callback
# Test callback functionality
should_process, reason = reader.process_attachment_callback(
"application/pdf", 2000000, "large_file.pdf"
)
assert should_process is False
assert reason == "File too large"
should_process, reason = reader.process_attachment_callback(
"application/pdf", 500000, "small_file.pdf"
)
assert should_process is True
assert reason == ""
should_process = reader.process_document_callback("normal_page")
assert should_process is True
should_process = reader.process_document_callback("excluded_page")
assert should_process is False
def test_confluence_reader_event_system():
"""Test that the new event system works correctly."""
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
)
# Test event handling
events_received = []
class TestEventHandler(BaseEventHandler):
def handle(self, event):
events_received.append(event)
class PageEventHandler(BaseEventHandler):
def handle(self, event):
if isinstance(event, PageDataFetchStartedEvent):
events_received.append(f"PAGE: {event.page_id}")
# Add event handlers to dispatcher
dispatcher = get_dispatcher("llama_index.readers.confluence.base")
test_handler = TestEventHandler()
page_handler = PageEventHandler()
dispatcher.add_event_handler(test_handler)
dispatcher.add_event_handler(page_handler)
# Create and emit events manually to test the system
page_event = PageDataFetchStartedEvent(page_id="test_page")
attachment_event = AttachmentProcessedEvent(
page_id="test_page",
attachment_id="att_123",
attachment_name="test.pdf",
attachment_type=FileType.PDF,
attachment_size=1000,
attachment_link="http://example.com/att_123",
)
dispatcher.event(page_event)
dispatcher.event(attachment_event)
# Check that events were received
assert len(events_received) == 3 # page_handler + 2 from test_handler
assert "PAGE: test_page" in events_received
assert any(
isinstance(event, PageDataFetchStartedEvent) for event in events_received
)
assert any(isinstance(event, AttachmentProcessedEvent) for event in events_received)
# Clean up handlers
for handler in [test_handler, page_handler]:
if handler in dispatcher.event_handlers:
dispatcher.event_handlers.remove(handler)
def test_confluence_reader_fail_on_error_setting():
"""Test that fail_on_error setting is properly stored."""
# Test default (True)
reader1 = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
)
assert reader1.fail_on_error is True
# Test explicit False
reader2 = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
fail_on_error=False,
)
assert reader2.fail_on_error is False
# Test explicit True
reader3 = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
fail_on_error=True,
)
assert reader3.fail_on_error is True
@patch("llama_index.readers.confluence.html_parser.HtmlTextParser")
def test_confluence_reader_process_page_with_callbacks(mock_html_parser_class):
"""Test that callbacks are properly used during page processing."""
mock_text_maker = MagicMock()
mock_text_maker.convert.return_value = "processed text"
mock_html_parser_class.return_value = mock_text_maker
# Mock the confluence API
mock_confluence = MagicMock()
def document_callback(page_id: str) -> bool:
return page_id != "skip_this_page"
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
process_document_callback=document_callback,
)
reader.confluence = mock_confluence
# Test page that should be processed
page_data = {
"id": "normal_page",
"title": "Test Page",
"status": "current",
"body": {"export_view": {"value": "<p>Test content</p>"}},
"_links": {"webui": "/pages/123"},
}
result = reader.process_page(page_data, False, mock_text_maker)
assert result is not None
assert result.doc_id == "normal_page"
assert result.metadata["title"] == "Test Page"
# Test page that should be skipped
page_data_skip = {
"id": "skip_this_page",
"title": "Skip This Page",
"status": "current",
"body": {"export_view": {"value": "<p>Skip content</p>"}},
"_links": {"webui": "/pages/456"},
}
result_skip = reader.process_page(page_data_skip, False, mock_text_maker)
assert result_skip is None
def test_confluence_reader_logger_setting():
"""Test that custom logger is properly stored."""
import logging
custom_logger = logging.getLogger("test_logger")
reader = ConfluenceReader(
base_url="https://example.atlassian.net/wiki",
api_token="example_api_token",
logger=custom_logger,
)
assert reader.logger == custom_logger
| MockConfluence |
python | numba__numba | numba/core/types/iterators.py | {
"start": 2326,
"end": 2912
} | class ____(SimpleIteratorType):
"""
Type class for `zip` objects.
Type instances are parameterized with the underlying source types.
"""
def __init__(self, iterable_types):
from numba.core.types import Tuple
self.source_types = tuple(tp.iterator_type for tp in iterable_types)
yield_type = Tuple([tp.yield_type for tp in self.source_types])
name = 'zip(%s)' % ', '.join(str(tp) for tp in self.source_types)
super(ZipType, self).__init__(name, yield_type)
@property
def key(self):
return self.source_types
| ZipType |
python | pytorch__pytorch | torch/_export/db/examples/cond_operands.py | {
"start": 136,
"end": 799
} | class ____(torch.nn.Module):
"""
The operands passed to cond() must be:
- a list of tensors
- match arguments of `true_fn` and `false_fn`
NOTE: If the `pred` is tested on a dim with batch size < 2, it will be specialized.
"""
def forward(self, x, y):
def true_fn(x, y):
return x + y
def false_fn(x, y):
return x - y
return torch.cond(x.shape[0] > 2, true_fn, false_fn, [x, y])
example_args = (x, y)
tags = {
"torch.cond",
"torch.dynamic-shape",
}
extra_inputs = (torch.randn(2, 2), torch.randn(2))
dynamic_shapes = {"x": {0: dim0_x}, "y": None}
model = CondOperands()
| CondOperands |
python | django-haystack__django-haystack | test_haystack/test_loading.py | {
"start": 7090,
"end": 7234
} | class ____(indexes.BasicSearchIndex, indexes.Indexable):
def get_model(self):
return AnotherMockModel
| BasicAnotherMockModelSearchIndex |
python | python-pillow__Pillow | src/PIL/MicImagePlugin.py | {
"start": 662,
"end": 2564
} | class ____(TiffImagePlugin.TiffImageFile):
format = "MIC"
format_description = "Microsoft Image Composer"
_close_exclusive_fp_after_loading = False
def _open(self) -> None:
# read the OLE directory and see if this is a likely
# to be a Microsoft Image Composer file
try:
self.ole = olefile.OleFileIO(self.fp)
except OSError as e:
msg = "not an MIC file; invalid OLE file"
raise SyntaxError(msg) from e
# find ACI subfiles with Image members (maybe not the
# best way to identify MIC files, but what the... ;-)
self.images = [
path
for path in self.ole.listdir()
if path[1:] and path[0].endswith(".ACI") and path[1] == "Image"
]
# if we didn't find any images, this is probably not
# an MIC file.
if not self.images:
msg = "not an MIC file; no image entries"
raise SyntaxError(msg)
self.frame = -1
self._n_frames = len(self.images)
self.is_animated = self._n_frames > 1
self.__fp = self.fp
self.seek(0)
def seek(self, frame: int) -> None:
if not self._seek_check(frame):
return
filename = self.images[frame]
self.fp = self.ole.openstream(filename)
TiffImagePlugin.TiffImageFile._open(self)
self.frame = frame
def tell(self) -> int:
return self.frame
def close(self) -> None:
self.__fp.close()
self.ole.close()
super().close()
def __exit__(self, *args: object) -> None:
self.__fp.close()
self.ole.close()
super().__exit__()
#
# --------------------------------------------------------------------
Image.register_open(MicImageFile.format, MicImageFile, _accept)
Image.register_extension(MicImageFile.format, ".mic")
| MicImageFile |
python | davidhalter__jedi | jedi/inference/compiled/subprocess/functions.py | {
"start": 8283,
"end": 8464
} | class ____:
"""Stores information returned from an implicit namespace spec"""
def __init__(self, name, paths):
self.name = name
self.paths = paths
| ImplicitNSInfo |
python | nedbat__coveragepy | coverage/misc.py | {
"start": 4481,
"end": 6292
} | class ____:
"""Hashes Python data for fingerprinting."""
def __init__(self) -> None:
self.hash = hashlib.new("sha3_256", usedforsecurity=False)
def update(self, v: Any) -> None:
"""Add `v` to the hash, recursively if needed."""
self.hash.update(str(type(v)).encode("utf-8"))
match v:
case None:
pass
case str():
self.hash.update(v.encode("utf-8"))
case bytes():
self.hash.update(v)
case int() | float():
self.hash.update(str(v).encode("utf-8"))
case tuple() | list():
for e in v:
self.update(e)
case dict():
keys = v.keys()
for k in sorted(keys):
self.update(k)
self.update(v[k])
case _:
for k in dir(v):
if k.startswith("__"):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
self.hash.update(b".")
def hexdigest(self) -> str:
"""Retrieve the hex digest of the hash."""
return self.hash.hexdigest()[:32]
def _needs_to_implement(that: Any, func_name: str) -> NoReturn:
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = f"{klass.__module__}.{klass.__name__}"
raise NotImplementedError(
f"{thing} {name!r} needs to implement {func_name}()",
)
| Hasher |
python | ApeWorX__ape | tests/functional/utils/test_basemodel.py | {
"start": 1683,
"end": 2887
} | class ____:
@pytest.fixture(scope="class")
def ExampleModel(self):
class _ExampleModel(DiskCacheableModel):
aa: int
bb: str
cc: dict[str, dict[str, int]]
return _ExampleModel
def test_model_validate_file(self, ExampleModel):
with create_tempdir() as path:
file = path / "example.json"
json_str = '{"aa":123,"bb":"Hello Pydantic!","cc":{"1":{"2":3}}}'
file.write_text(json_str)
instance = ExampleModel.model_validate_file(file)
file.unlink()
assert instance.aa == 123
assert instance.bb == "Hello Pydantic!"
assert instance.cc == {"1": {"2": 3}}
# Show the path was already set.
assert instance._path == file
def test_model_dump_file(self, ExampleModel):
instance = ExampleModel(aa=123, bb="Hello Pydantic!", cc={"1": {"2": 3}})
expected = '{"aa":123,"bb":"Hello Pydantic!","cc":{"1":{"2":3}}}'
with create_tempdir() as path:
file = path / "example.json"
instance.model_dump_file(file)
actual = file.read_text()
assert actual == expected
| TestDiskCacheableModel |
python | facebook__pyre-check | source/interprocedural_analyses/taint/test/integration/decorator.py | {
"start": 3876,
"end": 8889
} | class ____:
def sink_method(self, x: str) -> None:
print(x)
_test_sink(x)
@with_logging_args_kwargs_no_sink
def foo(self, x: str) -> None:
self.sink_method(x)
@with_logging_args_kwargs_no_sink
@with_logging_args_kwargs
def bar(self, x: str) -> None:
print(x)
@with_logging_args_kwargs_no_sink
def self_has_generic_type(self: TFoo, other: TFoo, x: str) -> None:
other.bar(x=x) # Sink is on the keyword argument
@classmethod
@with_logging_args_kwargs_no_sink
def some_class_method(cls, x: str) -> None:
cls().sink_method(x)
async def main() -> None:
foo(_test_source())
foo_with_sink(_test_source())
await foo_async(_test_source())
foo_args_kwargs(x=_test_source()) # Sink is on the keyword argument
# No issue because the taint is on the second parameter.
foo_args_kwargs_with_sink(_test_source(), 0)
# Issue.
foo_args_kwargs_with_sink("hello", _test_source())
foo_with_shady_decorators("hello")
foo_using_decorator_factory(_test_source())
foo_log_first_parameter(_test_source(), "hello")
foo_with_helper_function(_test_source(), "hello")
Foo().foo(_test_source()) # Sink is on the 1st argument (False negative)
Foo().foo(x=_test_source()) # Expect an issue
Foo.foo(Foo(), _test_source()) # Expect an issue
Foo().bar(x=_test_source()) # Sink is on the keyword argument
# Sink is on the keyword argument
Foo().self_has_generic_type(other=Foo(), x=_test_source())
Foo.some_class_method(
_test_source()
) # Sink is on the 1st argument (False negative)
def discard_second_parameter_inner(first_parameter: int) -> None:
return
def discard_second_parameter_non_inlineable(
f: Callable[[int, str], None],
) -> Callable[[int], None]:
# Return a function not defined here, to prevent from inlining decorators
return discard_second_parameter_inner
@discard_second_parameter_non_inlineable
def second_parameter_source_with_non_inlineable_decorator(arg1: int, arg2: str) -> None:
_test_sink(arg2) # Issue here
def trivial_decorator(f: Callable[P, None]) -> Callable[P, None]:
def inner(*args: P.args, **kwargs: P.kwargs) -> None:
f(*args, **kwargs)
return inner
@trivial_decorator
def second_parameter_source_inlineable_decorator(arg1: int, arg2: str) -> None:
_test_sink(arg2) # Issue here
@trivial_decorator
def second_parameter_source_inlineable_decorator_with_inner(
arg1: int, arg2: str
) -> None:
def inner():
_test_sink(arg2)
inner() # Issue here
@trivial_decorator
def sink_via_trivial_decorator(x: str) -> None:
_test_sink(x)
def issue_via_trivial_decorator() -> None:
sink_via_trivial_decorator(_test_source())
def _strip_first_parameter_(
f: Callable[Concatenate[int, P], None],
) -> Callable[Concatenate[P], None]:
def inner(*args: P.args, **kwargs: P.kwargs) -> None:
f(0, *args, **kwargs)
return inner
@_strip_first_parameter_
def decorated(self, into_sink) -> None:
_test_sink(into_sink)
def using_decorated(into_decorated):
decorated(into_decorated)
T = TypeVar("T")
def no_op_decorator(f: T) -> T:
return f
@no_op_decorator
def sink_via_no_op_decorator(x: str) -> None:
_test_sink(x)
def issue_via_no_op_decorator() -> None:
sink_via_no_op_decorator(_test_source())
# pyre-ignore
def no_op_decorator_factory(flag: bool) -> Callable[[T], T]:
def inner(f: T) -> T:
f.__doc___ = "dummy doc"
return f
return inner
@no_op_decorator_factory(True)
def sink_via_no_op_decorator_factory(x: str) -> None:
_test_sink(x)
def issue_via_no_op_decorator_factory() -> None:
sink_via_no_op_decorator_factory(_test_source())
# pyre-ignore
def conditional_no_op_decorator_factory(flag: bool) -> Callable[[T], T]:
if flag:
def inner_true(f: T) -> T:
return f
return inner_true
else:
def inner_false(f: T) -> T:
return f
return inner_false
@conditional_no_op_decorator_factory(False)
def sink_via_conditional_no_op_decorator_factory(x: str) -> None:
_test_sink(x)
def issue_via_conditional_no_op_decorator_factory():
sink_via_conditional_no_op_decorator_factory(_test_source())
def conditional_decorator_factory(flag: bool) -> Callable[[Callable[[str], None]], Callable[[str], None]]:
if flag:
def add_sink(f: Callable[[str], None]) -> Callable[[str], None]:
def inner(x: str) -> None:
_test_sink(x)
f(x)
return inner
return add_sink
else:
def identity(f: Callable[[str], None]) -> Callable[[str], None]:
return f
return identity
@conditional_decorator_factory(True)
def sink_via_conditional_decorator_factory(x: str) -> None:
print(x)
def issue_via_conditional_decorator_factory():
sink_via_conditional_decorator_factory(_test_source())
| Foo |
python | apache__airflow | providers/slack/tests/unit/slack/operators/test_slack.py | {
"start": 7101,
"end": 11001
} | class ____:
def setup_method(self):
self.test_username = "test_username"
self.test_channel = "#test_slack_channel"
self.test_initial_comment = "test text file test_filename.txt"
self.filename = "test_filename.txt"
self.test_content = "This is a test text file!"
self.test_api_params = {"key": "value"}
self.expected_method = "files.upload"
self.test_snippet_type = "text"
def __construct_operator(self, test_slack_conn_id, test_api_params=None):
return SlackAPIFileOperator(
task_id="slack",
slack_conn_id=test_slack_conn_id,
channels=self.test_channel,
initial_comment=self.test_initial_comment,
filename=self.filename,
content=self.test_content,
api_params=test_api_params,
snippet_type=self.test_snippet_type,
)
def test_init_with_valid_params(self):
slack_api_post_operator = self.__construct_operator(
SLACK_API_TEST_CONNECTION_ID, self.test_api_params
)
assert slack_api_post_operator.slack_conn_id == SLACK_API_TEST_CONNECTION_ID
assert slack_api_post_operator.method == self.expected_method
assert slack_api_post_operator.initial_comment == self.test_initial_comment
assert slack_api_post_operator.channels == self.test_channel
assert slack_api_post_operator.api_params == self.test_api_params
assert slack_api_post_operator.filename == self.filename
assert slack_api_post_operator.filetype is None
assert slack_api_post_operator.content == self.test_content
assert slack_api_post_operator.snippet_type == self.test_snippet_type
assert not hasattr(slack_api_post_operator, "token")
@pytest.mark.parametrize("initial_comment", [None, "foo-bar"])
@pytest.mark.parametrize("title", [None, "Spam Egg"])
@pytest.mark.parametrize("snippet_type", [None, "text"])
def test_api_call_params_with_content_args(self, initial_comment, title, snippet_type):
op = SlackAPIFileOperator(
task_id="slack",
slack_conn_id=SLACK_API_TEST_CONNECTION_ID,
content="test-content",
channels="#test-channel",
initial_comment=initial_comment,
title=title,
snippet_type=snippet_type,
)
with mock.patch(
"airflow.providers.slack.operators.slack.SlackHook.send_file_v1_to_v2"
) as mock_send_file:
op.execute({})
mock_send_file.assert_called_once_with(
channels="#test-channel",
content="test-content",
file=None,
initial_comment=initial_comment,
title=title,
snippet_type=snippet_type,
)
@pytest.mark.parametrize("initial_comment", [None, "foo-bar"])
@pytest.mark.parametrize("title", [None, "Spam Egg"])
@pytest.mark.parametrize("snippet_type", [None, "text"])
def test_api_call_params_with_file_args(self, initial_comment, title, snippet_type):
op = SlackAPIFileOperator(
task_id="slack",
slack_conn_id=SLACK_API_TEST_CONNECTION_ID,
channels="C1234567890",
filename="/dev/null",
initial_comment=initial_comment,
title=title,
snippet_type=snippet_type,
)
with mock.patch(
"airflow.providers.slack.operators.slack.SlackHook.send_file_v1_to_v2"
) as mock_send_file:
op.execute({})
mock_send_file.assert_called_once_with(
channels="C1234567890",
content=None,
file="/dev/null",
initial_comment=initial_comment,
title=title,
snippet_type=snippet_type,
)
| TestSlackAPIFileOperator |
python | vyperlang__vyper | vyper/codegen/core.py | {
"start": 1754,
"end": 53481
} | class ____(VyperType):
_invalid_locations = tuple(DataLocation)
def __init__(self, buf_size: int):
assert buf_size >= 0
self.buf_size: int = ceil32(buf_size)
super().__init__(members=None)
@property
def size_in_bytes(self):
return self.buf_size
def get_size_in(self, location: DataLocation) -> int: # pragma: nocover
# get_size_in should only be called by semantic analysis. by the
# time we get to codegen, this should never be called. (if this
# assumption changes in the future, we can lift the restriction).
raise CompilerPanic("internal buffer should only be used in memory!")
def get_type_for_exact_size(n_bytes):
"""Create a type which will take up exactly n_bytes. Used for allocating internal buffers.
Parameters:
n_bytes: the number of bytes to allocate
Returns:
type: A type which can be passed to context.new_variable
"""
return _InternalBufferT(n_bytes)
# propagate revert message when calls to external contracts fail
def check_external_call(call_ir):
copy_revertdata = ["returndatacopy", 0, 0, "returndatasize"]
revert = IRnode.from_list(["revert", 0, "returndatasize"], error_msg="external call failed")
propagate_revert_ir = ["seq", copy_revertdata, revert]
return ["if", ["iszero", call_ir], propagate_revert_ir]
# propagate revert message when create operations fail
# note the code for this is substantially the same as check_external_call,
# but keep it separate in case the assumptions about CREATE change.
def check_create_operation(create_ir: IRnode):
copy_revertdata = ["returndatacopy", 0, 0, "returndatasize"]
revert = IRnode.from_list(["revert", 0, "returndatasize"], error_msg="create failed")
propagate_revert_ir = ["seq", copy_revertdata, revert]
return ["if", ["iszero", create_ir], propagate_revert_ir]
# cost per byte of the identity precompile
def _identity_gas_bound(num_bytes):
return GAS_IDENTITY + GAS_IDENTITYWORD * (ceil32(num_bytes) // 32)
def _mcopy_gas_bound(num_bytes):
return GAS_COPY_WORD * ceil32(num_bytes) // 32
def _calldatacopy_gas_bound(num_bytes):
return GAS_COPY_WORD * ceil32(num_bytes) // 32
def _codecopy_gas_bound(num_bytes):
return GAS_COPY_WORD * ceil32(num_bytes) // 32
def data_location_to_address_space(s: DataLocation, is_ctor_ctx: bool) -> AddrSpace:
if s == DataLocation.MEMORY:
return MEMORY
if s == DataLocation.STORAGE:
return STORAGE
if s == DataLocation.TRANSIENT:
return TRANSIENT
if s == DataLocation.CODE:
if is_ctor_ctx:
return IMMUTABLES
return DATA
raise CompilerPanic("unreachable!") # pragma: nocover
def address_space_to_data_location(s: AddrSpace) -> DataLocation:
if s == MEMORY:
return DataLocation.MEMORY
if s == STORAGE:
return DataLocation.STORAGE
if s == TRANSIENT:
return DataLocation.TRANSIENT
if s in (IMMUTABLES, DATA):
return DataLocation.CODE
if s == CALLDATA:
return DataLocation.CALLDATA
raise CompilerPanic("unreachable!") # pragma: nocover
def writeable(context, ir_node):
assert ir_node.is_pointer # sanity check
if context.is_constant() and not legal_in_staticcall(ir_node.location):
return False
return ir_node.mutable
# Copy byte array word-for-word (including layout)
# TODO make this a private function
def make_byte_array_copier(dst, src):
assert isinstance(src.typ, _BytestringT)
assert isinstance(dst.typ, _BytestringT)
_check_assign_bytes(dst, src)
with src.cache_when_complex("src") as (b1, src):
if src.typ.maxlen == 0 or src.is_empty_intrinsic:
# set dst length to zero, preserving side effects of `src`.
ret = STORE(dst, 0)
return b1.resolve(ret)
if src.typ.maxlen <= 32 and not copy_opcode_available(dst, src):
# if there is no batch copy opcode available,
# it's cheaper to run two load/stores instead of copy_bytes
ret = ["seq"]
# store length word
len_ = get_bytearray_length(src)
ret.append(STORE(dst, len_))
# store the single data word.
dst_data_ptr = bytes_data_ptr(dst)
src_data_ptr = bytes_data_ptr(src)
ret.append(STORE(dst_data_ptr, LOAD(src_data_ptr)))
return b1.resolve(ret)
# batch copy the bytearray (including length word) using copy_bytes
len_ = add_ofst(get_bytearray_length(src), 32)
max_bytes = src.typ.maxlen + 32
if _prefer_copy_maxbound_heuristic(dst, src, item_size=1):
len_ = max_bytes
# batch copy the entire dynarray, including length word
ret = copy_bytes(dst, src, len_, max_bytes)
return b1.resolve(ret)
# heuristic to choose
def _prefer_copy_maxbound_heuristic(dst, src, item_size):
if dst.location != MEMORY:
return False
# a heuristic - it's cheaper to just copy the extra buffer bytes
# than calculate the number of bytes
# copy(dst, src, 32 + itemsize*load(src))
# DUP<src> MLOAD PUSH1 ITEMSIZE MUL PUSH1 32 ADD (3 * 4 + 8 = 20 gas | 8 bytes)
# or if ITEM_SIZE == 1:
# DUP<src> MLOAD PUSH1 32 ADD (3 * 4 = 12 gas | 5 bytes)
# =>
# copy(dst, src, bound)
# PUSH1 BOUND (3 gas | 2 bytes)
# (32 + itemsize*(load(src))) costs 3 * 4 [+ 8] - 3 gas over just `bound`
length_calc_cost = 4 * 3 - 3
length_calc_cost += 8 * (item_size != 1) # PUSH MUL
# NOTE: there is an opportunity for more optimization if this
# is one in a sequence of copies, since doing copy(dst, src, maxbound)
# allows us to fuse copies together, further saving gas (each copy
# costs at least 15 gas).
if _opt_codesize():
# if we are optimizing for codesize, we are ok with a higher
# gas cost before switching to copy(dst, src, <precise length>).
# +45 is based on vibes -- it says we are willing to burn 45
# gas (additional 15 words in the copy operation) at runtime to
# save these 5-8 bytes (depending on if itemsize is 1 or not)
# (DUP<src> MLOAD PUSH1 ITEMSIZE MUL PUSH1 32 ADD)
length_calc_cost += 45
src_bound = src.typ.memory_bytes_required
# 3 gas per word, minus the cost of the length word
# (since it is always copied, we don't include it in the marginal
# cost difference)
copy_cost = ceil32(src_bound - 32) * 3 // 32
if src.location in (CALLDATA, MEMORY) and copy_cost <= length_calc_cost:
return True
# threshold is 6 words of data (+ 1 length word that we need to copy anyway)
# dload(src) costs additional 14-20 gas depending on if `src` is a literal
# or not.
# (dload(src) expands to `codecopy(0, add(CODE_END, src), 32); mload(0)`,
# and we have already accounted for an `mload(ptr)`).
# PUSH1 32 DUP2 PUSH CODE_END ADD PUSH0 CODECOPY (3 * 4 + 2 + 6 = 20 gas)
# or if src is a literal:
# PUSH1 32 PUSH OFFSET PUSH0 CODECOPY (3 * 2 + 2 + 6 = 14 gas)
# for simplicity, skip the 14 case.
if src.location == DATA and copy_cost <= (20 + length_calc_cost):
return True
return False
def bytes_data_ptr(ptr):
if ptr.location is None: # pragma: nocover
raise CompilerPanic("tried to modify non-pointer type")
assert isinstance(ptr.typ, _BytestringT)
return add_ofst(ptr, ptr.location.word_scale)
def dynarray_data_ptr(ptr):
if ptr.location is None: # pragma: nocover
raise CompilerPanic("tried to modify non-pointer type")
assert isinstance(ptr.typ, DArrayT)
return add_ofst(ptr, ptr.location.word_scale)
def _dynarray_make_setter(dst, src, hi=None):
assert isinstance(src.typ, DArrayT)
assert isinstance(dst.typ, DArrayT)
if src.is_empty_intrinsic:
return IRnode.from_list(STORE(dst, 0))
# copy contents of src dynarray to dst.
# note that in case src and dst refer to the same dynarray,
# in order for get_element_ptr oob checks on the src dynarray
# to work, we need to wait until after the data is copied
# before we clobber the length word.
if src.value == "multi":
# validation is only performed on unsafe data, but we are dealing with
# a literal here.
assert hi is None
ret = ["seq"]
# handle literals
# copy each item
n_items = len(src.args)
for i in range(n_items):
k = IRnode.from_list(i, typ=UINT256_T)
dst_i = get_element_ptr(dst, k, array_bounds_check=False)
src_i = get_element_ptr(src, k, array_bounds_check=False)
ret.append(make_setter(dst_i, src_i))
# write the length word after data is copied
store_length = STORE(dst, n_items)
ann = None
if src.annotation is not None:
ann = f"len({src.annotation})"
store_length = IRnode.from_list(store_length, annotation=ann)
ret.append(store_length)
return ret
with src.cache_when_complex("darray_src") as (b1, src):
# for ABI-encoded dynamic data, we must loop to unpack, since
# the layout does not match our memory layout
should_loop = src.encoding == Encoding.ABI and src.typ.value_type.abi_type.is_dynamic()
# if the data is not validated, we must loop to unpack
should_loop |= needs_clamp(src.typ.value_type, src.encoding)
# performance: if the subtype is dynamic, there might be a lot
# of unused space inside of each element. for instance
# DynArray[DynArray[uint256, 100], 5] where all the child
# arrays are empty - for this case, we recursively call
# into make_setter instead of straight bytes copy
# TODO we can make this heuristic more precise, e.g.
# loop when subtype.is_dynamic AND location == storage
# OR array_size <= /bound where loop is cheaper than memcpy/
should_loop |= src.typ.value_type.abi_type.is_dynamic()
with get_dyn_array_count(src).cache_when_complex("darray_count") as (b2, count):
ret = ["seq"]
if should_loop:
i = IRnode.from_list(_freshname("copy_darray_ix"), typ=UINT256_T)
loop_body = make_setter(
get_element_ptr(dst, i, array_bounds_check=False),
get_element_ptr(src, i, array_bounds_check=False),
hi=hi,
)
loop_body.annotation = f"{dst}[i] = {src}[i]"
ret.append(["repeat", i, 0, count, src.typ.count, loop_body])
# write the length word after data is copied
ret.append(STORE(dst, count))
else:
element_size = src.typ.value_type.memory_bytes_required
# number of elements * size of element in bytes + length word
n_bytes = add_ofst(_mul(count, element_size), 32)
max_bytes = 32 + src.typ.count * element_size
if _prefer_copy_maxbound_heuristic(dst, src, element_size):
n_bytes = max_bytes
# batch copy the entire dynarray, including length word
ret.append(copy_bytes(dst, src, n_bytes, max_bytes))
return b1.resolve(b2.resolve(ret))
# Copy bytes
# Accepts 4 arguments:
# (i) an IR node for the start position of the destination
# (ii) an IR node for the start position of the source
# (iii) an IR node for the length (in bytes)
# (iv) a constant for the max length (in bytes)
# NOTE: may pad to ceil32 of `length`! If you ask to copy 1 byte, it may
# copy an entire (32-byte) word, depending on the copy routine chosen.
# TODO maybe always pad to ceil32, to reduce dirty bytes bugs
def copy_bytes(dst, src, length, length_bound):
annotation = f"copy up to {length_bound} bytes from {src} to {dst}"
src = IRnode.from_list(src)
dst = IRnode.from_list(dst)
length = IRnode.from_list(length)
with src.cache_when_complex("src") as (b1, src), length.cache_when_complex(
"copy_bytes_count"
) as (b2, length), dst.cache_when_complex("dst") as (b3, dst):
assert isinstance(length_bound, int) and length_bound >= 0
# correctness: do not clobber dst
if length_bound == 0:
ret = IRnode.from_list(["seq"], annotation=annotation)
return b1.resolve(b2.resolve(b3.resolve(ret)))
# performance: if we know that length is 0, do not copy anything
if length.value == 0:
ret = IRnode.from_list(["seq"], annotation=annotation)
return b1.resolve(b2.resolve(b3.resolve(ret)))
assert src.is_pointer and dst.is_pointer
# fast code for common case where num bytes is small
if length_bound <= 32:
copy_op = STORE(dst, LOAD(src))
ret = IRnode.from_list(copy_op, annotation=annotation)
return b1.resolve(b2.resolve(b3.resolve(ret)))
if dst.location == MEMORY and src.location in (MEMORY, CALLDATA, DATA):
# special cases: batch copy to memory
# TODO: iloadbytes
if src.location == MEMORY:
if version_check(begin="cancun"):
copy_op = ["mcopy", dst, src, length]
gas_bound = _mcopy_gas_bound(length_bound)
else:
copy_op = ["assert", ["staticcall", "gas", 4, src, length, dst, length]]
gas_bound = _identity_gas_bound(length_bound)
elif src.location == CALLDATA:
copy_op = ["calldatacopy", dst, src, length]
gas_bound = _calldatacopy_gas_bound(length_bound)
elif src.location == DATA:
copy_op = ["dloadbytes", dst, src, length]
# note: dloadbytes compiles to CODECOPY
gas_bound = _codecopy_gas_bound(length_bound)
ret = IRnode.from_list(copy_op, annotation=annotation, add_gas_estimate=gas_bound)
return b1.resolve(b2.resolve(b3.resolve(ret)))
if dst.location == IMMUTABLES and src.location in (MEMORY, DATA):
# TODO istorebytes-from-mem, istorebytes-from-calldata(?)
# compile to identity, CODECOPY respectively.
pass
# general case, copy word-for-word
# pseudocode for our approach (memory-storage as example):
# for i in range(len, bound=MAX_LEN):
# sstore(_dst + i, mload(src + i * 32))
i = IRnode.from_list(_freshname("copy_bytes_ix"), typ=UINT256_T)
# optimized form of (div (ceil32 len) 32)
n = ["div", ["add", 31, length], 32]
n_bound = ceil32(length_bound) // 32
dst_i = add_ofst(dst, _mul(i, dst.location.word_scale))
src_i = add_ofst(src, _mul(i, src.location.word_scale))
copy_one_word = STORE(dst_i, LOAD(src_i))
main_loop = ["repeat", i, 0, n, n_bound, copy_one_word]
return b1.resolve(
b2.resolve(b3.resolve(IRnode.from_list(main_loop, annotation=annotation)))
)
# get the number of bytes at runtime
def get_bytearray_length(arg):
typ = UINT256_T
# TODO: it would be nice to merge the implementations of get_bytearray_length and
# get_dynarray_count
if arg.is_empty_intrinsic:
return IRnode.from_list(0, typ=typ)
return IRnode.from_list(LOAD(arg), typ=typ)
# get the number of elements at runtime
def get_dyn_array_count(arg):
assert isinstance(arg.typ, DArrayT)
typ = UINT256_T
if arg.value == "multi":
return IRnode.from_list(len(arg.args), typ=typ)
if arg.is_empty_intrinsic:
# empty(DynArray[...])
return IRnode.from_list(0, typ=typ)
return IRnode.from_list(LOAD(arg), typ=typ)
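# append an element to a dynarray, bounds-checking the new length against the type's max length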
def append_dyn_array(darray_node, elem_node):
assert isinstance(darray_node.typ, DArrayT)
assert darray_node.typ.count > 0, "jerk boy u r out"
ret = ["seq"]
with darray_node.cache_when_complex("darray") as (b1, darray_node):
len_ = get_dyn_array_count(darray_node)
with len_.cache_when_complex("old_darray_len") as (b2, len_):
assertion = ["assert", ["lt", len_, darray_node.typ.count]]
ret.append(IRnode.from_list(assertion, error_msg=f"{darray_node.typ} bounds check"))
# NOTE: typechecks elem_node
# NOTE skip array bounds check bc we already asserted len two lines up
ret.append(
make_setter(get_element_ptr(darray_node, len_, array_bounds_check=False), elem_node)
)
# store new length
ret.append(ensure_eval_once("append_dynarray", STORE(darray_node, ["add", len_, 1])))
return IRnode.from_list(b1.resolve(b2.resolve(ret)))
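# pop the last element of a dynarray, reverting if the dynarray is empty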
def pop_dyn_array(darray_node, return_popped_item):
assert isinstance(darray_node.typ, DArrayT)
assert darray_node.encoding == Encoding.VYPER
ret = ["seq"]
with darray_node.cache_when_complex("darray") as (b1, darray_node):
old_len = clamp("gt", get_dyn_array_count(darray_node), 0)
new_len = IRnode.from_list(["sub", old_len, 1], typ=UINT256_T)
with new_len.cache_when_complex("new_len") as (b2, new_len):
# store new length
ret.append(ensure_eval_once("pop_dynarray", STORE(darray_node, new_len)))
# NOTE skip array bounds check bc we already asserted len two lines up
if return_popped_item:
popped_item = get_element_ptr(darray_node, new_len, array_bounds_check=False)
ret.append(popped_item)
typ = popped_item.typ
location = popped_item.location
else:
typ, location = None, None
return IRnode.from_list(b1.resolve(b2.resolve(ret)), typ=typ, location=location)
# add an offset to a pointer, keeping location and encoding info
def add_ofst(ptr, ofst):
ret = ["add", ptr, ofst]
return IRnode.from_list(ret, location=ptr.location, encoding=ptr.encoding)
# shorthand util
def _mul(x, y):
ret = ["mul", x, y]
return IRnode.from_list(ret)
# Resolve pointer locations for ABI-encoded data
def _getelemptr_abi_helper(parent, member_t, ofst):
member_abi_t = member_t.abi_type
# ABI encoding has length word and then pretends length is not there
# e.g. [[1,2]] is encoded as 0x01 <len> 0x20 <inner array ofst> <encode(inner array)>
# note that inner array ofst is 0x20, not 0x40.
if has_length_word(parent.typ):
parent = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)
ofst_ir = add_ofst(parent, ofst)
if member_abi_t.is_dynamic():
# double dereference, according to ABI spec
ofst_ir = add_ofst(parent, unwrap_location(ofst_ir))
if _dirty_read_risk(ofst_ir):
# check no arithmetic overflow
ofst_ir = ["seq", ["assert", ["ge", ofst_ir, parent]], ofst_ir]
return IRnode.from_list(
ofst_ir,
typ=member_t,
location=parent.location,
encoding=parent.encoding,
annotation=f"{parent}{ofst}",
)
# TODO simplify this code, especially the ABI decoding
def _get_element_ptr_tuplelike(parent, key, hi=None):
typ = parent.typ
assert is_tuple_like(typ)
if isinstance(typ, StructT):
assert isinstance(key, str)
subtype = typ.member_types[key]
attrs = list(typ.tuple_keys())
index = attrs.index(key)
annotation = key
else:
assert isinstance(typ, TupleT)
assert isinstance(key, int)
subtype = typ.member_types[key]
attrs = list(typ.tuple_keys())
index = key
annotation = None
# generated by empty() + make_setter
if parent.is_empty_intrinsic:
return IRnode.from_list("~empty", typ=subtype)
if parent.value == "multi":
assert parent.encoding != Encoding.ABI, "no abi-encoded literals"
return parent.args[index]
ofst = 0 # offset from parent start
if parent.encoding == Encoding.ABI:
if parent.location in (STORAGE, TRANSIENT): # pragma: nocover
raise CompilerPanic("storage variables should not be abi encoded")
member_t = typ.member_types[attrs[index]]
for i in range(index):
member_abi_t = typ.member_types[attrs[i]].abi_type
ofst += member_abi_t.embedded_static_size()
return _getelemptr_abi_helper(parent, member_t, ofst)
data_location = address_space_to_data_location(parent.location)
for i in range(index):
t = typ.member_types[attrs[i]]
ofst += t.get_size_in(data_location)
return IRnode.from_list(
add_ofst(parent, ofst),
typ=subtype,
location=parent.location,
encoding=parent.encoding,
annotation=annotation,
)
def has_length_word(typ):
# Consider moving this to an attribute on typ
return isinstance(typ, (DArrayT, _BytestringT))
# TODO simplify this code, especially the ABI decoding
def _get_element_ptr_array(parent, key, array_bounds_check):
assert is_array_like(parent.typ)
if not is_integer_type(key.typ): # pragma: nocover
raise TypeCheckFailure(f"{key.typ} used as array index")
subtype = parent.typ.value_type
if parent.is_empty_intrinsic:
if array_bounds_check:
# this case was previously missing a bounds check. codegen
# is a bit complicated when bounds check is required, so
# block it. there is no reason to index into a literal empty
# array anyways!
raise TypeCheckFailure("indexing into zero array not allowed")
return IRnode.from_list("~empty", subtype)
if parent.value == "multi":
assert isinstance(key.value, int), key
return parent.args[key.value]
ix = unwrap_location(key)
if array_bounds_check:
is_darray = isinstance(parent.typ, DArrayT)
bound = get_dyn_array_count(parent) if is_darray else parent.typ.count
# NOTE: there are optimization rules for the bounds check when
# ix or bound is literal
with ix.cache_when_complex("ix") as (b1, ix):
LT = "slt" if ix.typ.is_signed else "lt"
# note: this is optimized out for unsigned integers
is_negative = [LT, ix, 0]
# always use unsigned ge, since bound is always an unsigned quantity
is_oob = ["ge", ix, bound]
checked_ix = ["seq", ["assert", ["iszero", ["or", is_negative, is_oob]]], ix]
ix = b1.resolve(IRnode.from_list(checked_ix))
ix.set_error_msg(f"{parent.typ} bounds check")
if parent.encoding == Encoding.ABI:
if parent.location in (STORAGE, TRANSIENT): # pragma: nocover
raise CompilerPanic("storage variables should not be abi encoded")
member_abi_t = subtype.abi_type
ofst = _mul(ix, member_abi_t.embedded_static_size())
return _getelemptr_abi_helper(parent, subtype, ofst)
data_location = address_space_to_data_location(parent.location)
element_size = subtype.get_size_in(data_location)
ofst = _mul(ix, element_size)
if has_length_word(parent.typ):
data_ptr = add_ofst(parent, parent.location.word_scale * DYNAMIC_ARRAY_OVERHEAD)
else:
data_ptr = parent
return IRnode.from_list(add_ofst(data_ptr, ofst), typ=subtype, location=parent.location)
def _get_element_ptr_mapping(parent, key):
assert isinstance(parent.typ, HashMapT)
subtype = parent.typ.value_type
key = unwrap_location(key)
if parent.location not in (STORAGE, TRANSIENT): # pragma: nocover
raise TypeCheckFailure(f"bad dereference on mapping {parent}[{key}]")
return IRnode.from_list(["sha3_64", parent, key], typ=subtype, location=parent.location)
# Take a value representing a memory or storage location, and descend down to
# an element or member variable
# This is analogous (but not necessarily equivalent to) getelementptr in LLVM.
def get_element_ptr(parent, key, array_bounds_check=True):
with parent.cache_when_complex("val") as (b, parent):
typ = parent.typ
if is_tuple_like(typ):
ret = _get_element_ptr_tuplelike(parent, key)
elif isinstance(typ, HashMapT):
ret = _get_element_ptr_mapping(parent, key)
elif is_array_like(typ):
ret = _get_element_ptr_array(parent, key, array_bounds_check)
else: # pragma: nocover
raise CompilerPanic(f"get_element_ptr cannot be called on {typ}")
return b.resolve(ret)
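# dereference a pointer, using the load opcode for its location (mload, sload, etc.)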
def LOAD(ptr: IRnode) -> IRnode:
if ptr.location is None: # pragma: nocover
raise CompilerPanic("cannot dereference non-pointer type")
op = ptr.location.load_op
if op is None: # pragma: nocover
raise CompilerPanic(f"unreachable {ptr.location}")
return IRnode.from_list([op, ptr])
def eval_once_check(name):
# an IRnode which enforces uniqueness. include with a side-effecting
# operation to sanity check that the codegen pipeline only generates
# the side-effecting operation once (otherwise, IR-to-assembly will
# throw a duplicate label exception). there is no runtime overhead
# since the jumpdest gets optimized out in the final stage of assembly.
return IRnode.from_list(["unique_symbol", name])
def ensure_eval_once(name, irnode):
return ["seq", eval_once_check(_freshname(name)), irnode]
def STORE(ptr: IRnode, val: IRnode) -> IRnode:
if ptr.location is None: # pragma: nocover
raise CompilerPanic("cannot dereference non-pointer type")
op = ptr.location.store_op
if op is None: # pragma: nocover
raise CompilerPanic(f"unreachable {ptr.location}")
store = [op, ptr, val]
# don't use eval_once_check for memory, immutables because it interferes
# with optimizer
if ptr.location in (MEMORY, IMMUTABLES):
return IRnode.from_list(store)
return IRnode.from_list(ensure_eval_once(f"{op}_", store))
# Unwrap location
def unwrap_location(orig):
if orig.location is not None:
return IRnode.from_list(LOAD(orig), typ=orig.typ)
else:
# CMC 2022-03-24 TODO refactor so this branch can be removed
if orig.is_empty_intrinsic:
# must be word type
return IRnode.from_list(0, typ=orig.typ)
return orig
# utility function, constructs an IR tuple out of a list of IR nodes
def ir_tuple_from_args(args):
typ = TupleT([x.typ for x in args])
return IRnode.from_list(["multi"] + [x for x in args], typ=typ)
def needs_external_call_wrap(typ):
# for calls to ABI conforming contracts.
# according to the ABI spec, return types are ALWAYS tuples even
# if only one element is being returned.
# https://solidity.readthedocs.io/en/latest/abi-spec.html#function-selector-and-argument-encoding
# "and the return values v_1, ..., v_k of f are encoded as
#
# enc((v_1, ..., v_k))
# i.e. the values are combined into a tuple and encoded.
# "
# therefore, wrap it in a tuple if it's not already a tuple.
# for example, `bytes` is returned as abi-encoded (bytes,)
# and `(bytes,)` is returned as abi-encoded ((bytes,),)
# In general `-> X` gets returned as (X,)
# including structs. MyStruct is returned as abi-encoded (MyStruct,).
# (Sorry this is so confusing. I didn't make these rules.)
return not (isinstance(typ, TupleT) and typ.length > 1)
def calculate_type_for_external_return(typ):
if needs_external_call_wrap(typ):
return TupleT([typ])
return typ
def wrap_value_for_external_return(ir_val):
# used for LHS promotion
if needs_external_call_wrap(ir_val.typ):
return ir_tuple_from_args([ir_val])
else:
return ir_val
def set_type_for_external_return(ir_val):
# used for RHS promotion
ir_val.typ = calculate_type_for_external_return(ir_val.typ)
# return a dummy IRnode with the given type
def dummy_node_for_type(typ):
return IRnode("fake_node", typ=typ)
def _check_assign_bytes(left, right): # pragma: nocover
if right.typ.maxlen > left.typ.maxlen:
raise TypeMismatch(f"Cannot cast from {right.typ} to {left.typ}")
# stricter check for zeroing a byte array.
# TODO: these should be TypeCheckFailure instead of TypeMismatch
rlen = right.typ.maxlen
if right.is_empty_intrinsic and rlen != 0 and rlen != left.typ.maxlen:
raise TypeMismatch(f"Cannot cast from empty({right.typ}) to {left.typ}")
def _check_assign_list(left, right):
def FAIL(): # pragma: no cover
raise TypeCheckFailure(f"assigning {right.typ} to {left.typ}")
if left.value == "multi": # pragma: nocover
# Cannot do something like [a, b, c] = [1, 2, 3]
FAIL()
if isinstance(left.typ, SArrayT):
if not is_array_like(right.typ): # pragma: nocover
FAIL()
if left.typ.count != right.typ.count: # pragma: nocover
FAIL()
# TODO recurse into left, right if literals?
check_assign(
dummy_node_for_type(left.typ.value_type), dummy_node_for_type(right.typ.value_type)
)
if isinstance(left.typ, DArrayT):
if not isinstance(right.typ, DArrayT): # pragma: nocover
FAIL()
if left.typ.count < right.typ.count: # pragma: nocover
FAIL()
# stricter check for zeroing
if right.is_empty_intrinsic and right.typ.count != left.typ.count: # pragma: nocover
raise TypeCheckFailure(
f"Bad type for clearing bytes: expected {left.typ} but got {right.typ}"
)
# TODO recurse into left, right if literals?
check_assign(
dummy_node_for_type(left.typ.value_type), dummy_node_for_type(right.typ.value_type)
)
def _check_assign_tuple(left, right):
def FAIL(): # pragma: no cover
raise TypeCheckFailure(f"assigning {right.typ} to {left.typ}")
if not isinstance(right.typ, left.typ.__class__): # pragma: nocover
FAIL()
if isinstance(left.typ, StructT):
for k in left.typ.member_types:
if k not in right.typ.member_types: # pragma: nocover
FAIL()
# TODO recurse into left, right if literals?
check_assign(
dummy_node_for_type(left.typ.member_types[k]),
dummy_node_for_type(right.typ.member_types[k]),
)
for k in right.typ.member_types:
if k not in left.typ.member_types: # pragma: nocover
FAIL()
if left.typ.name != right.typ.name: # pragma: nocover
FAIL()
else:
if len(left.typ.member_types) != len(right.typ.member_types): # pragma: nocover
FAIL()
for left_, right_ in zip(left.typ.member_types, right.typ.member_types):
# TODO recurse into left, right if literals?
check_assign(dummy_node_for_type(left_), dummy_node_for_type(right_))
# sanity check an assignment
# typechecking source code is done at an earlier phase
# this function is more of a sanity check for typechecking internally
# generated assignments
# TODO: do we still need this?
def check_assign(left, right):
def FAIL(): # pragma: no cover
raise TypeCheckFailure(f"assigning {right.typ} to {left.typ} {left} {right}")
if isinstance(left.typ, _BytestringT):
_check_assign_bytes(left, right)
elif is_array_like(left.typ):
_check_assign_list(left, right)
elif is_tuple_like(left.typ):
_check_assign_tuple(left, right)
elif left.typ._is_prim_word:
# TODO once we propagate types from typechecker, introduce this check:
# if left.typ != right.typ: # pragma: nocover
# FAIL()
pass
else: # pragma: no cover
FAIL()
_label = 0
# TODO might want to coalesce with Context.fresh_varname
def _freshname(name):
global _label
_label += 1
return f"{name}{_label}"
def reset_names():
global _label
_label = 0
# could be refactored
ctx._alloca_id = 0
# returns True if t is ABI encoded and is a type that needs any kind of
# validation
def needs_clamp(t, encoding):
if encoding == Encoding.VYPER:
return False
if encoding != Encoding.ABI: # pragma: nocover
raise CompilerPanic("unreachable")
if isinstance(t, (_BytestringT, DArrayT)):
return True
if isinstance(t, FlagT):
return len(t._flag_members) < 256
if isinstance(t, SArrayT):
return needs_clamp(t.value_type, encoding)
if is_tuple_like(t):
return any(needs_clamp(m, encoding) for m in t.tuple_members())
if t._is_prim_word:
return t not in (INT256_T, UINT256_T, BYTES32_T)
raise CompilerPanic("unreachable") # pragma: nocover
# when abi encoded data is user provided and lives in memory,
# we risk either reading oob of the buffer or oob of the payload data.
# in these cases, we need additional validation.
def _dirty_read_risk(ir_node):
return ir_node.encoding == Encoding.ABI and ir_node.location == MEMORY
# child elements which have dynamic length, and could overflow the buffer
# even if the start of the item is in-bounds.
def _abi_payload_size(ir_node):
SCALE = ir_node.location.word_scale
assert SCALE == 32 # we must be in some byte-addressable region, like memory
OFFSET = DYNAMIC_ARRAY_OVERHEAD * SCALE
if isinstance(ir_node.typ, DArrayT):
# the amount of size each value occupies in static section
# (the amount of size it occupies in the dynamic section is handled in
# make_setter recursion)
item_size = ir_node.typ.value_type.abi_type.embedded_static_size()
return ["add", OFFSET, ["mul", get_dyn_array_count(ir_node), item_size]]
if isinstance(ir_node.typ, _BytestringT):
return ["add", OFFSET, get_bytearray_length(ir_node)]
raise CompilerPanic("unreachable") # pragma: nocover
def potential_overlap(left, right):
"""
Return true if make_setter(left, right) could potentially trample
src or dst during evaluation.
"""
if left.typ._is_prim_word and right.typ._is_prim_word:
return False
if len(left.referenced_variables & right.referenced_variables) > 0:
return True
if len(left.referenced_variables) > 0 and right.contains_risky_call:
return True
if left.contains_risky_call and len(right.referenced_variables) > 0:
return True
return False
# similar to `potential_overlap()`, but compares left's _reads_ vs
# right's _writes_.
# TODO: `potential_overlap()` can probably be replaced by this function,
# but all the cases need to be checked.
def read_write_overlap(left, right):
if not isinstance(left, IRnode) or not isinstance(right, IRnode):
return False
if left.typ._is_prim_word and right.typ._is_prim_word:
return False
if len(left.referenced_variables & right.variable_writes) > 0:
return True
if len(left.referenced_variables) > 0 and right.contains_risky_call:
return True
return False
# Create an x=y statement, where the types may be compound
def make_setter(left, right, hi=None):
check_assign(left, right)
if potential_overlap(left, right):
raise CompilerPanic("overlap between src and dst!")
# we need bounds checks when decoding from memory, otherwise we can
# get oob reads.
#
# the caller is responsible for calculating the bound;
# sanity check that there is a bound if there is dirty read risk
assert (hi is not None) == _dirty_read_risk(right)
# For types which occupy just one word we can use single load/store
if left.typ._is_prim_word:
enc = right.encoding # unwrap_location butchers encoding
right = unwrap_location(right)
# TODO rethink/streamline the clamp_basetype logic
if needs_clamp(right.typ, enc):
right = clamp_basetype(right)
return STORE(left, right)
# Byte arrays
elif isinstance(left.typ, _BytestringT):
# TODO rethink/streamline the clamp_basetype logic
if needs_clamp(right.typ, right.encoding):
with right.cache_when_complex("bs_ptr") as (b, right):
copier = make_byte_array_copier(left, right)
ret = b.resolve(["seq", clamp_bytestring(right, hi=hi), copier])
else:
ret = make_byte_array_copier(left, right)
return IRnode.from_list(ret)
elif isinstance(left.typ, DArrayT):
# TODO should we enable this?
# implicit conversion from sarray to darray
# if isinstance(right.typ, SArrayType):
# return _complex_make_setter(left, right)
# TODO rethink/streamline the clamp_basetype logic
if needs_clamp(right.typ, right.encoding):
with right.cache_when_complex("arr_ptr") as (b, right):
copier = _dynarray_make_setter(left, right, hi=hi)
ret = b.resolve(["seq", clamp_dyn_array(right, hi=hi), copier])
else:
ret = _dynarray_make_setter(left, right)
return IRnode.from_list(ret)
# Complex Types
assert isinstance(left.typ, (SArrayT, TupleT, StructT))
with right.cache_when_complex("c_right") as (b1, right):
ret = ["seq"]
if hi is not None:
item_end = add_ofst(right, right.typ.abi_type.static_size())
len_check = ["assert", ["le", item_end, hi]]
ret.append(len_check)
ret.append(_complex_make_setter(left, right, hi=hi))
return b1.resolve(IRnode.from_list(ret))
# whether a dedicated copy opcode is available to copy from right's location
# into left's (there is no such opcode for storage or transient storage)
def copy_opcode_available(left, right):
if left.location == MEMORY and right.location == MEMORY:
return version_check(begin="cancun")
return left.location == MEMORY and right.location.has_copy_opcode
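# setter for static arrays, tuples and structs: either batch-copy or unroll into per-member setters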
def _complex_make_setter(left, right, hi=None):
if right.is_empty_intrinsic and left.location == MEMORY:
# optimized memzero
return mzero(left, left.typ.memory_bytes_required)
ret = ["seq"]
if isinstance(left.typ, SArrayT):
n_items = right.typ.count
keys = [IRnode.from_list(i, typ=UINT256_T) for i in range(n_items)]
else:
assert is_tuple_like(left.typ)
keys = left.typ.tuple_keys()
# performance: if there is any dynamic data, there might be
# unused space between the end of the dynarray and the end of the buffer.
# for instance DynArray[uint256, 100] with runtime length of 5.
# in these cases, we recurse to dynarray make_setter which has its own
# heuristic for when to copy all data.
# use abi_type.is_dynamic since it is identical to the query "do any children
# have dynamic size"
has_dynamic_data = right.typ.abi_type.is_dynamic()
simple_encoding = right.encoding == Encoding.VYPER
if left.is_pointer and right.is_pointer and simple_encoding and not has_dynamic_data:
# both left and right are pointers, see if we want to batch copy
# instead of unrolling the loop.
assert left.encoding == Encoding.VYPER
len_ = left.typ.memory_bytes_required
# special logic for identity precompile (pre-cancun) in the else branch
mem2mem = left.location == right.location == MEMORY
if not copy_opcode_available(left, right) and not mem2mem:
if _opt_codesize():
# assuming PUSH2, a single sstore(dst (sload src)) is 8 bytes,
# sstore(add (dst ofst), (sload (add (src ofst)))) is 16 bytes,
# whereas loop overhead is 16-17 bytes.
base_cost = 3
if left._optimized.is_literal:
# code size is smaller since add is performed at compile-time
base_cost += 1
if right._optimized.is_literal:
base_cost += 1
# the formula is a heuristic, but it works.
# (CMC 2023-07-14 could get more detailed for PUSH1 vs
# PUSH2 etc but not worried about that too much now,
# it's probably better to add a proper unroll rule in the
# optimizer.)
should_batch_copy = len_ >= 32 * base_cost
elif _opt_gas():
# kind of arbitrary, but cut off when code used > ~160 bytes
should_batch_copy = len_ >= 32 * 10
else:
assert _opt_none()
# don't care, just generate the most readable version
should_batch_copy = True
else:
# find a cutoff for memory copy where identity is cheaper
# than unrolled mloads/mstores
# if MCOPY is available, mcopy is *always* better (except in
# the 1 word case, but that is already handled by copy_bytes).
if right.location == MEMORY and _opt_gas() and not version_check(begin="cancun"):
# cost for 0th word - (mstore dst (mload src))
base_unroll_cost = 12
nth_word_cost = base_unroll_cost
if not left._optimized.is_literal:
# (mstore (add N dst) (mload src))
nth_word_cost += 6
if not right._optimized.is_literal:
# (mstore dst (mload (add N src)))
nth_word_cost += 6
identity_base_cost = 115 # staticcall 4 gas dst len src len
n_words = ceil32(len_) // 32
should_batch_copy = (
base_unroll_cost + (nth_word_cost * (n_words - 1)) >= identity_base_cost
)
# calldata to memory, code to memory, cancun, or opt-codesize -
# batch copy is always better.
else:
should_batch_copy = True
if should_batch_copy:
return copy_bytes(left, right, len_, len_)
# general case, unroll
with left.cache_when_complex("_L") as (b1, left), right.cache_when_complex("_R") as (b2, right):
for k in keys:
l_i = get_element_ptr(left, k, array_bounds_check=False)
r_i = get_element_ptr(right, k, array_bounds_check=False)
ret.append(make_setter(l_i, r_i, hi=hi))
return b1.resolve(b2.resolve(IRnode.from_list(ret)))
def ensure_in_memory(ir_var, context):
"""
Ensure a variable is in memory. This is useful for functions
which expect to operate on memory variables.
"""
if ir_var.location == MEMORY:
return ir_var
return create_memory_copy(ir_var, context)
def create_memory_copy(ir_var, context):
typ = ir_var.typ
buf = context.new_internal_variable(typ)
do_copy = make_setter(buf, ir_var)
return IRnode.from_list(["seq", do_copy, buf], typ=typ, location=MEMORY)
def eval_seq(ir_node):
"""Tries to find the "return" value of a `seq` statement, in order so
that the value can be known without possibly evaluating side effects
"""
if ir_node.value in ("seq", "with") and len(ir_node.args) > 0:
return eval_seq(ir_node.args[-1])
if isinstance(ir_node.value, int):
return IRnode.from_list(ir_node)
return None
def mzero(dst, nbytes):
# calldatacopy from past-the-end gives zero bytes.
# cf. YP H.2 (ops section) with CALLDATACOPY spec.
return IRnode.from_list(
# calldatacopy mempos calldatapos len
["calldatacopy", dst, "calldatasize", nbytes],
annotation="mzero",
)
# zero pad a bytearray according to the ABI spec. The last word
# of the byte array needs to be right-padded with zeroes.
def zero_pad(bytez_placeholder):
len_ = ["mload", bytez_placeholder]
dst = ["add", ["add", bytez_placeholder, 32], "len"]
# the runtime length of the data rounded up to nearest 32
# from spec:
# the actual value of X as a byte sequence,
# followed by the *minimum* number of zero-bytes
# such that len(enc(X)) is a multiple of 32.
# optimized form of ceil32(len) - len:
num_zero_bytes = ["mod", ["sub", 0, "len"], 32]
return IRnode.from_list(
["with", "len", len_, ["with", "dst", dst, mzero("dst", num_zero_bytes)]],
annotation="Zero pad",
)
# convenience rewrites for shr/sar/shl
def shr(bits, x):
return ["shr", bits, x]
# convenience rewrites for shr/sar/shl
def shl(bits, x):
return ["shl", bits, x]
def sar(bits, x):
return ["sar", bits, x]
def clamp_bytestring(ir_node, hi=None):
t = ir_node.typ
if not isinstance(t, _BytestringT): # pragma: nocover
raise CompilerPanic(f"{t} passed to clamp_bytestring")
# check if byte array length is within type max
with get_bytearray_length(ir_node).cache_when_complex("length") as (b1, length):
len_check = ["assert", ["le", length, t.maxlen]]
assert (hi is not None) == _dirty_read_risk(ir_node)
if hi is not None:
assert t.maxlen < 2**64 # sanity check
# NOTE: this add does not risk arithmetic overflow because
# length is bounded by maxlen.
# however(!) _abi_payload_size can OOG, since it loads the word
# at `ir_node` to find the length of the bytearray, which could
# be out-of-bounds.
# if we didn't get OOG, we could overflow in `add`.
item_end = add_ofst(ir_node, _abi_payload_size(ir_node))
len_check = ["seq", ["assert", ["le", item_end, hi]], len_check]
return IRnode.from_list(b1.resolve(len_check), error_msg=f"{ir_node.typ} bounds check")
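# runtime check that a dynarray's element count does not exceed its type's bound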
def clamp_dyn_array(ir_node, hi=None):
t = ir_node.typ
assert isinstance(t, DArrayT)
len_check = ["assert", ["le", get_dyn_array_count(ir_node), t.count]]
assert (hi is not None) == _dirty_read_risk(ir_node)
if hi is not None:
assert t.count < 2**64 # sanity check
# NOTE: this add does not risk arithmetic overflow because
# length is bounded by count * elemsize.
# however(!) _abi_payload_size can OOG, since it loads the word
# at `ir_node` to find the length of the bytearray, which could
# be out-of-bounds.
# if we didn't get OOG, we could overflow in `add`.
item_end = add_ofst(ir_node, _abi_payload_size(ir_node))
# if the subtype is dynamic, the length check is performed in
# the recursion, UNLESS the count is zero. here we perform the
# check all the time, but it could maybe be optimized out in the
# make_setter loop (in the common case that runtime count > 0).
len_check = ["seq", ["assert", ["le", item_end, hi]], len_check]
return IRnode.from_list(len_check, error_msg=f"{ir_node.typ} bounds check")
# clampers for basetype
def clamp_basetype(ir_node):
t = ir_node.typ
if not t._is_prim_word: # pragma: nocover
raise CompilerPanic(f"{t} passed to clamp_basetype")
# copy of the input
ir_node = unwrap_location(ir_node)
if isinstance(t, FlagT):
bits = len(t._flag_members)
# assert x >> bits == 0
ret = int_clamp(ir_node, bits, signed=False)
elif isinstance(t, (IntegerT, DecimalT)):
if t.bits == 256:
ret = ir_node
else:
ret = int_clamp(ir_node, t.bits, signed=t.is_signed)
elif isinstance(t, BytesM_T):
if t.m == 32:
ret = ir_node # special case, no clamp.
else:
ret = bytes_clamp(ir_node, t.m)
elif isinstance(t, (AddressT, InterfaceT)):
ret = int_clamp(ir_node, 160)
elif t in (BoolT(),):
ret = int_clamp(ir_node, 1)
else: # pragma: no cover
raise CompilerPanic(f"{t} passed to clamp_basetype")
return IRnode.from_list(ret, typ=ir_node.typ, error_msg=f"validate {t}")
def int_clamp(ir_node, bits, signed=False):
"""Generalized clamper for integer types. Takes the number of bits,
whether it's signed, and returns an IR node which checks it is
in bounds. (Consumers should use clamp_basetype instead which uses
type-based dispatch and is a little safer.)
"""
if bits >= 256: # pragma: nocover
raise CompilerPanic(f"invalid clamp: {bits}>=256 ({ir_node})")
u = "u" if not signed else ""
msg = f"{u}int{bits} bounds check"
with ir_node.cache_when_complex("val") as (b, val):
if signed:
# example for bits==128:
# promote_signed_int(val, bits) is the "canonical" version of val
# if val is in bounds, the bits above bit 128 should be equal.
# (this works for both val >= 0 and val < 0. in the first case,
# all upper bits should be 0 if val is a valid int128,
# in the latter case, all upper bits should be 1.)
assertion = ["assert", ["eq", val, promote_signed_int(val, bits)]]
else:
assertion = ["assert", ["iszero", shr(bits, val)]]
assertion = IRnode.from_list(assertion, error_msg=msg)
ret = b.resolve(["seq", assertion, val])
return IRnode.from_list(ret, annotation=msg)
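# validate a bytesN value: assert all bytes below the top n_bytes are zero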
def bytes_clamp(ir_node: IRnode, n_bytes: int) -> IRnode:
if not (0 < n_bytes <= 32): # pragma: nocover
raise CompilerPanic(f"bad type: bytes{n_bytes}")
msg = f"bytes{n_bytes} bounds check"
with ir_node.cache_when_complex("val") as (b, val):
assertion = IRnode.from_list(["assert", ["iszero", shl(n_bytes * 8, val)]], error_msg=msg)
ret = b.resolve(["seq", assertion, val])
return IRnode.from_list(ret, annotation=msg)
# e.g. for int8, promote 255 to -1
def promote_signed_int(x, bits):
assert bits % 8 == 0
ret = ["signextend", bits // 8 - 1, x]
return IRnode.from_list(ret, annotation=f"promote int{bits}")
# general clamp function for all ops and numbers
def clamp(op, arg, bound):
with IRnode.from_list(arg).cache_when_complex("clamp_arg") as (b1, arg):
check = IRnode.from_list(["assert", [op, arg, bound]], error_msg=f"clamp {op} {bound}")
ret = ["seq", check, arg]
return IRnode.from_list(b1.resolve(ret), typ=arg.typ)
def clamp_nonzero(arg):
# TODO: use clamp("ne", arg, 0) once optimizer rules can handle it
with IRnode.from_list(arg).cache_when_complex("should_nonzero") as (b1, arg):
check = IRnode.from_list(["assert", arg], error_msg="check nonzero")
ret = ["seq", check, arg]
return IRnode.from_list(b1.resolve(ret), typ=arg.typ)
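# assert arg <= hi (signed or unsigned comparison), returning arg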
def clamp_le(arg, hi, signed):
LE = "sle" if signed else "le"
return clamp(LE, arg, hi)
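# assert lo <= arg <= hi (signed or unsigned comparison), returning arg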
def clamp2(lo, arg, hi, signed):
with IRnode.from_list(arg).cache_when_complex("clamp2_arg") as (b1, arg):
GE = "sge" if signed else "ge"
LE = "sle" if signed else "le"
ret = ["seq", ["assert", ["and", [GE, arg, lo], [LE, arg, hi]]], arg]
return IRnode.from_list(b1.resolve(ret), typ=arg.typ)
# make sure we don't overrun the source buffer, checking for overflow:
# valid inputs satisfy:
# `assert !(start+length > src_len || start+length < start)`
def check_buffer_overflow_ir(start, length, src_len):
with start.cache_when_complex("start") as (b1, start):
with add_ofst(start, length).cache_when_complex("end") as (b2, end):
arithmetic_overflow = ["lt", end, start]
buffer_oob = ["gt", end, src_len]
ok = ["iszero", ["or", arithmetic_overflow, buffer_oob]]
return b1.resolve(b2.resolve(["assert", ok]))
| _InternalBufferT |
python | lepture__mistune | src/mistune/directives/_fenced.py | {
"start": 533,
"end": 883
} | class ____(DirectiveParser):
name = "fenced_directive"
@staticmethod
def parse_type(m: Match[str]) -> str:
return m.group("type")
@staticmethod
def parse_title(m: Match[str]) -> str:
return m.group("title")
@staticmethod
def parse_content(m: Match[str]) -> str:
return m.group("text")
| FencedParser |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/operators/dataplex.py | {
"start": 168126,
"end": 172698
} | class ____(DataplexCatalogBaseOperator):
"""
Search for Entries matching the given query and scope.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:DataplexCatalogSearchEntriesOperator`
:param query: Required. The query against which entries in scope should be matched. The query
syntax is defined in `Search syntax for Dataplex Catalog
<https://cloud.google.com/dataplex/docs/search-syntax>`__.
:param order_by: Optional. Specifies the ordering of results. Supported values are:
- ``relevance`` (default)
- ``last_modified_timestamp``
- ``last_modified_timestamp asc``
:param scope: Optional. The scope under which the search should be operating. It must either be
``organizations/<org_id>`` or ``projects/<project_ref>``. If it is unspecified, it
defaults to the organization where the project provided in ``name`` is located.
:param page_size: Optional. Number of items to return per page. If there are remaining results,
the service returns a next_page_token. If unspecified, the service returns at most 10 Entries.
The maximum value is 100; values above 100 will be coerced to 100.
:param page_token: Optional. Page token received from a previous ``ListEntries`` call. Provide
this to retrieve the subsequent page.
:param project_id: Required. The ID of the Google Cloud project where the service is used.
:param location: Required. The ID of the Google Cloud region where the service is used.
:param gcp_conn_id: Optional. The connection ID to use to connect to Google Cloud.
:param retry: Optional. A retry object used to retry requests. If `None` is specified, requests will not
be retried.
:param timeout: Optional. The amount of time, in seconds, to wait for the request to complete.
Note that if `retry` is specified, the timeout applies to each individual attempt.
:param metadata: Optional. Additional metadata that is provided to the method.
:param impersonation_chain: Optional. Service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account (templated).
"""
template_fields: Sequence[str] = tuple({"query"} | set(DataplexCatalogBaseOperator.template_fields))
def __init__(
self,
query: str,
order_by: str | None = None,
scope: str | None = None,
page_size: int | None = None,
page_token: str | None = None,
*args,
**kwargs,
) -> None:
super().__init__(*args, **kwargs)
self.query = query
self.page_size = page_size
self.page_token = page_token
self.order_by = order_by
self.scope = scope
def execute(self, context: Context):
self.log.info(
"Listing Entries from location %s matching the given query %s and scope %s.",
self.location,
self.query,
self.scope,
)
try:
entries_on_page = self.hook.search_entries(
query=self.query,
location=self.location,
project_id=self.project_id,
page_size=self.page_size,
page_token=self.page_token,
order_by=self.order_by,
retry=self.retry,
timeout=self.timeout,
metadata=self.metadata,
)
self.log.info("Entries on page: %s", entries_on_page)
context["ti"].xcom_push(
key="entry_page",
value=SearchEntriesResponse.to_dict(entries_on_page._response),
)
except Exception as ex:
raise AirflowException(ex)
# Constructing list to return Entries in readable format
entries_list = [
MessageToDict(entry._pb, preserving_proto_field_name=True)
for entry in next(iter(entries_on_page.pages)).results
]
return entries_list
| DataplexCatalogSearchEntriesOperator |
python | kamyu104__LeetCode-Solutions | Python/check-if-string-is-a-prefix-of-array.py | {
"start": 461,
"end": 869
} | class ____(object):
def isPrefixString(self, s, words):
"""
:type s: str
:type words: List[str]
:rtype: bool
"""
i = 0
for word in words:
for c in word:
if i == len(s) or s[i] != c:
return False
i += 1
if i == len(s):
return True
return False
| Solution2 |
python | django__django | tests/i18n/patterns/tests.py | {
"start": 9463,
"end": 12017
} | class ____(URLTestCaseBase):
"""
Tests if the user gets redirected to the right URL when there is no
language-prefix in the request URL.
"""
def test_no_prefix_response(self):
response = self.client.get("/not-prefixed/")
self.assertEqual(response.status_code, 200)
def test_en_redirect(self):
response = self.client.get(
"/account/register/", headers={"accept-language": "en"}
)
self.assertRedirects(response, "/en/account/register/")
response = self.client.get(response.headers["location"])
self.assertEqual(response.status_code, 200)
def test_en_redirect_wrong_url(self):
response = self.client.get(
"/profiel/registreren/", headers={"accept-language": "en"}
)
self.assertEqual(response.status_code, 404)
def test_nl_redirect(self):
response = self.client.get(
"/profiel/registreren/", headers={"accept-language": "nl"}
)
self.assertRedirects(response, "/nl/profiel/registreren/")
response = self.client.get(response.headers["location"])
self.assertEqual(response.status_code, 200)
def test_nl_redirect_wrong_url(self):
response = self.client.get(
"/account/register/", headers={"accept-language": "nl"}
)
self.assertEqual(response.status_code, 404)
def test_pt_br_redirect(self):
response = self.client.get(
"/conta/registre-se/", headers={"accept-language": "pt-br"}
)
self.assertRedirects(response, "/pt-br/conta/registre-se/")
response = self.client.get(response.headers["location"])
self.assertEqual(response.status_code, 200)
def test_pl_pl_redirect(self):
# language from outside of the supported LANGUAGES list
response = self.client.get(
"/account/register/", headers={"accept-language": "pl-pl"}
)
self.assertRedirects(response, "/en/account/register/")
response = self.client.get(response.headers["location"])
self.assertEqual(response.status_code, 200)
@override_settings(
MIDDLEWARE=[
"i18n.patterns.tests.PermanentRedirectLocaleMiddleWare",
"django.middleware.common.CommonMiddleware",
],
)
def test_custom_redirect_class(self):
response = self.client.get(
"/account/register/", headers={"accept-language": "en"}
)
self.assertRedirects(response, "/en/account/register/", 301)
| URLRedirectTests |
python | jazzband__django-pipeline | pipeline/compressors/uglifyjs.py | {
"start": 91,
"end": 354
} | class ____(SubProcessCompressor):
def compress_js(self, js):
command = (settings.UGLIFYJS_BINARY, settings.UGLIFYJS_ARGUMENTS)
if self.verbose:
command += " --verbose"
return self.execute_command(command, js)
| UglifyJSCompressor |
python | django__django | django/core/files/uploadhandler.py | {
"start": 1397,
"end": 1615
} | class ____(UploadFileException):
"""
Upload handlers that have handled a file and do not want future handlers to
run should raise this exception instead of returning None.
"""
pass
| StopFutureHandlers |
python | charliermarsh__ruff | crates/ruff_linter/resources/test/fixtures/pyupgrade/UP046_0.py | {
"start": 1546,
"end": 1698
} | class ____(Base1, Generic[T], Base2):
var: T
# runtime `TypeError` to inherit from `Generic` multiple times, but we still
# emit a diagnostic
| Sandwich |
python | pytorch__pytorch | torch/_inductor/shape_propagation.py | {
"start": 2241,
"end": 4565
} | class ____:
"""
Propagate shape from args to output
"""
@staticmethod
def constant(value: torch.types.Number, dtype: torch.dtype) -> BlockShapeType:
# See implementation of constant for triton for the reason
from torch._inductor.codegen.triton import triton_compute_type, TritonKernel
triton_type = triton_compute_type(dtype)
if isinstance(V.kernel, TritonKernel) and triton_type != "tl.float32":
ndim = V.kernel.triton_tensor_ndim()
return tuple([1] * ndim)
else:
return ()
@staticmethod
def store_reduction(name: str, index: int, value: ShapeArg) -> None:
return None
@staticmethod
def reduction(
dtype: torch.dtype,
src_dtype: torch.dtype,
reduction_type: str,
value: Union[ShapeArg, tuple[ShapeArg, ...]],
) -> Union[BlockShapeType, tuple[BlockShapeType, ...]]:
raise NotImplementedError
@staticmethod
def store(
name: str, index: int, value: ShapeArg, mode: Optional[str] = None
) -> None:
return None
@staticmethod
def to_dtype(
value: ShapeVar,
dtype: torch.dtype,
src_dtype: Optional[torch.dtype] = None,
use_compute_types: bool = True,
) -> BlockShapeType:
return value.shape
@staticmethod
def dot(a: sympy.Expr, b: sympy.Expr) -> BlockShapeType:
from torch._inductor.codegen.triton import TritonKernel
assert isinstance(V.kernel, TritonKernel), "dot supports Triton only"
return ("YBLOCK", "XBLOCK")
@staticmethod
def index_expr(expr: sympy.Expr, dtype: torch.dtype) -> BlockShapeType:
# shape is implicitly embedded in expr.
return None
@staticmethod
def load_seed(name: str, offset: int) -> BlockShapeType:
return ()
@staticmethod
def indirect_indexing(
var: ShapeArg,
size: Union[sympy.Expr, int],
check: bool = True,
wrap_neg: bool = True,
) -> None:
return None
def __getattr__(self, name: str) -> Callable[..., BlockShapeType]:
return lambda *args, **kwargs: broadcast_shapes_for_args(args)
@staticmethod
def device_assert_async(cond: ShapeArg, msg: str) -> None:
return None
| ShapePropagationOpsHandler |
python | tensorflow__tensorflow | tensorflow/python/training/adadelta.py | {
"start": 1008,
"end": 7315
} | class ____(optimizer.Optimizer):
"""Optimizer that implements the Adadelta algorithm.
References:
ADADELTA - An Adaptive Learning Rate Method:
[Zeiler, 2012](http://arxiv.org/abs/1212.5701)
([pdf](http://arxiv.org/pdf/1212.5701v1.pdf))
@compatibility(TF2)
tf.compat.v1.train.AdadeltaOptimizer is compatible with eager mode and
`tf.function`.
When eager execution is enabled, `learning_rate`, `rho`,
and `epsilon` can each be a callable that
takes no arguments and returns the actual value to use. This can be useful
for changing these values across different invocations of optimizer
functions.
To switch to native TF2 style, use [`tf.keras.optimizers.Adadelta`]
(https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/Adadelta)
instead. Please notice that due to the implementation differences,
`tf.keras.optimizers.Adadelta` and
`tf.compat.v1.train.AdadeltaOptimizer` may have slight differences in
floating point numerics even though the formula used for the variable
updates still matches.
#### Structural mapping to native TF2
Before:
```python
optimizer = tf.compat.v1.train.AdadeltaOptimizer(
learning_rate=learning_rate,
rho=rho,
epsilon=epsilon)
```
After:
```python
optimizer = tf.keras.optimizers.Adadelta(
learning_rate=learning_rate,
rho=rho,
epsilon=epsilon)
```
#### How to map arguments
| TF1 Arg Name | TF2 Arg Name | Note |
| ------------------ | ------------- | ------------------------------- |
| `learning_rate` | `learning_rate`| Be careful of setting |
: : : learning_rate tensor value computed from the global step. :
: : : In TF1 this was usually meant to imply a dynamic learning rate and :
: : : would recompute in each step. In TF2 (eager + function) it will :
: : : treat it as a scalar value that only gets computed once instead of :
: : : a symbolic placeholder to be computed each time. :
| `rho` | `rho` | - |
| `epsilon` | `epsilon` | Default value is 1e-08 in TF1, |
: : : but 1e-07 in TF2. :
| `use_locking` | - | Not applicable in TF2. |
#### Before & after usage example
Before:
```python
x = tf.Variable([1,2,3], dtype=tf.float32)
grad = tf.constant([0.1, 0.2, 0.3])
optimizer = tf.compat.v1.train.AdadeltaOptimizer(learning_rate=0.001)
optimizer.apply_gradients(zip([grad], [x]))
```
After:
```python
x = tf.Variable([1,2,3], dtype=tf.float32)
grad = tf.constant([0.1, 0.2, 0.3])
optimizer = tf.keras.optimizers.Adadelta(learning_rate=0.001)
optimizer.apply_gradients(zip([grad], [x]))
```
@end_compatibility
"""
def __init__(self, learning_rate=0.001, rho=0.95, epsilon=1e-8,
use_locking=False, name="Adadelta"):
"""Construct a new Adadelta optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
To match the exact form in the original paper use 1.0.
rho: A `Tensor` or a floating point value. The decay rate.
epsilon: A `Tensor` or a floating point value. A constant epsilon used
to better conditioning the grad update.
use_locking: If `True` use locks for update operations.
name: Optional name prefix for the operations created when applying
gradients. Defaults to "Adadelta".
"""
super(AdadeltaOptimizer, self).__init__(use_locking, name)
self._lr = learning_rate
self._rho = rho
self._epsilon = epsilon
# Tensor versions of the constructor arguments, created in _prepare().
self._lr_t = None
self._rho_t = None
self._epsilon_t = None
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "accum", self._name)
self._zeros_slot(v, "accum_update", self._name)
def _prepare(self):
lr = self._call_if_callable(self._lr)
rho = self._call_if_callable(self._rho)
epsilon = self._call_if_callable(self._epsilon)
self._lr_t = ops.convert_to_tensor(lr, name="lr")
self._rho_t = ops.convert_to_tensor(rho, name="rho")
self._epsilon_t = ops.convert_to_tensor(epsilon, name="epsilon")
def _apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return gen_training_ops.apply_adadelta(
var,
accum,
accum_update,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._rho_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _resource_apply_dense(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return gen_training_ops.resource_apply_adadelta(
var.handle,
accum.handle,
accum_update.handle,
math_ops.cast(self._lr_t, grad.dtype.base_dtype),
math_ops.cast(self._rho_t, grad.dtype.base_dtype),
math_ops.cast(self._epsilon_t, grad.dtype.base_dtype),
grad,
use_locking=self._use_locking)
def _apply_sparse(self, grad, var):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return gen_training_ops.sparse_apply_adadelta(
var,
accum,
accum_update,
math_ops.cast(self._lr_t, var.dtype.base_dtype),
math_ops.cast(self._rho_t, var.dtype.base_dtype),
math_ops.cast(self._epsilon_t, var.dtype.base_dtype),
grad.values,
grad.indices,
use_locking=self._use_locking)
def _resource_apply_sparse(self, grad, var, indices):
accum = self.get_slot(var, "accum")
accum_update = self.get_slot(var, "accum_update")
return gen_training_ops.resource_sparse_apply_adadelta(
var.handle,
accum.handle,
accum_update.handle,
math_ops.cast(self._lr_t, grad.dtype),
math_ops.cast(self._rho_t, grad.dtype),
math_ops.cast(self._epsilon_t, grad.dtype),
grad,
indices,
use_locking=self._use_locking)
| AdadeltaOptimizer |
python | wandb__wandb | wandb/vendor/pygments/lexers/templates.py | {
"start": 55338,
"end": 55979
} | class ____(DelegatingLexer):
"""
Subclass of the `LassoLexer` which highlights unhandled data with the
`XmlLexer`.
.. versionadded:: 1.6
"""
name = 'XML+Lasso'
aliases = ['xml+lasso']
alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
'*.incl', '*.inc', '*.las']
mimetypes = ['application/xml+lasso']
def __init__(self, **options):
super(LassoXmlLexer, self).__init__(XmlLexer, LassoLexer, **options)
def analyse_text(text):
rv = LassoLexer.analyse_text(text) - 0.01
if looks_like_xml(text):
rv += 0.4
return rv
| LassoXmlLexer |
python | dask__dask | dask/dataframe/dask_expr/_merge.py | {
"start": 24040,
"end": 28397
} | class ____(Merge, PartitionsFiltered):
_parameters = [
"left",
"right",
"how",
"left_on",
"right_on",
"left_index",
"right_index",
"suffixes",
"indicator",
"_partitions",
]
_defaults = {
"how": "inner",
"left_on": None,
"right_on": None,
"left_index": None,
"right_index": None,
"suffixes": ("_x", "_y"),
"indicator": False,
"_partitions": None,
}
def _divisions(self):
if self.broadcast_side == "left":
if self.right_index:
return self.right.divisions
npartitions = self.right.npartitions
else:
if self.left_index:
return self.left.divisions
npartitions = self.left.npartitions
return (None,) * (npartitions + 1)
def _simplify_up(self, parent, dependents):
return
def _lower(self):
return None
def _layer(self) -> dict:
if self.broadcast_side == "left":
bcast_name = self.left._name
bcast_size = self.left.npartitions
other = self.right._name
other_on = self.right_on
else:
bcast_name = self.right._name
bcast_size = self.right.npartitions
other = self.left._name
other_on = self.left_on
split_name = f"split-{self._name}"
inter_name = f"inter-{self._name}"
kwargs = {
"how": self.how,
"indicator": self.indicator,
"left_index": self.left_index,
"right_index": self.right_index,
"suffixes": self.suffixes,
"result_meta": self._meta,
"left_on": self.left_on,
"right_on": self.right_on,
}
dsk = {}
for part_out in self._partitions:
if self.how != "inner":
dsk[(split_name, part_out)] = (
_split_partition,
(other, part_out),
other_on,
bcast_size,
)
_concat_list = []
for j in range(bcast_size):
# Specify arg list for `merge_chunk`
_merge_args = [
(
(
operator.getitem,
(split_name, part_out),
j,
)
if self.how != "inner"
else (other, part_out)
),
(bcast_name, j),
]
if self.broadcast_side in ("left", "leftsemi"):
_merge_args.reverse()
inter_key = (inter_name, part_out, j)
dsk[(inter_name, part_out, j)] = ( # type: ignore [assignment, index]
apply,
_merge_chunk_wrapper,
_merge_args,
kwargs,
)
_concat_list.append(inter_key)
dsk[(self._name, part_out)] = (_concat_wrapper, _concat_list) # type: ignore[assignment]
return dsk
def create_assign_index_merge_transfer():
from distributed.shuffle._core import ShuffleId
from distributed.shuffle._merge import merge_transfer
def assign_index_merge_transfer(
df,
index,
name,
npartitions,
id: ShuffleId,
input_partition: int,
index_merge,
):
if index_merge:
index = df[[]].copy()
index["_index"] = df.index
else:
index = _select_columns_or_index(df, index)
if isinstance(index, (str, list, tuple)):
# Assume column selection from df
index = [index] if isinstance(index, str) else list(index)
index = df[index]
dtypes = {}
for col, dtype in index.dtypes.items():
if _is_numeric_cast_type(dtype):
dtypes[col] = np.float64
if dtypes:
index = index.astype(dtypes, errors="ignore")
index = partitioning_index(index, npartitions)
df = df.assign(**{name: index})
return merge_transfer(df, id, input_partition)
return assign_index_merge_transfer
| BroadcastJoin |
python | PyCQA__pylint | tests/functional/ext/docparams/return/missing_return_doc_Google.py | {
"start": 1806,
"end": 2169
} | class ____:
"""test_finds_annotation_property_return_type_google
Example of a property having return documentation in
a Google style docstring
"""
@property
def foo_method(self) -> int:
"""docstring ...
Raises:
RuntimeError: Always
"""
raise RuntimeError()
return 10 # [unreachable]
| Foo |
python | sqlalchemy__sqlalchemy | test/typing/plain_files/orm/session.py | {
"start": 975,
"end": 4362
} | class ____(Base):
__tablename__ = "address"
id: Mapped[int] = mapped_column(primary_key=True)
user_id = mapped_column(ForeignKey("user.id"))
email: Mapped[str]
user: Mapped[User] = relationship(back_populates="addresses")
e = create_engine("sqlite://")
Base.metadata.create_all(e)
with Session(e) as sess:
u1 = User(name="u1")
sess.add(u1)
sess.add_all([Address(user=u1, email="e1"), Address(user=u1, email="e2")])
sess.commit()
q = sess.query(User).filter_by(id=7)
assert_type(q, Query[User])
rows1 = q.all()
assert_type(rows1, list[User])
q2 = sess.query(User.id).filter_by(id=7)
rows2 = q2.all()
assert_type(rows2, list[Row[int]])
# test #8280
sess.query(User).update(
{"name": User.name + " some name"}, synchronize_session="fetch"
)
sess.query(User).update(
{"name": User.name + " some name"}, synchronize_session=False
)
sess.query(User).update(
{"name": User.name + " some name"}, synchronize_session="evaluate"
)
sess.query(User).update(
{"name": User.name + " some name"},
# EXPECTED_MYPY: Argument "synchronize_session" to "update" of "Query" has incompatible type # noqa: E501
synchronize_session="invalid",
)
sess.query(User).update({"name": User.name + " some name"})
# test #9125
for row in sess.query(User.id, User.name):
assert_type(row, Row[int, str])
for uobj1 in sess.query(User):
assert_type(uobj1, User)
sess.query(User).limit(None).offset(None).limit(10).offset(10).limit(
User.id
).offset(User.id)
# test #11083
with sess.begin() as tx:
assert_type(tx, SessionTransaction)
# more result tests in typed_results.py
def test_with_for_update() -> None:
"""test #9762"""
sess = Session()
ss = scoped_session(sessionmaker())
sess.get(User, 1)
sess.get(User, 1, with_for_update=True)
ss.get(User, 1)
ss.get(User, 1, with_for_update=True)
u1 = User()
sess.refresh(u1)
sess.refresh(u1, with_for_update=True)
ss.refresh(u1)
ss.refresh(u1, with_for_update=True)
async def test_with_for_update_async() -> None:
"""test #9762"""
sess = AsyncSession()
ss = async_scoped_session(
async_sessionmaker(), scopefunc=asyncio.current_task
)
await sess.get(User, 1)
await sess.get(User, 1, with_for_update=True)
await ss.get(User, 1)
await ss.get(User, 1, with_for_update=True)
u1 = User()
await sess.refresh(u1)
await sess.refresh(u1, with_for_update=True)
await ss.refresh(u1)
await ss.refresh(u1, with_for_update=True)
def test_exec_options() -> None:
"""test #10182"""
session = Session()
session.connection(
execution_options={"isolation_level": "REPEATABLE READ"}
)
scoped = scoped_session(sessionmaker())
scoped.connection(execution_options={"isolation_level": "REPEATABLE READ"})
async def async_test_exec_options() -> None:
"""test #10182"""
session = AsyncSession()
await session.connection(
execution_options={"isolation_level": "REPEATABLE READ"}
)
scoped = async_scoped_session(
async_sessionmaker(), scopefunc=asyncio.current_task
)
await scoped.connection(
execution_options={"isolation_level": "REPEATABLE READ"}
)
| Address |
python | kamyu104__LeetCode-Solutions | Python/number-of-substrings-containing-all-three-characters.py | {
"start": 354,
"end": 793
} | class ____(object):
def numberOfSubstrings(self, s):
"""
:type s: str
:rtype: int
"""
result, left, count = 0, 0, [0]*3
for right, c in enumerate(s):
count[ord(s[right])-ord('a')] += 1
while all(count):
count[ord(s[left])-ord('a')] -= 1
left += 1
result += left
return result
# Time: O(n)
# Space: O(1)
| Solution2 |
python | django-haystack__django-haystack | haystack/admin.py | {
"start": 6290,
"end": 6358
} | class ____(SearchModelAdminMixin, ModelAdmin):
pass
| SearchModelAdmin |
python | modin-project__modin | modin/core/dataframe/pandas/interchange/dataframe_protocol/exception.py | {
"start": 1035,
"end": 1172
} | class ____(Exception):
"""Exception to be raised if there is no offsets buffer for ``PandasProtocolColumn``."""
pass
| NoOffsetsBuffer |
python | walkccc__LeetCode | solutions/1332. Remove Palindromic Subsequences/1332.py | {
"start": 0,
"end": 100
} | class ____:
def removePalindromeSub(self, s: str) -> int:
return 1 if s == s[::-1] else 2
| Solution |
python | pallets__werkzeug | src/werkzeug/routing/exceptions.py | {
"start": 1769,
"end": 4401
} | class ____(RoutingException, LookupError):
"""Raised if the build system cannot find a URL for an endpoint with the
values provided.
"""
def __init__(
self,
endpoint: t.Any,
values: t.Mapping[str, t.Any],
method: str | None,
adapter: MapAdapter | None = None,
) -> None:
super().__init__(endpoint, values, method)
self.endpoint = endpoint
self.values = values
self.method = method
self.adapter = adapter
@cached_property
def suggested(self) -> Rule | None:
return self.closest_rule(self.adapter)
def closest_rule(self, adapter: MapAdapter | None) -> Rule | None:
def _score_rule(rule: Rule) -> float:
return sum(
[
0.98
* difflib.SequenceMatcher(
# endpoints can be any type, compare as strings
None,
str(rule.endpoint),
str(self.endpoint),
).ratio(),
0.01 * bool(set(self.values or ()).issubset(rule.arguments)),
0.01 * bool(rule.methods and self.method in rule.methods),
]
)
if adapter and adapter.map._rules:
return max(adapter.map._rules, key=_score_rule)
return None
def __str__(self) -> str:
message = [f"Could not build url for endpoint {self.endpoint!r}"]
if self.method:
message.append(f" ({self.method!r})")
if self.values:
message.append(f" with values {sorted(self.values)!r}")
message.append(".")
if self.suggested:
if self.endpoint == self.suggested.endpoint:
if (
self.method
and self.suggested.methods is not None
and self.method not in self.suggested.methods
):
message.append(
" Did you mean to use methods"
f" {sorted(self.suggested.methods)!r}?"
)
missing_values = self.suggested.arguments.union(
set(self.suggested.defaults or ())
) - set(self.values.keys())
if missing_values:
message.append(
f" Did you forget to specify values {sorted(missing_values)!r}?"
)
else:
message.append(f" Did you mean {self.suggested.endpoint!r} instead?")
return "".join(message)
| BuildError |
python | redis__redis-py | redis/commands/search/reducers.py | {
"start": 908,
"end": 1119
} | class ____(FieldOnlyReducer):
"""
Calculates the largest value in the given field within the group
"""
NAME = "MAX"
def __init__(self, field: str) -> None:
super().__init__(field)
| max |
python | python-pillow__Pillow | src/PIL/PcdImagePlugin.py | {
"start": 539,
"end": 1774
} | class ____(ImageFile.ImageFile):
format = "PCD"
format_description = "Kodak PhotoCD"
def _open(self) -> None:
# rough
assert self.fp is not None
self.fp.seek(2048)
s = self.fp.read(1539)
if not s.startswith(b"PCD_"):
msg = "not a PCD file"
raise SyntaxError(msg)
orientation = s[1538] & 3
self.tile_post_rotate = None
if orientation == 1:
self.tile_post_rotate = 90
elif orientation == 3:
self.tile_post_rotate = 270
self._mode = "RGB"
self._size = (512, 768) if orientation in (1, 3) else (768, 512)
self.tile = [ImageFile._Tile("pcd", (0, 0, 768, 512), 96 * 2048)]
def load_prepare(self) -> None:
if self._im is None and self.tile_post_rotate:
self.im = Image.core.new(self.mode, (768, 512))
ImageFile.ImageFile.load_prepare(self)
def load_end(self) -> None:
if self.tile_post_rotate:
# Handle rotated PCDs
self.im = self.rotate(self.tile_post_rotate, expand=True).im
#
# registry
Image.register_open(PcdImageFile.format, PcdImageFile)
Image.register_extension(PcdImageFile.format, ".pcd")
| PcdImageFile |
python | walkccc__LeetCode | solutions/2708. Maximum Strength of a Group/2708.py | {
"start": 0,
"end": 665
} | class ____:
def maxStrength(self, nums: list[int]) -> int:
posProd = 1
negProd = 1
maxNeg = -math.inf
negCount = 0
hasPos = False
hasZero = False
for num in nums:
if num > 0:
posProd *= num
hasPos = True
elif num < 0:
negProd *= num
maxNeg = max(maxNeg, num)
negCount += 1
else: # num == 0
hasZero = True
if negCount == 0 and not hasPos:
return 0
if negCount % 2 == 0:
return negProd * posProd
if negCount >= 3:
return negProd // maxNeg * posProd
if hasPos:
return posProd
if hasZero:
return 0
return maxNeg
| Solution |
python | getsentry__sentry | tests/sentry_plugins/victorops/test_plugin.py | {
"start": 457,
"end": 712
} | class ____(Interface):
def to_string(self, event: Event) -> str:
return self.body
def get_title(self) -> str:
return self.title
def test_conf_key() -> None:
assert VictorOpsPlugin().conf_key == "victorops"
| UnicodeTestInterface |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/pyproject_hooks/_impl.py | {
"start": 662,
"end": 926
} | class ____(Exception):
"""Will be raised if the backend is invalid."""
def __init__(self, backend_name, backend_path, message):
super().__init__(message)
self.backend_name = backend_name
self.backend_path = backend_path
| BackendInvalid |
python | django__django | tests/admin_views/models.py | {
"start": 11548,
"end": 11684
} | class ____(Doodad):
owner = models.ForeignKey(Collector, models.CASCADE)
expensive = models.BooleanField(default=True)
| FancyDoodad |
python | geekcomputers__Python | venv/Lib/site-packages/pip/_vendor/rich/segment.py | {
"start": 1206,
"end": 21383
} | class ____(NamedTuple):
"""A piece of text with associated style. Segments are produced by the Console render process and
are ultimately converted into strings to be written to the terminal.
Args:
text (str): A piece of text.
style (:class:`~rich.style.Style`, optional): An optional style to apply to the text.
control (Tuple[ControlCode], optional): Optional sequence of control codes.
Attributes:
cell_length (int): The cell length of this Segment.
"""
text: str
style: Optional[Style] = None
control: Optional[Sequence[ControlCode]] = None
@property
def cell_length(self) -> int:
"""The number of terminal cells required to display self.text.
Returns:
int: A number of cells.
"""
text, _style, control = self
return 0 if control else cell_len(text)
def __rich_repr__(self) -> Result:
yield self.text
if self.control is None:
if self.style is not None:
yield self.style
else:
yield self.style
yield self.control
def __bool__(self) -> bool:
"""Check if the segment contains text."""
return bool(self.text)
@property
def is_control(self) -> bool:
"""Check if the segment contains control codes."""
return self.control is not None
@classmethod
@lru_cache(1024 * 16)
def _split_cells(cls, segment: "Segment", cut: int) -> Tuple["Segment", "Segment"]:
text, style, control = segment
_Segment = Segment
cell_length = segment.cell_length
if cut >= cell_length:
return segment, _Segment("", style, control)
cell_size = get_character_cell_size
pos = int((cut / cell_length) * (len(text) - 1))
before = text[:pos]
cell_pos = cell_len(before)
if cell_pos == cut:
return (
_Segment(before, style, control),
_Segment(text[pos:], style, control),
)
while pos < len(text):
char = text[pos]
pos += 1
cell_pos += cell_size(char)
before = text[:pos]
if cell_pos == cut:
return (
_Segment(before, style, control),
_Segment(text[pos:], style, control),
)
if cell_pos > cut:
return (
_Segment(before[: pos - 1] + " ", style, control),
_Segment(" " + text[pos:], style, control),
)
raise AssertionError("Will never reach here")
def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
"""Split segment in to two segments at the specified column.
If the cut point falls in the middle of a 2-cell wide character then it is replaced
by two spaces, to preserve the display width of the parent segment.
Returns:
Tuple[Segment, Segment]: Two segments.
"""
text, style, control = self
if _is_single_cell_widths(text):
# Fast path with all 1 cell characters
if cut >= len(text):
return self, Segment("", style, control)
return (
Segment(text[:cut], style, control),
Segment(text[cut:], style, control),
)
return self._split_cells(self, cut)
@classmethod
def line(cls) -> "Segment":
"""Make a new line segment."""
return cls("\n")
@classmethod
def apply_style(
cls,
segments: Iterable["Segment"],
style: Optional[Style] = None,
post_style: Optional[Style] = None,
) -> Iterable["Segment"]:
"""Apply style(s) to an iterable of segments.
Returns an iterable of segments where the style is replaced by ``style + segment.style + post_style``.
Args:
segments (Iterable[Segment]): Segments to process.
style (Style, optional): Base style. Defaults to None.
post_style (Style, optional): Style to apply on top of segment style. Defaults to None.
Returns:
Iterable[Segments]: A new iterable of segments (possibly the same iterable).
"""
result_segments = segments
if style:
apply = style.__add__
result_segments = (
cls(text, None if control else apply(_style), control)
for text, _style, control in result_segments
)
if post_style:
result_segments = (
cls(
text,
(
None
if control
else (_style + post_style if _style else post_style)
),
control,
)
for text, _style, control in result_segments
)
return result_segments
@classmethod
def filter_control(
cls, segments: Iterable["Segment"], is_control: bool = False
) -> Iterable["Segment"]:
"""Filter segments by ``is_control`` attribute.
Args:
segments (Iterable[Segment]): An iterable of Segment instances.
is_control (bool, optional): is_control flag to match in search.
Returns:
Iterable[Segment]: An iterable of Segment instances.
"""
if is_control:
return filter(attrgetter("control"), segments)
else:
return filterfalse(attrgetter("control"), segments)
@classmethod
def split_lines(cls, segments: Iterable["Segment"]) -> Iterable[List["Segment"]]:
"""Split a sequence of segments in to a list of lines.
Args:
segments (Iterable[Segment]): Segments potentially containing line feeds.
Yields:
Iterable[List[Segment]]: Iterable of segment lists, one per line.
"""
line: List[Segment] = []
append = line.append
for segment in segments:
if "\n" in segment.text and not segment.control:
text, style, _ = segment
while text:
_text, new_line, text = text.partition("\n")
if _text:
append(cls(_text, style))
if new_line:
yield line
line = []
append = line.append
else:
append(segment)
if line:
yield line
@classmethod
def split_and_crop_lines(
cls,
segments: Iterable["Segment"],
length: int,
style: Optional[Style] = None,
pad: bool = True,
include_new_lines: bool = True,
) -> Iterable[List["Segment"]]:
"""Split segments in to lines, and crop lines greater than a given length.
Args:
segments (Iterable[Segment]): An iterable of segments, probably
generated from console.render.
length (int): Desired line length.
style (Style, optional): Style to use for any padding.
pad (bool): Enable padding of lines that are less than `length`.
Returns:
Iterable[List[Segment]]: An iterable of lines of segments.
"""
line: List[Segment] = []
append = line.append
adjust_line_length = cls.adjust_line_length
new_line_segment = cls("\n")
for segment in segments:
if "\n" in segment.text and not segment.control:
text, segment_style, _ = segment
while text:
_text, new_line, text = text.partition("\n")
if _text:
append(cls(_text, segment_style))
if new_line:
cropped_line = adjust_line_length(
line, length, style=style, pad=pad
)
if include_new_lines:
cropped_line.append(new_line_segment)
yield cropped_line
line.clear()
else:
append(segment)
if line:
yield adjust_line_length(line, length, style=style, pad=pad)
@classmethod
def adjust_line_length(
cls,
line: List["Segment"],
length: int,
style: Optional[Style] = None,
pad: bool = True,
) -> List["Segment"]:
"""Adjust a line to a given width (cropping or padding as required).
Args:
segments (Iterable[Segment]): A list of segments in a single line.
length (int): The desired width of the line.
style (Style, optional): The style of padding if used (space on the end). Defaults to None.
pad (bool, optional): Pad lines with spaces if they are shorter than `length`. Defaults to True.
Returns:
List[Segment]: A line of segments with the desired length.
"""
line_length = sum(segment.cell_length for segment in line)
new_line: List[Segment]
if line_length < length:
if pad:
new_line = line + [cls(" " * (length - line_length), style)]
else:
new_line = line[:]
elif line_length > length:
new_line = []
append = new_line.append
line_length = 0
for segment in line:
segment_length = segment.cell_length
if line_length + segment_length < length or segment.control:
append(segment)
line_length += segment_length
else:
text, segment_style, _ = segment
text = set_cell_size(text, length - line_length)
append(cls(text, segment_style))
break
else:
new_line = line[:]
return new_line
@classmethod
def get_line_length(cls, line: List["Segment"]) -> int:
"""Get the length of list of segments.
Args:
line (List[Segment]): A line encoded as a list of Segments (assumes no '\\\\n' characters),
Returns:
int: The length of the line.
"""
_cell_len = cell_len
return sum(_cell_len(text) for text, style, control in line if not control)
@classmethod
def get_shape(cls, lines: List[List["Segment"]]) -> Tuple[int, int]:
"""Get the shape (enclosing rectangle) of a list of lines.
Args:
lines (List[List[Segment]]): A list of lines (no '\\\\n' characters).
Returns:
Tuple[int, int]: Width and height in characters.
"""
get_line_length = cls.get_line_length
max_width = max(get_line_length(line) for line in lines) if lines else 0
return (max_width, len(lines))
@classmethod
def set_shape(
cls,
lines: List[List["Segment"]],
width: int,
height: Optional[int] = None,
style: Optional[Style] = None,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Set the shape of a list of lines (enclosing rectangle).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style, optional): Style of any padding added.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
_height = height or len(lines)
blank = (
[cls(" " * width + "\n", style)] if new_lines else [cls(" " * width, style)]
)
adjust_line_length = cls.adjust_line_length
shaped_lines = lines[:_height]
shaped_lines[:] = [
adjust_line_length(line, width, style=style) for line in lines
]
if len(shaped_lines) < _height:
shaped_lines.extend([blank] * (_height - len(shaped_lines)))
return shaped_lines
@classmethod
def align_top(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Aligns lines to top (adds extra lines to bottom as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
lines = lines + [[blank]] * extra_lines
return lines
@classmethod
def align_bottom(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Aligns render to bottom (adds extra lines above as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added. Defaults to None.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
lines = [[blank]] * extra_lines + lines
return lines
@classmethod
def align_middle(
cls: Type["Segment"],
lines: List[List["Segment"]],
width: int,
height: int,
style: Style,
new_lines: bool = False,
) -> List[List["Segment"]]:
"""Aligns lines to middle (adds extra lines to above and below as required).
Args:
lines (List[List[Segment]]): A list of lines.
width (int): Desired width.
height (int, optional): Desired height or None for no change.
style (Style): Style of any padding added.
new_lines (bool, optional): Padded lines should include "\n". Defaults to False.
Returns:
List[List[Segment]]: New list of lines.
"""
extra_lines = height - len(lines)
if not extra_lines:
return lines[:]
lines = lines[:height]
blank = cls(" " * width + "\n", style) if new_lines else cls(" " * width, style)
top_lines = extra_lines // 2
bottom_lines = extra_lines - top_lines
lines = [[blank]] * top_lines + lines + [[blank]] * bottom_lines
return lines
@classmethod
def simplify(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Simplify an iterable of segments by combining contiguous segments with the same style.
Args:
segments (Iterable[Segment]): An iterable of segments.
Returns:
Iterable[Segment]: A possibly smaller iterable of segments that will render the same way.
"""
iter_segments = iter(segments)
try:
last_segment = next(iter_segments)
except StopIteration:
return
_Segment = Segment
for segment in iter_segments:
if last_segment.style == segment.style and not segment.control:
last_segment = _Segment(
last_segment.text + segment.text, last_segment.style
)
else:
yield last_segment
last_segment = segment
yield last_segment
@classmethod
def strip_links(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Remove all links from an iterable of styles.
Args:
segments (Iterable[Segment]): An iterable segments.
Yields:
Segment: Segments with link removed.
"""
for segment in segments:
if segment.control or segment.style is None:
yield segment
else:
text, style, _control = segment
yield cls(text, style.update_link(None) if style else None)
@classmethod
def strip_styles(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Remove all styles from an iterable of segments.
Args:
segments (Iterable[Segment]): An iterable segments.
Yields:
Segment: Segments with styles replaced with None
"""
for text, _style, control in segments:
yield cls(text, None, control)
@classmethod
def remove_color(cls, segments: Iterable["Segment"]) -> Iterable["Segment"]:
"""Remove all color from an iterable of segments.
Args:
segments (Iterable[Segment]): An iterable segments.
Yields:
Segment: Segments with colorless style.
"""
cache: Dict[Style, Style] = {}
for text, style, control in segments:
if style:
colorless_style = cache.get(style)
if colorless_style is None:
colorless_style = style.without_color
cache[style] = colorless_style
yield cls(text, colorless_style, control)
else:
yield cls(text, None, control)
@classmethod
def divide(
cls, segments: Iterable["Segment"], cuts: Iterable[int]
) -> Iterable[List["Segment"]]:
"""Divides an iterable of segments in to portions.
Args:
cuts (Iterable[int]): Cell positions where to divide.
Yields:
[Iterable[List[Segment]]]: An iterable of Segments in List.
"""
split_segments: List["Segment"] = []
add_segment = split_segments.append
iter_cuts = iter(cuts)
while True:
cut = next(iter_cuts, -1)
if cut == -1:
return []
if cut != 0:
break
yield []
pos = 0
segments_clear = split_segments.clear
segments_copy = split_segments.copy
_cell_len = cached_cell_len
for segment in segments:
text, _style, control = segment
while text:
end_pos = pos if control else pos + _cell_len(text)
if end_pos < cut:
add_segment(segment)
pos = end_pos
break
if end_pos == cut:
add_segment(segment)
yield segments_copy()
segments_clear()
pos = end_pos
cut = next(iter_cuts, -1)
if cut == -1:
if split_segments:
yield segments_copy()
return
break
else:
before, segment = segment.split_cells(cut - pos)
text, _style, control = segment
add_segment(before)
yield segments_copy()
segments_clear()
pos = cut
cut = next(iter_cuts, -1)
if cut == -1:
if split_segments:
yield segments_copy()
return
yield segments_copy()
| Segment |
python | Lightning-AI__lightning | tests/tests_pytorch/accelerators/test_xla.py | {
"start": 4046,
"end": 7646
} | class ____(BoringModel):
count = 0
called = collections.defaultdict(int)
def __init__(self):
super().__init__()
self.automatic_optimization = False
@property
def should_update(self):
return self.count % 2 == 0
def on_train_batch_start(self, batch, batch_idx):
self.called["on_train_batch_start"] += 1
self.weight_before = self.layer.weight.clone()
def training_step(self, batch, batch_idx):
self.called["training_step"] += 1
opt = self.optimizers()
loss = self.step(batch)
if self.should_update:
self.manual_backward(loss)
opt.step()
opt.zero_grad()
return loss
def on_train_batch_end(self, *_):
self.called["on_train_batch_end"] += 1
after_before = self.layer.weight.clone()
if self.should_update:
assert not torch.equal(self.weight_before, after_before), self.count
else:
assert torch.equal(self.weight_before, after_before)
assert_emtpy_grad(self.layer.weight.grad)
self.count += 1
def on_train_start(self):
opt = self.optimizers()
self.opt_step_patch = patch.object(opt, "step", wraps=opt.step)
self.opt_step_mock = self.opt_step_patch.start()
def on_train_end(self):
# this might fail if run in an environment with too many ranks, as the total
# length of the dataloader will be distributed among them and then each rank might not do 3 steps
assert self.called["training_step"] == 3
assert self.called["on_train_batch_start"] == 3
assert self.called["on_train_batch_end"] == 3
self.opt_step_patch.stop()
assert self.opt_step_mock.call_count == 2
@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_manual_optimization_tpus(tmp_path):
model = ManualOptimizationModel()
model_copy = deepcopy(model)
trainer = Trainer(
max_epochs=1,
default_root_dir=tmp_path,
limit_train_batches=3,
limit_test_batches=0,
limit_val_batches=0,
accelerator="tpu",
devices="auto",
)
trainer.fit(model)
for param, param_copy in zip(model.parameters(), model_copy.parameters()):
assert not torch.equal(param.cpu().data, param_copy.data)
def test_strategy_choice_tpu_str_ddp_spawn(tpu_available):
with pytest.raises(ValueError, match="XLAAccelerator` can only be used with a `SingleDeviceXLAStrategy`"):
Trainer(strategy="ddp_spawn", accelerator="tpu", devices=8)
@RunIf(skip_windows=True)
@mock.patch("lightning.pytorch.strategies.xla.XLAStrategy.set_world_ranks")
def test_strategy_choice_tpu_str_xla_debug(_, tpu_available):
trainer = Trainer(strategy="xla_debug", accelerator="tpu", devices=8)
assert isinstance(trainer.strategy, XLAStrategy)
@RunIf(tpu=True)
def test_strategy_choice_tpu_strategy():
trainer = Trainer(strategy=XLAStrategy(), accelerator="tpu", devices="auto")
assert isinstance(trainer.strategy, XLAStrategy)
@RunIf(tpu=True)
@mock.patch.dict(os.environ, os.environ.copy(), clear=True)
def test_auto_parameters_tying_tpus(tmp_path):
model = WeightSharingModule()
shared_params = find_shared_parameters(model)
assert shared_params[0] == ["layer_1.weight", "layer_3.weight"]
trainer = Trainer(default_root_dir=tmp_path, limit_train_batches=3, accelerator="tpu", devices="auto", max_epochs=1)
trainer.fit(model)
assert torch.equal(model.layer_1.weight, model.layer_3.weight)
| ManualOptimizationModel |
python | django__django | django/db/models/lookups.py | {
"start": 26996,
"end": 27114
} | class ____(YearLookup, LessThanOrEqual):
def get_bound_params(self, start, finish):
return (finish,)
| YearLte |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 1447,
"end": 1931
} | class ____(AsyncHTTPTestCase):
"""Base class for web tests that also supports WSGI mode.
Override get_handlers and get_app_kwargs instead of get_app.
This class is deprecated since WSGI mode is no longer supported.
"""
def get_app(self):
self.app = Application(self.get_handlers(), **self.get_app_kwargs())
return self.app
def get_handlers(self):
raise NotImplementedError()
def get_app_kwargs(self):
return {}
| WebTestCase |
python | python__mypy | mypyc/ir/ops.py | {
"start": 14715,
"end": 15572
} | class ____(ControlOp):
"""Return a value from a function."""
error_kind = ERR_NEVER
def __init__(
self, value: Value, line: int = -1, *, yield_target: BasicBlock | None = None
) -> None:
super().__init__(line)
self.value = value
# If this return is created by a yield, keep track of the next
# basic block. This doesn't affect the code we generate but
# can feed into analysis that need to understand the
# *original* CFG.
self.yield_target = yield_target
def sources(self) -> list[Value]:
return [self.value]
def set_sources(self, new: list[Value]) -> None:
(self.value,) = new
def stolen(self) -> list[Value]:
return [self.value]
def accept(self, visitor: OpVisitor[T]) -> T:
return visitor.visit_return(self)
@final
| Return |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/pool/impl.py | {
"start": 16895,
"end": 19107
} | class ____(Pool):
"""A :class:`_pool.Pool` that allows at most one checked out connection at
any given time.
This will raise an exception if more than one connection is checked out
at a time. Useful for debugging code that is using more connections
than desired.
The :class:`.AssertionPool` class **is compatible** with asyncio and
:func:`_asyncio.create_async_engine`.
"""
_conn: Optional[ConnectionPoolEntry]
_checkout_traceback: Optional[List[str]]
def __init__(self, *args: Any, **kw: Any):
self._conn = None
self._checked_out = False
self._store_traceback = kw.pop("store_traceback", True)
self._checkout_traceback = None
Pool.__init__(self, *args, **kw)
def status(self) -> str:
return "AssertionPool"
def _do_return_conn(self, record: ConnectionPoolEntry) -> None:
if not self._checked_out:
raise AssertionError("connection is not checked out")
self._checked_out = False
assert record is self._conn
def dispose(self) -> None:
self._checked_out = False
if self._conn:
self._conn.close()
def recreate(self) -> AssertionPool:
self.logger.info("Pool recreating")
return self.__class__(
self._creator,
echo=self.echo,
pre_ping=self._pre_ping,
recycle=self._recycle,
reset_on_return=self._reset_on_return,
logging_name=self._orig_logging_name,
_dispatch=self.dispatch,
dialect=self._dialect,
)
def _do_get(self) -> ConnectionPoolEntry:
if self._checked_out:
if self._checkout_traceback:
suffix = " at:\n%s" % "".join(
chop_traceback(self._checkout_traceback)
)
else:
suffix = ""
raise AssertionError("connection is already checked out" + suffix)
if not self._conn:
self._conn = self._create_connection()
self._checked_out = True
if self._store_traceback:
self._checkout_traceback = traceback.format_stack()
return self._conn
| AssertionPool |
python | google__jax | jax/experimental/pallas/ops/tpu/flash_attention.py | {
"start": 1517,
"end": 49855
} | class ____:
"""Tile sizes parameterizing FlashAttention kernels.
Those parameters have negligible effect on numerics, but affect performance
greatly.
"""
block_q: int
block_k_major: int
block_k: int
block_b: int
block_q_major_dkv: int | None = None
block_k_major_dkv: int | None = None
block_k_dkv: int | None = None
block_q_dkv: int | None = None
block_k_major_dq: int | None = None
block_k_dq: int | None = None
block_q_dq: int | None = None
def __post_init__(self):
def verify_major_minor(prefix, suffix, major, minor):
if minor > major:
raise ValueError(
f"{prefix}{suffix}={minor} should be smaller than"
f" {prefix}_major{suffix}={major}"
)
if major % minor != 0:
raise ValueError(
f"{prefix}{suffix}={minor} should divide"
f" {prefix}_major{suffix}={major}"
)
verify_major_minor("block_k", "", self.block_k_major, self.block_k)
if self.block_q_major_dkv is not None and self.block_q_dkv is not None:
verify_major_minor(
"block_q", "_dkv", self.block_q_major_dkv, self.block_q_dkv
)
if self.block_k_major_dkv is not None and self.block_k_dkv is not None:
verify_major_minor(
"block_k", "_dkv", self.block_k_major_dkv, self.block_k_dkv
)
if self.block_k_major_dq is not None and self.block_k_dq is not None:
verify_major_minor(
"block_k", "_dq", self.block_k_major_dq, self.block_k_dq
)
@property
def has_backward_blocks(self) -> bool:
backward_blocks = (
self.block_q_major_dkv,
self.block_k_major_dkv,
self.block_q_dkv,
self.block_k_dkv,
self.block_k_major_dq,
self.block_k_dq,
self.block_q_dq,
)
return all(b is not None for b in backward_blocks)
@classmethod
def get_default(cls, batch_size, num_heads, q_seq_len, kv_len, d_model):
# TODO(apaszke,sharadmv): Select better parameters based on a heuristic.
del batch_size, num_heads, q_seq_len, kv_len, d_model # Unused.
return BlockSizes(
block_q=128,
block_k_major=128,
block_k=128,
block_b=1,
block_q_major_dkv=128,
block_k_major_dkv=128,
block_k_dkv=128,
block_q_dkv=128,
block_k_major_dq=128,
block_k_dq=128,
block_q_dq=128,
)
@functools.partial(
jax.jit,
static_argnames=[
"causal",
"sm_scale",
"block_sizes",
"debug",
],
)
def flash_attention(
q, # [batch_size, num_heads, q_seq_len, d_model]
k, # [batch_size, num_heads, kv_seq_len, d_model]
v, # [batch_size, num_heads, kv_seq_len, d_model]
ab=None, # [batch_size, num_heads, q_seq_len, kv_seq_len]
segment_ids=None, # q of [batch_size, q_seq_len] and kv of [batch_size, kv_seq_len]
*,
causal: bool = False,
sm_scale: float = 1.0,
block_sizes: BlockSizes | None = None,
debug: bool = False,
):
batch_size, num_heads, q_seq_len, d_model = q.shape
batch_size_k, num_heads_k, kv_seq_len, d_model_k = k.shape
batch_size_v, num_heads_v, kv_seq_len_v, d_model_v = v.shape
if batch_size != batch_size_k or batch_size != batch_size_v:
raise ValueError(
f"Batch size mismatch: got {batch_size}, {batch_size_k} and"
f" {batch_size_v} (for q, k, v respectively)"
)
if num_heads != num_heads_k or num_heads != num_heads_v:
raise ValueError(
f"Head count mismatch: got {num_heads}, {num_heads_k},"
f" {num_heads_v} (for q, k, v respectively)"
)
if d_model != d_model_k:
raise ValueError(
f"Model dimension mismatch: got {d_model} and {d_model_k} (for q and k"
" respectively)"
)
if d_model != d_model_v:
raise NotImplementedError(
"V model dimension unequal to KV model dimension unsupported"
)
if kv_seq_len != kv_seq_len_v:
raise ValueError(
f"KV sequence length mismatch: got {kv_seq_len} and {kv_seq_len_v}"
)
if ab is not None:
if ab.shape != (batch_size, num_heads, q_seq_len, kv_seq_len):
raise ValueError(
f"Attention bias shape mismatch: expected ({batch_size=},"
f" {num_heads=}, {q_seq_len=}, {kv_seq_len=}), got {ab.shape}"
)
if segment_ids is not None:
if segment_ids.q.shape != (batch_size, q_seq_len):
raise ValueError(
f"Q segment ids shape mismatch: expected ({batch_size=},"
f" {q_seq_len=},), got {segment_ids.q.shape}"
)
if segment_ids.kv.shape != (batch_size, kv_seq_len):
raise ValueError(
f"KV segment ids shape mismatch: expected ({batch_size=},"
f" {kv_seq_len=},), got {segment_ids.kv.shape}"
)
if block_sizes is None:
block_sizes = BlockSizes.get_default(
batch_size, num_heads, q_seq_len, kv_seq_len, d_model
)
return _flash_attention(
q, k, v, ab, segment_ids, False, causal, sm_scale, block_sizes, debug
)
@functools.partial(jax.custom_vjp, nondiff_argnums=range(5, 10))
def _flash_attention(
q,
k,
v,
ab,
segment_ids,
save_residuals,
causal,
sm_scale,
block_sizes,
debug,
):
return _flash_attention_impl(
q,
k,
v,
ab,
segment_ids,
save_residuals,
causal,
sm_scale,
block_sizes.block_b,
block_sizes.block_q,
block_sizes.block_k_major,
block_sizes.block_k,
debug,
)
def _flash_attention_fwd(
q,
k,
v,
ab,
segment_ids,
save_residuals,
causal,
sm_scale,
block_sizes,
debug,
):
if save_residuals:
raise NotImplementedError("Higher-order AD not supported")
o, l, m = _flash_attention(
q, k, v, ab, segment_ids, True, causal, sm_scale, block_sizes, debug
)
return o, (q, k, v, ab, segment_ids, o, l, m)
def _flash_attention_bwd(
save_residuals: bool,
causal: bool,
sm_scale: float,
block_sizes: BlockSizes,
debug: bool,
residuals,
do,
):
"""VJP rule for FlashAttention."""
if save_residuals:
raise NotImplementedError("Higher-order AD not supported")
(q, k, v, ab, segment_ids, o, l, m) = residuals
if not block_sizes.has_backward_blocks:
raise ValueError(
"Program is being differentiated, but not all backward blocks are"
" specified"
)
di = jnp.sum(
o.astype(jnp.float32) * do.astype(jnp.float32), axis=-1
) # [batch_size, num_heads, q_seq_len]
dk, dv = _flash_attention_bwd_dkv(
q,
k,
v,
ab,
segment_ids,
l,
m,
do,
di,
block_q_major=block_sizes.block_q_major_dkv,
block_k_major=block_sizes.block_k_major_dkv,
block_k=block_sizes.block_k_dkv,
block_q=block_sizes.block_q_dkv,
sm_scale=sm_scale,
causal=causal,
mask_value=DEFAULT_MASK_VALUE,
debug=debug,
)
dq, ds = _flash_attention_bwd_dq(
q,
k,
v,
ab,
segment_ids,
l,
m,
do,
di,
block_q_major=block_sizes.block_q_dq,
block_k_major=block_sizes.block_k_major_dq,
block_k=block_sizes.block_k_dq,
sm_scale=sm_scale,
causal=causal,
mask_value=DEFAULT_MASK_VALUE,
debug=debug,
)
return dq, dk, dv, ds, None
_flash_attention.defvjp(fwd=_flash_attention_fwd, bwd=_flash_attention_bwd)
MIN_BLOCK_SIZE = 128
TRANS_B_DIM_NUMBERS = (((1,), (1,)), ((), ()))
def below_or_on_diag(r, r_blk_size, c, c_blk_size):
# A block is considered below or on diagonal as long as the bottom left
# corner of the block is below or on diagonal.
return ((r + 1) * r_blk_size - 1) > (c * c_blk_size)
def _flash_attention_kernel(q_tile_ref, *args, **kwargs):
block_b = q_tile_ref.shape[0]
# If we're not going to tile the softmax, then we can avoid a bunch of VPU ops.
if kwargs["block_k"] == kwargs["kv_seq_len"]:
kernel = _flash_attention_kernel_single_batch_single_step
else:
kernel = _flash_attention_kernel_single_batch
for batch_idx in range(block_b):
kernel((batch_idx, 0), q_tile_ref, *args, **kwargs)
def _flash_attention_kernel_single_batch(
batch_idx: tuple[int, ...],
q_tile_ref,
k_tile_ref,
v_tile_ref,
ab_tile_ref,
q_segment_ids_tile_ref,
kv_segment_ids_tile_ref, # Input arrays
o_tile_ref, # Output arrays
l_ref,
m_ref,
m_scratch_ref,
l_scratch_ref,
acc_scratch_ref,
*,
causal,
sm_scale,
block_k,
kv_seq_len,
mask_value,
):
block_k_major = k_tile_ref.shape[2]
block_q = q_tile_ref.shape[2]
head_dim = q_tile_ref.shape[-1]
kv_seq_idx = pl.program_id(3)
@pl.when(kv_seq_idx == 0)
def start_new_sequence():
m_scratch_ref[batch_idx] = jnp.full(
m_scratch_ref.shape[2:], -jnp.inf, jnp.float32
)
l_scratch_ref[batch_idx] = jnp.zeros(l_scratch_ref.shape[2:], jnp.float32)
acc_scratch_ref[batch_idx] = jnp.zeros(
acc_scratch_ref.shape[2:], jnp.float32
)
q_seq_idx = pl.program_id(2)
if causal:
should_run = below_or_on_diag(q_seq_idx, block_q, kv_seq_idx, block_k_major)
else:
should_run = True
@pl.when(should_run)
def run():
@pl.loop(0, block_k_major, step=block_k, unroll=True)
def _body(start_k):
m_prev = m_scratch_ref[batch_idx]
l_prev = l_scratch_ref[batch_idx]
q = q_tile_ref[batch_idx] # [block_q, head_dim]
k = k_tile_ref[
(*batch_idx, pl.dslice(start_k, block_k), slice(None))
] # [block_k, head_dim]
s = jax.lax.dot_general(
q, k, TRANS_B_DIM_NUMBERS, preferred_element_type=jnp.float32
) # [block_q, block_k]
# Add attention bias if needed.
# TODO(tanburn) Should the attention bias be added before or after
# multiplication by sm_scale?
if ab_tile_ref is not None:
ab = ab_tile_ref[
(*batch_idx, pl.dslice(None), pl.dslice(start_k, block_k))
].astype(jnp.float32)
s += ab
if sm_scale != 1.0:
s *= sm_scale
mask = None
if q_segment_ids_tile_ref is not None:
repeats, rem = divmod(block_k, NUM_LANES)
if rem:
raise NotImplementedError(
f"kv block size must be a multiple of {NUM_LANES}"
)
q_segment_ids = pltpu.repeat(
q_segment_ids_tile_ref[batch_idx[0]], repeats, axis=1
) # [block_q, block_k].
kv_segment_ids = kv_segment_ids_tile_ref[
batch_idx[0], :1, pl.dslice(start_k, block_k)
] # [1, block_k].
mask = jnp.equal(q_segment_ids, kv_segment_ids).astype(jnp.bool_)
if causal:
mask_shape = (block_q, block_k)
row_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 0)
row_ids += q_seq_idx * block_q
col_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 1)
col_ids += kv_seq_idx * block_k_major + start_k
causal_mask = col_ids <= row_ids
mask = (
causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
)
s = s if mask is None else s + jnp.where(mask, 0.0, mask_value)
m_curr = jnp.max(s, axis=1)[:, None] # Row max, shape [block_q, 1].
m_next = jnp.maximum(m_prev, m_curr) # Shape [block_q, 128].
block_k_repeats, rem = divmod(block_k, MIN_BLOCK_SIZE)
if rem:
raise NotImplementedError(
f"{block_k=} should be a multiple of {MIN_BLOCK_SIZE}"
)
p = jnp.exp(s - pltpu.repeat(m_next, block_k_repeats, 1))
alpha = jnp.exp(m_prev - m_next) # Shape [block_q, 128].
l_corr = alpha * l_prev
l_next = jnp.sum(p, axis=1)[:, None] + l_corr # Shape [block_q, 128]
head_dim_repeats, rem = divmod(head_dim, MIN_BLOCK_SIZE)
l_broadcast = lambda l: pltpu.repeat(l, head_dim_repeats, 1)
if rem:
if head_dim_repeats == 0:
l_broadcast = lambda l: l[:, :head_dim]
else:
raise NotImplementedError(
f"{head_dim=} should be a multiple of {MIN_BLOCK_SIZE} if larger"
)
l_scratch_ref[batch_idx] = l_next
m_scratch_ref[batch_idx] = m_next
l_next_inv_safe = jnp.where(l_next == 0.0, 1.0, 1.0 / l_next)
acc_scratch_ref[batch_idx] *= l_broadcast(l_corr * l_next_inv_safe)
v = v_tile_ref[(*batch_idx, pl.dslice(start_k, block_k), slice(None))]
o_curr = jax.lax.dot(
p.astype(v.dtype), v, preferred_element_type=jnp.float32
)
acc_scratch_ref[batch_idx] += o_curr * l_broadcast(l_next_inv_safe)
@pl.when(kv_seq_idx == (kv_seq_len // block_k_major) - 1)
def store_output():
o_tile_ref[batch_idx] = acc_scratch_ref[batch_idx].astype(o_tile_ref.dtype)
if l_ref is not None:
l_ref[batch_idx] = l_scratch_ref[batch_idx].astype(l_ref.dtype)
if m_ref is not None:
m_ref[batch_idx] = m_scratch_ref[batch_idx].astype(m_ref.dtype)
def _flash_attention_kernel_single_batch_single_step(
batch_idx: tuple[int, ...],
q_tile_ref,
k_tile_ref,
v_tile_ref,
ab_tile_ref,
q_segment_ids_tile_ref,
kv_segment_ids_tile_ref, # Input arrays
o_tile_ref, # Output arrays
l_ref: Any | None = None,
m_ref: Any | None = None,
*,
causal,
sm_scale,
block_k,
kv_seq_len,
mask_value,
):
block_k_major = k_tile_ref.shape[2]
block_q = q_tile_ref.shape[2]
assert kv_seq_len == block_k_major == block_k
q = q_tile_ref[batch_idx] # [block_q, head_dim]
k = k_tile_ref[batch_idx] # [block_k, head_dim]
s = jax.lax.dot_general(
q, k, TRANS_B_DIM_NUMBERS, preferred_element_type=jnp.float32
) # [block_q, block_k]
if ab_tile_ref is not None:
s += ab_tile_ref[batch_idx].astype(jnp.float32)
if sm_scale != 1.0:
s *= sm_scale
mask = None
if q_segment_ids_tile_ref is not None:
repeats, rem = divmod(block_k, NUM_LANES)
if rem:
raise NotImplementedError(
f"kv block size must be a multiple of {NUM_LANES}"
)
q_segment_ids = q_segment_ids_tile_ref[
batch_idx[0]
] # [block_q, NUM_LANES].
q_segment_ids = pltpu.repeat(
q_segment_ids, repeats, axis=1
) # [block_q, block_k].
kv_segment_ids = kv_segment_ids_tile_ref[batch_idx[0], :1] # [1, block_k].
mask = jnp.equal(q_segment_ids, kv_segment_ids).astype(jnp.bool_)
if causal:
q_seq_idx = pl.program_id(2)
mask_shape = (block_q, block_k)
row_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 0)
row_ids += q_seq_idx * block_q
col_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 1)
causal_mask = col_ids <= row_ids
mask = causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
s = s if mask is None else s + jnp.where(mask, 0.0, mask_value)
m = jnp.max(s, axis=1)[:, None]
p = jnp.exp(s - m)
l = jnp.sum(p, axis=1)[:, None]
p /= l
if m_ref is not None:
m_ref[batch_idx] = lax.broadcast_in_dim(m, m_ref.shape[2:], range(2))
if l_ref is not None:
l_ref[batch_idx] = lax.broadcast_in_dim(l, l_ref.shape[2:], range(2))
v = v_tile_ref[batch_idx]
o_tile_ref[batch_idx] = jax.lax.dot(
p.astype(v.dtype), v, preferred_element_type=jnp.float32
).astype(o_tile_ref.dtype)
def _bytes(x: jax.Array | jax.ShapeDtypeStruct) -> int:
return math.prod(x.shape) * x.dtype.itemsize
def _fwd_cost_estimate(
q: jax.Array,
k: jax.Array,
v: jax.Array,
ab: jax.Array | None,
segment_ids: SegmentIds | None,
*,
causal: bool,
sm_scale: jax.Array | None,
kernel_inputs_specs,
kernel_outputs_specs,
) -> pl.CostEstimate | None:
body_cost = pl.estimate_cost(
mha_reference,
q, k, v, ab, segment_ids, causal=causal, sm_scale=sm_scale
)
input_bytes = sum(_bytes(x) for x in jax.tree.leaves(kernel_inputs_specs))
output_bytes = sum(_bytes(x) for x in jax.tree.leaves(kernel_outputs_specs))
return pl.CostEstimate(
flops=body_cost.flops,
transcendentals=body_cost.transcendentals,
bytes_accessed=input_bytes + output_bytes,
)
def _flash_attention_impl(
q,
k,
v,
ab,
segment_ids,
save_residuals,
causal,
sm_scale,
block_b,
block_q,
block_k_major,
block_k,
debug,
):
batch_size, num_heads, q_seq_len, head_dim = q.shape
_, _, kv_seq_len, _ = k.shape
_verify_block("block_q", "q_seq_len", block_q, q_seq_len, should_divide=False)
_verify_block("block_k_major", "kv_seq_len", block_k_major, kv_seq_len)
_verify_block("block_k", "kv_seq_len", block_k, kv_seq_len)
_verify_block("block_b", "batch", block_b, batch_size, should_divide=False)
# TODO(apaszke): Tile over heads as well.
grid = (
pl.cdiv(batch_size, block_b),
num_heads,
pl.cdiv(q_seq_len, block_q),
kv_seq_len // block_k_major,
)
def q_index_map(batch_index, head_index, q_seq_index, _):
return (batch_index, head_index, q_seq_index, 0)
def kv_index_map(batch_index, head_index, q_seq_index, kv_seq_index):
if causal:
# If the kv block is skipped, prefetch the next valid kv block, i.e. the
# 0th one to be used for the next block_q rows.
next_kv_index = lax.select(
below_or_on_diag(q_seq_index, block_q, kv_seq_index, block_k_major),
kv_seq_index,
0,
)
else:
next_kv_index = kv_seq_index
return (batch_index, head_index, next_kv_index, 0)
def ab_index_map(batch_index, head_index, q_seq_index, kv_seq_index):
if causal:
should_run = below_or_on_diag(
q_seq_index, block_q, kv_seq_index, block_k_major
)
# If the ab block is skipped, prefetch the next valid ab block, i.e. the
# 0th kv to be used for the next block_q rows.
next_q_index = lax.select(
should_run,
q_seq_index,
lax.select(
q_seq_index == (q_seq_len // block_q) - 1, 0, q_seq_index + 1
),
)
next_kv_index = lax.select(should_run, kv_seq_index, 0)
else:
next_q_index = q_seq_index
next_kv_index = kv_seq_index
return (batch_index, head_index, next_q_index, next_kv_index)
def o_index_map(batch_index, head_index, q_seq_index, _):
return (batch_index, head_index, q_seq_index, 0)
def lm_index_map(batch_index, head_index, q_seq_index, _):
return (batch_index, head_index, q_seq_index, 0)
kernel = functools.partial(
_flash_attention_kernel,
causal=causal,
mask_value=DEFAULT_MASK_VALUE,
sm_scale=sm_scale,
block_k=block_k,
kv_seq_len=kv_seq_len,
)
out_shape = jax.ShapeDtypeStruct(shape=q.shape, dtype=q.dtype)
out_shape = [out_shape]
out_specs = [pl.BlockSpec((block_b, 1, block_q, head_dim), o_index_map)]
if block_k != kv_seq_len:
m_scratch = pltpu.VMEM((block_b, 1, block_q, MIN_BLOCK_SIZE), jnp.float32)
l_scratch = pltpu.VMEM((block_b, 1, block_q, MIN_BLOCK_SIZE), jnp.float32)
acc_scratch = pltpu.VMEM((block_b, 1, block_q, head_dim), jnp.float32)
scratch_shapes = [m_scratch, l_scratch, acc_scratch]
else:
scratch_shapes = []
if save_residuals:
out_specs = [
*out_specs,
pl.BlockSpec((block_b, 1, block_q, MIN_BLOCK_SIZE), lm_index_map),
pl.BlockSpec((block_b, 1, block_q, MIN_BLOCK_SIZE), lm_index_map),
]
l = jax.ShapeDtypeStruct(
(batch_size, num_heads, q_seq_len, MIN_BLOCK_SIZE), dtype=jnp.float32
)
m = jax.ShapeDtypeStruct(
(batch_size, num_heads, q_seq_len, MIN_BLOCK_SIZE), dtype=jnp.float32
)
out_shape = (*out_shape, l, m)
else:
out_specs = [*out_specs, None, None]
out_shape = (*out_shape, None, None)
ab_block_spec = (
pl.BlockSpec((block_b, 1, block_q, block_k_major), ab_index_map)
if ab is not None else None)
q_segment_ids_spec = kv_segment_ids_spec = None
q_segment_ids = kv_segment_ids = None
if segment_ids is not None:
def q_segment_ids_index_map(batch_index, head_index, q_seq_index, _):
del head_index
return (batch_index, q_seq_index, 0)
def kv_segment_ids_index_map(
batch_index, head_index, q_seq_index, kv_seq_index
):
del head_index
if causal:
next_kv_index = lax.select(
below_or_on_diag(q_seq_index, block_q, kv_seq_index, block_k_major),
kv_seq_index,
0,
)
else:
next_kv_index = kv_seq_index
return (batch_index, 0, next_kv_index)
q_segment_ids_spec = pl.BlockSpec(
(block_b, block_q, NUM_LANES), q_segment_ids_index_map
)
kv_segment_ids_spec = pl.BlockSpec(
(block_b, NUM_SUBLANES, block_k_major), kv_segment_ids_index_map
)
q_segment_ids = jax.lax.broadcast_in_dim(
segment_ids.q,
(batch_size, q_seq_len, NUM_LANES),
(
0,
1,
),
)
kv_segment_ids = jax.lax.broadcast_in_dim(
segment_ids.kv,
(batch_size, NUM_SUBLANES, kv_seq_len),
(
0,
2,
),
)
in_specs = [
pl.BlockSpec((block_b, 1, block_q, head_dim), q_index_map),
pl.BlockSpec((block_b, 1, block_k_major, head_dim), kv_index_map),
pl.BlockSpec((block_b, 1, block_k_major, head_dim), kv_index_map),
ab_block_spec,
q_segment_ids_spec,
kv_segment_ids_spec,
]
o, *aux = pl.pallas_call(
kernel,
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
grid=grid,
in_specs=in_specs,
out_specs=out_specs,
scratch_shapes=scratch_shapes,
),
out_shape=out_shape,
debug=debug,
compiler_params=pltpu.CompilerParams(
dimension_semantics=(
"parallel",
"parallel",
"parallel",
"arbitrary",
)
),
cost_estimate=_fwd_cost_estimate(
q,
k,
v,
ab,
segment_ids,
causal=causal,
sm_scale=sm_scale,
kernel_inputs_specs=(q, k, v, ab, q_segment_ids, kv_segment_ids),
kernel_outputs_specs=out_shape,
),
)(q, k, v, ab, q_segment_ids, kv_segment_ids)
if save_residuals:
l, m = (v[..., 0] for v in aux[-2:])
return (o, l, m)
else:
return o
def _flash_attention_dkv_kernel(
q_tile_ref,
k_tile_ref,
v_tile_ref,
ab_tile_ref,
q_segment_ids_tile_ref,
kv_segment_ids_tile_ref,
l_tile_ref,
m_tile_ref,
do_tile_ref,
di_tile_ref,
dk_tile_ref,
dv_tile_ref,
dk_scratch_ref,
dv_scratch_ref,
*,
sm_scale: float,
causal: bool,
mask_value: float,
q_seq_len: int,
block_q: int,
block_k: int,
):
_, _, block_q_major, _ = q_tile_ref.shape
_, _, block_k_major, _ = k_tile_ref.shape
q_seq_index = pl.program_id(axis=3)
kv_seq_index = pl.program_id(axis=2)
@pl.when(q_seq_index == 0)
def start_new_sequence():
dk_scratch_ref[:, :] = jnp.zeros(dk_scratch_ref.shape, dk_scratch_ref.dtype)
dv_scratch_ref[:, :] = jnp.zeros(dv_scratch_ref.shape, dv_scratch_ref.dtype)
def q_body(j, _):
start_q = j * block_q
def k_body(i, _):
start_k = i * block_k
k = k_tile_ref[0, 0, pl.ds(start_k, block_k), :]
v = v_tile_ref[0, 0, pl.ds(start_k, block_k), :]
q = q_tile_ref[0, 0, pl.ds(start_q, block_q), :] # [block_q, head_dim]
l = l_tile_ref[0, 0, pl.ds(start_q, block_q), :] # [block_q, 128]
m = m_tile_ref[0, 0, pl.ds(start_q, block_q), :] # [block_q, 128]
do = do_tile_ref[0, 0, pl.ds(start_q, block_q), :] # [block_q, 128]
di = di_tile_ref[0, 0, pl.ds(start_q, block_q), :].astype(
jnp.float32
) # [block_q, 128]
capped_logits = lax.dot_general(
q, k, TRANS_B_DIM_NUMBERS, preferred_element_type=jnp.float32
) # [block_q_major, block_k]
if ab_tile_ref is not None:
ab = ab_tile_ref[
0,
0,
pl.dslice(j * block_q, block_q),
pl.dslice(i * block_k, block_k),
].astype(jnp.float32)
capped_logits += ab
if sm_scale != 1.0:
capped_logits *= sm_scale
mask = None
if q_segment_ids_tile_ref is not None:
repeats, rem = divmod(block_k, NUM_LANES)
if rem:
raise NotImplementedError(
f"kv block size must be a multiple of {NUM_LANES}"
)
q_segment_ids = q_segment_ids_tile_ref[
0, pl.ds(start_q, block_q), :
] # [block_q, NUM_LANES].
q_segment_ids = pltpu.repeat(
q_segment_ids, repeats, axis=1
) # [block_q, block_k].
kv_segment_ids = kv_segment_ids_tile_ref[
:, 0, pl.ds(start_k, block_k)
] # [1, block_k].
mask = jnp.equal(q_segment_ids, kv_segment_ids).astype(jnp.bool_)
if causal:
mask_shape = (block_q, block_k)
row_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 0)
row_ids += q_seq_index * block_q_major + start_q
col_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 1)
col_ids += kv_seq_index * block_k_major + start_k
causal_mask = col_ids <= row_ids
mask = (
causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
)
capped_logits = (
capped_logits
if mask is None
else capped_logits + jnp.where(mask, 0.0, mask_value)
)
p = jnp.exp(
capped_logits - pltpu.repeat(m, block_k // MIN_BLOCK_SIZE, axis=1)
)
p = p * pltpu.repeat(
1 / l, block_k // MIN_BLOCK_SIZE, axis=1
) # [block_q_major, block_k_major]
dv = lax.dot(p.T.astype(do.dtype), do, preferred_element_type=jnp.float32)
dv_scratch_ref[pl.ds(start_k, block_k), :] += dv.astype(
dv_scratch_ref.dtype
)
# di: [block_q, 128]
# do: [block_q, head_dim]
# v: [block_k_major, head_dim]
dp = lax.dot_general(
do, v, TRANS_B_DIM_NUMBERS, preferred_element_type=jnp.float32
)
ds = (dp - pltpu.repeat(di, block_k // MIN_BLOCK_SIZE, axis=1)) * p
if sm_scale != 1.0:
ds = ds * sm_scale
# ds: [block_q_major, block_k_major]
# q: [block_q_major, head_dim]
dk = lax.dot(ds.T.astype(do.dtype), q, preferred_element_type=jnp.float32)
dk_scratch_ref[pl.ds(start_k, block_k), :] += dk.astype(
dk_scratch_ref.dtype
)
lax.fori_loop(0, block_k_major // block_k, k_body, None, unroll=True)
if causal:
should_run = below_or_on_diag(
q_seq_index, block_q_major, kv_seq_index, block_k_major
)
else:
should_run = True
@pl.when(should_run)
def run():
lax.fori_loop(0, block_q_major // block_q, q_body, None, unroll=True)
@pl.when(q_seq_index == q_seq_len // block_q_major - 1)
def end_of_q_sequence():
dv_tile_ref[0, 0, :, :] = dv_scratch_ref[...].astype(dv_tile_ref.dtype)
dk_tile_ref[0, 0, :, :] = dk_scratch_ref[...].astype(dk_tile_ref.dtype)
def _flash_attention_bwd_dkv(
q,
k,
v,
ab,
segment_ids,
l,
m,
do,
di,
*,
block_q_major: int | None,
block_q: int | None,
block_k_major: int | None,
block_k: int | None,
sm_scale: float,
causal: bool = False,
mask_value: float = DEFAULT_MASK_VALUE,
debug: bool = False,
):
batch_size, num_heads, q_seq_len, head_dim = q.shape
_, _, kv_seq_len, _ = k.shape
_verify_block("block_q_major_dkv", "q_seq_len", block_q_major, q_seq_len)
_verify_block("block_q_dkv", "q_seq_len", block_q, q_seq_len)
_verify_block("block_k_major_dkv", "kv_seq_len", block_k_major, kv_seq_len)
_verify_block("block_k_dkv", "kv_seq_len", block_k, kv_seq_len)
# Broadcast out scalar values
m = jnp.broadcast_to(m[..., None], (*m.shape, MIN_BLOCK_SIZE))
l = jnp.broadcast_to(l[..., None], (*l.shape, MIN_BLOCK_SIZE))
# Preprocess contraction for bwd pass
di = jnp.broadcast_to(di[..., None], (*di.shape, MIN_BLOCK_SIZE))
# kv index needs to be before q index since q index is the contracting
# dimension.
grid = (
batch_size,
num_heads,
kv_seq_len // block_k_major,
q_seq_len // block_q_major,
)
def qo_index_map(batch_index, head_index, kv_seq_index, q_seq_index):
if causal:
# If the q block is skipped, stay at the 0th q block.
next_q_index = lax.select(
below_or_on_diag(
q_seq_index, block_q_major, kv_seq_index, block_k_major
),
q_seq_index,
0,
)
else:
next_q_index = q_seq_index
return (batch_index, head_index, next_q_index, 0)
qo_spec = pl.BlockSpec((1, 1, block_q_major, head_dim), qo_index_map)
assert qo_spec.block_shape is not None
assert q.ndim == len(qo_spec.block_shape)
do_spec = qo_spec
assert do.ndim == len(qo_spec.block_shape)
def kv_index_map(batch_index, head_index, kv_seq_index, _):
return (batch_index, head_index, kv_seq_index, 0)
kv_spec = pl.BlockSpec((1, 1, block_k_major, head_dim), kv_index_map)
assert kv_spec.block_shape is not None
assert k.ndim == len(kv_spec.block_shape)
assert v.ndim == len(kv_spec.block_shape)
def lm_index_map(batch_index, head_index, _, q_seq_index):
return (batch_index, head_index, q_seq_index, 0)
lm_spec = pl.BlockSpec((1, 1, block_q_major, MIN_BLOCK_SIZE), lm_index_map)
assert lm_spec.block_shape is not None
assert l.ndim == len(lm_spec.block_shape)
assert m.ndim == len(lm_spec.block_shape)
di_spec = pl.BlockSpec((1, 1, block_q_major, MIN_BLOCK_SIZE), qo_index_map)
assert di_spec.block_shape is not None
assert di.ndim == len(di_spec.block_shape)
def ab_index_map(batch_index, head_index, kv_seq_index, q_seq_index):
return (batch_index, head_index, q_seq_index, kv_seq_index)
dab_spec = (
pl.BlockSpec((1, 1, block_q_major, block_k_major), ab_index_map)
if ab is not None
else None
)
q_segment_ids_spec = kv_segment_ids_spec = None
q_segment_ids = kv_segment_ids = None
if segment_ids is not None:
def q_segment_ids_index_map(
batch_index, head_index, kv_seq_index, q_seq_index
):
del head_index
if causal:
next_q_index = lax.select(
below_or_on_diag(
q_seq_index, block_q_major, kv_seq_index, block_k_major
),
q_seq_index,
0,
)
else:
next_q_index = q_seq_index
return (batch_index, next_q_index, 0)
def kv_segment_ids_index_map(batch_index, head_index, kv_seq_index, _):
del head_index
return (batch_index, 0, kv_seq_index)
q_segment_ids_spec = pl.BlockSpec(
(1, block_q_major, NUM_LANES), q_segment_ids_index_map
)
kv_segment_ids_spec = pl.BlockSpec(
(1, NUM_SUBLANES, block_k_major), kv_segment_ids_index_map
)
q_segment_ids = jax.lax.broadcast_in_dim(
segment_ids.q,
(batch_size, q_seq_len, NUM_LANES),
(
0,
1,
),
)
kv_segment_ids = jax.lax.broadcast_in_dim(
segment_ids.kv,
(batch_size, NUM_SUBLANES, kv_seq_len),
(
0,
2,
),
)
in_specs = [
qo_spec,
kv_spec,
kv_spec,
dab_spec,
q_segment_ids_spec,
kv_segment_ids_spec,
lm_spec,
lm_spec,
do_spec,
di_spec,
]
out_shapes = [
jax.ShapeDtypeStruct((batch_size, num_heads, kv_seq_len, head_dim),
k.dtype),
jax.ShapeDtypeStruct((batch_size, num_heads, kv_seq_len, head_dim),
v.dtype),
]
def dkv_index_map(batch_index, head_index, kv_seq_index, _):
return (batch_index, head_index, kv_seq_index, 0)
dkv_spec = pl.BlockSpec((1, 1, block_k_major, head_dim), dkv_index_map)
out_specs = [dkv_spec, dkv_spec]
scratch_shapes = [
pltpu.VMEM((block_k_major, head_dim), jnp.float32), # type: ignore
pltpu.VMEM((block_k_major, head_dim), jnp.float32), # type: ignore
]
kernel = functools.partial(
_flash_attention_dkv_kernel,
block_q=block_q, # type: ignore
block_k=block_k, # type: ignore
sm_scale=sm_scale,
causal=causal,
mask_value=mask_value,
q_seq_len=q_seq_len,
)
name_scope = f"flash_mha_bwd_dkv_{block_q_major=}_{block_q=}_{block_k_major=}_{block_k=}"
with jax.named_scope(name_scope):
dk, dv = pl.pallas_call(
kernel,
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
grid=grid,
in_specs=in_specs,
out_specs=out_specs,
scratch_shapes=scratch_shapes,
),
out_shape=out_shapes,
debug=debug,
compiler_params=pltpu.CompilerParams(
dimension_semantics=(
"parallel",
"parallel",
"parallel",
"arbitrary",
)
),
)(q, k, v, ab, q_segment_ids, kv_segment_ids, l, m, do, di)
assert dk.shape == k.shape
assert dv.shape == v.shape
return dk, dv
def _flash_attention_dq_kernel(
q_tile_ref,
k_tile_ref,
v_tile_ref,
ab_tile_ref,
q_segment_ids_tile_ref,
kv_segment_ids_tile_ref,
l_tile_ref,
m_tile_ref,
do_tile_ref,
di_tile_ref,
dq_tile_ref,
ds_tile_ref,
dq_scratch_ref,
*,
sm_scale: float,
causal: bool,
mask_value: float,
kv_seq_len: int,
block_k: int,
):
_, _, block_k_major, _ = k_tile_ref.shape
_, _, block_q_major, _ = q_tile_ref.shape
kv_seq_index = pl.program_id(axis=3)
q_seq_index = pl.program_id(axis=2)
@pl.when(kv_seq_index == 0)
def start_new_sequence():
dq_scratch_ref[:, :] = jnp.zeros(dq_scratch_ref.shape, dq_scratch_ref.dtype)
def body(i, _):
k_slice = pl.ds(i * block_k, block_k)
q = q_tile_ref[0, 0, :, :]
k = k_tile_ref[0, 0, k_slice, :] # [block_k, head_dim]
v = v_tile_ref[0, 0, k_slice, :] # [block_k, head_dim]
l = l_tile_ref[0, 0, :, :] # [block_q_major, 128]
m = m_tile_ref[0, 0, :, :] # [block_q_major, 128]
do = do_tile_ref[0, 0, :, :] # [block_q_major, head_dim]
di = di_tile_ref[0, 0, :].astype(jnp.float32) # [block_q_major, 128]
capped_logits = jax.lax.dot_general(
q, k, TRANS_B_DIM_NUMBERS, preferred_element_type=jnp.float32
)
if ab_tile_ref is not None:
ab = ab_tile_ref[0, 0, :, pl.dslice(i * block_k, block_k)].astype(
jnp.float32
)
capped_logits += ab
if sm_scale != 1.0:
capped_logits *= sm_scale
mask = None
if q_segment_ids_tile_ref is not None:
repeats, rem = divmod(block_k, NUM_LANES)
if rem:
raise NotImplementedError(
f"kv block size must be a multiple of {NUM_LANES}"
)
q_segment_ids = pltpu.repeat(
q_segment_ids_tile_ref[0], repeats, axis=1
) # [block_q, block_k].
kv_segment_ids = kv_segment_ids_tile_ref[:, 0, k_slice] # [1, block_k].
mask = jnp.equal(q_segment_ids, kv_segment_ids).astype(jnp.bool_)
if causal:
mask_shape = (block_q_major, block_k)
row_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 0)
row_ids += q_seq_index * block_q_major
col_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 1)
col_ids += kv_seq_index * block_k_major + i * block_k
causal_mask = col_ids <= row_ids
mask = causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
capped_logits = (
capped_logits
if mask is None
else capped_logits + jnp.where(mask, 0.0, mask_value)
)
p = jnp.exp(
capped_logits - pltpu.repeat(m, block_k // MIN_BLOCK_SIZE, axis=1)
)
p = p * pltpu.repeat(
1 / l, block_k // MIN_BLOCK_SIZE, axis=1
) # [block_q_major, block_k]
# di: [block_q_major, 128]
# do: [block_q_major, head_dim]
# v: [block_k_major, head_dim]
dp = jax.lax.dot_general(
do,
v,
TRANS_B_DIM_NUMBERS,
preferred_element_type=jnp.float32,
)
ds = (dp - pltpu.repeat(di, block_k // MIN_BLOCK_SIZE, axis=1)) * p
# dp = jnp.dot(do, v.T)
# ds = (dp - (dp * p).sum(axis=1)[:, None]) * p
if sm_scale != 1.0:
ds = ds * sm_scale
if ds_tile_ref is not None:
ds_tile_ref[0, 0, :, pl.dslice(i * block_k, block_k)] = ds.astype(
ds_tile_ref.dtype
)
# ds: [block_q_major, block_k]
# k: [block_k, head_dim]
dq_scratch_ref[:, :] += lax.dot(
ds.astype(k.dtype),
k,
preferred_element_type=jnp.float32,
).astype(dq_scratch_ref.dtype)
if causal:
should_run = below_or_on_diag(
q_seq_index, block_q_major, kv_seq_index, block_k_major
)
should_not_run = lax.select(should_run, False, True)
else:
should_run = True
should_not_run = False # type: ignore
@pl.when(should_run)
def run():
lax.fori_loop(0, block_k_major // block_k, body, None, unroll=True)
@pl.when(should_not_run)
def zero_out_ds():
if ds_tile_ref is not None:
ds_tile_ref[...] = jnp.zeros_like(ds_tile_ref)
@pl.when(kv_seq_index == kv_seq_len // block_k_major - 1)
def end_of_kv_sequence():
dq_tile_ref[0, 0, :, :] = dq_scratch_ref[...].astype(dq_tile_ref.dtype)
dq_scratch_ref[...] = jnp.zeros_like(dq_scratch_ref)
def _flash_attention_bwd_dq(
q,
k,
v,
ab,
segment_ids,
l,
m,
do,
di,
*,
block_q_major: int | None,
block_k_major: int | None,
block_k: int | None,
sm_scale: float,
causal: bool,
mask_value: float,
debug: bool,
):
batch_size, num_heads, q_seq_len, head_dim = q.shape
_, _, kv_seq_len, _ = k.shape
_verify_block("block_q_dq", "q_seq_len", block_q_major, q_seq_len)
_verify_block("block_k_major_dq", "kv_seq_len", block_k_major, kv_seq_len)
_verify_block("block_k_dq", "block_k", block_k, kv_seq_len)
# Broadcast out scalar values
m = jnp.broadcast_to(m[..., None], (*m.shape, MIN_BLOCK_SIZE))
l = jnp.broadcast_to(l[..., None], (*l.shape, MIN_BLOCK_SIZE))
# Preprocess contraction for bwd pass
di = jnp.broadcast_to(di[..., None], (*di.shape, block_k_major))
grid = (
batch_size,
num_heads,
q_seq_len // block_q_major,
kv_seq_len // block_k_major,
)
def qo_index_map(batch_index, head_index, q_seq_index, _):
return (batch_index, head_index, q_seq_index, 0)
qo_spec = pl.BlockSpec((1, 1, block_q_major, head_dim), qo_index_map)
do_spec = qo_spec
def kv_index_map(batch_index, head_index, q_seq_index, kv_seq_index):
if causal:
# If the kv block is skipped, prefetch the next valid kv block, i.e. the
# 0th one to be used for the next block_q rows.
next_kv_index = lax.select(
below_or_on_diag(
q_seq_index, block_q_major, kv_seq_index, block_k_major
),
kv_seq_index,
0,
)
else:
next_kv_index = kv_seq_index
return (batch_index, head_index, next_kv_index, 0)
kv_spec = pl.BlockSpec((1, 1, block_k_major, head_dim), kv_index_map)
assert kv_spec.block_shape is not None
assert k.ndim == len(kv_spec.block_shape)
assert v.ndim == len(kv_spec.block_shape)
def lm_index_map(batch_index, head_index, q_seq_index, _):
return (batch_index, head_index, q_seq_index, 0)
lm_spec = pl.BlockSpec((1, 1, block_q_major, MIN_BLOCK_SIZE), lm_index_map)
assert lm_spec.block_shape is not None
assert l.ndim == len(lm_spec.block_shape)
assert m.ndim == len(lm_spec.block_shape)
di_spec = pl.BlockSpec((1, 1, block_q_major, MIN_BLOCK_SIZE), qo_index_map)
assert di_spec.block_shape is not None
assert di.ndim == len(di_spec.block_shape)
def ab_index_map(batch_index, head_index, q_seq_index, kv_seq_index):
return (batch_index, head_index, q_seq_index, kv_seq_index)
dab_spec = (
pl.BlockSpec((1, 1, block_q_major, block_k_major), ab_index_map)
if ab is not None
else None
)
q_segment_ids_spec = kv_segment_ids_spec = None
q_segment_ids = kv_segment_ids = None
if segment_ids is not None:
def q_segment_ids_index_map(batch_index, head_index, q_seq_index, _):
del head_index
return (batch_index, q_seq_index, 0)
def kv_segment_ids_index_map(
batch_index, head_index, q_seq_index, kv_seq_index
):
del head_index
if causal:
# If the kv block is skipped, prefetch the next valid kv block, i.e. the
# 0th one to be used for the next block_q rows.
next_kv_index = lax.select(
below_or_on_diag(
q_seq_index, block_q_major, kv_seq_index, block_k_major
),
kv_seq_index,
0,
)
else:
next_kv_index = kv_seq_index
return (batch_index, 0, next_kv_index)
q_segment_ids_spec = pl.BlockSpec(
(1, block_q_major, NUM_LANES), q_segment_ids_index_map
)
kv_segment_ids_spec = pl.BlockSpec(
(1, NUM_SUBLANES, block_k_major), kv_segment_ids_index_map
)
q_segment_ids = jax.lax.broadcast_in_dim(
segment_ids.q,
(batch_size, q_seq_len, NUM_LANES),
(
0,
1,
),
)
kv_segment_ids = jax.lax.broadcast_in_dim(
segment_ids.kv,
(batch_size, NUM_SUBLANES, kv_seq_len),
(
0,
2,
),
)
in_specs = [
qo_spec,
kv_spec,
kv_spec,
dab_spec,
q_segment_ids_spec,
kv_segment_ids_spec,
lm_spec,
lm_spec,
do_spec,
di_spec,
]
out_shapes = [
jax.ShapeDtypeStruct(q.shape, q.dtype),
jax.ShapeDtypeStruct(ab.shape, ab.dtype) if ab is not None else None,
]
dq_spec = pl.BlockSpec((1, 1, block_q_major, head_dim), qo_index_map)
out_specs = [
dq_spec,
dab_spec,
]
scratch_shapes = [pltpu.VMEM((block_q_major, head_dim), jnp.float32)] # type: ignore
kernel = functools.partial(
_flash_attention_dq_kernel,
sm_scale=sm_scale,
causal=causal,
mask_value=mask_value,
block_k=block_k, # type: ignore
kv_seq_len=kv_seq_len,
)
name_scope = f"flash_mha_bwd_dq_{block_q_major=}_{block_k_major=}_{block_k=}"
with jax.named_scope(name_scope):
dq, ds = pl.pallas_call(
kernel,
grid_spec=pltpu.PrefetchScalarGridSpec(
num_scalar_prefetch=0,
grid=grid,
in_specs=in_specs,
out_specs=out_specs,
scratch_shapes=scratch_shapes,
),
out_shape=out_shapes,
debug=debug,
compiler_params=pltpu.CompilerParams(
dimension_semantics=(
"parallel",
"parallel",
"parallel",
"arbitrary",
)
),
)(q, k, v, ab, q_segment_ids, kv_segment_ids, l, m, do, di)
# dab is just ds
return dq, ds
# For autograd testing.
def mha_reference_no_custom_vjp(
q,
k,
v,
ab: jax.Array | None = None,
segment_ids: SegmentIds | None = None,
*,
causal: bool = False,
mask_value: float = DEFAULT_MASK_VALUE,
sm_scale: float = 1.0,
save_residuals: bool = False,
):
logits = jnp.einsum("bhqc,bhkc->bhqk", q, k)
if ab is not None:
logits += ab
if sm_scale != 1.0:
logits *= sm_scale
mask = None
if segment_ids is not None:
mask = segment_ids.q[:, :, None] == segment_ids.kv[:, None, :]
mask = mask[:, None, :, :]
if causal:
_, _, q_seq_len, _ = q.shape
_, _, kv_seq_len, _ = k.shape
mask_shape = (q_seq_len, kv_seq_len)
row_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 0)
col_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 1)
causal_mask = (col_ids <= row_ids)[None, None, :, :]
mask = causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
logits = logits if mask is None else logits + jnp.where(mask, 0.0, mask_value)
m = logits.max(axis=-1)
unnormalized = jnp.exp(logits - m[..., None])
l = unnormalized.sum(axis=-1)
weights = unnormalized / l[..., None]
out = jnp.einsum("bhqk,bhkc->bhqc", weights, v)
if save_residuals:
return out, l, m
return out
@functools.partial(
jax.jit, static_argnames=["causal", "mask_value", "sm_scale"]
)
@jax.default_matmul_precision("bfloat16")
def mha_reference(
q,
k,
v,
ab,
segment_ids: SegmentIds | None = None,
causal: bool = False,
mask_value: float = DEFAULT_MASK_VALUE,
sm_scale=1.0,
):
return _mha_reference(
q,
k,
v,
ab,
segment_ids,
causal=causal,
mask_value=mask_value,
sm_scale=sm_scale,
save_residuals=False,
)
@functools.partial(jax.custom_vjp, nondiff_argnums=(5, 6, 7, 8))
def _mha_reference(
q,
k,
v,
ab,
segment_ids: SegmentIds | None,
causal: bool,
mask_value: float,
sm_scale: float,
save_residuals: bool,
):
return mha_reference_no_custom_vjp(
q,
k,
v,
ab,
segment_ids,
causal=causal,
mask_value=mask_value,
sm_scale=sm_scale,
save_residuals=save_residuals,
)
def _mha_reference_fwd(
q,
k,
v,
ab,
segment_ids: SegmentIds | None,
causal: bool,
mask_value: float,
sm_scale: float,
save_residuals: bool,
):
if save_residuals:
raise NotImplementedError
res = _mha_reference(
q,
k,
v,
ab,
segment_ids,
causal=causal,
mask_value=mask_value,
sm_scale=sm_scale,
save_residuals=True,
)
assert isinstance(res, tuple)
out, l, m = res
return out, (q, k, v, ab, segment_ids, out, l, m)
@functools.partial(
jax.jit,
static_argnames=[
"causal",
"mask_value",
"sm_scale",
],
)
def mha_reference_bwd(
q,
k,
v,
ab,
segment_ids: SegmentIds | None,
o,
l,
m,
do,
causal: bool = False,
mask_value: float = DEFAULT_MASK_VALUE,
sm_scale: float = 1.0,
):
if sm_scale != 1.0:
raise NotImplementedError
logits = jnp.einsum(
"bhqc,bhkc->bhqk",
q.astype(jnp.float32),
k.astype(jnp.float32),
)
if ab is not None:
logits += ab
mask = None
if segment_ids is not None:
mask = segment_ids.q[:, :, None] == segment_ids.kv[:, None, :]
mask = mask[:, None, :, :]
if causal:
_, _, q_seq_len, _ = q.shape
_, _, kv_seq_len, _ = k.shape
mask_shape = (q_seq_len, kv_seq_len)
row_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 0)
col_ids = jax.lax.broadcasted_iota(jnp.int32, mask_shape, 1)
causal_mask = (col_ids <= row_ids)[None, None, :, :]
mask = causal_mask if mask is None else jnp.logical_and(mask, causal_mask)
logits = logits if mask is None else logits + jnp.where(mask, 0.0, mask_value)
unnormalized = jnp.exp(logits - m[..., None])
p = unnormalized / l[..., None]
dv = jnp.einsum("bhpt,bhpd->bhtd", p, do.astype(jnp.float32)).astype(v.dtype)
dp = jnp.einsum(
"bhpd,bhtd->bhpt", do.astype(jnp.float32), v.astype(jnp.float32)
)
di = jnp.sum(o.astype(jnp.float32) * do.astype(jnp.float32), axis=-1)[
..., None
] # [batch_size, num_heads, q_seq_len, 1]
ds = (dp - di) * p
dk = jnp.einsum("bhsd,bhst->bhtd", q.astype(jnp.float32), ds).astype(k.dtype)
dq = jnp.einsum("bhst,bhtd->bhsd", ds, k.astype(jnp.float32)).astype(q.dtype)
# dab is just ds
dab = ds if ab is not None else None
return dq, dk, dv, dab
def _mha_reference_bwd(
causal: bool,
mask_value: float,
sm_scale: float,
save_residuals: bool,
residuals,
do,
):
del save_residuals
q, k, v, ab, segment_ids, o, l, m = residuals
dq, dk, dv, dab = mha_reference_bwd(
q,
k,
v,
ab,
segment_ids,
o,
l,
m,
do,
causal=causal,
mask_value=mask_value,
sm_scale=sm_scale,
)
return dq, dk, dv, dab, None
_mha_reference.defvjp(fwd=_mha_reference_fwd, bwd=_mha_reference_bwd)
def _verify_block(block_name, dim_name, block, dim, should_divide=True):
if block > dim:
raise ValueError(
f"{block_name}={block} should be smaller or equal to {dim_name}={dim}"
)
if should_divide and dim % block != 0:
raise ValueError(
f"{dim_name}={dim} should be divisible by {block_name}={block}"
)
| BlockSizes |
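# A minimal, self-contained NumPy sketch (illustrative only, not part of the
# flash-attention source above) of the identity behind the `di` precompute used
# in the backward kernels: because o = p @ v and dp = do @ v.T, the row-wise sum
# of dp * p equals the row-wise sum of o * do, so ds = (dp - di) * p matches the
# commented-out form ds = (dp - (dp * p).sum(axis=1)[:, None]) * p.
import numpy as np

rng = np.random.default_rng(0)
p = rng.random((4, 6))
p /= p.sum(axis=1, keepdims=True)  # softmax-like rows
v = rng.random((6, 8))
do = rng.random((4, 8))
o = p @ v
dp = do @ v.T
di = (o * do).sum(axis=1, keepdims=True)
assert np.allclose((dp * p).sum(axis=1, keepdims=True), di)
assert np.allclose((dp - (dp * p).sum(axis=1)[:, None]) * p, (dp - di) * p)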
python | getsentry__sentry | src/sentry/api/bases/team.py | {
"start": 1442,
"end": 2504
} | class ____(Endpoint):
permission_classes: tuple[type[BasePermission], ...] = (TeamPermission,)
def convert_args(
self,
request: Request,
organization_id_or_slug: str | int,
team_id_or_slug: str | int,
*args: Any,
**kwargs: Any,
) -> tuple[tuple[Any, ...], dict[str, Any]]:
try:
team = (
Team.objects.filter(
organization__slug__id_or_slug=organization_id_or_slug,
slug__id_or_slug=team_id_or_slug,
)
.select_related("organization")
.get()
)
except Team.DoesNotExist:
raise ResourceDoesNotExist
if team.status != TeamStatus.ACTIVE:
raise ResourceDoesNotExist
self.check_object_permissions(request, team)
bind_organization_context(team.organization)
request._request.organization = team.organization # type: ignore[attr-defined]
kwargs["team"] = team
return (args, kwargs)
| TeamEndpoint |
python | getsentry__sentry | src/sentry/templatetags/sentry_assets.py | {
"start": 2008,
"end": 3431
} | class ____(template.Node):
def __init__(self, nodelist, **kwargs):
self.nodelist = nodelist
self.attrs = kwargs
def _get_value(self, token, context):
if isinstance(token, str):
return token
if isinstance(token, template.base.FilterExpression):
return token.resolve(context)
return None
def render(self, context):
request = context.get("request")
if hasattr(request, "csp_nonce"):
self.attrs["nonce"] = request.csp_nonce
content = ""
attrs = self._render_attrs(context)
if "src" not in self.attrs:
content = self.nodelist.render(context).strip()
content = self._unwrap_content(content)
return f"<script{attrs}>{content}</script>"
def _render_attrs(self, context):
output = []
for k, v in self.attrs.items():
value = self._get_value(v, context)
if value in (True, "True"):
output.append(f" {k}")
elif value in (None, False, "False"):
continue
else:
output.append(f' {k}="{value}"')
output = sorted(output)
return "".join(output)
def _unwrap_content(self, text):
matches = re.search(r"<script[^\>]*>([\s\S]*?)</script>", text)
if matches:
return matches.group(1).strip()
return text
| ScriptNode |
python | walkccc__LeetCode | solutions/399. Evaluate Division/399.py | {
"start": 0,
"end": 929
} | class ____:
def calcEquation(
self,
equations: list[list[str]],
values: list[float],
queries: list[list[str]],
) -> list[float]:
ans = []
# graph[A][B] := A / B
graph = collections.defaultdict(dict)
for (A, B), value in zip(equations, values):
graph[A][B] = value
graph[B][A] = 1 / value
def divide(A: str, C: str, seen: set[str]) -> float:
"""Returns A / C."""
if A == C:
return 1.0
seen.add(A)
# value := A / B
for B, value in graph[A].items():
if B in seen:
continue
res = divide(B, C, seen) # B / C
if res > 0: # valid result
return value * res # (A / B) * (B / C) = A / C
return -1.0 # invalid result
for A, C in queries:
if A not in graph or C not in graph:
ans.append(-1.0)
else:
ans.append(divide(A, C, set()))
return ans
| Solution |
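# Illustrative usage sketch for the calcEquation solution above. The input
# values are made up for the example, and `import collections` is assumed to be
# in scope for the class definition: a/b = 2.0 and b/c = 3.0 imply a/c = 6.0 and
# c/a = 1/6, while a query touching an unknown variable returns -1.0.
equations = [["a", "b"], ["b", "c"]]
values = [2.0, 3.0]
queries = [["a", "c"], ["c", "a"], ["a", "x"]]
print(Solution().calcEquation(equations, values, queries))
# expected: [6.0, ~0.1667, -1.0]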
python | python__mypy | mypy/checkexpr.py | {
"start": 288281,
"end": 292003
} | class ____(types.BoolTypeQuery):
def __init__(self, ignore_in_type_obj: bool) -> None:
super().__init__(types.ANY_STRATEGY)
self.ignore_in_type_obj = ignore_in_type_obj
def visit_any(self, t: AnyType) -> bool:
return t.type_of_any != TypeOfAny.special_form # special forms are not real Any types
def visit_callable_type(self, t: CallableType) -> bool:
if self.ignore_in_type_obj and t.is_type_obj():
return False
return super().visit_callable_type(t)
def visit_type_var(self, t: TypeVarType) -> bool:
default = [t.default] if t.has_default() else []
return self.query_types([t.upper_bound, *default] + t.values)
def visit_param_spec(self, t: ParamSpecType) -> bool:
default = [t.default] if t.has_default() else []
return self.query_types([t.upper_bound, *default, t.prefix])
def visit_type_var_tuple(self, t: TypeVarTupleType) -> bool:
default = [t.default] if t.has_default() else []
return self.query_types([t.upper_bound, *default])
def has_coroutine_decorator(t: Type) -> bool:
"""Whether t came from a function decorated with `@coroutine`."""
t = get_proper_type(t)
return isinstance(t, Instance) and t.type.fullname == "typing.AwaitableGenerator"
def is_async_def(t: Type) -> bool:
"""Whether t came from a function defined using `async def`."""
# In check_func_def(), when we see a function decorated with
# `@typing.coroutine` or `@asyncio.coroutine`, we change the
# return type to typing.AwaitableGenerator[...], so that its
# type is compatible with either Generator or Awaitable.
# But for the check here we need to know whether the original
# function (before decoration) was an `async def`. The
# AwaitableGenerator type conveniently preserves the original
# type as its 4th parameter (3rd when using 0-origin indexing
# :-), so that we can recover that information here.
# (We really need to see whether the original, undecorated
# function was an `async def`, which is orthogonal to its
# decorations.)
t = get_proper_type(t)
if (
isinstance(t, Instance)
and t.type.fullname == "typing.AwaitableGenerator"
and len(t.args) >= 4
):
t = get_proper_type(t.args[3])
return isinstance(t, Instance) and t.type.fullname == "typing.Coroutine"
def is_non_empty_tuple(t: Type) -> bool:
t = get_proper_type(t)
return isinstance(t, TupleType) and bool(t.items)
def is_duplicate_mapping(
mapping: list[int], actual_types: list[Type], actual_kinds: list[ArgKind]
) -> bool:
return (
len(mapping) > 1
# Multiple actuals can map to the same formal if they both come from
# varargs (*args and **kwargs); in this case at runtime it is possible
# that here are no duplicates. We need to allow this, as the convention
# f(..., *args, **kwargs) is common enough.
and not (
len(mapping) == 2
and actual_kinds[mapping[0]] == nodes.ARG_STAR
and actual_kinds[mapping[1]] == nodes.ARG_STAR2
)
# Multiple actuals can map to the same formal if there are multiple
# **kwargs which cannot be mapped with certainty (non-TypedDict
# **kwargs).
and not all(
actual_kinds[m] == nodes.ARG_STAR2
and not isinstance(get_proper_type(actual_types[m]), TypedDictType)
for m in mapping
)
)
def replace_callable_return_type(c: CallableType, new_ret_type: Type) -> CallableType:
"""Return a copy of a callable type with a different return type."""
return c.copy_modified(ret_type=new_ret_type)
| HasAnyType |
python | tensorflow__tensorflow | tensorflow/python/ops/numpy_ops/np_random_test.py | {
"start": 3069,
"end": 3889
} | class ____(RandomTestBase):
def setUp(self):
self.np_func = np_random.uniform
self.onp_func = onp.random.uniform
super(UniformTest, self).setUp()
@parameterized.parameters(
((), (), None),
(1, (), None),
((), 1, None),
(1, 1, None),
((1, 2), (2, 1), None),
((1, 2, 1), (2, 1, 1), (2, 2, 2)),
((), (), (2, 2, 2)),
)
def test_broadcast(self, low_shape, high_shape, size):
low = np_array_ops.zeros(low_shape).astype(np_dtypes.float64)
high = np_array_ops.ones(high_shape).astype(np_dtypes.float64)
self._test(low=low, high=high, size=size)
def test_float32(self):
self._test(0, 1, (1, 2), allow_float64=False, onp_dtype=np_dtypes.float32)
def test_dtype_cast(self):
self._test(np_dtypes.int8(0), np_dtypes.uint8(1), (1, 2))
| UniformTest |
python | getsentry__sentry-python | tests/integrations/django/myapp/settings.py | {
"start": 1401,
"end": 5053
} | class ____(MiddlewareMixin):
def process_request(self, request):
# https://github.com/getsentry/sentry-python/issues/837 -- We should
# not touch the resolver_match because apparently people rely on it.
if request.resolver_match:
assert not getattr(request.resolver_match.callback, "__wrapped__", None)
if "middleware-exc" in request.path:
1 / 0
def process_response(self, request, response):
return response
def TestFunctionMiddleware(get_response): # noqa: N802
def middleware(request):
return get_response(request)
return middleware
MIDDLEWARE_CLASSES = [
"django.contrib.sessions.middleware.SessionMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"tests.integrations.django.myapp.settings.TestMiddleware",
]
if MiddlewareMixin is not object:
MIDDLEWARE = MIDDLEWARE_CLASSES + [
"tests.integrations.django.myapp.settings.TestFunctionMiddleware"
]
ROOT_URLCONF = "tests.integrations.django.myapp.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [],
"APP_DIRS": True,
"OPTIONS": {
"debug": True,
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
}
]
WSGI_APPLICATION = "tests.integrations.django.myapp.wsgi.application"
# Database
# https://docs.djangoproject.com/en/2.0/ref/settings/#databases
DATABASES = {"default": {"ENGINE": "django.db.backends.sqlite3", "NAME": ":memory:"}}
try:
import psycopg2 # noqa
db_engine = "django.db.backends.postgresql"
try:
from django.db.backends import postgresql # noqa: F401
except ImportError:
db_engine = "django.db.backends.postgresql_psycopg2"
DATABASES["postgres"] = {
"ENGINE": db_engine,
"HOST": os.environ.get("SENTRY_PYTHON_TEST_POSTGRES_HOST", "localhost"),
"PORT": int(os.environ.get("SENTRY_PYTHON_TEST_POSTGRES_PORT", "5432")),
"USER": os.environ.get("SENTRY_PYTHON_TEST_POSTGRES_USER", "postgres"),
"PASSWORD": os.environ.get("SENTRY_PYTHON_TEST_POSTGRES_PASSWORD", "sentry"),
"NAME": os.environ.get(
"SENTRY_PYTHON_TEST_POSTGRES_NAME", f"myapp_db_{os.getpid()}"
),
}
except (ImportError, KeyError):
from sentry_sdk.utils import logger
logger.warning("No psycopg2 found, testing with SQLite.")
# Password validation
# https://docs.djangoproject.com/en/2.0/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": "django.contrib.auth.password_validation.UserAttributeSimilarityValidator"
},
{"NAME": "django.contrib.auth.password_validation.MinimumLengthValidator"},
{"NAME": "django.contrib.auth.password_validation.CommonPasswordValidator"},
{"NAME": "django.contrib.auth.password_validation.NumericPasswordValidator"},
]
# Internationalization
# https://docs.djangoproject.com/en/2.0/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "UTC"
USE_I18N = True
USE_L10N = True
USE_TZ = False
TEMPLATE_DEBUG = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.0/howto/static-files/
STATIC_URL = "/static/"
# django-channels specific
ASGI_APPLICATION = "tests.integrations.django.myapp.routing.application"
| TestMiddleware |
python | pytorch__pytorch | torch/_inductor/ir.py | {
"start": 80146,
"end": 87392
} | class ____(Loops):
scan_ranges: list[Integer]
size: list[Integer]
combine_fn: Callable[[tuple[Any, ...], tuple[Any, ...]], tuple[Any, ...]]
reindex: Callable[[Sequence[_IntLike], Sequence[_IntLike]], Sequence[_IntLike]]
reduction_hint: ReductionHint
output_index: int
# output_index indexes the following tuples
dtypes: tuple[torch.dtype, ...]
inner_fns: tuple[Callable[..., Any], ...]
# HACK we mimic reduction
@cache_on_self_and_args("Scan")
def get_free_symbol_uses(self, unbacked_only: bool = False) -> OrderedSet[Symbol]:
# TODO: Can combine_fn/reindex close over unbacked symbols? If so, we
# need to explicitly represent the closure so we can pull out unbacked
# symbols here
return (
super().get_free_symbol_uses(unbacked_only)
| OrderedSet().union(
*(get_free_symbols(e, unbacked_only) for e in self.scan_ranges)
)
| OrderedSet().union(
*(get_free_symbols(e, unbacked_only) for e in self.size)
)
)
def __post_init__(self) -> None:
assert len(self.ranges) + len(self.scan_ranges) == len(self.size)
super().__post_init__()
def store_reduction(
self,
output_name: Optional[str],
indexer: Callable[[Sequence[_IntLike]], Never],
vars: Sequence[Expr],
scan_vars: Sequence[Symbol],
) -> Any:
idx = self.reindex(vars, scan_vars)
values = tuple(inner_fn(idx) for inner_fn in self.inner_fns)
result = ops.scan(self.dtypes, self.combine_fn, values)
return ops.store(
output_name or "unnamed", indexer(idx), result[self.output_index]
)
def get_reduction_type(self) -> Optional[str]:
# return self.scan_op
return "custom"
def get_reduction_size(self) -> Sequence[Expr]:
return self.scan_ranges
def get_size(self) -> Sequence[Expr]:
return self.size
def get_pointwise_size(self) -> Sequence[Expr]:
return self.ranges
def index_length(self) -> int:
return len(self.ranges) + len(self.scan_ranges)
def inner_fn_args(self) -> Sequence[Sequence[_IntLike]]:
index = self._index(self.ranges)
rindex = self._index(self.scan_ranges, SymT.R0_INDEX)
idx = self.reindex(index, rindex)
return (idx,)
def inner_fn_free_symbols(self, unbacked_only: bool = False) -> OrderedSet[Symbol]:
index = self._index(self.ranges)
rindex = self._index(self.scan_ranges, SymT.R0_INDEX)
idx = self.reindex(index, rindex)
return extract_free_symbols(self.inner_fn, idx, unbacked_only=unbacked_only)
@classmethod
def create( # type: ignore[override]
cls,
device: torch.device,
dtypes: tuple[torch.dtype, ...],
inner_fns: tuple[Callable[[Sequence[Expr]], Any], ...],
size: list[Integer],
axis: int,
combine_fn: Callable[[tuple[Any, ...], tuple[Any, ...]], tuple[Any, ...]],
reduction_hint: ReductionHint = ReductionHint.DEFAULT,
*,
# Whether we have the option to fallback to aten
can_fallback_to_aten: bool = True,
**kwargs: Any,
) -> Sequence[Optional[Union[TensorBox, ShapeAsConstantBuffer]]]:
pointwise_ranges = [*size[:axis], *size[axis + 1 :]]
scan_ranges = [size[axis]]
if not V.graph.has_feature(device, BackendFeature.SCAN):
return [None] * len(dtypes)
if len(dtypes) > 1 and not V.graph.has_feature(
device, BackendFeature.TUPLE_REDUCTION
):
return [None] * len(dtypes)
sizevars = V.graph.sizevars
scan_numel = sizevars.simplify(sympy_product(scan_ranges))
assert len(dtypes) == len(inner_fns)
# Scan with a single element is just a copy
if sizevars.statically_known_true(sympy.Le(scan_numel, 1)):
return [
Pointwise.create(
device=device,
dtype=dtypes[output_index],
inner_fn=inner_fns[output_index],
ranges=size,
)
for output_index in range(len(dtypes))
]
reduction_hint, num_splits = cls.num_splits(
device=device,
dtype=dtypes[0],
inner_fn=inner_fns[0],
axis=axis,
pointwise_ranges=pointwise_ranges,
scan_ranges=scan_ranges,
combine_fn=combine_fn,
scan_numel=scan_numel,
)
scan_type = Scan
if num_splits > 1:
supports_split = (
# pyrefly: ignore [unsupported-operation]
torch.version.hip is None or (has_triton and triton_version >= "3.3.0")
) and (len(dtypes) == 1)
if not supports_split:
if can_fallback_to_aten:
# Fallback to ATen
return [None] * len(dtypes)
else:
num_splits = 1
else:
scan_type = SplitScan
def reindex(index: Sequence[Expr], scan_index: Sequence[Expr]) -> list[Expr]:
assert len(scan_index) == len(scan_ranges)
assert len(index) == len(pointwise_ranges)
return [*index[:axis], *scan_index, *index[axis:]]
results = [
TensorBox.create(
scan_type(
device=device,
dtype=dtypes[output_index],
dtypes=dtypes,
inner_fn=inner_fns[output_index],
inner_fns=inner_fns,
size=size,
ranges=pointwise_ranges,
scan_ranges=scan_ranges,
combine_fn=combine_fn,
reindex=reindex,
reduction_hint=reduction_hint,
output_index=output_index,
**kwargs,
)
)
for output_index in range(len(dtypes))
]
for result in results:
result.realize()
return results
@classmethod
def num_splits(
cls,
device: torch.device,
dtype: torch.dtype,
inner_fn: Callable[[Sequence[Expr]], OpsValue],
axis: int,
pointwise_ranges: list[Integer],
scan_ranges: list[Integer],
combine_fn: Callable[[tuple[Any, ...], tuple[Any, ...]], tuple[Any, ...]],
scan_numel: Expr,
) -> tuple[ReductionHint, _IntLike]:
# TODO: custom splitting heuristic for scan
def wrapper_fn(idx: Sequence[Expr], reduction_idx: Sequence[Expr]) -> OpsValue:
return inner_fn([*idx[:axis], *reduction_idx, *idx[axis:]])
return Reduction.num_splits(
device=device,
dst_dtype=dtype,
src_dtype=dtype,
inner_fn=wrapper_fn,
ranges=pointwise_ranges,
reduction_ranges=scan_ranges,
reduction_type="scan",
reduction_numel=scan_numel,
)
# This signifies a scan op that should go through TritonSplitScanKernel codegen on CUDA.
@ir_dataclass
| Scan |
python | kamyu104__LeetCode-Solutions | Python/minimum-consecutive-cards-to-pick-up.py | {
"start": 42,
"end": 431
} | class ____(object):
def minimumCardPickup(self, cards):
"""
:type cards: List[int]
:rtype: int
"""
lookup = {}
result = float("inf")
for i, x in enumerate(cards):
if x in lookup:
result = min(result, i-lookup[x]+1)
lookup[x] = i
return result if result != float("inf") else -1
| Solution |
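# Illustrative usage sketch for minimumCardPickup above (example values made up):
# in [3, 4, 2, 3, 4, 7] the closest repeated value spans 4 consecutive cards
# (indices 0..3 for the 3s, or 1..4 for the 4s); with no repeats the answer is -1.
print(Solution().minimumCardPickup([3, 4, 2, 3, 4, 7]))  # expected: 4
print(Solution().minimumCardPickup([1, 0, 5, 3]))  # expected: -1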
python | lepture__authlib | authlib/oidc/core/claims.py | {
"start": 7699,
"end": 9060
} | class ____(ImplicitIDToken):
RESPONSE_TYPES = ("code id_token", "code token", "code id_token token")
REGISTERED_CLAIMS = _REGISTERED_CLAIMS + ["c_hash"]
def validate(self, now=None, leeway=0):
super().validate(now=now, leeway=leeway)
self.validate_c_hash()
def validate_c_hash(self):
"""Code hash value. Its value is the base64url encoding of the
left-most half of the hash of the octets of the ASCII representation
of the code value, where the hash algorithm used is the hash algorithm
used in the alg Header Parameter of the ID Token's JOSE Header. For
instance, if the alg is HS512, hash the code value with SHA-512, then
take the left-most 256 bits and base64url encode them. The c_hash
value is a case sensitive string.
If the ID Token is issued from the Authorization Endpoint with a code,
which is the case for the response_type values code id_token and code
id_token token, this is REQUIRED; otherwise, its inclusion is OPTIONAL.
"""
code = self.params.get("code")
c_hash = self.get("c_hash")
if code:
if not c_hash:
raise MissingClaimError("c_hash")
if not _verify_hash(c_hash, code, self.header["alg"]):
raise InvalidClaimError("c_hash")
| HybridIDToken |
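# A minimal sketch (not authlib's internal _verify_hash) of the c_hash
# construction described in the validate_c_hash docstring above: hash the ASCII
# code value with the hash function implied by the JOSE alg header, keep the
# left-most half of the digest, and base64url-encode it without padding. The
# helper name and the alg-to-SHA mapping are assumptions for illustration only.
import base64
import hashlib

def compute_c_hash(code, alg="HS512"):
    hash_name = "sha" + alg[2:]  # e.g. HS512 / RS512 / ES512 -> sha512
    digest = hashlib.new(hash_name, code.encode("ascii")).digest()
    left_half = digest[: len(digest) // 2]
    return base64.urlsafe_b64encode(left_half).rstrip(b"=").decode("ascii")

# A verifier would compare compute_c_hash(code, header_alg) against the
# id_token's c_hash claim.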
python | PrefectHQ__prefect | src/prefect/server/schemas/filters.py | {
"start": 7366,
"end": 8084
} | class ____(PrefectFilterBaseModel):
"""Filter by `FlowRun.id`."""
any_: Optional[list[UUID]] = Field(
default=None, description="A list of flow run ids to include"
)
not_any_: Optional[list[UUID]] = Field(
default=None, description="A list of flow run ids to exclude"
)
def _get_filter_list(
self, db: "PrefectDBInterface"
) -> Iterable[sa.ColumnExpressionArgument[bool]]:
filters: list[sa.ColumnExpressionArgument[bool]] = []
if self.any_ is not None:
filters.append(db.FlowRun.id.in_(self.any_))
if self.not_any_ is not None:
filters.append(db.FlowRun.id.not_in(self.not_any_))
return filters
| FlowRunFilterId |
python | django__django | tests/staticfiles_tests/test_forms.py | {
"start": 697,
"end": 1629
} | class ____(SimpleTestCase):
def test_absolute_url(self):
m = Media(
css={"all": ("path/to/css1", "/path/to/css2")},
js=(
"/path/to/js1",
"http://media.other.com/path/to/js2",
"https://secure.other.com/path/to/js3",
static("relative/path/to/js4"),
),
)
self.assertEqual(
str(m),
'<link href="https://example.com/assets/path/to/css1" media="all" '
'rel="stylesheet">\n'
'<link href="/path/to/css2" media="all" rel="stylesheet">\n'
'<script src="/path/to/js1"></script>\n'
'<script src="http://media.other.com/path/to/js2"></script>\n'
'<script src="https://secure.other.com/path/to/js3"></script>\n'
'<script src="https://example.com/assets/relative/path/to/js4"></script>',
)
| StaticFilesFormsMediaTestCase |
python | pandas-dev__pandas | pandas/tests/tslibs/test_npy_units.py | {
"start": 244,
"end": 922
} | class ____:
def test_is_date_array_normalized_day(self):
arr = day_arr
abbrev = "D"
unit = abbrev_to_npy_unit(abbrev)
result = is_date_array_normalized(arr.view("i8"), None, unit)
assert result is True
def test_is_date_array_normalized_seconds(self):
abbrev = "s"
arr = day_arr.astype(f"M8[{abbrev}]")
unit = abbrev_to_npy_unit(abbrev)
result = is_date_array_normalized(arr.view("i8"), None, unit)
assert result is True
arr[0] += np.timedelta64(1, abbrev)
result2 = is_date_array_normalized(arr.view("i8"), None, unit)
assert result2 is False
| TestIsDateArrayNormalized |
python | openai__openai-python | src/openai/types/beta/realtime/session_create_response.py | {
"start": 797,
"end": 919
} | class ____(BaseModel):
model: Optional[str] = None
"""The model to use for transcription."""
| InputAudioTranscription |
python | readthedocs__readthedocs.org | readthedocs/organizations/migrations/0003_team_auto_join_email_users.py | {
"start": 149,
"end": 621
} | class ____(migrations.Migration):
safe = Safe.after_deploy()
dependencies = [
("organizations", "0002_update_meta_options"),
]
operations = [
migrations.AddField(
model_name="team",
name="auto_join_email_users",
field=models.BooleanField(
default=False,
help_text="Auto join users with an organization's email address to this team.",
),
),
]
| Migration |
python | eventlet__eventlet | eventlet/db_pool.py | {
"start": 9557,
"end": 10243
} | class ____(BaseConnectionPool):
"""A pool which gives out :class:`~eventlet.tpool.Proxy`-based database
connections.
"""
def create(self):
now = time.time()
return now, now, self.connect(
self._db_module, self.connect_timeout, *self._args, **self._kwargs)
@classmethod
def connect(cls, db_module, connect_timeout, *args, **kw):
t = timeout.Timeout(connect_timeout, ConnectTimeout())
try:
from eventlet import tpool
conn = tpool.execute(db_module.connect, *args, **kw)
return tpool.Proxy(conn, autowrap_names=('cursor',))
finally:
t.cancel()
| TpooledConnectionPool |
python | tensorflow__tensorflow | tensorflow/python/eager/summary_optimizer_test.py | {
"start": 1461,
"end": 8072
} | class ____(test.TestCase):
def setUp(self):
super().setUp()
self.summary_dir = os.path.join(FLAGS.test_tmpdir, 'mylogs')
# Clean up any summary directories before starting the test so we can
# validate that summaries are only written when enabled.
try:
gfile.DeleteRecursively(self.summary_dir)
except Exception: # pylint: disable=broad-exception-caught
pass
@test_util.run_v2_only
def test_strip_summary_ops(self):
def normalize_while_node(fndef):
"""Helper method to normalize the while node for comparison."""
for node in fndef.node_def:
if node.op == 'While':
# The names of the nested functions are expected to be different
# because they will have a uid appended to them.
node.attr['body'].func.name = 'while_body'
node.attr['cond'].func.name = 'while_cond'
# The summary_writer and `include_summary` args are expected to be
# passed in and out of the transformed function as we do not modify
# the function signatures.
# Expect a mismatch in input and output types/shapes.
node.attr['T'].ClearField('list')
node.attr['output_shapes'].ClearField('list')
expected_inputs = {
'write_summary_summary_cond_input_1',
'record_summary',
}
if 'record_summary' not in node.input:
continue
inputs = node.input
node.ClearField('input')
node.input.extend(inp for inp in inputs if inp not in expected_inputs)
node.attr['_num_original_outputs'].i -= 2
return fndef
def normalize_fdef(fndef):
"""Method to normalize the tf.function's FunctionDefs for comparison."""
# Normalize the names for comparison as they have a uid appended.
fndef.signature.name = '__inference_add'
# The summary writer is expected to be passed into the transformed fn.
inputs = fndef.signature.input_arg
fndef.signature.ClearField('input_arg')
fndef.signature.input_arg.extend(
inp
for inp in inputs
if inp.name != 'write_summary_summary_cond_input_1'
)
# The disable_summaries_at_runtime attr is expected to be cleared.
fndef.attr['disable_summaries_at_runtime'].ClearField('list')
return fndef
writer = summary_ops_v2.create_file_writer_v2(self.summary_dir)
var = variables.Variable(1.0)
def remove_writer_attr(fndef):
arg_attr = fndef.arg_attr
attr_idx = None
# tf.function uses TraceType to create placeholder for captures.
# An extra "_user_specified_name" attr will be added to the placeholder.
for idx in arg_attr:
if arg_attr[idx].attr['_user_specified_name'].s == b'input_1':
attr_idx = idx
break
if attr_idx is not None:
# Copy subsequent arg_attr to ensure indexes are continuous
for idx in range(attr_idx, len(arg_attr) - 1):
fndef.arg_attr[idx].CopyFrom(fndef.arg_attr[idx + 1])
del fndef.arg_attr[len(arg_attr) - 1]
return fndef
@polymorphic_function.function(
autograph=False,
experimental_attributes={
'disable_summaries_at_runtime': ['record_summary', False]
},
)
def add(x, y, record_summary, include_summary):
def body(step, result):
result += math_ops.cast(step, dtypes.float32)
var.assign(result)
if include_summary:
# Perform a summary write in a nested function.
with writer.as_default():
summary_ops_v2.set_step(step)
summary_ops_v2.write('my_metric', result, step=step)
writer.flush()
return math_ops.add(step, 1)
result = math_ops.add(x, y)
step = constant_op.constant(0, dtypes.int64)
with summary_ops_v2.record_if(record_summary):
if include_summary:
# Perform a summary write in the main function body.
with writer.as_default():
summary_ops_v2.set_step(step)
summary_ops_v2.write('my_metric', result, step=step)
writer.flush()
step = math_ops.add(step, 1)
loop_cond = lambda i: math_ops.less(i, 3)
loop_body = lambda i: body(i, result)
step = while_loop.while_loop_v2(loop_cond, loop_body, [step])
var.assign(result)
return result
one = constant_op.constant(1.0, dtypes.float32)
inputs_with_summaries = [one, one, constant_op.constant(True), True]
inputs_without_summaries = [one, one, constant_op.constant(False), False]
inputs_without_summaries_at_runtime = [
one,
one,
constant_op.constant(False),
True,
]
# Ensure the result of `add` is the same with and without summaries.
self.assertEqual(
add(*inputs_with_summaries), add(*inputs_without_summaries)
)
# Ensure the result of `add` is the same when summaries have been stripped
# at trace time.
self.assertEqual(
add(*inputs_without_summaries_at_runtime),
add(*inputs_without_summaries),
)
# Force a trace of `add` where summaries have been stripped at trace time.
expected = add.get_concrete_function(*inputs_without_summaries).function_def
# Extract the trace of `add` where summaries have been stripped in the
# runtime.
function_name = add.get_concrete_function(
*inputs_without_summaries_at_runtime
).function_def.signature.name
ctx = runtime_client.GlobalPythonEagerContext()
rt = runtime_client.Runtime(ctx)
fndef = rt.GetFunctionProto(function_name + '__instance__no_summaries')
# Normalize the fndefs and compare them for equivalence.
fndef = normalize_fdef(normalize_while_node(fndef))
fndef = remove_writer_attr(fndef)
expected = normalize_fdef(normalize_while_node(expected))
self.assertProtoEquals(expected, fndef)
# Verify that summaries were only written when executing with the
# `inputs_with_summaries` argument.
num_summary_events = 0
summary_files = [
os.path.join(self.summary_dir, sf)
for sf in gfile.ListDirectory(self.summary_dir)
]
for record in readers.TFRecordDatasetV2(
filenames=summary_files
).as_numpy_iterator():
event = event_pb2.Event()
event.ParseFromString(record)
if event.HasField('summary'):
num_summary_events += 1
# 3 Events are written by `add` when summaries are enabled.
self.assertEqual(num_summary_events, 3)
if __name__ == '__main__':
test.main()
| SummaryOpsTransformationTest |
python | celery__celery | t/unit/utils/test_saferepr.py | {
"start": 1927,
"end": 2005
} | class ____(set):
def __repr__(self):
return super().__repr__()
| set3 |
python | bokeh__bokeh | tests/unit/bokeh/document/test_events__document.py | {
"start": 4055,
"end": 5170
} | class ____:
def test_init(self) -> None:
doc = Document()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.document == doc
assert e.setter == "setter"
assert e.callback_invoker == "invoker"
def test_to_serializable(self) -> None:
doc = Document()
s = Serializer()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
with pytest.raises(NotImplementedError):
s.encode(e)
def test_dispatch(self) -> None:
doc = Document()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
e.dispatch(FakeEmptyDispatcher())
d = FakeFullDispatcher()
e.dispatch(d)
assert d.called == ['_document_changed', '_document_patched']
def test_combine_ignores_all(self) -> None:
doc = Document()
e = bde.DocumentPatchedEvent(doc, "setter", "invoker")
e2 = bde.DocumentPatchedEvent(doc, "setter", "invoker")
assert e.combine(e2) is False
# ModelChangedEvent -----------------------------------------------------------
| TestDocumentPatchedEvent |
python | celery__celery | celery/utils/collections.py | {
"start": 20871,
"end": 22623
} | class ____(Evictable):
"""A buffer of pending messages."""
Empty = Empty
def __init__(self, maxsize, iterable=None, deque=deque):
# type: (int, Iterable, Any) -> None
self.maxsize = maxsize
self.data = deque(iterable or [])
self._append = self.data.append
self._pop = self.data.popleft
self._len = self.data.__len__
self._extend = self.data.extend
def put(self, item):
# type: (Any) -> None
self._append(item)
self.maxsize and self._evict()
def extend(self, it):
# type: (Iterable) -> None
self._extend(it)
self.maxsize and self._evict()
def take(self, *default):
# type: (*Any) -> Any
try:
return self._pop()
except IndexError:
if default:
return default[0]
raise self.Empty()
def _pop_to_evict(self):
# type: () -> None
return self.take()
def __repr__(self):
# type: () -> str
return f'<{type(self).__name__}: {len(self)}/{self.maxsize}>'
def __iter__(self):
# type: () -> Iterable
while 1:
try:
yield self._pop()
except IndexError:
break
def __len__(self):
# type: () -> int
return self._len()
def __contains__(self, item) -> bool:
return item in self.data
def __reversed__(self):
# type: () -> Iterable
return reversed(self.data)
def __getitem__(self, index):
# type: (Any) -> Any
return self.data[index]
@property
def _evictcount(self):
# type: () -> int
return len(self)
Sequence.register(Messagebuffer)
| Messagebuffer |
python | getsentry__sentry | src/sentry/workflow_engine/endpoints/serializers/action_handler_serializer.py | {
"start": 757,
"end": 3238
} | class ____(Serializer):
def transform_title(self, title: str) -> str:
if title in PLUGINS_WITH_FIRST_PARTY_EQUIVALENTS:
return f"(Legacy) {title}"
return title
def serialize(
self,
obj: ActionHandler,
attrs: Mapping[str, Any],
user: Any,
**kwargs: Any,
) -> ActionHandlerSerializerResponse:
action_type = kwargs.get("action_type")
if action_type is None:
raise ValueError("action_type is required")
result: ActionHandlerSerializerResponse = {
"type": action_type,
"handlerGroup": obj.group.value,
"configSchema": obj.config_schema,
"dataSchema": obj.data_schema,
}
integrations = kwargs.get("integrations")
if integrations:
integrations_result = []
for i in integrations:
i_result = {"id": str(i["integration"].id), "name": i["integration"].name}
if i["services"]:
i_result["services"] = [
{"id": str(id), "name": name} for id, name in i["services"]
]
integrations_result.append(i_result)
result["integrations"] = integrations_result
sentry_app_context = kwargs.get("sentry_app_context")
if sentry_app_context:
installation = sentry_app_context.installation
component = sentry_app_context.component
sentry_app: SentryAppContext = {
"id": str(installation.sentry_app.id),
"name": installation.sentry_app.name,
"installationId": str(installation.id),
"installationUuid": str(installation.uuid),
"status": installation.sentry_app.status,
}
if component:
sentry_app["settings"] = component.app_schema.get("settings", {})
if component.app_schema.get("title"):
sentry_app["title"] = component.app_schema.get("title")
result["sentryApp"] = sentry_app
services = kwargs.get("services")
if services:
services_list = [
{"slug": service.slug, "name": self.transform_title(service.title)}
for service in services
]
services_list.sort(key=lambda x: x["name"])
result["services"] = services_list
return result
| ActionHandlerSerializer |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 15408,
"end": 15697
} | class ____(PrefectBaseModel):
"""Filter by `Deployment.work_queue_name`."""
any_: Optional[List[str]] = Field(
default=None,
description="A list of work queue names to include",
examples=[["work_queue_1", "work_queue_2"]],
)
| DeploymentFilterWorkQueueName |
python | openai__openai-python | tests/api_resources/test_images.py | {
"start": 376,
"end": 9806
} | class ____:
parametrize = pytest.mark.parametrize("client", [False, True], indirect=True, ids=["loose", "strict"])
@parametrize
def test_method_create_variation(self, client: OpenAI) -> None:
image = client.images.create_variation(
image=b"raw file contents",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_method_create_variation_with_all_params(self, client: OpenAI) -> None:
image = client.images.create_variation(
image=b"raw file contents",
model="string",
n=1,
response_format="url",
size="1024x1024",
user="user-1234",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_raw_response_create_variation(self, client: OpenAI) -> None:
response = client.images.with_raw_response.create_variation(
image=b"raw file contents",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_streaming_response_create_variation(self, client: OpenAI) -> None:
with client.images.with_streaming_response.create_variation(
image=b"raw file contents",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
assert_matches_type(ImagesResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_edit_overload_1(self, client: OpenAI) -> None:
image = client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_method_edit_with_all_params_overload_1(self, client: OpenAI) -> None:
image = client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
background="transparent",
input_fidelity="high",
mask=b"raw file contents",
model="string",
n=1,
output_compression=100,
output_format="png",
partial_images=1,
quality="high",
response_format="url",
size="1024x1024",
stream=False,
user="user-1234",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_raw_response_edit_overload_1(self, client: OpenAI) -> None:
response = client.images.with_raw_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_streaming_response_edit_overload_1(self, client: OpenAI) -> None:
with client.images.with_streaming_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
assert_matches_type(ImagesResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_edit_overload_2(self, client: OpenAI) -> None:
image_stream = client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
stream=True,
)
image_stream.response.close()
@parametrize
def test_method_edit_with_all_params_overload_2(self, client: OpenAI) -> None:
image_stream = client.images.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
stream=True,
background="transparent",
input_fidelity="high",
mask=b"raw file contents",
model="string",
n=1,
output_compression=100,
output_format="png",
partial_images=1,
quality="high",
response_format="url",
size="1024x1024",
user="user-1234",
)
image_stream.response.close()
@parametrize
def test_raw_response_edit_overload_2(self, client: OpenAI) -> None:
response = client.images.with_raw_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
stream=True,
)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
@parametrize
def test_streaming_response_edit_overload_2(self, client: OpenAI) -> None:
with client.images.with_streaming_response.edit(
image=b"raw file contents",
prompt="A cute baby sea otter wearing a beret",
stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_generate_overload_1(self, client: OpenAI) -> None:
image = client.images.generate(
prompt="A cute baby sea otter",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_method_generate_with_all_params_overload_1(self, client: OpenAI) -> None:
image = client.images.generate(
prompt="A cute baby sea otter",
background="transparent",
model="string",
moderation="low",
n=1,
output_compression=100,
output_format="png",
partial_images=1,
quality="medium",
response_format="url",
size="1024x1024",
stream=False,
style="vivid",
user="user-1234",
)
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_raw_response_generate_overload_1(self, client: OpenAI) -> None:
response = client.images.with_raw_response.generate(
prompt="A cute baby sea otter",
)
assert response.is_closed is True
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
assert_matches_type(ImagesResponse, image, path=["response"])
@parametrize
def test_streaming_response_generate_overload_1(self, client: OpenAI) -> None:
with client.images.with_streaming_response.generate(
prompt="A cute baby sea otter",
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
image = response.parse()
assert_matches_type(ImagesResponse, image, path=["response"])
assert cast(Any, response.is_closed) is True
@parametrize
def test_method_generate_overload_2(self, client: OpenAI) -> None:
image_stream = client.images.generate(
prompt="A cute baby sea otter",
stream=True,
)
image_stream.response.close()
@parametrize
def test_method_generate_with_all_params_overload_2(self, client: OpenAI) -> None:
image_stream = client.images.generate(
prompt="A cute baby sea otter",
stream=True,
background="transparent",
model="string",
moderation="low",
n=1,
output_compression=100,
output_format="png",
partial_images=1,
quality="medium",
response_format="url",
size="1024x1024",
style="vivid",
user="user-1234",
)
image_stream.response.close()
@parametrize
def test_raw_response_generate_overload_2(self, client: OpenAI) -> None:
response = client.images.with_raw_response.generate(
prompt="A cute baby sea otter",
stream=True,
)
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
@parametrize
def test_streaming_response_generate_overload_2(self, client: OpenAI) -> None:
with client.images.with_streaming_response.generate(
prompt="A cute baby sea otter",
stream=True,
) as response:
assert not response.is_closed
assert response.http_request.headers.get("X-Stainless-Lang") == "python"
stream = response.parse()
stream.close()
assert cast(Any, response.is_closed) is True
| TestImages |
python | falconry__falcon | tests/test_media_urlencoded.py | {
"start": 1050,
"end": 1142
} | class ____:
def on_post(self, req, resp):
resp.media = req.get_media()
| MediaMirror |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/github_schema.py | {
"start": 296700,
"end": 298224
} | class ____(sgqlc.types.Input):
"""Autogenerated input type of SetEnterpriseIdentityProvider"""
__schema__ = github_schema
__field_names__ = ("enterprise_id", "sso_url", "issuer", "idp_certificate", "signature_method", "digest_method", "client_mutation_id")
enterprise_id = sgqlc.types.Field(sgqlc.types.non_null(ID), graphql_name="enterpriseId")
"""The ID of the enterprise on which to set an identity provider."""
sso_url = sgqlc.types.Field(sgqlc.types.non_null(URI), graphql_name="ssoUrl")
"""The URL endpoint for the identity provider's SAML SSO."""
issuer = sgqlc.types.Field(String, graphql_name="issuer")
"""The Issuer Entity ID for the SAML identity provider"""
idp_certificate = sgqlc.types.Field(sgqlc.types.non_null(String), graphql_name="idpCertificate")
"""The x509 certificate used by the identity provider to sign
assertions and responses.
"""
signature_method = sgqlc.types.Field(sgqlc.types.non_null(SamlSignatureAlgorithm), graphql_name="signatureMethod")
"""The signature algorithm used to sign SAML requests for the
identity provider.
"""
digest_method = sgqlc.types.Field(sgqlc.types.non_null(SamlDigestAlgorithm), graphql_name="digestMethod")
"""The digest algorithm used to sign SAML requests for the identity
provider.
"""
client_mutation_id = sgqlc.types.Field(String, graphql_name="clientMutationId")
"""A unique identifier for the client performing the mutation."""
| SetEnterpriseIdentityProviderInput |
python | jazzband__django-waffle | waffle/management/commands/waffle_flag.py | {
"start": 345,
"end": 7249
} | class ____(BaseCommand):
def add_arguments(self, parser: CommandParser) -> None:
parser.add_argument(
'name',
nargs='?',
help='The name of the flag.')
parser.add_argument(
'-l', '--list',
action='store_true',
dest='list_flags',
default=False,
help='List existing samples.')
parser.add_argument(
'--everyone',
action='store_true',
dest='everyone',
help='Activate flag for all users.')
parser.add_argument(
'--deactivate',
action='store_false',
dest='everyone',
help='Deactivate flag for all users.')
parser.add_argument(
'--percent', '-p',
action='store',
type=int,
dest='percent',
help='Roll out the flag for a certain percentage of users. Takes '
'a number between 0.0 and 100.0')
parser.add_argument(
'--superusers',
action='store_true',
dest='superusers',
default=False,
help='Turn on the flag for Django superusers.')
parser.add_argument(
'--staff',
action='store_true',
dest='staff',
default=False,
help='Turn on the flag for Django staff.')
parser.add_argument(
'--authenticated',
action='store_true',
dest='authenticated',
default=False,
help='Turn on the flag for logged in users.')
parser.add_argument(
'--group', '-g',
action='append',
default=list(),
help='Turn on the flag for listed group names (use flag more '
'than once for multiple groups). WARNING: This will remove '
'any currently associated groups unless --append is used!')
parser.add_argument(
'--user', '-u',
action='append',
default=list(),
help='Turn on the flag for listed usernames (use flag more '
'than once for multiple users). WARNING: This will remove '
'any currently associated users unless --append is used!')
parser.add_argument(
'--append',
action='store_true',
dest='append',
default=False,
help='Append only mode when adding groups.')
parser.add_argument(
'--rollout', '-r',
action='store_true',
dest='rollout',
default=False,
help='Turn on rollout mode.')
parser.add_argument(
'--testing', '-t',
action='store_true',
dest='testing',
default=False,
help='Turn on testing mode, allowing the flag to be specified via '
'a querystring parameter.')
parser.add_argument(
'--create',
action='store_true',
dest='create',
default=False,
help='If the flag doesn\'t exist, create it.')
parser.set_defaults(everyone=None)
help = 'Modify a flag.'
def handle(self, *args: Any, **options: Any) -> None:
if options['list_flags']:
self.stdout.write('Flags:')
for flag in get_waffle_flag_model().objects.iterator():
self.log_flag_to_stdout(flag)
return
flag_name = options['name']
if not flag_name:
raise CommandError('You need to specify a flag name.')
if options['create']:
flag, created = get_waffle_flag_model().objects.get_or_create(name=flag_name)
if created:
self.stdout.write(f'Creating flag: {flag_name}')
else:
try:
flag = get_waffle_flag_model().objects.get(name=flag_name)
except get_waffle_flag_model().DoesNotExist:
raise CommandError('This flag does not exist.')
# Group isn't an attribute on the Flag, but a related Many-to-Many
# field, so we handle it a bit differently by looking up groups and
# adding each group to the flag individually
options_append = options.pop('append')
if groups := options.pop('group'):
group_hash = {}
for group in groups:
try:
group_instance = Group.objects.get(name=group)
group_hash[group_instance.name] = group_instance.id
except Group.DoesNotExist:
raise CommandError(f'Group {group} does not exist')
# If 'append' was not passed, we clear related groups
if not options_append:
flag.groups.clear()
self.stdout.write('Setting group(s): %s' % (
[name for name, _id in group_hash.items()])
)
for group_id in group_hash.values():
flag.groups.add(group_id)
if users := options.pop('user'):
user_hash = set()
for username in users:
try:
user_instance = UserModel.objects.get(
Q(**{UserModel.USERNAME_FIELD: username})
| Q(**{UserModel.EMAIL_FIELD: username})
)
user_hash.add(user_instance)
except UserModel.DoesNotExist:
raise CommandError(f'User {username} does not exist')
# If 'append' was not passed, we clear related users
if not options_append:
flag.users.clear()
self.stdout.write(f'Setting user(s): {user_hash}')
flag.users.add(*[user.id for user in user_hash])
for option_name, option in options.items():
if hasattr(flag, option_name):
self.stdout.write(f'Setting {option_name}: {option}')
setattr(flag, option_name, option)
flag.save()
def log_flag_to_stdout(self, flag: AbstractBaseFlag) -> None:
self.stdout.write(f'NAME: {flag.name}')
self.stdout.write(f'SUPERUSERS: {flag.superusers}')
self.stdout.write(f'EVERYONE: {flag.everyone}')
self.stdout.write(f'AUTHENTICATED: {flag.authenticated}')
self.stdout.write(f'PERCENT: {flag.percent}')
self.stdout.write(f'TESTING: {flag.testing}')
self.stdout.write(f'ROLLOUT: {flag.rollout}')
self.stdout.write(f'STAFF: {flag.staff}')
self.stdout.write('GROUPS: {}'.format(list(
flag.groups.values_list('name', flat=True)))
)
self.stdout.write('USERS: {}'.format(list(
flag.users.values_list(UserModel.USERNAME_FIELD, flat=True)))
)
self.stdout.write('')
| Command |
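A brief usage sketch may help alongside the command above; the flag and group names are hypothetical, and the snippet assumes a configured Django project with django-waffle installed.

# Hypothetical invocation of the waffle_flag management command via Django's
# call_command; "beta-ui" and "qa-team" are made-up names for illustration.
from django.core.management import call_command

# Create the flag if it does not exist, enable it for staff and 50% of users.
call_command("waffle_flag", "beta-ui", "--create", "--staff", "--percent", "50")

# Add a group to the flag without clearing the groups already attached to it.
call_command("waffle_flag", "beta-ui", "--group", "qa-team", "--append")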
python | dagster-io__dagster | python_modules/automation/automation_tests/dagster_dev_tests/ai_review_tests/test_smoke.py | {
"start": 98,
"end": 2784
} | class ____:
"""Basic smoke tests for all ai-review commands."""
def test_cache_import(self):
"""Test ai-review-cache import."""
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
assert ai_review_cache is not None
assert ai_review_cache.name == "ai-review-cache"
def test_cache_help(self):
"""Test ai-review-cache help."""
from automation.dagster_dev.commands.ai_review_cache import ai_review_cache
runner = CliRunner()
result = runner.invoke(ai_review_cache, ["--help"])
assert result.exit_code == 0
def test_summarize_import(self):
"""Test ai-review-summarize import."""
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
assert ai_review_summarize is not None
assert ai_review_summarize.name == "ai-review-summarize"
def test_summarize_help(self):
"""Test ai-review-summarize help."""
from automation.dagster_dev.commands.ai_review_summarize import ai_review_summarize
runner = CliRunner()
result = runner.invoke(ai_review_summarize, ["--help"])
assert result.exit_code == 0
def test_analyze_import(self):
"""Test ai-review-analyze import."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
assert ai_review_analyze is not None
assert ai_review_analyze.name == "ai-review-analyze"
def test_analyze_help(self):
"""Test ai-review-analyze help."""
from automation.dagster_dev.commands.ai_review_analyze import ai_review_analyze
runner = CliRunner()
result = runner.invoke(ai_review_analyze, ["--help"])
assert result.exit_code == 0
def test_update_import(self):
"""Test ai-review-update import."""
from automation.dagster_dev.commands.ai_review_update import update_pr
assert update_pr is not None
assert update_pr.name == "ai-review-update"
def test_update_help(self):
"""Test ai-review-update help."""
from automation.dagster_dev.commands.ai_review_update import update_pr
runner = CliRunner()
result = runner.invoke(update_pr, ["--help"])
assert result.exit_code == 0
def test_update_required_params(self):
"""Test ai-review-update requires title and body."""
from automation.dagster_dev.commands.ai_review_update import update_pr
runner = CliRunner()
result = runner.invoke(update_pr, ["--body", "test"])
assert result.exit_code != 0
assert "Missing option '--title'" in result.output
| TestAiReviewSmoke |
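The tests above all follow the same click testing pattern; here is a minimal, self-contained sketch of that pattern with a hypothetical command (not part of dagster).

import click
from click.testing import CliRunner

@click.command(name="greet")
@click.option("--name", required=True)
def greet(name: str) -> None:
    """Hypothetical example command."""
    click.echo(f"hello {name}")

runner = CliRunner()
result = runner.invoke(greet, ["--name", "ada"])
assert result.exit_code == 0
assert "hello ada" in result.output

# A missing required option surfaces as a non-zero exit code, which is the
# behaviour test_update_required_params checks for ai-review-update.
result = runner.invoke(greet, [])
assert result.exit_code != 0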
python | vyperlang__vyper | vyper/venom/memory_location.py | {
"start": 4754,
"end": 20258
} | class ____(MemoryLocation):
"""Represents a memory location that can be analyzed for aliasing"""
offset: Optional[int] = None
size: Optional[int] = None
_is_volatile: bool = False
# Locations that should be considered volatile, e.g. locations that are
# accessed outside of the current function.
def is_empty(self):
return self.size == 0
@property
def is_offset_fixed(self) -> bool:
return self.offset is not None
@property
def is_size_fixed(self) -> bool:
return self.size is not None
@property
def is_fixed(self) -> bool:
return self.is_offset_fixed and self.is_size_fixed
@property
def is_volatile(self) -> bool:
return self._is_volatile
def mk_volatile(self) -> MemoryLocationSegment:
return dc.replace(self, _is_volatile=True)
# similar code to memmerging._Interval, but different data structure
def completely_contains(self, other: MemoryLocation) -> bool:
# If other is empty (size 0), always contained
if other.is_empty():
return True
# If self has unknown offset or size, can't guarantee containment
if not self.is_offset_fixed or not self.is_size_fixed:
return False
# If other has unknown offset or size, can't guarantee containment
if not other.is_offset_fixed or not other.is_size_fixed:
return False
if not isinstance(other, MemoryLocationSegment):
return False
# Both are known
assert self.offset is not None and self.size is not None
assert other.offset is not None and other.size is not None
start1, end1 = self.offset, self.offset + self.size
start2, end2 = other.offset, other.offset + other.size
return start1 <= start2 and end1 >= end2
@staticmethod
def may_overlap_concrete(loc1: MemoryLocationSegment, loc2: MemoryLocationSegment) -> bool:
"""
Determine if two memory locations may overlap
"""
o1, s1 = loc1.offset, loc1.size
o2, s2 = loc2.offset, loc2.size
# If either size is zero, no alias
if s1 == 0 or s2 == 0:
return False
if o1 is None or o2 is None:
# If offsets are unknown, can't be sure
return True
# guaranteed now that o1 and o2 are not None
# All known
if s1 is not None and s2 is not None:
end1 = o1 + s1
end2 = o2 + s2
return not (end1 <= o2 or end2 <= o1)
# loc1 known size, loc2 unknown size
if s1 is not None:
# end of loc1 is bounded by start of loc2
if o1 + s1 <= o2:
return False
# Otherwise, can't be sure
return True
# loc2 known size, loc1 unknown size
if s2 is not None:
# end of loc2 is bounded by start of loc1
if o2 + s2 <= o1:
return False
# Otherwise, can't be sure
return True
return True
MemoryLocation.EMPTY = MemoryLocationSegment(offset=0, size=0)
MemoryLocation.UNDEFINED = MemoryLocationSegment(offset=None, size=None)
def get_write_location(inst, addr_space: AddrSpace, var_base_pointers: dict) -> MemoryLocation:
"""Extract memory location info from an instruction"""
if addr_space == MEMORY:
return _get_memory_write_location(inst, var_base_pointers)
elif addr_space in (STORAGE, TRANSIENT):
return _get_storage_write_location(inst, addr_space, var_base_pointers)
else: # pragma: nocover
raise CompilerPanic(f"Invalid location type: {addr_space}")
def get_read_location(inst, addr_space: AddrSpace, var_base_pointers) -> MemoryLocation:
"""Extract memory location info from an instruction"""
if addr_space == MEMORY:
return _get_memory_read_location(inst, var_base_pointers)
elif addr_space in (STORAGE, TRANSIENT):
return _get_storage_read_location(inst, addr_space, var_base_pointers)
else: # pragma: nocover
raise CompilerPanic(f"Invalid location type: {addr_space}")
def _get_memory_write_location(inst, var_base_pointers: dict) -> MemoryLocation:
opcode = inst.opcode
if opcode == "mstore":
dst = inst.operands[1]
return MemoryLocation.from_operands(dst, MEMORY.word_scale, var_base_pointers)
elif opcode == "mload":
return MemoryLocation.EMPTY
elif opcode in ("mcopy", "calldatacopy", "dloadbytes", "codecopy", "returndatacopy"):
size, _, dst = inst.operands
return MemoryLocation.from_operands(dst, size, var_base_pointers)
elif opcode == "dload":
return MemoryLocationSegment(offset=0, size=32)
elif opcode == "sha3_64":
return MemoryLocationSegment(offset=0, size=64)
elif opcode == "invoke":
return MemoryLocation.UNDEFINED
elif opcode == "call":
size, dst, _, _, _, _, _ = inst.operands
return MemoryLocation.from_operands(dst, size, var_base_pointers)
elif opcode in ("delegatecall", "staticcall"):
size, dst, _, _, _, _ = inst.operands
return MemoryLocation.from_operands(dst, size, var_base_pointers)
elif opcode == "extcodecopy":
size, _, dst, _ = inst.operands
return MemoryLocation.from_operands(dst, size, var_base_pointers)
return MemoryLocationSegment.EMPTY
def _get_memory_read_location(inst, var_base_pointers) -> MemoryLocation:
opcode = inst.opcode
if opcode == "mstore":
return MemoryLocationSegment.EMPTY
elif opcode == "mload":
return MemoryLocation.from_operands(inst.operands[0], MEMORY.word_scale, var_base_pointers)
elif opcode == "mcopy":
size, src, _ = inst.operands
return MemoryLocation.from_operands(src, size, var_base_pointers)
elif opcode == "dload":
return MemoryLocationSegment(offset=0, size=32)
elif opcode == "invoke":
return MemoryLocation.UNDEFINED
elif opcode == "call":
_, _, size, dst, _, _, _ = inst.operands
return MemoryLocation.from_operands(dst, size, var_base_pointers)
elif opcode in ("delegatecall", "staticcall"):
_, _, size, dst, _, _ = inst.operands
return MemoryLocation.from_operands(dst, size, var_base_pointers)
elif opcode == "return":
size, src = inst.operands
return MemoryLocation.from_operands(src, size, var_base_pointers)
elif opcode == "create":
size, src, _value = inst.operands
return MemoryLocation.from_operands(src, size, var_base_pointers)
elif opcode == "create2":
_salt, size, src, _value = inst.operands
return MemoryLocation.from_operands(src, size, var_base_pointers)
elif opcode == "sha3":
size, offset = inst.operands
return MemoryLocation.from_operands(offset, size, var_base_pointers)
elif opcode == "sha3_64":
return MemoryLocationSegment(offset=0, size=64)
elif opcode == "log":
size, src = inst.operands[-2:]
return MemoryLocation.from_operands(src, size, var_base_pointers)
elif opcode == "revert":
size, src = inst.operands
return MemoryLocation.from_operands(src, size, var_base_pointers)
return MemoryLocationSegment.EMPTY
def _get_storage_write_location(inst, addr_space: AddrSpace, var_base_pointers) -> MemoryLocation:
opcode = inst.opcode
if opcode == addr_space.store_op:
dst = inst.operands[1]
return MemoryLocation.from_operands(dst, addr_space.word_scale, var_base_pointers)
elif opcode == addr_space.load_op:
return MemoryLocation.EMPTY
elif opcode in ("call", "delegatecall", "staticcall"):
return MemoryLocation.UNDEFINED
elif opcode == "invoke":
return MemoryLocation.UNDEFINED
elif opcode in ("create", "create2"):
return MemoryLocation.UNDEFINED
return MemoryLocation.EMPTY
def _get_storage_read_location(inst, addr_space: AddrSpace, var_base_pointers) -> MemoryLocation:
opcode = inst.opcode
if opcode == addr_space.store_op:
return MemoryLocation.EMPTY
elif opcode == addr_space.load_op:
return MemoryLocation.from_operands(
inst.operands[0], addr_space.word_scale, var_base_pointers
)
elif opcode in ("call", "delegatecall", "staticcall"):
return MemoryLocation.UNDEFINED
elif opcode == "invoke":
return MemoryLocation.UNDEFINED
elif opcode in ("create", "create2"):
return MemoryLocation.UNDEFINED
elif opcode in ("return", "stop", "sink"):
# these opcodes terminate execution and commit to (persistent)
# storage, resulting in storage writes escaping our control.
# returning `MemoryLocation.UNDEFINED` represents "future" reads
# which could happen in the next program invocation.
# while not a "true" read, this case makes the code in DSE simpler.
return MemoryLocation.UNDEFINED
elif opcode == "ret":
# `ret` escapes our control and returns execution to the
# caller function. to be conservative, we model these as
# "future" reads which could happen in the caller.
# while not a "true" read, this case makes the code in DSE simpler.
return MemoryLocation.UNDEFINED
return MemoryLocation.EMPTY
def in_free_var(var, offset):
return offset >= var and offset < (var + 32)
def fix_mem_loc(function: IRFunction):
for bb in function.get_basic_blocks():
for inst in bb.instructions:
write_op = get_memory_write_op(inst)
read_op = get_memory_read_op(inst)
if write_op is not None:
size = get_write_size(inst)
if size is None or not isinstance(write_op.value, int):
continue
if in_free_var(MemoryPositions.FREE_VAR_SPACE, write_op.value):
offset = write_op.value - MemoryPositions.FREE_VAR_SPACE
_update_write_location(inst, IRAbstractMemLoc.FREE_VAR1.with_offset(offset))
elif in_free_var(MemoryPositions.FREE_VAR_SPACE2, write_op.value):
offset = write_op.value - MemoryPositions.FREE_VAR_SPACE2
_update_write_location(inst, IRAbstractMemLoc.FREE_VAR2.with_offset(offset))
if read_op is not None:
size = _get_read_size(inst)
if size is None or not isinstance(read_op.value, int):
continue
if in_free_var(MemoryPositions.FREE_VAR_SPACE, read_op.value):
offset = read_op.value - MemoryPositions.FREE_VAR_SPACE
_update_read_location(inst, IRAbstractMemLoc.FREE_VAR1.with_offset(offset))
elif in_free_var(MemoryPositions.FREE_VAR_SPACE2, read_op.value):
offset = read_op.value - MemoryPositions.FREE_VAR_SPACE2
_update_read_location(inst, IRAbstractMemLoc.FREE_VAR2.with_offset(offset))
def get_memory_write_op(inst) -> IROperand | None:
opcode = inst.opcode
if opcode == "mstore":
dst = inst.operands[1]
return dst
elif opcode in ("mcopy", "calldatacopy", "dloadbytes", "codecopy", "returndatacopy"):
_, _, dst = inst.operands
return dst
elif opcode == "call":
_, dst, _, _, _, _, _ = inst.operands
return dst
elif opcode in ("delegatecall", "staticcall"):
_, dst, _, _, _, _ = inst.operands
return dst
elif opcode == "extcodecopy":
_, _, dst, _ = inst.operands
return dst
return None
def get_write_size(inst: IRInstruction) -> IROperand | None:
opcode = inst.opcode
if opcode == "mstore":
return IRLiteral(32)
elif opcode in ("mcopy", "calldatacopy", "dloadbytes", "codecopy", "returndatacopy"):
size, _, _ = inst.operands
return size
elif opcode == "call":
size, _, _, _, _, _, _ = inst.operands
return size
elif opcode in ("delegatecall", "staticcall"):
size, _, _, _, _, _ = inst.operands
return size
elif opcode == "extcodecopy":
size, _, _, _ = inst.operands
return size
return None
def get_memory_read_op(inst) -> IROperand | None:
opcode = inst.opcode
if opcode == "mload":
return inst.operands[0]
elif opcode == "mcopy":
_, src, _ = inst.operands
return src
elif opcode == "call":
_, _, _, src, _, _, _ = inst.operands
return src
elif opcode in ("delegatecall", "staticcall"):
_, _, _, src, _, _ = inst.operands
return src
elif opcode == "return":
_, src = inst.operands
return src
elif opcode == "create":
_, src, _ = inst.operands
return src
elif opcode == "create2":
_, size, src, _ = inst.operands
return src
elif opcode == "sha3":
_, offset = inst.operands
return offset
elif opcode == "log":
_, src = inst.operands[-2:]
return src
elif opcode == "revert":
size, src = inst.operands
if size.value == 0:
return None
return src
return None
def _get_read_size(inst: IRInstruction) -> IROperand | None:
opcode = inst.opcode
if opcode == "mload":
return IRLiteral(32)
elif opcode == "mcopy":
size, _, _ = inst.operands
return size
elif opcode == "call":
_, _, size, _, _, _, _ = inst.operands
return size
elif opcode in ("delegatecall", "staticcall"):
_, _, size, _, _, _ = inst.operands
return size
elif opcode == "return":
size, _ = inst.operands
return size
elif opcode == "create":
size, _, _ = inst.operands
return size
elif opcode == "create2":
_, size, _, _ = inst.operands
return size
elif opcode == "sha3":
size, _ = inst.operands
return size
elif opcode == "log":
size, _ = inst.operands[-2:]
return size
elif opcode == "revert":
size, _ = inst.operands
if size.value == 0:
return None
return size
return None
def _update_write_location(inst, new_op: IROperand):
opcode = inst.opcode
if opcode == "mstore":
inst.operands[1] = new_op
elif opcode in ("mcopy", "calldatacopy", "dloadbytes", "codecopy", "returndatacopy"):
inst.operands[2] = new_op
elif opcode == "call":
inst.operands[1] = new_op
elif opcode in ("delegatecall", "staticcall"):
inst.operands[1] = new_op
elif opcode == "extcodecopy":
inst.operands[2] = new_op
def _update_read_location(inst, new_op: IROperand):
opcode = inst.opcode
if opcode == "mload":
inst.operands[0] = new_op
elif opcode == "mcopy":
inst.operands[1] = new_op
elif opcode == "call":
inst.operands[3] = new_op
elif opcode in ("delegatecall", "staticcall", "call"):
inst.operands[3] = new_op
elif opcode == "return":
inst.operands[1] = new_op
elif opcode == "create":
inst.operands[1] = new_op
elif opcode == "create2":
inst.operands[2] = new_op
elif opcode == "sha3":
inst.operands[1] = new_op
elif opcode == "log":
inst.operands[-1] = new_op
elif opcode == "revert":
inst.operands[1] = new_op
| MemoryLocationSegment |
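A small standalone sketch of the interval test behind may_overlap_concrete; the helper name is an assumption for illustration, not part of the vyper module above.

# Two half-open byte ranges [o1, o1 + s1) and [o2, o2 + s2) overlap unless one
# of them ends before the other begins; empty ranges never alias anything.
def ranges_overlap(o1: int, s1: int, o2: int, s2: int) -> bool:
    if s1 == 0 or s2 == 0:
        return False
    return not (o1 + s1 <= o2 or o2 + s2 <= o1)

assert ranges_overlap(0, 32, 16, 32)      # overlapping 32-byte words
assert not ranges_overlap(0, 32, 32, 32)  # adjacent, but disjoint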
python | sympy__sympy | sympy/polys/numberfields/galois_resolvents.py | {
"start": 1661,
"end": 25006
} | class ____:
r"""
If $G$ is a subgroup of the symmetric group $S_n$,
$F$ a multivariate polynomial in $\mathbb{Z}[X_1, \ldots, X_n]$,
$H$ the stabilizer of $F$ in $G$ (i.e. the permutations $\sigma$ such that
$F(X_{\sigma(1)}, \ldots, X_{\sigma(n)}) = F(X_1, \ldots, X_n)$), and $s$
a set of left coset representatives of $H$ in $G$, then the resolvent
polynomial $R(Y)$ is the product over $\sigma \in s$ of
$Y - F(X_{\sigma(1)}, \ldots, X_{\sigma(n)})$.
For example, consider the resolvent for the form
$$F = X_0 X_2 + X_1 X_3$$
and the group $G = S_4$. In this case, the stabilizer $H$ is the dihedral
group $D4 = < (0123), (02) >$, and a set of representatives of $G/H$ is
$\{I, (01), (03)\}$. The resolvent can be constructed as follows:
>>> from sympy.combinatorics.permutations import Permutation
>>> from sympy.core.symbol import symbols
>>> from sympy.polys.numberfields.galoisgroups import Resolvent
>>> X = symbols('X0 X1 X2 X3')
>>> F = X[0]*X[2] + X[1]*X[3]
>>> s = [Permutation([0, 1, 2, 3]), Permutation([1, 0, 2, 3]),
... Permutation([3, 1, 2, 0])]
>>> R = Resolvent(F, X, s)
This resolvent has three roots, which are the conjugates of ``F`` under the
three permutations in ``s``:
>>> R.root_lambdas[0](*X)
X0*X2 + X1*X3
>>> R.root_lambdas[1](*X)
X0*X3 + X1*X2
>>> R.root_lambdas[2](*X)
X0*X1 + X2*X3
Resolvents are useful for computing Galois groups. Given a polynomial $T$
of degree $n$, we will use a resolvent $R$ where $Gal(T) \leq G \leq S_n$.
We will then want to substitute the roots of $T$ for the variables $X_i$
in $R$, and study things like the discriminant of $R$, and the way $R$
factors over $\mathbb{Q}$.
From the symmetry in $R$'s construction, and since $Gal(T) \leq G$, we know
from Galois theory that the coefficients of $R$ must lie in $\mathbb{Z}$.
This allows us to compute the coefficients of $R$ by approximating the
roots of $T$ to sufficient precision, plugging these values in for the
variables $X_i$ in the coefficient expressions of $R$, and then simply
rounding to the nearest integer.
In order to determine a sufficient precision for the roots of $T$, this
``Resolvent`` class imposes certain requirements on the form ``F``. It
could be possible to design a different ``Resolvent`` class, that made
different precision estimates, and different assumptions about ``F``.
``F`` must be homogeneous, and all terms must have unit coefficient.
Furthermore, if $r$ is the number of terms in ``F``, and $t$ the total
degree, and if $m$ is the number of conjugates of ``F``, i.e. the number
of permutations in ``s``, then we require that $m < r 2^t$. Again, it is
not impossible to work with forms ``F`` that violate these assumptions, but
this ``Resolvent`` class requires them.
Since determining the integer coefficients of the resolvent for a given
polynomial $T$ is one of the main problems this class solves, we take some
time to explain the precision bounds it uses.
The general problem is:
Given a multivariate polynomial $P \in \mathbb{Z}[X_1, \ldots, X_n]$, and a
bound $M \in \mathbb{R}_+$, compute an $\varepsilon > 0$ such that for any
complex numbers $a_1, \ldots, a_n$ with $|a_i| < M$, if the $a_i$ are
approximated to within an accuracy of $\varepsilon$ by $b_i$, that is,
$|a_i - b_i| < \varepsilon$ for $i = 1, \ldots, n$, then
$|P(a_1, \ldots, a_n) - P(b_1, \ldots, b_n)| < 1/2$. In other words, if it
is known that $P(a_1, \ldots, a_n) = c$ for some $c \in \mathbb{Z}$, then
$P(b_1, \ldots, b_n)$ can be rounded to the nearest integer in order to
determine $c$.
To derive our error bound, consider the monomial $xyz$. Defining
$d_i = b_i - a_i$, our error is
$|(a_1 + d_1)(a_2 + d_2)(a_3 + d_3) - a_1 a_2 a_3|$, which is bounded
above by $|(M + \varepsilon)^3 - M^3|$. Passing to a general monomial of
total degree $t$, this expression is bounded by
$M^{t-1}\varepsilon(t + 2^t\varepsilon/M)$ provided $\varepsilon < M$,
and by $(t+1)M^{t-1}\varepsilon$ provided $\varepsilon < M/2^t$.
But since our goal is to make the error less than $1/2$, we will choose
$\varepsilon < 1/(2(t+1)M^{t-1})$, which implies the condition that
$\varepsilon < M/2^t$, as long as $M \geq 2$.
Passing from the general monomial to the general polynomial is easy, by
scaling and summing error bounds.
In our specific case, we are given a homogeneous polynomial $F$ of
$r$ terms and total degree $t$, all of whose coefficients are $\pm 1$. We
are given the $m$ permutations that make the conjugates of $F$, and
we want to bound the error in the coefficients of the monic polynomial
$R(Y)$ having $F$ and its conjugates as roots (i.e. the resolvent).
For $j$ from $1$ to $m$, the coefficient of $Y^{m-j}$ in $R(Y)$ is the
$j$th elementary symmetric polynomial in the conjugates of $F$. This sums
the products of these conjugates, taken $j$ at a time, in all possible
combinations. There are $\binom{m}{j}$ such combinations, and each product
of $j$ conjugates of $F$ expands to a sum of $r^j$ terms, each of unit
coefficient, and total degree $jt$. An error bound for the $j$th coeff of
$R$ is therefore
$$\binom{m}{j} r^j (jt + 1) M^{jt - 1} \varepsilon$$
When our goal is to evaluate all the coefficients of $R$, we will want to
use the maximum of these error bounds. It is clear that this bound is
strictly increasing for $j$ up to the ceiling of $m/2$. After that point,
the first factor $\binom{m}{j}$ begins to decrease, while the others
continue to increase. However, the binomial coefficient never falls by more
than a factor of $1/m$ at a time, so our assumptions that $M \geq 2$ and
$m < r 2^t$ are enough to tell us that the constant coefficient of $R$,
i.e. that where $j = m$, has the largest error bound. Therefore we can use
$$r^m (mt + 1) M^{mt - 1} \varepsilon$$
as our error bound for all the coefficients.
Note that this bound is also (more than) adequate to determine whether any
of the roots of $R$ is an integer. Each of these roots is a single
conjugate of $F$, which contains less error than the trace, i.e. the
coefficient of $Y^{m - 1}$. By rounding the roots of $R$ to the nearest
integers, we therefore get all the candidates for integer roots of $R$. By
plugging these candidates into $R$, we can check whether any of them
actually is a root.
Note: We take the definition of resolvent from Cohen, but the error bound
is ours.
References
==========
.. [1] Cohen, H. *A Course in Computational Algebraic Number Theory*.
(Def 6.3.2)
"""
def __init__(self, F, X, s):
r"""
Parameters
==========
F : :py:class:`~.Expr`
polynomial in the symbols in *X*
X : list of :py:class:`~.Symbol`
s : list of :py:class:`~.Permutation`
representing the cosets of the stabilizer of *F* in
some subgroup $G$ of $S_n$, where $n$ is the length of *X*.
"""
self.F = F
self.X = X
self.s = s
# Number of conjugates:
self.m = len(s)
# Total degree of F (computed below):
self.t = None
# Number of terms in F (computed below):
self.r = 0
for monom, coeff in Poly(F).terms():
if abs(coeff) != 1:
raise ResolventException('Resolvent class expects forms with unit coeffs')
t = sum(monom)
if t != self.t and self.t is not None:
raise ResolventException('Resolvent class expects homogeneous forms')
self.t = t
self.r += 1
m, t, r = self.m, self.t, self.r
if not m < r * 2**t:
raise ResolventException('Resolvent class expects m < r*2^t')
M = symbols('M')
# Precision sufficient for computing the coeffs of the resolvent:
self.coeff_prec_func = Poly(r**m*(m*t + 1)*M**(m*t - 1))
# Precision sufficient for checking whether any of the roots of the
# resolvent are integers:
self.root_prec_func = Poly(r*(t + 1)*M**(t - 1))
# The conjugates of F are the roots of the resolvent.
# For evaluating these to required numerical precisions, we need
# lambdified versions.
# Note: for a given permutation sigma, the conjugate (sigma F) is
# equivalent to lambda [sigma^(-1) X]: F.
self.root_lambdas = [
lambdify((~s[j])(X), F)
for j in range(self.m)
]
# For evaluating the coeffs, we'll also need lambdified versions of
# the elementary symmetric functions for degree m.
Y = symbols('Y')
R = symbols(' '.join(f'R{i}' for i in range(m)))
f = 1
for r in R:
f *= (Y - r)
C = Poly(f, Y).coeffs()
self.esf_lambdas = [lambdify(R, c) for c in C]
def get_prec(self, M, target='coeffs'):
r"""
For a given upper bound *M* on the magnitude of the complex numbers to
be plugged in for this resolvent's symbols, compute a sufficient
precision for evaluating those complex numbers, such that the
coefficients, or the integer roots, of the resolvent can be determined.
Parameters
==========
M : real number
Upper bound on magnitude of the complex numbers to be plugged in.
target : str, 'coeffs' or 'roots', default='coeffs'
Name the task for which a sufficient precision is desired.
This is either determining the coefficients of the resolvent
('coeffs') or determining its possible integer roots ('roots').
The latter may require significantly lower precision.
Returns
=======
int $m$
such that $2^{-m}$ is a sufficient upper bound on the
error in approximating the complex numbers to be plugged in.
"""
# As explained in the docstring for this class, our precision estimates
# require that M be at least 2.
M = max(M, 2)
f = self.coeff_prec_func if target == 'coeffs' else self.root_prec_func
r, _, _, _ = evalf(2*f(M), 1, {})
return fastlog(r) + 1
def approximate_roots_of_poly(self, T, target='coeffs'):
"""
Approximate the roots of a given polynomial *T* to sufficient precision
in order to evaluate this resolvent's coefficients, or determine
whether the resolvent has an integer root.
Parameters
==========
T : :py:class:`~.Poly`
target : str, 'coeffs' or 'roots', default='coeffs'
Set the approximation precision to be sufficient for the desired
task, which is either determining the coefficients of the resolvent
('coeffs') or determining its possible integer roots ('roots').
The latter may require significantly lower precision.
Returns
=======
list of elements of :ref:`CC`
"""
ctx = MPContext()
# Because sympy.polys.polyroots._integer_basis() is called when a CRootOf
# is formed, we proactively extract the integer basis now. This means that
# when we call T.all_roots(), every root will be a CRootOf, not a Mul
# of Integer*CRootOf.
coeff, T = preprocess_roots(T)
coeff = ctx.mpf(str(coeff))
scaled_roots = T.all_roots(radicals=False)
# Since we're going to be approximating the roots of T anyway, we can
# get a good upper bound on the magnitude of the roots by starting with
# a very low precision approx.
approx0 = [coeff * quad_to_mpmath(_evalf_with_bounded_error(r, m=0)) for r in scaled_roots]
# Here we add 1 to account for the possible error in our initial approximation.
M = max(abs(b) for b in approx0) + 1
m = self.get_prec(M, target=target)
n = fastlog(M._mpf_) + 1
p = m + n + 1
ctx.prec = p
d = prec_to_dps(p)
approx1 = [r.eval_approx(d, return_mpmath=True) for r in scaled_roots]
approx1 = [coeff*ctx.mpc(r) for r in approx1]
return approx1
@staticmethod
def round_mpf(a):
if isinstance(a, int):
return a
# If we use python's built-in `round()`, we lose precision.
# If we use `ZZ` directly, we may add or subtract 1.
#
# XXX: We have to convert to int before converting to ZZ because
# flint.fmpz cannot convert a mpmath mpf.
return ZZ(int(a.context.nint(a)))
def round_roots_to_integers_for_poly(self, T):
"""
For a given polynomial *T*, round the roots of this resolvent to the
nearest integers.
Explanation
===========
None of the integers returned by this method is guaranteed to be a
root of the resolvent; however, if the resolvent has any integer roots
(for the given polynomial *T*), then they must be among these.
If the coefficients of the resolvent are also desired, then this method
should not be used. Instead, use the ``eval_for_poly`` method. This
method may be significantly faster than ``eval_for_poly``.
Parameters
==========
T : :py:class:`~.Poly`
Returns
=======
dict
Keys are the indices of those permutations in ``self.s`` such that
the corresponding root did round to a rational integer.
Values are :ref:`ZZ`.
"""
approx_roots_of_T = self.approximate_roots_of_poly(T, target='roots')
approx_roots_of_self = [r(*approx_roots_of_T) for r in self.root_lambdas]
return {
i: self.round_mpf(r.real)
for i, r in enumerate(approx_roots_of_self)
if self.round_mpf(r.imag) == 0
}
def eval_for_poly(self, T, find_integer_root=False):
r"""
Compute the integer values of the coefficients of this resolvent, when
plugging in the roots of a given polynomial.
Parameters
==========
T : :py:class:`~.Poly`
find_integer_root : ``bool``, default ``False``
If ``True``, then also determine whether the resolvent has an
integer root, and return the first one found, along with its
index, i.e. the index of the permutation ``self.s[i]`` it
corresponds to.
Returns
=======
Tuple ``(R, a, i)``
``R`` is this resolvent as a dense univariate polynomial over
:ref:`ZZ`, i.e. a list of :ref:`ZZ`.
If *find_integer_root* was ``True``, then ``a`` and ``i`` are the
first integer root found, and its index, if one exists.
Otherwise ``a`` and ``i`` are both ``None``.
"""
approx_roots_of_T = self.approximate_roots_of_poly(T, target='coeffs')
approx_roots_of_self = [r(*approx_roots_of_T) for r in self.root_lambdas]
approx_coeffs_of_self = [c(*approx_roots_of_self) for c in self.esf_lambdas]
R = []
for c in approx_coeffs_of_self:
if self.round_mpf(c.imag) != 0:
# If precision was enough, this should never happen.
raise ResolventException(f"Got non-integer coeff for resolvent: {c}")
R.append(self.round_mpf(c.real))
a0, i0 = None, None
if find_integer_root:
for i, r in enumerate(approx_roots_of_self):
if self.round_mpf(r.imag) != 0:
continue
if not dup_eval(R, (a := self.round_mpf(r.real)), ZZ):
a0, i0 = a, i
break
return R, a0, i0
def wrap(text, width=80):
"""Line wrap a polynomial expression. """
out = ''
col = 0
for c in text:
if c == ' ' and col > width:
c, col = '\n', 0
else:
col += 1
out += c
return out
def s_vars(n):
"""Form the symbols s1, s2, ..., sn to stand for elem. symm. polys. """
return symbols([f's{i + 1}' for i in range(n)])
def sparse_symmetrize_resolvent_coeffs(F, X, s, verbose=False):
"""
Compute the coefficients of a resolvent as functions of the coefficients of
the associated polynomial.
F must be a sparse polynomial.
"""
import time, sys
# Roots of resolvent as multivariate forms over vars X:
root_forms = [
F.compose(list(zip(X, sigma(X))))
for sigma in s
]
# Coeffs of resolvent (besides lead coeff of 1) as symmetric forms over vars X:
Y = [Dummy(f'Y{i}') for i in range(len(s))]
coeff_forms = []
for i in range(1, len(s) + 1):
if verbose:
print('----')
print(f'Computing symmetric poly of degree {i}...')
sys.stdout.flush()
t0 = time.time()
G = symmetric_poly(i, *Y)
t1 = time.time()
if verbose:
print(f'took {t1 - t0} seconds')
print('lambdifying...')
sys.stdout.flush()
t0 = time.time()
C = lambdify(Y, (-1)**i*G)
t1 = time.time()
if verbose:
print(f'took {t1 - t0} seconds')
sys.stdout.flush()
coeff_forms.append(C)
coeffs = []
for i, f in enumerate(coeff_forms):
if verbose:
print('----')
print(f'Plugging root forms into elem symm poly {i+1}...')
sys.stdout.flush()
t0 = time.time()
g = f(*root_forms)
t1 = time.time()
coeffs.append(g)
if verbose:
print(f'took {t1 - t0} seconds')
sys.stdout.flush()
# Now symmetrize these coeffs. This means recasting them as polynomials in
# the elementary symmetric polys over X.
symmetrized = []
symmetrization_times = []
ss = s_vars(len(X))
for i, A in list(enumerate(coeffs)):
if verbose:
print('-----')
print(f'Coeff {i+1}...')
sys.stdout.flush()
t0 = time.time()
B, rem, _ = A.symmetrize()
t1 = time.time()
if rem != 0:
msg = f"Got nonzero remainder {rem} for resolvent (F, X, s) = ({F}, {X}, {s})"
raise ResolventException(msg)
B_str = str(B.as_expr(*ss))
symmetrized.append(B_str)
symmetrization_times.append(t1 - t0)
if verbose:
print(wrap(B_str))
print(f'took {t1 - t0} seconds')
sys.stdout.flush()
return symmetrized, symmetrization_times
def define_resolvents():
"""Define all the resolvents for polys T of degree 4 through 6. """
from sympy.combinatorics.galois import PGL2F5
from sympy.combinatorics.permutations import Permutation
R4, X4 = xring("X0,X1,X2,X3", ZZ, lex)
X = X4
# The one resolvent used in `_galois_group_degree_4_lookup()`:
F40 = X[0]*X[1]**2 + X[1]*X[2]**2 + X[2]*X[3]**2 + X[3]*X[0]**2
s40 = [
Permutation(3),
Permutation(3)(0, 1),
Permutation(3)(0, 2),
Permutation(3)(0, 3),
Permutation(3)(1, 2),
Permutation(3)(2, 3),
]
# First resolvent used in `_galois_group_degree_4_root_approx()`:
F41 = X[0]*X[2] + X[1]*X[3]
s41 = [
Permutation(3),
Permutation(3)(0, 1),
Permutation(3)(0, 3)
]
R5, X5 = xring("X0,X1,X2,X3,X4", ZZ, lex)
X = X5
# First resolvent used in `_galois_group_degree_5_hybrid()`,
# and only one used in `_galois_group_degree_5_lookup_ext_factor()`:
F51 = ( X[0]**2*(X[1]*X[4] + X[2]*X[3])
+ X[1]**2*(X[2]*X[0] + X[3]*X[4])
+ X[2]**2*(X[3]*X[1] + X[4]*X[0])
+ X[3]**2*(X[4]*X[2] + X[0]*X[1])
+ X[4]**2*(X[0]*X[3] + X[1]*X[2]))
s51 = [
Permutation(4),
Permutation(4)(0, 1),
Permutation(4)(0, 2),
Permutation(4)(0, 3),
Permutation(4)(0, 4),
Permutation(4)(1, 4)
]
R6, X6 = xring("X0,X1,X2,X3,X4,X5", ZZ, lex)
X = X6
# First resolvent used in `_galois_group_degree_6_lookup()`:
H = PGL2F5()
term0 = X[0]**2*X[5]**2*(X[1]*X[4] + X[2]*X[3])
terms = {term0.compose(list(zip(X, s(X)))) for s in H.elements}
F61 = sum(terms)
s61 = [Permutation(5)] + [Permutation(5)(0, n) for n in range(1, 6)]
# Second resolvent used in `_galois_group_degree_6_lookup()`:
F62 = X[0]*X[1]*X[2] + X[3]*X[4]*X[5]
s62 = [Permutation(5)] + [
Permutation(5)(i, j + 3) for i in range(3) for j in range(3)
]
return {
(4, 0): (F40, X4, s40),
(4, 1): (F41, X4, s41),
(5, 1): (F51, X5, s51),
(6, 1): (F61, X6, s61),
(6, 2): (F62, X6, s62),
}
def generate_lambda_lookup(verbose=False, trial_run=False):
"""
Generate the whole lookup table of coeff lambdas, for all resolvents.
"""
jobs = define_resolvents()
lambda_lists = {}
total_time = 0
time_for_61 = 0
time_for_61_last = 0
for k, (F, X, s) in jobs.items():
symmetrized, times = sparse_symmetrize_resolvent_coeffs(F, X, s, verbose=verbose)
total_time += sum(times)
if k == (6, 1):
time_for_61 = sum(times)
time_for_61_last = times[-1]
sv = s_vars(len(X))
head = f'lambda {", ".join(str(v) for v in sv)}:'
lambda_lists[k] = ',\n '.join([
f'{head} ({wrap(f)})'
for f in symmetrized
])
if trial_run:
break
table = (
"# This table was generated by a call to\n"
"# `sympy.polys.numberfields.galois_resolvents.generate_lambda_lookup()`.\n"
f"# The entire job took {total_time:.2f}s.\n"
f"# Of this, Case (6, 1) took {time_for_61:.2f}s.\n"
f"# The final polynomial of Case (6, 1) alone took {time_for_61_last:.2f}s.\n"
"resolvent_coeff_lambdas = {\n")
for k, L in lambda_lists.items():
table += f" {k}: [\n"
table += " " + L + '\n'
table += " ],\n"
table += "}\n"
return table
def get_resolvent_by_lookup(T, number):
"""
Use the lookup table, to return a resolvent (as dup) for a given
polynomial *T*.
Parameters
==========
T : Poly
The polynomial whose resolvent is needed
number : int
For some degrees, there are multiple resolvents.
Use this to indicate which one you want.
Returns
=======
dup
"""
from sympy.polys.numberfields.resolvent_lookup import resolvent_coeff_lambdas
degree = T.degree()
L = resolvent_coeff_lambdas[(degree, number)]
T_coeffs = T.rep.to_list()[1:]
return [ZZ(1)] + [c(*T_coeffs) for c in L]
# Use
# (.venv) $ python -m sympy.polys.numberfields.galois_resolvents
# to reproduce the table found in resolvent_lookup.py
if __name__ == "__main__":
import sys
verbose = '-v' in sys.argv[1:]
trial_run = '-t' in sys.argv[1:]
table = generate_lambda_lookup(verbose=verbose, trial_run=trial_run)
print(table)
| Resolvent |
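To make the precision discussion in the Resolvent docstring concrete, here is a small worked evaluation of the coefficient error bound r**m * (m*t + 1) * M**(m*t - 1); the numbers m = 3, r = 2, t = 2 correspond to the degree-4 form F41 = X0*X2 + X1*X3, and the root bound M = 10 is an assumed value for illustration.

import math

r, m, t, M = 2, 3, 2, 10
bound = r**m * (m*t + 1) * M**(m*t - 1)    # 8 * 7 * 100000 = 5_600_000

# The coefficients round safely to integers once eps < 1/(2*bound), i.e. once
# the roots of T are approximated to about log2(2*bound) bits of accuracy.
print(bound)                                # 5600000
print(math.ceil(math.log2(2 * bound)))      # 24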
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-dashscope/llama_index/readers/dashscope/domain/base_domains.py | {
"start": 22,
"end": 113
} | class ____(ABC):
@classmethod
def from_dict(cls, data: dict):
pass
| DictToObject |
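A hedged sketch of a concrete implementation of this small interface; the subclass and its fields are hypothetical and not part of the reader package above.

from dataclasses import dataclass

@dataclass
class UploadResult:
    """Hypothetical DictToObject-style value object."""
    file_id: str
    status: str

    @classmethod
    def from_dict(cls, data: dict) -> "UploadResult":
        # Map plain dict keys onto typed attributes.
        return cls(file_id=data["file_id"], status=data["status"])

result = UploadResult.from_dict({"file_id": "abc123", "status": "PARSE_SUCCESS"})
print(result.status)  # PARSE_SUCCESS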
python | wandb__wandb | wandb/vendor/pygments/lexers/lisp.py | {
"start": 84464,
"end": 122449
} | class ____(RegexLexer):
"""
An ELisp lexer, parsing a stream and outputting the tokens
needed to highlight elisp code.
.. versionadded:: 2.1
"""
name = 'EmacsLisp'
aliases = ['emacs', 'elisp', 'emacs-lisp']
filenames = ['*.el']
mimetypes = ['text/x-elisp', 'application/x-elisp']
flags = re.MULTILINE
# couple of useful regexes
# characters that are not macro-characters and can be used to begin a symbol
nonmacro = r'\\.|[\w!$%&*+-/<=>?@^{}~|]'
constituent = nonmacro + '|[#.:]'
terminated = r'(?=[ "()\]\'\n,;`])' # whitespace or terminating macro characters
# symbol token, reverse-engineered from hyperspec
# Take a deep breath...
symbol = r'((?:%s)(?:%s)*)' % (nonmacro, constituent)
macros = set((
'atomic-change-group', 'case', 'block', 'cl-block', 'cl-callf', 'cl-callf2',
'cl-case', 'cl-decf', 'cl-declaim', 'cl-declare',
'cl-define-compiler-macro', 'cl-defmacro', 'cl-defstruct',
'cl-defsubst', 'cl-deftype', 'cl-defun', 'cl-destructuring-bind',
'cl-do', 'cl-do*', 'cl-do-all-symbols', 'cl-do-symbols', 'cl-dolist',
'cl-dotimes', 'cl-ecase', 'cl-etypecase', 'eval-when', 'cl-eval-when', 'cl-flet',
'cl-flet*', 'cl-function', 'cl-incf', 'cl-labels', 'cl-letf',
'cl-letf*', 'cl-load-time-value', 'cl-locally', 'cl-loop',
'cl-macrolet', 'cl-multiple-value-bind', 'cl-multiple-value-setq',
'cl-progv', 'cl-psetf', 'cl-psetq', 'cl-pushnew', 'cl-remf',
'cl-return', 'cl-return-from', 'cl-rotatef', 'cl-shiftf',
'cl-symbol-macrolet', 'cl-tagbody', 'cl-the', 'cl-typecase',
'combine-after-change-calls', 'condition-case-unless-debug', 'decf',
'declaim', 'declare', 'declare-function', 'def-edebug-spec',
'defadvice', 'defclass', 'defcustom', 'defface', 'defgeneric',
'defgroup', 'define-advice', 'define-alternatives',
'define-compiler-macro', 'define-derived-mode', 'define-generic-mode',
'define-global-minor-mode', 'define-globalized-minor-mode',
'define-minor-mode', 'define-modify-macro',
'define-obsolete-face-alias', 'define-obsolete-function-alias',
'define-obsolete-variable-alias', 'define-setf-expander',
'define-skeleton', 'defmacro', 'defmethod', 'defsetf', 'defstruct',
'defsubst', 'deftheme', 'deftype', 'defun', 'defvar-local',
'delay-mode-hooks', 'destructuring-bind', 'do', 'do*',
'do-all-symbols', 'do-symbols', 'dolist', 'dont-compile', 'dotimes',
'dotimes-with-progress-reporter', 'ecase', 'ert-deftest', 'etypecase',
'eval-and-compile', 'eval-when-compile', 'flet', 'ignore-errors',
'incf', 'labels', 'lambda', 'letrec', 'lexical-let', 'lexical-let*',
'loop', 'multiple-value-bind', 'multiple-value-setq', 'noreturn',
'oref', 'oref-default', 'oset', 'oset-default', 'pcase',
'pcase-defmacro', 'pcase-dolist', 'pcase-exhaustive', 'pcase-let',
'pcase-let*', 'pop', 'psetf', 'psetq', 'push', 'pushnew', 'remf',
'return', 'rotatef', 'rx', 'save-match-data', 'save-selected-window',
'save-window-excursion', 'setf', 'setq-local', 'shiftf',
'track-mouse', 'typecase', 'unless', 'use-package', 'when',
'while-no-input', 'with-case-table', 'with-category-table',
'with-coding-priority', 'with-current-buffer', 'with-demoted-errors',
'with-eval-after-load', 'with-file-modes', 'with-local-quit',
'with-output-to-string', 'with-output-to-temp-buffer',
'with-parsed-tramp-file-name', 'with-selected-frame',
'with-selected-window', 'with-silent-modifications', 'with-slots',
'with-syntax-table', 'with-temp-buffer', 'with-temp-file',
'with-temp-message', 'with-timeout', 'with-tramp-connection-property',
'with-tramp-file-property', 'with-tramp-progress-reporter',
'with-wrapper-hook', 'load-time-value', 'locally', 'macrolet', 'progv',
'return-from',
))
special_forms = set((
'and', 'catch', 'cond', 'condition-case', 'defconst', 'defvar',
'function', 'if', 'interactive', 'let', 'let*', 'or', 'prog1',
'prog2', 'progn', 'quote', 'save-current-buffer', 'save-excursion',
'save-restriction', 'setq', 'setq-default', 'subr-arity',
'unwind-protect', 'while',
))
builtin_function = set((
'%', '*', '+', '-', '/', '/=', '1+', '1-', '<', '<=', '=', '>', '>=',
'Snarf-documentation', 'abort-recursive-edit', 'abs',
'accept-process-output', 'access-file', 'accessible-keymaps', 'acos',
'active-minibuffer-window', 'add-face-text-property',
'add-name-to-file', 'add-text-properties', 'all-completions',
'append', 'apply', 'apropos-internal', 'aref', 'arrayp', 'aset',
'ash', 'asin', 'assoc', 'assoc-string', 'assq', 'atan', 'atom',
'autoload', 'autoload-do-load', 'backtrace', 'backtrace--locals',
'backtrace-debug', 'backtrace-eval', 'backtrace-frame',
'backward-char', 'backward-prefix-chars', 'barf-if-buffer-read-only',
'base64-decode-region', 'base64-decode-string',
'base64-encode-region', 'base64-encode-string', 'beginning-of-line',
'bidi-find-overridden-directionality', 'bidi-resolved-levels',
'bitmap-spec-p', 'bobp', 'bolp', 'bool-vector',
'bool-vector-count-consecutive', 'bool-vector-count-population',
'bool-vector-exclusive-or', 'bool-vector-intersection',
'bool-vector-not', 'bool-vector-p', 'bool-vector-set-difference',
'bool-vector-subsetp', 'bool-vector-union', 'boundp',
'buffer-base-buffer', 'buffer-chars-modified-tick',
'buffer-enable-undo', 'buffer-file-name', 'buffer-has-markers-at',
'buffer-list', 'buffer-live-p', 'buffer-local-value',
'buffer-local-variables', 'buffer-modified-p', 'buffer-modified-tick',
'buffer-name', 'buffer-size', 'buffer-string', 'buffer-substring',
'buffer-substring-no-properties', 'buffer-swap-text', 'bufferp',
'bury-buffer-internal', 'byte-code', 'byte-code-function-p',
'byte-to-position', 'byte-to-string', 'byteorder',
'call-interactively', 'call-last-kbd-macro', 'call-process',
'call-process-region', 'cancel-kbd-macro-events', 'capitalize',
'capitalize-region', 'capitalize-word', 'car', 'car-less-than-car',
'car-safe', 'case-table-p', 'category-docstring',
'category-set-mnemonics', 'category-table', 'category-table-p',
'ccl-execute', 'ccl-execute-on-string', 'ccl-program-p', 'cdr',
'cdr-safe', 'ceiling', 'char-after', 'char-before',
'char-category-set', 'char-charset', 'char-equal', 'char-or-string-p',
'char-resolve-modifiers', 'char-syntax', 'char-table-extra-slot',
'char-table-p', 'char-table-parent', 'char-table-range',
'char-table-subtype', 'char-to-string', 'char-width', 'characterp',
'charset-after', 'charset-id-internal', 'charset-plist',
'charset-priority-list', 'charsetp', 'check-coding-system',
'check-coding-systems-region', 'clear-buffer-auto-save-failure',
'clear-charset-maps', 'clear-face-cache', 'clear-font-cache',
'clear-image-cache', 'clear-string', 'clear-this-command-keys',
'close-font', 'clrhash', 'coding-system-aliases',
'coding-system-base', 'coding-system-eol-type', 'coding-system-p',
'coding-system-plist', 'coding-system-priority-list',
'coding-system-put', 'color-distance', 'color-gray-p',
'color-supported-p', 'combine-after-change-execute',
'command-error-default-function', 'command-remapping', 'commandp',
'compare-buffer-substrings', 'compare-strings',
'compare-window-configurations', 'completing-read',
'compose-region-internal', 'compose-string-internal',
'composition-get-gstring', 'compute-motion', 'concat', 'cons',
'consp', 'constrain-to-field', 'continue-process',
'controlling-tty-p', 'coordinates-in-window-p', 'copy-alist',
'copy-category-table', 'copy-file', 'copy-hash-table', 'copy-keymap',
'copy-marker', 'copy-sequence', 'copy-syntax-table', 'copysign',
'cos', 'current-active-maps', 'current-bidi-paragraph-direction',
'current-buffer', 'current-case-table', 'current-column',
'current-global-map', 'current-idle-time', 'current-indentation',
'current-input-mode', 'current-local-map', 'current-message',
'current-minor-mode-maps', 'current-time', 'current-time-string',
'current-time-zone', 'current-window-configuration',
'cygwin-convert-file-name-from-windows',
'cygwin-convert-file-name-to-windows', 'daemon-initialized',
'daemonp', 'dbus--init-bus', 'dbus-get-unique-name',
'dbus-message-internal', 'debug-timer-check', 'declare-equiv-charset',
'decode-big5-char', 'decode-char', 'decode-coding-region',
'decode-coding-string', 'decode-sjis-char', 'decode-time',
'default-boundp', 'default-file-modes', 'default-printer-name',
'default-toplevel-value', 'default-value', 'define-category',
'define-charset-alias', 'define-charset-internal',
'define-coding-system-alias', 'define-coding-system-internal',
'define-fringe-bitmap', 'define-hash-table-test', 'define-key',
'define-prefix-command', 'delete',
'delete-all-overlays', 'delete-and-extract-region', 'delete-char',
'delete-directory-internal', 'delete-field', 'delete-file',
'delete-frame', 'delete-other-windows-internal', 'delete-overlay',
'delete-process', 'delete-region', 'delete-terminal',
'delete-window-internal', 'delq', 'describe-buffer-bindings',
'describe-vector', 'destroy-fringe-bitmap', 'detect-coding-region',
'detect-coding-string', 'ding', 'directory-file-name',
'directory-files', 'directory-files-and-attributes', 'discard-input',
'display-supports-face-attributes-p', 'do-auto-save', 'documentation',
'documentation-property', 'downcase', 'downcase-region',
'downcase-word', 'draw-string', 'dump-colors', 'dump-emacs',
'dump-face', 'dump-frame-glyph-matrix', 'dump-glyph-matrix',
'dump-glyph-row', 'dump-redisplay-history', 'dump-tool-bar-row',
'elt', 'emacs-pid', 'encode-big5-char', 'encode-char',
'encode-coding-region', 'encode-coding-string', 'encode-sjis-char',
'encode-time', 'end-kbd-macro', 'end-of-line', 'eobp', 'eolp', 'eq',
'eql', 'equal', 'equal-including-properties', 'erase-buffer',
'error-message-string', 'eval', 'eval-buffer', 'eval-region',
'event-convert-list', 'execute-kbd-macro', 'exit-recursive-edit',
'exp', 'expand-file-name', 'expt', 'external-debugging-output',
'face-attribute-relative-p', 'face-attributes-as-vector', 'face-font',
'fboundp', 'fceiling', 'fetch-bytecode', 'ffloor',
'field-beginning', 'field-end', 'field-string',
'field-string-no-properties', 'file-accessible-directory-p',
'file-acl', 'file-attributes', 'file-attributes-lessp',
'file-directory-p', 'file-executable-p', 'file-exists-p',
'file-locked-p', 'file-modes', 'file-name-absolute-p',
'file-name-all-completions', 'file-name-as-directory',
'file-name-completion', 'file-name-directory',
'file-name-nondirectory', 'file-newer-than-file-p', 'file-readable-p',
'file-regular-p', 'file-selinux-context', 'file-symlink-p',
'file-system-info', 'file-system-info', 'file-writable-p',
'fillarray', 'find-charset-region', 'find-charset-string',
'find-coding-systems-region-internal', 'find-composition-internal',
'find-file-name-handler', 'find-font', 'find-operation-coding-system',
'float', 'float-time', 'floatp', 'floor', 'fmakunbound',
'following-char', 'font-at', 'font-drive-otf', 'font-face-attributes',
'font-family-list', 'font-get', 'font-get-glyphs',
'font-get-system-font', 'font-get-system-normal-font', 'font-info',
'font-match-p', 'font-otf-alternates', 'font-put',
'font-shape-gstring', 'font-spec', 'font-variation-glyphs',
'font-xlfd-name', 'fontp', 'fontset-font', 'fontset-info',
'fontset-list', 'fontset-list-all', 'force-mode-line-update',
'force-window-update', 'format', 'format-mode-line',
'format-network-address', 'format-time-string', 'forward-char',
'forward-comment', 'forward-line', 'forward-word',
'frame-border-width', 'frame-bottom-divider-width',
'frame-can-run-window-configuration-change-hook', 'frame-char-height',
'frame-char-width', 'frame-face-alist', 'frame-first-window',
'frame-focus', 'frame-font-cache', 'frame-fringe-width', 'frame-list',
'frame-live-p', 'frame-or-buffer-changed-p', 'frame-parameter',
'frame-parameters', 'frame-pixel-height', 'frame-pixel-width',
'frame-pointer-visible-p', 'frame-right-divider-width',
'frame-root-window', 'frame-scroll-bar-height',
'frame-scroll-bar-width', 'frame-selected-window', 'frame-terminal',
'frame-text-cols', 'frame-text-height', 'frame-text-lines',
'frame-text-width', 'frame-total-cols', 'frame-total-lines',
'frame-visible-p', 'framep', 'frexp', 'fringe-bitmaps-at-pos',
'fround', 'fset', 'ftruncate', 'funcall', 'funcall-interactively',
'function-equal', 'functionp', 'gap-position', 'gap-size',
'garbage-collect', 'gc-status', 'generate-new-buffer-name', 'get',
'get-buffer', 'get-buffer-create', 'get-buffer-process',
'get-buffer-window', 'get-byte', 'get-char-property',
'get-char-property-and-overlay', 'get-file-buffer', 'get-file-char',
'get-internal-run-time', 'get-load-suffixes', 'get-pos-property',
'get-process', 'get-screen-color', 'get-text-property',
'get-unicode-property-internal', 'get-unused-category',
'get-unused-iso-final-char', 'getenv-internal', 'gethash',
'gfile-add-watch', 'gfile-rm-watch', 'global-key-binding',
'gnutls-available-p', 'gnutls-boot', 'gnutls-bye', 'gnutls-deinit',
'gnutls-error-fatalp', 'gnutls-error-string', 'gnutls-errorp',
'gnutls-get-initstage', 'gnutls-peer-status',
'gnutls-peer-status-warning-describe', 'goto-char', 'gpm-mouse-start',
'gpm-mouse-stop', 'group-gid', 'group-real-gid',
'handle-save-session', 'handle-switch-frame', 'hash-table-count',
'hash-table-p', 'hash-table-rehash-size',
'hash-table-rehash-threshold', 'hash-table-size', 'hash-table-test',
'hash-table-weakness', 'iconify-frame', 'identity', 'image-flush',
'image-mask-p', 'image-metadata', 'image-size', 'imagemagick-types',
'imagep', 'indent-to', 'indirect-function', 'indirect-variable',
'init-image-library', 'inotify-add-watch', 'inotify-rm-watch',
'input-pending-p', 'insert', 'insert-and-inherit',
'insert-before-markers', 'insert-before-markers-and-inherit',
'insert-buffer-substring', 'insert-byte', 'insert-char',
'insert-file-contents', 'insert-startup-screen', 'int86',
'integer-or-marker-p', 'integerp', 'interactive-form', 'intern',
'intern-soft', 'internal--track-mouse', 'internal-char-font',
'internal-complete-buffer', 'internal-copy-lisp-face',
'internal-default-process-filter',
'internal-default-process-sentinel', 'internal-describe-syntax-value',
'internal-event-symbol-parse-modifiers',
'internal-face-x-get-resource', 'internal-get-lisp-face-attribute',
'internal-lisp-face-attribute-values', 'internal-lisp-face-empty-p',
'internal-lisp-face-equal-p', 'internal-lisp-face-p',
'internal-make-lisp-face', 'internal-make-var-non-special',
'internal-merge-in-global-face',
'internal-set-alternative-font-family-alist',
'internal-set-alternative-font-registry-alist',
'internal-set-font-selection-order',
'internal-set-lisp-face-attribute',
'internal-set-lisp-face-attribute-from-resource',
'internal-show-cursor', 'internal-show-cursor-p', 'interrupt-process',
'invisible-p', 'invocation-directory', 'invocation-name', 'isnan',
'iso-charset', 'key-binding', 'key-description',
'keyboard-coding-system', 'keymap-parent', 'keymap-prompt', 'keymapp',
'keywordp', 'kill-all-local-variables', 'kill-buffer', 'kill-emacs',
'kill-local-variable', 'kill-process', 'last-nonminibuffer-frame',
'lax-plist-get', 'lax-plist-put', 'ldexp', 'length',
'libxml-parse-html-region', 'libxml-parse-xml-region',
'line-beginning-position', 'line-end-position', 'line-pixel-height',
'list', 'list-fonts', 'list-system-processes', 'listp', 'load',
'load-average', 'local-key-binding', 'local-variable-if-set-p',
'local-variable-p', 'locale-info', 'locate-file-internal',
'lock-buffer', 'log', 'logand', 'logb', 'logior', 'lognot', 'logxor',
'looking-at', 'lookup-image', 'lookup-image-map', 'lookup-key',
'lower-frame', 'lsh', 'macroexpand', 'make-bool-vector',
'make-byte-code', 'make-category-set', 'make-category-table',
'make-char', 'make-char-table', 'make-directory-internal',
'make-frame-invisible', 'make-frame-visible', 'make-hash-table',
'make-indirect-buffer', 'make-keymap', 'make-list',
'make-local-variable', 'make-marker', 'make-network-process',
'make-overlay', 'make-serial-process', 'make-sparse-keymap',
'make-string', 'make-symbol', 'make-symbolic-link', 'make-temp-name',
'make-terminal-frame', 'make-variable-buffer-local',
'make-variable-frame-local', 'make-vector', 'makunbound',
'map-char-table', 'map-charset-chars', 'map-keymap',
'map-keymap-internal', 'mapatoms', 'mapc', 'mapcar', 'mapconcat',
'maphash', 'mark-marker', 'marker-buffer', 'marker-insertion-type',
'marker-position', 'markerp', 'match-beginning', 'match-data',
'match-end', 'matching-paren', 'max', 'max-char', 'md5', 'member',
'memory-info', 'memory-limit', 'memory-use-counts', 'memq', 'memql',
'menu-bar-menu-at-x-y', 'menu-or-popup-active-p',
'menu-or-popup-active-p', 'merge-face-attribute', 'message',
'message-box', 'message-or-box', 'min',
'minibuffer-completion-contents', 'minibuffer-contents',
'minibuffer-contents-no-properties', 'minibuffer-depth',
'minibuffer-prompt', 'minibuffer-prompt-end',
'minibuffer-selected-window', 'minibuffer-window', 'minibufferp',
'minor-mode-key-binding', 'mod', 'modify-category-entry',
'modify-frame-parameters', 'modify-syntax-entry',
'mouse-pixel-position', 'mouse-position', 'move-overlay',
'move-point-visually', 'move-to-column', 'move-to-window-line',
'msdos-downcase-filename', 'msdos-long-file-names', 'msdos-memget',
'msdos-memput', 'msdos-mouse-disable', 'msdos-mouse-enable',
'msdos-mouse-init', 'msdos-mouse-p', 'msdos-remember-default-colors',
'msdos-set-keyboard', 'msdos-set-mouse-buttons',
'multibyte-char-to-unibyte', 'multibyte-string-p', 'narrow-to-region',
'natnump', 'nconc', 'network-interface-info',
'network-interface-list', 'new-fontset', 'newline-cache-check',
'next-char-property-change', 'next-frame', 'next-overlay-change',
'next-property-change', 'next-read-file-uses-dialog-p',
'next-single-char-property-change', 'next-single-property-change',
'next-window', 'nlistp', 'nreverse', 'nth', 'nthcdr', 'null',
'number-or-marker-p', 'number-to-string', 'numberp',
'open-dribble-file', 'open-font', 'open-termscript',
'optimize-char-table', 'other-buffer', 'other-window-for-scrolling',
'overlay-buffer', 'overlay-end', 'overlay-get', 'overlay-lists',
'overlay-properties', 'overlay-put', 'overlay-recenter',
'overlay-start', 'overlayp', 'overlays-at', 'overlays-in',
'parse-partial-sexp', 'play-sound-internal', 'plist-get',
'plist-member', 'plist-put', 'point', 'point-marker', 'point-max',
'point-max-marker', 'point-min', 'point-min-marker',
'pos-visible-in-window-p', 'position-bytes', 'posix-looking-at',
'posix-search-backward', 'posix-search-forward', 'posix-string-match',
'posn-at-point', 'posn-at-x-y', 'preceding-char',
'prefix-numeric-value', 'previous-char-property-change',
'previous-frame', 'previous-overlay-change',
'previous-property-change', 'previous-single-char-property-change',
'previous-single-property-change', 'previous-window', 'prin1',
'prin1-to-string', 'princ', 'print', 'process-attributes',
'process-buffer', 'process-coding-system', 'process-command',
'process-connection', 'process-contact', 'process-datagram-address',
'process-exit-status', 'process-filter', 'process-filter-multibyte-p',
'process-id', 'process-inherit-coding-system-flag', 'process-list',
'process-mark', 'process-name', 'process-plist',
'process-query-on-exit-flag', 'process-running-child-p',
'process-send-eof', 'process-send-region', 'process-send-string',
'process-sentinel', 'process-status', 'process-tty-name',
'process-type', 'processp', 'profiler-cpu-log',
'profiler-cpu-running-p', 'profiler-cpu-start', 'profiler-cpu-stop',
'profiler-memory-log', 'profiler-memory-running-p',
'profiler-memory-start', 'profiler-memory-stop', 'propertize',
'purecopy', 'put', 'put-text-property',
'put-unicode-property-internal', 'puthash', 'query-font',
'query-fontset', 'quit-process', 'raise-frame', 'random', 'rassoc',
'rassq', 're-search-backward', 're-search-forward', 'read',
'read-buffer', 'read-char', 'read-char-exclusive',
'read-coding-system', 'read-command', 'read-event',
'read-from-minibuffer', 'read-from-string', 'read-function',
'read-key-sequence', 'read-key-sequence-vector',
'read-no-blanks-input', 'read-non-nil-coding-system', 'read-string',
'read-variable', 'recent-auto-save-p', 'recent-doskeys',
'recent-keys', 'recenter', 'recursion-depth', 'recursive-edit',
'redirect-debugging-output', 'redirect-frame-focus', 'redisplay',
'redraw-display', 'redraw-frame', 'regexp-quote', 'region-beginning',
'region-end', 'register-ccl-program', 'register-code-conversion-map',
'remhash', 'remove-list-of-text-properties', 'remove-text-properties',
'rename-buffer', 'rename-file', 'replace-match',
'reset-this-command-lengths', 'resize-mini-window-internal',
'restore-buffer-modified-p', 'resume-tty', 'reverse', 'round',
'run-hook-with-args', 'run-hook-with-args-until-failure',
'run-hook-with-args-until-success', 'run-hook-wrapped', 'run-hooks',
'run-window-configuration-change-hook', 'run-window-scroll-functions',
'safe-length', 'scan-lists', 'scan-sexps', 'scroll-down',
'scroll-left', 'scroll-other-window', 'scroll-right', 'scroll-up',
'search-backward', 'search-forward', 'secure-hash', 'select-frame',
'select-window', 'selected-frame', 'selected-window',
'self-insert-command', 'send-string-to-terminal', 'sequencep',
'serial-process-configure', 'set', 'set-buffer',
'set-buffer-auto-saved', 'set-buffer-major-mode',
'set-buffer-modified-p', 'set-buffer-multibyte', 'set-case-table',
'set-category-table', 'set-char-table-extra-slot',
'set-char-table-parent', 'set-char-table-range', 'set-charset-plist',
'set-charset-priority', 'set-coding-system-priority',
'set-cursor-size', 'set-default', 'set-default-file-modes',
'set-default-toplevel-value', 'set-file-acl', 'set-file-modes',
'set-file-selinux-context', 'set-file-times', 'set-fontset-font',
'set-frame-height', 'set-frame-position', 'set-frame-selected-window',
'set-frame-size', 'set-frame-width', 'set-fringe-bitmap-face',
'set-input-interrupt-mode', 'set-input-meta-mode', 'set-input-mode',
'set-keyboard-coding-system-internal', 'set-keymap-parent',
'set-marker', 'set-marker-insertion-type', 'set-match-data',
'set-message-beep', 'set-minibuffer-window',
'set-mouse-pixel-position', 'set-mouse-position',
'set-network-process-option', 'set-output-flow-control',
'set-process-buffer', 'set-process-coding-system',
'set-process-datagram-address', 'set-process-filter',
'set-process-filter-multibyte',
'set-process-inherit-coding-system-flag', 'set-process-plist',
'set-process-query-on-exit-flag', 'set-process-sentinel',
'set-process-window-size', 'set-quit-char',
'set-safe-terminal-coding-system-internal', 'set-screen-color',
'set-standard-case-table', 'set-syntax-table',
'set-terminal-coding-system-internal', 'set-terminal-local-value',
'set-terminal-parameter', 'set-text-properties', 'set-time-zone-rule',
'set-visited-file-modtime', 'set-window-buffer',
'set-window-combination-limit', 'set-window-configuration',
'set-window-dedicated-p', 'set-window-display-table',
'set-window-fringes', 'set-window-hscroll', 'set-window-margins',
'set-window-new-normal', 'set-window-new-pixel',
'set-window-new-total', 'set-window-next-buffers',
'set-window-parameter', 'set-window-point', 'set-window-prev-buffers',
'set-window-redisplay-end-trigger', 'set-window-scroll-bars',
'set-window-start', 'set-window-vscroll', 'setcar', 'setcdr',
'setplist', 'show-face-resources', 'signal', 'signal-process', 'sin',
'single-key-description', 'skip-chars-backward', 'skip-chars-forward',
'skip-syntax-backward', 'skip-syntax-forward', 'sleep-for', 'sort',
'sort-charsets', 'special-variable-p', 'split-char',
'split-window-internal', 'sqrt', 'standard-case-table',
'standard-category-table', 'standard-syntax-table', 'start-kbd-macro',
'start-process', 'stop-process', 'store-kbd-macro-event', 'string',
'string-as-multibyte', 'string-as-unibyte', 'string-bytes',
'string-collate-equalp', 'string-collate-lessp', 'string-equal',
'string-lessp', 'string-make-multibyte', 'string-make-unibyte',
'string-match', 'string-to-char', 'string-to-multibyte',
'string-to-number', 'string-to-syntax', 'string-to-unibyte',
'string-width', 'stringp', 'subr-name', 'subrp',
'subst-char-in-region', 'substitute-command-keys',
'substitute-in-file-name', 'substring', 'substring-no-properties',
'suspend-emacs', 'suspend-tty', 'suspicious-object', 'sxhash',
'symbol-function', 'symbol-name', 'symbol-plist', 'symbol-value',
'symbolp', 'syntax-table', 'syntax-table-p', 'system-groups',
'system-move-file-to-trash', 'system-name', 'system-users', 'tan',
'terminal-coding-system', 'terminal-list', 'terminal-live-p',
'terminal-local-value', 'terminal-name', 'terminal-parameter',
'terminal-parameters', 'terpri', 'test-completion',
'text-char-description', 'text-properties-at', 'text-property-any',
'text-property-not-all', 'this-command-keys',
'this-command-keys-vector', 'this-single-command-keys',
'this-single-command-raw-keys', 'time-add', 'time-less-p',
'time-subtract', 'tool-bar-get-system-style', 'tool-bar-height',
'tool-bar-pixel-width', 'top-level', 'trace-redisplay',
'trace-to-stderr', 'translate-region-internal', 'transpose-regions',
'truncate', 'try-completion', 'tty-display-color-cells',
'tty-display-color-p', 'tty-no-underline',
'tty-suppress-bold-inverse-default-colors', 'tty-top-frame',
'tty-type', 'type-of', 'undo-boundary', 'unencodable-char-position',
'unhandled-file-name-directory', 'unibyte-char-to-multibyte',
'unibyte-string', 'unicode-property-table-internal', 'unify-charset',
'unintern', 'unix-sync', 'unlock-buffer', 'upcase', 'upcase-initials',
'upcase-initials-region', 'upcase-region', 'upcase-word',
'use-global-map', 'use-local-map', 'user-full-name',
'user-login-name', 'user-real-login-name', 'user-real-uid',
'user-uid', 'variable-binding-locus', 'vconcat', 'vector',
'vector-or-char-table-p', 'vectorp', 'verify-visited-file-modtime',
'vertical-motion', 'visible-frame-list', 'visited-file-modtime',
'w16-get-clipboard-data', 'w16-selection-exists-p',
'w16-set-clipboard-data', 'w32-battery-status',
'w32-default-color-map', 'w32-define-rgb-color',
'w32-display-monitor-attributes-list', 'w32-frame-menu-bar-size',
'w32-frame-rect', 'w32-get-clipboard-data',
'w32-get-codepage-charset', 'w32-get-console-codepage',
'w32-get-console-output-codepage', 'w32-get-current-locale-id',
'w32-get-default-locale-id', 'w32-get-keyboard-layout',
'w32-get-locale-info', 'w32-get-valid-codepages',
'w32-get-valid-keyboard-layouts', 'w32-get-valid-locale-ids',
'w32-has-winsock', 'w32-long-file-name', 'w32-reconstruct-hot-key',
'w32-register-hot-key', 'w32-registered-hot-keys',
'w32-selection-exists-p', 'w32-send-sys-command',
'w32-set-clipboard-data', 'w32-set-console-codepage',
'w32-set-console-output-codepage', 'w32-set-current-locale',
'w32-set-keyboard-layout', 'w32-set-process-priority',
'w32-shell-execute', 'w32-short-file-name', 'w32-toggle-lock-key',
'w32-unload-winsock', 'w32-unregister-hot-key', 'w32-window-exists-p',
'w32notify-add-watch', 'w32notify-rm-watch',
'waiting-for-user-input-p', 'where-is-internal', 'widen',
'widget-apply', 'widget-get', 'widget-put',
'window-absolute-pixel-edges', 'window-at', 'window-body-height',
'window-body-width', 'window-bottom-divider-width', 'window-buffer',
'window-combination-limit', 'window-configuration-frame',
'window-configuration-p', 'window-dedicated-p',
'window-display-table', 'window-edges', 'window-end', 'window-frame',
'window-fringes', 'window-header-line-height', 'window-hscroll',
'window-inside-absolute-pixel-edges', 'window-inside-edges',
'window-inside-pixel-edges', 'window-left-child',
'window-left-column', 'window-line-height', 'window-list',
'window-list-1', 'window-live-p', 'window-margins',
'window-minibuffer-p', 'window-mode-line-height', 'window-new-normal',
'window-new-pixel', 'window-new-total', 'window-next-buffers',
'window-next-sibling', 'window-normal-size', 'window-old-point',
'window-parameter', 'window-parameters', 'window-parent',
'window-pixel-edges', 'window-pixel-height', 'window-pixel-left',
'window-pixel-top', 'window-pixel-width', 'window-point',
'window-prev-buffers', 'window-prev-sibling',
'window-redisplay-end-trigger', 'window-resize-apply',
'window-resize-apply-total', 'window-right-divider-width',
'window-scroll-bar-height', 'window-scroll-bar-width',
'window-scroll-bars', 'window-start', 'window-system',
'window-text-height', 'window-text-pixel-size', 'window-text-width',
'window-top-child', 'window-top-line', 'window-total-height',
'window-total-width', 'window-use-time', 'window-valid-p',
'window-vscroll', 'windowp', 'write-char', 'write-region',
'x-backspace-delete-keys-p', 'x-change-window-property',
'x-change-window-property', 'x-close-connection',
'x-close-connection', 'x-create-frame', 'x-create-frame',
'x-delete-window-property', 'x-delete-window-property',
'x-disown-selection-internal', 'x-display-backing-store',
'x-display-backing-store', 'x-display-color-cells',
'x-display-color-cells', 'x-display-grayscale-p',
'x-display-grayscale-p', 'x-display-list', 'x-display-list',
'x-display-mm-height', 'x-display-mm-height', 'x-display-mm-width',
'x-display-mm-width', 'x-display-monitor-attributes-list',
'x-display-pixel-height', 'x-display-pixel-height',
'x-display-pixel-width', 'x-display-pixel-width', 'x-display-planes',
'x-display-planes', 'x-display-save-under', 'x-display-save-under',
'x-display-screens', 'x-display-screens', 'x-display-visual-class',
'x-display-visual-class', 'x-family-fonts', 'x-file-dialog',
'x-file-dialog', 'x-file-dialog', 'x-focus-frame', 'x-frame-geometry',
'x-frame-geometry', 'x-get-atom-name', 'x-get-resource',
'x-get-selection-internal', 'x-hide-tip', 'x-hide-tip',
'x-list-fonts', 'x-load-color-file', 'x-menu-bar-open-internal',
'x-menu-bar-open-internal', 'x-open-connection', 'x-open-connection',
'x-own-selection-internal', 'x-parse-geometry', 'x-popup-dialog',
'x-popup-menu', 'x-register-dnd-atom', 'x-select-font',
'x-select-font', 'x-selection-exists-p', 'x-selection-owner-p',
'x-send-client-message', 'x-server-max-request-size',
'x-server-max-request-size', 'x-server-vendor', 'x-server-vendor',
'x-server-version', 'x-server-version', 'x-show-tip', 'x-show-tip',
'x-synchronize', 'x-synchronize', 'x-uses-old-gtk-dialog',
'x-window-property', 'x-window-property', 'x-wm-set-size-hint',
'xw-color-defined-p', 'xw-color-defined-p', 'xw-color-values',
'xw-color-values', 'xw-display-color-p', 'xw-display-color-p',
'yes-or-no-p', 'zlib-available-p', 'zlib-decompress-region',
'forward-point',
))
builtin_function_highlighted = set((
'defvaralias', 'provide', 'require',
'with-no-warnings', 'define-widget', 'with-electric-help',
'throw', 'defalias', 'featurep'
))
lambda_list_keywords = set((
'&allow-other-keys', '&aux', '&body', '&environment', '&key', '&optional',
'&rest', '&whole',
))
error_keywords = set((
'cl-assert', 'cl-check-type', 'error', 'signal',
'user-error', 'warn',
))
def get_tokens_unprocessed(self, text):
stack = ['root']
for index, token, value in RegexLexer.get_tokens_unprocessed(self, text, stack):
if token is Name.Variable:
if value in EmacsLispLexer.builtin_function:
yield index, Name.Function, value
continue
if value in EmacsLispLexer.special_forms:
yield index, Keyword, value
continue
if value in EmacsLispLexer.error_keywords:
yield index, Name.Exception, value
continue
if value in EmacsLispLexer.builtin_function_highlighted:
yield index, Name.Builtin, value
continue
if value in EmacsLispLexer.macros:
yield index, Name.Builtin, value
continue
if value in EmacsLispLexer.lambda_list_keywords:
yield index, Keyword.Pseudo, value
continue
yield index, token, value
tokens = {
'root': [
default('body'),
],
'body': [
# whitespace
(r'\s+', Text),
# single-line comment
(r';.*$', Comment.Single),
# strings and characters
(r'"', String, 'string'),
(r'\?([^\\]|\\.)', String.Char),
# quoting
(r":" + symbol, Name.Builtin),
(r"::" + symbol, String.Symbol),
(r"'" + symbol, String.Symbol),
(r"'", Operator),
(r"`", Operator),
# decimal numbers
(r'[-+]?\d+\.?' + terminated, Number.Integer),
(r'[-+]?\d+/\d+' + terminated, Number),
(r'[-+]?(\d*\.\d+([defls][-+]?\d+)?|\d+(\.\d*)?[defls][-+]?\d+)' +
terminated, Number.Float),
# vectors
(r'\[|\]', Punctuation),
# uninterned symbol
(r'#:' + symbol, String.Symbol),
# read syntax for char tables
(r'#\^\^?', Operator),
# function shorthand
(r'#\'', Name.Function),
# binary rational
(r'#[bB][+-]?[01]+(/[01]+)?', Number.Bin),
# octal rational
(r'#[oO][+-]?[0-7]+(/[0-7]+)?', Number.Oct),
# hex rational
(r'#[xX][+-]?[0-9a-fA-F]+(/[0-9a-fA-F]+)?', Number.Hex),
# radix rational
(r'#\d+r[+-]?[0-9a-zA-Z]+(/[0-9a-zA-Z]+)?', Number),
# reference
(r'#\d+=', Operator),
(r'#\d+#', Operator),
# special operators that should have been parsed already
(r'(,@|,|\.|:)', Operator),
# special constants
(r'(t|nil)' + terminated, Name.Constant),
# functions and variables
(r'\*' + symbol + '\*', Name.Variable.Global),
(symbol, Name.Variable),
# parentheses
(r'#\(', Operator, 'body'),
(r'\(', Punctuation, 'body'),
(r'\)', Punctuation, '#pop'),
],
'string': [
(r'[^"\\`]+', String),
(r'`%s\'' % symbol, String.Symbol),
(r'`', String),
(r'\\.', String),
(r'\\\n', String),
(r'"', String, '#pop'),
],
}
| EmacsLispLexer |
python | sphinx-doc__sphinx | sphinx/addnodes.py | {
"end": 16884
} | class ____(nodes.emphasis, not_smartquotable):
"""Node that behaves like `emphasis`, but further text processors are not
applied (e.g. smartypants for HTML output).
"""
| literal_emphasis |
python | pypa__pip | src/pip/_vendor/rich/prompt.py | {
"end": 9854
} | class ____(PromptBase[str]):
"""A prompt that returns a str.
Example:
>>> name = Prompt.ask("Enter your name")
"""
response_type = str
| Prompt |
python | sympy__sympy | sympy/integrals/manualintegrate.py | {
"end": 13238
} | class ____(Rule):
harg: Expr
ibnd: Expr
substep: Rule
def eval(self) -> Expr:
# If we are integrating over x and the integrand has the form
# Heaviside(m*x+b)*g(x) == Heaviside(harg)*g(symbol)
# then there needs to be continuity at -b/m == ibnd,
# so we subtract the appropriate term.
result = self.substep.eval()
return Heaviside(self.harg) * (result - result.subs(self.variable, self.ibnd))
def contains_dont_know(self) -> bool:
return self.substep.contains_dont_know()
@dataclass
| HeavisideRule |
python | altair-viz__altair | altair/vegalite/v6/schema/_config.py | {
"end": 282805
} | class ____(TypedDict, total=False):
"""
:class:`altair.TickConfig` ``TypedDict`` wrapper.
Parameters
----------
align
The horizontal alignment of the text or ranged marks (area, bar, image, rect, rule).
One of ``"left"``, ``"right"``, ``"center"``.
**Note:** Expression reference is *not* supported for range marks.
angle
The rotation angle of the text, in degrees.
aria
A boolean flag indicating if `ARIA attributes
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ should be
included (SVG output only). If ``false``, the "aria-hidden" attribute will be set on
the output SVG element, removing the mark item from the ARIA accessibility tree.
ariaRole
Sets the type of user interface element of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "role" attribute. Warning: this
property is experimental and may be changed in the future.
ariaRoleDescription
A human-readable, author-localized description for the role of the mark item for
`ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the "aria-roledescription" attribute.
Warning: this property is experimental and may be changed in the future.
aspect
Whether to keep aspect ratio of image marks.
bandSize
The width of the ticks.
**Default value:** 3/4 of step (width step for horizontal ticks and height step for
vertical ticks).
baseline
For text marks, the vertical text baseline. One of ``"alphabetic"`` (default),
``"top"``, ``"middle"``, ``"bottom"``, ``"line-top"``, ``"line-bottom"``, or an
expression reference that provides one of the valid values. The ``"line-top"`` and
``"line-bottom"`` values operate similarly to ``"top"`` and ``"bottom"``, but are
calculated relative to the ``lineHeight`` rather than ``fontSize`` alone.
For range marks, the vertical alignment of the marks. One of ``"top"``,
``"middle"``, ``"bottom"``.
**Note:** Expression reference is *not* supported for range marks.
binSpacing
Offset between bars for binned field. The ideal value for this is either 0
(preferred by statisticians) or 1 (Vega-Lite default, D3 example style).
**Default value:** ``1``
blend
The color blend mode for drawing an item on its current background. Any valid `CSS
mix-blend-mode <https://developer.mozilla.org/en-US/docs/Web/CSS/mix-blend-mode>`__
value can be used.
**Default value:** ``"source-over"``
color
Default color.
**Default value:** ``"#4682b4"``
**Note:**
* This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
* The ``fill`` and ``stroke`` properties have higher precedence than ``color`` and
will override ``color``.
continuousBandSize
The default size of the bars on continuous scales.
**Default value:** ``5``
cornerRadius
The radius in pixels of rounded rectangles or arcs' corners.
**Default value:** ``0``
cornerRadiusBottomLeft
The radius in pixels of rounded rectangles' bottom left corner.
**Default value:** ``0``
cornerRadiusBottomRight
The radius in pixels of rounded rectangles' bottom right corner.
**Default value:** ``0``
cornerRadiusTopLeft
The radius in pixels of rounded rectangles' top right corner.
**Default value:** ``0``
cornerRadiusTopRight
The radius in pixels of rounded rectangles' top left corner.
**Default value:** ``0``
cursor
The mouse cursor used over the mark. Any valid `CSS cursor type
<https://developer.mozilla.org/en-US/docs/Web/CSS/cursor#Values>`__ can be used.
description
A text description of the mark item for `ARIA accessibility
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA>`__ (SVG output
only). If specified, this property determines the `"aria-label" attribute
<https://developer.mozilla.org/en-US/docs/Web/Accessibility/ARIA/ARIA_Techniques/Using_the_aria-label_attribute>`__.
dir
The direction of the text. One of ``"ltr"`` (left-to-right) or ``"rtl"``
(right-to-left). This property determines on which side is truncated in response to
the limit parameter.
**Default value:** ``"ltr"``
discreteBandSize
The default size of the bars with discrete dimensions. If unspecified, the default
size is ``step-2``, which provides 2 pixel offset between bars.
dx
The horizontal offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
dy
The vertical offset, in pixels, between the text label and its anchor point. The
offset is applied after rotation by the *angle* property.
ellipsis
The ellipsis string for text truncated in response to the limit parameter.
**Default value:** ``"…"``
endAngle
The end angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
fill
Default fill color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove fill.
**Default value:** (None)
fillOpacity
The fill opacity (value between [0,1]).
**Default value:** ``1``
filled
Whether the mark's color should be used as fill color instead of stroke color.
**Default value:** ``false`` for all ``point``, ``line``, and ``rule`` marks as well
as ``geoshape`` marks for `graticule
<https://vega.github.io/vega-lite/docs/data.html#graticule>`__ data sources;
otherwise, ``true``.
**Note:** This property cannot be used in a `style config
<https://vega.github.io/vega-lite/docs/mark.html#style-config>`__.
font
The typeface to set the text in (e.g., ``"Helvetica Neue"``).
fontSize
The font size, in pixels.
**Default value:** ``11``
fontStyle
The font style (e.g., ``"italic"``).
fontWeight
The font weight. This can be either a string (e.g ``"bold"``, ``"normal"``) or a
number (``100``, ``200``, ``300``, ..., ``900`` where ``"normal"`` = ``400`` and
``"bold"`` = ``700``).
height
Height of the marks.
href
A URL to load upon mouse click. If defined, the mark acts as a hyperlink.
innerRadius
The inner radius in pixels of arc marks. ``innerRadius`` is an alias for
``radius2``.
**Default value:** ``0``
interpolate
The line interpolation method to use for line and area marks. One of the following:
* ``"linear"``: piecewise linear segments, as in a polyline.
* ``"linear-closed"``: close the linear segments to form a polygon.
* ``"step"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"step-before"``: alternate between vertical and horizontal segments, as in a
step function.
* ``"step-after"``: alternate between horizontal and vertical segments, as in a step
function.
* ``"basis"``: a B-spline, with control point duplication on the ends.
* ``"basis-open"``: an open B-spline; may not intersect the start or end.
* ``"basis-closed"``: a closed B-spline, as in a loop.
* ``"cardinal"``: a Cardinal spline, with control point duplication on the ends.
* ``"cardinal-open"``: an open Cardinal spline; may not intersect the start or end,
but will intersect other control points.
* ``"cardinal-closed"``: a closed Cardinal spline, as in a loop.
* ``"bundle"``: equivalent to basis, except the tension parameter is used to
straighten the spline.
* ``"monotone"``: cubic interpolation that preserves monotonicity in y.
invalid
Invalid data mode, which defines how the marks and corresponding scales should
represent invalid values (``null`` and ``NaN`` in continuous scales *without*
defined output for invalid values).
* ``"filter"`` — *Exclude* all invalid values from the visualization's *marks* and
*scales*. For path marks (for line, area, trail), this option will create paths
that connect valid points, as if the data rows with invalid values do not exist.
* ``"break-paths-filter-domains"`` — Break path marks (for line, area, trail) at
invalid values. For non-path marks, this is equivalent to ``"filter"``. All
*scale* domains will *exclude* these filtered data points.
* ``"break-paths-show-domains"`` — Break paths (for line, area, trail) at invalid
values. Hide invalid values for non-path marks. All *scale* domains will
*include* these filtered data points (for both path and non-path marks).
* ``"show"`` or ``null`` — Show all data points in the marks and scale domains. Each
scale will use the output for invalid values defined in ``config.scale.invalid``
or, if unspecified, by default invalid values will produce the same visual values
as zero (if the scale includes zero) or the minimum value (if the scale does not
include zero).
* ``"break-paths-show-path-domains"`` (default) — This is equivalent to
``"break-paths-show-domains"`` for path-based marks (line/area/trail) and
``"filter"`` for non-path marks.
**Note**: If any channel's scale has an output for invalid values defined in
``config.scale.invalid``, all values for the scales will be considered "valid" since
they can produce a reasonable output for the scales. Thus, fields for such channels
will not be filtered and will not cause path breaks.
limit
The maximum length of the text mark in pixels. The text value will be automatically
truncated if the rendered size exceeds the limit.
**Default value:** ``0`` -- indicating no limit
lineBreak
A delimiter, such as a newline character, upon which to break text strings into
multiple lines. This property is ignored if the text is array-valued.
lineHeight
The line height in pixels (the spacing between subsequent lines of text) for
multi-line text marks.
minBandSize
The minimum band size for bar and rectangle marks. **Default value:** ``0.25``
opacity
The overall opacity (value between [0,1]).
**Default value:** ``0.7`` for non-aggregate plots with ``point``, ``tick``,
``circle``, or ``square`` marks or layered ``bar`` charts and ``1`` otherwise.
order
For line and trail marks, this ``order`` property can be set to ``null`` or
``false`` to make the lines use the original order in the data sources.
orient
The orientation of a non-stacked bar, tick, area, and line charts. The value is
either horizontal (default) or vertical.
* For bar, rule and tick, this determines whether the size of the bar and tick
should be applied to x or y dimension.
* For area, this property determines the orient property of the Vega output.
* For line and trail marks, this property determines the sort order of the points in
the line if ``config.sortLineBy`` is not specified. For stacked charts, this is
always determined by the orientation of the stack; therefore explicitly specified
value will be ignored.
outerRadius
The outer radius in pixels of arc marks. ``outerRadius`` is an alias for ``radius``.
**Default value:** ``0``
padAngle
The angular padding applied to sides of the arc, in radians.
radius
For arc mark, the primary (outer) radius in pixels.
For text marks, polar coordinate radial offset, in pixels, of the text from the
origin determined by the ``x`` and ``y`` properties.
**Default value:** ``min(plot_width, plot_height)/2``
radius2
The secondary (inner) radius in pixels of arc marks.
**Default value:** ``0``
shape
Shape of the point marks. Supported values include:
* plotting shapes: ``"circle"``, ``"square"``, ``"cross"``, ``"diamond"``,
``"triangle-up"``, ``"triangle-down"``, ``"triangle-right"``, or
``"triangle-left"``.
* the line symbol ``"stroke"``
* centered directional shapes ``"arrow"``, ``"wedge"``, or ``"triangle"``
* a custom `SVG path string
<https://developer.mozilla.org/en-US/docs/Web/SVG/Tutorial/Paths>`__ (For correct
sizing, custom shape paths should be defined within a square bounding box with
coordinates ranging from -1 to 1 along both the x and y dimensions.)
**Default value:** ``"circle"``
size
Default size for marks.
* For ``point``/``circle``/``square``, this represents the pixel area of the marks.
Note that this value sets the area of the symbol; the side lengths will increase
with the square root of this value.
* For ``bar``, this represents the band size of the bar, in pixels.
* For ``text``, this represents the font size, in pixels.
**Default value:**
* ``30`` for point, circle, square marks; width/height's ``step``
* ``2`` for bar marks with discrete dimensions;
* ``5`` for bar marks with continuous dimensions;
* ``11`` for text marks.
smooth
A boolean flag (default true) indicating if the image should be smoothed when
resized. If false, individual pixels should be scaled directly rather than
interpolated with smoothing. For SVG rendering, this option may not work in some
browsers due to lack of standardization.
startAngle
The start angle in radians for arc marks. A value of ``0`` indicates up (north),
increasing values proceed clockwise.
stroke
Default stroke color. This property has higher precedence than ``config.color``. Set
to ``null`` to remove stroke.
**Default value:** (None)
strokeCap
The stroke cap for line ending style. One of ``"butt"``, ``"round"``, or
``"square"``.
**Default value:** ``"butt"``
strokeDash
An array of alternating stroke, space lengths for creating dashed or dotted lines.
strokeDashOffset
The offset (in pixels) into which to begin drawing with the stroke dash array.
strokeJoin
The stroke line join method. One of ``"miter"``, ``"round"`` or ``"bevel"``.
**Default value:** ``"miter"``
strokeMiterLimit
The miter limit at which to bevel a line join.
strokeOffset
The offset in pixels at which to draw the group stroke and fill. If unspecified, the
default behavior is to dynamically offset stroked groups such that 1 pixel stroke
widths align with the pixel grid.
strokeOpacity
The stroke opacity (value between [0,1]).
**Default value:** ``1``
strokeWidth
The stroke width, in pixels.
tension
Depending on the interpolation type, sets the tension parameter (for line and area
marks).
text
Placeholder text if the ``text`` channel is not specified
theta
* For arc marks, the arc length in radians if theta2 is not specified, otherwise the
start arc angle. (A value of 0 indicates up or “north”, increasing values proceed
clockwise.)
* For text marks, polar coordinate angle in radians.
theta2
The end angle of arc marks in radians. A value of 0 indicates up or “north”,
increasing values proceed clockwise.
thickness
Thickness of the tick mark.
**Default value:** ``1``
time
timeUnitBandPosition
Default relative band position for a time unit. If set to ``0``, the marks will be
positioned at the beginning of the time unit band step. If set to ``0.5``, the marks
will be positioned in the middle of the time unit band step.
timeUnitBandSize
Default relative band size for a time unit. If set to ``1``, the bandwidth of the
marks will be equal to the time unit band step. If set to ``0.5``, bandwidth of the
marks will be half of the time unit band step.
tooltip
The tooltip text string to show upon mouse hover or an object defining which fields
should the tooltip be derived from.
* If ``tooltip`` is ``true`` or ``{"content": "encoding"}``, then all fields from
``encoding`` will be used.
* If ``tooltip`` is ``{"content": "data"}``, then all fields that appear in the
highlighted data point will be used.
* If set to ``null`` or ``false``, then no tooltip will be used.
See the `tooltip <https://vega.github.io/vega-lite/docs/tooltip.html>`__
documentation for a detailed discussion about tooltip in Vega-Lite.
**Default value:** ``null``
url
The URL of the image file for image marks.
width
Width of the marks.
x
X coordinates of the marks, or width of horizontal ``"bar"`` and ``"area"`` without
specified ``x2`` or ``width``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
x2
X2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"width"`` for the width
of the plot.
y
Y coordinates of the marks, or height of vertical ``"bar"`` and ``"area"`` without
specified ``y2`` or ``height``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
y2
Y2 coordinates for ranged ``"area"``, ``"bar"``, ``"rect"``, and ``"rule"``.
The ``value`` of this channel can be a number or a string ``"height"`` for the
height of the plot.
"""
align: Align_T
angle: float
aria: bool
ariaRole: str
ariaRoleDescription: str
aspect: bool
bandSize: float
baseline: TextBaseline_T
binSpacing: float
blend: Blend_T
color: ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T
continuousBandSize: float
cornerRadius: float
cornerRadiusBottomLeft: float
cornerRadiusBottomRight: float
cornerRadiusTopLeft: float
cornerRadiusTopRight: float
cursor: Cursor_T
description: str
dir: TextDirection_T
discreteBandSize: float
dx: float
dy: float
ellipsis: str
endAngle: float
fill: ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T | None
fillOpacity: float
filled: bool
font: str
fontSize: float
fontStyle: str
fontWeight: FontWeight_T
height: float
href: str
innerRadius: float
interpolate: Interpolate_T
invalid: MarkInvalidDataMode_T | None
limit: float
lineBreak: str
lineHeight: float
minBandSize: float
opacity: float
order: bool | None
orient: Orientation_T
outerRadius: float
padAngle: float
radius: float
radius2: float
shape: str
size: float
smooth: bool
startAngle: float
stroke: ColorHex | LinearGradientKwds | RadialGradientKwds | ColorName_T | None
strokeCap: StrokeCap_T
strokeDash: Sequence[float]
strokeDashOffset: float
strokeJoin: StrokeJoin_T
strokeMiterLimit: float
strokeOffset: float
strokeOpacity: float
strokeWidth: float
tension: float
text: str | Sequence[str]
theta: float
theta2: float
thickness: float
time: float
timeUnitBandPosition: float
timeUnitBandSize: float
tooltip: str | bool | float | TooltipContentKwds | None
url: str
width: float
x: float | Literal["width"]
x2: float | Literal["width"]
y: float | Literal["height"]
y2: float | Literal["height"]
| TickConfigKwds |
python | huggingface__transformers | src/transformers/models/speecht5/configuration_speecht5.py | {
"end": 19002
} | class ____(PreTrainedConfig):
r"""
This is the configuration class to store the configuration of a [`SpeechT5Model`]. It is used to instantiate a
SpeechT5 model according to the specified arguments, defining the model architecture. Instantiating a configuration
with the defaults will yield a similar configuration to that of the SpeechT5
[microsoft/speecht5_asr](https://huggingface.co/microsoft/speecht5_asr) architecture.
Configuration objects inherit from [`PreTrainedConfig`] and can be used to control the model outputs. Read the
documentation from [`PreTrainedConfig`] for more information.
Args:
vocab_size (`int`, *optional*, defaults to 81):
Vocabulary size of the SpeechT5 model. Defines the number of different tokens that can be represented by
the `inputs_ids` passed to the forward method of [`SpeechT5Model`].
hidden_size (`int`, *optional*, defaults to 768):
Dimensionality of the encoder layers and the pooler layer.
encoder_layers (`int`, *optional*, defaults to 12):
Number of hidden layers in the Transformer encoder.
encoder_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer encoder.
encoder_ffn_dim (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder.
encoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the encoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
decoder_layers (`int`, *optional*, defaults to 6):
Number of hidden layers in the Transformer decoder.
decoder_attention_heads (`int`, *optional*, defaults to 12):
Number of attention heads for each attention layer in the Transformer decoder.
decoder_ffn_dim (`int`, *optional*, defaults to 3072):
Dimensionality of the "intermediate" (often named feed-forward) layer in the Transformer decoder.
decoder_layerdrop (`float`, *optional*, defaults to 0.1):
The LayerDrop probability for the decoder. See the [LayerDrop paper](see https://huggingface.co/papers/1909.11556)
for more details.
hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`):
The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`,
`"relu"`, `"selu"` and `"gelu_new"` are supported.
positional_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for the text position encoding layers.
hidden_dropout (`float`, *optional*, defaults to 0.1):
The dropout probability for all fully connected layers in the embeddings, encoder, and pooler.
attention_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for the attention probabilities.
activation_dropout (`float`, *optional*, defaults to 0.1):
The dropout ratio for activations inside the fully connected layer.
initializer_range (`float`, *optional*, defaults to 0.02):
The standard deviation of the truncated_normal_initializer for initializing all weight matrices.
layer_norm_eps (`float`, *optional*, defaults to 1e-5):
The epsilon used by the layer normalization layers.
scale_embedding (`bool`, *optional*, defaults to `False`):
Scale embeddings by diving by sqrt(d_model).
feat_extract_norm (`str`, *optional*, defaults to `"group"`):
The norm to be applied to 1D convolutional layers in the speech encoder pre-net. One of `"group"` for group
normalization of only the first 1D convolutional layer or `"layer"` for layer normalization of all 1D
convolutional layers.
feat_proj_dropout (`float`, *optional*, defaults to 0.0):
The dropout probability for output of the speech encoder pre-net.
feat_extract_activation (`str, `optional`, defaults to `"gelu"`):
The non-linear activation function (function or string) in the 1D convolutional layers of the feature
extractor. If string, `"gelu"`, `"relu"`, `"selu"` and `"gelu_new"` are supported.
conv_dim (`tuple[int]` or `list[int]`, *optional*, defaults to `(512, 512, 512, 512, 512, 512, 512)`):
A tuple of integers defining the number of input and output channels of each 1D convolutional layer in the
speech encoder pre-net. The length of *conv_dim* defines the number of 1D convolutional layers.
conv_stride (`tuple[int]` or `list[int]`, *optional*, defaults to `(5, 2, 2, 2, 2, 2, 2)`):
A tuple of integers defining the stride of each 1D convolutional layer in the speech encoder pre-net. The
length of *conv_stride* defines the number of convolutional layers and has to match the length of
*conv_dim*.
conv_kernel (`tuple[int]` or `list[int]`, *optional*, defaults to `(10, 3, 3, 3, 3, 3, 3)`):
A tuple of integers defining the kernel size of each 1D convolutional layer in the speech encoder pre-net.
The length of *conv_kernel* defines the number of convolutional layers and has to match the length of
*conv_dim*.
conv_bias (`bool`, *optional*, defaults to `False`):
Whether the 1D convolutional layers have a bias.
num_conv_pos_embeddings (`int`, *optional*, defaults to 128):
Number of convolutional positional embeddings. Defines the kernel size of 1D convolutional positional
embeddings layer.
num_conv_pos_embedding_groups (`int`, *optional*, defaults to 16):
Number of groups of 1D convolutional positional embeddings layer.
apply_spec_augment (`bool`, *optional*, defaults to `True`):
Whether to apply *SpecAugment* data augmentation to the outputs of the speech encoder pre-net. For
reference see [SpecAugment: A Simple Data Augmentation Method for Automatic Speech
Recognition](https://huggingface.co/papers/1904.08779).
mask_time_prob (`float`, *optional*, defaults to 0.05):
Percentage (between 0 and 1) of all feature vectors along the time axis which will be masked. The masking
procedure generates ''mask_time_prob*len(time_axis)/mask_time_length'' independent masks over the axis. If
reasoning from the probability of each feature vector to be chosen as the start of the vector span to be
masked, *mask_time_prob* should be `prob_vector_start*mask_time_length`. Note that overlap may decrease the
actual percentage of masked vectors. This is only relevant if `apply_spec_augment is True`.
mask_time_length (`int`, *optional*, defaults to 10):
Length of vector span along the time axis.
mask_time_min_masks (`int`, *optional*, defaults to 2),:
The minimum number of masks of length `mask_feature_length` generated along the time axis, each time step,
irrespectively of `mask_feature_prob`. Only relevant if ''mask_time_prob*len(time_axis)/mask_time_length <
mask_time_min_masks''
mask_feature_prob (`float`, *optional*, defaults to 0.0):
Percentage (between 0 and 1) of all feature vectors along the feature axis which will be masked. The
masking procedure generates ''mask_feature_prob*len(feature_axis)/mask_time_length'' independent masks over
the axis. If reasoning from the probability of each feature vector to be chosen as the start of the vector
span to be masked, *mask_feature_prob* should be `prob_vector_start*mask_feature_length`. Note that overlap
may decrease the actual percentage of masked vectors. This is only relevant if `apply_spec_augment is
True`.
mask_feature_length (`int`, *optional*, defaults to 10):
Length of vector span along the feature axis.
mask_feature_min_masks (`int`, *optional*, defaults to 0),:
The minimum number of masks of length `mask_feature_length` generated along the feature axis, each time
step, irrespectively of `mask_feature_prob`. Only relevant if
''mask_feature_prob*len(feature_axis)/mask_feature_length < mask_feature_min_masks''
num_mel_bins (`int`, *optional*, defaults to 80):
Number of mel features used per input features. Used by the speech decoder pre-net. Should correspond to
the value used in the [`SpeechT5Processor`] class.
speech_decoder_prenet_layers (`int`, *optional*, defaults to 2):
Number of layers in the speech decoder pre-net.
speech_decoder_prenet_units (`int`, *optional*, defaults to 256):
Dimensionality of the layers in the speech decoder pre-net.
speech_decoder_prenet_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability for the speech decoder pre-net layers.
speaker_embedding_dim (`int`, *optional*, defaults to 512):
Dimensionality of the *XVector* embedding vectors.
speech_decoder_postnet_layers (`int`, *optional*, defaults to 5):
Number of layers in the speech decoder post-net.
speech_decoder_postnet_units (`int`, *optional*, defaults to 256):
Dimensionality of the layers in the speech decoder post-net.
speech_decoder_postnet_kernel (`int`, *optional*, defaults to 5):
Number of convolutional filter channels in the speech decoder post-net.
speech_decoder_postnet_dropout (`float`, *optional*, defaults to 0.5):
The dropout probability for the speech decoder post-net layers.
reduction_factor (`int`, *optional*, defaults to 2):
Spectrogram length reduction factor for the speech decoder inputs.
max_speech_positions (`int`, *optional*, defaults to 4000):
The maximum sequence length of speech features that this model might ever be used with.
max_text_positions (`int`, *optional*, defaults to 450):
The maximum sequence length of text features that this model might ever be used with.
encoder_max_relative_position (`int`, *optional*, defaults to 160):
Maximum distance for relative position embedding in the encoder.
use_guided_attention_loss (`bool`, *optional*, defaults to `True`):
Whether to apply guided attention loss while training the TTS model.
guided_attention_loss_num_heads (`int`, *optional*, defaults to 2):
Number of attention heads the guided attention loss will be applied to. Use -1 to apply this loss to all
attention heads.
guided_attention_loss_sigma (`float`, *optional*, defaults to 0.4):
Standard deviation for guided attention loss.
guided_attention_loss_scale (`float`, *optional*, defaults to 10.0):
Scaling coefficient for guided attention loss (also known as lambda).
use_cache (`bool`, *optional*, defaults to `True`):
Whether or not the model should return the last key/values attentions (not used by all models).
Example:
```python
>>> from transformers import SpeechT5Model, SpeechT5Config
>>> # Initializing a "microsoft/speecht5_asr" style configuration
>>> configuration = SpeechT5Config()
>>> # Initializing a model (with random weights) from the "microsoft/speecht5_asr" style configuration
>>> model = SpeechT5Model(configuration)
>>> # Accessing the model configuration
>>> configuration = model.config
```"""
model_type = "speecht5"
attribute_map = {"num_attention_heads": "encoder_attention_heads", "num_hidden_layers": "encoder_layers"}
def __init__(
self,
vocab_size=81,
hidden_size=768,
encoder_layers=12,
encoder_attention_heads=12,
encoder_ffn_dim=3072,
encoder_layerdrop=0.1,
decoder_layers=6,
decoder_ffn_dim=3072,
decoder_attention_heads=12,
decoder_layerdrop=0.1,
hidden_act="gelu",
positional_dropout=0.1,
hidden_dropout=0.1,
attention_dropout=0.1,
activation_dropout=0.1,
initializer_range=0.02,
layer_norm_eps=1e-5,
scale_embedding=False,
feat_extract_norm="group",
feat_proj_dropout=0.0,
feat_extract_activation="gelu",
conv_dim=(512, 512, 512, 512, 512, 512, 512),
conv_stride=(5, 2, 2, 2, 2, 2, 2),
conv_kernel=(10, 3, 3, 3, 3, 2, 2),
conv_bias=False,
num_conv_pos_embeddings=128,
num_conv_pos_embedding_groups=16,
apply_spec_augment=True,
mask_time_prob=0.05,
mask_time_length=10,
mask_time_min_masks=2,
mask_feature_prob=0.0,
mask_feature_length=10,
mask_feature_min_masks=0,
pad_token_id=1,
bos_token_id=0,
eos_token_id=2,
decoder_start_token_id=2,
num_mel_bins=80,
speech_decoder_prenet_layers=2,
speech_decoder_prenet_units=256,
speech_decoder_prenet_dropout=0.5,
speaker_embedding_dim=512,
speech_decoder_postnet_layers=5,
speech_decoder_postnet_units=256,
speech_decoder_postnet_kernel=5,
speech_decoder_postnet_dropout=0.5,
reduction_factor=2,
max_speech_positions=4000,
max_text_positions=450,
encoder_max_relative_position=160,
use_guided_attention_loss=True,
guided_attention_loss_num_heads=2,
guided_attention_loss_sigma=0.4,
guided_attention_loss_scale=10.0,
use_cache=True,
is_encoder_decoder=True,
**kwargs,
):
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.encoder_layers = encoder_layers
self.encoder_ffn_dim = encoder_ffn_dim
self.encoder_attention_heads = encoder_attention_heads
self.encoder_layerdrop = encoder_layerdrop
self.decoder_layers = decoder_layers
self.decoder_ffn_dim = decoder_ffn_dim
self.decoder_attention_heads = decoder_attention_heads
self.decoder_layerdrop = decoder_layerdrop
self.hidden_act = hidden_act
self.positional_dropout = positional_dropout
self.hidden_dropout = hidden_dropout
self.attention_dropout = attention_dropout
self.activation_dropout = activation_dropout
self.initializer_range = initializer_range
self.layer_norm_eps = layer_norm_eps
self.scale_embedding = scale_embedding
self.feat_extract_norm = feat_extract_norm
self.feat_proj_dropout = feat_proj_dropout
self.feat_extract_activation = feat_extract_activation
self.conv_dim = list(conv_dim)
self.conv_stride = list(conv_stride)
self.conv_kernel = list(conv_kernel)
self.conv_bias = conv_bias
self.num_conv_pos_embeddings = num_conv_pos_embeddings
self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
self.num_feat_extract_layers = len(self.conv_dim)
if (
(len(self.conv_stride) != self.num_feat_extract_layers)
or (len(self.conv_kernel) != self.num_feat_extract_layers)
or (len(self.conv_dim) != self.num_feat_extract_layers)
):
raise ValueError(
"Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="
" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="
f" {len(self.conv_dim)}`, `len(config.conv_stride) = {len(self.conv_stride)}`,"
f" `len(config.conv_kernel) = {len(self.conv_kernel)}`."
)
# fine-tuning config parameters for SpecAugment: https://huggingface.co/papers/1904.08779
self.apply_spec_augment = apply_spec_augment
self.mask_time_prob = mask_time_prob
self.mask_time_length = mask_time_length
self.mask_time_min_masks = mask_time_min_masks
self.mask_feature_prob = mask_feature_prob
self.mask_feature_length = mask_feature_length
self.mask_feature_min_masks = mask_feature_min_masks
self.num_mel_bins = num_mel_bins
self.speech_decoder_prenet_layers = speech_decoder_prenet_layers
self.speech_decoder_prenet_units = speech_decoder_prenet_units
self.speech_decoder_prenet_dropout = speech_decoder_prenet_dropout
self.speaker_embedding_dim = speaker_embedding_dim
self.speech_decoder_postnet_layers = speech_decoder_postnet_layers
self.speech_decoder_postnet_units = speech_decoder_postnet_units
self.speech_decoder_postnet_kernel = speech_decoder_postnet_kernel
self.speech_decoder_postnet_dropout = speech_decoder_postnet_dropout
self.reduction_factor = reduction_factor
self.max_speech_positions = max_speech_positions
self.max_text_positions = max_text_positions
self.encoder_max_relative_position = encoder_max_relative_position
self.use_guided_attention_loss = use_guided_attention_loss
self.guided_attention_loss_num_heads = guided_attention_loss_num_heads
self.guided_attention_loss_sigma = guided_attention_loss_sigma
self.guided_attention_loss_scale = guided_attention_loss_scale
self.use_cache = use_cache
self.is_encoder_decoder = is_encoder_decoder
super().__init__(
pad_token_id=pad_token_id,
bos_token_id=bos_token_id,
eos_token_id=eos_token_id,
is_encoder_decoder=is_encoder_decoder,
decoder_start_token_id=decoder_start_token_id,
**kwargs,
)
def inputs_to_logits_ratio(self):
return functools.reduce(operator.mul, self.conv_stride, 1)
| SpeechT5Config |
python | gevent__gevent | src/gevent/tests/test__local.py | {
"end": 1804
} | class ____(local, Mapping):
def __getitem__(self, name):
return self.d[name]
def __iter__(self):
return iter(self.d)
def __len__(self):
return len(self.d)
| LocalWithABC |
python | prabhupant__python-ds | data_structures/linked_list/odd_even_arrangement.py | {
"end": 462
} | class ____():
def __init__(self, val):
self.val = val
self.next = None
def arrange(head):
if not head:
return None
odd = head
even = head.next
even_head = even
while even and even.next:
odd.next = even.next
odd = odd.next
even.next = odd.next
even = even.next
odd.next = even_head
return head
| Node |
python | getsentry__sentry | tests/sentry/rules/processing/test_delayed_processing.py | {
"end": 64766
} | class ____(CreateEventTestCase):
def setUp(self) -> None:
super().setUp()
self.project = self.create_project()
self.group = self.create_group(self.project)
self.rule = self.create_alert_rule()
self.log_config = LogConfig(num_events_issue_debugging=True)
def test_cleanup_redis(self) -> None:
self.push_to_hash(self.project.id, self.rule.id, self.group.id)
rules_to_groups: defaultdict[int, set[int]] = defaultdict(set)
rules_to_groups[self.rule.id].add(self.group.id)
cleanup_redis_buffer(self.log_config, self.project, rules_to_groups, None)
rule_group_data = buffer.backend.get_hash(Project, {"project_id": self.project.id})
assert rule_group_data == {}
@override_options({"delayed_processing.batch_size": 2})
@patch("sentry.rules.processing.delayed_processing.apply_delayed.apply_async")
def test_batched_cleanup(self, mock_apply_delayed: MagicMock) -> None:
group_two = self.create_group(self.project)
group_three = self.create_group(self.project)
self.push_to_hash(self.project.id, self.rule.id, self.group.id)
self.push_to_hash(self.project.id, self.rule.id, group_two.id)
self.push_to_hash(self.project.id, self.rule.id, group_three.id)
rules_to_groups: defaultdict[int, set[int]] = defaultdict(set)
rules_to_groups[self.rule.id].add(self.group.id)
rules_to_groups[self.rule.id].add(group_two.id)
rules_to_groups[self.rule.id].add(group_three.id)
process_in_batches(buffer.backend, self.project.id, "delayed_processing")
batch_one_key = mock_apply_delayed.call_args_list[0][1]["kwargs"]["batch_key"]
batch_two_key = mock_apply_delayed.call_args_list[1][1]["kwargs"]["batch_key"]
# Verify process_rulegroups_in_batches removed the data from the buffer
rule_group_data = buffer.backend.get_hash(Project, {"project_id": self.project.id})
assert rule_group_data == {}
cleanup_redis_buffer(self.log_config, self.project, rules_to_groups, batch_one_key)
# Verify the batch we "executed" is removed
rule_group_data = buffer.backend.get_hash(
Project, {"project_id": self.project.id, "batch_key": batch_one_key}
)
assert rule_group_data == {}
# Verify the batch we didn't execute is still in redis
rule_group_data = buffer.backend.get_hash(
Project, {"project_id": self.project.id, "batch_key": batch_two_key}
)
assert rule_group_data == {
f"{self.rule.id}:{group_three.id}": '{"event_id":null,"occurrence_id":null}',
}
| CleanupRedisBufferTest |
python | kamyu104__LeetCode-Solutions | Python/minimum-window-subsequence.py | {
"end": 936
} | class ____(object):
def minWindow(self, S, T):
"""
:type S: str
:type T: str
:rtype: str
"""
lookup = [[None for _ in xrange(26)] for _ in xrange(len(S)+1)]
find_char_next_pos = [None]*26
for i in reversed(xrange(len(S))):
find_char_next_pos[ord(S[i])-ord('a')] = i+1
lookup[i] = list(find_char_next_pos)
min_i, min_len = None, float("inf")
for i in xrange(len(S)):
if S[i] != T[0]:
continue
start = i
for c in T:
start = lookup[start][ord(c)-ord('a')]
if start == None:
break
else:
if start-i < min_len:
min_i, min_len = i, start-i
return S[min_i:min_i+min_len] if min_i is not None else ""
# Time: O(s * t)
# Space: O(s)
| Solution |
python | PyCQA__pylint | tests/functional/r/regression/regression_property_no_member_3269.py | {
"end": 441
} | class ____:
"""A child class"""
@property
def test(self):
"""Overriding implementation of prop which calls the parent"""
return A.test.fget(self) + " overridden"
if __name__ == "__main__":
print(B().test)
| B |