language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | doocs__leetcode | solution/2200-2299/2209.Minimum White Tiles After Covering With Carpets/Solution.py | {
"start": 0,
"end": 611
} | class ____:
def minimumWhiteTiles(self, floor: str, numCarpets: int, carpetLen: int) -> int:
@cache
def dfs(i: int, j: int) -> int:
if i >= n:
return 0
if floor[i] == "0":
return dfs(i + 1, j)
if j == 0:
return s[-1] - s[i]
return min(1 + dfs(i + 1, j), dfs(i + carpetLen, j - 1))
n = len(floor)
s = [0] * (n + 1)
for i, c in enumerate(floor):
s[i + 1] = s[i] + int(c == "1")
ans = dfs(0, numCarpets)
dfs.cache_clear()
return ans
| Solution |
python | getsentry__sentry | src/sentry/api/event_search.py | {
"start": 23043,
"end": 23091
} | class ____(NamedTuple):
name: str
| AggregateKey |
python | walkccc__LeetCode | solutions/2398. Maximum Number of Robots Within Budget/2398.py | {
"start": 0,
"end": 676
} | class ____:
def maximumRobots(
self,
chargeTimes: list[int],
runningCosts: list[int],
budget: int,
) -> int:
cost = 0
maxQ = collections.deque() # Stores `chargeTimes[i]`.
j = 0 # window's range := [i..j], so k = i - j + 1
for i, (chargeTime, runningCost) in enumerate(
zip(chargeTimes, runningCosts)):
cost += runningCost
while maxQ and maxQ[-1] < chargeTime:
maxQ.pop()
maxQ.append(chargeTime)
if maxQ[0] + (i - j + 1) * cost > budget:
if maxQ[0] == chargeTimes[j]:
maxQ.popleft()
cost -= runningCosts[j]
j += 1
return len(chargeTimes) - j
| Solution |
python | scipy__scipy | scipy/optimize/_basinhopping.py | {
"start": 1060,
"end": 7037
} | class ____:
"""This class implements the core of the basinhopping algorithm.
x0 : ndarray
The starting coordinates.
minimizer : callable
The local minimizer, with signature ``result = minimizer(x)``.
The return value is an `optimize.OptimizeResult` object.
step_taking : callable
This function displaces the coordinates randomly. Signature should
be ``x_new = step_taking(x)``. Note that `x` may be modified in-place.
accept_tests : list of callables
Each test is passed the kwargs `f_new`, `x_new`, `f_old` and
`x_old`. These tests will be used to judge whether or not to accept
the step. The acceptable return values are True, False, or ``"force
accept"``. If any of the tests return False then the step is rejected.
If ``"force accept"``, then this will override any other tests in
order to accept the step. This can be used, for example, to forcefully
escape from a local minimum that ``basinhopping`` is trapped in.
disp : bool, optional
Display status messages.
"""
def __init__(self, x0, minimizer, step_taking, accept_tests, disp=False):
self.x = np.copy(x0)
self.minimizer = minimizer
self.step_taking = step_taking
self.accept_tests = accept_tests
self.disp = disp
self.nstep = 0
# initialize return object
self.res = scipy.optimize.OptimizeResult()
self.res.minimization_failures = 0
# do initial minimization
minres = minimizer(self.x)
if not minres.success:
self.res.minimization_failures += 1
if self.disp:
print("warning: basinhopping: local minimization failure")
self.x = np.copy(minres.x)
self.energy = minres.fun
self.incumbent_minres = minres # best minimize result found so far
if self.disp:
print(f"basinhopping step {self.nstep}: f {self.energy:g}")
# initialize storage class
self.storage = Storage(minres)
if hasattr(minres, "nfev"):
self.res.nfev = minres.nfev
if hasattr(minres, "njev"):
self.res.njev = minres.njev
if hasattr(minres, "nhev"):
self.res.nhev = minres.nhev
def _monte_carlo_step(self):
"""Do one Monte Carlo iteration
Randomly displace the coordinates, minimize, and decide whether
or not to accept the new coordinates.
"""
# Take a random step. Make a copy of x because the step_taking
# algorithm might change x in place
x_after_step = np.copy(self.x)
x_after_step = self.step_taking(x_after_step)
# do a local minimization
minres = self.minimizer(x_after_step)
x_after_quench = minres.x
energy_after_quench = minres.fun
if not minres.success:
self.res.minimization_failures += 1
if self.disp:
print("warning: basinhopping: local minimization failure")
if hasattr(minres, "nfev"):
self.res.nfev += minres.nfev
if hasattr(minres, "njev"):
self.res.njev += minres.njev
if hasattr(minres, "nhev"):
self.res.nhev += minres.nhev
# accept the move based on self.accept_tests. If any test is False,
# then reject the step. If any test returns the special string
# 'force accept', then accept the step regardless. This can be used
# to forcefully escape from a local minimum if normal basin hopping
# steps are not sufficient.
accept = True
for test in self.accept_tests:
if wrapped_inspect_signature(test) == _new_accept_test_signature:
testres = test(res_new=minres, res_old=self.incumbent_minres)
else:
testres = test(f_new=energy_after_quench, x_new=x_after_quench,
f_old=self.energy, x_old=self.x)
if testres == 'force accept':
accept = True
break
elif testres is None:
raise ValueError("accept_tests must return True, False, or "
"'force accept'")
elif not testres:
accept = False
# Report the result of the acceptance test to the take step class.
# This is for adaptive step taking
if hasattr(self.step_taking, "report"):
self.step_taking.report(accept, f_new=energy_after_quench,
x_new=x_after_quench, f_old=self.energy,
x_old=self.x)
return accept, minres
def one_cycle(self):
"""Do one cycle of the basinhopping algorithm
"""
self.nstep += 1
new_global_min = False
accept, minres = self._monte_carlo_step()
if accept:
self.energy = minres.fun
self.x = np.copy(minres.x)
self.incumbent_minres = minres # best minimize result found so far
new_global_min = self.storage.update(minres)
# print some information
if self.disp:
self.print_report(minres.fun, accept)
if new_global_min:
print(
f"found new global minimum on step {self.nstep} with "
f"function value {self.energy:g}"
)
# save some variables as BasinHoppingRunner attributes
self.xtrial = minres.x
self.energy_trial = minres.fun
self.accept = accept
return new_global_min
def print_report(self, energy_trial, accept):
"""print a status update"""
minres = self.storage.get_lowest()
print(
f"basinhopping step {self.nstep}: f {self.energy:g} "
f"trial_f {energy_trial:g} accepted {accept} "
f"lowest_f {minres.fun:g}"
)
| BasinHoppingRunner |
python | HIPS__autograd | autograd/numpy/numpy_vspaces.py | {
"start": 911,
"end": 3673
} | class ____(ArrayVSpace):
iscomplex = True
@property
def size(self):
return np.prod(self.shape) * 2
def ones(self):
return np.ones(self.shape, dtype=self.dtype) + 1.0j * np.ones(self.shape, dtype=self.dtype)
def standard_basis(self):
for idxs in np.ndindex(*self.shape):
for v in [1.0, 1.0j]:
vect = np.zeros(self.shape, dtype=self.dtype)
vect[idxs] = v
yield vect
def randn(self):
return np.array(np.random.randn(*self.shape)).astype(self.dtype) + 1.0j * np.array(
np.random.randn(*self.shape)
).astype(self.dtype)
def _inner_prod(self, x, y):
return np.real(np.dot(np.conj(np.ravel(x)), np.ravel(y)))
def _covector(self, x):
return np.conj(x)
VSpace.register(np.ndarray, lambda x: ComplexArrayVSpace(x) if np.iscomplexobj(x) else ArrayVSpace(x))
for type_ in [float, np.longdouble, np.float64, np.float32, np.float16]:
ArrayVSpace.register(type_)
for type_ in [complex, np.clongdouble, np.complex64, np.complex128]:
ComplexArrayVSpace.register(type_)
if np.lib.NumpyVersion(np.__version__) >= "2.0.0":
class EigResultVSpace(NamedTupleVSpace):
seq_type = np.linalg._linalg.EigResult
class EighResultVSpace(NamedTupleVSpace):
seq_type = np.linalg._linalg.EighResult
class QRResultVSpace(NamedTupleVSpace):
seq_type = np.linalg._linalg.QRResult
class SlogdetResultVSpace(NamedTupleVSpace):
seq_type = np.linalg._linalg.SlogdetResult
class SVDResultVSpace(NamedTupleVSpace):
seq_type = np.linalg._linalg.SVDResult
EigResultVSpace.register(np.linalg._linalg.EigResult)
EighResultVSpace.register(np.linalg._linalg.EighResult)
QRResultVSpace.register(np.linalg._linalg.QRResult)
SlogdetResultVSpace.register(np.linalg._linalg.SlogdetResult)
SVDResultVSpace.register(np.linalg._linalg.SVDResult)
elif np.__version__ >= "1.25":
class EigResultVSpace(NamedTupleVSpace):
seq_type = np.linalg.linalg.EigResult
class EighResultVSpace(NamedTupleVSpace):
seq_type = np.linalg.linalg.EighResult
class QRResultVSpace(NamedTupleVSpace):
seq_type = np.linalg.linalg.QRResult
class SlogdetResultVSpace(NamedTupleVSpace):
seq_type = np.linalg.linalg.SlogdetResult
class SVDResultVSpace(NamedTupleVSpace):
seq_type = np.linalg.linalg.SVDResult
EigResultVSpace.register(np.linalg.linalg.EigResult)
EighResultVSpace.register(np.linalg.linalg.EighResult)
QRResultVSpace.register(np.linalg.linalg.QRResult)
SlogdetResultVSpace.register(np.linalg.linalg.SlogdetResult)
SVDResultVSpace.register(np.linalg.linalg.SVDResult)
| ComplexArrayVSpace |
python | coleifer__peewee | tests/models.py | {
"start": 2173,
"end": 58562
} | class ____(ModelTestCase):
def add_user(self, username):
return User.create(username=username)
def add_tweets(self, user, *tweets):
accum = []
for tweet in tweets:
accum.append(Tweet.create(user=user, content=tweet))
return accum
@requires_models(Point)
def test_no_primary_key(self):
p11 = Point.create(x=1, y=1)
p33 = Point.create(x=3, y=3)
p_db = Point.get((Point.x == 3) & (Point.y == 3))
self.assertEqual(p_db.x, 3)
self.assertEqual(p_db.y, 3)
@requires_models(Post, PostNote)
def test_pk_is_fk(self):
with self.database.atomic():
p1 = Post.create(content='p1')
p2 = Post.create(content='p2')
p1n = PostNote.create(post=p1, note='p1n')
p2n = PostNote.create(post=p2, note='p2n')
with self.assertQueryCount(2):
pn = PostNote.get(PostNote.note == 'p1n')
self.assertEqual(pn.post.content, 'p1')
with self.assertQueryCount(1):
pn = (PostNote
.select(PostNote, Post)
.join(Post)
.where(PostNote.note == 'p2n')
.get())
self.assertEqual(pn.post.content, 'p2')
if not IS_SQLITE:
exc_class = ProgrammingError if IS_CRDB else IntegrityError
with self.database.atomic() as txn:
self.assertRaises(exc_class, PostNote.create, note='pxn')
txn.rollback()
@requires_models(User, Tweet)
def test_assertQueryCount(self):
self.add_tweets(self.add_user('charlie'), 'foo', 'bar', 'baz')
def do_test(n):
with self.assertQueryCount(n):
authors = [tweet.user.username for tweet in Tweet.select()]
self.assertRaises(AssertionError, do_test, 1)
self.assertRaises(AssertionError, do_test, 3)
do_test(4)
self.assertRaises(AssertionError, do_test, 5)
@requires_models(Post)
def test_column_field_translation(self):
ts = datetime.datetime(2017, 2, 1, 13, 37)
ts2 = datetime.datetime(2017, 2, 2, 13, 37)
p = Post.create(content='p1', timestamp=ts)
p2 = Post.create(content='p2', timestamp=ts2)
p_db = Post.get(Post.content == 'p1')
self.assertEqual(p_db.content, 'p1')
self.assertEqual(p_db.timestamp, ts)
pd1, pd2 = Post.select().order_by(Post.id).dicts()
self.assertEqual(pd1['content'], 'p1')
self.assertEqual(pd1['timestamp'], ts)
self.assertEqual(pd2['content'], 'p2')
self.assertEqual(pd2['timestamp'], ts2)
@requires_models(User)
def test_insert_many(self):
data = [('u%02d' % i,) for i in range(100)]
with self.database.atomic():
for chunk in chunked(data, 10):
User.insert_many(chunk).execute()
self.assertEqual(User.select().count(), 100)
names = [u.username for u in User.select().order_by(User.username)]
self.assertEqual(names, ['u%02d' % i for i in range(100)])
@requires_models(DfltM)
def test_insert_many_defaults_nullable(self):
data = [
{'name': 'd1'},
{'name': 'd2', 'dflt1': 10},
{'name': 'd3', 'dflt2': 30},
{'name': 'd4', 'dfltn': 40}]
fields = [DfltM.name, DfltM.dflt1, DfltM.dflt2, DfltM.dfltn]
DfltM.insert_many(data, fields).execute()
expected = [
('d1', 1, 2, None),
('d2', 10, 2, None),
('d3', 1, 30, None),
('d4', 1, 2, 40)]
query = DfltM.select().order_by(DfltM.name)
actual = [(d.name, d.dflt1, d.dflt2, d.dfltn) for d in query]
self.assertEqual(actual, expected)
@requires_models(User, Tweet)
def test_create(self):
with self.assertQueryCount(1):
huey = self.add_user('huey')
self.assertEqual(huey.username, 'huey')
self.assertTrue(isinstance(huey.id, (int, long)))
self.assertTrue(huey.id > 0)
with self.assertQueryCount(1):
tweet = Tweet.create(user=huey, content='meow')
self.assertEqual(tweet.user.id, huey.id)
self.assertEqual(tweet.user.username, 'huey')
self.assertEqual(tweet.content, 'meow')
self.assertTrue(isinstance(tweet.id, int))
self.assertTrue(tweet.id > 0)
@requires_models(User)
def test_bulk_create(self):
users = [User(username='u%s' % i) for i in range(5)]
self.assertEqual(User.select().count(), 0)
with self.assertQueryCount(1):
User.bulk_create(users)
self.assertEqual(User.select().count(), 5)
self.assertEqual([u.username for u in User.select().order_by(User.id)],
['u0', 'u1', 'u2', 'u3', 'u4'])
if IS_POSTGRESQL:
self.assertEqual([u.id for u in User.select().order_by(User.id)],
[user.id for user in users])
@requires_models(User)
def test_bulk_create_empty(self):
self.assertEqual(User.select().count(), 0)
User.bulk_create([])
@requires_models(User)
def test_bulk_create_batching(self):
users = [User(username=str(i)) for i in range(10)]
with self.assertQueryCount(4):
User.bulk_create(users, 3)
self.assertEqual(User.select().count(), 10)
self.assertEqual([u.username for u in User.select().order_by(User.id)],
list('0123456789'))
if IS_POSTGRESQL:
self.assertEqual([u.id for u in User.select().order_by(User.id)],
[user.id for user in users])
@requires_models(Person)
def test_bulk_create_error(self):
people = [Person(first='a', last='b'),
Person(first='b', last='c'),
Person(first='a', last='b')]
with self.assertRaises(IntegrityError):
with self.database.atomic():
Person.bulk_create(people)
self.assertEqual(Person.select().count(), 0)
@requires_models(CPK)
def test_bulk_create_composite_key(self):
self.assertEqual(CPK.select().count(), 0)
items = [CPK(key='k1', value=1, extra=1),
CPK(key='k2', value=2, extra=2)]
CPK.bulk_create(items)
self.assertEqual([(c.key, c.value, c.extra) for c in items],
[('k1', 1, 1), ('k2', 2, 2)])
query = CPK.select().order_by(CPK.key).tuples()
self.assertEqual(list(query), [('k1', 1, 1), ('k2', 2, 2)])
@requires_models(Person)
def test_bulk_update(self):
data = [('f%s' % i, 'l%s' % i, datetime.date(1980, i, i))
for i in range(1, 5)]
Person.insert_many(data).execute()
p1, p2, p3, p4 = list(Person.select().order_by(Person.id))
p1.first = 'f1-x'
p1.last = 'l1-x'
p2.first = 'f2-y'
p3.last = 'l3-z'
with self.assertQueryCount(1):
n = Person.bulk_update([p1, p2, p3, p4], ['first', 'last'])
self.assertEqual(n, 3 if IS_MYSQL else 4)
query = Person.select().order_by(Person.id)
self.assertEqual([(p.first, p.last) for p in query], [
('f1-x', 'l1-x'),
('f2-y', 'l2'),
('f3', 'l3-z'),
('f4', 'l4')])
# Modify multiple fields, but only update "first".
p1.first = 'f1-x2'
p1.last = 'l1-x2'
p2.first = 'f2-y2'
p3.last = 'f3-z2'
with self.assertQueryCount(2): # Two batches, so two queries.
n = Person.bulk_update([p1, p2, p3, p4], [Person.first], 2)
self.assertEqual(n, 2 if IS_MYSQL else 4)
query = Person.select().order_by(Person.id)
self.assertEqual([(p.first, p.last) for p in query], [
('f1-x2', 'l1-x'),
('f2-y2', 'l2'),
('f3', 'l3-z'),
('f4', 'l4')])
@requires_models(User, Tweet)
def test_bulk_update_foreign_key(self):
for username in ('charlie', 'huey', 'zaizee'):
user = User.create(username=username)
for i in range(2):
Tweet.create(user=user, content='%s-%s' % (username, i))
c, h, z = list(User.select().order_by(User.id))
c0, c1, h0, h1, z0, z1 = list(Tweet.select().order_by(Tweet.id))
c0.content = 'charlie-0x'
c1.user = h
h0.user = z
h1.content = 'huey-1x'
z0.user = c
z0.content = 'zaizee-0x'
with self.assertQueryCount(1):
Tweet.bulk_update([c0, c1, h0, h1, z0, z1], ['user', 'content'])
query = (Tweet
.select(Tweet.content, User.username)
.join(User)
.order_by(Tweet.id)
.objects())
self.assertEqual([(t.username, t.content) for t in query], [
('charlie', 'charlie-0x'),
('huey', 'charlie-1'),
('zaizee', 'huey-0'),
('huey', 'huey-1x'),
('charlie', 'zaizee-0x'),
('zaizee', 'zaizee-1')])
@requires_models(Person)
def test_bulk_update_integrityerror(self):
people = [Person(first='f%s' % i, last='l%s' % i, dob='1980-01-01')
for i in range(10)]
Person.bulk_create(people)
# Get list of people w/the IDs populated. They will not be set if the
# underlying DB is Sqlite or MySQL.
people = list(Person.select().order_by(Person.id))
# First we'll just modify all the first and last names.
for person in people:
person.first += '-x'
person.last += '-x'
# Now we'll introduce an issue that will cause an integrity error.
p3, p7 = people[3], people[7]
p3.first = p7.first = 'fx'
p3.last = p7.last = 'lx'
with self.assertRaises(IntegrityError):
with self.assertQueryCount(1):
with self.database.atomic():
Person.bulk_update(people, fields=['first', 'last'])
with self.assertRaises(IntegrityError):
# 10 objects, batch size=4, so 0-3, 4-7, 8&9. But we never get to 8
# and 9 because of the integrity error processing the 2nd batch.
with self.assertQueryCount(2):
with self.database.atomic():
Person.bulk_update(people, ['first', 'last'], 4)
# Ensure no changes were made.
vals = [(p.first, p.last) for p in Person.select().order_by(Person.id)]
self.assertEqual(vals, [('f%s' % i, 'l%s' % i) for i in range(10)])
@requires_models(User, Tweet)
def test_bulk_update_apply_dbvalue(self):
u = User.create(username='u')
t1, t2, t3 = [Tweet.create(user=u, content=str(i)) for i in (1, 2, 3)]
# If we don't end up applying the field's db_value() to these timestamp
# values, then we will end up with bad data or an error when attempting
# to do the update.
t1.timestamp = datetime.datetime(2019, 1, 2, 3, 4, 5)
t2.timestamp = datetime.date(2019, 1, 3)
t3.timestamp = 1337133700 # 2012-05-15T21:1:40.
t3_dt = datetime.datetime.fromtimestamp(1337133700)
Tweet.bulk_update([t1, t2, t3], fields=['timestamp'])
# Ensure that the values were handled appropriately.
t1, t2, t3 = list(Tweet.select().order_by(Tweet.id))
self.assertEqual(t1.timestamp, datetime.datetime(2019, 1, 2, 3, 4, 5))
self.assertEqual(t2.timestamp, datetime.datetime(2019, 1, 3, 0, 0, 0))
self.assertEqual(t3.timestamp, t3_dt)
@skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB)
@requires_models(CPK)
def test_bulk_update_cte(self):
CPK.insert_many([('k1', 1, 1), ('k2', 2, 2), ('k3', 3, 3)]).execute()
# We can also do a bulk-update using ValuesList when the primary-key of
# the model is a composite-pk.
new_values = [('k1', 1, 10), ('k3', 3, 30)]
cte = ValuesList(new_values).cte('new_values', columns=('k', 'v', 'x'))
# We have to use a subquery to update the individual column, as SQLite
# does not support UPDATE/FROM syntax.
subq = (cte
.select(cte.c.x)
.where(CPK._meta.primary_key == (cte.c.k, cte.c.v)))
# Perform the update, assigning extra the new value from the values
# list, and restricting the overall update using the composite pk.
res = (CPK
.update(extra=subq)
.where(CPK._meta.primary_key.in_(cte.select(cte.c.k, cte.c.v)))
.with_cte(cte)
.execute())
self.assertEqual(list(sorted(CPK.select().tuples())), [
('k1', 1, 10), ('k2', 2, 2), ('k3', 3, 30)])
@requires_models(User)
def test_insert_rowcount(self):
User.create(username='u0') # Ensure that last insert ID != rowcount.
iq = User.insert_many([(u,) for u in ('u1', 'u2', 'u3')])
self.assertEqual(iq.as_rowcount().execute(), 3)
# Now explicitly specify empty returning() for all DBs.
iq = User.insert_many([(u,) for u in ('u4', 'u5')]).returning()
self.assertEqual(iq.as_rowcount().execute(), 2)
query = (User
.select(User.username.concat('-x'))
.where(User.username.in_(['u1', 'u2'])))
iq = User.insert_from(query, ['username'])
self.assertEqual(iq.as_rowcount().execute(), 2)
query = (User
.select(User.username.concat('-y'))
.where(User.username.in_(['u3', 'u4'])))
iq = User.insert_from(query, ['username']).returning()
self.assertEqual(iq.as_rowcount().execute(), 2)
query = User.insert({'username': 'u5'})
self.assertEqual(query.as_rowcount().execute(), 1)
@skip_if(IS_POSTGRESQL or IS_CRDB, 'requires sqlite or mysql')
@requires_models(Emp)
def test_replace_rowcount(self):
Emp.create(first='beanie', last='cat', empno='998')
data = [
('beanie', 'cat', '999'),
('mickey', 'dog', '123')]
fields = (Emp.first, Emp.last, Emp.empno)
# MySQL returns 3, Sqlite 2. However, older stdlib sqlite3 does not
# work properly, so we don't assert a result count here.
Emp.replace_many(data, fields=fields).execute()
query = Emp.select(Emp.first, Emp.last, Emp.empno).order_by(Emp.last)
self.assertEqual(list(query.tuples()), [
('beanie', 'cat', '999'),
('mickey', 'dog', '123')])
@requires_models(User, Tweet)
def test_get_shortcut(self):
huey = self.add_user('huey')
self.add_tweets(huey, 'meow', 'purr', 'wheeze')
mickey = self.add_user('mickey')
self.add_tweets(mickey, 'woof', 'yip')
# Lookup using just the ID.
huey_db = User.get(huey.id)
self.assertEqual(huey.id, huey_db.id)
# Lookup using an expression.
huey_db = User.get(User.username == 'huey')
self.assertEqual(huey.id, huey_db.id)
mickey_db = User.get(User.username == 'mickey')
self.assertEqual(mickey.id, mickey_db.id)
self.assertEqual(User.get(username='mickey').id, mickey.id)
# No results is an exception.
self.assertRaises(User.DoesNotExist, User.get, User.username == 'x')
# Multiple results is OK.
tweet = Tweet.get(Tweet.user == huey_db)
self.assertTrue(tweet.content in ('meow', 'purr', 'wheeze'))
# We cannot traverse a join like this.
@self.database.atomic()
def has_error():
Tweet.get(User.username == 'huey')
self.assertRaises(Exception, has_error)
# This is OK, though.
tweet = Tweet.get(user__username='mickey')
self.assertTrue(tweet.content in ('woof', 'yip'))
tweet = Tweet.get(content__ilike='w%',
user__username__ilike='%ck%')
self.assertEqual(tweet.content, 'woof')
@requires_models(User)
def test_get_with_alias(self):
huey = self.add_user('huey')
query = (User
.select(User.username.alias('name'))
.where(User.username == 'huey'))
obj = query.dicts().get()
self.assertEqual(obj, {'name': 'huey'})
obj = query.objects().get()
self.assertEqual(obj.name, 'huey')
@requires_models(User, Tweet)
def test_get_or_none(self):
huey = self.add_user('huey')
self.assertEqual(User.get_or_none(User.username == 'huey').username,
'huey')
self.assertIsNone(User.get_or_none(User.username == 'foo'))
@requires_models(User, Tweet)
def test_model_select_get_or_none(self):
huey = self.add_user('huey')
huey_db = User.select().where(User.username == 'huey').get_or_none()
self.assertEqual(huey_db.username, 'huey')
self.assertIsNone(
User.select().where(User.username == 'foo').get_or_none())
@requires_models(User, Color)
def test_get_by_id(self):
huey = self.add_user('huey')
self.assertEqual(User.get_by_id(huey.id).username, 'huey')
Color.insert_many([
{'name': 'red', 'is_neutral': False},
{'name': 'blue', 'is_neutral': False}]).execute()
self.assertEqual(Color.get_by_id('red').name, 'red')
self.assertRaises(Color.DoesNotExist, Color.get_by_id, 'green')
self.assertEqual(Color['red'].name, 'red')
self.assertRaises(Color.DoesNotExist, lambda: Color['green'])
@requires_models(User, Color)
def test_get_set_item(self):
huey = self.add_user('huey')
huey_db = User[huey.id]
self.assertEqual(huey_db.username, 'huey')
User[huey.id] = {'username': 'huey-x'}
huey_db = User[huey.id]
self.assertEqual(huey_db.username, 'huey-x')
del User[huey.id]
self.assertEqual(len(User), 0)
# Allow creation by specifying None for key.
User[None] = {'username': 'zaizee'}
User.get(User.username == 'zaizee')
@requires_models(User)
def test_get_or_create(self):
huey, created = User.get_or_create(username='huey')
self.assertTrue(created)
huey2, created2 = User.get_or_create(username='huey')
self.assertFalse(created2)
self.assertEqual(huey.id, huey2.id)
@requires_models(Category)
def test_get_or_create_self_referential_fk(self):
parent = Category.create(name='parent')
child, created = Category.get_or_create(parent=parent, name='child')
child_db = Category.get(Category.parent == parent)
self.assertEqual(child_db.parent.name, 'parent')
self.assertEqual(child_db.name, 'child')
@requires_models(Person)
def test_get_or_create_defaults(self):
p, created = Person.get_or_create(first='huey', defaults={
'last': 'cat',
'dob': datetime.date(2010, 7, 1)})
self.assertTrue(created)
p_db = Person.get(Person.first == 'huey')
self.assertEqual(p_db.first, 'huey')
self.assertEqual(p_db.last, 'cat')
self.assertEqual(p_db.dob, datetime.date(2010, 7, 1))
p2, created = Person.get_or_create(first='huey', defaults={
'last': 'kitten',
'dob': datetime.date(2020, 1, 1)})
self.assertFalse(created)
self.assertEqual(p2.first, 'huey')
self.assertEqual(p2.last, 'cat')
self.assertEqual(p2.dob, datetime.date(2010, 7, 1))
@requires_models(Person)
def test_save(self):
huey = Person(first='huey', last='cat', dob=datetime.date(2010, 7, 1))
self.assertTrue(huey.save() > 0)
self.assertTrue(huey.id is not None) # Ensure PK is set.
orig_id = huey.id
# Test initial save (INSERT) worked and data is all present.
huey_db = Person.get(first='huey', last='cat')
self.assertEqual(huey_db.id, huey.id)
self.assertEqual(huey_db.first, 'huey')
self.assertEqual(huey_db.last, 'cat')
self.assertEqual(huey_db.dob, datetime.date(2010, 7, 1))
# Make a change and do a second save (UPDATE).
huey.dob = datetime.date(2010, 7, 2)
self.assertTrue(huey.save() > 0)
self.assertEqual(huey.id, orig_id)
# Test UPDATE worked correctly.
huey_db = Person.get(first='huey', last='cat')
self.assertEqual(huey_db.id, huey.id)
self.assertEqual(huey_db.first, 'huey')
self.assertEqual(huey_db.last, 'cat')
self.assertEqual(huey_db.dob, datetime.date(2010, 7, 2))
self.assertEqual(Person.select().count(), 1)
@requires_models(Person)
def test_save_only(self):
huey = Person(first='huey', last='cat', dob=datetime.date(2010, 7, 1))
huey.save()
huey.first = 'huker'
huey.last = 'kitten'
self.assertTrue(huey.save(only=('first',)) > 0)
huey_db = Person.get_by_id(huey.id)
self.assertEqual(huey_db.first, 'huker')
self.assertEqual(huey_db.last, 'cat')
self.assertEqual(huey_db.dob, datetime.date(2010, 7, 1))
huey.first = 'hubie'
self.assertTrue(huey.save(only=[Person.last]) > 0)
huey_db = Person.get_by_id(huey.id)
self.assertEqual(huey_db.first, 'huker')
self.assertEqual(huey_db.last, 'kitten')
self.assertEqual(huey_db.dob, datetime.date(2010, 7, 1))
self.assertEqual(Person.select().count(), 1)
@requires_models(Color, User)
def test_save_force(self):
huey = User(username='huey')
self.assertTrue(huey.save() > 0)
huey_id = huey.id
huey.username = 'zaizee'
self.assertTrue(huey.save(force_insert=True, only=('username',)) > 0)
zaizee_id = huey.id
self.assertTrue(huey_id != zaizee_id)
query = User.select().order_by(User.username)
self.assertEqual([user.username for user in query], ['huey', 'zaizee'])
color = Color(name='red')
self.assertFalse(bool(color.save()))
self.assertEqual(Color.select().count(), 0)
color = Color(name='blue')
color.save(force_insert=True)
self.assertEqual(Color.select().count(), 1)
with self.database.atomic():
self.assertRaises(IntegrityError,
color.save,
force_insert=True)
@requires_models(User, Tweet)
def test_populate_unsaved_relations(self):
user = User(username='charlie')
tweet = Tweet(user=user, content='foo')
self.assertTrue(user.save())
self.assertTrue(user.id is not None)
with self.assertQueryCount(1):
self.assertEqual(tweet.user_id, user.id)
self.assertTrue(tweet.save())
self.assertEqual(tweet.user_id, user.id)
tweet_db = Tweet.get(Tweet.content == 'foo')
self.assertEqual(tweet_db.user.username, 'charlie')
@requires_models(User, Tweet)
def test_model_select(self):
huey = self.add_user('huey')
mickey = self.add_user('mickey')
zaizee = self.add_user('zaizee')
self.add_tweets(huey, 'meow', 'hiss', 'purr')
self.add_tweets(mickey, 'woof', 'whine')
with self.assertQueryCount(1):
query = (Tweet
.select(Tweet.content, User.username)
.join(User)
.order_by(User.username, Tweet.content))
self.assertSQL(query, (
'SELECT "t1"."content", "t2"."username" '
'FROM "tweet" AS "t1" '
'INNER JOIN "users" AS "t2" '
'ON ("t1"."user_id" = "t2"."id") '
'ORDER BY "t2"."username", "t1"."content"'), [])
tweets = list(query)
self.assertEqual([(t.content, t.user.username) for t in tweets], [
('hiss', 'huey'),
('meow', 'huey'),
('purr', 'huey'),
('whine', 'mickey'),
('woof', 'mickey')])
@requires_models(User, Tweet, Favorite)
def test_join_two_fks(self):
with self.database.atomic():
huey = self.add_user('huey')
mickey = self.add_user('mickey')
h_m, h_p, h_h = self.add_tweets(huey, 'meow', 'purr', 'hiss')
m_w, m_b = self.add_tweets(mickey, 'woof', 'bark')
Favorite.create(user=huey, tweet=m_w)
Favorite.create(user=mickey, tweet=h_m)
Favorite.create(user=mickey, tweet=h_p)
with self.assertQueryCount(1):
UA = User.alias()
query = (Favorite
.select(Favorite, Tweet, User, UA)
.join(Tweet)
.join(User)
.switch(Favorite)
.join(UA, on=Favorite.user)
.order_by(Favorite.id))
accum = [(f.tweet.user.username, f.tweet.content, f.user.username)
for f in query]
self.assertEqual(accum, [
('mickey', 'woof', 'huey'),
('huey', 'meow', 'mickey'),
('huey', 'purr', 'mickey')])
with self.assertQueryCount(5):
# Test intermediate models not selected.
query = (Favorite
.select()
.join(Tweet)
.switch(Favorite)
.join(User)
.where(User.username == 'mickey')
.order_by(Favorite.id))
accum = [(f.user.username, f.tweet.content) for f in query]
self.assertEqual(accum, [('mickey', 'meow'), ('mickey', 'purr')])
@requires_models(A, B, C)
def test_join_issue_1482(self):
a1 = A.create(a='a1')
b1 = B.create(a=a1, b='b1')
c1 = C.create(b=b1, c='c1')
with self.assertQueryCount(3):
query = C.select().join(B).join(A).where(A.a == 'a1')
accum = [(c.c, c.b.b, c.b.a.a) for c in query]
self.assertEqual(accum, [('c1', 'b1', 'a1')])
@requires_models(A, B, C)
def test_join_empty_intermediate_model(self):
a1 = A.create(a='a1')
a2 = A.create(a='a2')
b11 = B.create(a=a1, b='b11')
b12 = B.create(a=a1, b='b12')
b21 = B.create(a=a2, b='b21')
c111 = C.create(b=b11, c='c111')
c112 = C.create(b=b11, c='c112')
c211 = C.create(b=b21, c='c211')
with self.assertQueryCount(1):
query = C.select(C, A.a).join(B).join(A).order_by(C.c)
accum = [(c.c, c.b.a.a) for c in query]
self.assertEqual(accum, [
('c111', 'a1'),
('c112', 'a1'),
('c211', 'a2')])
with self.assertQueryCount(1):
query = C.select(C, B, A).join(B).join(A).order_by(C.c)
accum = [(c.c, c.b.b, c.b.a.a) for c in query]
self.assertEqual(accum, [
('c111', 'b11', 'a1'),
('c112', 'b11', 'a1'),
('c211', 'b21', 'a2')])
@requires_models(City, Venue, Event)
def test_join_empty_relations(self):
with self.database.atomic():
city = City.create(name='Topeka')
venue1 = Venue.create(name='House', city=city, city_n=city)
venue2 = Venue.create(name='Nowhere', city=city, city_n=None)
event1 = Event.create(name='House Party', venue=venue1)
event2 = Event.create(name='Holiday')
event3 = Event.create(name='Nowhere Party', venue=venue2)
with self.assertQueryCount(1):
query = (Event
.select(Event, Venue, City)
.join(Venue, JOIN.LEFT_OUTER)
.join(City, JOIN.LEFT_OUTER, on=Venue.city)
.order_by(Event.id))
# Here we have two left-outer joins, and the second Event
# ("Holiday"), does not have an associated Venue (hence, no City).
# Peewee would attach an empty Venue() model to the event, however.
# It did this since we are selecting from Venue/City and Venue is
# an intermediary model. It is more correct for Event.venue to be
# None in this case. This is now patched / fixed.
r = [(e.name, e.venue and e.venue.city.name or None)
for e in query]
self.assertEqual(r, [
('House Party', 'Topeka'),
('Holiday', None),
('Nowhere Party', 'Topeka')])
with self.assertQueryCount(1):
query = (Event
.select(Event, Venue, City)
.join(Venue, JOIN.INNER)
.join(City, JOIN.LEFT_OUTER, on=Venue.city_n)
.order_by(Event.id))
# Here we have an inner join and a left-outer join. The furthest
# object (City) will be NULL for the "Nowhere Party". Make sure
# that the object is left as None and not populated with an empty
# City instance.
accum = []
for event in query:
city_name = event.venue.city_n and event.venue.city_n.name
accum.append((event.name, event.venue.name, city_name))
self.assertEqual(accum, [
('House Party', 'House', 'Topeka'),
('Nowhere Party', 'Nowhere', None)])
@requires_models(Relationship, Person)
def test_join_same_model_twice(self):
d = datetime.date(2010, 1, 1)
huey = Person.create(first='huey', last='cat', dob=d)
zaizee = Person.create(first='zaizee', last='cat', dob=d)
mickey = Person.create(first='mickey', last='dog', dob=d)
relationships = (
(huey, zaizee),
(zaizee, huey),
(mickey, huey),
)
for src, dest in relationships:
Relationship.create(from_person=src, to_person=dest)
PA = Person.alias()
with self.assertQueryCount(1):
query = (Relationship
.select(Relationship, Person, PA)
.join(Person, on=Relationship.from_person)
.switch(Relationship)
.join(PA, on=Relationship.to_person)
.order_by(Relationship.id))
results = [(r.from_person.first, r.to_person.first) for r in query]
self.assertEqual(results, [
('huey', 'zaizee'),
('zaizee', 'huey'),
('mickey', 'huey')])
@requires_models(User)
def test_peek(self):
for username in ('huey', 'mickey', 'zaizee'):
self.add_user(username)
with self.assertQueryCount(1):
query = User.select(User.username).order_by(User.username).dicts()
self.assertEqual(query.peek(n=1), {'username': 'huey'})
self.assertEqual(query.peek(n=2), [{'username': 'huey'},
{'username': 'mickey'}])
@requires_models(User, Tweet, Favorite)
def test_multi_join(self):
u1 = User.create(username='u1')
u2 = User.create(username='u2')
u3 = User.create(username='u3')
t1_1 = Tweet.create(user=u1, content='t1-1')
t1_2 = Tweet.create(user=u1, content='t1-2')
t2_1 = Tweet.create(user=u2, content='t2-1')
t2_2 = Tweet.create(user=u2, content='t2-2')
favorites = ((u1, t2_1),
(u1, t2_2),
(u2, t1_1),
(u3, t1_2),
(u3, t2_2))
for user, tweet in favorites:
Favorite.create(user=user, tweet=tweet)
TweetUser = User.alias('u2')
with self.assertQueryCount(1):
query = (Favorite
.select(Favorite.id,
Tweet.content,
User.username,
TweetUser.username)
.join(Tweet)
.join(TweetUser, on=(Tweet.user == TweetUser.id))
.switch(Favorite)
.join(User)
.order_by(Tweet.content, Favorite.id))
self.assertSQL(query, (
'SELECT '
'"t1"."id", "t2"."content", "t3"."username", "u2"."username" '
'FROM "favorite" AS "t1" '
'INNER JOIN "tweet" AS "t2" ON ("t1"."tweet_id" = "t2"."id") '
'INNER JOIN "users" AS "u2" ON ("t2"."user_id" = "u2"."id") '
'INNER JOIN "users" AS "t3" ON ("t1"."user_id" = "t3"."id") '
'ORDER BY "t2"."content", "t1"."id"'), [])
accum = [(f.tweet.user.username, f.tweet.content, f.user.username)
for f in query]
self.assertEqual(accum, [
('u1', 't1-1', 'u2'),
('u1', 't1-2', 'u3'),
('u2', 't2-1', 'u1'),
('u2', 't2-2', 'u1'),
('u2', 't2-2', 'u3')])
res = query.count()
self.assertEqual(res, 5)
def _create_user_tweets(self):
data = (('huey', ('meow', 'purr', 'hiss')),
('zaizee', ()),
('mickey', ('woof', 'grr')))
with self.database.atomic():
ts = int(time.time())
for username, tweets in data:
user = User.create(username=username)
for tweet in tweets:
Tweet.create(user=user, content=tweet, timestamp=ts)
ts += 1
@requires_models(User, Tweet)
def test_join_subquery(self):
self._create_user_tweets()
# Select note user and timestamp of most recent tweet.
with self.assertQueryCount(1):
TA = Tweet.alias()
max_q = (TA
.select(TA.user, fn.MAX(TA.timestamp).alias('max_ts'))
.group_by(TA.user)
.alias('max_q'))
predicate = ((Tweet.user == max_q.c.user_id) &
(Tweet.timestamp == max_q.c.max_ts))
latest = (Tweet
.select(Tweet.user, Tweet.content, Tweet.timestamp)
.join(max_q, on=predicate)
.alias('latest'))
query = (User
.select(User, latest.c.content, latest.c.timestamp)
.join(latest, on=(User.id == latest.c.user_id)))
data = [(user.username, user.tweet.content) for user in query]
# Failing on travis-ci...old SQLite?
if not IS_SQLITE_OLD:
self.assertEqual(data, [
('huey', 'hiss'),
('mickey', 'grr')])
with self.assertQueryCount(1):
query = (Tweet
.select(Tweet, User)
.join(max_q, on=predicate)
.switch(Tweet)
.join(User))
data = [(note.user.username, note.content) for note in query]
self.assertEqual(data, [
('huey', 'hiss'),
('mickey', 'grr')])
@requires_models(User, Tweet)
def test_join_subquery_2(self):
self._create_user_tweets()
with self.assertQueryCount(1):
users = (User
.select(User.id, User.username)
.where(User.username.in_(['huey', 'zaizee'])))
query = (Tweet
.select(Tweet.content.alias('content'),
users.c.username.alias('username'))
.join(users, on=(Tweet.user == users.c.id))
.order_by(Tweet.id))
self.assertSQL(query, (
'SELECT "t1"."content" AS "content", '
'"t2"."username" AS "username"'
' FROM "tweet" AS "t1" '
'INNER JOIN (SELECT "t3"."id", "t3"."username" '
'FROM "users" AS "t3" '
'WHERE ("t3"."username" IN (?, ?))) AS "t2" '
'ON ("t1"."user_id" = "t2"."id") '
'ORDER BY "t1"."id"'), ['huey', 'zaizee'])
results = [(t.content, t.user.username) for t in query]
self.assertEqual(results, [
('meow', 'huey'),
('purr', 'huey'),
('hiss', 'huey')])
@skip_if(IS_SQLITE_OLD or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES))
@requires_models(User, Tweet)
def test_join_subquery_cte(self):
self._create_user_tweets()
cte = (User
.select(User.id, User.username)
.where(User.username.in_(['huey', 'zaizee']))\
.cte('cats'))
with self.assertQueryCount(1):
# Attempt join with subquery as common-table expression.
query = (Tweet
.select(Tweet.content, cte.c.username)
.join(cte, on=(Tweet.user == cte.c.id))
.order_by(Tweet.id)
.with_cte(cte))
self.assertSQL(query, (
'WITH "cats" AS ('
'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" '
'WHERE ("t1"."username" IN (?, ?))) '
'SELECT "t2"."content", "cats"."username" FROM "tweet" AS "t2" '
'INNER JOIN "cats" ON ("t2"."user_id" = "cats"."id") '
'ORDER BY "t2"."id"'), ['huey', 'zaizee'])
self.assertEqual([t.content for t in query],
['meow', 'purr', 'hiss'])
@skip_if(IS_MYSQL) # MariaDB does not support LIMIT in subqueries!
@requires_models(User)
def test_subquery_emulate_window(self):
# We have duplicated users. Select a maximum of 2 instances of the
# username.
name2count = {
'beanie': 6,
'huey': 5,
'mickey': 3,
'pipey': 1,
'zaizee': 4}
names = []
for name, count in sorted(name2count.items()):
names += [name] * count
User.insert_many([(i, n) for i, n in enumerate(names, 1)],
[User.id, User.username]).execute()
# The results we are trying to obtain.
expected = [
('beanie', 1), ('beanie', 2),
('huey', 7), ('huey', 8),
('mickey', 12), ('mickey', 13),
('pipey', 15),
('zaizee', 16), ('zaizee', 17)]
with self.assertQueryCount(1):
# Using a self-join.
UA = User.alias()
query = (User
.select(User.username, UA.id)
.join(UA, on=((UA.username == User.username) &
(UA.id >= User.id)))
.group_by(User.username, UA.id)
.having(fn.COUNT(UA.id) < 3)
.order_by(User.username, UA.id))
self.assertEqual(query.tuples()[:], expected)
with self.assertQueryCount(1):
# Using a correlated subquery.
subq = (UA
.select(UA.id)
.where(User.username == UA.username)
.order_by(UA.id)
.limit(2))
query = (User
.select(User.username, User.id)
.where(User.id.in_(subq.alias('subq')))
.order_by(User.username, User.id))
self.assertEqual(query.tuples()[:], expected)
@requires_models(User, Tweet)
def test_subquery_alias_selection(self):
data = (
('huey', ('meow', 'hiss', 'purr')),
('mickey', ('woof', 'bark')),
('zaizee', ()))
with self.database.atomic():
for username, tweets in data:
user = User.create(username=username)
for tweet in tweets:
Tweet.create(user=user, content=tweet)
with self.assertQueryCount(1):
subq = (Tweet
.select(fn.COUNT(Tweet.id))
.where(Tweet.user == User.id))
query = (User
.select(User.username, subq.alias('tweet_count'))
.order_by(User.id))
self.assertEqual([(u.username, u.tweet_count) for u in query], [
('huey', 3),
('mickey', 2),
('zaizee', 0)])
@requires_pglike
@requires_models(User)
def test_join_on_valueslist(self):
for username in ('huey', 'mickey', 'zaizee'):
User.create(username=username)
vl = ValuesList([('huey',), ('zaizee',)], columns=['username'])
with self.assertQueryCount(1):
query = (User
.select(vl.c.username)
.join(vl, on=(User.username == vl.c.username))
.order_by(vl.c.username.desc()))
self.assertEqual([u.username for u in query], ['zaizee', 'huey'])
@skip_if(IS_SQLITE_OLD or IS_MYSQL or IS_CRDB)
@requires_models(User)
def test_multi_update(self):
data = [(i, 'u%s' % i) for i in range(1, 4)]
User.insert_many(data, fields=[User.id, User.username]).execute()
data = [(i, 'u%sx' % i) for i in range(1, 3)]
vl = ValuesList(data)
cte = vl.select().cte('uv', columns=('id', 'username'))
subq = cte.select(cte.c.username).where(cte.c.id == User.id)
res = (User
.update(username=subq)
.where(User.id.in_(cte.select(cte.c.id)))
.with_cte(cte)
.execute())
query = User.select().order_by(User.id)
self.assertEqual([(u.id, u.username) for u in query], [
(1, 'u1x'),
(2, 'u2x'),
(3, 'u3')])
@requires_models(User, Tweet)
def test_insert_query_value(self):
huey = self.add_user('huey')
query = User.select(User.id).where(User.username == 'huey')
tid = Tweet.insert(content='meow', user=query).execute()
tweet = Tweet[tid]
self.assertEqual(tweet.user.id, huey.id)
self.assertEqual(tweet.user.username, 'huey')
@skip_if(IS_SQLITE and not IS_SQLITE_9, 'requires sqlite >= 3.9')
@requires_models(Register)
def test_compound_select(self):
for i in range(10):
Register.create(value=i)
q1 = Register.select().where(Register.value < 2)
q2 = Register.select().where(Register.value > 7)
c1 = (q1 | q2).order_by(SQL('2'))
self.assertSQL(c1, (
'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" '
'WHERE ("t1"."value" < ?) UNION '
'SELECT "t2"."id", "t2"."value" FROM "register" AS "t2" '
'WHERE ("t2"."value" > ?) ORDER BY 2'), [2, 7])
self.assertEqual([row.value for row in c1], [0, 1, 8, 9],
[row.__data__ for row in c1])
self.assertEqual(c1.count(), 4)
q3 = Register.select().where(Register.value == 5)
c2 = (c1.order_by() | q3).order_by(SQL('2'))
self.assertSQL(c2, (
'SELECT "t1"."id", "t1"."value" FROM "register" AS "t1" '
'WHERE ("t1"."value" < ?) UNION '
'SELECT "t2"."id", "t2"."value" FROM "register" AS "t2" '
'WHERE ("t2"."value" > ?) UNION '
'SELECT "t2"."id", "t2"."value" FROM "register" AS "t2" '
'WHERE ("t2"."value" = ?) ORDER BY 2'), [2, 7, 5])
self.assertEqual([row.value for row in c2], [0, 1, 5, 8, 9])
self.assertEqual(c2.count(), 5)
@requires_models(User, Tweet)
def test_union_column_resolution(self):
u1 = User.create(id=1, username='u1')
u2 = User.create(id=2, username='u2')
q1 = User.select().where(User.id == 1)
q2 = User.select()
union = q1 | q2
self.assertSQL(union, (
'SELECT "t1"."id", "t1"."username" FROM "users" AS "t1" '
'WHERE ("t1"."id" = ?) '
'UNION '
'SELECT "t2"."id", "t2"."username" FROM "users" AS "t2"'), [1])
results = [(user.id, user.username) for user in union]
self.assertEqual(sorted(results), [
(1, 'u1'),
(2, 'u2')])
t1_1 = Tweet.create(id=1, user=u1, content='u1-t1')
t1_2 = Tweet.create(id=2, user=u1, content='u1-t2')
t2_1 = Tweet.create(id=3, user=u2, content='u2-t1')
with self.assertQueryCount(1):
q1 = Tweet.select(Tweet, User).join(User).where(User.id == 1)
q2 = Tweet.select(Tweet, User).join(User)
union = q1 | q2
self.assertSQL(union, (
'SELECT "t1"."id", "t1"."user_id", "t1"."content", '
'"t1"."timestamp", "t2"."id", "t2"."username" '
'FROM "tweet" AS "t1" '
'INNER JOIN "users" AS "t2" ON ("t1"."user_id" = "t2"."id") '
'WHERE ("t2"."id" = ?) '
'UNION '
'SELECT "t3"."id", "t3"."user_id", "t3"."content", '
'"t3"."timestamp", "t4"."id", "t4"."username" '
'FROM "tweet" AS "t3" '
'INNER JOIN "users" AS "t4" ON ("t3"."user_id" = "t4"."id")'),
[1])
results = [(t.id, t.content, t.user.username) for t in union]
self.assertEqual(sorted(results), [
(1, 'u1-t1', 'u1'),
(2, 'u1-t2', 'u1'),
(3, 'u2-t1', 'u2')])
with self.assertQueryCount(1):
union_flat = (q1 | q2).objects()
results = [(t.id, t.content, t.username) for t in union_flat]
self.assertEqual(sorted(results), [
(1, 'u1-t1', 'u1'),
(2, 'u1-t2', 'u1'),
(3, 'u2-t1', 'u2')])
@requires_models(User, Tweet)
def test_compound_select_as_subquery(self):
with self.database.atomic():
for i in range(5):
user = User.create(username='u%s' % i)
for j in range(i * 2):
Tweet.create(user=user, content='t%s-%s' % (i, j))
q1 = (Tweet
.select(Tweet.id, Tweet.content, User.username)
.join(User)
.where(User.username == 'u3'))
q2 = (Tweet
.select(Tweet.id, Tweet.content, User.username)
.join(User)
.where(User.username.in_(['u2', 'u4'])))
union = (q1 | q2)
q = (union
.select_from(union.c.username, fn.COUNT(union.c.id).alias('ct'))
.group_by(union.c.username)
.order_by(fn.COUNT(union.c.id).desc())
.dicts())
self.assertEqual(list(q), [
{'username': 'u4', 'ct': 8},
{'username': 'u3', 'ct': 6},
{'username': 'u2', 'ct': 4}])
@requires_models(User, Tweet)
def test_union_with_join(self):
u1, u2 = [User.create(username='u%s' % i) for i in (1, 2)]
for u, ts in ((u1, ('t1', 't2')), (u2, ('t1',))):
for t in ts:
Tweet.create(user=u, content='%s-%s' % (u.username, t))
with self.assertQueryCount(1):
q1 = (User
.select(User, Tweet)
.join(Tweet, on=(Tweet.user == User.id).alias('foo')))
q2 = (User
.select(User, Tweet)
.join(Tweet, on=(Tweet.user == User.id).alias('foo')))
self.assertEqual(
sorted([(user.username, user.foo.content) for user in q1]),
[('u1', 'u1-t1'), ('u1', 'u1-t2'), ('u2', 'u2-t1')])
with self.assertQueryCount(1):
uq = q1.union_all(q2)
result = [(user.username, user.foo.content) for user in uq]
self.assertEqual(sorted(result), [
('u1', 'u1-t1'),
('u1', 'u1-t1'),
('u1', 'u1-t2'),
('u1', 'u1-t2'),
('u2', 'u2-t1'),
('u2', 'u2-t1'),
])
@skip_if(IS_SQLITE_OLD or (IS_MYSQL and not IS_MYSQL_ADVANCED_FEATURES))
@requires_models(User)
def test_union_cte(self):
with self.database.atomic():
(User
.insert_many({'username': 'u%s' % i} for i in range(10))
.execute())
lhs = User.select().where(User.username.in_(['u1', 'u3']))
rhs = User.select().where(User.username.in_(['u5', 'u7']))
u_cte = (lhs | rhs).cte('users_union')
query = (User
.select(User.username)
.join(u_cte, on=(User.id == u_cte.c.id))
.where(User.username.in_(['u1', 'u7']))
.with_cte(u_cte))
self.assertEqual(sorted([u.username for u in query]), ['u1', 'u7'])
@requires_models(Category)
def test_self_referential_fk(self):
self.assertTrue(Category.parent.rel_model is Category)
root = Category.create(name='root')
c1 = Category.create(parent=root, name='child-1')
c2 = Category.create(parent=root, name='child-2')
with self.assertQueryCount(1):
Parent = Category.alias('p')
query = (Category
.select(
Parent.name,
Category.name)
.where(Category.parent == root)
.order_by(Category.name))
query = query.join(Parent, on=(Category.parent == Parent.name))
c1_db, c2_db = list(query)
self.assertEqual(c1_db.name, 'child-1')
self.assertEqual(c1_db.parent.name, 'root')
self.assertEqual(c2_db.name, 'child-2')
self.assertEqual(c2_db.parent.name, 'root')
@requires_models(Category)
def test_empty_joined_instance(self):
root = Category.create(name='a')
c1 = Category.create(name='c1', parent=root)
c2 = Category.create(name='c2', parent=root)
with self.assertQueryCount(1):
Parent = Category.alias('p')
query = (Category
.select(Category, Parent)
.join(Parent, JOIN.LEFT_OUTER,
on=(Category.parent == Parent.name))
.order_by(Category.name))
result = [(category.name, category.parent is None)
for category in query]
self.assertEqual(result, [('a', True), ('c1', False), ('c2', False)])
@requires_models(User, Tweet)
def test_from_multi_table(self):
self.add_tweets(self.add_user('huey'), 'meow', 'hiss', 'purr')
self.add_tweets(self.add_user('mickey'), 'woof', 'wheeze')
with self.assertQueryCount(1):
query = (Tweet
.select(Tweet, User)
.from_(Tweet, User)
.where(
(Tweet.user == User.id) &
(User.username == 'huey'))
.order_by(Tweet.id)
.dicts())
self.assertEqual([t['content'] for t in query],
['meow', 'hiss', 'purr'])
self.assertEqual([t['username'] for t in query],
['huey', 'huey', 'huey'])
@requires_models(Point)
def test_subquery_in_select_expression(self):
for x, y in ((1, 1), (1, 2), (10, 10), (10, 20)):
Point.create(x=x, y=y)
with self.assertQueryCount(1):
PA = Point.alias('pa')
subq = PA.select(fn.SUM(PA.y)).where(PA.x == Point.x)
query = (Point
.select(Point.x, Point.y, subq.alias('sy'))
.order_by(Point.x, Point.y))
self.assertEqual(list(query.tuples()), [
(1, 1, 3),
(1, 2, 3),
(10, 10, 30),
(10, 20, 30)])
with self.assertQueryCount(1):
query = (Point
.select(Point.x, (Point.y + subq).alias('sy'))
.order_by(Point.x, Point.y))
self.assertEqual(list(query.tuples()), [
(1, 4), (1, 5),
(10, 40), (10, 50)])
@requires_models(User, Tweet)
def test_filtering(self):
with self.database.atomic():
huey = self.add_user('huey')
mickey = self.add_user('mickey')
self.add_tweets(huey, 'meow', 'hiss', 'purr')
self.add_tweets(mickey, 'woof', 'wheeze')
with self.assertQueryCount(1):
query = Tweet.filter(user__username='huey').order_by(Tweet.content)
self.assertEqual([row.content for row in query],
['hiss', 'meow', 'purr'])
with self.assertQueryCount(1):
query = User.filter(tweets__content__ilike='w%')
self.assertEqual([user.username for user in query],
['mickey', 'mickey'])
def test_deferred_fk(self):
class Note(TestModel):
foo = DeferredForeignKey('Foo', backref='notes')
class Foo(TestModel):
note = ForeignKeyField(Note)
self.assertTrue(Note.foo.rel_model is Foo)
self.assertTrue(Foo.note.rel_model is Note)
f = Foo(id=1337)
self.assertSQL(f.notes, (
'SELECT "t1"."id", "t1"."foo_id" FROM "note" AS "t1" '
'WHERE ("t1"."foo_id" = ?)'), [1337])
def test_deferred_fk_dependency_graph(self):
class AUser(TestModel):
foo = DeferredForeignKey('Tweet')
class ZTweet(TestModel):
user = ForeignKeyField(AUser, backref='ztweets')
self.assertEqual(sort_models([AUser, ZTweet]), [AUser, ZTweet])
def test_table_schema(self):
class Schema(TestModel):
pass
self.assertTrue(Schema._meta.schema is None)
self.assertSQL(Schema.select(), (
'SELECT "t1"."id" FROM "schema" AS "t1"'), [])
Schema._meta.schema = 'test'
self.assertSQL(Schema.select(), (
'SELECT "t1"."id" FROM "test"."schema" AS "t1"'), [])
Schema._meta.schema = 'another'
self.assertSQL(Schema.select(), (
'SELECT "t1"."id" FROM "another"."schema" AS "t1"'), [])
@requires_models(User)
def test_noop(self):
query = User.noop()
self.assertEqual(list(query), [])
@requires_models(User)
def test_iteration(self):
self.assertEqual(list(User), [])
self.assertEqual(len(User), 0)
self.assertTrue(User)
User.insert_many((['charlie'], ['huey']), [User.username]).execute()
self.assertEqual(sorted(u.username for u in User), ['charlie', 'huey'])
self.assertEqual(len(User), 2)
self.assertTrue(User)
@requires_models(User)
def test_iterator(self):
users = ['charlie', 'huey', 'zaizee']
with self.database.atomic():
for username in users:
User.create(username=username)
with self.assertQueryCount(1):
query = User.select().order_by(User.username).iterator()
self.assertEqual([u.username for u in query], users)
self.assertEqual(list(query), [])
@requires_models(User)
def test_select_count(self):
users = [self.add_user(u) for u in ('huey', 'charlie', 'mickey')]
self.assertEqual(User.select().count(), 3)
qr = User.select().execute()
self.assertEqual(qr.count, 0)
list(qr)
self.assertEqual(qr.count, 3)
@requires_models(User)
def test_batch_commit(self):
commit_method = self.database.commit
def assertBatch(n_rows, batch_size, n_commits):
User.delete().execute()
user_data = [{'username': 'u%s' % i} for i in range(n_rows)]
with mock.patch.object(self.database, 'commit') as mock_commit:
mock_commit.side_effect = commit_method
for row in self.database.batch_commit(user_data, batch_size):
User.create(**row)
self.assertEqual(mock_commit.call_count, n_commits)
self.assertEqual(User.select().count(), n_rows)
assertBatch(6, 1, 6)
assertBatch(6, 2, 3)
assertBatch(6, 3, 2)
assertBatch(6, 4, 2)
assertBatch(6, 6, 1)
assertBatch(6, 7, 1)
| TestModelAPIs |
python | weaviate__weaviate-python-client | weaviate/exceptions.py | {
"start": 11043,
"end": 11313
} | class ____(WeaviateBaseError):
"""Is raised when all objects fail to be inserted."""
def __init__(self, message: str = "") -> None:
msg = f"""Every object failed during insertion. {message}"""
super().__init__(msg)
| WeaviateInsertManyAllFailedError |
python | pytorch__pytorch | torch/testing/_internal/common_pruning.py | {
"start": 167,
"end": 629
} | class ____(BaseSparsifier):
def __init__(self, **kwargs: dict[str, Any]) -> None:
super().__init__(defaults=kwargs)
def update_mask(self, module: nn.Module, tensor_name: str, **kwargs: dict[str, Any]) -> None:
module.parametrizations.weight[0].mask[0] = 0 # type: ignore[index, union-attr]
linear_state = self.state['linear1.weight']
linear_state['step_count'] = linear_state.get('step_count', 0) + 1
| ImplementedSparsifier |
python | kamyu104__LeetCode-Solutions | Python/falling-squares.py | {
"start": 8691,
"end": 9459
} | class ____(object):
def fallingSquares(self, positions):
"""
:type positions: List[List[int]]
:rtype: List[int]
"""
heights = [0] * len(positions)
for i in xrange(len(positions)):
left_i, size_i = positions[i]
right_i = left_i + size_i
heights[i] += size_i
for j in xrange(i+1, len(positions)):
left_j, size_j = positions[j]
right_j = left_j + size_j
if left_j < right_i and left_i < right_j: # intersect
heights[j] = max(heights[j], heights[i])
result = []
for height in heights:
result.append(max(result[-1], height) if result else height)
return result
| Solution4 |
python | huggingface__transformers | src/transformers/models/deepseek_v3/modular_deepseek_v3.py | {
"start": 12796,
"end": 13369
} | class ____(LlamaPreTrainedModel):
_can_compile_fullgraph = False
@torch.no_grad()
def _init_weights(self, module):
PreTrainedModel._init_weights(self, module)
if isinstance(module, DeepseekV3TopkRouter):
init.normal_(module.weight, mean=0.0, std=self.config.initializer_range)
elif isinstance(module, DeepseekV3NaiveMoe):
init.normal_(module.gate_up_proj, mean=0.0, std=self.config.initializer_range)
init.normal_(module.down_proj, mean=0.0, std=self.config.initializer_range)
| DeepseekV3PreTrainedModel |
python | doocs__leetcode | lcci/17.24.Max Submatrix/Solution.py | {
"start": 0,
"end": 929
} | class ____:
def getMaxMatrix(self, matrix: List[List[int]]) -> List[int]:
m, n = len(matrix), len(matrix[0])
s = [[0] * n for _ in range(m + 1)]
for i in range(m):
for j in range(n):
# 构造列前缀和
s[i + 1][j] = s[i][j] + matrix[i][j]
mx = matrix[0][0]
ans = [0, 0, 0, 0]
for i1 in range(m):
for i2 in range(i1, m):
nums = [0] * n
for j in range(n):
nums[j] = s[i2 + 1][j] - s[i1][j]
start = 0
f = nums[0]
for j in range(1, n):
if f > 0:
f += nums[j]
else:
f = nums[j]
start = j
if f > mx:
mx = f
ans = [i1, start, i2, j]
return ans
| Solution |
python | pytorch__pytorch | test/distributed/checkpoint/test_planner.py | {
"start": 25129,
"end": 26129
} | class ____(TestCase):
def _make_metadata(self, chunks, size):
storage = TensorStorageMetadata(
properties=TensorProperties(dtype=torch.float32),
size=torch.Size(size),
chunks=chunks,
)
return Metadata(state_dict_metadata={"param": storage})
def test_non_overlapping_chunks(self):
chunks = [
ChunkStorageMetadata(offsets=torch.Size([i]), sizes=torch.Size([1]))
for i in range(4)
]
metadata = self._make_metadata(chunks, [4])
self.assertTrue(_validate_global_plan([SavePlan([])], metadata))
def test_detect_overlapping_chunks(self):
chunks = [
ChunkStorageMetadata(offsets=torch.Size([0]), sizes=torch.Size([2])),
ChunkStorageMetadata(offsets=torch.Size([1]), sizes=torch.Size([2])),
]
metadata = self._make_metadata(chunks, [4])
self.assertFalse(_validate_global_plan([SavePlan([])], metadata))
| TestValidateGlobalPlan |
python | getsentry__sentry | tests/sentry/models/test_grouphistory.py | {
"start": 1150,
"end": 2526
} | class ____(TestCase):
def test(self) -> None:
GroupAssignee.objects.assign(self.group, self.user)
proj_1_group_2 = self.store_event(data={}, project_id=self.project.id).group
GroupAssignee.objects.assign(self.group, self.team)
history = set(GroupHistory.objects.filter(group__in=[self.group, proj_1_group_2]))
other_org = self.create_organization()
other_team = self.create_team(other_org, members=[self.user])
other_project = self.create_project(organization=other_org, teams=[other_team])
other_group = self.store_event(data={}, project_id=other_project.id).group
assert other_group is not None
other_group_2 = self.store_event(data={}, project_id=other_project.id).group
assert other_group_2 is not None
GroupAssignee.objects.assign(other_group, self.user)
GroupAssignee.objects.assign(other_group_2, other_team)
other_history = set(GroupHistory.objects.filter(group__in=[other_group, other_group_2]))
# Even though the user is a member of both orgs, and is assigned to both groups, we should
# filter down to just the history that each team has access to here.
assert set(GroupHistory.objects.filter_to_team(self.team)) == history
assert set(GroupHistory.objects.filter_to_team(other_team)) == other_history
| FilterToTeamTest |
python | gevent__gevent | src/greentest/3.14/test_socket.py | {
"start": 27197,
"end": 81304
} | class ____(unittest.TestCase):
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_socket_type(self):
self.assertTrue(gc.is_tracked(_socket.socket))
with self.assertRaisesRegex(TypeError, "immutable"):
_socket.socket.foo = 1
def test_SocketType_is_socketobject(self):
import _socket
self.assertTrue(socket.SocketType is _socket.socket)
s = socket.socket()
self.assertIsInstance(s, socket.SocketType)
s.close()
def test_repr(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
with s:
self.assertIn('fd=%i' % s.fileno(), repr(s))
self.assertIn('family=%s' % socket.AF_INET, repr(s))
self.assertIn('type=%s' % socket.SOCK_STREAM, repr(s))
self.assertIn('proto=0', repr(s))
self.assertNotIn('raddr', repr(s))
s.bind(('127.0.0.1', 0))
self.assertIn('laddr', repr(s))
self.assertIn(str(s.getsockname()), repr(s))
self.assertIn('[closed]', repr(s))
self.assertNotIn('laddr', repr(s))
@unittest.skipUnless(_socket is not None, 'need _socket module')
def test_csocket_repr(self):
s = _socket.socket(_socket.AF_INET, _socket.SOCK_STREAM)
try:
expected = ('<socket object, fd=%s, family=%s, type=%s, proto=%s>'
% (s.fileno(), s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
finally:
s.close()
expected = ('<socket object, fd=-1, family=%s, type=%s, proto=%s>'
% (s.family, s.type, s.proto))
self.assertEqual(repr(s), expected)
def test_weakref(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
p = proxy(s)
self.assertEqual(p.fileno(), s.fileno())
s = None
support.gc_collect() # For PyPy or other GCs.
try:
p.fileno()
except ReferenceError:
pass
else:
self.fail('Socket proxy still exists')
def testSocketError(self):
# Testing socket module exceptions
msg = "Error raising socket exception (%s)."
with self.assertRaises(OSError, msg=msg % 'OSError'):
raise OSError
with self.assertRaises(OSError, msg=msg % 'socket.herror'):
raise socket.herror
with self.assertRaises(OSError, msg=msg % 'socket.gaierror'):
raise socket.gaierror
def testSendtoErrors(self):
# Testing that sendto doesn't mask failures. See #10169.
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind(('', 0))
sockname = s.getsockname()
# 2 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None)
self.assertIn('not NoneType',str(cm.exception))
# 3 args
with self.assertRaises(TypeError) as cm:
s.sendto('\u2620', 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'str'")
with self.assertRaises(TypeError) as cm:
s.sendto(5j, 0, sockname)
self.assertEqual(str(cm.exception),
"a bytes-like object is required, not 'complex'")
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, None)
self.assertIn('not NoneType', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 'bar', sockname)
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', None, None)
# wrong number of args
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo')
self.assertIn('(1 given)', str(cm.exception))
with self.assertRaises(TypeError) as cm:
s.sendto(b'foo', 0, sockname, 4)
self.assertIn('(4 given)', str(cm.exception))
def testCrucialConstants(self):
# Testing for mission critical constants
socket.AF_INET
if socket.has_ipv6:
socket.AF_INET6
socket.SOCK_STREAM
socket.SOCK_DGRAM
socket.SOCK_RAW
socket.SOCK_RDM
socket.SOCK_SEQPACKET
socket.SOL_SOCKET
socket.SO_REUSEADDR
def testCrucialIpProtoConstants(self):
socket.IPPROTO_TCP
socket.IPPROTO_UDP
if socket.has_ipv6:
socket.IPPROTO_IPV6
@unittest.skipUnless(os.name == "nt", "Windows specific")
def testWindowsSpecificConstants(self):
socket.IPPROTO_ICLFXBM
socket.IPPROTO_ST
socket.IPPROTO_CBT
socket.IPPROTO_IGP
socket.IPPROTO_RDP
socket.IPPROTO_PGM
socket.IPPROTO_L2TP
socket.IPPROTO_SCTP
@unittest.skipIf(support.is_wasi, "WASI is missing these methods")
def test_socket_methods(self):
# socket methods that depend on a configure HAVE_ check. They should
# be present on all platforms except WASI.
names = [
"_accept", "bind", "connect", "connect_ex", "getpeername",
"getsockname", "listen", "recvfrom", "recvfrom_into", "sendto",
"setsockopt", "shutdown"
]
for name in names:
if not hasattr(socket.socket, name):
self.fail(f"socket method {name} is missing")
@unittest.skipUnless(sys.platform == 'darwin', 'macOS specific test')
@unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test')
def test3542SocketOptions(self):
# Ref. issue #35569 and https://tools.ietf.org/html/rfc3542
opts = {
'IPV6_CHECKSUM',
'IPV6_DONTFRAG',
'IPV6_DSTOPTS',
'IPV6_HOPLIMIT',
'IPV6_HOPOPTS',
'IPV6_NEXTHOP',
'IPV6_PATHMTU',
'IPV6_PKTINFO',
'IPV6_RECVDSTOPTS',
'IPV6_RECVHOPLIMIT',
'IPV6_RECVHOPOPTS',
'IPV6_RECVPATHMTU',
'IPV6_RECVPKTINFO',
'IPV6_RECVRTHDR',
'IPV6_RECVTCLASS',
'IPV6_RTHDR',
'IPV6_RTHDRDSTOPTS',
'IPV6_RTHDR_TYPE_0',
'IPV6_TCLASS',
'IPV6_USE_MIN_MTU',
}
for opt in opts:
self.assertHasAttr(socket, opt)
def testHostnameRes(self):
# Testing hostname resolution mechanisms
hostname = socket.gethostname()
try:
ip = socket.gethostbyname(hostname)
except OSError:
# Probably name lookup wasn't set up right; skip this test
self.skipTest('name lookup failure')
self.assertTrue(ip.find('.') >= 0, "Error resolving host to ip.")
try:
hname, aliases, ipaddrs = socket.gethostbyaddr(ip)
except OSError:
# Probably a similar problem as above; skip this test
self.skipTest('name lookup failure')
all_host_names = [hostname, hname] + aliases
fqhn = socket.getfqdn(ip)
if not fqhn in all_host_names:
self.fail("Error testing host resolution mechanisms. (fqdn: %s, all: %s)" % (fqhn, repr(all_host_names)))
def test_host_resolution(self):
for addr in [socket_helper.HOSTv4, '10.0.0.1', '255.255.255.255']:
self.assertEqual(socket.gethostbyname(addr), addr)
# we don't test socket_helper.HOSTv6 because there's a chance it doesn't have
# a matching name entry (e.g. 'ip6-localhost')
for host in [socket_helper.HOSTv4]:
self.assertIn(host, socket.gethostbyaddr(host)[2])
def test_host_resolution_bad_address(self):
# These are all malformed IP addresses and expected not to resolve to
# any result. But some ISPs, e.g. AWS and AT&T, may successfully
# resolve these IPs. In particular, AT&T's DNS Error Assist service
# will break this test. See https://bugs.python.org/issue42092 for a
# workaround.
explanation = (
"resolving an invalid IP address did not raise OSError; "
"can be caused by a broken DNS server"
)
for addr in ['0.1.1.~1', '1+.1.1.1', '::1q', '::1::2',
'1:1:1:1:1:1:1:1:1']:
with self.assertRaises(OSError, msg=addr):
socket.gethostbyname(addr)
with self.assertRaises(OSError, msg=explanation):
socket.gethostbyaddr(addr)
@unittest.skipUnless(hasattr(socket, 'sethostname'), "test needs socket.sethostname()")
@unittest.skipUnless(hasattr(socket, 'gethostname'), "test needs socket.gethostname()")
def test_sethostname(self):
oldhn = socket.gethostname()
try:
socket.sethostname('new')
except OSError as e:
if e.errno == errno.EPERM:
self.skipTest("test should be run as root")
else:
raise
try:
# running test as root!
self.assertEqual(socket.gethostname(), 'new')
# Should work with bytes objects too
socket.sethostname(b'bar')
self.assertEqual(socket.gethostname(), 'bar')
finally:
socket.sethostname(oldhn)
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'),
                         'socket.if_nameindex() not available.')
    @support.skip_android_selinux('if_nameindex')
    def testInterfaceNameIndex(self):
        """Every if_nameindex() entry round-trips via if_nametoindex/if_indextoname."""
        interfaces = socket.if_nameindex()
        for index, name in interfaces:
            self.assertIsInstance(index, int)
            self.assertIsInstance(name, str)
            # interface indices are non-zero integers
            self.assertGreater(index, 0)
            _index = socket.if_nametoindex(name)
            self.assertIsInstance(_index, int)
            self.assertEqual(index, _index)
            _name = socket.if_indextoname(index)
            self.assertIsInstance(_name, str)
            self.assertEqual(name, _name)
    @unittest.skipUnless(hasattr(socket, 'if_indextoname'),
                         'socket.if_indextoname() not available.')
    @support.skip_android_selinux('if_indextoname')
    def testInvalidInterfaceIndexToName(self):
        """if_indextoname() rejects bad types, bad values, and unknown indices."""
        self.assertRaises(OSError, socket.if_indextoname, 0)
        self.assertRaises(ValueError, socket.if_indextoname, -1)
        self.assertRaises(OverflowError, socket.if_indextoname, 2**1000)
        self.assertRaises(TypeError, socket.if_indextoname, '_DEADBEEF')
        if hasattr(socket, 'if_nameindex'):
            indices = dict(socket.if_nameindex())
            # An existing index shifted past 32 bits must not be silently
            # truncated back to a valid interface.
            for index in indices:
                index2 = index + 2**32
                if index2 not in indices:
                    with self.assertRaises((OverflowError, OSError)):
                        socket.if_indextoname(index2)
            for index in 2**32-1, 2**64-1:
                if index not in indices:
                    with self.assertRaises((OverflowError, OSError)):
                        socket.if_indextoname(index)
@unittest.skipUnless(hasattr(socket, 'if_nametoindex'),
'socket.if_nametoindex() not available.')
@support.skip_android_selinux('if_nametoindex')
def testInvalidInterfaceNameToIndex(self):
self.assertRaises(TypeError, socket.if_nametoindex, 0)
self.assertRaises(OSError, socket.if_nametoindex, '_DEADBEEF')
    @unittest.skipUnless(hasattr(sys, 'getrefcount'),
                         'test needs sys.getrefcount()')
    def testRefCountGetNameInfo(self):
        """getnameinfo() must not leak a reference when it raises TypeError."""
        # Testing reference count for getnameinfo
        try:
            # On some versions, this loses a reference
            orig = sys.getrefcount(__name__)
            socket.getnameinfo(__name__,0)
        except TypeError:
            if sys.getrefcount(__name__) != orig:
                self.fail("socket.getnameinfo loses a reference")
    def testInterpreterCrash(self):
        """getnameinfo() with a bogus address tuple must raise, not crash."""
        # Making sure getnameinfo doesn't crash the interpreter
        try:
            # On some versions, this crashes the interpreter.
            socket.getnameinfo(('x', 0, 0, 0), 0)
        except OSError:
            pass
    def testNtoH(self):
        """htons/ntohs and htonl/ntohl are self-inverse on their low bits."""
        # This just checks that htons etc. are their own inverse,
        # when looking at the lower 16 or 32 bits.
        sizes = {socket.htonl: 32, socket.ntohl: 32,
                 socket.htons: 16, socket.ntohs: 16}
        for func, size in sizes.items():
            mask = (1<<size) - 1
            for i in (0, 1, 0xffff, ~0xffff, 2, 0x01234567, 0x76543210):
                self.assertEqual(i & mask, func(func(i&mask)) & mask)
            # An all-ones input maps to all-ones output (byte swap of 0xff..ff).
            swapped = func(mask)
            self.assertEqual(swapped & mask, mask)
            self.assertRaises(OverflowError, func, 1<<34)
    def testNtoHErrors(self):
        """Byte-order converters: ValueError for negatives, OverflowError when too wide."""
        s_good_values = [0, 1, 2, 0xffff]
        l_good_values = s_good_values + [0xffffffff]
        neg_values = [-1, -2, -(1<<15)-1, -(1<<31)-1, -(1<<63)-1, -1<<1000]
        l_bad_values = [1<<32, 1<<1000]
        s_bad_values = l_bad_values + [1 << 16, (1<<31)-1, 1<<31]
        for k in s_good_values:
            socket.ntohs(k)
            socket.htons(k)
        for k in l_good_values:
            socket.ntohl(k)
            socket.htonl(k)
        for k in neg_values:
            self.assertRaises(ValueError, socket.ntohs, k)
            self.assertRaises(ValueError, socket.htons, k)
            self.assertRaises(ValueError, socket.ntohl, k)
            self.assertRaises(ValueError, socket.htonl, k)
        for k in s_bad_values:
            self.assertRaises(OverflowError, socket.ntohs, k)
            self.assertRaises(OverflowError, socket.htons, k)
        for k in l_bad_values:
            self.assertRaises(OverflowError, socket.ntohl, k)
            self.assertRaises(OverflowError, socket.htonl, k)
    def testGetServBy(self):
        """getservbyname() and getservbyport() are mutually consistent."""
        eq = self.assertEqual
        # Find one service that exists, then check all the related interfaces.
        # I've ordered this by protocols that have both a tcp and udp
        # protocol, at least for modern Linuxes.
        if (
            sys.platform.startswith(
                ('linux', 'android', 'freebsd', 'netbsd', 'gnukfreebsd'))
            or is_apple
        ):
            # avoid the 'echo' service on this platform, as there is an
            # assumption breaking non-standard port/protocol entry
            services = ('daytime', 'qotd', 'domain')
        else:
            services = ('echo', 'daytime', 'domain')
        for service in services:
            try:
                port = socket.getservbyname(service, 'tcp')
                break
            except OSError:
                pass
        else:
            # None of the candidate services resolved; nothing to test against.
            raise OSError
        # Try same call with optional protocol omitted
        # Issue gh-71123: this fails on Android before API level 23.
        if not (support.is_android and platform.android_ver().api_level < 23):
            port2 = socket.getservbyname(service)
            eq(port, port2)
        # Try udp, but don't barf if it doesn't exist
        try:
            udpport = socket.getservbyname(service, 'udp')
        except OSError:
            udpport = None
        else:
            eq(udpport, port)
        # Now make sure the lookup by port returns the same service name
        # Issue #26936: when the protocol is omitted, this fails on Android
        # before API level 28.
        # NOTE: port2 is only defined when api_level >= 23; the api_level < 28
        # guard below also covers that case, so the reference is safe.
        if not (support.is_android and platform.android_ver().api_level < 28):
            eq(socket.getservbyport(port2), service)
        eq(socket.getservbyport(port, 'tcp'), service)
        if udpport is not None:
            eq(socket.getservbyport(udpport, 'udp'), service)
        # Make sure getservbyport does not accept out of range ports.
        self.assertRaises(OverflowError, socket.getservbyport, -1)
        self.assertRaises(OverflowError, socket.getservbyport, 65536)
    def testDefaultTimeout(self):
        """setdefaulttimeout() propagates to new sockets and validates its input."""
        # Testing default timeout
        # The default timeout should initially be None
        self.assertEqual(socket.getdefaulttimeout(), None)
        with socket.socket() as s:
            self.assertEqual(s.gettimeout(), None)

        # Set the default timeout to 10, and see if it propagates
        with socket_setdefaulttimeout(10):
            self.assertEqual(socket.getdefaulttimeout(), 10)
            with socket.socket() as sock:
                self.assertEqual(sock.gettimeout(), 10)

            # Reset the default timeout to None, and see if it propagates
            socket.setdefaulttimeout(None)
            self.assertEqual(socket.getdefaulttimeout(), None)
            with socket.socket() as sock:
                self.assertEqual(sock.gettimeout(), None)

        # Check that setting it to an invalid value raises ValueError
        self.assertRaises(ValueError, socket.setdefaulttimeout, -1)

        # Check that setting it to an invalid type raises TypeError
        self.assertRaises(TypeError, socket.setdefaulttimeout, "spam")
@unittest.skipUnless(hasattr(socket, 'inet_aton'),
'test needs socket.inet_aton()')
def testIPv4_inet_aton_fourbytes(self):
# Test that issue1008086 and issue767150 are fixed.
# It must return 4 bytes.
self.assertEqual(b'\x00'*4, socket.inet_aton('0.0.0.0'))
self.assertEqual(b'\xff'*4, socket.inet_aton('255.255.255.255'))
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv4toString(self):
        """inet_aton() and inet_pton(AF_INET) agree and both reject malformed input."""
        from socket import inet_aton as f, inet_pton, AF_INET
        g = lambda a: inet_pton(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )

        self.assertEqual(b'\x00\x00\x00\x00', f('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', f('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', f('170.170.170.170'))
        self.assertEqual(b'\x01\x02\x03\x04', f('1.2.3.4'))
        self.assertEqual(b'\xff\xff\xff\xff', f('255.255.255.255'))
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not AIX:
            assertInvalid(f, '0.0.0.')
            assertInvalid(f, '300.0.0.0')
            assertInvalid(f, 'a.0.0.0')
            assertInvalid(f, '1.2.3.4.5')
            assertInvalid(f, '::1')

        self.assertEqual(b'\x00\x00\x00\x00', g('0.0.0.0'))
        self.assertEqual(b'\xff\x00\xff\x00', g('255.0.255.0'))
        self.assertEqual(b'\xaa\xaa\xaa\xaa', g('170.170.170.170'))
        self.assertEqual(b'\xff\xff\xff\xff', g('255.255.255.255'))
        assertInvalid(g, '0.0.0.')
        assertInvalid(g, '300.0.0.0')
        assertInvalid(g, 'a.0.0.0')
        assertInvalid(g, '1.2.3.4.5')
        assertInvalid(g, '::1')
    @unittest.skipUnless(hasattr(socket, 'inet_pton'),
                         'test needs socket.inet_pton()')
    def testIPv6toString(self):
        """inet_pton(AF_INET6) packs valid addresses and rejects malformed ones."""
        try:
            from socket import inet_pton, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')

        if sys.platform == "win32":
            try:
                inet_pton(AF_INET6, '::')
            except OSError as e:
                # WSAEINVAL: the stack may lack IPv6 support entirely
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')

        f = lambda a: inet_pton(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )

        self.assertEqual(b'\x00' * 16, f('::'))
        self.assertEqual(b'\x00' * 16, f('0::0'))
        self.assertEqual(b'\x00\x01' + b'\x00' * 14, f('1::'))
        self.assertEqual(
            b'\x45\xef\x76\xcb\x00\x1a\x56\xef\xaf\xeb\x0b\xac\x19\x24\xae\xae',
            f('45ef:76cb:1a:56ef:afeb:bac:1924:aeae')
        )
        self.assertEqual(
            b'\xad\x42\x0a\xbc' + b'\x00' * 4 + b'\x01\x27\x00\x00\x02\x54\x00\x02',
            f('ad42:abc::127:0:254:2')
        )
        self.assertEqual(b'\x00\x12\x00\x0a' + b'\x00' * 12, f('12:a::'))
        assertInvalid('0x20::')
        assertInvalid(':::')
        assertInvalid('::0::')
        assertInvalid('1::abc::')
        assertInvalid('1::abc::def')
        assertInvalid('1:2:3:4:5:6')
        assertInvalid('1:2:3:4:5:6:')
        assertInvalid('1:2:3:4:5:6:7:8:0')
        # bpo-29972: inet_pton() doesn't fail on AIX
        if not AIX:
            assertInvalid('1:2:3:4:5:6:7:8:')

        # mixed IPv6/IPv4-suffix notation
        self.assertEqual(b'\x00' * 12 + b'\xfe\x2a\x17\x40',
            f('::254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42' + b'\x00' * 8 + b'\xa2\x9b\xfe\x2a\x17\x40',
            f('42::a29b:254.42.23.64')
        )
        self.assertEqual(
            b'\x00\x42\xa8\xb9\x00\x00\x00\x02\xff\xff\xa2\x9b\xfe\x2a\x17\x40',
            f('42:a8b9:0:2:ffff:a29b:254.42.23.64')
        )
        assertInvalid('255.254.253.252')
        assertInvalid('1::260.2.3.0')
        assertInvalid('1::0.be.e.0')
        assertInvalid('1:2:3:4:5:6:7:1.2.3.4')
        assertInvalid('::1.2.3.4:0')
        assertInvalid('0.100.200.0:3:4:5:6:7:8')
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv4(self):
        """inet_ntoa() and inet_ntop(AF_INET) format packed addresses identically."""
        from socket import inet_ntoa as f, inet_ntop, AF_INET
        g = lambda a: inet_ntop(AF_INET, a)
        assertInvalid = lambda func,a: self.assertRaises(
            (OSError, ValueError), func, a
        )

        self.assertEqual('1.0.1.0', f(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', f(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', f(b'\xff\xff\xff\xff'))
        self.assertEqual('1.2.3.4', f(b'\x01\x02\x03\x04'))
        # packed input must be exactly 4 bytes
        assertInvalid(f, b'\x00' * 3)
        assertInvalid(f, b'\x00' * 5)
        assertInvalid(f, b'\x00' * 16)
        self.assertEqual('170.85.170.85', f(bytearray(b'\xaa\x55\xaa\x55')))

        self.assertEqual('1.0.1.0', g(b'\x01\x00\x01\x00'))
        self.assertEqual('170.85.170.85', g(b'\xaa\x55\xaa\x55'))
        self.assertEqual('255.255.255.255', g(b'\xff\xff\xff\xff'))
        assertInvalid(g, b'\x00' * 3)
        assertInvalid(g, b'\x00' * 5)
        assertInvalid(g, b'\x00' * 16)
        self.assertEqual('170.85.170.85', g(bytearray(b'\xaa\x55\xaa\x55')))
    @unittest.skipUnless(hasattr(socket, 'inet_ntop'),
                         'test needs socket.inet_ntop()')
    def testStringToIPv6(self):
        """inet_ntop(AF_INET6) formats packed addresses; bad lengths are rejected."""
        try:
            from socket import inet_ntop, AF_INET6, has_ipv6
            if not has_ipv6:
                self.skipTest('IPv6 not available')
        except ImportError:
            self.skipTest('could not import needed symbols from socket')

        if sys.platform == "win32":
            try:
                inet_ntop(AF_INET6, b'\x00' * 16)
            except OSError as e:
                # WSAEINVAL: the stack may lack IPv6 support entirely
                if e.winerror == 10022:
                    self.skipTest('IPv6 might not be supported')

        f = lambda a: inet_ntop(AF_INET6, a)
        assertInvalid = lambda a: self.assertRaises(
            (OSError, ValueError), f, a
        )

        self.assertEqual('::', f(b'\x00' * 16))
        self.assertEqual('::1', f(b'\x00' * 15 + b'\x01'))
        self.assertEqual(
            'aef:b01:506:1001:ffff:9997:55:170',
            f(b'\x0a\xef\x0b\x01\x05\x06\x10\x01\xff\xff\x99\x97\x00\x55\x01\x70')
        )
        self.assertEqual('::1', f(bytearray(b'\x00' * 15 + b'\x01')))

        # packed input must be exactly 16 bytes
        assertInvalid(b'\x12' * 15)
        assertInvalid(b'\x12' * 17)
        assertInvalid(b'\x12' * 4)
    # XXX The following don't test module-level functionality...

    def testSockName(self):
        """getsockname() reflects the bound wildcard address and port."""
        # Testing getsockname()
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.addCleanup(sock.close)

        # Since find_unused_port() is inherently subject to race conditions, we
        # call it a couple times if necessary.
        for i in itertools.count():
            port = socket_helper.find_unused_port()
            try:
                sock.bind(("0.0.0.0", port))
            except OSError as e:
                if e.errno != errno.EADDRINUSE or i == 5:
                    raise
            else:
                break

        name = sock.getsockname()
        # XXX(nnorwitz): http://tinyurl.com/os5jz seems to indicate
        # it reasonable to get the host's addr in addition to 0.0.0.0.
        # At least for eCos. This is required for the S/390 to pass.
        try:
            my_ip_addr = socket.gethostbyname(socket.gethostname())
        except OSError:
            # Probably name lookup wasn't set up right; skip this test
            self.skipTest('name lookup failure')
        self.assertIn(name[0], ("0.0.0.0", my_ip_addr), '%s invalid' % name[0])
        self.assertEqual(name[1], port)
def testGetSockOpt(self):
# Testing getsockopt()
# We know a socket should start without reuse==0
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse != 0, "initial mode is reuse")
def testSetSockOpt(self):
# Testing setsockopt()
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(sock.close)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
reuse = sock.getsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR)
self.assertFalse(reuse == 0, "failed to set reuse mode")
def testSendAfterClose(self):
# testing send() after close() with timeout
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
sock.settimeout(1)
self.assertRaises(OSError, sock.send, b"spam")
    def testCloseException(self):
        """close() on a socket whose fd was closed elsewhere raises EBADF/ENOTSOCK."""
        sock = socket.socket()
        sock.bind((socket._LOCALHOST, 0))
        # A second socket object wrapping the same fd closes the underlying fd.
        socket.socket(fileno=sock.fileno()).close()
        try:
            sock.close()
        except OSError as err:
            # Winsock apparently raises ENOTSOCK
            self.assertIn(err.errno, (errno.EBADF, errno.ENOTSOCK))
        else:
            self.fail("close() should raise EBADF/ENOTSOCK")
    def testNewAttributes(self):
        """family/type/proto attributes reflect the constructor arguments."""
        # testing .family, .type and .protocol
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
            self.assertEqual(sock.family, socket.AF_INET)
            if hasattr(socket, 'SOCK_CLOEXEC'):
                # the kernel may fold SOCK_CLOEXEC into the reported type
                self.assertIn(sock.type,
                              (socket.SOCK_STREAM | socket.SOCK_CLOEXEC,
                               socket.SOCK_STREAM))
            else:
                self.assertEqual(sock.type, socket.SOCK_STREAM)
            self.assertEqual(sock.proto, 0)
    def test_getsockaddrarg(self):
        """bind() rejects ports outside the 16-bit range with OverflowError."""
        sock = socket.socket()
        self.addCleanup(sock.close)
        port = socket_helper.find_unused_port()
        big_port = port + 65536
        neg_port = port - 65536
        self.assertRaises(OverflowError, sock.bind, (HOST, big_port))
        self.assertRaises(OverflowError, sock.bind, (HOST, neg_port))
        # Since find_unused_port() is inherently subject to race conditions, we
        # call it a couple times if necessary.
        for i in itertools.count():
            port = socket_helper.find_unused_port()
            try:
                sock.bind((HOST, port))
            except OSError as e:
                if e.errno != errno.EADDRINUSE or i == 5:
                    raise
            else:
                break
@unittest.skipUnless(os.name == "nt", "Windows specific")
def test_sock_ioctl(self):
self.assertHasAttr(socket.socket, 'ioctl')
self.assertHasAttr(socket, 'SIO_RCVALL')
self.assertHasAttr(socket, 'RCVALL_ON')
self.assertHasAttr(socket, 'RCVALL_OFF')
self.assertHasAttr(socket, 'SIO_KEEPALIVE_VALS')
s = socket.socket()
self.addCleanup(s.close)
self.assertRaises(ValueError, s.ioctl, -1, None)
s.ioctl(socket.SIO_KEEPALIVE_VALS, (1, 100, 100))
@unittest.skipUnless(os.name == "nt", "Windows specific")
@unittest.skipUnless(hasattr(socket, 'SIO_LOOPBACK_FAST_PATH'),
'Loopback fast path support required for this test')
def test_sio_loopback_fast_path(self):
s = socket.socket()
self.addCleanup(s.close)
try:
s.ioctl(socket.SIO_LOOPBACK_FAST_PATH, True)
except OSError as exc:
WSAEOPNOTSUPP = 10045
if exc.winerror == WSAEOPNOTSUPP:
self.skipTest("SIO_LOOPBACK_FAST_PATH is defined but "
"doesn't implemented in this Windows version")
raise
self.assertRaises(TypeError, s.ioctl, socket.SIO_LOOPBACK_FAST_PATH, None)
    def testGetaddrinfo(self):
        """Exercise getaddrinfo() result shape, family/type filters, and keywords."""
        try:
            socket.getaddrinfo('localhost', 80)
        except socket.gaierror as err:
            if err.errno == socket.EAI_SERVICE:
                # see http://bugs.python.org/issue1282647
                self.skipTest("buggy libc version")
            raise
        # len of every sequence is supposed to be == 5
        for info in socket.getaddrinfo(HOST, None):
            self.assertEqual(len(info), 5)
        # host can be a domain name, a string representation of an
        # IPv4/v6 address or None
        socket.getaddrinfo('localhost', 80)
        socket.getaddrinfo('127.0.0.1', 80)
        socket.getaddrinfo(None, 80)
        if socket_helper.IPV6_ENABLED:
            socket.getaddrinfo('::1', 80)
        # port can be a string service name such as "http", a numeric
        # port number or None
        # Issue #26936: this fails on Android before API level 23.
        if not (support.is_android and platform.android_ver().api_level < 23):
            socket.getaddrinfo(HOST, "http")
        socket.getaddrinfo(HOST, 80)
        socket.getaddrinfo(HOST, None)
        # test family and socktype filters
        infos = socket.getaddrinfo(HOST, 80, socket.AF_INET, socket.SOCK_STREAM)
        for family, type, _, _, _ in infos:
            self.assertEqual(family, socket.AF_INET)
            self.assertEqual(repr(family), '<AddressFamily.AF_INET: %r>' % family.value)
            self.assertEqual(str(family), str(family.value))
            self.assertEqual(type, socket.SOCK_STREAM)
            self.assertEqual(repr(type), '<SocketKind.SOCK_STREAM: %r>' % type.value)
            self.assertEqual(str(type), str(type.value))
        infos = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        for _, socktype, _, _, _ in infos:
            self.assertEqual(socktype, socket.SOCK_STREAM)
        # test proto and flags arguments
        socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        # a server willing to support both IPv4 and IPv6 will
        # usually do this
        socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                           socket.AI_PASSIVE)
        # test keyword arguments: positional and keyword forms must agree
        a = socket.getaddrinfo(HOST, None)
        b = socket.getaddrinfo(host=HOST, port=None)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, socket.AF_INET)
        b = socket.getaddrinfo(HOST, None, family=socket.AF_INET)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, socket.SOCK_STREAM)
        b = socket.getaddrinfo(HOST, None, type=socket.SOCK_STREAM)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, socket.SOL_TCP)
        b = socket.getaddrinfo(HOST, None, proto=socket.SOL_TCP)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(HOST, None, 0, 0, 0, socket.AI_PASSIVE)
        b = socket.getaddrinfo(HOST, None, flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        a = socket.getaddrinfo(None, 0, socket.AF_UNSPEC, socket.SOCK_STREAM, 0,
                               socket.AI_PASSIVE)
        b = socket.getaddrinfo(host=None, port=0, family=socket.AF_UNSPEC,
                               type=socket.SOCK_STREAM, proto=0,
                               flags=socket.AI_PASSIVE)
        self.assertEqual(a, b)
        # Issue #6697.
        self.assertRaises(UnicodeEncodeError, socket.getaddrinfo, 'localhost', '\uD800')
        if hasattr(socket, 'AI_NUMERICSERV'):
            self.assertRaises(socket.gaierror, socket.getaddrinfo, "localhost", "http",
                              flags=socket.AI_NUMERICSERV)
            # Issue 17269: test workaround for OS X platform bug segfault
            try:
                # The arguments here are undefined and the call may succeed
                # or fail. All we care here is that it doesn't segfault.
                socket.getaddrinfo("localhost", None, 0, 0, 0,
                                   socket.AI_NUMERICSERV)
            except socket.gaierror:
                pass
    @unittest.skipIf(_testcapi is None, "requires _testcapi")
    def test_getaddrinfo_int_port_overflow(self):
        """Huge integer ports must not raise OverflowError (gh-74895)."""
        # gh-74895: Test that getaddrinfo does not raise OverflowError on port.
        #
        # POSIX getaddrinfo() never specify the valid range for "service"
        # decimal port number values. For IPv4 and IPv6 they are technically
        # unsigned 16-bit values, but the API is protocol agnostic. Which values
        # trigger an error from the C library function varies by platform as
        # they do not all perform validation.

        # The key here is that we don't want to produce OverflowError as Python
        # prior to 3.12 did for ints outside of a [LONG_MIN, LONG_MAX] range.
        # Leave the error up to the underlying string based platform C API.
        from _testcapi import ULONG_MAX, LONG_MAX, LONG_MIN
        try:
            socket.getaddrinfo(None, ULONG_MAX + 1, type=socket.SOCK_STREAM)
        except OverflowError:
            # Platforms differ as to what values constitute a getaddrinfo() error
            # return. Some fail for LONG_MAX+1, others ULONG_MAX+1, and Windows
            # silently accepts such huge "port" aka "service" numeric values.
            self.fail("Either no error or socket.gaierror expected.")
        except socket.gaierror:
            pass

        try:
            socket.getaddrinfo(None, LONG_MAX + 1, type=socket.SOCK_STREAM)
        except OverflowError:
            self.fail("Either no error or socket.gaierror expected.")
        except socket.gaierror:
            pass

        try:
            socket.getaddrinfo(None, LONG_MAX - 0xffff + 1, type=socket.SOCK_STREAM)
        except OverflowError:
            self.fail("Either no error or socket.gaierror expected.")
        except socket.gaierror:
            pass

        try:
            socket.getaddrinfo(None, LONG_MIN - 1, type=socket.SOCK_STREAM)
        except OverflowError:
            self.fail("Either no error or socket.gaierror expected.")
        except socket.gaierror:
            pass

        socket.getaddrinfo(None, 0, type=socket.SOCK_STREAM)  # No error expected.
        socket.getaddrinfo(None, 0xffff, type=socket.SOCK_STREAM)  # No error expected.
def test_getnameinfo(self):
# only IP addresses are allowed
self.assertRaises(OSError, socket.getnameinfo, ('mail.python.org',0), 0)
    @unittest.skipUnless(support.is_resource_enabled('network'),
                         'network is not enabled')
    def test_idna(self):
        """Internationalized domain names resolve via the lookup APIs (network test)."""
        # Check for internet access before running test
        # (issue #12804, issue #25138).
        with socket_helper.transient_internet('python.org'):
            socket.gethostbyname('python.org')

        # these should all be successful
        domain = 'испытание.pythontest.net'
        socket.gethostbyname(domain)
        socket.gethostbyname_ex(domain)
        socket.getaddrinfo(domain,0,socket.AF_UNSPEC,socket.SOCK_STREAM)
        # this may not work if the forward lookup chooses the IPv6 address, as that doesn't
        # have a reverse entry yet
        # socket.gethostbyaddr('испытание.python.org')
    def check_sendall_interrupted(self, with_timeout):
        """Helper: verify sendall() behavior when interrupted by SIGALRM.

        A handler that raises must have its exception propagate out of
        sendall(); with a timeout set, a handler that returns must lead
        to TimeoutError.
        """
        # socketpair() is not strictly required, but it makes things easier.
        if not hasattr(signal, 'alarm') or not hasattr(socket, 'socketpair'):
            self.skipTest("signal.alarm and socket.socketpair required for this test")
        # Our signal handlers clobber the C errno by calling a math function
        # with an invalid domain value.
        def ok_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
        def raising_handler(*args):
            self.assertRaises(ValueError, math.acosh, 0)
            1 // 0
        c, s = socket.socketpair()
        old_alarm = signal.signal(signal.SIGALRM, raising_handler)
        try:
            if with_timeout:
                # Just above the one second minimum for signal.alarm
                c.settimeout(1.5)
            with self.assertRaises(ZeroDivisionError):
                signal.alarm(1)
                c.sendall(b"x" * support.SOCK_MAX_SIZE)
            if with_timeout:
                signal.signal(signal.SIGALRM, ok_handler)
                signal.alarm(1)
                self.assertRaises(TimeoutError, c.sendall,
                                  b"x" * support.SOCK_MAX_SIZE)
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old_alarm)
            c.close()
            s.close()
    def test_sendall_interrupted(self):
        """sendall() without a timeout: the handler's exception propagates."""
        self.check_sendall_interrupted(False)
    def test_sendall_interrupted_with_timeout(self):
        """sendall() with a timeout: an interrupted call raises TimeoutError."""
        self.check_sendall_interrupted(True)
    def test_dealloc_warn(self):
        """Dropping the last reference to an open socket emits ResourceWarning."""
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        r = repr(sock)
        with self.assertWarns(ResourceWarning) as cm:
            sock = None
            support.gc_collect()
        self.assertIn(r, str(cm.warning.args[0]))

        # An open socket file object gets dereferenced after the socket
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        f = sock.makefile('rb')
        r = repr(sock)
        sock = None
        support.gc_collect()
        with self.assertWarns(ResourceWarning):
            f = None
            support.gc_collect()
def test_name_closed_socketio(self):
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
fp = sock.makefile("rb")
fp.close()
self.assertEqual(repr(fp), "<_io.BufferedReader name=-1>")
    def test_unusable_closed_socketio(self):
        """A closed unbuffered makefile() object rejects readable/writable/seekable."""
        with socket.socket() as sock:
            fp = sock.makefile("rb", buffering=0)
            self.assertTrue(fp.readable())
            self.assertFalse(fp.writable())
            self.assertFalse(fp.seekable())
            fp.close()
            # once closed, the capability queries raise instead of answering
            self.assertRaises(ValueError, fp.readable)
            self.assertRaises(ValueError, fp.writable)
            self.assertRaises(ValueError, fp.seekable)
    def test_socket_close(self):
        """socket.close(fd) closes the descriptor; later socket calls fail."""
        sock = socket.socket()
        try:
            sock.bind((HOST, 0))
            socket.close(sock.fileno())
            with self.assertRaises(OSError):
                sock.listen(1)
        finally:
            with self.assertRaises(OSError):
                # sock.close() fails with EBADF
                sock.close()
        with self.assertRaises(TypeError):
            socket.close(None)
        with self.assertRaises(OSError):
            socket.close(-1)
def test_makefile_mode(self):
for mode in 'r', 'rb', 'rw', 'w', 'wb':
with self.subTest(mode=mode):
with socket.socket() as sock:
encoding = None if "b" in mode else "utf-8"
with sock.makefile(mode, encoding=encoding) as fp:
self.assertEqual(fp.mode, mode)
def test_makefile_invalid_mode(self):
for mode in 'rt', 'x', '+', 'a':
with self.subTest(mode=mode):
with socket.socket() as sock:
with self.assertRaisesRegex(ValueError, 'invalid mode'):
sock.makefile(mode)
    def test_pickle(self):
        """Socket objects are unpicklable; AF_*/SOCK_* enums survive pickling."""
        sock = socket.socket()
        with sock:
            for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
                self.assertRaises(TypeError, pickle.dumps, sock, protocol)
        for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
            family = pickle.loads(pickle.dumps(socket.AF_INET, protocol))
            self.assertEqual(family, socket.AF_INET)
            type = pickle.loads(pickle.dumps(socket.SOCK_STREAM, protocol))
            self.assertEqual(type, socket.SOCK_STREAM)
def test_listen_backlog(self):
for backlog in 0, -1:
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen(backlog)
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
srv.bind((HOST, 0))
srv.listen()
    @support.cpython_only
    @unittest.skipIf(_testcapi is None, "requires _testcapi")
    def test_listen_backlog_overflow(self):
        """listen() rejects backlogs that do not fit in a C int."""
        # Issue 15989
        import _testcapi
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as srv:
            srv.bind((HOST, 0))
            self.assertRaises(OverflowError, srv.listen, _testcapi.INT_MAX + 1)
    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_flowinfo(self):
        """Out-of-range IPv6 flowinfo values are rejected with OverflowError."""
        self.assertRaises(OverflowError, socket.getnameinfo,
                          (socket_helper.HOSTv6, 0, 0xffffffff), 0)
        with socket.socket(socket.AF_INET6, socket.SOCK_STREAM) as s:
            self.assertRaises(OverflowError, s.bind, (socket_helper.HOSTv6, 0, -10))
    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    def test_getaddrinfo_ipv6_basic(self):
        """getaddrinfo() lower-cases hex digits in a numeric IPv6 address."""
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D',  # Note capital letter `D`.
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, 0))
def test_getfqdn_filter_localhost(self):
self.assertEqual(socket.getfqdn(), socket.getfqdn("0.0.0.0"))
self.assertEqual(socket.getfqdn(), socket.getfqdn("::"))
    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
    @unittest.skipIf(AIX, 'Symbolic scope id does not work')
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
    @support.skip_android_selinux('if_nameindex')
    def test_getaddrinfo_ipv6_scopeid_symbolic(self):
        """A %ifname suffix is translated into the numeric scope id."""
        # Just pick up any network interface (Linux, Mac OS X)
        (ifindex, test_interface) = socket.if_nameindex()[0]
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D%' + test_interface,
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless(
        sys.platform == 'win32',
        'Numeric scope id does not work or undocumented')
    def test_getaddrinfo_ipv6_scopeid_numeric(self):
        """A numeric %N suffix is carried through as the scope id."""
        # Also works on Linux and Mac OS X, but is not documented (?)
        # Windows, Linux and Max OS X allow nonexistent interface numbers here.
        ifindex = 42
        ((*_, sockaddr),) = socket.getaddrinfo(
            'ff02::1de:c0:face:8D%' + str(ifindex),
            1234, socket.AF_INET6,
            socket.SOCK_DGRAM,
            socket.IPPROTO_UDP
        )
        # Note missing interface name part in IPv6 address
        self.assertEqual(sockaddr, ('ff02::1de:c0:face:8d', 1234, 0, ifindex))
    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipIf(sys.platform == 'win32', 'does not work on Windows')
    @unittest.skipIf(AIX, 'Symbolic scope id does not work')
    @unittest.skipUnless(hasattr(socket, 'if_nameindex'), "test needs socket.if_nameindex()")
    @support.skip_android_selinux('if_nameindex')
    def test_getnameinfo_ipv6_scopeid_symbolic(self):
        """getnameinfo() renders a real scope id as a %ifname suffix."""
        # Just pick up any network interface.
        (ifindex, test_interface) = socket.if_nameindex()[0]
        sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + test_interface, '1234'))
    @unittest.skipUnless(socket_helper.IPV6_ENABLED, 'IPv6 required for this test.')
    @unittest.skipUnless( sys.platform == 'win32',
                          'Numeric scope id does not work or undocumented')
    def test_getnameinfo_ipv6_scopeid_numeric(self):
        """getnameinfo() renders a nonexistent scope id as a numeric %N suffix."""
        # Also works on Linux (undocumented), but does not work on Mac OS X
        # Windows and Linux allow nonexistent interface numbers here.
        ifindex = 42
        sockaddr = ('ff02::1de:c0:face:8D', 1234, 0, ifindex)  # Note capital letter `D`.
        nameinfo = socket.getnameinfo(sockaddr, socket.NI_NUMERICHOST | socket.NI_NUMERICSERV)
        self.assertEqual(nameinfo, ('ff02::1de:c0:face:8d%' + str(ifindex), '1234'))
    def test_str_for_enums(self):
        """socket.family/type have enum-style repr() and plain-integer str()."""
        # Make sure that the AF_* and SOCK_* constants have enum-like string
        # reprs.
        with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
            self.assertEqual(repr(s.family), '<AddressFamily.AF_INET: %r>' % s.family.value)
            self.assertEqual(repr(s.type), '<SocketKind.SOCK_STREAM: %r>' % s.type.value)
            self.assertEqual(str(s.family), str(s.family.value))
            self.assertEqual(str(s.type), str(s.type.value))
    def test_socket_consistent_sock_type(self):
        """socket.type stays SOCK_STREAM across blocking/timeout changes."""
        # SOCK_NONBLOCK/SOCK_CLOEXEC flags (where defined) must not leak
        # into the reported type.
        SOCK_NONBLOCK = getattr(socket, 'SOCK_NONBLOCK', 0)
        SOCK_CLOEXEC = getattr(socket, 'SOCK_CLOEXEC', 0)
        sock_type = socket.SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC

        with socket.socket(socket.AF_INET, sock_type) as s:
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.settimeout(1)
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.settimeout(0)
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.setblocking(True)
            self.assertEqual(s.type, socket.SOCK_STREAM)
            s.setblocking(False)
            self.assertEqual(s.type, socket.SOCK_STREAM)
    def test_unknown_socket_family_repr(self):
        """Unknown family/type constants are reported as plain numbers."""
        # Test that when created with a family that's not one of the known
        # AF_*/SOCK_* constants, socket.family just returns the number.
        #
        # To do this we fool socket.socket into believing it already has an
        # open fd because on this path it doesn't actually verify the family and
        # type and populates the socket object.
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        fd = sock.detach()
        unknown_family = max(socket.AddressFamily.__members__.values()) + 1

        unknown_type = max(
            kind
            for name, kind in socket.SocketKind.__members__.items()
            if name not in {'SOCK_NONBLOCK', 'SOCK_CLOEXEC'}
        ) + 1

        with socket.socket(
                family=unknown_family, type=unknown_type, proto=23,
                fileno=fd) as s:
            self.assertEqual(s.family, unknown_family)
            self.assertEqual(s.type, unknown_type)
            # some OS like macOS ignore proto
            self.assertIn(s.proto, {0, 23})
    @unittest.skipUnless(hasattr(os, 'sendfile'), 'test needs os.sendfile()')
    def test__sendfile_use_sendfile(self):
        """_sendfile_use_sendfile() gives up or errors on unusable file objects."""
        # Minimal object exposing only fileno(), to control the fd value.
        class File:
            def __init__(self, fd):
                self.fd = fd

            def fileno(self):
                return self.fd
        with socket.socket() as sock:
            fd = os.open(os.curdir, os.O_RDONLY)
            os.close(fd)
            # a dead fd must trigger the fall-back signal, not a hard error
            with self.assertRaises(socket._GiveupOnSendfile):
                sock._sendfile_use_sendfile(File(fd))
            with self.assertRaises(OverflowError):
                sock._sendfile_use_sendfile(File(2**1000))
            with self.assertRaises(TypeError):
                sock._sendfile_use_sendfile(File(None))
    def _test_socket_fileno(self, s, family, stype):
        """Helper: rewrap s's fd via fileno= and check family/type carry over."""
        self.assertEqual(s.family, family)
        self.assertEqual(s.type, stype)

        fd = s.fileno()
        s2 = socket.socket(fileno=fd)
        self.addCleanup(s2.close)
        # detach old fd to avoid double close
        s.detach()
        self.assertEqual(s2.family, family)
        self.assertEqual(s2.type, stype)
        self.assertEqual(s2.fileno(), fd)
def test_socket_fileno(self):
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_STREAM)
if hasattr(socket, "SOCK_DGRAM"):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOST, 0))
self._test_socket_fileno(s, socket.AF_INET, socket.SOCK_DGRAM)
if socket_helper.IPV6_ENABLED:
s = socket.socket(socket.AF_INET6, socket.SOCK_STREAM)
self.addCleanup(s.close)
s.bind((socket_helper.HOSTv6, 0, 0, 0))
self._test_socket_fileno(s, socket.AF_INET6, socket.SOCK_STREAM)
if hasattr(socket, "AF_UNIX"):
unix_name = socket_helper.create_unix_domain_name()
self.addCleanup(os_helper.unlink, unix_name)
s = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
with s:
try:
s.bind(unix_name)
except PermissionError:
pass
else:
self._test_socket_fileno(s, socket.AF_UNIX,
socket.SOCK_STREAM)
def test_socket_fileno_rejects_float(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=42.5)
def test_socket_fileno_rejects_other_types(self):
with self.assertRaises(TypeError):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno="foo")
def test_socket_fileno_rejects_invalid_socket(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-1)
@unittest.skipIf(os.name == "nt", "Windows disallows -1 only")
def test_socket_fileno_rejects_negative(self):
with self.assertRaisesRegex(ValueError, "negative file descriptor"):
socket.socket(socket.AF_INET, socket.SOCK_STREAM, fileno=-42)
def test_socket_fileno_requires_valid_fd(self):
WSAENOTSOCK = 10038
with self.assertRaises(OSError) as cm:
socket.socket(fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=os_helper.make_bad_fd())
self.assertIn(cm.exception.errno, (errno.EBADF, WSAENOTSOCK))
def test_socket_fileno_requires_socket_fd(self):
with tempfile.NamedTemporaryFile() as afile:
with self.assertRaises(OSError):
socket.socket(fileno=afile.fileno())
with self.assertRaises(OSError) as cm:
socket.socket(
socket.AF_INET,
socket.SOCK_STREAM,
fileno=afile.fileno())
self.assertEqual(cm.exception.errno, errno.ENOTSOCK)
def test_addressfamily_enum(self):
import _socket, enum
CheckedAddressFamily = enum._old_convert_(
enum.IntEnum, 'AddressFamily', 'socket',
lambda C: C.isupper() and C.startswith('AF_'),
source=_socket,
)
enum._test_simple_enum(CheckedAddressFamily, socket.AddressFamily)
def test_socketkind_enum(self):
import _socket, enum
CheckedSocketKind = enum._old_convert_(
enum.IntEnum, 'SocketKind', 'socket',
lambda C: C.isupper() and C.startswith('SOCK_'),
source=_socket,
)
enum._test_simple_enum(CheckedSocketKind, socket.SocketKind)
def test_msgflag_enum(self):
import _socket, enum
CheckedMsgFlag = enum._old_convert_(
enum.IntFlag, 'MsgFlag', 'socket',
lambda C: C.isupper() and C.startswith('MSG_'),
source=_socket,
)
enum._test_simple_enum(CheckedMsgFlag, socket.MsgFlag)
def test_addressinfo_enum(self):
import _socket, enum
CheckedAddressInfo = enum._old_convert_(
enum.IntFlag, 'AddressInfo', 'socket',
lambda C: C.isupper() and C.startswith('AI_'),
source=_socket)
enum._test_simple_enum(CheckedAddressInfo, socket.AddressInfo)
@unittest.skipUnless(HAVE_SOCKET_CAN, 'SocketCan required for this test.')
| GeneralModuleTests |
python | google__jax | jax/experimental/jax2tf/tests/flax_models/bilstm_classifier.py | {
"start": 5533,
"end": 6133
} | class ____(nn.Module):
"""A simple unidirectional LSTM."""
@functools.partial(
nn.transforms.scan,
variable_broadcast='params',
in_axes=1, out_axes=1,
split_rngs={'params': False})
@nn.compact
def __call__(self, carry, x):
return nn.OptimizedLSTMCell(features=carry[0].shape[-1])(carry, x)
@staticmethod
def initialize_carry(batch_dims, hidden_size):
# Use fixed random key since default state init fn is just zeros.
return nn.OptimizedLSTMCell(hidden_size, parent=None).initialize_carry(
jax.random.PRNGKey(0), (batch_dims, 1)
)
| SimpleLSTM |
python | catalyst-team__catalyst | catalyst/callbacks/soft_update.py | {
"start": 140,
"end": 3382
} | class ____(Callback):
"""Callback to update `target` data inside `runner.model` with the `source`
data inside `runner.model` one smoothing by ``tau`` (inplace operation).
Args:
target_model: key to the data inside `runner.model` to update
source_model: key to the source data inside `runner.model`
tau: smoothing parameter `target * (1.0 - tau) + source * tau`
scope (str): when the `target` should be updated
``"on_batch_end"``
``"on_batch_start"``
``"on_epoch_end"``
``"on_epoch_start"``
Raises:
TypeError: if invalid scope
"""
def __init__(
self, target_model: str, source_model: str, tau: float, scope: str
) -> None:
"""Init."""
super().__init__(order=CallbackOrder.External)
self.target_model = target_model
self.source_model = source_model
self.tau = tau
if isinstance(scope, str) and scope in [
"on_batch_end",
"on_batch_start",
"on_epoch_end",
"on_epoch_start",
]:
self.scope = scope
else:
raise TypeError(
"""Expected scope to be on of the: [
"on_batch_end",
"on_batch_start",
"on_epoch_end",
"on_epoch_start"]"""
)
def on_experiment_start(self, runner: "IRunner") -> None:
"""Event handler."""
assert self.target_model in runner.model, (
f"Could not find speficied target model ({self.target_model}) "
"within available runner models ({runner.model.keys()})"
)
assert self.source_model in runner.model, (
f"Could not find speficied target model ({self.source_model}) "
"within available runner models ({runner.model.keys()})"
)
def on_epoch_start(self, runner: "IRunner") -> None:
"""Event handler."""
if runner.is_train_loader and self.scope == "on_epoch_start":
soft_update(
runner.model[self.target_model],
runner.model[self.source_model],
self.tau,
)
def on_batch_start(self, runner: "IRunner") -> None:
"""Event handler."""
if runner.is_train_loader and self.scope == "on_batch_start":
soft_update(
runner.model[self.target_model],
runner.model[self.source_model],
self.tau,
)
def on_batch_end(self, runner: "IRunner") -> None:
"""Event handler."""
if runner.is_train_loader and self.scope == "on_batch_end":
soft_update(
runner.model[self.target_model],
runner.model[self.source_model],
self.tau,
)
def on_epoch_end(self, runner: "IRunner") -> None:
"""Event handler."""
if runner.is_train_loader and self.scope == "on_epoch_end":
soft_update(
runner.model[self.target_model],
runner.model[self.source_model],
self.tau,
)
__all__ = ["SoftUpdateCallaback"]
| SoftUpdateCallaback |
python | getsentry__sentry | tests/snuba/api/endpoints/test_organization_metrics_meta.py | {
"start": 4840,
"end": 10346
} | class ____(MetricsEnhancedPerformanceTestCase):
def setUp(self) -> None:
super().setUp()
self.min_ago = before_now(minutes=1)
self.two_min_ago = before_now(minutes=2)
self.features = {
"organizations:performance-use-metrics": True,
}
self.login_as(user=self.user)
# Don't create any txn on this, don't set its DS rules, it shouldn't show up anywhere
self.create_project()
def test_unparameterized_transactions(self) -> None:
# Make current project incompatible
self.store_transaction_metric(
1, tags={"transaction": "<< unparameterized >>"}, timestamp=self.min_ago
)
url = reverse(
"sentry-api-0-organization-metrics-compatibility-sums",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.json()["sum"]["metrics"] == 1
assert response.json()["sum"]["metrics_unparam"] == 1
assert response.json()["sum"]["metrics_null"] == 0
def test_null_transaction(self) -> None:
# Make current project incompatible
self.store_transaction_metric(1, tags={}, timestamp=self.min_ago)
url = reverse(
"sentry-api-0-organization-metrics-compatibility-sums",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.json()["sum"]["metrics"] == 1
assert response.json()["sum"]["metrics_unparam"] == 0
assert response.json()["sum"]["metrics_null"] == 1
def test_no_transaction(self) -> None:
# Make current project incompatible by having nothing
url = reverse(
"sentry-api-0-organization-metrics-compatibility-sums",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.json()["sum"]["metrics"] == 0
assert response.json()["sum"]["metrics_unparam"] == 0
assert response.json()["sum"]["metrics_null"] == 0
def test_has_transaction(self) -> None:
self.store_transaction_metric(
1, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago
)
url = reverse(
"sentry-api-0-organization-metrics-compatibility-sums",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.json()["sum"]["metrics"] == 1
assert response.json()["sum"]["metrics_unparam"] == 0
assert response.json()["sum"]["metrics_null"] == 0
def test_multiple_projects(self) -> None:
project2 = self.create_project()
project3 = self.create_project()
# Not setting DS, it shouldn't show up
project4 = self.create_project()
self.store_transaction_metric(
1, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago
)
self.store_transaction_metric(
1, tags={"transaction": "foo_transaction"}, timestamp=self.min_ago, project=project4.id
)
self.store_transaction_metric(
1,
tags={"transaction": "<< unparameterized >>"},
timestamp=self.min_ago,
project=project2.id,
)
self.store_transaction_metric(
1,
tags={},
timestamp=self.min_ago,
project=project3.id,
)
self.store_event(
data={"timestamp": self.min_ago.isoformat(), "transaction": "foo_transaction"},
project_id=self.project.id,
)
url = reverse(
"sentry-api-0-organization-metrics-compatibility-sums",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.json()["sum"]["metrics"] == 4
assert response.json()["sum"]["metrics_unparam"] == 1
assert response.json()["sum"]["metrics_null"] == 1
def test_counts_add_up_correctly(self) -> None:
# Make current project incompatible
for _ in range(2):
self.store_transaction_metric(
1, tags={"transaction": "<< unparameterized >>"}, timestamp=self.min_ago
)
for _ in range(3):
self.store_transaction_metric(1, tags={}, timestamp=self.min_ago)
for _ in range(1):
self.store_transaction_metric(1, tags={"transaction": "/foo"}, timestamp=self.min_ago)
url = reverse(
"sentry-api-0-organization-metrics-compatibility-sums",
kwargs={"organization_id_or_slug": self.project.organization.slug},
)
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.json()["sum"]["metrics"] == 6
assert response.json()["sum"]["metrics_unparam"] == 2
assert response.json()["sum"]["metrics_null"] == 3
| OrganizationEventsMetricsSums |
python | ansible__ansible | test/integration/targets/inventory/doc_fragments/fragment_with_expression.py | {
"start": 37,
"end": 256
} | class ____:
DOCUMENTATION = """
options:
fragment_expression:
description: a fragment hosted expression that must be trusted whose default resolves to 4
default: 2 + 2
"""
| ModuleDocFragment |
python | apache__airflow | providers/openlineage/tests/unit/openlineage/utils/test_utils.py | {
"start": 3050,
"end": 62746
} | class ____(EmptyOperator):
pass
@pytest.mark.db_test
def test_get_airflow_job_facet():
with DAG(dag_id="dag", schedule=None, start_date=datetime.datetime(2024, 6, 1)) as dag:
task_0 = BashOperator(task_id="task_0", bash_command="exit 0;")
with TaskGroup("section_1", prefix_group_id=True):
task_10 = PythonOperator(task_id="task_3", python_callable=lambda: 1)
task_0 >> task_10
dagrun_mock = MagicMock(DagRun)
dagrun_mock.dag = dag
result = get_airflow_job_facet(dagrun_mock)
assert result == {
"airflow": AirflowJobFacet(
taskTree={},
taskGroups={
"section_1": {
"parent_group": None,
"ui_color": "CornflowerBlue",
"ui_fgcolor": "#000",
"ui_label": "section_1",
}
},
tasks={
"task_0": {
"operator": f"{BASH_OPERATOR_PATH}.BashOperator",
"task_group": None,
"emits_ol_events": True,
"ui_color": "#f0ede4",
"ui_fgcolor": "#000",
"ui_label": "task_0",
"is_setup": False,
"is_teardown": False,
"downstream_task_ids": ["section_1.task_3"],
},
"section_1.task_3": {
"operator": f"{PYTHON_OPERATOR_PATH}.PythonOperator",
"task_group": "section_1",
"emits_ol_events": True,
"ui_color": "#ffefeb",
"ui_fgcolor": "#000",
"ui_label": "task_3",
"is_setup": False,
"is_teardown": False,
"downstream_task_ids": [],
},
},
)
}
@pytest.mark.db_test
def test_get_airflow_dag_run_facet():
with DAG(
dag_id="dag",
schedule="@once",
start_date=datetime.datetime(2024, 6, 1),
tags=["test"],
) as dag:
task_0 = BashOperator(task_id="task_0", bash_command="exit 0;")
with TaskGroup("section_1", prefix_group_id=True):
task_10 = PythonOperator(task_id="task_3", python_callable=lambda: 1)
task_0 >> task_10
dagrun_mock = MagicMock(DagRun)
dagrun_mock.dag = dag
dagrun_mock.conf = {}
dagrun_mock.clear_number = 0
dagrun_mock.dag_id = dag.dag_id
dagrun_mock.data_interval_start = datetime.datetime(2024, 6, 1, 1, 2, 3, tzinfo=datetime.timezone.utc)
dagrun_mock.data_interval_end = datetime.datetime(2024, 6, 1, 2, 3, 4, tzinfo=datetime.timezone.utc)
dagrun_mock.external_trigger = True
dagrun_mock.run_id = "manual_2024-06-01T00:00:00+00:00"
dagrun_mock.run_type = DagRunType.MANUAL
dagrun_mock.execution_date = datetime.datetime(2024, 6, 1, 1, 2, 4, tzinfo=datetime.timezone.utc)
dagrun_mock.logical_date = datetime.datetime(2024, 6, 1, 1, 2, 4, tzinfo=datetime.timezone.utc)
dagrun_mock.run_after = datetime.datetime(2024, 6, 1, 1, 2, 4, tzinfo=datetime.timezone.utc)
dagrun_mock.start_date = datetime.datetime(2024, 6, 1, 1, 2, 4, tzinfo=datetime.timezone.utc)
dagrun_mock.end_date = datetime.datetime(2024, 6, 1, 1, 2, 14, 34172, tzinfo=datetime.timezone.utc)
dagrun_mock.dag_versions = [
MagicMock(
bundle_name="bundle_name",
bundle_version="bundle_version",
id="version_id",
version_number="version_number",
)
]
result = get_airflow_dag_run_facet(dagrun_mock)
expected_dag_info = {
"dag_id": "dag",
"description": None,
"fileloc": pathlib.Path(__file__).resolve().as_posix(),
"owner": "airflow",
"timetable": {},
"timetable_summary": "@once",
"start_date": "2024-06-01T00:00:00+00:00",
"tags": "['test']",
"owner_links": {},
}
if hasattr(dag, "schedule_interval"): # Airflow 2 compat.
expected_dag_info["schedule_interval"] = "@once"
assert result == {
"airflowDagRun": AirflowDagRunFacet(
dag=expected_dag_info,
dagRun={
"conf": {},
"dag_id": "dag",
"data_interval_start": "2024-06-01T01:02:03+00:00",
"data_interval_end": "2024-06-01T02:03:04+00:00",
"external_trigger": True,
"run_id": "manual_2024-06-01T00:00:00+00:00",
"run_type": DagRunType.MANUAL,
"start_date": "2024-06-01T01:02:04+00:00",
"end_date": "2024-06-01T01:02:14.034172+00:00",
"duration": 10.034172,
"execution_date": "2024-06-01T01:02:04+00:00",
"logical_date": "2024-06-01T01:02:04+00:00",
"run_after": "2024-06-01T01:02:04+00:00",
"dag_bundle_name": "bundle_name",
"dag_bundle_version": "bundle_version",
"dag_version_id": "version_id",
"dag_version_number": "version_number",
},
)
}
@pytest.mark.parametrize(
("dag_run_attrs", "expected_duration"),
(
({"start_date": None, "end_date": None}, None),
({"start_date": datetime.datetime(2025, 1, 1), "end_date": None}, None),
({"start_date": None, "end_date": datetime.datetime(2025, 1, 1)}, None),
({"start_date": "2024-06-01T01:02:04+00:00", "end_date": "2024-06-01T01:02:14.034172+00:00"}, None),
(
{
"start_date": datetime.datetime(2025, 1, 1, 6, 1, 1, tzinfo=datetime.timezone.utc),
"end_date": datetime.datetime(2025, 1, 1, 6, 1, 12, 3456, tzinfo=datetime.timezone.utc),
},
11.003456,
),
),
)
def test_dag_run_duration(dag_run_attrs, expected_duration):
dag_run = MagicMock(**dag_run_attrs)
result = DagRunInfo.duration(dag_run)
assert result == expected_duration
def test_dag_run_version_no_versions():
dag_run = MagicMock()
del dag_run.dag_versions
result = DagRunInfo.dag_version_info(dag_run, "somekey")
assert result is None
@pytest.mark.parametrize("key", ["bundle_name", "bundle_version", "version_id", "version_number"])
@pytest.mark.db_test
def test_dag_run_version(key):
dagrun_mock = MagicMock(DagRun)
dagrun_mock.dag_versions = [
MagicMock(
bundle_name="bundle_name",
bundle_version="bundle_version",
id="version_id",
version_number="version_number",
)
]
result = DagRunInfo.dag_version_info(dagrun_mock, key)
assert result == key
def test_get_fully_qualified_class_name_serialized_operator():
op_module_path = BASH_OPERATOR_PATH
op_name = "BashOperator"
op = BashOperator(task_id="test", bash_command="echo 1")
op_path_before_serialization = get_fully_qualified_class_name(op)
assert op_path_before_serialization == f"{op_module_path}.{op_name}"
serialized = SerializedBaseOperator.serialize_operator(op)
deserialized = SerializedBaseOperator.deserialize_operator(serialized)
op_path_after_deserialization = get_fully_qualified_class_name(deserialized)
assert op_path_after_deserialization == f"{op_module_path}.{op_name}"
assert deserialized._task_module == op_module_path
assert deserialized.task_type == op_name
def test_get_fully_qualified_class_name_mapped_operator():
mapped = MockOperator.partial(task_id="task_2").expand(arg2=["a", "b", "c"])
mapped_op_path = get_fully_qualified_class_name(mapped)
assert mapped_op_path == "tests_common.test_utils.mock_operators.MockOperator"
def test_get_fully_qualified_class_name_bash_operator():
result = get_fully_qualified_class_name(BashOperator(task_id="test", bash_command="echo 0;"))
expected_result = f"{BASH_OPERATOR_PATH}.BashOperator"
assert result == expected_result
def test_truncate_string_to_byte_size_ascii_below_limit():
s = "A" * (_MAX_DOC_BYTES - 500)
result = _truncate_string_to_byte_size(s)
assert result == s
assert len(result.encode("utf-8")) == _MAX_DOC_BYTES - 500
def test_truncate_string_to_byte_size_ascii_exact_limit():
s = "A" * _MAX_DOC_BYTES
result = _truncate_string_to_byte_size(s)
assert result == s
assert len(result.encode("utf-8")) == _MAX_DOC_BYTES
def test_truncate_string_to_byte_size_ascii_over_limit():
s = "A" * (_MAX_DOC_BYTES + 10)
result = _truncate_string_to_byte_size(s)
assert len(result.encode("utf-8")) == _MAX_DOC_BYTES
assert result == s[:_MAX_DOC_BYTES] # Each ASCII char = 1 byte
def test_truncate_string_to_byte_size_utf8_multibyte_under_limit():
emoji = "🧠"
s = emoji * 1000 # Each emoji is 4 bytes, total 4000 bytes
result = _truncate_string_to_byte_size(s)
assert result == s
assert len(result.encode("utf-8")) <= _MAX_DOC_BYTES
def test_truncate_string_to_byte_size_utf8_multibyte_truncation():
emoji = "🧠"
full = emoji * (_MAX_DOC_BYTES // 4 + 10)
result = _truncate_string_to_byte_size(full)
result_bytes = result.encode("utf-8")
assert len(result_bytes) <= _MAX_DOC_BYTES
assert result_bytes.decode("utf-8") == result # still valid UTF-8
# Ensure we didn't include partial emoji
assert result.endswith(emoji)
def test_truncate_string_to_byte_size_split_multibyte_character():
s = "A" * 10 + "🧠"
encoded = s.encode("utf-8")
# Chop in the middle of the emoji (🧠 = 4 bytes)
partial = encoded[:-2]
result = _truncate_string_to_byte_size(s, max_size=len(partial))
assert "🧠" not in result
assert result == "A" * 10 # emoji should be dropped
def test_truncate_string_to_byte_size_empty_string():
result = _truncate_string_to_byte_size("")
assert result == ""
def test_truncate_string_to_byte_size_exact_multibyte_fit():
emoji = "🚀"
num = _MAX_DOC_BYTES // len(emoji.encode("utf-8"))
s = emoji * num
result = _truncate_string_to_byte_size(s)
assert result == s
assert len(result.encode("utf-8")) <= _MAX_DOC_BYTES
def test_truncate_string_to_byte_size_null_characters():
s = "\x00" * (_MAX_DOC_BYTES + 10)
result = _truncate_string_to_byte_size(s)
assert len(result.encode("utf-8")) == _MAX_DOC_BYTES
assert all(c == "\x00" for c in result)
def test_truncate_string_to_byte_size_non_bmp_characters():
# Characters like '𝄞' (U+1D11E) are >2 bytes in UTF-8
s = "𝄞" * 1000
result = _truncate_string_to_byte_size(s)
assert len(result.encode("utf-8")) <= _MAX_DOC_BYTES
assert result.encode("utf-8").decode("utf-8") == result
@pytest.mark.parametrize(
("operator", "expected_doc", "expected_mime_type"),
[
(None, None, None),
(MagicMock(doc=None, doc_md=None, doc_json=None, doc_yaml=None, doc_rst=None), None, None),
(MagicMock(doc="Test doc"), "Test doc", "text/plain"),
(MagicMock(doc_md="test.md", doc=None), "test.md", "text/markdown"),
(
MagicMock(doc_json='{"key": "value"}', doc=None, doc_md=None),
'{"key": "value"}',
"application/json",
),
(
MagicMock(doc_yaml="key: value", doc_json=None, doc=None, doc_md=None),
"key: value",
"application/x-yaml",
),
(
MagicMock(doc_rst="Test RST", doc_yaml=None, doc_json=None, doc=None, doc_md=None),
"Test RST",
"text/x-rst",
),
],
)
def test_get_task_documentation(operator, expected_doc, expected_mime_type):
result_doc, result_mime_type = get_task_documentation(operator)
assert result_doc == expected_doc
assert result_mime_type == expected_mime_type
def test_get_task_documentation_serialized_operator():
op = BashOperator(task_id="test", bash_command="echo 1", doc="some_doc")
op_doc_before_serialization = get_task_documentation(op)
assert op_doc_before_serialization == ("some_doc", "text/plain")
serialized = SerializedBaseOperator.serialize_operator(op)
deserialized = SerializedBaseOperator.deserialize_operator(serialized)
op_doc_after_deserialization = get_task_documentation(deserialized)
assert op_doc_after_deserialization == ("some_doc", "text/plain")
def test_get_task_documentation_mapped_operator():
mapped = MockOperator.partial(task_id="task_2", doc_md="some_doc").expand(arg2=["a", "b", "c"])
mapped_op_doc = get_task_documentation(mapped)
assert mapped_op_doc == ("some_doc", "text/markdown")
def test_get_task_documentation_longer_than_allowed():
doc = "A" * (_MAX_DOC_BYTES + 10)
operator = MagicMock(doc=doc)
result_doc, result_mime_type = get_task_documentation(operator)
assert result_doc == "A" * _MAX_DOC_BYTES
assert result_mime_type == "text/plain"
@pytest.mark.parametrize(
("dag", "expected_doc", "expected_mime_type"),
[
(None, None, None),
(MagicMock(doc_md=None, description=None), None, None),
(MagicMock(doc_md="test.md", description=None), "test.md", "text/markdown"),
(MagicMock(doc_md="test.md", description="Description text"), "test.md", "text/markdown"),
(MagicMock(description="Description text", doc_md=None), "Description text", "text/plain"),
],
)
def test_get_dag_documentation(dag, expected_doc, expected_mime_type):
result_doc, result_mime_type = get_dag_documentation(dag)
assert result_doc == expected_doc
assert result_mime_type == expected_mime_type
def test_get_dag_documentation_longer_than_allowed():
doc = "A" * (_MAX_DOC_BYTES + 10)
dag = MagicMock(doc_md=doc, description=None)
result_doc, result_mime_type = get_dag_documentation(dag)
assert result_doc == "A" * _MAX_DOC_BYTES
assert result_mime_type == "text/markdown"
def test_get_job_name():
task_instance = MagicMock(dag_id="example_dag", task_id="example_task")
expected_result = "example_dag.example_task"
assert get_job_name(task_instance) == expected_result
def test_get_job_name_empty_ids():
task_instance = MagicMock(dag_id="", task_id="")
expected_result = "."
assert get_job_name(task_instance) == expected_result
def test_get_operator_class():
op_class = get_operator_class(BashOperator(task_id="test", bash_command="echo 0;"))
assert op_class == BashOperator
def test_get_operator_class_mapped_operator():
mapped = MockOperator.partial(task_id="task").expand(arg2=["a", "b", "c"])
op_class = get_operator_class(mapped)
assert op_class == MockOperator
@pytest.mark.parametrize("dr_conf", (None, {}))
def test_get_openlineage_data_from_dagrun_conf_none_conf(dr_conf):
_dr_conf = None if dr_conf is None else {}
assert _get_openlineage_data_from_dagrun_conf(dr_conf) == {}
assert dr_conf == _dr_conf # Assert conf is not changed
def test_get_openlineage_data_from_dagrun_conf_no_openlineage_key():
dr_conf = {"something_else": {"a": 1}}
assert _get_openlineage_data_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"something_else": {"a": 1}} # Assert conf is not changed
def test_get_openlineage_data_from_dagrun_conf_invalid_type():
dr_conf = {"openlineage": "not_a_dict"}
assert _get_openlineage_data_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"openlineage": "not_a_dict"} # Assert conf is not changed
def test_get_openlineage_data_from_dagrun_conf_valid_dict():
dr_conf = {"openlineage": {"key": "value"}}
assert _get_openlineage_data_from_dagrun_conf(dr_conf) == {"key": "value"}
assert dr_conf == {"openlineage": {"key": "value"}} # Assert conf is not changed
@pytest.mark.parametrize("dr_conf", (None, {}))
def test_get_parent_information_from_dagrun_conf_no_conf(dr_conf):
_dr_conf = None if dr_conf is None else {}
assert get_parent_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == _dr_conf # Assert conf is not changed
def test_get_parent_information_from_dagrun_conf_no_openlineage():
dr_conf = {"something": "else"}
assert get_parent_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"something": "else"} # Assert conf is not changed
def test_get_parent_information_from_dagrun_conf_openlineage_not_dict():
dr_conf = {"openlineage": "my_value"}
assert get_parent_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"openlineage": "my_value"} # Assert conf is not changed
def test_get_parent_information_from_dagrun_conf_missing_keys():
dr_conf = {"openlineage": {"parentRunId": "id_only"}}
assert get_parent_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"openlineage": {"parentRunId": "id_only"}} # Assert conf is not changed
def test_get_parent_information_from_dagrun_conf_invalid_run_id():
dr_conf = {
"openlineage": {
"parentRunId": "not_uuid",
"parentJobNamespace": "ns",
"parentJobName": "jobX",
}
}
assert get_parent_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == { # Assert conf is not changed
"openlineage": {
"parentRunId": "not_uuid",
"parentJobNamespace": "ns",
"parentJobName": "jobX",
}
}
def test_get_parent_information_from_dagrun_conf_valid_data():
dr_conf = {
"openlineage": {
"parentRunId": "11111111-1111-1111-1111-111111111111",
"parentJobNamespace": "ns",
"parentJobName": "jobX",
}
}
expected = {
"parent_run_id": "11111111-1111-1111-1111-111111111111",
"parent_job_namespace": "ns",
"parent_job_name": "jobX",
}
assert get_parent_information_from_dagrun_conf(dr_conf) == expected
assert dr_conf == { # Assert conf is not changed
"openlineage": {
"parentRunId": "11111111-1111-1111-1111-111111111111",
"parentJobNamespace": "ns",
"parentJobName": "jobX",
}
}
@pytest.mark.parametrize("dr_conf", (None, {}))
def test_get_root_information_from_dagrun_conf_no_conf(dr_conf):
_dr_conf = None if dr_conf is None else {}
assert get_root_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == _dr_conf # Assert conf is not changed
def test_get_root_information_from_dagrun_conf_no_openlineage():
dr_conf = {"something": "else"}
assert get_root_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"something": "else"} # Assert conf is not changed
def test_get_root_information_from_dagrun_conf_openlineage_not_dict():
dr_conf = {"openlineage": "my_value"}
assert get_root_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"openlineage": "my_value"} # Assert conf is not changed
def test_get_root_information_from_dagrun_conf_missing_keys():
dr_conf = {"openlineage": {"rootParentRunId": "id_only"}}
assert get_root_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == {"openlineage": {"rootParentRunId": "id_only"}} # Assert conf is not changed
def test_get_root_information_from_dagrun_conf_invalid_run_id():
dr_conf = {
"openlineage": {
"rootParentRunId": "not_uuid",
"rootParentJobNamespace": "ns",
"rootParentJobName": "jobX",
}
}
assert get_root_information_from_dagrun_conf(dr_conf) == {}
assert dr_conf == { # Assert conf is not changed
"openlineage": {
"rootParentRunId": "not_uuid",
"rootParentJobNamespace": "ns",
"rootParentJobName": "jobX",
}
}
def test_get_root_information_from_dagrun_conf_valid_data():
dr_conf = {
"openlineage": {
"rootParentRunId": "11111111-1111-1111-1111-111111111111",
"rootParentJobNamespace": "ns",
"rootParentJobName": "jobX",
}
}
expected = {
"root_parent_run_id": "11111111-1111-1111-1111-111111111111",
"root_parent_job_namespace": "ns",
"root_parent_job_name": "jobX",
}
assert get_root_information_from_dagrun_conf(dr_conf) == expected
assert dr_conf == { # Assert conf is not changed
"openlineage": {
"rootParentRunId": "11111111-1111-1111-1111-111111111111",
"rootParentJobNamespace": "ns",
"rootParentJobName": "jobX",
}
}
@pytest.mark.parametrize("dr_conf", (None, {}))
def test_get_dag_parent_run_facet_no_conf(dr_conf):
_dr_conf = None if dr_conf is None else {}
assert get_dag_parent_run_facet(dr_conf) == {}
assert dr_conf == _dr_conf # Assert conf is not changed
def test_get_dag_parent_run_facet_missing_keys():
dr_conf = {"openlineage": {"parentRunId": "11111111-1111-1111-1111-111111111111"}}
assert get_dag_parent_run_facet(dr_conf) == {}
# Assert conf is not changed
assert dr_conf == {"openlineage": {"parentRunId": "11111111-1111-1111-1111-111111111111"}}
def test_get_dag_parent_run_facet_valid_no_root():
dr_conf = {
"openlineage": {
"parentRunId": "11111111-1111-1111-1111-111111111111",
"parentJobNamespace": "ns",
"parentJobName": "jobA",
}
}
result = get_dag_parent_run_facet(dr_conf)
parent_facet = result.get("parent")
assert isinstance(parent_facet, parent_run.ParentRunFacet)
assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
assert parent_facet.job.namespace == "ns"
assert parent_facet.job.name == "jobA"
assert parent_facet.root is not None # parent is used as root, since root is missing
assert parent_facet.root.run.runId == "11111111-1111-1111-1111-111111111111"
assert parent_facet.root.job.namespace == "ns"
assert parent_facet.root.job.name == "jobA"
assert dr_conf == { # Assert conf is not changed
"openlineage": {
"parentRunId": "11111111-1111-1111-1111-111111111111",
"parentJobNamespace": "ns",
"parentJobName": "jobA",
}
}
def test_get_dag_parent_run_facet_invalid_uuid():
dr_conf = {
"openlineage": {
"parentRunId": "not_uuid",
"parentJobNamespace": "ns",
"parentJobName": "jobA",
}
}
result = get_dag_parent_run_facet(dr_conf)
assert result == {}
assert dr_conf == { # Assert conf is not changed
"openlineage": {
"parentRunId": "not_uuid",
"parentJobNamespace": "ns",
"parentJobName": "jobA",
}
}
def test_get_dag_parent_run_facet_valid_with_root():
dr_conf = {
"openlineage": {
"parentRunId": "11111111-1111-1111-1111-111111111111",
"parentJobNamespace": "ns",
"parentJobName": "jobA",
"rootParentRunId": "22222222-2222-2222-2222-222222222222",
"rootParentJobNamespace": "rootns",
"rootParentJobName": "rootjob",
}
}
result = get_dag_parent_run_facet(dr_conf)
parent_facet = result.get("parent")
assert isinstance(parent_facet, parent_run.ParentRunFacet)
assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
assert parent_facet.job.namespace == "ns"
assert parent_facet.job.name == "jobA"
assert parent_facet.root is not None
assert parent_facet.root.run.runId == "22222222-2222-2222-2222-222222222222"
assert parent_facet.root.job.namespace == "rootns"
assert parent_facet.root.job.name == "rootjob"
assert dr_conf == { # Assert conf is not changed
"openlineage": {
"parentRunId": "11111111-1111-1111-1111-111111111111",
"parentJobNamespace": "ns",
"parentJobName": "jobA",
"rootParentRunId": "22222222-2222-2222-2222-222222222222",
"rootParentJobNamespace": "rootns",
"rootParentJobName": "rootjob",
}
}
def test_get_task_parent_run_facet_defaults():
    """Test default behavior with minimal parameters - parent is used as root with default namespace."""
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    # With no explicit namespace argument, the configured default namespace() is used.
    assert parent_facet.job.namespace == namespace()
    assert parent_facet.job.name == "jobA"
    # Root should default to parent values when no root info is provided
    assert parent_facet.root.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.root.job.namespace == namespace()
    assert parent_facet.root.job.name == "jobA"
def test_get_task_parent_run_facet_custom_root_values():
    """Test with all explicit root parameters provided - root should use the provided values."""
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        root_parent_run_id="22222222-2222-2222-2222-222222222222",
        root_parent_job_name="rjob",
        root_parent_job_namespace="rns",
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # All three root_* arguments were supplied, so the root is fully independent of the parent.
    assert parent_facet.root.run.runId == "22222222-2222-2222-2222-222222222222"
    assert parent_facet.root.job.namespace == "rns"
    assert parent_facet.root.job.name == "rjob"
def test_get_task_parent_run_facet_partial_root_info_ignored():
    """Test that incomplete explicit root identifiers are ignored - root defaults to parent."""
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        root_parent_run_id="22222222-2222-2222-2222-222222222222",  # Only run_id provided
        # Missing root_parent_job_name and root_parent_job_namespace
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should default to parent since incomplete root info was ignored
    # (all three of run_id / job_name / job_namespace are required for an explicit root).
    assert parent_facet.root.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.root.job.namespace == "ns"
    assert parent_facet.root.job.name == "jobA"
def test_get_task_parent_run_facet_with_empty_dr_conf():
    """Test with empty dr_conf - root should default to function parent parameters."""
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf={},
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should default to parent: an empty dr_conf contributes no root or parent hints.
    assert parent_facet.root.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.root.job.namespace == "ns"
    assert parent_facet.root.job.name == "jobA"
def test_get_task_parent_run_facet_with_dr_conf_root_info():
    """Test with dr_conf containing root information - root should use values from dr_conf."""
    dr_conf = {
        "openlineage": {
            "rootParentRunId": "22222222-2222-2222-2222-222222222222",
            "rootParentJobNamespace": "rootns",
            "rootParentJobName": "rootjob",
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should use values from dr_conf; the parent arguments stay untouched.
    assert parent_facet.root.run.runId == "22222222-2222-2222-2222-222222222222"
    assert parent_facet.root.job.namespace == "rootns"
    assert parent_facet.root.job.name == "rootjob"
def test_get_task_parent_run_facet_with_dr_conf_parent_info_only():
    """Test with dr_conf containing only parent information - parent info is used as root fallback."""
    dr_conf = {
        "openlineage": {
            "parentRunId": "33333333-3333-3333-3333-333333333333",
            "parentJobNamespace": "conf_parent_ns",
            "parentJobName": "conf_parent_job",
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should use parent info from dr_conf as fallback
    # (fallback chain pinned here: dr_conf root > dr_conf parent > function arguments).
    assert parent_facet.root.run.runId == "33333333-3333-3333-3333-333333333333"
    assert parent_facet.root.job.namespace == "conf_parent_ns"
    assert parent_facet.root.job.name == "conf_parent_job"
def test_get_task_parent_run_facet_with_dr_conf_both_parent_and_root():
    """Test with dr_conf containing both root and parent information - root info takes precedence."""
    dr_conf = {
        "openlineage": {
            "parentRunId": "33333333-3333-3333-3333-333333333333",
            "parentJobNamespace": "conf_parent_ns",
            "parentJobName": "conf_parent_job",
            "rootParentRunId": "44444444-4444-4444-4444-444444444444",
            "rootParentJobNamespace": "conf_root_ns",
            "rootParentJobName": "conf_root_job",
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should use explicit root info from dr_conf; the dr_conf parent* keys are ignored here.
    assert parent_facet.root.run.runId == "44444444-4444-4444-4444-444444444444"
    assert parent_facet.root.job.namespace == "conf_root_ns"
    assert parent_facet.root.job.name == "conf_root_job"
def test_get_task_parent_run_facet_with_dr_conf_incomplete_root():
    """Test with dr_conf containing incomplete root information - root defaults to function parent."""
    dr_conf = {
        "openlineage": {
            "rootParentRunId": "22222222-2222-2222-2222-222222222222",
            # Missing rootParentJobNamespace and rootParentJobName
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should default to parent since dr_conf root info is incomplete
    # (and no dr_conf parent* fallback exists either).
    assert parent_facet.root.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.root.job.namespace == "ns"
    assert parent_facet.root.job.name == "jobA"
def test_get_task_parent_run_facet_with_dr_conf_invalid_root_uuid():
    """Test with dr_conf containing invalid root UUID - validation fails, root defaults to parent."""
    dr_conf = {
        "openlineage": {
            "rootParentRunId": "not_a_valid_uuid",
            "rootParentJobNamespace": "rootns",
            "rootParentJobName": "rootjob",
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should default to parent since dr_conf root UUID is invalid,
    # even though the other two root* keys are present and well-formed.
    assert parent_facet.root.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.root.job.namespace == "ns"
    assert parent_facet.root.job.name == "jobA"
def test_get_task_parent_run_facet_explicit_root_overrides_dr_conf():
    """Test that explicitly provided root parameters take precedence over dr_conf values."""
    dr_conf = {
        "openlineage": {
            "rootParentRunId": "99999999-9999-9999-9999-999999999999",
            "rootParentJobNamespace": "conf_rootns",
            "rootParentJobName": "conf_rootjob",
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        root_parent_run_id="22222222-2222-2222-2222-222222222222",
        root_parent_job_name="explicit_rjob",
        root_parent_job_namespace="explicit_rns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should use explicitly provided values, not dr_conf
    # (function arguments outrank dr_conf in the resolution order).
    assert parent_facet.root.run.runId == "22222222-2222-2222-2222-222222222222"
    assert parent_facet.root.job.namespace == "explicit_rns"
    assert parent_facet.root.job.name == "explicit_rjob"
def test_get_task_parent_run_facet_partial_root_in_dr_conf_with_full_parent():
    """Test partial root + full parent in dr_conf - parent info is used as root fallback."""
    dr_conf = {
        "openlineage": {
            "parentRunId": "33333333-3333-3333-3333-333333333333",
            "parentJobNamespace": "conf_parent_ns",
            "parentJobName": "conf_parent_job",
            "rootParentRunId": "44444444-4444-4444-4444-444444444444",
            # Missing rootParentJobNamespace and rootParentJobName
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should use parent info from dr_conf since root info is incomplete;
    # an incomplete root* triple is discarded entirely, not merged.
    assert parent_facet.root is not None
    assert parent_facet.root.run.runId == "33333333-3333-3333-3333-333333333333"
    assert parent_facet.root.job.namespace == "conf_parent_ns"
    assert parent_facet.root.job.name == "conf_parent_job"
def test_get_task_parent_run_facet_partial_root_and_partial_parent_in_dr_conf():
    """Test both root and parent incomplete in dr_conf - root defaults to function parent."""
    dr_conf = {
        "openlineage": {
            "parentRunId": "33333333-3333-3333-3333-333333333333",
            # Missing parentJobNamespace and parentJobName
            "rootParentRunId": "44444444-4444-4444-4444-444444444444",
            # Missing rootParentJobNamespace and rootParentJobName
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should default to function parent since both dr_conf root and parent are incomplete
    # (a bare run id without namespace/name never qualifies).
    assert parent_facet.root is not None
    assert parent_facet.root.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.root.job.namespace == "ns"
    assert parent_facet.root.job.name == "jobA"
def test_get_task_parent_run_facet_invalid_root_uuid_with_valid_parent_in_dr_conf():
    """Test invalid root UUID with valid parent in dr_conf - parent info used as root fallback."""
    dr_conf = {
        "openlineage": {
            "parentRunId": "33333333-3333-3333-3333-333333333333",
            "parentJobNamespace": "conf_parent_ns",
            "parentJobName": "conf_parent_job",
            "rootParentRunId": "not_a_valid_uuid",
            "rootParentJobNamespace": "conf_root_ns",
            "rootParentJobName": "conf_root_job",
        }
    }
    result = get_task_parent_run_facet(
        parent_run_id="11111111-1111-1111-1111-111111111111",
        parent_job_name="jobA",
        parent_job_namespace="ns",
        dr_conf=dr_conf,
    )
    parent_facet = result.get("parent")
    assert isinstance(parent_facet, parent_run.ParentRunFacet)
    assert parent_facet.run.runId == "11111111-1111-1111-1111-111111111111"
    assert parent_facet.job.namespace == "ns"
    assert parent_facet.job.name == "jobA"
    # Root should use parent info from dr_conf since root UUID is invalid;
    # UUID validation failure demotes the whole root* triple in favor of the parent fallback.
    assert parent_facet.root is not None
    assert parent_facet.root.run.runId == "33333333-3333-3333-3333-333333333333"
    assert parent_facet.root.job.namespace == "conf_parent_ns"
    assert parent_facet.root.job.name == "conf_parent_job"
def test_get_tasks_details():
    """_get_tasks_details flattens every task in the DAG (regular, mapped, TaskFlow and
    task-group tasks) into a dict keyed by full task_id, capturing the operator path,
    UI attributes, setup/teardown flags, OpenLineage emission and downstream task ids."""
    class TestMappedOperator(BaseOperator):
        def __init__(self, value, **kwargs):
            super().__init__(**kwargs)
            self.value = value
        def execute(self, context):
            return self.value + 1
    @task
    def generate_list() -> list:
        return [1, 2, 3]
    @task
    def process_item(item: int) -> int:
        return item * 2
    @task
    def sum_values(values: list[int]) -> int:
        return sum(values)
    with DAG(dag_id="dag", schedule=None, start_date=datetime.datetime(2024, 6, 1)) as dag:
        task_ = CustomOperatorForTest(task_id="task", bash_command="exit 0;")
        task_0 = BashOperator(task_id="task_0", bash_command="exit 0;")
        task_1 = CustomOperatorFromEmpty(task_id="task_1")
        task_2 = PythonOperator(task_id="task_2", python_callable=lambda: 1)
        task_3 = BashOperator(task_id="task_3", bash_command="exit 0;")
        task_4 = EmptyOperator(task_id="task_4.test.dot")
        task_5 = BashOperator(task_id="task_5", bash_command="exit 0;")
        task_6 = TestMappedOperator.partial(task_id="task_6").expand(value=[1, 2])
        list_result = generate_list()
        processed_results = process_item.expand(item=list_result)
        result_sum = sum_values(processed_results)  # noqa: F841
        with TaskGroup("section_1", prefix_group_id=True) as tg:
            task_10 = PythonOperator(task_id="task_3", python_callable=lambda: 1)
            with TaskGroup("section_2", parent_group=tg) as tg2:
                task_11 = EmptyOperator(task_id="task_11")  # noqa: F841
                with TaskGroup("section_3", parent_group=tg2):
                    task_12 = PythonOperator(task_id="task_12", python_callable=lambda: 1)
        task_ >> [task_2, task_6]
        task_0 >> [task_2, task_1] >> task_3 >> [task_4, task_5]
        task_1 >> task_6 >> task_3 >> task_4 >> task_5
        task_3 >> task_10 >> task_12
    # TaskFlow-decorated tasks resolve to a different operator module path on Airflow 3.
    py_decorator_path = (
        "airflow.providers.standard.decorators.python._PythonDecoratedOperator"
        if AIRFLOW_V_3_0_PLUS
        else "airflow.decorators.python._PythonDecoratedOperator"
    )
    # Empty-derived operators are expected to be marked emits_ol_events=False below.
    expected = {
        "generate_list": {
            "emits_ol_events": True,
            "is_setup": False,
            "is_teardown": False,
            "operator": py_decorator_path,
            "task_group": None,
            "ui_color": "#ffefeb",
            "ui_fgcolor": "#000",
            "ui_label": "generate_list",
            "downstream_task_ids": [
                "process_item",
            ],
        },
        "process_item": {
            "emits_ol_events": True,
            "is_setup": False,
            "is_teardown": False,
            "operator": py_decorator_path,
            "task_group": None,
            "ui_color": "#ffefeb",
            "ui_fgcolor": "#000",
            "ui_label": "process_item",
            "downstream_task_ids": [
                "sum_values",
            ],
        },
        "sum_values": {
            "emits_ol_events": True,
            "is_setup": False,
            "is_teardown": False,
            "operator": py_decorator_path,
            "task_group": None,
            "ui_color": "#ffefeb",
            "ui_fgcolor": "#000",
            "ui_label": "sum_values",
            "downstream_task_ids": [],
        },
        "task": {
            "operator": "unit.openlineage.utils.test_utils.CustomOperatorForTest",
            "task_group": None,
            "emits_ol_events": True,
            "ui_color": CustomOperatorForTest.ui_color,
            "ui_fgcolor": CustomOperatorForTest.ui_fgcolor,
            "ui_label": "task",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [
                "task_2",
                "task_6",
            ],
        },
        "task_0": {
            "operator": f"{BASH_OPERATOR_PATH}.BashOperator",
            "task_group": None,
            "emits_ol_events": True,
            "ui_color": BashOperator.ui_color,
            "ui_fgcolor": BashOperator.ui_fgcolor,
            "ui_label": "task_0",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [
                "task_1",
                "task_2",
            ],
        },
        "task_1": {
            "operator": "unit.openlineage.utils.test_utils.CustomOperatorFromEmpty",
            "task_group": None,
            "emits_ol_events": False,
            "ui_color": CustomOperatorFromEmpty.ui_color,
            "ui_fgcolor": CustomOperatorFromEmpty.ui_fgcolor,
            "ui_label": "task_1",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [
                "task_3",
                "task_6",
            ],
        },
        "task_2": {
            "operator": f"{PYTHON_OPERATOR_PATH}.PythonOperator",
            "task_group": None,
            "emits_ol_events": True,
            "ui_color": PythonOperator.ui_color,
            "ui_fgcolor": PythonOperator.ui_fgcolor,
            "ui_label": "task_2",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [
                "task_3",
            ],
        },
        "task_3": {
            "operator": f"{BASH_OPERATOR_PATH}.BashOperator",
            "task_group": None,
            "emits_ol_events": True,
            "ui_color": BashOperator.ui_color,
            "ui_fgcolor": BashOperator.ui_fgcolor,
            "ui_label": "task_3",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [
                "section_1.task_3",
                "task_4.test.dot",
                "task_5",
            ],
        },
        "task_4.test.dot": {
            "operator": "airflow.providers.standard.operators.empty.EmptyOperator",
            "task_group": None,
            "emits_ol_events": False,
            "ui_color": EmptyOperator.ui_color,
            "ui_fgcolor": EmptyOperator.ui_fgcolor,
            "ui_label": "task_4.test.dot",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [
                "task_5",
            ],
        },
        "task_5": {
            "operator": f"{BASH_OPERATOR_PATH}.BashOperator",
            "task_group": None,
            "emits_ol_events": True,
            "ui_color": BashOperator.ui_color,
            "ui_fgcolor": BashOperator.ui_fgcolor,
            "ui_label": "task_5",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [],
        },
        "task_6": {
            "emits_ol_events": True,
            "is_setup": False,
            "is_teardown": False,
            "operator": "unit.openlineage.utils.test_utils.TestMappedOperator",
            "task_group": None,
            "ui_color": "#fff",
            "ui_fgcolor": "#000",
            "ui_label": "task_6",
            "downstream_task_ids": [
                "task_3",
            ],
        },
        "section_1.task_3": {
            "operator": f"{PYTHON_OPERATOR_PATH}.PythonOperator",
            "task_group": "section_1",
            "emits_ol_events": True,
            "ui_color": PythonOperator.ui_color,
            "ui_fgcolor": PythonOperator.ui_fgcolor,
            "ui_label": "task_3",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [
                "section_1.section_2.section_3.task_12",
            ],
        },
        "section_1.section_2.task_11": {
            "operator": "airflow.providers.standard.operators.empty.EmptyOperator",
            "task_group": "section_1.section_2",
            "emits_ol_events": False,
            "ui_color": EmptyOperator.ui_color,
            "ui_fgcolor": EmptyOperator.ui_fgcolor,
            "ui_label": "task_11",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [],
        },
        "section_1.section_2.section_3.task_12": {
            "operator": f"{PYTHON_OPERATOR_PATH}.PythonOperator",
            "task_group": "section_1.section_2.section_3",
            "emits_ol_events": True,
            "ui_color": PythonOperator.ui_color,
            "ui_fgcolor": PythonOperator.ui_fgcolor,
            "ui_label": "task_12",
            "is_setup": False,
            "is_teardown": False,
            "downstream_task_ids": [],
        },
    }
    result = _get_tasks_details(dag)
    assert result == expected
def test_get_tasks_details_empty_dag():
    """A DAG with no tasks produces an empty task-details mapping."""
    empty_dag = DAG("test_dag", schedule=None, start_date=datetime.datetime(2024, 6, 1))
    result = _get_tasks_details(empty_dag)
    assert result == {}
def test_get_tasks_large_dag():
    """Test how get_tasks behaves for a large dag with many dependent tasks."""
    with DAG("test", schedule=None) as dag:
        start = EmptyOperator(task_id="start")
        a = [
            start >> EmptyOperator(task_id=f"a_1_{i}") >> EmptyOperator(task_id=f"a_2_{i}")
            for i in range(200)
        ]
        middle = EmptyOperator(task_id="middle")
        b = [
            middle >> EmptyOperator(task_id=f"b_1_{i}") >> EmptyOperator(task_id=f"b_2_{i}")
            for i in range(200)
        ]
        middle2 = EmptyOperator(task_id="middle2")
        c = [
            middle2 >> EmptyOperator(task_id=f"c_1_{i}") >> EmptyOperator(task_id=f"c_2_{i}")
            for i in range(200)
        ]
        end = EmptyOperator(task_id="end")
        start >> a >> middle >> b >> middle2 >> c >> end
    result = _get_tasks_details(dag)
    # Each hub fans out to 400 tasks: 200 x_1_* downstreams set inside the comprehension
    # plus 200 x_2_* downstreams added by the `hub >> [list]` chain on the last line.
    expected_dependencies = {
        "start": 400,
        "middle": 400,
        "middle2": 400,
        "end": 0,
    }
    # 3 * 400 generated tasks + start, middle, middle2, end.
    assert len(result) == 1204
    for task_id, task_info in result.items():
        assert len(task_info["downstream_task_ids"]) == expected_dependencies.get(task_id, 1)
def test_get_task_groups_details():
    """Top-level task groups are reported keyed by group_id with no parent,
    regardless of their prefix_group_id setting."""
    with DAG("test_dag", schedule=None, start_date=datetime.datetime(2024, 6, 1)) as dag:
        with TaskGroup("tg1", prefix_group_id=True):
            task_1 = EmptyOperator(task_id="task_1")  # noqa: F841
        with TaskGroup("tg2", prefix_group_id=False):
            task = EmptyOperator(task_id="task_1")  # noqa: F841
        with TaskGroup("tg3"):
            task_2 = EmptyOperator(task_id="task_2")  # noqa: F841
    result = _get_task_groups_details(dag)
    expected = {
        "tg1": {
            "parent_group": None,
            "ui_color": "CornflowerBlue",
            "ui_fgcolor": "#000",
            "ui_label": "tg1",
        },
        "tg2": {
            "parent_group": None,
            "ui_color": "CornflowerBlue",
            "ui_fgcolor": "#000",
            "ui_label": "tg2",
        },
        "tg3": {
            "parent_group": None,
            "ui_color": "CornflowerBlue",
            "ui_fgcolor": "#000",
            "ui_label": "tg3",
        },
    }
    assert result == expected
def test_get_task_groups_details_nested():
    """Nested task groups are keyed by their dotted, fully-qualified id and
    reference their immediate parent via `parent_group`."""
    with DAG("test_dag", schedule=None, start_date=datetime.datetime(2024, 6, 1)) as dag:
        with TaskGroup("tg1", prefix_group_id=True) as tg:
            with TaskGroup("tg2", parent_group=tg) as tg2:
                with TaskGroup("tg3", parent_group=tg2):
                    pass
    result = _get_task_groups_details(dag)
    expected = {
        "tg1": {
            "parent_group": None,
            "ui_color": "CornflowerBlue",
            "ui_fgcolor": "#000",
            "ui_label": "tg1",
        },
        "tg1.tg2": {
            "parent_group": "tg1",
            "ui_color": "CornflowerBlue",
            "ui_fgcolor": "#000",
            "ui_label": "tg2",
        },
        "tg1.tg2.tg3": {
            "parent_group": "tg1.tg2",
            "ui_color": "CornflowerBlue",
            "ui_fgcolor": "#000",
            "ui_label": "tg3",
        },
    }
    assert result == expected
def test_get_task_groups_details_no_task_groups():
    """A DAG without task groups yields an empty mapping."""
    dag_without_groups = DAG("test_dag", schedule=None, start_date=datetime.datetime(2024, 6, 1))
    result = _get_task_groups_details(dag_without_groups)
    assert result == {}
@patch("airflow.providers.openlineage.conf.custom_run_facets", return_value=set())
def test_get_user_provided_run_facets_with_no_function_definition(mock_custom_facet_funcs):
    """With no custom facet functions configured, no user-provided run facets are returned."""
    # Airflow 3 additionally requires ``dag_version_id`` when building a TaskInstance;
    # collapse the previously duplicated per-version construction into one call.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=EmptyOperator(
            task_id="test-task",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    assert result == {}
@patch(
    "airflow.providers.openlineage.conf.custom_run_facets",
    return_value={"unit.openlineage.utils.custom_facet_fixture.get_additional_test_facet"},
)
def test_get_user_provided_run_facets_with_function_definition(mock_custom_facet_funcs):
    """A single configured facet function contributes exactly its facet to the result."""
    # Airflow 3 additionally requires ``dag_version_id``; build the TI once instead of
    # duplicating the construction per Airflow version.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=EmptyOperator(
            task_id="test-task",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    assert len(result) == 1
    assert result["additional_run_facet"].name == f"test-lineage-namespace-{TaskInstanceState.RUNNING}"
    assert result["additional_run_facet"].cluster == "TEST_test-dag.test-task"
@patch(
    "airflow.providers.openlineage.conf.custom_run_facets",
    return_value={
        "unit.openlineage.utils.custom_facet_fixture.get_additional_test_facet",
    },
)
def test_get_user_provided_run_facets_with_return_value_as_none(mock_custom_facet_funcs):
    """A facet function returning None contributes nothing to the result."""
    # Airflow 3 additionally requires ``dag_version_id``; build the TI once instead of
    # duplicating the construction per Airflow version.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=BashOperator(
            task_id="test-task",
            bash_command="exit 0;",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    assert result == {}
@patch(
    "airflow.providers.openlineage.conf.custom_run_facets",
    return_value={
        "invalid_function",
        "unit.openlineage.utils.custom_facet_fixture.get_additional_test_facet",
        "unit.openlineage.utils.custom_facet_fixture.return_type_is_not_dict",
        "unit.openlineage.utils.custom_facet_fixture.get_another_test_facet",
    },
)
def test_get_user_provided_run_facets_with_multiple_function_definition(mock_custom_facet_funcs):
    """Invalid or wrongly-typed facet functions are skipped; the valid ones merge into the result."""
    # Airflow 3 additionally requires ``dag_version_id``; build the TI once instead of
    # duplicating the construction per Airflow version.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=EmptyOperator(
            task_id="test-task",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    # Only the two valid functions contribute facets.
    assert len(result) == 2
    assert result["additional_run_facet"].name == f"test-lineage-namespace-{TaskInstanceState.RUNNING}"
    assert result["additional_run_facet"].cluster == "TEST_test-dag.test-task"
    assert result["another_run_facet"] == {"name": "another-lineage-namespace"}
@patch(
    "airflow.providers.openlineage.conf.custom_run_facets",
    return_value={
        "unit.openlineage.utils.custom_facet_fixture.get_additional_test_facet",
        "unit.openlineage.utils.custom_facet_fixture.get_duplicate_test_facet_key",
    },
)
def test_get_user_provided_run_facets_with_duplicate_facet_keys(mock_custom_facet_funcs):
    """Two facet functions producing the same key yield a single facet, not two."""
    # Airflow 3 additionally requires ``dag_version_id``; build the TI once instead of
    # duplicating the construction per Airflow version.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=EmptyOperator(
            task_id="test-task",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    assert len(result) == 1
    assert result["additional_run_facet"].name == f"test-lineage-namespace-{TaskInstanceState.RUNNING}"
    assert result["additional_run_facet"].cluster == "TEST_test-dag.test-task"
@patch(
    "airflow.providers.openlineage.conf.custom_run_facets",
    return_value={"invalid_function"},
)
def test_get_user_provided_run_facets_with_invalid_function_definition(mock_custom_facet_funcs):
    """An unresolvable facet-function path is ignored, producing no facets."""
    # Airflow 3 additionally requires ``dag_version_id``; build the TI once instead of
    # duplicating the construction per Airflow version.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=EmptyOperator(
            task_id="test-task",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    assert result == {}
@patch(
    "airflow.providers.openlineage.conf.custom_run_facets",
    return_value={"providers.unit.openlineage.utils.custom_facet_fixture.return_type_is_not_dict"},
)
def test_get_user_provided_run_facets_with_wrong_return_type_function(mock_custom_facet_funcs):
    """A facet function with a non-dict return type contributes nothing."""
    # NOTE(review): this patch path carries a "providers." prefix unlike the sibling tests;
    # if that module path does not resolve, the empty result would pass vacuously — confirm.
    # Airflow 3 additionally requires ``dag_version_id``; build the TI once instead of
    # duplicating the construction per Airflow version.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=EmptyOperator(
            task_id="test-task",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    assert result == {}
@patch(
    "airflow.providers.openlineage.conf.custom_run_facets",
    return_value={"providers.unit.openlineage.utils.custom_facet_fixture.get_custom_facet_throws_exception"},
)
def test_get_user_provided_run_facets_with_exception(mock_custom_facet_funcs):
    """A facet function that raises is swallowed, producing no facets."""
    # NOTE(review): this patch path carries a "providers." prefix unlike the sibling tests;
    # if that module path does not resolve, the empty result would pass vacuously — confirm.
    # Airflow 3 additionally requires ``dag_version_id``; build the TI once instead of
    # duplicating the construction per Airflow version.
    version_kwargs = {"dag_version_id": mock.MagicMock()} if AIRFLOW_V_3_0_PLUS else {}
    sample_ti = TaskInstance(
        task=EmptyOperator(
            task_id="test-task",
            dag=DAG("test-dag", schedule=None, start_date=datetime.datetime(2024, 7, 1)),
        ),
        state="running",
        **version_kwargs,
    )
    result = get_user_provided_run_facets(sample_ti, TaskInstanceState.RUNNING)
    assert result == {}
def test_daginfo_timetable_summary():
    """DagInfo derives timetable_summary from the timetable, but an explicit summary wins."""
    from airflow.timetables.simple import NullTimetable
    dag = MagicMock()
    # timetable is enough to get summary
    dag.timetable = NullTimetable()
    dag.timetable_summary = None
    assert DagInfo(dag).timetable_summary == "None"
    # but if summary is present, it's preferred
    dag.timetable_summary = "explicit_summary"
    assert DagInfo(dag).timetable_summary == "explicit_summary"
@pytest.mark.skipif(AIRFLOW_V_3_0_PLUS, reason="Airflow 2 tests")
| CustomOperatorFromEmpty |
python | apache__airflow | providers/postgres/src/airflow/providers/postgres/dialects/postgres.py | {
"start": 965,
"end": 5332
} | class ____(Dialect):
"""Postgres dialect implementation."""
    @property
    def name(self) -> str:
        """Name of the dialect, used to select dialect-specific SQL generation."""
        return "postgresql"
    # NOTE(review): lru_cache on an instance method keys on ``self`` and keeps every
    # dialect instance alive for the cache's lifetime — acceptable only if dialect
    # instances are long-lived; confirm.
    @lru_cache(maxsize=None)
    def get_primary_keys(self, table: str, schema: str | None = None) -> list[str] | None:
        """
        Get the table's primary key.
        :param table: Name of the target table
        :param schema: Name of the target schema, public by default
        :return: Primary key columns list
        """
        if schema is None:
            # Table may be given as "schema.table"; split and unescape both parts.
            table, schema = self.extract_schema_from_table(table)
        table = self.unescape_word(table) or table
        schema = self.unescape_word(schema) if schema else None
        query = """
            select kcu.column_name
            from information_schema.table_constraints tco
                    join information_schema.key_column_usage kcu
                        on kcu.constraint_name = tco.constraint_name
                            and kcu.constraint_schema = tco.constraint_schema
                            and kcu.constraint_name = tco.constraint_name
            where tco.constraint_type = 'PRIMARY KEY'
            and kcu.table_schema = %s
            and kcu.table_name = %s
            order by kcu.ordinal_position
        """
        pk_columns = [row[0] for row in self.get_records(query, (schema, table))]
        # Return None (not an empty list) when the table has no primary key.
        return pk_columns or None
@staticmethod
def _to_row(row):
return {
"name": row[0],
"type": row[1],
"nullable": row[2].casefold() == "yes",
"default": row[3],
"autoincrement": row[4].casefold() == "always",
"identity": row[5].casefold() == "yes",
}
    @lru_cache(maxsize=None)
    def get_column_names(
        self, table: str, schema: str | None = None, predicate: Callable[[T], bool] = lambda column: True
    ) -> list[str] | None:
        """
        Get the column names of a table, optionally filtered by a predicate.
        :param table: Name of the target table
        :param schema: Name of the target schema; extracted from ``table`` when omitted
        :param predicate: Filter applied to each column-description dict (see ``_to_row``);
            defaults to accepting every column
        :return: Matching column names in ordinal position order
        """
        if schema is None:
            table, schema = self.extract_schema_from_table(table)
        table = self.unescape_word(table) or table
        schema = self.unescape_word(schema) if schema else None
        query = """
            select column_name,
                   data_type,
                   is_nullable,
                   column_default,
                   is_generated,
                   is_identity
            from information_schema.columns
            where table_schema = %s
            and table_name = %s
            order by ordinal_position
        """
        column_names = []
        # Normalize each record into a dict before applying the caller's predicate.
        for row in map(
            self._to_row,
            self.get_records(query, (schema, table)),
        ):
            if predicate(row):
                column_names.append(row["name"])
        self.log.debug("Column names for table '%s': %s", table, column_names)
        return column_names
    def generate_replace_sql(self, table, values, target_fields, **kwargs) -> str:
        """
        Generate the REPLACE SQL statement.
        :param table: Name of the target table
        :param values: The row to insert into the table
        :param target_fields: The names of the columns to fill in the table
        :param replace: Whether to replace instead of insert
        :param replace_index: the column or list of column names to act as
            index for the ON CONFLICT clause
        :return: The generated INSERT or REPLACE SQL statement
        :raises ValueError: if no target fields are given, or no conflict index
            can be determined (neither ``replace_index`` nor a primary key)
        """
        if not target_fields:
            raise ValueError("PostgreSQL ON CONFLICT upsert syntax requires column names")
        # Fall back to the table's primary key when no explicit conflict index is given.
        replace_index = kwargs.get("replace_index") or self.get_primary_keys(table)
        if not replace_index:
            raise ValueError("PostgreSQL ON CONFLICT upsert syntax requires an unique index")
        if isinstance(replace_index, str):
            replace_index = [replace_index]
        sql = self.generate_insert_sql(table, values, target_fields, **kwargs)
        on_conflict_str = f" ON CONFLICT ({', '.join(map(self.escape_word, replace_index))})"
        # Update only the columns that are not part of the conflict index.
        replace_target = [self.escape_word(f) for f in target_fields if f not in replace_index]
        if replace_target:
            replace_target_str = ", ".join(f"{col} = excluded.{col}" for col in replace_target)
            sql += f"{on_conflict_str} DO UPDATE SET {replace_target_str}"
        else:
            # Every target column is in the index: nothing to update on conflict.
            sql += f"{on_conflict_str} DO NOTHING"
        return sql
| PostgresDialect |
python | huggingface__transformers | src/transformers/models/rt_detr_v2/modeling_rt_detr_v2.py | {
"start": 15911,
"end": 20667
} | class ____(nn.Module):
def __init__(self, config: RTDetrV2Config):
super().__init__()
# self-attention
self.self_attn = RTDetrV2MultiheadAttention(
embed_dim=config.d_model,
num_heads=config.decoder_attention_heads,
dropout=config.attention_dropout,
)
self.dropout = config.dropout
self.activation_fn = ACT2FN[config.decoder_activation_function]
self.activation_dropout = config.activation_dropout
self.self_attn_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
# override only the encoder attention module with v2 version
self.encoder_attn = RTDetrV2MultiscaleDeformableAttention(config)
self.encoder_attn_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
# feedforward neural networks
self.fc1 = nn.Linear(config.d_model, config.decoder_ffn_dim)
self.fc2 = nn.Linear(config.decoder_ffn_dim, config.d_model)
self.final_layer_norm = nn.LayerNorm(config.d_model, eps=config.layer_norm_eps)
def forward(
self,
hidden_states: torch.Tensor,
position_embeddings: Optional[torch.Tensor] = None,
reference_points=None,
spatial_shapes=None,
spatial_shapes_list=None,
level_start_index=None,
encoder_hidden_states: Optional[torch.Tensor] = None,
encoder_attention_mask: Optional[torch.Tensor] = None,
output_attentions: Optional[bool] = False,
):
"""
Args:
hidden_states (`torch.FloatTensor`):
Input to the layer of shape `(seq_len, batch, embed_dim)`.
position_embeddings (`torch.FloatTensor`, *optional*):
Position embeddings that are added to the queries and keys in the self-attention layer.
reference_points (`torch.FloatTensor`, *optional*):
Reference points.
spatial_shapes (`torch.LongTensor`, *optional*):
Spatial shapes.
level_start_index (`torch.LongTensor`, *optional*):
Level start index.
encoder_hidden_states (`torch.FloatTensor`):
cross attention input to the layer of shape `(seq_len, batch, embed_dim)`
encoder_attention_mask (`torch.FloatTensor`): encoder attention mask of size
`(batch, 1, target_len, source_len)` where padding elements are indicated by very large negative
values.
output_attentions (`bool`, *optional*):
Whether or not to return the attentions tensors of all attention layers. See `attentions` under
returned tensors for more detail.
"""
residual = hidden_states
# Self Attention
hidden_states, self_attn_weights = self.self_attn(
hidden_states=hidden_states,
attention_mask=encoder_attention_mask,
position_embeddings=position_embeddings,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.self_attn_layer_norm(hidden_states)
second_residual = hidden_states
# Cross-Attention
cross_attn_weights = None
hidden_states, cross_attn_weights = self.encoder_attn(
hidden_states=hidden_states,
encoder_hidden_states=encoder_hidden_states,
position_embeddings=position_embeddings,
reference_points=reference_points,
spatial_shapes=spatial_shapes,
spatial_shapes_list=spatial_shapes_list,
level_start_index=level_start_index,
output_attentions=output_attentions,
)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = second_residual + hidden_states
hidden_states = self.encoder_attn_layer_norm(hidden_states)
# Fully Connected
residual = hidden_states
hidden_states = self.activation_fn(self.fc1(hidden_states))
hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
hidden_states = self.fc2(hidden_states)
hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
hidden_states = residual + hidden_states
hidden_states = self.final_layer_norm(hidden_states)
outputs = (hidden_states,)
if output_attentions:
outputs += (self_attn_weights, cross_attn_weights)
return outputs
@auto_docstring
| RTDetrV2DecoderLayer |
python | fluentpython__example-code | 20-descriptor/bulkfood/model_v5.py | {
"start": 540,
"end": 861
} | class ____(abc.ABC, AutoStorage): # <3>
def __set__(self, instance, value):
value = self.validate(instance, value) # <4>
super().__set__(instance, value) # <5>
@abc.abstractmethod
def validate(self, instance, value): # <6>
"""return validated value or raise ValueError"""
| Validated |
python | jazzband__django-polymorphic | example/pexp/admin.py | {
"start": 403,
"end": 843
} | class ____(PolymorphicChildModelAdmin):
base_model = Project # Can be set explicitly.
# On purpose, only have the shared fields here.
# The fields of the derived model should still be displayed.
base_fieldsets = (("Base fields", {"fields": ("topic",)}),)
admin.site.register(Project, ProjectAdmin)
admin.site.register(ArtProject, ProjectChildAdmin)
admin.site.register(ResearchProject, ProjectChildAdmin)
| ProjectChildAdmin |
python | weaviate__weaviate-python-client | weaviate/collections/classes/grpc.py | {
"start": 18834,
"end": 19386
} | class ____(_WeaviateInput):
link_on: str
include_vector: INCLUDE_VECTOR = Field(default=False)
return_metadata: Optional[MetadataQuery] = Field(default=None)
return_properties: Union["PROPERTIES", bool, None] = Field(default=None)
return_references: Optional["REFERENCES"] = Field(default=None)
def __hash__(self) -> int: # for set
return hash(str(self))
@property
def _return_metadata(self) -> _MetadataQuery:
return _MetadataQuery.from_public(self.return_metadata, self.include_vector)
| _QueryReference |
python | openai__openai-python | src/openai/cli/_cli.py | {
"start": 701,
"end": 6779
} | class ____(BaseModel):
if PYDANTIC_V1:
class Config(pydantic.BaseConfig): # type: ignore
extra: Any = pydantic.Extra.ignore # type: ignore
else:
model_config: ClassVar[ConfigDict] = ConfigDict(
extra="ignore",
)
verbosity: int
version: Optional[str] = None
api_key: Optional[str]
api_base: Optional[str]
organization: Optional[str]
proxy: Optional[List[str]]
api_type: Optional[_ApiType] = None
api_version: Optional[str] = None
# azure
azure_endpoint: Optional[str] = None
azure_ad_token: Optional[str] = None
# internal, set by subparsers to parse their specific args
args_model: Optional[Type[BaseModel]] = None
# internal, used so that subparsers can forward unknown arguments
unknown_args: List[str] = []
allow_unknown_args: bool = False
def _build_parser() -> argparse.ArgumentParser:
parser = argparse.ArgumentParser(description=None, prog="openai")
parser.add_argument(
"-v",
"--verbose",
action="count",
dest="verbosity",
default=0,
help="Set verbosity.",
)
parser.add_argument("-b", "--api-base", help="What API base url to use.")
parser.add_argument("-k", "--api-key", help="What API key to use.")
parser.add_argument("-p", "--proxy", nargs="+", help="What proxy to use.")
parser.add_argument(
"-o",
"--organization",
help="Which organization to run as (will use your default organization if not specified)",
)
parser.add_argument(
"-t",
"--api-type",
type=str,
choices=("openai", "azure"),
help="The backend API to call, must be `openai` or `azure`",
)
parser.add_argument(
"--api-version",
help="The Azure API version, e.g. 'https://learn.microsoft.com/en-us/azure/ai-services/openai/reference#rest-api-versioning'",
)
# azure
parser.add_argument(
"--azure-endpoint",
help="The Azure endpoint, e.g. 'https://endpoint.openai.azure.com'",
)
parser.add_argument(
"--azure-ad-token",
help="A token from Azure Active Directory, https://www.microsoft.com/en-us/security/business/identity-access/microsoft-entra-id",
)
# prints the package version
parser.add_argument(
"-V",
"--version",
action="version",
version="%(prog)s " + __version__,
)
def help() -> None:
parser.print_help()
parser.set_defaults(func=help)
subparsers = parser.add_subparsers()
sub_api = subparsers.add_parser("api", help="Direct API calls")
register_commands(sub_api)
sub_tools = subparsers.add_parser("tools", help="Client side tools for convenience")
_tools.register_commands(sub_tools, subparsers)
return parser
def main() -> int:
try:
_main()
except (APIError, CLIError, pydantic.ValidationError) as err:
display_error(err)
return 1
except KeyboardInterrupt:
sys.stderr.write("\n")
return 1
return 0
def _parse_args(parser: argparse.ArgumentParser) -> tuple[argparse.Namespace, Arguments, list[str]]:
# argparse by default will strip out the `--` but we want to keep it for unknown arguments
if "--" in sys.argv:
idx = sys.argv.index("--")
known_args = sys.argv[1:idx]
unknown_args = sys.argv[idx:]
else:
known_args = sys.argv[1:]
unknown_args = []
parsed, remaining_unknown = parser.parse_known_args(known_args)
# append any remaining unknown arguments from the initial parsing
remaining_unknown.extend(unknown_args)
args = model_parse(Arguments, vars(parsed))
if not args.allow_unknown_args:
# we have to parse twice to ensure any unknown arguments
# result in an error if that behaviour is desired
parser.parse_args()
return parsed, args, remaining_unknown
def _main() -> None:
parser = _build_parser()
parsed, args, unknown = _parse_args(parser)
if args.verbosity != 0:
sys.stderr.write("Warning: --verbosity isn't supported yet\n")
proxies: dict[str, httpx.BaseTransport] = {}
if args.proxy is not None:
for proxy in args.proxy:
key = "https://" if proxy.startswith("https") else "http://"
if key in proxies:
raise CLIError(f"Multiple {key} proxies given - only the last one would be used")
proxies[key] = httpx.HTTPTransport(proxy=httpx.Proxy(httpx.URL(proxy)))
http_client = httpx.Client(
mounts=proxies or None,
http2=can_use_http2(),
)
openai.http_client = http_client
if args.organization:
openai.organization = args.organization
if args.api_key:
openai.api_key = args.api_key
if args.api_base:
openai.base_url = args.api_base
# azure
if args.api_type is not None:
openai.api_type = args.api_type
if args.azure_endpoint is not None:
openai.azure_endpoint = args.azure_endpoint
if args.api_version is not None:
openai.api_version = args.api_version
if args.azure_ad_token is not None:
openai.azure_ad_token = args.azure_ad_token
try:
if args.args_model:
parsed.func(
model_parse(
args.args_model,
{
**{
# we omit None values so that they can be defaulted to `NotGiven`
# and we'll strip it from the API request
key: value
for key, value in vars(parsed).items()
if value is not None
},
"unknown_args": unknown,
},
)
)
else:
parsed.func()
finally:
try:
http_client.close()
except Exception:
pass
if __name__ == "__main__":
sys.exit(main())
| Arguments |
python | getsentry__sentry | src/sentry/analytics/events/first_release_tag_sent.py | {
"start": 79,
"end": 237
} | class ____(analytics.Event):
user_id: int
organization_id: int
project_id: int
analytics.register(FirstReleaseTagSentEvent)
| FirstReleaseTagSentEvent |
python | doocs__leetcode | solution/1800-1899/1851.Minimum Interval to Include Each Query/Solution.py | {
"start": 0,
"end": 612
} | class ____:
def minInterval(self, intervals: List[List[int]], queries: List[int]) -> List[int]:
n, m = len(intervals), len(queries)
intervals.sort()
queries = sorted((x, i) for i, x in enumerate(queries))
ans = [-1] * m
pq = []
i = 0
for x, j in queries:
while i < n and intervals[i][0] <= x:
a, b = intervals[i]
heappush(pq, (b - a + 1, b))
i += 1
while pq and pq[0][1] < x:
heappop(pq)
if pq:
ans[j] = pq[0][0]
return ans
| Solution |
python | Textualize__textual | docs/examples/guide/screens/modal01.py | {
"start": 493,
"end": 1022
} | class ____(Screen):
"""Screen with a dialog to quit."""
def compose(self) -> ComposeResult:
yield Grid(
Label("Are you sure you want to quit?", id="question"),
Button("Quit", variant="error", id="quit"),
Button("Cancel", variant="primary", id="cancel"),
id="dialog",
)
def on_button_pressed(self, event: Button.Pressed) -> None:
if event.button.id == "quit":
self.app.exit()
else:
self.app.pop_screen()
| QuitScreen |
python | huggingface__transformers | tests/models/bridgetower/test_image_processing_bridgetower.py | {
"start": 3580,
"end": 7268
} | class ____(ImageProcessingTestMixin, unittest.TestCase):
image_processing_class = BridgeTowerImageProcessor if is_vision_available() else None
fast_image_processing_class = BridgeTowerImageProcessorFast if is_torchvision_available() else None
def setUp(self):
super().setUp()
self.image_processor_tester = BridgeTowerImageProcessingTester(self)
@property
def image_processor_dict(self):
return self.image_processor_tester.prepare_image_processor_dict()
def test_image_processor_properties(self):
for image_processing_class in self.image_processor_list:
image_processing = image_processing_class(**self.image_processor_dict)
self.assertTrue(hasattr(image_processing, "image_mean"))
self.assertTrue(hasattr(image_processing, "image_std"))
self.assertTrue(hasattr(image_processing, "do_normalize"))
self.assertTrue(hasattr(image_processing, "do_resize"))
self.assertTrue(hasattr(image_processing, "size"))
self.assertTrue(hasattr(image_processing, "size_divisor"))
@require_vision
@require_torch
def test_slow_fast_equivalence(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
dummy_image = load_image(url_to_local_path("http://images.cocodataset.org/val2017/000000039769.jpg"))
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_image, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_image, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_mask.float(), encoding_fast.pixel_mask.float())
@require_vision
@require_torch
def test_slow_fast_equivalence_batched(self):
if not self.test_slow_image_processor or not self.test_fast_image_processor:
self.skipTest(reason="Skipping slow/fast equivalence test")
if self.image_processing_class is None or self.fast_image_processing_class is None:
self.skipTest(reason="Skipping slow/fast equivalence test as one of the image processors is not defined")
if hasattr(self.image_processor_tester, "do_center_crop") and self.image_processor_tester.do_center_crop:
self.skipTest(
reason="Skipping as do_center_crop is True and center_crop functions are not equivalent for fast and slow processors"
)
dummy_images = self.image_processor_tester.prepare_image_inputs(equal_resolution=False, torchify=True)
image_processor_slow = self.image_processing_class(**self.image_processor_dict)
image_processor_fast = self.fast_image_processing_class(**self.image_processor_dict)
encoding_slow = image_processor_slow(dummy_images, return_tensors="pt")
encoding_fast = image_processor_fast(dummy_images, return_tensors="pt")
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_values, encoding_fast.pixel_values)
self._assert_slow_fast_tensors_equivalence(encoding_slow.pixel_mask.float(), encoding_fast.pixel_mask.float())
| BridgeTowerImageProcessingTest |
python | tensorflow__tensorflow | tensorflow/python/data/experimental/ops/global_shuffle_op.py | {
"start": 2828,
"end": 4054
} | class ____(dataset_ops.UnaryUnchangedStructureDataset):
"""Shuffles all elements in the input dataset."""
def __init__(
self,
input_dataset: dataset_ops.DatasetV2,
seed: Optional[Union[int, tensor.Tensor]] = None,
reshuffle_each_iteration: bool = True,
name: Optional[str] = None):
options = options_lib.Options()
# Currently, prefetching threads cannot access the runtime context required
# for global shuffling when `warm_start` is enabled. Supporting it will be
# future work.
options.experimental_warm_start = False
input_dataset = input_dataset.with_options(options)
self._input_dataset = input_dataset
self._seed, self._seed2 = random_seed.get_seed(seed)
self._reshuffle_each_iteration = reshuffle_each_iteration
self._name = name
variant_tensor = ged_ops.global_shuffle_dataset(
self._input_dataset._variant_tensor, # pylint: disable=protected-access
seed=self._seed,
seed2=self._seed2,
seed_generator=gen_dataset_ops.dummy_seed_generator(),
reshuffle_each_iteration=self._reshuffle_each_iteration,
**self._common_args)
super().__init__(input_dataset, variant_tensor)
| _GlobalShuffleDataset |
python | ray-project__ray | rllib/core/models/torch/base.py | {
"start": 380,
"end": 3076
} | class ____(nn.Module, Model, abc.ABC):
"""Base class for RLlib's PyTorch models.
This class defines the interface for RLlib's PyTorch models.
Example usage for a single Flattening layer:
.. testcode::
from ray.rllib.core.models.configs import ModelConfig
from ray.rllib.core.models.torch.base import TorchModel
import torch
class FlattenModelConfig(ModelConfig):
def build(self, framework: str):
assert framework == "torch"
return TorchFlattenModel(self)
class TorchFlattenModel(TorchModel):
def __init__(self, config):
TorchModel.__init__(self, config)
self.flatten_layer = torch.nn.Flatten()
def _forward(self, inputs, **kwargs):
return self.flatten_layer(inputs)
model = FlattenModelConfig().build("torch")
inputs = torch.Tensor([[[1, 2]]])
print(model(inputs))
.. testoutput::
tensor([[1., 2.]])
"""
def __init__(self, config: ModelConfig):
"""Initialized a TorchModel.
Args:
config: The ModelConfig to use.
"""
nn.Module.__init__(self)
Model.__init__(self, config)
def forward(
self, inputs: Union[dict, TensorType], **kwargs
) -> Union[dict, TensorType]:
"""Returns the output of this model for the given input.
This method only makes sure that we have a spec-checked _forward() method.
Args:
inputs: The input tensors.
**kwargs: Forward compatibility kwargs.
Returns:
dict: The output tensors.
"""
return self._forward(inputs, **kwargs)
@override(Model)
def get_num_parameters(self) -> Tuple[int, int]:
num_trainable_parameters = 0
num_frozen_parameters = 0
for p in self.parameters():
n = p.numel()
if p.requires_grad:
num_trainable_parameters += n
else:
num_frozen_parameters += n
return num_trainable_parameters, num_frozen_parameters
@override(Model)
def _set_to_dummy_weights(self, value_sequence=(-0.02, -0.01, 0.01, 0.02)):
trainable_weights = []
non_trainable_weights = []
for p in self.parameters():
if p.requires_grad:
trainable_weights.append(p)
else:
non_trainable_weights.append(p)
for i, w in enumerate(trainable_weights + non_trainable_weights):
fill_val = value_sequence[i % len(value_sequence)]
with torch.no_grad():
w.fill_(fill_val)
| TorchModel |
python | davidhalter__jedi | jedi/api/refactoring/__init__.py | {
"start": 2733,
"end": 9579
} | class ____:
def __init__(self, inference_state, file_to_node_changes, renames=()):
self._inference_state = inference_state
self._renames = renames
self._file_to_node_changes = file_to_node_changes
def get_changed_files(self) -> Dict[Path, ChangedFile]:
def calculate_to_path(p):
if p is None:
return p
p = str(p)
for from_, to in renames:
if p.startswith(str(from_)):
p = str(to) + p[len(str(from_)):]
return Path(p)
renames = self.get_renames()
return {
path: ChangedFile(
self._inference_state,
from_path=path,
to_path=calculate_to_path(path),
module_node=next(iter(map_)).get_root_node(),
node_to_str_map=map_
)
# We need to use `or`, because the path can be None
for path, map_ in sorted(
self._file_to_node_changes.items(),
key=lambda x: x[0] or Path("")
)
}
def get_renames(self) -> Iterable[Tuple[Path, Path]]:
"""
Files can be renamed in a refactoring.
"""
return sorted(self._renames)
def get_diff(self):
text = ''
project_path = self._inference_state.project.path
for from_, to in self.get_renames():
text += 'rename from %s\nrename to %s\n' \
% (_try_relative_to(from_, project_path), _try_relative_to(to, project_path))
return text + ''.join(f.get_diff() for f in self.get_changed_files().values())
def apply(self):
"""
Applies the whole refactoring to the files, which includes renames.
"""
for f in self.get_changed_files().values():
f.apply()
for old, new in self.get_renames():
old.rename(new)
def _calculate_rename(path, new_name):
dir_ = path.parent
if path.name in ('__init__.py', '__init__.pyi'):
return dir_, dir_.parent.joinpath(new_name)
return path, dir_.joinpath(new_name + path.suffix)
def rename(inference_state, definitions, new_name):
file_renames = set()
file_tree_name_map = {}
if not definitions:
raise RefactoringError("There is no name under the cursor")
for d in definitions:
# This private access is ok in a way. It's not public to
# protect Jedi users from seeing it.
tree_name = d._name.tree_name
if d.type == 'module' and tree_name is None and d.module_path is not None:
p = Path(d.module_path)
file_renames.add(_calculate_rename(p, new_name))
elif isinstance(d._name, ImplicitNSName):
for p in d._name._value.py__path__():
file_renames.add(_calculate_rename(Path(p), new_name))
else:
if tree_name is not None:
fmap = file_tree_name_map.setdefault(d.module_path, {})
fmap[tree_name] = tree_name.prefix + new_name
return Refactoring(inference_state, file_tree_name_map, file_renames)
def inline(inference_state, names):
if not names:
raise RefactoringError("There is no name under the cursor")
if any(n.api_type in ('module', 'namespace') for n in names):
raise RefactoringError("Cannot inline imports, modules or namespaces")
if any(n.tree_name is None for n in names):
raise RefactoringError("Cannot inline builtins/extensions")
definitions = [n for n in names if n.tree_name.is_definition()]
if len(definitions) == 0:
raise RefactoringError("No definition found to inline")
if len(definitions) > 1:
raise RefactoringError("Cannot inline a name with multiple definitions")
if len(names) == 1:
raise RefactoringError("There are no references to this name")
tree_name = definitions[0].tree_name
expr_stmt = tree_name.get_definition()
if expr_stmt.type != 'expr_stmt':
type_ = dict(
funcdef='function',
classdef='class',
).get(expr_stmt.type, expr_stmt.type)
raise RefactoringError("Cannot inline a %s" % type_)
if len(expr_stmt.get_defined_names(include_setitem=True)) > 1:
raise RefactoringError("Cannot inline a statement with multiple definitions")
first_child = expr_stmt.children[1]
if first_child.type == 'annassign' and len(first_child.children) == 4:
first_child = first_child.children[2]
if first_child != '=':
if first_child.type == 'annassign':
raise RefactoringError(
'Cannot inline a statement that is defined by an annotation'
)
else:
raise RefactoringError(
'Cannot inline a statement with "%s"'
% first_child.get_code(include_prefix=False)
)
rhs = expr_stmt.get_rhs()
replace_code = rhs.get_code(include_prefix=False)
references = [n for n in names if not n.tree_name.is_definition()]
file_to_node_changes = {}
for name in references:
tree_name = name.tree_name
path = name.get_root_context().py__file__()
s = replace_code
if rhs.type == 'testlist_star_expr' \
or tree_name.parent.type in EXPRESSION_PARTS \
or tree_name.parent.type == 'trailer' \
and tree_name.parent.get_next_sibling() is not None:
s = '(' + replace_code + ')'
of_path = file_to_node_changes.setdefault(path, {})
n = tree_name
prefix = n.prefix
par = n.parent
if par.type == 'trailer' and par.children[0] == '.':
prefix = par.parent.children[0].prefix
n = par
for some_node in par.parent.children[:par.parent.children.index(par)]:
of_path[some_node] = ''
of_path[n] = prefix + s
path = definitions[0].get_root_context().py__file__()
changes = file_to_node_changes.setdefault(path, {})
changes[expr_stmt] = _remove_indent_of_prefix(expr_stmt.get_first_leaf().prefix)
next_leaf = expr_stmt.get_next_leaf()
# Most of the time we have to remove the newline at the end of the
# statement, but if there's a comment we might not need to.
if next_leaf.prefix.strip(' \t') == '' \
and (next_leaf.type == 'newline' or next_leaf == ';'):
changes[next_leaf] = ''
return Refactoring(inference_state, file_to_node_changes)
def _remove_indent_of_prefix(prefix):
r"""
Removes the last indentation of a prefix, e.g. " \n \n " becomes " \n \n".
"""
return ''.join(split_lines(prefix, keepends=True)[:-1])
def _try_relative_to(path: Path, base: Path) -> Path:
try:
return path.relative_to(base)
except ValueError:
return path
| Refactoring |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/destinations.py | {
"start": 78784,
"end": 84877
} | class ____(GeneratedAirbyteDestination):
class NoCompression:
@public
def __init__(self, compression_type: Optional[str] = None):
self.compression_type = check.opt_str_param(compression_type, "compression_type")
class Deflate:
@public
def __init__(self, codec: str, compression_level: int):
self.codec = check.str_param(codec, "codec")
self.compression_level = check.int_param(compression_level, "compression_level")
class Bzip2:
@public
def __init__(self, codec: str):
self.codec = check.str_param(codec, "codec")
class Xz:
@public
def __init__(self, codec: str, compression_level: int):
self.codec = check.str_param(codec, "codec")
self.compression_level = check.int_param(compression_level, "compression_level")
class Zstandard:
@public
def __init__(
self, codec: str, compression_level: int, include_checksum: Optional[bool] = None
):
self.codec = check.str_param(codec, "codec")
self.compression_level = check.int_param(compression_level, "compression_level")
self.include_checksum = check.opt_bool_param(include_checksum, "include_checksum")
class Snappy:
@public
def __init__(self, codec: str):
self.codec = check.str_param(codec, "codec")
class AvroApacheAvro:
@public
def __init__(
self,
format_type: str,
compression_codec: Union[
"R2Destination.NoCompression",
"R2Destination.Deflate",
"R2Destination.Bzip2",
"R2Destination.Xz",
"R2Destination.Zstandard",
"R2Destination.Snappy",
],
):
self.format_type = check.str_param(format_type, "format_type")
self.compression_codec = check.inst_param(
compression_codec,
"compression_codec",
(
R2Destination.NoCompression,
R2Destination.Deflate,
R2Destination.Bzip2,
R2Destination.Xz,
R2Destination.Zstandard,
R2Destination.Snappy,
),
)
class GZIP:
@public
def __init__(self, compression_type: Optional[str] = None):
self.compression_type = check.opt_str_param(compression_type, "compression_type")
class CSVCommaSeparatedValues:
@public
def __init__(
self,
format_type: str,
flattening: str,
compression: Union["R2Destination.NoCompression", "R2Destination.GZIP"],
):
self.format_type = check.str_param(format_type, "format_type")
self.flattening = check.str_param(flattening, "flattening")
self.compression = check.inst_param(
compression, "compression", (R2Destination.NoCompression, R2Destination.GZIP)
)
class JSONLinesNewlineDelimitedJSON:
@public
def __init__(
self,
format_type: str,
compression: Union["R2Destination.NoCompression", "R2Destination.GZIP"],
):
self.format_type = check.str_param(format_type, "format_type")
self.compression = check.inst_param(
compression, "compression", (R2Destination.NoCompression, R2Destination.GZIP)
)
@public
def __init__(
self,
name: str,
account_id: str,
access_key_id: str,
secret_access_key: str,
s3_bucket_name: str,
s3_bucket_path: str,
format: Union[
"R2Destination.AvroApacheAvro",
"R2Destination.CSVCommaSeparatedValues",
"R2Destination.JSONLinesNewlineDelimitedJSON",
],
s3_path_format: Optional[str] = None,
file_name_pattern: Optional[str] = None,
):
"""Airbyte Destination for R2.
Documentation can be found at https://docs.airbyte.com/integrations/destinations/r2
Args:
name (str): The name of the destination.
account_id (str): Cloudflare account ID
access_key_id (str): The access key ID to access the R2 bucket. Airbyte requires Read and Write permissions to the given bucket. Read more here.
secret_access_key (str): The corresponding secret to the access key ID. Read more here
s3_bucket_name (str): The name of the R2 bucket. Read more here.
s3_bucket_path (str): Directory under the R2 bucket where data will be written.
format (Union[R2Destination.AvroApacheAvro, R2Destination.CSVCommaSeparatedValues, R2Destination.JSONLinesNewlineDelimitedJSON]): Format of the data output. See here for more details
s3_path_format (Optional[str]): Format string on how data will be organized inside the R2 bucket directory. Read more here
file_name_pattern (Optional[str]): The pattern allows you to set the file-name format for the R2 staging file(s)
"""
self.account_id = check.str_param(account_id, "account_id")
self.access_key_id = check.str_param(access_key_id, "access_key_id")
self.secret_access_key = check.str_param(secret_access_key, "secret_access_key")
self.s3_bucket_name = check.str_param(s3_bucket_name, "s3_bucket_name")
self.s3_bucket_path = check.str_param(s3_bucket_path, "s3_bucket_path")
self.format = check.inst_param(
format,
"format",
(
R2Destination.AvroApacheAvro,
R2Destination.CSVCommaSeparatedValues,
R2Destination.JSONLinesNewlineDelimitedJSON,
),
)
self.s3_path_format = check.opt_str_param(s3_path_format, "s3_path_format")
self.file_name_pattern = check.opt_str_param(file_name_pattern, "file_name_pattern")
super().__init__("R2", name)
| R2Destination |
python | django__django | tests/generic_views/views.py | {
"start": 8209,
"end": 8332
} | class ____(generic.DetailView):
def get_queryset(self):
return Book.does_not_exist.all()
| ObjectDoesNotExistDetail |
python | spyder-ide__spyder | external-deps/python-lsp-server/pylsp/config/source.py | {
"start": 199,
"end": 2753
} | class ____:
"""Base class for implementing a config source."""
def __init__(self, root_path) -> None:
self.root_path = root_path
self.is_windows = sys.platform == "win32"
self.xdg_home = os.environ.get(
"XDG_CONFIG_HOME", os.path.expanduser("~/.config")
)
def user_config(self) -> None:
"""Return user-level (i.e. home directory) configuration."""
raise NotImplementedError()
def project_config(self, document_path) -> None:
"""Return project-level (i.e. workspace directory) configuration."""
raise NotImplementedError()
@classmethod
def read_config_from_files(cls, files):
config = configparser.RawConfigParser()
for filename in files:
if os.path.exists(filename) and not os.path.isdir(filename):
config.read(filename)
return config
@classmethod
def parse_config(cls, config, key, options):
"""Parse the config with the given options."""
conf = {}
for source, destination, opt_type in options:
opt_value = cls._get_opt(config, key, source, opt_type)
if opt_value is not None:
cls._set_opt(conf, destination, opt_value)
return conf
@classmethod
def _get_opt(cls, config, key, option, opt_type):
"""Get an option from a configparser with the given type."""
for opt_key in [option, option.replace("-", "_")]:
if not config.has_option(key, opt_key):
continue
if opt_type is bool:
return config.getboolean(key, opt_key)
if opt_type is int:
return config.getint(key, opt_key)
if opt_type is str:
return config.get(key, opt_key)
if opt_type is list:
return cls._parse_list_opt(config.get(key, opt_key))
raise ValueError("Unknown option type: %s" % opt_type)
@classmethod
def _parse_list_opt(cls, string):
return [s.strip() for s in string.split(",") if s.strip()]
@classmethod
def _set_opt(cls, config_dict, path, value):
"""Set the value in the dictionary at the given path if the value is not None."""
if value is None:
return
if "." not in path:
config_dict[path] = value
return
key, rest = path.split(".", 1)
if key not in config_dict:
config_dict[key] = {}
cls._set_opt(config_dict[key], rest, value)
| ConfigSource |
python | PrefectHQ__prefect | src/prefect/client/subscriptions.py | {
"start": 597,
"end": 4382
} | class ____(Generic[S]):
def __init__(
self,
model: type[S],
path: str,
keys: Iterable[str],
client_id: Optional[str] = None,
base_url: Optional[str] = None,
):
self.model = model
self.client_id = client_id
base_url = base_url.replace("http", "ws", 1) if base_url else None
self.subscription_url: str = f"{base_url}{path}"
self.keys: list[str] = list(keys)
self._connect = websocket_connect(
self.subscription_url,
subprotocols=[websockets.Subprotocol("prefect")],
)
self._websocket = None
def __aiter__(self) -> Self:
return self
@property
def websocket(self) -> websockets.asyncio.client.ClientConnection:
if not self._websocket:
raise RuntimeError("Subscription is not connected")
return self._websocket
async def __anext__(self) -> S:
while True:
try:
await self._ensure_connected()
message = await self.websocket.recv()
await self.websocket.send(orjson.dumps({"type": "ack"}).decode())
return self.model.model_validate_json(message)
except (
ConnectionRefusedError,
websockets.exceptions.ConnectionClosedError,
):
self._websocket = None
if hasattr(self._connect, "protocol"):
await self._connect.__aexit__(None, None, None)
await asyncio.sleep(0.5)
async def _ensure_connected(self):
if self._websocket:
return
websocket = await self._connect.__aenter__()
try:
settings = get_current_settings()
auth_token = (
settings.api.auth_string.get_secret_value()
if settings.api.auth_string
else None
)
api_key = settings.api.key.get_secret_value() if settings.api.key else None
token = auth_token or api_key # Prioritize auth_token
await websocket.send(
orjson.dumps({"type": "auth", "token": token}).decode()
)
auth: dict[str, Any] = orjson.loads(await websocket.recv())
assert auth["type"] == "auth_success", auth.get("message")
message: dict[str, Any] = {"type": "subscribe", "keys": self.keys}
if self.client_id:
message.update({"client_id": self.client_id})
await websocket.send(orjson.dumps(message).decode())
except (
AssertionError,
websockets.exceptions.ConnectionClosedError,
) as e:
if isinstance(e, AssertionError) or (
e.rcvd and e.rcvd.code == WS_1008_POLICY_VIOLATION
):
if isinstance(e, AssertionError):
reason = e.args[0]
elif e.rcvd and e.rcvd.reason:
reason = e.rcvd.reason
else:
reason = "unknown"
else:
reason = None
if reason:
error_message = (
"Unable to authenticate to the subscription. Please ensure the provided "
"`PREFECT_API_AUTH_STRING` (for self-hosted with auth string) or "
"`PREFECT_API_KEY` (for Cloud or self-hosted with API key) "
f"you are using is valid for this environment. Reason: {reason}"
)
raise Exception(error_message) from e
raise
else:
self._websocket = websocket
def __repr__(self) -> str:
return f"{type(self).__name__}[{self.model.__name__}]"
| Subscription |
python | laurentluce__python-algorithms | algorithms/a_star_path_finding.py | {
"start": 15,
"end": 662
} | class ____(object):
def __init__(self, x, y, reachable):
"""Initialize new cell.
@param reachable is cell reachable? not a wall?
@param x cell x coordinate
@param y cell y coordinate
@param g cost to move from the starting cell to this cell.
@param h estimation of the cost to move from this cell
to the ending cell.
@param f f = g + h
"""
self.reachable = reachable
self.x = x
self.y = y
self.parent = None
self.g = 0
self.h = 0
self.f = 0
def __lt__(self, other):
return self.f < other.f
| Cell |
python | numba__numba | numba/core/rewrites/ir_print.py | {
"start": 132,
"end": 2047
} | class ____(Rewrite):
"""
Rewrite calls to the print() global function to dedicated IR print() nodes.
"""
def match(self, func_ir, block, typemap, calltypes):
self.prints = prints = {}
self.block = block
# Find all assignments with a right-hand print() call
for inst in block.find_insts(ir.Assign):
if isinstance(inst.value, ir.Expr) and inst.value.op == 'call':
expr = inst.value
try:
callee = func_ir.infer_constant(expr.func)
except errors.ConstantInferenceError:
continue
if callee is print:
if expr.kws:
# Only positional args are supported
msg = ("Numba's print() function implementation does not "
"support keyword arguments.")
raise errors.UnsupportedError(msg, inst.loc)
prints[inst] = expr
return len(prints) > 0
def apply(self):
"""
Rewrite `var = call <print function>(...)` as a sequence of
`print(...)` and `var = const(None)`.
"""
new_block = self.block.copy()
new_block.clear()
for inst in self.block.body:
if inst in self.prints:
expr = self.prints[inst]
print_node = ir.Print(args=expr.args, vararg=expr.vararg,
loc=expr.loc)
new_block.append(print_node)
assign_node = ir.Assign(value=ir.Const(None, loc=expr.loc),
target=inst.target,
loc=inst.loc)
new_block.append(assign_node)
else:
new_block.append(inst)
return new_block
@register_rewrite('before-inference')
| RewritePrintCalls |
python | doocs__leetcode | solution/0400-0499/0450.Delete Node in a BST/Solution.py | {
"start": 192,
"end": 833
} | class ____:
def deleteNode(self, root: Optional[TreeNode], key: int) -> Optional[TreeNode]:
if root is None:
return None
if root.val > key:
root.left = self.deleteNode(root.left, key)
return root
if root.val < key:
root.right = self.deleteNode(root.right, key)
return root
if root.left is None:
return root.right
if root.right is None:
return root.left
node = root.right
while node.left:
node = node.left
node.left = root.left
root = root.right
return root
| Solution |
python | django-haystack__django-haystack | test_haystack/test_views.py | {
"start": 9192,
"end": 10887
} | class ____(TestCase):
fixtures = ["base_data"]
def setUp(self):
super().setUp()
# Stow.
self.old_unified_index = connections["default"]._index
self.ui = UnifiedIndex()
self.bmmsi = BasicMockModelSearchIndex()
self.bammsi = BasicAnotherMockModelSearchIndex()
self.ui.build(indexes=[self.bmmsi, self.bammsi])
connections["default"]._index = self.ui
# Update the "index".
backend = connections["default"].get_backend()
backend.clear()
backend.update(self.bmmsi, MockModel.objects.all())
def tearDown(self):
connections["default"]._index = self.old_unified_index
super().tearDown()
def test_search_no_query(self):
response = self.client.get(reverse("haystack_basic_search"))
self.assertEqual(response.status_code, 200)
def test_search_query(self):
response = self.client.get(reverse("haystack_basic_search"), {"q": "haystack"})
self.assertEqual(response.status_code, 200)
self.assertEqual(type(response.context[-1]["form"]), ModelSearchForm)
self.assertEqual(len(response.context[-1]["page"].object_list), 3)
self.assertEqual(
response.context[-1]["page"].object_list[0].content_type(), "core.mockmodel"
)
self.assertEqual(response.context[-1]["page"].object_list[0].pk, "1")
self.assertEqual(response.context[-1]["query"], "haystack")
def test_invalid_page(self):
response = self.client.get(
reverse("haystack_basic_search"), {"q": "haystack", "page": "165233"}
)
self.assertEqual(response.status_code, 404)
| BasicSearchViewTestCase |
python | doocs__leetcode | solution/0900-0999/0981.Time Based Key-Value Store/Solution.py | {
"start": 0,
"end": 567
} | class ____:
def __init__(self):
self.ktv = defaultdict(list)
def set(self, key: str, value: str, timestamp: int) -> None:
self.ktv[key].append((timestamp, value))
def get(self, key: str, timestamp: int) -> str:
if key not in self.ktv:
return ''
tv = self.ktv[key]
i = bisect_right(tv, (timestamp, chr(127)))
return tv[i - 1][1] if i else ''
# Your TimeMap object will be instantiated and called as such:
# obj = TimeMap()
# obj.set(key,value,timestamp)
# param_2 = obj.get(key,timestamp)
| TimeMap |
python | PrefectHQ__prefect | src/prefect/server/database/query_components.py | {
"start": 19010,
"end": 28106
} | class ____(BaseQueryComponents):
# --- Postgres-specific SqlAlchemy bindings
def insert(self, obj: type[orm_models.Base]) -> postgresql.Insert:
return postgresql.insert(obj)
# --- Postgres-specific JSON handling
@property
def uses_json_strings(self) -> bool:
return False
def cast_to_json(self, json_obj: sa.ColumnElement[T]) -> sa.ColumnElement[T]:
return json_obj
def build_json_object(
self, *args: Union[str, sa.ColumnElement[Any]]
) -> sa.ColumnElement[Any]:
return sa.func.jsonb_build_object(*args)
def json_arr_agg(self, json_array: sa.ColumnElement[Any]) -> sa.ColumnElement[Any]:
return sa.func.jsonb_agg(json_array)
# --- Postgres-optimized subqueries
def make_timestamp_intervals(
self,
start_time: datetime.datetime,
end_time: datetime.datetime,
interval: datetime.timedelta,
) -> sa.Select[tuple[datetime.datetime, datetime.datetime]]:
dt = sa.func.generate_series(
start_time, end_time, interval, type_=Timestamp()
).column_valued("dt")
return (
sa.select(
dt.label("interval_start"),
sa.type_coerce(
dt + sa.bindparam("interval", interval, type_=sa.Interval()),
type_=Timestamp(),
).label("interval_end"),
)
.where(dt < end_time)
.limit(500) # grab at most 500 intervals
)
@db_injector
def set_state_id_on_inserted_flow_runs_statement(
self,
db: PrefectDBInterface,
inserted_flow_run_ids: Sequence[UUID],
insert_flow_run_states: Iterable[dict[str, Any]],
) -> sa.Update:
"""Given a list of flow run ids and associated states, set the state_id
to the appropriate state for all flow runs"""
# postgres supports `UPDATE ... FROM` syntax
FlowRun, FlowRunState = db.FlowRun, db.FlowRunState
stmt = (
sa.update(FlowRun)
.where(
FlowRun.id.in_(inserted_flow_run_ids),
FlowRunState.flow_run_id == FlowRun.id,
FlowRunState.id.in_([r["id"] for r in insert_flow_run_states]),
)
.values(state_id=FlowRunState.id)
# no need to synchronize as these flow runs are entirely new
.execution_options(synchronize_session=False)
)
return stmt
@property
def _get_scheduled_flow_runs_from_work_pool_template_path(self) -> str:
"""
Template for the query to get scheduled flow runs from a work pool
"""
return "postgres/get-runs-from-worker-queues.sql.jinja"
@db_injector
def _build_flow_run_graph_v2_query(
self, db: PrefectDBInterface
) -> sa.Select[FlowRunGraphV2Node]:
"""Postgresql version of the V2 FlowRun graph data query
This SQLA query is built just once and then cached per DB interface
"""
# the parameters this query takes as inputs
param_flow_run_id = sa.bindparam("flow_run_id", type_=UUIDTypeDecorator)
param_since = sa.bindparam("since", type_=Timestamp)
param_max_nodes = sa.bindparam("max_nodes", type_=sa.Integer)
Flow, FlowRun, TaskRun = db.Flow, db.FlowRun, db.TaskRun
input = sa.func.jsonb_each(TaskRun.task_inputs).table_valued(
"key", "value", name="input"
)
argument = (
sa.func.jsonb_array_elements(input.c.value, type_=postgresql.JSONB())
.table_valued(sa.column("value", postgresql.JSONB()))
.render_derived(name="argument")
)
edges = (
sa.select(
sa.case((FlowRun.id.is_not(None), "flow-run"), else_="task-run").label(
"kind"
),
sa.func.coalesce(FlowRun.id, TaskRun.id).label("id"),
sa.func.coalesce(Flow.name + " / " + FlowRun.name, TaskRun.name).label(
"label"
),
sa.func.coalesce(FlowRun.state_type, TaskRun.state_type).label(
"state_type"
),
sa.func.coalesce(
FlowRun.start_time,
FlowRun.expected_start_time,
TaskRun.start_time,
TaskRun.expected_start_time,
).label("start_time"),
sa.func.coalesce(
FlowRun.end_time,
TaskRun.end_time,
sa.case(
(
TaskRun.state_type == StateType.COMPLETED,
TaskRun.expected_start_time,
),
else_=sa.null(),
),
).label("end_time"),
sa.cast(argument.c.value["id"].astext, type_=UUIDTypeDecorator).label(
"parent"
),
(input.c.key == "__parents__").label("has_encapsulating_task"),
)
.join_from(TaskRun, input, onclause=sa.true(), isouter=True)
.join(argument, onclause=sa.true(), isouter=True)
.join(
FlowRun,
isouter=True,
onclause=FlowRun.parent_task_run_id == TaskRun.id,
)
.join(Flow, isouter=True, onclause=Flow.id == FlowRun.flow_id)
.where(
TaskRun.flow_run_id == param_flow_run_id,
TaskRun.state_type != StateType.PENDING,
sa.func.coalesce(
FlowRun.start_time,
FlowRun.expected_start_time,
TaskRun.start_time,
TaskRun.expected_start_time,
).is_not(None),
)
# -- the order here is important to speed up building the two sets of
# -- edges in the with_parents and with_children CTEs below
.order_by(sa.func.coalesce(FlowRun.id, TaskRun.id))
).cte("edges")
children, parents = edges.alias("children"), edges.alias("parents")
with_encapsulating = (
sa.select(
children.c.id,
sa.func.array_agg(
postgresql.aggregate_order_by(parents.c.id, parents.c.start_time)
).label("encapsulating_ids"),
)
.join(parents, onclause=parents.c.id == children.c.parent)
.where(children.c.has_encapsulating_task.is_(True))
.group_by(children.c.id)
).cte("with_encapsulating")
with_parents = (
sa.select(
children.c.id,
sa.func.array_agg(
postgresql.aggregate_order_by(parents.c.id, parents.c.start_time)
).label("parent_ids"),
)
.join(parents, onclause=parents.c.id == children.c.parent)
.where(children.c.has_encapsulating_task.is_distinct_from(True))
.group_by(children.c.id)
.cte("with_parents")
)
with_children = (
sa.select(
parents.c.id,
sa.func.array_agg(
postgresql.aggregate_order_by(children.c.id, children.c.start_time)
).label("child_ids"),
)
.join(children, onclause=children.c.parent == parents.c.id)
.where(children.c.has_encapsulating_task.is_distinct_from(True))
.group_by(parents.c.id)
.cte("with_children")
)
graph = (
sa.select(
edges.c.kind,
edges.c.id,
edges.c.label,
edges.c.state_type,
edges.c.start_time,
edges.c.end_time,
with_parents.c.parent_ids,
with_children.c.child_ids,
with_encapsulating.c.encapsulating_ids,
)
.distinct(edges.c.id)
.join(with_parents, isouter=True, onclause=with_parents.c.id == edges.c.id)
.join(
with_children, isouter=True, onclause=with_children.c.id == edges.c.id
)
.join(
with_encapsulating,
isouter=True,
onclause=with_encapsulating.c.id == edges.c.id,
)
.cte("nodes")
)
query = (
sa.select(
graph.c.kind,
graph.c.id,
graph.c.label,
graph.c.state_type,
graph.c.start_time,
graph.c.end_time,
graph.c.parent_ids,
graph.c.child_ids,
graph.c.encapsulating_ids,
)
.where(sa.or_(graph.c.end_time.is_(None), graph.c.end_time >= param_since))
.order_by(graph.c.start_time, graph.c.end_time)
.limit(param_max_nodes)
)
return cast(sa.Select[FlowRunGraphV2Node], query)
| AsyncPostgresQueryComponents |
python | google__jax | jax/_src/interpreters/pxla.py | {
"start": 18258,
"end": 24423
} | class ____(core.Trace):
__slots__ = ("axis_name", "emap_info")
def __init__(self, axis_name, emap_info):
super().__init__()
self.emap_info = emap_info
self.axis_name = axis_name
def to_map_tracer(self, val):
if isinstance(val, MapTracer):
return val
else:
return MapTracer(self, val, {})
def process_primitive(self, primitive, tracers, params):
from jax._src.lax import parallel # pytype: disable=import-error
if primitive is parallel.axis_index_p:
return self.process_axis_index(**params) # pytype: disable=missing-parameter
if primitive is parallel.psum_p:
f = HashableFunction(
lambda *xs: parallel.psum(
xs, axis_name=params['axes'], axis_index_groups=params['axis_index_groups']),
(primitive, tuple(params.items())))
else:
f = HashableFunction(lambda *args: primitive.bind(*args, **params),
(primitive, tuple(params.items())))
tracers = map(self.to_map_tracer, tracers)
vals, shard_axes = unzip2([(t.val, t.shard_axes) for t in tracers])
info = self.emap_info
names = core.get_axis_env().axis_names()
all_axes = tuple(_map_schedule(map(s.get, names)) for s in shard_axes) # pytype: disable=wrong-arg-types # always-use-return-annotations
f_mapped, out_shard_axes = _multi_pmap(f, self.emap_info, names, all_axes)
with core.eval_context(), api.disable_jit(False):
outvals = f_mapped(*vals)
if primitive.multiple_results:
return [MapTracer(self, val, out_shard_axes) for val in outvals]
return MapTracer(self, outvals, out_shard_axes)
def process_call(self, call_primitive, fun, tracers, params):
raise NotImplementedError
def process_map(self, map_primitive, fun, tracers, params):
if params['devices'] is not None:
raise ValueError("Nested pmap with explicit devices argument.")
if not config.disable_jit.value:
bind = HashableFunction(
lambda *args, **kwargs: map_primitive.bind(fun, *args, **kwargs),
(map_primitive, fun))
fake_primitive = FakePrimitive(multiple_results=True, bind=bind)
return self.process_primitive(fake_primitive, tracers, params)
axis_name, in_axes, out_axes_thunk, axis_size = (params["axis_name"],
params["in_axes"], params["out_axes_thunk"], params["axis_size"])
vals, shard_axes = unzip2((t.val, t.shard_axes) for t in tracers)
shard_axes = [{axis_name: _annot_to_flat(np.ndim(v), s.values(), ax), **s}
if ax is not None else s
for v, ax, s in zip(vals, in_axes, shard_axes)]
in_tracers = map(partial(MapTracer, self), vals, shard_axes)
with core.extend_axis_env_nd([(axis_name, axis_size)]):
with core.set_current_trace(self):
ans = fun.call_wrapped(*in_tracers)
out_tracers = map(self.to_map_tracer, ans)
out, outaxes = unzip2((t.val, t.shard_axes) for t in out_tracers)
out, outaxes = unzip2(_match_annot(axis_name, axis_size, v, s, dst)
for v, s, dst in zip(out, outaxes, out_axes_thunk()))
return map(partial(MapTracer, self), out, outaxes)
def process_custom_jvp_call(self, prim, fun, jvp, tracers, *, symbolic_zeros):
if symbolic_zeros:
msg = ("custom_jvp with symbolic_zeros=True not supported with eager pmap. "
"Please open an issue at https://github.com/jax-ml/jax/issues !")
raise NotImplementedError(msg)
del prim, jvp, symbolic_zeros # always base main, can drop jvp
with core.set_current_trace(self):
return fun.call_wrapped(*tracers)
def process_custom_vjp_call(self, primitive, fun, fwd, bwd, tracers,
out_trees, symbolic_zeros):
if symbolic_zeros:
msg = ("custom_vjp with symbolic_zeros=True not supported with eager pmap. "
"Please open an issue at https://github.com/jax-ml/jax/issues !")
raise NotImplementedError(msg)
del primitive, fwd, bwd, out_trees, symbolic_zeros # always base main, drop vjp
with core.set_current_trace(self):
return fun.call_wrapped(*tracers)
def process_axis_index(self, axis_name):
from jax._src.lax import lax, parallel # pytype: disable=import-error
bind = HashableFunction(
lambda _: parallel.axis_index(axis_name),
(parallel.axis_index, axis_name))
fake_primitive = FakePrimitive(multiple_results=False, bind=bind)
range = lax.iota(np.int32, core.get_axis_env().axis_size(axis_name))
dummy_tracer = MapTracer(self, range, {axis_name: 0})
return self.process_primitive(fake_primitive, (dummy_tracer,), {})
def _annot_to_flat(ndim: int, mapped_axes: Iterable[int],
annotation: int | None) -> int | None:
if annotation is None: return None
mapped_axes_ = set(mapped_axes)
return [i for i in range(ndim) if i not in mapped_axes_][annotation]
def _match_annot(axis_name: core.AxisName, axis_size: int, val: Any,
shard_axis_src: dict[core.AxisName, int],
dst_annotation: int | None
) -> tuple[Any, dict[core.AxisName, int]]:
shard_axis_out = dict(shard_axis_src)
src = shard_axis_out.pop(axis_name, None)
dst = _annot_to_flat(np.ndim(val) + (src is None), shard_axis_out.values(),
dst_annotation)
with core.eval_context():
if src == dst:
outval = val
elif type(src) == type(dst) == int:
outval = batching.moveaxis(val, src, dst)
shard_axis_out = _moveaxis(np.ndim(val), shard_axis_src, src, dst)
elif src is None and dst is not None:
outval = batching.broadcast(val, axis_size, dst, None)
shard_axis_out = {n: d + (dst <= d) for n, d in shard_axis_out.items()}
else:
raise NotImplementedError
return outval, shard_axis_out
def _moveaxis(ndim: int, shard_axes: dict[core.AxisName, int],
src: int, dst: int) -> dict[core.AxisName, int]:
lst: list[core.AxisName | None] = [None] * ndim
for k, v in shard_axes.items():
lst[v] = k
name = lst.pop(src)
lst.insert(dst - (src < dst), name)
return {name: i for i, name in enumerate(lst) if name is not None}
| MapTrace |
python | django__django | tests/i18n/test_compilation.py | {
"start": 13995,
"end": 14371
} | class ____(MessageCompilationTests):
work_subdir = "exclude"
def test_locale_paths_pathlib(self):
with override_settings(LOCALE_PATHS=[Path(self.test_dir) / "canned_locale"]):
call_command("compilemessages", locale=["fr"], verbosity=0)
self.assertTrue(os.path.exists("canned_locale/fr/LC_MESSAGES/django.mo"))
| PathLibLocaleCompilationTests |
python | sympy__sympy | sympy/sets/fancysets.py | {
"start": 5876,
"end": 6869
} | class ____(Interval, metaclass=Singleton):
"""
Represents all real numbers
from negative infinity to positive infinity,
including all integer, rational and irrational numbers.
This set is also available as the singleton ``S.Reals``.
Examples
========
>>> from sympy import S, Rational, pi, I
>>> 5 in S.Reals
True
>>> Rational(-1, 2) in S.Reals
True
>>> pi in S.Reals
True
>>> 3*I in S.Reals
False
>>> S.Reals.contains(pi)
True
See Also
========
ComplexRegion
"""
@property
def start(self):
return S.NegativeInfinity
@property
def end(self):
return S.Infinity
@property
def left_open(self):
return True
@property
def right_open(self):
return True
def __eq__(self, other):
return other == Interval(S.NegativeInfinity, S.Infinity)
def __hash__(self):
return hash(Interval(S.NegativeInfinity, S.Infinity))
| Reals |
python | getsentry__sentry | tests/sentry/sentry_apps/api/endpoints/test_sentry_app_installation_external_issues.py | {
"start": 208,
"end": 4584
} | class ____(APITestCase):
def setUp(self) -> None:
self.superuser = self.create_user(email="a@example.com", is_superuser=True)
self.user = self.create_user(email="boop@example.com")
self.org = self.create_organization(owner=self.user)
self.project = self.create_project(organization=self.org)
self.group = self.create_group(project=self.project)
def _set_up_sentry_app(self, name: str, scopes: list[str]) -> None:
self.sentry_app = self.create_sentry_app(
name=name,
organization=self.org,
webhook_url="https://example.com",
scopes=scopes,
)
self.install = self.create_sentry_app_installation(
organization=self.org, slug=self.sentry_app.slug, user=self.user
)
self.api_token = self.create_internal_integration_token(
install=self.install, user=self.user
)
self.url = reverse(
"sentry-api-0-sentry-app-installation-external-issues", args=[self.install.uuid]
)
def _post_data(self) -> dict[str, str | int]:
return {
"issueId": self.group.id,
"webUrl": "https://somerandom.io/project/issue-id",
"project": "ExternalProj",
"identifier": "issue-1",
}
def test_creates_external_issue(self) -> None:
self._set_up_sentry_app("Testin", ["event:write"])
data = self._post_data()
response = self.client.post(
self.url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
external_issue = PlatformExternalIssue.objects.get()
assert response.status_code == 200
assert response.data == {
"id": str(external_issue.id),
"issueId": str(self.group.id),
"serviceType": self.sentry_app.slug,
"displayName": "ExternalProj#issue-1",
"webUrl": "https://somerandom.io/project/issue-id",
}
def test_invalid_group_id(self) -> None:
self._set_up_sentry_app("Testin", ["event:write"])
data = self._post_data()
data["issueId"] = self.create_group(project=self.create_project()).id
response = self.client.post(
self.url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 404
def test_invalid_scopes(self) -> None:
self._set_up_sentry_app("Testin", ["project:read"])
data = self._post_data()
response = self.client.post(
self.url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 403
def test_invalid_token(self) -> None:
"""
You can only create external issues for the integration
whose token you are using to hit this endpoint.
"""
self._set_up_sentry_app("Testin", ["event:write"])
new_install = self.create_sentry_app_installation(
organization=self.org,
slug=self.create_sentry_app(
name="NewApp", organization=self.org, scopes=["event:write"]
).slug,
user=self.user,
)
new_api_token = self.create_internal_integration_token(install=new_install, user=self.user)
data = self._post_data()
response = self.client.post(
self.url,
data=data,
HTTP_AUTHORIZATION=f"Bearer {new_api_token.token}",
)
assert response.status_code == 403
@patch(
"sentry.sentry_apps.external_issues.external_issue_creator.PlatformExternalIssue.objects.update_or_create"
)
def test_external_issue_creation_fails_with_db_error(
self, mock_update_or_create: MagicMock
) -> None:
self._set_up_sentry_app("Testin", ["event:write"])
mock_update_or_create.side_effect = Exception("bruh")
data = self._post_data()
response = self.client.post(
self.url, data=data, HTTP_AUTHORIZATION=f"Bearer {self.api_token.token}"
)
assert response.status_code == 500
assert response.data == {
"detail": f"An issue occured during the integration platform process. Sentry error ID: {None}"
}
mock_update_or_create.assert_called_once()
| SentryAppInstallationExternalIssuesEndpointTest |
python | doocs__leetcode | solution/1100-1199/1101.The Earliest Moment When Everyone Become Friends/Solution.py | {
"start": 0,
"end": 429
} | class ____:
def earliestAcq(self, logs: List[List[int]], n: int) -> int:
def find(x):
if p[x] != x:
p[x] = find(p[x])
return p[x]
p = list(range(n))
for t, x, y in sorted(logs):
if find(x) == find(y):
continue
p[find(x)] = find(y)
n -= 1
if n == 1:
return t
return -1
| Solution |
python | encode__httpx | httpx/_decoders.py | {
"start": 1145,
"end": 2022
} | class ____(ContentDecoder):
"""
Handle 'deflate' decoding.
See: https://stackoverflow.com/questions/1838699
"""
def __init__(self) -> None:
self.first_attempt = True
self.decompressor = zlib.decompressobj()
def decode(self, data: bytes) -> bytes:
was_first_attempt = self.first_attempt
self.first_attempt = False
try:
return self.decompressor.decompress(data)
except zlib.error as exc:
if was_first_attempt:
self.decompressor = zlib.decompressobj(-zlib.MAX_WBITS)
return self.decode(data)
raise DecodingError(str(exc)) from exc
def flush(self) -> bytes:
try:
return self.decompressor.flush()
except zlib.error as exc: # pragma: no cover
raise DecodingError(str(exc)) from exc
| DeflateDecoder |
python | pydantic__pydantic | .github/actions/people/people.py | {
"start": 6455,
"end": 6566
} | class ____(BaseModel):
"""Container for pull request edges."""
edges: list[PullRequestEdge]
| PullRequests |
python | PyCQA__pylint | tests/functional/u/useless/useless_parent_delegation.py | {
"start": 13120,
"end": 13224
} | class ____(SuperTwo):
def __init__(self, a, b, *args):
super().__init__(a, b, *args)
| SubTwoTwo |
python | charliermarsh__ruff | crates/ruff_python_formatter/resources/test/fixtures/ruff/statement/class_definition.py | {
"start": 1532,
"end": 1564
} | class ____(): # comment
pass
| C |
python | getsentry__sentry | src/sentry/integrations/services/integration/model.py | {
"start": 2613,
"end": 2758
} | class ____(RpcModel):
integration: RpcIntegration | None
organization_integration: RpcOrganizationIntegration | None
| RpcOrganizationContext |
python | run-llama__llama_index | llama-index-core/llama_index/core/postprocessor/node.py | {
"start": 707,
"end": 2211
} | class ____(BaseNodePostprocessor):
"""Keyword-based Node processor."""
required_keywords: List[str] = Field(default_factory=list)
exclude_keywords: List[str] = Field(default_factory=list)
lang: str = Field(default="en")
@classmethod
def class_name(cls) -> str:
return "KeywordNodePostprocessor"
def _postprocess_nodes(
self,
nodes: List[NodeWithScore],
query_bundle: Optional[QueryBundle] = None,
) -> List[NodeWithScore]:
"""Postprocess nodes."""
try:
import spacy
except ImportError:
raise ImportError(
"Spacy is not installed, please install it with `pip install spacy`."
)
from spacy.matcher import PhraseMatcher
nlp = spacy.blank(self.lang)
required_matcher = PhraseMatcher(nlp.vocab)
exclude_matcher = PhraseMatcher(nlp.vocab)
required_matcher.add("RequiredKeywords", list(nlp.pipe(self.required_keywords)))
exclude_matcher.add("ExcludeKeywords", list(nlp.pipe(self.exclude_keywords)))
new_nodes = []
for node_with_score in nodes:
node = node_with_score.node
doc = nlp(node.get_content())
if self.required_keywords and not required_matcher(doc):
continue
if self.exclude_keywords and exclude_matcher(doc):
continue
new_nodes.append(node_with_score)
return new_nodes
| KeywordNodePostprocessor |
python | pydantic__pydantic | pydantic-core/tests/test_tzinfo.py | {
"start": 1358,
"end": 8497
} | class ____(unittest.TestCase):
"""Adapted from CPython `timezone` tests
Original tests are located here https://github.com/python/cpython/blob/a0bb4a39d1ca10e4a75f50a9fbe90cc9db28d29e/Lib/test/datetimetester.py#L256
"""
def setUp(self):
self.ACDT = TzInfo(timedelta(hours=9.5).total_seconds())
self.EST = TzInfo(-timedelta(hours=5).total_seconds())
self.UTC = TzInfo(timedelta(0).total_seconds())
self.DT = datetime(2010, 1, 1)
def test_str(self):
for tz in [self.ACDT, self.EST]:
self.assertEqual(str(tz), tz.tzname(None))
def test_constructor(self):
for subminute in [timedelta(microseconds=1), timedelta(seconds=1)]:
tz = TzInfo(subminute.total_seconds())
self.assertNotEqual(tz.utcoffset(None) % timedelta(minutes=1), 0)
# invalid offsets
for invalid in [timedelta(1, 1), timedelta(1)]:
self.assertRaises(ValueError, TzInfo, invalid.total_seconds())
self.assertRaises(ValueError, TzInfo, -invalid.total_seconds())
with self.assertRaises(TypeError):
TzInfo(None)
with self.assertRaises(TypeError):
TzInfo(timedelta(seconds=42))
with self.assertRaises(TypeError):
TzInfo(ZERO, None)
with self.assertRaises(TypeError):
TzInfo(ZERO, 42)
with self.assertRaises(TypeError):
TzInfo(ZERO, 'ABC', 'extra')
def test_inheritance(self):
self.assertIsInstance(self.EST, tzinfo)
def test_utcoffset(self):
dummy = self.DT
for h in [0, 1.5, 12]:
offset = h * HOUR
self.assertEqual(timedelta(seconds=offset), TzInfo(offset).utcoffset(dummy))
self.assertEqual(timedelta(seconds=-offset), TzInfo(-offset).utcoffset(dummy))
self.assertEqual(self.EST.utcoffset(''), timedelta(hours=-5))
self.assertEqual(self.EST.utcoffset(5), timedelta(hours=-5))
def test_dst(self):
self.EST.dst('') is None
self.EST.dst(5) is None
def test_tzname(self):
self.assertEqual('-05:00', TzInfo(-5 * HOUR).tzname(None))
self.assertEqual('+09:30', TzInfo(9.5 * HOUR).tzname(None))
self.assertEqual('-00:01', TzInfo(timedelta(minutes=-1).total_seconds()).tzname(None))
# Sub-minute offsets:
self.assertEqual('+01:06:40', TzInfo(timedelta(0, 4000).total_seconds()).tzname(None))
self.assertEqual('-01:06:40', TzInfo(-timedelta(0, 4000).total_seconds()).tzname(None))
self.assertEqual('+01:06:40', TzInfo(timedelta(0, 4000, 1).total_seconds()).tzname(None))
self.assertEqual('-01:06:40', TzInfo(-timedelta(0, 4000, 1).total_seconds()).tzname(None))
self.assertEqual(self.EST.tzname(''), '-05:00')
self.assertEqual(self.EST.tzname(5), '-05:00')
def test_fromutc(self):
for tz in [self.EST, self.ACDT]:
utctime = self.DT.replace(tzinfo=tz)
local = tz.fromutc(utctime)
self.assertEqual(local - utctime, tz.utcoffset(local))
self.assertEqual(local, self.DT.replace(tzinfo=timezone.utc))
def test_comparison(self):
self.assertNotEqual(TzInfo(ZERO), TzInfo(HOUR))
self.assertEqual(TzInfo(HOUR), TzInfo(HOUR))
self.assertFalse(TzInfo(ZERO) < TzInfo(ZERO))
self.assertIn(TzInfo(ZERO), {TzInfo(ZERO)})
self.assertTrue(TzInfo(ZERO) is not None)
self.assertFalse(TzInfo(ZERO) is None)
tz = TzInfo(ZERO)
self.assertTrue(tz == ALWAYS_EQ)
self.assertFalse(tz != ALWAYS_EQ)
self.assertTrue(tz < LARGEST)
self.assertFalse(tz > LARGEST)
self.assertTrue(tz <= LARGEST)
self.assertFalse(tz >= LARGEST)
self.assertFalse(tz < SMALLEST)
self.assertTrue(tz > SMALLEST)
self.assertFalse(tz <= SMALLEST)
self.assertTrue(tz >= SMALLEST)
# offset based comparison tests for tzinfo derived classes like datetime.timezone.
utcdatetime = self.DT.replace(tzinfo=timezone.utc)
self.assertTrue(tz == utcdatetime.tzinfo)
estdatetime = self.DT.replace(tzinfo=timezone(-timedelta(hours=5)))
self.assertTrue(self.EST == estdatetime.tzinfo)
self.assertTrue(tz > estdatetime.tzinfo)
if sys.platform == 'linux':
try:
europe_london = ZoneInfo('Europe/London')
except ZoneInfoNotFoundError:
# tz data not available
pass
else:
self.assertFalse(tz == europe_london)
with self.assertRaises(TypeError):
tz > europe_london
def test_copy(self):
for tz in self.ACDT, self.EST:
tz_copy = copy.copy(tz)
self.assertEqual(tz_copy, tz)
def test_deepcopy(self):
for tz in self.ACDT, self.EST:
tz_copy = copy.deepcopy(tz)
self.assertEqual(tz_copy, tz)
def test_offset_boundaries(self):
# Test timedeltas close to the boundaries
time_deltas = [timedelta(hours=23, minutes=59), timedelta(hours=23, minutes=59, seconds=59)]
time_deltas.extend([-delta for delta in time_deltas])
for delta in time_deltas:
with self.subTest(test_type='good', delta=delta):
print(delta.total_seconds())
TzInfo(delta.total_seconds())
# Test timedeltas on and outside the boundaries
bad_time_deltas = [timedelta(hours=24), timedelta(hours=24, microseconds=1)]
bad_time_deltas.extend([-delta for delta in bad_time_deltas])
for delta in bad_time_deltas:
with self.subTest(test_type='bad', delta=delta):
with self.assertRaises(ValueError):
TzInfo(delta.total_seconds())
def test_no_args_constructor(self):
# Test that TzInfo can be constructed without arguments
tz = TzInfo()
self.assertEqual(tz.utcoffset(None), timedelta(0))
self.assertEqual(str(tz), 'UTC')
def test_pickle(self):
# Test that TzInfo can be pickled and unpickled
for tz in self.ACDT, self.EST, self.UTC:
for pickler, unpickler, proto in pickle_choices:
with self.subTest(tz=tz, proto=proto):
pickled = pickler.dumps(tz, proto)
unpickled = unpickler.loads(pickled)
self.assertEqual(tz, unpickled)
self.assertEqual(tz.utcoffset(None), unpickled.utcoffset(None))
def test_tzinfo_could_be_reused():
class Model:
value: datetime
v = SchemaValidator(
core_schema.model_schema(
Model, core_schema.model_fields_schema({'value': core_schema.model_field(core_schema.datetime_schema())})
)
)
m = v.validate_python({'value': '2015-10-21T15:28:00.000000+01:00'})
target = datetime(1955, 11, 12, 14, 38, tzinfo=m.value.tzinfo)
assert target == datetime(1955, 11, 12, 14, 38, tzinfo=timezone(timedelta(hours=1)))
now = datetime.now(tz=m.value.tzinfo)
assert isinstance(now, datetime)
| TestTzInfo |
python | openai__openai-python | src/openai/types/responses/custom_tool_param.py | {
"start": 291,
"end": 748
} | class ____(TypedDict, total=False):
name: Required[str]
"""The name of the custom tool, used to identify it in tool calls."""
type: Required[Literal["custom"]]
"""The type of the custom tool. Always `custom`."""
description: str
"""Optional description of the custom tool, used to provide more context."""
format: CustomToolInputFormat
"""The input format for the custom tool. Default is unconstrained text."""
| CustomToolParam |
python | django__django | tests/order_with_respect_to/models.py | {
"start": 933,
"end": 1098
} | class ____(models.Model):
dimension = models.ForeignKey("Dimension", on_delete=models.CASCADE)
class Meta:
order_with_respect_to = "dimension"
| Component |
python | facebook__pyre-check | client/configuration/unwatched.py | {
"start": 1571,
"end": 2884
} | class ____:
change_indicator: str
files: UnwatchedFiles
@staticmethod
def from_json(json: Dict[str, object]) -> "UnwatchedDependency":
change_indicator = json.get("change_indicator", None)
if change_indicator is None:
raise exceptions.InvalidConfiguration(
"Missing `change_indicator` field in UnwatchedDependency"
)
if not isinstance(change_indicator, str):
raise exceptions.InvalidConfiguration(
"`change_indicator` field in UnwatchedDependency must be a string"
)
files_json = json.get("files", None)
if files_json is None:
raise exceptions.InvalidConfiguration(
"Missing `files` field in UnwatchedDependency"
)
if not isinstance(files_json, dict):
raise exceptions.InvalidConfiguration(
"`files` field in UnwatchedDependency must be a dict"
)
return UnwatchedDependency(
change_indicator=change_indicator,
files=UnwatchedFiles.from_json(files_json),
)
def to_json(self) -> Dict[str, object]:
return {
"change_indicator": str(self.change_indicator),
"files": self.files.to_json(),
}
| UnwatchedDependency |
python | getsentry__sentry | src/sentry/replays/lib/new_query/fields.py | {
"start": 6321,
"end": 6414
} | class ____(ColumnField[int]):
"""Integer-type condition column field."""
| IntegerColumnField |
python | astropy__astropy | astropy/modeling/rotations.py | {
"start": 1972,
"end": 4323
} | class ____(Model):
"""
Perform a series of rotations about different axis in 3D space.
Positive angles represent a counter-clockwise rotation.
Parameters
----------
angles : array-like
Angles of rotation in deg in the order of axes_order.
axes_order : str
A sequence of 'x', 'y', 'z' corresponding to axis of rotation.
Examples
--------
>>> model = RotationSequence3D([1.1, 2.1, 3.1, 4.1], axes_order='xyzx')
"""
standard_broadcasting = False
_separable = False
n_inputs = 3
n_outputs = 3
angles = Parameter(
default=[],
getter=_to_orig_unit,
setter=_to_radian,
description="Angles of rotation in deg in the order of axes_order",
)
def __init__(self, angles, axes_order, name=None):
self.axes = ["x", "y", "z"]
unrecognized = set(axes_order).difference(self.axes)
if unrecognized:
raise ValueError(
f"Unrecognized axis label {unrecognized}; should be one of {self.axes} "
)
self.axes_order = axes_order
if len(angles) != len(axes_order):
raise ValueError(
f"The number of angles {len(angles)} should match "
f"the number of axes {len(axes_order)}."
)
super().__init__(angles, name=name)
self._inputs = ("x", "y", "z")
self._outputs = ("x", "y", "z")
@property
def inverse(self):
"""Inverse rotation."""
angles = self.angles.value[::-1] * -1
return self.__class__(angles, axes_order=self.axes_order[::-1])
def evaluate(self, x, y, z, angles):
"""
Apply the rotation to a set of 3D Cartesian coordinates.
"""
if x.shape != y.shape or x.shape != z.shape:
raise ValueError("Expected input arrays to have the same shape")
# Note: If the original shape was () (an array scalar) convert to a
# 1-element 1-D array on output for consistency with most other models
orig_shape = x.shape or (1,)
inarr = np.array([x.ravel(), y.ravel(), z.ravel()])
result = np.dot(_create_matrix(angles[0], self.axes_order), inarr)
x, y, z = result[0], result[1], result[2]
x.shape = y.shape = z.shape = orig_shape
return x, y, z
| RotationSequence3D |
python | ray-project__ray | python/ray/train/_internal/state/schema.py | {
"start": 910,
"end": 1869
} | class ____(BaseModel):
"""Metadata of a Ray Train worker."""
actor_id: str = Field(description="Actor ID of the worker.")
world_rank: int = Field(description="World rank of the worker.")
local_rank: int = Field(description="Local rank of the worker.")
node_rank: int = Field(description="Node rank of the worker.")
node_id: str = Field(description="ID of the node that the worker is running on.")
node_ip: str = Field(
description="IP address of the node that the worker is running on."
)
pid: int = Field(description="Process ID of the worker.")
gpu_ids: List[int] = Field(
description="A list of GPU ids allocated to that worker."
)
status: ActorStatusEnum = Field(
description="The status of the train worker actor. It can be ALIVE or DEAD."
)
resources: Dict[str, float] = Field(
description="The resources allocated to the worker."
)
@DeveloperAPI
| TrainWorkerInfo |
python | pennersr__django-allauth | allauth/account/views.py | {
"start": 21530,
"end": 21852
} | class ____(TemplateView):
template_name = "account/password_reset_done." + app_settings.TEMPLATE_EXTENSION
password_reset_done = PasswordResetDoneView.as_view()
@method_decorator(rate_limit(action="reset_password_from_key"), name="dispatch")
@method_decorator(login_not_required, name="dispatch")
| PasswordResetDoneView |
python | openai__openai-python | src/openai/resources/containers/files/content.py | {
"start": 5713,
"end": 5950
} | class ____:
def __init__(self, content: AsyncContent) -> None:
self._content = content
self.retrieve = _legacy_response.async_to_raw_response_wrapper(
content.retrieve,
)
| AsyncContentWithRawResponse |
python | scipy__scipy | scipy/stats/_continuous_distns.py | {
"start": 349337,
"end": 349709
} | class ____(FitDataError):
def __init__(self, ptp, fscale):
self.args = (
"Invalid values in `data`. Maximum likelihood estimation with "
"the uniform distribution and fixed scale requires that "
f"np.ptp(data) <= fscale, but np.ptp(data) = {ptp} and "
f"fscale = {fscale}."
)
| FitUniformFixedScaleDataError |
python | huggingface__transformers | tests/models/swin2sr/test_modeling_swin2sr.py | {
"start": 1334,
"end": 5606
} | class ____:
def __init__(
self,
parent,
batch_size=13,
image_size=32,
patch_size=1,
num_channels=3,
num_channels_out=1,
embed_dim=16,
depths=[1, 2, 1],
num_heads=[2, 2, 4],
window_size=2,
mlp_ratio=2.0,
qkv_bias=True,
hidden_dropout_prob=0.0,
attention_probs_dropout_prob=0.0,
drop_path_rate=0.1,
hidden_act="gelu",
use_absolute_embeddings=False,
patch_norm=True,
initializer_range=0.02,
layer_norm_eps=1e-5,
is_training=True,
scope=None,
use_labels=False,
upscale=2,
):
self.parent = parent
self.batch_size = batch_size
self.image_size = image_size
self.patch_size = patch_size
self.num_channels = num_channels
self.num_channels_out = num_channels_out
self.embed_dim = embed_dim
self.depths = depths
self.num_heads = num_heads
self.window_size = window_size
self.mlp_ratio = mlp_ratio
self.qkv_bias = qkv_bias
self.hidden_dropout_prob = hidden_dropout_prob
self.attention_probs_dropout_prob = attention_probs_dropout_prob
self.drop_path_rate = drop_path_rate
self.hidden_act = hidden_act
self.use_absolute_embeddings = use_absolute_embeddings
self.patch_norm = patch_norm
self.layer_norm_eps = layer_norm_eps
self.initializer_range = initializer_range
self.is_training = is_training
self.scope = scope
self.use_labels = use_labels
self.upscale = upscale
# here we set some attributes to make tests pass
self.num_hidden_layers = len(depths)
self.hidden_size = embed_dim
self.seq_length = (image_size // patch_size) ** 2
def prepare_config_and_inputs(self):
pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
labels = None
if self.use_labels:
labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
config = self.get_config()
return config, pixel_values, labels
def get_config(self):
return Swin2SRConfig(
image_size=self.image_size,
patch_size=self.patch_size,
num_channels=self.num_channels,
num_channels_out=self.num_channels_out,
embed_dim=self.embed_dim,
depths=self.depths,
num_heads=self.num_heads,
window_size=self.window_size,
mlp_ratio=self.mlp_ratio,
qkv_bias=self.qkv_bias,
hidden_dropout_prob=self.hidden_dropout_prob,
attention_probs_dropout_prob=self.attention_probs_dropout_prob,
drop_path_rate=self.drop_path_rate,
hidden_act=self.hidden_act,
use_absolute_embeddings=self.use_absolute_embeddings,
path_norm=self.patch_norm,
layer_norm_eps=self.layer_norm_eps,
initializer_range=self.initializer_range,
upscale=self.upscale,
)
def create_and_check_model(self, config, pixel_values, labels):
model = Swin2SRModel(config=config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.embed_dim, self.image_size, self.image_size)
)
def create_and_check_for_image_super_resolution(self, config, pixel_values, labels):
model = Swin2SRForImageSuperResolution(config)
model.to(torch_device)
model.eval()
result = model(pixel_values)
expected_image_size = self.image_size * self.upscale
self.parent.assertEqual(
result.reconstruction.shape,
(self.batch_size, self.num_channels_out, expected_image_size, expected_image_size),
)
def prepare_config_and_inputs_for_common(self):
config_and_inputs = self.prepare_config_and_inputs()
config, pixel_values, labels = config_and_inputs
inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
| Swin2SRModelTester |
python | ray-project__ray | python/ray/autoscaler/v2/instance_manager/config.py | {
"start": 18204,
"end": 19086
} | class ____(IConfigReader):
"""A class that reads cluster config from a K8s RayCluster CR."""
def __init__(self, config_producer: AutoscalingConfigProducer):
self._config_producer = config_producer
self._cached_config = self._generate_configs_from_k8s()
def _generate_configs_from_k8s(self) -> AutoscalingConfig:
return AutoscalingConfig(self._config_producer())
def get_cached_autoscaling_config(self) -> AutoscalingConfig:
"""
Returns:
AutoscalingConfig: The autoscaling config.
"""
return self._cached_config
def refresh_cached_autoscaling_config(self):
"""
Reads the configs from the K8s RayCluster CR.
This reads from the K8s API server every time to pick up changes.
"""
self._cached_config = self._generate_configs_from_k8s()
| KubeRayConfigReader |
python | allegroai__clearml | clearml/automation/parameters.py | {
"start": 10604,
"end": 13024
} | class ____(Parameter):
"""
Discrete randomly sampled Hyper-Parameter object.
"""
def __init__(
self,
parameter_combinations: Sequence[Mapping[str, Union[float, int, str, Parameter]]] = (),
) -> ():
"""
Uniformly sample values form a list of discrete options (combinations) of parameters.
:param list parameter_combinations: The list/tuple of valid parameter combinations.
For example, two combinations with three specific parameters per combination:
.. code-block:: javascript
[
{"opt1": 10, "arg2": 20, "arg2": 30},
{"opt2": 11, "arg2": 22, "arg2": 33}
]
Two complex combination each one sampled from a different range:
.. code-block:: javascript
[
{"opt1": UniformParameterRange('arg1',0,1) , "arg2": 20},
{"opt2": UniformParameterRange('arg1',11,12), "arg2": 22},
]
"""
super(ParameterSet, self).__init__(name=None)
self.values = parameter_combinations
def get_value(self) -> Mapping[str, Any]:
"""
Return uniformly sampled value from the valid list of values.
:return: ``{self.name: random entry from self.value}``
"""
return self._get_value(self._random.choice(self.values))
def to_list(self) -> Sequence[Mapping[str, Any]]:
"""
Return a list of all the valid values of the Parameter.
:return: list of dicts ``{name: value}``
"""
combinations = []
for combination in self.values:
single_option = {}
for k, v in combination.items():
if isinstance(v, Parameter):
single_option[k] = v.to_list()
else:
single_option[k] = [
{k: v},
]
for state in product(*single_option.values()):
combinations.append(dict(kv for d in state for kv in d.items()))
return combinations
@staticmethod
def _get_value(combination: dict) -> dict:
value_dict = {}
for k, v in combination.items():
if isinstance(v, Parameter):
value_dict.update(v.get_value())
else:
value_dict[k] = v
return value_dict
| ParameterSet |
python | kamyu104__LeetCode-Solutions | Python/count-paths-that-can-form-a-palindrome-in-a-tree.py | {
"start": 1020,
"end": 1734
} | class ____(object):
def countPalindromePaths(self, parent, s):
"""
:type parent: List[int]
:type s: str
:rtype: int
"""
def dfs(u, mask):
result = 0
if u:
mask ^= 1<<(ord(s[u])-ord('a'))
result += cnt[mask]+sum(cnt[mask^(1<<i)] if mask^(1<<i) in cnt else 0 for i in xrange(26))
cnt[mask] += 1
return result+sum(dfs(v, mask) for v in adj[u])
adj = [[] for _ in xrange(len(parent))]
for u, p in enumerate(parent):
if p != -1:
adj[p].append(u)
cnt = collections.defaultdict(int)
cnt[0] = 1
return dfs(0, 0)
| Solution2 |
python | ansible__ansible | lib/ansible/plugins/doc_fragments/decrypt.py | {
"start": 208,
"end": 487
} | class ____(object):
# Standard files documentation fragment
DOCUMENTATION = r"""
options:
decrypt:
description:
- This option controls the auto-decryption of source files using vault.
type: bool
default: yes
version_added: '2.4'
"""
| ModuleDocFragment |
python | run-llama__llama_index | llama-index-integrations/tools/llama-index-tools-arxiv/llama_index/tools/arxiv/base.py | {
"start": 163,
"end": 1199
} | class ____(BaseToolSpec):
"""arXiv tool spec."""
spec_functions = ["arxiv_query"]
def __init__(self, max_results: Optional[int] = 3):
self.max_results = max_results
def arxiv_query(self, query: str, sort_by: Optional[str] = "relevance"):
"""
A tool to query arxiv.org
ArXiv contains a variety of papers that are useful for answering
mathematic and scientific questions.
Args:
query (str): The query to be passed to arXiv.
sort_by (str): Either 'relevance' (default) or 'recent'
"""
import arxiv
sort = arxiv.SortCriterion.Relevance
if sort_by == "recent":
sort = arxiv.SortCriterion.SubmittedDate
search = arxiv.Search(query, max_results=self.max_results, sort_by=sort)
results = []
for result in search.results():
results.append(
Document(text=f"{result.pdf_url}: {result.title}\n{result.summary}")
)
return results
| ArxivToolSpec |
python | lxml__lxml | test.py | {
"start": 3026,
"end": 11065
} | class ____:
"""Configurable properties of the test runner."""
# test location
basedir = '' # base directory for tests (defaults to
# basedir of argv[0] + 'src'), must be absolute
src_in_path = True # add 'src/' to sys.path
follow_symlinks = True # should symlinks to subdirectories be
# followed? (hardcoded, may cause loops)
# which tests to run
unit_tests = False # unit tests (default if both are false)
functional_tests = False # functional tests
# test filtering
level = 1 # run only tests at this or lower level
# (if None, runs all tests)
pathname_regex = '' # regexp for filtering filenames
test_regex = '' # regexp for filtering test cases
# actions to take
list_files = False # --list-files
list_tests = False # --list-tests
list_hooks = False # --list-hooks
run_tests = True # run tests (disabled by --list-foo)
# output verbosity
verbosity = 0 # verbosity level (-v)
quiet = 0 # do not print anything on success (-q)
warn_omitted = False # produce warnings when a test case is
# not included in a test suite (-w)
progress = False # show running progress (-p)
coverage = False # produce coverage reports (--coverage)
coverdir = 'coverage' # where to put them (currently hardcoded)
immediate_errors = False # show tracebacks twice (currently hardcoded)
screen_width = 80 # screen width (autodetected)
def compile_matcher(regex):
"""Returns a function that takes one argument and returns True or False.
Regex is a regular expression. Empty regex matches everything. There
is one expression: if the regex starts with "!", the meaning of it is
reversed.
"""
if not regex:
return lambda x: True
elif regex == '!':
return lambda x: False
elif regex.startswith('!'):
rx = re.compile(regex[1:])
return lambda x: rx.search(x) is None
else:
rx = re.compile(regex)
return lambda x: rx.search(x) is not None
def walk_with_symlinks(top, func, arg):
"""Like os.path.walk, but follows symlinks on POSIX systems.
If the symlinks create a loop, this function will never finish.
"""
try:
names = os.listdir(top)
except os.error:
return
func(arg, top, names)
exceptions = ('.', '..')
for name in names:
if name not in exceptions:
name = os.path.join(top, name)
if os.path.isdir(name):
walk_with_symlinks(name, func, arg)
def get_test_files(cfg):
"""Returns a list of test module filenames."""
matcher = compile_matcher(cfg.pathname_regex)
results = []
test_names = []
if cfg.unit_tests:
test_names.append('tests')
if cfg.functional_tests:
test_names.append('ftests')
baselen = len(cfg.basedir) + 1
def visit(ignored, dir, files):
if os.path.basename(dir) not in test_names:
for name in test_names:
if name + '.py' in files:
path = os.path.join(dir, name + '.py')
if matcher(path[baselen:]):
results.append(path)
return
if '__init__.py' not in files:
stderr("%s is not a package" % dir)
return
for file in files:
if file.startswith('test') and file.endswith('.py'):
path = os.path.join(dir, file)
if matcher(path[baselen:]):
results.append(path)
if cfg.follow_symlinks:
walker = walk_with_symlinks
else:
walker = os.path.walk
walker(cfg.basedir, visit, None)
results.sort()
return results
def import_module(filename, cfg, cov=None):
"""Imports and returns a module."""
filename = os.path.splitext(filename)[0]
modname = filename[len(cfg.basedir):].replace(os.path.sep, '.')
if modname.startswith('.'):
modname = modname[1:]
if cov is not None:
cov.start()
mod = __import__(modname)
if cov is not None:
cov.stop()
components = modname.split('.')
for comp in components[1:]:
mod = getattr(mod, comp)
return mod
def filter_testsuite(suite, matcher, level=None):
"""Returns a flattened list of test cases that match the given matcher."""
if not isinstance(suite, unittest.TestSuite):
raise TypeError('not a TestSuite', suite)
results = []
for test in suite._tests:
if level is not None and getattr(test, 'level', 0) > level:
continue
if isinstance(test, unittest.TestCase):
testname = test.id() # package.module.class.method
if matcher(testname):
results.append(test)
else:
filtered = filter_testsuite(test, matcher, level)
results.extend(filtered)
return results
def get_all_test_cases(module):
"""Returns a list of all test case classes defined in a given module."""
results = []
for name in dir(module):
if not name.startswith('Test'):
continue
item = getattr(module, name)
if (isinstance(item, (type, types.ClassType)) and
issubclass(item, unittest.TestCase)):
results.append(item)
return results
def get_test_classes_from_testsuite(suite):
"""Returns a set of test case classes used in a test suite."""
if not isinstance(suite, unittest.TestSuite):
raise TypeError('not a TestSuite', suite)
results = set()
for test in suite._tests:
if isinstance(test, unittest.TestCase):
results.add(test.__class__)
else:
classes = get_test_classes_from_testsuite(test)
results.update(classes)
return results
def get_test_cases(test_files, cfg, cov=None):
"""Returns a list of test cases from a given list of test modules."""
matcher = compile_matcher(cfg.test_regex)
results = []
for file in test_files:
module = import_module(file, cfg, cov=cov)
if cov is not None:
cov.start()
test_suite = module.test_suite()
if cov is not None:
cov.stop()
if test_suite is None:
continue
if cfg.warn_omitted:
all_classes = set(get_all_test_cases(module))
classes_in_suite = get_test_classes_from_testsuite(test_suite)
difference = all_classes - classes_in_suite
for test_class in difference:
# surround the warning with blank lines, otherwise it tends
# to get lost in the noise
stderr("\n%s: WARNING: %s not in test suite\n"
% (file, test_class.__name__))
if (cfg.level is not None and
getattr(test_suite, 'level', 0) > cfg.level):
continue
filtered = filter_testsuite(test_suite, matcher, cfg.level)
results.extend(filtered)
return results
def get_test_hooks(test_files, cfg, cov=None):
"""Returns a list of test hooks from a given list of test modules."""
results = []
dirs = set(map(os.path.dirname, test_files))
for dir in list(dirs):
if os.path.basename(dir) == 'ftests':
dirs.add(os.path.join(os.path.dirname(dir), 'tests'))
dirs = list(dirs)
dirs.sort()
for dir in dirs:
filename = os.path.join(dir, 'checks.py')
if os.path.exists(filename):
module = import_module(filename, cfg, tracer=tracer)
if cov is not None:
cov.start()
hooks = module.test_hooks()
if cov is not None:
cov.stop()
results.extend(hooks)
return results
| Options |
python | weaviate__weaviate-python-client | weaviate/collections/classes/config.py | {
"start": 65297,
"end": 67781
} | class ____(_ConfigBase):
name: str
description: Optional[str]
generative_config: Optional[GenerativeConfig]
inverted_index_config: InvertedIndexConfig
multi_tenancy_config: MultiTenancyConfig
properties: List[PropertyConfig]
references: List[ReferencePropertyConfig]
replication_config: ReplicationConfig
reranker_config: Optional[RerankerConfig]
sharding_config: Optional[ShardingConfig]
vector_index_config: Union[
VectorIndexConfigHNSW, VectorIndexConfigFlat, VectorIndexConfigDynamic, None
]
vector_index_type: Optional[VectorIndexType]
vectorizer_config: Optional[VectorizerConfig]
vectorizer: Optional[Union[Vectorizers, str]]
vector_config: Optional[Dict[str, _NamedVectorConfig]]
def to_dict(self) -> dict:
out = super().to_dict()
out["class"] = out.pop("name")
out["moduleConfig"] = {}
for name in [
("generativeConfig", "generative"),
("vectorizerConfig", "vectorizer"),
("rerankerConfig", "reranker"),
]:
if name[0] not in out:
continue
val = out.pop(name[0])
module_name = val[name[1]]
out["moduleConfig"][module_name] = val.get("model", {})
vectorize_collection_name = val.get("vectorizeCollectionName", None)
if vectorize_collection_name is not None:
out["moduleConfig"][module_name]["vectorizeClassName"] = vectorize_collection_name
if "vectorConfig" in out:
for k, v in out["vectorConfig"].items():
extra_values = v["vectorizer"].pop("model", {})
vectorizer = v["vectorizer"].pop("vectorizer")
out["vectorConfig"][k]["vectorizer"] = {
vectorizer: {**extra_values, **v["vectorizer"]}
}
# remove default values for single vector setup
out.pop(
"vectorIndexType", None
) # if doesn't exist (in the case of named vectors) then do nothing
out.pop(
"vectorIndexConfig", None
) # if doesn't exist (in the case of named vectors) then do nothing
out["properties"] = [
*[prop.to_dict() for prop in self.properties],
*[prop.to_dict() for prop in self.references],
]
out.pop("references")
return out
CollectionConfig = _CollectionConfig
@dataclass
| _CollectionConfig |
python | tensorflow__tensorflow | tensorflow/lite/python/metrics/metrics_interface.py | {
"start": 748,
"end": 1542
} | class ____(metaclass=abc.ABCMeta):
"""Abstract class for TFLiteMetrics."""
@abc.abstractmethod
def increase_counter_debugger_creation(self):
raise NotImplementedError
@abc.abstractmethod
def increase_counter_interpreter_creation(self):
raise NotImplementedError
@abc.abstractmethod
def increase_counter_converter_attempt(self):
raise NotImplementedError
@abc.abstractmethod
def increase_counter_converter_success(self):
raise NotImplementedError
@abc.abstractmethod
def set_converter_param(self, name, value):
raise NotImplementedError
@abc.abstractmethod
def set_converter_error(self, error_data):
raise NotImplementedError
@abc.abstractmethod
def set_converter_latency(self, value):
raise NotImplementedError
| TFLiteMetricsInterface |
python | spack__spack | lib/spack/spack/vendor/ruamel/yaml/cyaml.py | {
"start": 1301,
"end": 1884
} | class ____(CParser, SafeConstructor, Resolver): # type: ignore
def __init__(self, stream, version=None, preserve_quotes=None):
# type: (StreamTextType, Optional[VersionType], Optional[bool]) -> None
CParser.__init__(self, stream)
self._parser = self._composer = self
SafeConstructor.__init__(self, loader=self)
Resolver.__init__(self, loadumper=self)
# self.descend_resolver = self._resolver.descend_resolver
# self.ascend_resolver = self._resolver.ascend_resolver
# self.resolve = self._resolver.resolve
| CSafeLoader |
python | pytorch__pytorch | test/torch_np/numpy_tests/core/test_multiarray.py | {
"start": 155037,
"end": 157022
} | class ____(TestCase):
@parametrize(
"byteorder", [subtest("little", name="little"), subtest("big", name="big")]
)
@parametrize("dtype", [float, int, complex])
def test_basic(self, byteorder, dtype):
dt = np.dtype(dtype).newbyteorder(byteorder)
x = (np.random.random((4, 7)) * 5).astype(dt)
buf = x.tobytes()
assert_array_equal(np.frombuffer(buf, dtype=dt), x.flat)
# @xpassIfTorchDynamo_np
@parametrize(
"obj", [np.arange(10), subtest("12345678", decorators=[xfailIfTorchDynamo])]
)
def test_array_base(self, obj):
# Objects (including NumPy arrays), which do not use the
# `release_buffer` slot should be directly used as a base object.
# See also gh-21612
if isinstance(obj, str):
# @parametrize breaks with bytes objects
obj = bytes(obj, encoding="latin-1")
new = np.frombuffer(obj)
assert new.base is obj
def test_empty(self):
assert_array_equal(np.frombuffer(b""), np.array([]))
@skip("fails on CI, we are unlikely to implement this")
@skipif(
IS_PYPY,
reason="PyPy's memoryview currently does not track exports. See: "
"https://github.com/pypy/pypy/issues/3723",
)
def test_mmap_close(self):
# The old buffer protocol was not safe for some things that the new
# one is. But `frombuffer` always used the old one for a long time.
# Checks that it is safe with the new one (using memoryviews)
with tempfile.TemporaryFile(mode="wb") as tmp:
tmp.write(b"asdf")
tmp.flush()
mm = mmap.mmap(tmp.fileno(), 0)
arr = np.frombuffer(mm, dtype=np.uint8)
with pytest.raises(BufferError):
mm.close() # cannot close while array uses the buffer
del arr
mm.close()
@skip # (reason="TODO") # FIXME: skip -> xfail (a0.shape = (4, 5) raises)
| TestFromBuffer |
python | doocs__leetcode | solution/3100-3199/3171.Find Subarray With Bitwise OR Closest to K/Solution2.py | {
"start": 0,
"end": 255
} | class ____:
def minimumDifference(self, nums: List[int], k: int) -> int:
ans = inf
s = set()
for x in nums:
s = {x | y for y in s} | {x}
ans = min(ans, min(abs(y - k) for y in s))
return ans
| Solution |
python | plotly__plotly.py | plotly/tools.py | {
"start": 20475,
"end": 24915
} | class ____(object):
@staticmethod
def _deprecated(old_method, new_method=None):
if new_method is None:
# The method name stayed the same.
new_method = old_method
warnings.warn(
"plotly.tools.FigureFactory.{} is deprecated. "
"Use plotly.figure_factory.{}".format(old_method, new_method)
)
@staticmethod
def create_2D_density(*args, **kwargs):
FigureFactory._deprecated("create_2D_density", "create_2d_density")
from plotly.figure_factory import create_2d_density
return create_2d_density(*args, **kwargs)
@staticmethod
def create_annotated_heatmap(*args, **kwargs):
FigureFactory._deprecated("create_annotated_heatmap")
from plotly.figure_factory import create_annotated_heatmap
return create_annotated_heatmap(*args, **kwargs)
@staticmethod
def create_candlestick(*args, **kwargs):
FigureFactory._deprecated("create_candlestick")
from plotly.figure_factory import create_candlestick
return create_candlestick(*args, **kwargs)
@staticmethod
def create_dendrogram(*args, **kwargs):
FigureFactory._deprecated("create_dendrogram")
from plotly.figure_factory import create_dendrogram
return create_dendrogram(*args, **kwargs)
@staticmethod
def create_distplot(*args, **kwargs):
FigureFactory._deprecated("create_distplot")
from plotly.figure_factory import create_distplot
return create_distplot(*args, **kwargs)
@staticmethod
def create_facet_grid(*args, **kwargs):
FigureFactory._deprecated("create_facet_grid")
from plotly.figure_factory import create_facet_grid
return create_facet_grid(*args, **kwargs)
@staticmethod
def create_gantt(*args, **kwargs):
FigureFactory._deprecated("create_gantt")
from plotly.figure_factory import create_gantt
return create_gantt(*args, **kwargs)
@staticmethod
def create_ohlc(*args, **kwargs):
FigureFactory._deprecated("create_ohlc")
from plotly.figure_factory import create_ohlc
return create_ohlc(*args, **kwargs)
@staticmethod
def create_quiver(*args, **kwargs):
FigureFactory._deprecated("create_quiver")
from plotly.figure_factory import create_quiver
return create_quiver(*args, **kwargs)
@staticmethod
def create_scatterplotmatrix(*args, **kwargs):
FigureFactory._deprecated("create_scatterplotmatrix")
from plotly.figure_factory import create_scatterplotmatrix
return create_scatterplotmatrix(*args, **kwargs)
@staticmethod
def create_streamline(*args, **kwargs):
FigureFactory._deprecated("create_streamline")
from plotly.figure_factory import create_streamline
return create_streamline(*args, **kwargs)
@staticmethod
def create_table(*args, **kwargs):
FigureFactory._deprecated("create_table")
from plotly.figure_factory import create_table
return create_table(*args, **kwargs)
@staticmethod
def create_trisurf(*args, **kwargs):
FigureFactory._deprecated("create_trisurf")
from plotly.figure_factory import create_trisurf
return create_trisurf(*args, **kwargs)
@staticmethod
def create_violin(*args, **kwargs):
FigureFactory._deprecated("create_violin")
from plotly.figure_factory import create_violin
return create_violin(*args, **kwargs)
def get_config_plotly_server_url():
"""
Function to get the .config file's 'plotly_domain' without importing
the chart_studio package. This property is needed to compute the default
value of the plotly.js config plotlyServerURL, so it is independent of
the chart_studio integration and still needs to live in
Returns
-------
str
"""
config_file = os.path.join(PLOTLY_DIR, ".config")
default_server_url = "https://plot.ly"
if not os.path.exists(config_file):
return default_server_url
with open(config_file, "rt") as f:
try:
config_dict = json.load(f)
if not isinstance(config_dict, dict):
config_dict = {}
except Exception:
# TODO: issue a warning and bubble it up
config_dict = {}
return config_dict.get("plotly_domain", default_server_url)
| FigureFactory |
python | getsentry__sentry | tests/sentry/api/serializers/test_apitoken.py | {
"start": 1606,
"end": 2532
} | class ____(TestApiTokenSerializer):
def setUp(self) -> None:
super().setUp()
attrs = self._serializer.get_attrs(item_list=[self._token], user=self._user)
attrs["application"] = None
self._attrs = attrs
def test_no_refresh_token_on_user_token(self) -> None:
serialized_object = self._serializer.serialize(
obj=self._token, user=self._user, attrs=self._attrs
)
assert "refreshToken" not in serialized_object
def test_refresh_token_on_non_user_token(self) -> None:
with assume_test_silo_mode(SiloMode.CONTROL):
token = ApiToken.objects.create(user=self._user)
assert token.hashed_refresh_token is not None
serialized_object = self._serializer.serialize(
obj=token, user=self._user, attrs=self._attrs
)
assert "refreshToken" in serialized_object
| TestRefreshTokens |
python | tensorflow__tensorflow | tensorflow/python/autograph/pyct/loader_test.py | {
"start": 1086,
"end": 3920
} | class ____(test.TestCase):
def assertAstMatches(self, actual_node, expected_node_src):
expected_node = gast.parse(expected_node_src).body[0]
msg = 'AST did not match expected:\n{}\nActual:\n{}'.format(
pretty_printer.fmt(expected_node),
pretty_printer.fmt(actual_node))
self.assertTrue(ast_util.matches(actual_node, expected_node), msg)
def test_parse_load_identity(self):
def test_fn(x):
a = True
b = ''
if a:
b = (x + 1)
return b
node, _ = parser.parse_entity(test_fn, future_features=())
module, _, _ = loader.load_ast(node)
source = tf_inspect.getsource(module.test_fn)
expected_node_src = textwrap.dedent(tf_inspect.getsource(test_fn))
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
def test_load_ast(self):
node = gast.FunctionDef(
name='f',
args=gast.arguments(
args=[
gast.Name(
'a', ctx=gast.Param(), annotation=None, type_comment=None)
],
posonlyargs=[],
vararg=None,
kwonlyargs=[],
kw_defaults=[],
kwarg=None,
defaults=[]),
body=[
gast.Return(
gast.BinOp(
op=gast.Add(),
left=gast.Name(
'a',
ctx=gast.Load(),
annotation=None,
type_comment=None),
right=gast.Constant(1, kind=None)))
],
decorator_list=[],
returns=None,
type_comment=None)
module, source, _ = loader.load_ast(node)
expected_node_src = """
# coding=utf-8
def f(a):
return (a + 1)
"""
expected_node_src = textwrap.dedent(expected_node_src)
self.assertAstMatches(node, source)
self.assertAstMatches(node, expected_node_src)
self.assertEqual(2, module.f(1))
with open(module.__file__, 'r') as temp_output:
self.assertAstMatches(node, temp_output.read())
def test_load_source(self):
test_source = textwrap.dedent(u"""
# coding=utf-8
def f(a):
'日本語 Δθₜ ← Δθₜ₋₁ + ∇Q(sₜ, aₜ)(rₜ + γₜ₊₁ max Q(⋅))'
return a + 1
""")
module, _ = loader.load_source(test_source, delete_on_exit=True)
self.assertEqual(module.f(1), 2)
self.assertEqual(
module.f.__doc__, '日本語 Δθₜ ← Δθₜ₋₁ + ∇Q(sₜ, aₜ)(rₜ + γₜ₊₁ max Q(⋅))')
def test_cleanup(self):
test_source = textwrap.dedent('')
_, filename = loader.load_source(test_source, delete_on_exit=True)
# Clean up the file before loader.py tries to remove it, to check that the
# latter can deal with that situation.
os.unlink(filename)
if __name__ == '__main__':
test.main()
| LoaderTest |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 11619,
"end": 11778
} | class ____(graphene.Interface):
message = graphene.NonNull(graphene.String)
class Meta:
name = "PipelineRunConflict"
| GraphenePipelineRunConflict |
python | pytorch__pytorch | torch/_dynamo/source.py | {
"start": 30050,
"end": 30588
} | class ____(GetItemSource):
def reconstruct(self, codegen: "PyCodegen") -> None:
codegen.add_push_null(
lambda: codegen.load_import_from(utils.__name__, "tuple_iterator_getitem")
)
codegen(self.base)
codegen.append_output(codegen.create_load_const(self.index))
codegen.extend_output(create_call_function(2, False))
def name(self) -> str:
return f"___tuple_iterator_getitem({self.base.name()}, {self.index!r})"
@dataclasses.dataclass(frozen=True)
| TupleIteratorGetItemSource |
python | tornadoweb__tornado | maint/test/cython/cythonapp_test.py | {
"start": 125,
"end": 450
} | class ____(AsyncTestCase):
@gen_test
def test_native_coroutine(self):
x = yield cythonapp.native_coroutine()
self.assertEqual(x, "goodbye")
@gen_test
def test_decorated_coroutine(self):
x = yield cythonapp.decorated_coroutine()
self.assertEqual(x, "goodbye")
| CythonCoroutineTest |
python | Farama-Foundation__Gymnasium | gymnasium/core.py | {
"start": 537,
"end": 15508
} | class ____(Generic[ObsType, ActType]):
r"""The main Gymnasium class for implementing Reinforcement Learning Agents environments.
The class encapsulates an environment with arbitrary behind-the-scenes dynamics through the :meth:`step` and :meth:`reset` functions.
An environment can be partially or fully observed by single agents. For multi-agent environments, see PettingZoo.
The main API methods that users of this class need to know are:
- :meth:`step` - Updates an environment with actions returning the next agent observation, the reward for taking that actions,
if the environment has terminated or truncated due to the latest action and information from the environment about the step, i.e. metrics, debug info.
- :meth:`reset` - Resets the environment to an initial state, required before calling step.
Returns the first agent observation for an episode and information, i.e. metrics, debug info.
- :meth:`render` - Renders the environments to help visualise what the agent see, examples modes are "human", "rgb_array", "ansi" for text.
- :meth:`close` - Closes the environment, important when external software is used, i.e. pygame for rendering, databases
Environments have additional attributes for users to understand the implementation
- :attr:`action_space` - The Space object corresponding to valid actions, all valid actions should be contained within the space.
- :attr:`observation_space` - The Space object corresponding to valid observations, all valid observations should be contained within the space.
- :attr:`spec` - An environment spec that contains the information used to initialize the environment from :meth:`gymnasium.make`
- :attr:`metadata` - The metadata of the environment, e.g. `{"render_modes": ["rgb_array", "human"], "render_fps": 30}`. For Jax or Torch, this can be indicated to users with `"jax"=True` or `"torch"=True`.
- :attr:`np_random` - The random number generator for the environment. This is automatically assigned during
``super().reset(seed=seed)`` and when assessing :attr:`np_random`.
.. seealso:: For modifying or extending environments use the :class:`gymnasium.Wrapper` class
Note:
To get reproducible sampling of actions, a seed can be set with ``env.action_space.seed(123)``.
Note:
For strict type checking (e.g. mypy or pyright), :class:`Env` is a generic class with two parameterized types: ``ObsType`` and ``ActType``.
The ``ObsType`` and ``ActType`` are the expected types of the observations and actions used in :meth:`reset` and :meth:`step`.
The environment's :attr:`observation_space` and :attr:`action_space` should have type ``Space[ObsType]`` and ``Space[ActType]``,
see a space's implementation to find its parameterized type.
"""
# Set this in SOME subclasses
metadata: dict[str, Any] = {"render_modes": []}
# define render_mode if your environment supports rendering
render_mode: str | None = None
spec: EnvSpec | None = None
# Set these in ALL subclasses
action_space: spaces.Space[ActType]
observation_space: spaces.Space[ObsType]
# Created
_np_random: np.random.Generator | None = None
# will be set to the "invalid" value -1 if the seed of the currently set rng is unknown
_np_random_seed: int | None = None
def step(
self, action: ActType
) -> tuple[ObsType, SupportsFloat, bool, bool, dict[str, Any]]:
"""Run one timestep of the environment's dynamics using the agent actions.
When the end of an episode is reached (``terminated or truncated``), it is necessary to call :meth:`reset` to
reset this environment's state for the next episode.
.. versionchanged:: 0.26
The Step API was changed removing ``done`` in favor of ``terminated`` and ``truncated`` to make it clearer
to users when the environment had terminated or truncated which is critical for reinforcement learning
bootstrapping algorithms.
Args:
action (ActType): an action provided by the agent to update the environment state.
Returns:
observation (ObsType): An element of the environment's :attr:`observation_space` as the next observation due to the agent actions.
An example is a numpy array containing the positions and velocities of the pole in CartPole.
reward (SupportsFloat): The reward as a result of taking the action.
terminated (bool): Whether the agent reaches the terminal state (as defined under the MDP of the task)
which can be positive or negative. An example is reaching the goal state or moving into the lava from
the Sutton and Barto Gridworld. If true, the user needs to call :meth:`reset`.
truncated (bool): Whether the truncation condition outside the scope of the MDP is satisfied.
Typically, this is a timelimit, but could also be used to indicate an agent physically going out of bounds.
Can be used to end the episode prematurely before a terminal state is reached.
If true, the user needs to call :meth:`reset`.
info (dict): Contains auxiliary diagnostic information (helpful for debugging, learning, and logging).
This might, for instance, contain: metrics that describe the agent's performance state, variables that are
hidden from observations, or individual reward terms that are combined to produce the total reward.
In OpenAI Gym <v26, it contains "TimeLimit.truncated" to distinguish truncation and termination,
however this is deprecated in favour of returning terminated and truncated variables.
done (bool): (Deprecated) A boolean value for if the episode has ended, in which case further :meth:`step` calls will
return undefined results. This was removed in OpenAI Gym v26 in favor of terminated and truncated attributes.
A done signal may be emitted for different reasons: Maybe the task underlying the environment was solved successfully,
a certain timelimit was exceeded, or the physics simulation has entered an invalid state.
"""
raise NotImplementedError
def reset(
self,
*,
seed: int | None = None,
options: dict[str, Any] | None = None,
) -> tuple[ObsType, dict[str, Any]]: # type: ignore
"""Resets the environment to an initial internal state, returning an initial observation and info.
This method generates a new starting state often with some randomness to ensure that the agent explores the
state space and learns a generalised policy about the environment. This randomness can be controlled
with the ``seed`` parameter otherwise if the environment already has a random number generator and
:meth:`reset` is called with ``seed=None``, the RNG is not reset.
Therefore, :meth:`reset` should (in the typical use case) be called with a seed right after initialization and then never again.
For Custom environments, the first line of :meth:`reset` should be ``super().reset(seed=seed)`` which implements
the seeding correctly.
.. versionchanged:: v0.25
The ``return_info`` parameter was removed and now info is expected to be returned.
Args:
seed (optional int): The seed that is used to initialize the environment's PRNG (`np_random`) and
the read-only attribute `np_random_seed`.
If the environment does not already have a PRNG and ``seed=None`` (the default option) is passed,
a seed will be chosen from some source of entropy (e.g. timestamp or /dev/urandom).
However, if the environment already has a PRNG and ``seed=None`` is passed, the PRNG will *not* be reset
and the env's :attr:`np_random_seed` will *not* be altered.
If you pass an integer, the PRNG will be reset even if it already exists.
Usually, you want to pass an integer *right after the environment has been initialized and then never again*.
Please refer to the minimal example above to see this paradigm in action.
options (optional dict): Additional information to specify how the environment is reset (optional,
depending on the specific environment)
Returns:
observation (ObsType): Observation of the initial state. This will be an element of :attr:`observation_space`
(typically a numpy array) and is analogous to the observation returned by :meth:`step`.
info (dictionary): This dictionary contains auxiliary information complementing ``observation``. It should be analogous to
the ``info`` returned by :meth:`step`.
"""
# Initialize the RNG if the seed is manually passed
if seed is not None:
self._np_random, self._np_random_seed = seeding.np_random(seed)
def render(self) -> RenderFrame | list[RenderFrame] | None:
"""Compute the render frames as specified by :attr:`render_mode` during the initialization of the environment.
The environment's :attr:`metadata` render modes (`env.metadata["render_modes"]`) should contain the possible
ways to implement the render modes. In addition, list versions for most render modes is achieved through
`gymnasium.make` which automatically applies a wrapper to collect rendered frames.
Note:
As the :attr:`render_mode` is known during ``__init__``, the objects used to render the environment state
should be initialised in ``__init__``.
By convention, if the :attr:`render_mode` is:
- None (default): no render is computed.
- "human": The environment is continuously rendered in the current display or terminal, usually for human consumption.
This rendering should occur during :meth:`step` and :meth:`render` doesn't need to be called. Returns ``None``.
- "rgb_array": Return a single frame representing the current state of the environment.
A frame is a ``np.ndarray`` with shape ``(x, y, 3)`` representing RGB values for an x-by-y pixel image.
- "ansi": Return a strings (``str``) or ``StringIO.StringIO`` containing a terminal-style text representation
for each time step. The text can include newlines and ANSI escape sequences (e.g. for colors).
- "rgb_array_list" and "ansi_list": List based version of render modes are possible (except Human) through the
wrapper, :py:class:`gymnasium.wrappers.RenderCollection` that is automatically applied during ``gymnasium.make(..., render_mode="rgb_array_list")``.
The frames collected are popped after :meth:`render` is called or :meth:`reset`.
Note:
Make sure that your class's :attr:`metadata` ``"render_modes"`` key includes the list of supported modes.
.. versionchanged:: 0.25.0
The render function was changed to no longer accept parameters, rather these parameters should be specified
in the environment initialised, i.e., ``gymnasium.make("CartPole-v1", render_mode="human")``
"""
raise NotImplementedError
def close(self):
"""After the user has finished using the environment, close contains the code necessary to "clean up" the environment.
This is critical for closing rendering windows, database or HTTP connections.
Calling ``close`` on an already closed environment has no effect and won't raise an error.
"""
pass
@property
def unwrapped(self) -> Env[ObsType, ActType]:
"""Returns the base non-wrapped environment.
Returns:
Env: The base non-wrapped :class:`gymnasium.Env` instance
"""
return self
@property
def np_random_seed(self) -> int:
"""Returns the environment's internal :attr:`_np_random_seed` that if not set will first initialise with a random int as seed.
If :attr:`np_random_seed` was set directly instead of through :meth:`reset` or :meth:`set_np_random_through_seed`,
the seed will take the value -1.
Returns:
int: the seed of the current `np_random` or -1, if the seed of the rng is unknown
"""
if self._np_random_seed is None:
self._np_random, self._np_random_seed = seeding.np_random()
return self._np_random_seed
@property
def np_random(self) -> np.random.Generator:
"""Returns the environment's internal :attr:`_np_random` that if not set will initialise with a random seed.
Returns:
Instances of `np.random.Generator`
"""
if self._np_random is None:
self._np_random, self._np_random_seed = seeding.np_random()
return self._np_random
@np_random.setter
def np_random(self, value: np.random.Generator):
"""Sets the environment's internal :attr:`_np_random` with the user-provided Generator.
Since it is generally not possible to extract a seed from an instance of a random number generator,
this will also set the :attr:`_np_random_seed` to `-1`, which is not valid as input for the creation
of a numpy rng.
"""
self._np_random = value
# Setting a numpy rng with -1 will cause a ValueError
self._np_random_seed = -1
def __str__(self):
"""Returns a string of the environment with :attr:`spec` id's if :attr:`spec.
Returns:
A string identifying the environment
"""
if self.spec is None:
return f"<{type(self).__name__} instance>"
else:
return f"<{type(self).__name__}<{self.spec.id}>>"
def __enter__(self):
"""Support with-statement for the environment."""
return self
def __exit__(self, *args: Any):
"""Support with-statement for the environment and closes the environment."""
self.close()
# propagate exception
return False
def has_wrapper_attr(self, name: str) -> bool:
"""Checks if the attribute `name` exists in the environment."""
return hasattr(self, name)
def get_wrapper_attr(self, name: str) -> Any:
"""Gets the attribute `name` from the environment."""
return getattr(self, name)
def set_wrapper_attr(self, name: str, value: Any, *, force: bool = True) -> bool:
"""Sets the attribute `name` on the environment with `value`, see `Wrapper.set_wrapper_attr` for more info."""
if force or hasattr(self, name):
setattr(self, name, value)
return True
return False
WrapperObsType = TypeVar("WrapperObsType")
WrapperActType = TypeVar("WrapperActType")
| Env |
python | mlflow__mlflow | dev/clint/src/clint/rules/no_rst.py | {
"start": 36,
"end": 151
} | class ____(Rule):
def _message(self) -> str:
return "Do not use RST style. Use Google style instead."
| NoRst |
python | pytorch__pytorch | torch/_export/db/examples/unsupported_operator.py | {
"start": 89,
"end": 411
} | class ____(torch.nn.Module):
"""
torch.sym_min operator is not supported in export.
"""
def forward(self, x):
return x.sum() + torch.sym_min(x.size(0), 100)
example_args = (torch.randn(3, 2),)
tags = {"torch.operator"}
support_level = SupportLevel.NOT_SUPPORTED_YET
model = TorchSymMin()
| TorchSymMin |
python | gevent__gevent | src/greentest/3.9/test_httplib.py | {
"start": 19566,
"end": 47281
} | class ____(TestCase):
def test_dir_with_added_behavior_on_status(self):
# see issue40084
self.assertTrue({'description', 'name', 'phrase', 'value'} <= set(dir(HTTPStatus(404))))
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("''")''')
def test_partial_reads(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_mixed_reads(self):
# readline() should update the remaining length, so that read() knows
# how much data is left and does not raise IncompleteRead
body = "HTTP/1.1 200 Ok\r\nContent-Length: 13\r\n\r\nText\r\nAnother"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.readline(), b'Text\r\n')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(), b'Another')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have Content-Length, HTTPResponse knows when to close itself,
# the same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:password@www.python.org"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\n'
b'Transfer-Encoding: chunked\r\n'
b'\r\n')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile(io.TextIOBase):
mode = 'r'
d = data()
def read(self, blocksize=-1):
return next(self.d)
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_blocksize_request(self):
"""Check that request() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.request("PUT", "/", io.BytesIO(expected), {"Content-Length": "9"})
self.assertEqual(sock.sendall_calls, 3)
body = sock.data.split(b"\r\n\r\n", 1)[1]
self.assertEqual(body, expected)
def test_blocksize_send(self):
"""Check that send() respects the configured block size."""
blocksize = 8 # For easy debugging.
conn = client.HTTPConnection('example.com', blocksize=blocksize)
sock = FakeSocket(None)
conn.sock = sock
expected = b"a" * blocksize + b"b"
conn.send(io.BytesIO(expected))
self.assertEqual(sock.sendall_calls, 2)
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_header_limit_after_100(self):
body = (
'HTTP/1.1 100 OK\r\n'
'r\n' * 32768
)
resp = client.HTTPResponse(FakeSocket(body))
with self.assertRaises(client.HTTPException) as cm:
resp.begin()
# We must assert more because other reasonable errors that we
# do not want can also be HTTPException derived.
self.assertIn('got more than ', str(cm.exception))
self.assertIn('headers', str(cm.exception))
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test httpresponse with no \r\n termination,
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_error_leak(self):
# Test that the socket is not leaked if getresponse() fails
conn = client.HTTPConnection('example.com')
response = None
class Response(client.HTTPResponse):
def __init__(self, *pos, **kw):
nonlocal response
response = self # Avoid garbage collector closing the socket
client.HTTPResponse.__init__(self, *pos, **kw)
conn.response_class = Response
conn.sock = FakeSocket('Invalid status line')
conn.request('GET', '/')
self.assertRaises(client.BadStatusLine, conn.getresponse)
self.assertTrue(response.closed)
self.assertTrue(conn.sock.file_closed)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readlines_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readlines(2000), [expected])
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(2000), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_readline_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\n' + expected + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.readline(10), expected)
self.assertEqual(resp.readline(10), b"")
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_read1_bound_content_length(self):
extradata = b"extradata"
expected = b"Hello123\r\n"
sock = FakeSocket(b'HTTP/1.1 200 OK\r\nContent-Length: 30\r\n\r\n' + expected*3 + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read1(20), expected*2)
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(), extradata) #we read to the end
resp.close()
def test_response_fileno(self):
# Make sure fd returned by fileno is valid.
serv = socket.create_server((HOST, 0))
self.addCleanup(serv.close)
result = None
def run_server():
[conn, address] = serv.accept()
with conn, conn.makefile("rb") as reader:
# Read the request header until a blank line
while True:
line = reader.readline()
if not line.rstrip(b"\r\n"):
break
conn.sendall(b"HTTP/1.1 200 Connection established\r\n\r\n")
nonlocal result
result = reader.read()
thread = threading.Thread(target=run_server)
thread.start()
self.addCleanup(thread.join, float(1))
conn = client.HTTPConnection(*serv.getsockname())
conn.request("CONNECT", "dummy:1234")
response = conn.getresponse()
try:
self.assertEqual(response.status, client.OK)
s = socket.socket(fileno=response.fileno())
try:
s.sendall(b"proxied data\n")
finally:
s.detach()
finally:
response.close()
conn.close()
thread.join()
self.assertEqual(result, b"proxied data\n")
def test_putrequest_override_domain_validation(self):
"""
It should be possible to override the default validation
behavior in putrequest (bpo-38216).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_path(self, url):
pass
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/\x00')
def test_putrequest_override_host_validation(self):
class UnsafeHTTPConnection(client.HTTPConnection):
def _validate_host(self, url):
pass
conn = UnsafeHTTPConnection('example.com\r\n')
conn.sock = FakeSocket('')
# set skip_host so a ValueError is not raised upon adding the
# invalid URL as the value of the "Host:" header
conn.putrequest('GET', '/', skip_host=1)
def test_putrequest_override_encoding(self):
"""
It should be possible to override the default encoding
to transmit bytes in another encoding even if invalid
(bpo-36274).
"""
class UnsafeHTTPConnection(client.HTTPConnection):
def _encode_request(self, str_url):
return str_url.encode('utf-8')
conn = UnsafeHTTPConnection('example.com')
conn.sock = FakeSocket('')
conn.putrequest('GET', '/☃')
| BasicTest |
python | scrapy__scrapy | tests/AsyncCrawlerRunner/multi_parallel.py | {
"start": 265,
"end": 669
} | class ____(Spider):
name = "no_request"
async def start(self):
return
yield
@deferred_f_from_coro_f
async def main(reactor):
configure_logging()
runner = AsyncCrawlerRunner()
runner.crawl(NoRequestsSpider)
runner.crawl(NoRequestsSpider)
await runner.join()
install_reactor("twisted.internet.asyncioreactor.AsyncioSelectorReactor")
react(main)
| NoRequestsSpider |
python | conda__conda | conda/models/records.py | {
"start": 4284,
"end": 5318
} | class ____(StringField):
def __init__(self):
super().__init__(required=False)
def __get__(self, instance, instance_type):
try:
return super().__get__(instance, instance_type)
except AttributeError:
try:
url = instance.url
except AttributeError:
url = None
if url:
return self.unbox(instance, instance_type, Channel(url).subdir)
try:
platform, arch = instance.platform.name, instance.arch
except AttributeError:
platform, arch = None, None
if platform and not arch:
return self.unbox(instance, instance_type, "noarch")
elif platform:
if "x86" in arch:
arch = "64" if "64" in arch else "32"
return self.unbox(instance, instance_type, f"{platform}-{arch}")
else:
return self.unbox(instance, instance_type, context.subdir)
| SubdirField |
python | openai__openai-python | tests/test_utils/test_typing.py | {
"start": 344,
"end": 432
} | class ____(BaseGenericMultipleTypeArgs[_T, _T2, _T3]): ...
| SubclassGenericMultipleTypeArgs |
python | scrapy__scrapy | tests/test_pipeline_files.py | {
"start": 11745,
"end": 11869
} | class ____(TestFilesPipelineFieldsMixin):
item_class = FilesPipelineTestDataClass
@attr.s
| TestFilesPipelineFieldsDataClass |
python | encode__django-rest-framework | tests/test_relations_pk.py | {
"start": 15826,
"end": 16782
} | class ____(TestCase):
def setUp(self):
self.target = ForeignKeyTarget.objects.create(name='target-1')
ForeignKeySource.objects.create(name='source-1', target=self.target)
ForeignKeySource.objects.create(name='source-2', target=self.target)
def test_relation_field_callable_source(self):
serializer = ForeignKeyTargetCallableSourceSerializer(self.target)
expected = {
'id': 1,
'name': 'target-1',
'first_source': 1,
}
with self.assertNumQueries(1):
self.assertEqual(serializer.data, expected)
def test_relation_field_property_source(self):
serializer = ForeignKeyTargetPropertySourceSerializer(self.target)
expected = {
'id': 1,
'name': 'target-1',
'first_source': 1,
}
with self.assertNumQueries(1):
self.assertEqual(serializer.data, expected)
| PKRelationTests |
python | ansible__ansible | test/lib/ansible_test/_internal/cli/argparsing/parsers.py | {
"start": 2524,
"end": 2724
} | class ____:
"""Boundary details for parsing composite input."""
delimiters: str
required: bool
match: t.Optional[str] = None
ready: bool = True
@dataclasses.dataclass
| ParserBoundary |
python | bokeh__bokeh | src/bokeh/models/scales.py | {
"start": 2665,
"end": 2939
} | class ____(ContinuousScale):
''' Represent a linear scale transformation between continuous ranges.
'''
# explicit __init__ to support Init signatures
def __init__(self, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
| LinearScale |
python | spyder-ide__spyder | spyder/plugins/ipythonconsole/utils/kernel_handler.py | {
"start": 2710,
"end": 3406
} | class ____(QThread):
"""Poll for changes in std buffers."""
sig_out = Signal(str)
def __init__(self, parent, std_buffer):
super().__init__(parent)
self._std_buffer = std_buffer
self._closing = False
def run(self):
txt = True
while txt:
try:
txt = self._std_buffer.read1()
except ValueError: # I/O operation on closed file
break
if txt:
try:
txt = txt.decode()
except UnicodeDecodeError:
txt = str(txt)
self.sig_out.emit(txt)
else:
break # EOF
| StdThread |
python | doocs__leetcode | solution/2000-2099/2027.Minimum Moves to Convert String/Solution.py | {
"start": 0,
"end": 241
} | class ____:
def minimumMoves(self, s: str) -> int:
ans = i = 0
while i < len(s):
if s[i] == "X":
ans += 1
i += 3
else:
i += 1
return ans
| Solution |
python | GoogleCloudPlatform__python-docs-samples | functions/slack/main_test.py | {
"start": 933,
"end": 1101
} | class ____:
def __init__(self, data="", headers={}):
self.data = data
self.headers = headers
def get_data(self):
return self.data
| Request |
python | pytorch__pytorch | torch/_inductor/comm_analysis.py | {
"start": 3444,
"end": 16793
} | class ____(IntEnum):
# The ordering and enum values here matches original in
# https://github.com/NVIDIA/nccl/blob/0b083e52096c387bad7a5c5c65b26a9dca54de8c/src/include/devcomm.h#L28
# For difference between these protocols, see https://github.com/NVIDIA/nccl/issues/281#issuecomment-571816990
LL = 0 # Low-latency
# LL128 = 1 # Low-latency 128-byte
# SIMPLE = 2
# Latencies in us
# len(NCCL_ALGO) x len(NCCL_PROTO)
# NOTE: use array instead of tensor to prevent incompatibility with fake mode
baseLat = [
# Tree
[
6.8, # LL
],
# Ring
[
6.6, # LL
],
]
# Latencies in us
# len(NCCL_HW) x len(NCCL_ALGO) x len(NCCL_PROTO)
hwLat = [
# NVLINK
[
[0.6], # Tree (LL)
[0.6], # Ring (LL)
],
# PCI
[
[1.0], # Tree (LL)
[1.0], # Ring (LL)
],
# NET
[
[5.0], # Tree (LL)
[2.7], # Ring (LL)
],
]
# LL128 max BW per channel
llMaxBws = [
# Volta-N1/Intel-N2/Intel-N4
[
39.0,
39.0,
20.4,
],
# Ampere-N1/AMD-N2/AMD-N4
[
87.7,
22.5, # avg of ring & tree
19.0,
],
# Hopper-N1/AMD-N2/AMD-N4
[
87.7,
22.5, # avg of ring & tree
19.0,
],
]
def estimate_nccl_collective_runtime_nccl_estimator(snode) -> Optional[float]: # type: ignore[no-untyped-def]
kernel = snode.node
assert kernel is not None
py_kernel_name = getattr(kernel, "python_kernel_name", "")
pg_name = kernel.constant_args[-1] # type: ignore[attr-defined]
from torch.distributed.distributed_c10d import _resolve_process_group
pg = _resolve_process_group(pg_name)
rank: int = torch.distributed.get_rank(pg)
# TODO(ivankobzarev): Figure out how we can use time estimations,
# without cuda allocations.
device = torch.device(f"cuda:{rank}")
fn = eval(py_kernel_name)
args, kwargs = snode_args_kwargs(snode)
# TODO(ivankobzarev): fix out variants snode_args_kwargs
if "all_gather_into_tensor_out" in py_kernel_name:
args = args[1:] + args[0]
with torch.distributed._time_estimator(group=pg, device=device) as time_estimator:
w = fn(*args, **kwargs)
torch.ops._c10d_functional.wait_tensor.default(w)
est_time_us = time_estimator.estimated_time
# -1000 constant is NCCL return in case of error during estimations.
# Observed it for all_to_all estimations.
if est_time_us < 0:
return None
est_time_ms = est_time_us / 1e3
return est_time_ms
def estimate_nccl_collective_runtime_impl(
tensor_storage_size_bytes: int, group_size: int, coll: NCCL_COLL
) -> float:
"""
Returns estimated NCCL collective runtime in milliseconds (ms).
The following heuristics are copied from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc.
We aim to estimate the runtime as accurately as possible.
Assumptions:
- only ring algorithm (NCCL_ALGO_RING) is used
- only Low-Latency protocol (NCCL_PROTO_LL) is used, i.e. Simple or LL128 is not used
- 8 gpus per node # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
- collective is one of: allreduce, reducescatter, allgather
"""
# Convert bytes to GB
tensor_storage_size_GB = tensor_storage_size_bytes / 1024 / 1024 / 1024
# Currently assumes each node has 8 gpus. And when >1 node is used, assumes each node uses all 8 gpus.
# TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
num_gpus_per_node = 8
nNodes = math.ceil(group_size / num_gpus_per_node)
nRanks = group_size # this is total # of gpus globally that participate in this collective op
if nRanks <= 1:
return 0
# Assumes ring algorithm
nccl_algo = NCCL_ALGO.RING
nccl_proto = NCCL_PROTO.LL
# =============== bandwidth computation ===============
# First compute bandwidth in GB/s; then at the end, convert it to GB/ns
bwIntra = torch._inductor.config.intra_node_bw
bwInter = torch._inductor.config.inter_node_bw
compCapIndex = get_gpu_type()
index2 = nNodes - 1 if nNodes <= 2 else 2
# LL: for single node, we look at GPU type; for multi-node, we look at CPU type
index1 = compCapIndex if nNodes == 1 else 0
llMaxBw = llMaxBws[index1][index2]
# NOTE: each step of ring algorithm is synchronized,
# and is bottlenecked by the slowest link which is the inter-node interconnect.
# hence when nNodes >= 2, bw is inter-node bandwidth.
# NOTE: the original code in https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc
# have this as `if nNodes <= 2` which seems wrong. Corrected it here.
bw = bwIntra if nNodes == 1 else bwInter
nChannels = 2 # Assume # channels is 2
busBw = nChannels * bw
# Various model refinements
busBw = min(
llMaxBw,
busBw
* (1.0 / 4.0 if (nNodes > 1 or coll == NCCL_COLL.ALL_REDUCE) else 1.0 / 3.0),
)
if coll == NCCL_COLL.ALL_REDUCE:
nsteps = 2 * (nRanks - 1)
elif coll == NCCL_COLL.ALL_TO_ALL:
nsteps = 2 * (nRanks - 1)
elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER):
nsteps = nRanks - 1
# Convert bus BW to algorithm BW (tensor bytes / algoBW = actual execution time)
ratio = (1.0 * nRanks) / nsteps # type: ignore[possibly-undefined]
bandwidth = busBw * ratio
# Convert GB/s to GB/ns
bandwidth_GB_per_ns = bandwidth / 1e9
# =============== latency computation ===============
intraHw = NCCL_HW.NVLINK
if coll == NCCL_COLL.ALL_REDUCE:
if nNodes > 1:
nInterSteps = 2 * nNodes
else:
nInterSteps = 0
elif coll in (NCCL_COLL.REDUCE_SCATTER, NCCL_COLL.ALL_GATHER, NCCL_COLL.ALL_TO_ALL):
nInterSteps = nNodes - 1
# First compute latency in us; then at the end, convert it to ns
latency = baseLat[nccl_algo][nccl_proto]
intraLat = hwLat[intraHw][nccl_algo][nccl_proto]
interLat = hwLat[NCCL_HW.NET][nccl_algo][nccl_proto]
# Inter-node rings still have to launch nsteps * net overhead.
netOverhead = 0.0
if nNodes > 1:
netOverhead = 1.0 # getNetOverhead(comm);
intraLat = max(intraLat, netOverhead)
latency += (nsteps - nInterSteps) * intraLat + nInterSteps * interLat # type: ignore[possibly-undefined]
# Convert us to ns
latency_ns = latency * 1e3
# =============== final result ===============
transport_ns = tensor_storage_size_GB / bandwidth_GB_per_ns
ns = transport_ns + latency_ns
ms = ns / 1e6
return ms
################################################################################################################
# The above code and constants are adapted from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc #
################################################################################################################
def estimate_nccl_collective_runtime(node: ir.IRNode) -> float:
"""
Returns estimated NCCL collective runtime in nanoseconds (ms).
The following heuristics are copied from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc.
We aim to estimate the runtime as accurately as possible.
Assumptions:
- only ring algorithm (NCCL_ALGO_RING) is used
- only Low-Latency protocol (NCCL_PROTO_LL) is used, i.e. Simple or LL128 is not used
- 8 gpus per node # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
- collective is one of: allreduce, reducescatter, allgather
"""
tensor_storage_size_bytes = get_collective_input_size_bytes(node)
group_size = get_collective_group_size(node)
coll = get_collective_type(node)
return estimate_nccl_collective_runtime_impl(
tensor_storage_size_bytes, group_size, coll
)
def estimate_fx_collective_size(fx_node: torch.fx.Node) -> int:
"""Estimate the size of a collective operation in bytes, including inputs and outputs."""
input_bytes = None
args, kwargs = fx_node.args, fx_node.kwargs
kwargs = dict(kwargs)
# dont double count pre-allocated buffer passed in
kwargs.pop("out", None)
def tensor_bytes(t: torch.Tensor) -> int:
return get_fx_node_size_numel(t.size()) * get_dtype_size(t.dtype)
def add_inp_bytes(inp: torch.fx.Node):
inp_val = inp.meta.get("val", None)
if not isinstance(inp_val, torch.Tensor):
return
nonlocal input_bytes
if input_bytes is None:
input_bytes = 0
input_bytes += tensor_bytes(inp_val)
pytree.tree_map_only(
torch.fx.Node,
add_inp_bytes,
(args, kwargs),
)
output_val = fx_node.meta.get("val", None)
if input_bytes is None or not isinstance(output_val, torch.Tensor):
return 0
output_bytes = tensor_bytes(output_val)
return input_bytes + output_bytes
def estimate_fx_collective_memory_footprint(fx_node: torch.fx.Node) -> int:
"""Estimate the memory footprint of a collective operation in bytes.
This returns the total bytes that need to be live concurrently in memory.
For all_reduce, we divide by 2 since it can be done in-place.
"""
from torch._inductor.fx_passes.bucketing import (
is_all_reduce_tensor as is_all_reduce,
)
size = estimate_fx_collective_size(fx_node)
return size if not is_all_reduce(fx_node) else size // 2
def estimate_nccl_collective_runtime_from_fx_node(
fx_node: torch.fx.Node,
override_size: Optional[int] = None,
use_nccl_estimator: bool = True,
) -> float:
"""
Returns estimated NCCL collective runtime in nanoseconds (ms).
The following heuristics are copied from https://github.com/NVIDIA/nccl/blob/master/src/graph/tuning.cc.
We aim to estimate the runtime as accurately as possible.
Assumptions:
- only ring algorithm (NCCL_ALGO_RING) is used
- only Low-Latency protocol (NCCL_PROTO_LL) is used, i.e. Simple or LL128 is not used
- 8 gpus per node # TODO: Need to find a way to get accurate "gpus per node" and "# nodes" info.
- collective is one of: allreduce, reducescatter, allgather
"""
from torch.distributed.distributed_c10d import _get_group_size_by_name
if override_size is None:
tensor_storage_size_bytes = estimate_fx_collective_size(fx_node)
else:
tensor_storage_size_bytes = override_size
assert not isinstance(fx_node.target, str)
opt_args_kwargs = normalize_function(
fx_node.target,
args=fx_node.args,
kwargs=fx_node.kwargs,
normalize_to_only_use_kwargs=True,
)
assert opt_args_kwargs is not None
args, kwargs = opt_args_kwargs
group_name = kwargs["group_name"]
group_size = _get_group_size_by_name(group_name)
assert isinstance(fx_node.target, torch._ops.OpOverload)
coll = get_collective_type_from_kernel_name(fx_node.target.name())
def _nccl_estimate() -> Optional[float]:
# TODO: Refactor with estimate_nccl_collective_runtime_nccl_estimator
from torch.distributed.distributed_c10d import (
_get_pg_default_device,
_resolve_process_group,
)
pg = _resolve_process_group(group_name)
if torch.distributed.distributed_c10d.get_backend(pg) == "fake":
# nccl estimator requires real process group
return None
device = _get_pg_default_device(pg)
backend = pg._get_backend(device)
if not backend.supports_time_estimate:
return None
flat_args, flat_args_pytree_spec = pytree.tree_flatten((args, kwargs))
def _tensor(size, dtype, device) -> torch.Tensor: # type: ignore[no-untyped-def]
return torch.empty(
size if override_size is None else [override_size],
dtype=dtype,
device=device,
)
def try_size_hint(s: sympy.Expr) -> int:
return V.graph.sizevars.size_hint(s, fallback=0)
def to_real_tensor(e: Any) -> Any:
if isinstance(e, torch.fx.Node):
return to_real_tensor(e.meta["val"])
if isinstance(e, torch.Tensor):
return _tensor([get_fx_node_size_numel(e.size())], e.dtype, e.device)
return e
flat_args = [to_real_tensor(a) for a in flat_args]
real_args, real_kwargs = pytree.tree_unflatten(flat_args, flat_args_pytree_spec)
fn = fx_node.target
assert isinstance(fn, torch._ops.OpOverload)
with torch.distributed._time_estimator(group=pg) as time_estimator:
w = fn(*real_args, **real_kwargs)
torch.ops._c10d_functional.wait_tensor.default(w)
est_time_us = time_estimator.estimated_time
# -1000 constant is NCCL return in case of error during estimations.
# Observed it for all_to_all estimations.
if est_time_us < 0:
return None
est_time_ms = est_time_us / 1e3
return est_time_ms
if use_nccl_estimator:
est_time_ms = _nccl_estimate()
if est_time_ms is not None:
return est_time_ms
return estimate_nccl_collective_runtime_impl(
tensor_storage_size_bytes, group_size, coll
)
| NCCL_PROTO |
python | django__django | django/db/models/functions/text.py | {
"start": 9580,
"end": 9711
} | class ____(MySQLSHA2Mixin, OracleHashMixin, PostgreSQLSHAMixin, Transform):
function = "SHA256"
lookup_name = "sha256"
| SHA256 |
python | pypa__pip | src/pip/_vendor/urllib3/response.py | {
"start": 3389,
"end": 4249
} | class ____(object):
"""
From RFC7231:
If one or more encodings have been applied to a representation, the
sender that applied the encodings MUST generate a Content-Encoding
header field that lists the content codings in the order in which
they were applied.
"""
def __init__(self, modes):
self._decoders = [_get_decoder(m.strip()) for m in modes.split(",")]
def flush(self):
return self._decoders[0].flush()
def decompress(self, data):
for d in reversed(self._decoders):
data = d.decompress(data)
return data
def _get_decoder(mode):
if "," in mode:
return MultiDecoder(mode)
if mode == "gzip":
return GzipDecoder()
if brotli is not None and mode == "br":
return BrotliDecoder()
return DeflateDecoder()
| MultiDecoder |
python | python__mypy | mypy/test/testtypes.py | {
"start": 54290,
"end": 56126
} | class ____(Suite):
def setUp(self) -> None:
self.fx = TypeFixture()
def test_optional(self) -> None:
t = UnionType.make_union([self.fx.a, self.fx.nonet])
self.assert_union_result(t, [self.fx.a, self.fx.nonet])
def test_two_instances(self) -> None:
t = UnionType.make_union([self.fx.a, self.fx.b])
self.assert_union_result(t, [self.fx.a, self.fx.b])
def test_multiple_same_instances(self) -> None:
t = UnionType.make_union([self.fx.a, self.fx.a])
assert remove_instance_last_known_values(t) == self.fx.a
t = UnionType.make_union([self.fx.a, self.fx.a, self.fx.b])
self.assert_union_result(t, [self.fx.a, self.fx.b])
t = UnionType.make_union([self.fx.a, self.fx.nonet, self.fx.a, self.fx.b])
self.assert_union_result(t, [self.fx.a, self.fx.nonet, self.fx.b])
def test_single_last_known_value(self) -> None:
t = UnionType.make_union([self.fx.lit1_inst, self.fx.nonet])
self.assert_union_result(t, [self.fx.a, self.fx.nonet])
def test_last_known_values_with_merge(self) -> None:
t = UnionType.make_union([self.fx.lit1_inst, self.fx.lit2_inst, self.fx.lit4_inst])
assert remove_instance_last_known_values(t) == self.fx.a
t = UnionType.make_union(
[self.fx.lit1_inst, self.fx.b, self.fx.lit2_inst, self.fx.lit4_inst]
)
self.assert_union_result(t, [self.fx.a, self.fx.b])
def test_generics(self) -> None:
t = UnionType.make_union([self.fx.ga, self.fx.gb])
self.assert_union_result(t, [self.fx.ga, self.fx.gb])
def assert_union_result(self, t: ProperType, expected: list[Type]) -> None:
t2 = remove_instance_last_known_values(t)
assert type(t2) is UnionType
assert t2.items == expected
| RemoveLastKnownValueSuite |
python | Lightning-AI__lightning | tests/tests_pytorch/callbacks/test_model_checkpoint_additional_cases.py | {
"start": 378,
"end": 666
} | class ____(Dataset):
def __init__(self, n: int = 4):
self.x = torch.arange(n, dtype=torch.float32).view(-1, 1)
self.y = self.x.clone()
def __len__(self):
return len(self.x)
def __getitem__(self, idx):
return self.x[idx], self.y[idx]
| TinyDataset |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.