language stringclasses 1
value | repo stringclasses 346
values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | sqlalchemy__sqlalchemy | test/orm/test_transaction.py | {
"start": 1642,
"end": 31521
} | class ____(fixtures.RemovesEvents, FixtureTest):
run_inserts = None
__sparse_driver_backend__ = True
def test_no_close_transaction_on_flush(self, connection):
User, users = self.classes.User, self.tables.users
c = connection
self.mapper_registry.map_imperatively(User, users)
s = Session(bind=c)
s.begin()
tran = s.get_transaction()
s.add(User(name="first"))
s.flush()
c.exec_driver_sql("select * from users")
u = User(name="two")
s.add(u)
s.flush()
u = User(name="third")
s.add(u)
s.flush()
assert s.get_transaction() is tran
tran.close()
def test_subtransaction_on_external_no_begin(self, connection_no_trans):
users, User = self.tables.users, self.classes.User
connection = connection_no_trans
self.mapper_registry.map_imperatively(User, users)
trans = connection.begin()
sess = Session(bind=connection, autoflush=True)
u = User(name="ed")
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
trans.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
def test_external_nested_transaction(self, connection_no_trans):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
connection = connection_no_trans
trans = connection.begin()
sess = Session(bind=connection, autoflush=True)
u1 = User(name="u1")
sess.add(u1)
sess.flush()
savepoint = sess.begin_nested()
u2 = User(name="u2")
sess.add(u2)
sess.flush()
savepoint.rollback()
trans.commit()
assert len(sess.query(User).all()) == 1
join_transaction_mode = testing.variation(
"join_transaction_mode",
[
"none",
"conditional_savepoint",
"create_savepoint",
"control_fully",
"rollback_only",
],
)
@join_transaction_mode
@testing.variation("operation", ["commit", "close", "rollback", "nothing"])
@testing.variation("external_state", ["none", "transaction", "savepoint"])
def test_join_transaction_modes(
self,
connection_no_trans,
join_transaction_mode,
operation,
external_state: testing.Variation,
):
"""test new join_transaction modes added in #9015"""
connection = connection_no_trans
t1: Optional[Transaction]
s1: Optional[NestedTransaction]
if external_state.none:
t1 = s1 = None
elif external_state.transaction:
t1 = connection.begin()
s1 = None
elif external_state.savepoint:
t1 = connection.begin()
s1 = connection.begin_nested()
else:
external_state.fail()
if join_transaction_mode.none:
sess = Session(connection)
else:
sess = Session(
connection, join_transaction_mode=join_transaction_mode.name
)
sess.connection()
if operation.close:
sess.close()
elif operation.commit:
sess.commit()
elif operation.rollback:
sess.rollback()
elif operation.nothing:
pass
else:
operation.fail()
if external_state.none:
if operation.nothing:
assert connection.in_transaction()
else:
assert not connection.in_transaction()
elif external_state.transaction:
assert t1 is not None
if (
join_transaction_mode.none
or join_transaction_mode.conditional_savepoint
or join_transaction_mode.rollback_only
):
if operation.rollback:
assert t1._deactivated_from_connection
assert not t1.is_active
else:
assert not t1._deactivated_from_connection
assert t1.is_active
elif join_transaction_mode.create_savepoint:
assert not t1._deactivated_from_connection
assert t1.is_active
elif join_transaction_mode.control_fully:
if operation.nothing:
assert not t1._deactivated_from_connection
assert t1.is_active
else:
assert t1._deactivated_from_connection
assert not t1.is_active
else:
join_transaction_mode.fail()
if t1.is_active:
t1.rollback()
elif external_state.savepoint:
assert s1 is not None
assert t1 is not None
assert not t1._deactivated_from_connection
assert t1.is_active
if join_transaction_mode.rollback_only:
if operation.rollback:
assert s1._deactivated_from_connection
assert not s1.is_active
else:
assert not s1._deactivated_from_connection
assert s1.is_active
elif join_transaction_mode.control_fully:
if operation.nothing:
assert not s1._deactivated_from_connection
assert s1.is_active
else:
assert s1._deactivated_from_connection
assert not s1.is_active
else:
if operation.nothing:
# session is still open in the sub-savepoint,
# so we are not activated on connection
assert s1._deactivated_from_connection
# but we are still an active savepoint
assert s1.is_active
# close session, then we're good
sess.close()
assert not s1._deactivated_from_connection
assert s1.is_active
if s1.is_active:
s1.rollback()
if t1.is_active:
t1.rollback()
else:
external_state.fail()
@join_transaction_mode
@testing.variation("operation", ["commit", "close", "rollback"])
def test_join_transaction_mode_with_event(
self, join_transaction_mode, operation
):
eng = engines.testing_engine()
eng_conn = None
events = []
@event.listens_for(eng, "commit")
def on_commit(conn):
events.append("commit")
@event.listens_for(eng, "rollback")
def on_rollback(conn):
events.append("rollback")
@event.listens_for(eng.pool, "checkin")
def on_checkin(conn, record):
events.append("checkin")
@event.listens_for(eng, "engine_connect")
def make_stat(conn):
nonlocal eng_conn
eng_conn = conn
conn.begin()
if join_transaction_mode.none:
s = Session(eng)
else:
s = Session(eng, join_transaction_mode=join_transaction_mode.name)
s.connection()
expected = []
if operation.commit:
s.commit()
expected.append("commit")
elif operation.rollback:
s.rollback()
expected.append("rollback")
elif operation.close:
s.close()
expected.append("rollback")
else:
operation.fail()
is_(eng_conn.in_transaction(), False)
expected.append("checkin")
eq_(events, expected)
def test_subtransaction_on_external_commit(self, connection_no_trans):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
connection = connection_no_trans
connection.begin()
sess = Session(bind=connection, autoflush=True)
u = User(name="ed")
sess.add(u)
sess.flush()
sess.commit() # commit does nothing
connection.rollback() # rolls back
assert len(sess.query(User).all()) == 0
sess.close()
def test_subtransaction_on_external_rollback(self, connection_no_trans):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
connection = connection_no_trans
connection.begin()
sess = Session(bind=connection, autoflush=True)
u = User(name="ed")
sess.add(u)
sess.flush()
sess.rollback() # rolls back
connection.commit() # nothing to commit
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
def test_savepoint_on_external(self, connection_no_trans):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
connection = connection_no_trans
connection.begin()
sess = Session(bind=connection, autoflush=True)
u1 = User(name="u1")
sess.add(u1)
sess.flush()
n1 = sess.begin_nested()
u2 = User(name="u2")
sess.add(u2)
sess.flush()
n1.rollback()
connection.commit()
assert len(sess.query(User).all()) == 1
@testing.requires.savepoints
def test_nested_accounting_new_items_removed(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
session = fixture_session()
session.begin()
n1 = session.begin_nested()
u1 = User(name="u1")
session.add(u1)
n1.commit()
assert u1 in session
session.rollback()
assert u1 not in session
@testing.requires.savepoints
def test_nested_accounting_deleted_items_restored(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
session = fixture_session()
session.begin()
u1 = User(name="u1")
session.add(u1)
session.commit()
session.begin()
u1 = session.query(User).first()
n1 = session.begin_nested()
session.delete(u1)
n1.commit()
assert u1 not in session
session.rollback()
assert u1 in session
@testing.requires.savepoints
def test_dirty_state_transferred_deep_nesting(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
with fixture_session() as s:
u1 = User(name="u1")
s.add(u1)
s.commit()
nt1 = s.begin_nested()
nt2 = s.begin_nested()
u1.name = "u2"
assert attributes.instance_state(u1) not in nt2._dirty
assert attributes.instance_state(u1) not in nt1._dirty
s.flush()
assert attributes.instance_state(u1) in nt2._dirty
assert attributes.instance_state(u1) not in nt1._dirty
nt2.commit()
assert attributes.instance_state(u1) in nt2._dirty
assert attributes.instance_state(u1) in nt1._dirty
nt1.rollback()
assert attributes.instance_state(u1).expired
eq_(u1.name, "u1")
@testing.requires.independent_connections
def test_transactions_isolated(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
s1 = fixture_session()
s2 = fixture_session()
u1 = User(name="u1")
s1.add(u1)
s1.flush()
assert s2.query(User).all() == []
@testing.requires.two_phase_transactions
def test_twophase(self):
users, Address, addresses, User = (
self.tables.users,
self.classes.Address,
self.tables.addresses,
self.classes.User,
)
# TODO: mock up a failure condition here
# to ensure a rollback succeeds
self.mapper_registry.map_imperatively(User, users)
self.mapper_registry.map_imperatively(Address, addresses)
engine2 = engines.testing_engine()
sess = fixture_session(autoflush=False, twophase=True)
sess.bind_mapper(User, testing.db)
sess.bind_mapper(Address, engine2)
sess.begin()
u1 = User(name="u1")
a1 = Address(email_address="u1@e")
sess.add_all((u1, a1))
sess.commit()
sess.close()
engine2.dispose()
with testing.db.connect() as conn:
eq_(conn.scalar(select(func.count("*")).select_from(users)), 1)
eq_(conn.scalar(select(func.count("*")).select_from(addresses)), 1)
@testing.requires.independent_connections
def test_invalidate(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u = User(name="u1")
sess.add(u)
sess.flush()
c1 = sess.connection(bind_arguments={"mapper": User})
dbapi_conn = c1.connection
assert dbapi_conn.is_valid
sess.invalidate()
# Connection object is closed
assert c1.closed
# "invalidated" is not part of "closed" state
assert not c1.invalidated
# but the DBAPI conn (really ConnectionFairy)
# is invalidated
assert not dbapi_conn.is_valid
eq_(sess.query(User).all(), [])
c2 = sess.connection(bind_arguments={"mapper": User})
assert not c2.invalidated
assert c2.connection.is_valid
@testing.requires.savepoints
def test_nested_transaction(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
sess.begin()
u = User(name="u1")
sess.add(u)
sess.flush()
n1 = sess.begin_nested() # nested transaction
u2 = User(name="u2")
sess.add(u2)
sess.flush()
n1.rollback()
sess.commit()
assert len(sess.query(User).all()) == 1
sess.close()
@testing.requires.savepoints
def test_nested_autotrans(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u = User(name="u1")
sess.add(u)
sess.flush()
sess.begin_nested() # nested transaction
u2 = User(name="u2")
sess.add(u2)
sess.flush()
sess.rollback() # rolls back the whole trans
sess.commit()
assert len(sess.query(User).all()) == 0
sess.close()
@testing.requires.savepoints
def test_nested_transaction_connection_add(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
sess.begin()
sess.begin_nested()
u1 = User(name="u1")
sess.add(u1)
sess.flush()
sess.rollback()
u2 = User(name="u2")
sess.add(u2)
sess.commit()
eq_(set(sess.query(User).all()), {u2})
sess.rollback()
sess.begin()
n1 = sess.begin_nested()
u3 = User(name="u3")
sess.add(u3)
n1.commit() # commit the nested transaction
sess.rollback()
eq_(set(sess.query(User).all()), {u2})
sess.close()
@testing.requires.savepoints
def test_mixed_transaction_close(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
sess.begin_nested()
sess.add(User(name="u1"))
sess.flush()
sess.close()
sess.add(User(name="u2"))
sess.commit()
sess.close()
eq_(len(sess.query(User).all()), 1)
def test_begin_fails_connection_is_closed(self):
eng = engines.testing_engine()
state = []
@event.listens_for(eng, "begin")
def do_begin(conn):
state.append((conn, conn.connection))
raise Exception("failure")
s1 = Session(eng)
assert_raises_message(
Exception, "failure", s1.execute, text("select 1")
)
conn, fairy = state[0]
assert not fairy.is_valid
assert conn.closed
assert not conn.invalidated
s1.close()
# close does not occur because references were not saved, however
# the underlying DBAPI connection was closed
assert not fairy.is_valid
assert conn.closed
assert not conn.invalidated
def test_begin_savepoint_fails_connection_is_not_closed(self):
eng = engines.testing_engine()
state = []
@event.listens_for(eng, "savepoint")
def do_begin(conn, name):
state.append((conn, conn.connection))
raise Exception("failure")
s1 = Session(eng)
s1.begin_nested()
assert_raises_message(
Exception, "failure", s1.execute, text("select 1")
)
conn, fairy = state[0]
assert fairy.is_valid
assert not conn.closed
assert not conn.invalidated
s1.close()
assert conn.closed
assert not fairy.is_valid
@testing.requires.independent_connections
def test_no_rollback_in_committed_state(self):
"""test #7388
Prior to the fix, using the session.begin() context manager
would produce the error "This session is in 'committed' state; no
further SQL can be emitted ", when it attempted to call .rollback()
if the connection.close() operation failed inside of session.commit().
While the real exception was chained inside, this still proved to
be misleading so we now skip the rollback() in this specific case
and allow the original error to be raised.
"""
sess = fixture_session()
def fail(*arg, **kw):
raise BaseException("some base exception")
with (
mock.patch.object(
testing.db.dialect, "do_rollback", side_effect=fail
) as fail_mock,
mock.patch.object(
testing.db.dialect,
"do_commit",
side_effect=testing.db.dialect.do_commit,
) as succeed_mock,
):
# sess.begin() -> commit(). why would do_rollback() be called?
# because of connection pool finalize_fairy *after* the commit.
# this will cause the conn.close() in session.commit() to fail,
# but after the DB commit succeeded.
with expect_raises_message(BaseException, "some base exception"):
with sess.begin():
conn = sess.connection()
fairy_conn = conn.connection
eq_(succeed_mock.mock_calls, [mock.call(fairy_conn)])
eq_(fail_mock.mock_calls, [mock.call(fairy_conn)])
def test_continue_flushing_on_commit(self):
"""test that post-flush actions get flushed also if
we're in commit()"""
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
to_flush = [User(name="ed"), User(name="jack"), User(name="wendy")]
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
if to_flush:
session.add(to_flush.pop(0))
x = [1]
@event.listens_for(sess, "after_commit") # noqa
def add_another_user(session): # noqa
x[0] += 1
sess.add(to_flush.pop())
sess.commit()
eq_(x, [2])
eq_(sess.scalar(select(func.count(users.c.id))), 3)
def test_continue_flushing_guard(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
@event.listens_for(sess, "after_flush_postexec")
def add_another_user(session, ctx):
session.add(User(name="x"))
sess.add(User(name="x"))
assert_raises_message(
orm_exc.FlushError,
"Over 100 subsequent flushes have occurred",
sess.commit,
)
def test_no_sql_during_commit(self):
sess = fixture_session()
@event.listens_for(sess, "after_commit")
def go(session):
session.execute(text("select 1"))
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'committed' state; no further "
"SQL can be emitted within this transaction.",
sess.commit,
)
def test_no_sql_during_prepare(self):
sess = fixture_session(twophase=True)
sess.prepare()
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'prepared' state; no further "
"SQL can be emitted within this transaction.",
sess.execute,
text("select 1"),
)
def test_no_sql_during_rollback(self):
sess = fixture_session()
sess.connection()
@event.listens_for(sess, "after_rollback")
def go(session):
session.execute(text("select 1"))
assert_raises_message(
sa_exc.InvalidRequestError,
"This session is in 'inactive' state, due to the SQL transaction "
"being rolled back; no further SQL can be emitted within this "
"transaction.",
sess.rollback,
)
@testing.requires.independent_connections
@testing.emits_warning(".*previous exception")
def test_failed_rollback_deactivates_transaction(self):
# test #4050
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
session = Session(bind=testing.db)
rollback_error = testing.db.dialect.dbapi.InterfaceError(
"Can't roll back to savepoint"
)
def prevent_savepoint_rollback(
cursor, statement, parameters, context=None
):
if (
context is not None
and context.compiled
and isinstance(
context.compiled.statement,
elements.RollbackToSavepointClause,
)
):
raise rollback_error
self.event_listen(
testing.db.dialect, "do_execute", prevent_savepoint_rollback
)
with session.begin():
session.add(User(id=1, name="x"))
session.begin_nested()
# raises IntegrityError on flush
session.add(User(id=1, name="x"))
assert_raises_message(
sa_exc.InterfaceError,
"Can't roll back to savepoint",
session.commit,
)
# rollback succeeds, because the Session is deactivated
eq_(session._transaction._state, _session.DEACTIVE)
eq_(session.is_active, False)
session.rollback()
is_(session._transaction, None)
session.connection()
# back to normal
eq_(session._transaction._state, _session.ACTIVE)
eq_(session.is_active, True)
trans = session._transaction
# leave the outermost trans
session.rollback()
# trans is now closed
eq_(trans._state, _session.CLOSED)
# outermost transaction is new
is_not(session._transaction, trans)
is_(session._transaction, None)
eq_(session.is_active, True)
def test_no_prepare_wo_twophase(self):
sess = fixture_session()
assert_raises_message(
sa_exc.InvalidRequestError,
"'twophase' mode not enabled, or not root "
"transaction; can't prepare.",
sess.prepare,
)
def test_closed_status_check(self):
sess = fixture_session()
trans = sess.begin()
trans.rollback()
assert_raises_message(
sa_exc.ResourceClosedError,
"This transaction is closed",
trans.rollback,
)
assert_raises_message(
sa_exc.ResourceClosedError,
"This transaction is closed",
trans.commit,
)
def _inactive_flushed_session_fixture(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(id=1, name="u1")
sess.add(u1)
sess.commit()
sess.add(User(id=1, name="u2"))
with expect_warnings("New instance"):
assert_raises(sa_exc.IntegrityError, sess.flush)
return sess, u1
def test_execution_options_begin_transaction(self):
bind = mock.Mock(
connect=mock.Mock(
return_value=mock.Mock(
_is_future=False,
execution_options=mock.Mock(
return_value=mock.Mock(
_is_future=False,
in_transaction=mock.Mock(return_value=False),
)
),
)
)
)
sess = Session(bind=bind)
c1 = sess.connection(execution_options={"isolation_level": "FOO"})
eq_(bind.mock_calls, [mock.call.connect()])
eq_(
bind.connect().mock_calls,
[mock.call.execution_options(isolation_level="FOO")],
)
eq_(c1, bind.connect().execution_options())
def test_execution_options_ignored_mid_transaction(self):
bind = mock.Mock()
conn = mock.Mock(
engine=bind, in_transaction=mock.Mock(return_value=False)
)
bind.connect = mock.Mock(return_value=conn)
sess = Session(bind=bind)
sess.execute(text("select 1"))
with expect_warnings(
"Connection is already established for the "
"given bind; execution_options ignored"
):
sess.connection(execution_options={"isolation_level": "FOO"})
def test_warning_on_using_inactive_session_new(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
u2 = User(name="u2")
sess.add(u2)
def go():
sess.rollback()
assert_warnings(
go,
[
"Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."
],
)
assert u2 not in sess
assert u1 in sess
def test_warning_on_using_inactive_session_dirty(self):
sess, u1 = self._inactive_flushed_session_fixture()
u1.name = "newname"
def go():
sess.rollback()
assert_warnings(
go,
[
"Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."
],
)
assert u1 in sess
assert u1 not in sess.dirty
def test_warning_on_using_inactive_session_delete(self):
sess, u1 = self._inactive_flushed_session_fixture()
sess.delete(u1)
def go():
sess.rollback()
assert_warnings(
go,
[
"Session's state has been changed on a "
"non-active transaction - this state "
"will be discarded."
],
)
assert u1 in sess
assert u1 not in sess.deleted
def test_warning_on_using_inactive_session_rollback_evt(self):
users, User = self.tables.users, self.classes.User
self.mapper_registry.map_imperatively(User, users)
sess = fixture_session()
u1 = User(id=1, name="u1")
sess.add(u1)
sess.commit()
u3 = User(name="u3")
@event.listens_for(sess, "after_rollback")
def evt(s):
sess.add(u3)
sess.add(User(id=1, name="u2"))
def go():
assert_raises(orm_exc.FlushError, sess.flush)
assert u3 not in sess
def test_preserve_flush_error(self):
User = self.classes.User
sess, u1 = self._inactive_flushed_session_fixture()
for i in range(5):
assert_raises_message(
sa_exc.PendingRollbackError,
"^This Session's transaction has been "
r"rolled back due to a previous exception "
"during flush. To "
"begin a new transaction with this "
"Session, first issue "
r"Session.rollback\(\). Original exception "
"was:",
sess.commit,
)
sess.rollback()
sess.add(User(id=5, name="some name"))
sess.commit()
def test_no_autobegin_after_explicit_commit(self):
User, users = self.classes.User, self.tables.users
self.mapper_registry.map_imperatively(User, users)
session = fixture_session()
session.add(User(name="ed"))
session.get_transaction().commit()
is_(session.get_transaction(), None)
session.connection()
is_not(session.get_transaction(), None)
| SessionTransactionTest |
python | pennersr__django-allauth | allauth/socialaccount/providers/evernote/provider.py | {
"start": 364,
"end": 706
} | class ____(OAuthProvider):
id = "evernote"
name = "Evernote"
account_class = EvernoteAccount
oauth_adapter_class = EvernoteOAuthAdapter
def extract_uid(self, data):
return str(data["edam_userId"])
def extract_common_fields(self, data):
return data
provider_classes = [EvernoteProvider]
| EvernoteProvider |
python | walkccc__LeetCode | solutions/1051. Height Checker/1051.py | {
"start": 0,
"end": 372
} | class ____:
def heightChecker(self, heights: list[int]) -> int:
ans = 0
currentHeight = 1
count = [0] * 101
for height in heights:
count[height] += 1
for height in heights:
while count[currentHeight] == 0:
currentHeight += 1
if height != currentHeight:
ans += 1
count[currentHeight] -= 1
return ans
| Solution |
python | dagster-io__dagster | examples/docs_snippets/docs_snippets/guides/components/shell-script-component/custom-schema-resolution.py | {
"start": 295,
"end": 716
} | class ____(dg.Component, dg.Resolvable):
# Resolver specifies a function used to map input from the model
# to a value for this field
api_client: Annotated[
MyApiClient,
dg.Resolver(
resolve_api_key,
model_field_name="api_key",
model_field_type=str,
),
]
def build_defs(self, context: dg.ComponentLoadContext) -> dg.Definitions: ...
| MyComponent |
python | openai__openai-python | src/openai/lib/_old_api.py | {
"start": 668,
"end": 814
} | class ____(OpenAIError):
def __init__(self, *, symbol: str) -> None:
super().__init__(INSTRUCTIONS.format(symbol=symbol))
| APIRemovedInV1 |
python | dagster-io__dagster | python_modules/libraries/dagster-dlt/dagster_dlt/components/dlt_load_collection/component.py | {
"start": 6475,
"end": 7456
} | class ____(
create_component_translator_cls(DltLoadCollectionComponent, DagsterDltTranslator),
ComponentTranslator[DltLoadCollectionComponent],
):
def __init__(self, component: "DltLoadCollectionComponent", load_spec: "DltLoadSpecModel"):
self._component = component
self._load_spec = load_spec
def get_asset_spec(self, data: DltResourceTranslatorData) -> AssetSpec:
table_name = data.resource.table_name
if isinstance(table_name, Callable):
table_name = data.resource.name
prefix = (
[data.pipeline.dataset_name] if data.pipeline and data.pipeline.dataset_name else []
)
base_asset_spec = (
super().get_asset_spec(data).replace_attributes(key=AssetKey(prefix + [table_name]))
)
if self._load_spec.translation is None:
return base_asset_spec
else:
return self._load_spec.translation(base_asset_spec, data)
| DltComponentTranslator |
python | getsentry__sentry | src/sentry/preprod/size_analysis/models.py | {
"start": 496,
"end": 799
} | class ____(BaseModel):
"""Complete treemap analysis results."""
root: TreemapElement
file_count: int
category_breakdown: dict[str, dict[str, int]]
platform: str
# Keep in sync with https://github.com/getsentry/launchpad/blob/main/src/launchpad/size/models/common.py#L92
| TreemapResults |
python | spack__spack | lib/spack/spack/test/conftest.py | {
"start": 77326,
"end": 81312
} | class ____(io.IOBase):
"""This is a mock HTTP response, which implements part of http.client.HTTPResponse"""
def __init__(self, status, reason, headers=None, body=None):
self.msg = None
self.version = 11
self.url = None
self.headers = email.message.EmailMessage()
self.status = status
self.code = status
self.reason = reason
self.debuglevel = 0
self._body = body
if headers is not None:
for key, value in headers.items():
self.headers[key] = value
@classmethod
def with_json(cls, status, reason, headers=None, body=None):
"""Create a mock HTTP response with JSON string as body"""
body = io.BytesIO(json.dumps(body).encode("utf-8"))
return cls(status, reason, headers, body)
def read(self, *args, **kwargs):
return self._body.read(*args, **kwargs)
def getheader(self, name, default=None):
self.headers.get(name, default)
def getheaders(self):
return self.headers.items()
def fileno(self):
return 0
def getcode(self):
return self.status
def info(self):
return self.headers
@pytest.fixture()
def mock_runtimes(config, mock_packages):
return mock_packages.packages_with_tags("runtime")
@pytest.fixture()
def write_config_file(tmp_path: Path):
"""Returns a function that writes a config file."""
def _write(config, data, scope):
config_dir = tmp_path / scope
config_dir.mkdir(parents=True, exist_ok=True)
config_yaml = config_dir / (config + ".yaml")
with config_yaml.open("w") as f:
syaml.dump_config(data, f)
return config_yaml
return _write
def _include_cache_root():
return join_path(str(tempfile.mkdtemp()), "user_cache", "includes")
@pytest.fixture()
def mock_include_cache(monkeypatch):
"""Override the include cache directory so tests don't pollute user cache."""
monkeypatch.setattr(spack.config, "_include_cache_location", _include_cache_root)
@pytest.fixture()
def wrapper_dir(install_mockery):
"""Installs the compiler wrapper and returns the prefix where the script is installed."""
wrapper = spack.concretize.concretize_one("compiler-wrapper")
wrapper_pkg = wrapper.package
PackageInstaller([wrapper_pkg], explicit=True).install()
return wrapper_pkg.bin_dir()
def _noop(*args, **kwargs):
pass
@pytest.fixture(autouse=True)
def no_compilers_init(monkeypatch):
"""Disables automatic compiler initialization"""
monkeypatch.setattr(spack.compilers.config, "_init_packages_yaml", _noop)
@pytest.fixture(autouse=True)
def skip_provenance_check(monkeypatch, request):
"""Skip binary provenance check for git versions
Binary provenance checks require querying git repositories and mirrors.
The infrastructure for this is complex and a heavy lift for simple things like spec syntax
checks. This fixture defaults to skipping this check, but can be overridden with the
@pytest.mark.require_provenance decorator
"""
if "require_provenance" not in request.keywords:
monkeypatch.setattr(spack.package_base.PackageBase, "_resolve_git_provenance", _noop)
@pytest.fixture(scope="function")
def config_two_gccs(mutable_config):
# Configure two gcc compilers that could be concretized to
extra_attributes_block = {
"compilers": {"c": "/path/to/gcc", "cxx": "/path/to/g++", "fortran": "/path/to/fortran"}
}
mutable_config.set(
"packages:gcc:externals::",
[
{
"spec": "gcc@12.3.1 languages=c,c++,fortran",
"prefix": "/path",
"extra_attributes": extra_attributes_block,
},
{
"spec": "gcc@10.3.1 languages=c,c++,fortran",
"prefix": "/path",
"extra_attributes": extra_attributes_block,
},
],
)
| MockHTTPResponse |
python | pytorch__pytorch | test/test_dataloader.py | {
"start": 113072,
"end": 115968
} | class ____(TestCase):
def setUp(self):
super().setUp()
self.dataset = DictDataset()
def test_sequential_batch(self):
for persistent_workers in (False, True):
if persistent_workers:
loader = DataLoader(
self.dataset,
batch_size=2,
shuffle=False,
persistent_workers=persistent_workers,
num_workers=1,
)
else:
loader = DataLoader(
self.dataset,
batch_size=2,
shuffle=False,
persistent_workers=persistent_workers,
)
batch_size = loader.batch_size
for i, sample in enumerate(loader):
idx = i * batch_size
self.assertEqual(set(sample.keys()), {"a_tensor", "another_dict"})
self.assertEqual(set(sample["another_dict"].keys()), {"a_number"})
t = sample["a_tensor"]
self.assertEqual(t.size(), torch.Size([batch_size, 4, 2]))
self.assertTrue((t[0] == idx).all())
self.assertTrue((t[1] == idx + 1).all())
n = sample["another_dict"]["a_number"]
self.assertEqual(n.size(), torch.Size([batch_size]))
self.assertEqual(n[0], idx)
self.assertEqual(n[1], idx + 1)
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertTrue(sample["a_tensor"].is_pinned())
self.assertTrue(sample["another_dict"]["a_number"].is_pinned())
@skipIfXpu
@unittest.skipIf(TEST_CUDA, "Test for when CUDA is not available")
def test_pin_memory_no_cuda(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory=True)
for sample in loader:
self.assertFalse(sample["a_tensor"].is_pinned())
self.assertFalse(sample["another_dict"]["a_number"].is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory_device(self):
loader = DataLoader(
self.dataset, batch_size=2, pin_memory=True, pin_memory_device="cuda"
)
for sample in loader:
self.assertTrue(sample["a_tensor"].is_pinned())
self.assertTrue(sample["another_dict"]["a_number"].is_pinned())
@unittest.skipIf(not TEST_CUDA, "CUDA unavailable")
def test_pin_memory_with_only_device(self):
loader = DataLoader(self.dataset, batch_size=2, pin_memory_device="cuda")
for sample in loader:
self.assertFalse(sample["a_tensor"].is_pinned())
self.assertFalse(sample["another_dict"]["a_number"].is_pinned())
| TestDictDataLoader |
python | milvus-io__pymilvus | pymilvus/client/types.py | {
"start": 11998,
"end": 13034
} | class ____:
def __init__(
self,
replica_id: int,
shards: List[str],
nodes: List[tuple],
resource_group: str,
num_outbound_node: dict,
) -> None:
self._id = replica_id
self._shards = shards
self._nodes = tuple(nodes)
self._resource_group = resource_group
self._num_outbound_node = num_outbound_node
def __repr__(self) -> str:
return (
f"ReplicaInfo: <id:{self.id}>, <nodes:{self.group_nodes}>, "
f"<shards:{self.shards}>, <resource_group: {self.resource_group}>, "
f"<num_outbound_node: {self.num_outbound_node}>"
)
@property
def id(self):
return self._id
@property
def group_nodes(self):
return self._nodes
@property
def shards(self):
return self._shards
@property
def resource_group(self):
return self._resource_group
@property
def num_outbound_node(self):
return self._num_outbound_node
| ReplicaInfo |
python | bokeh__bokeh | examples/plotting/data_models.py | {
"start": 471,
"end": 1545
} | class ____(DataModel):
amp = Float(default=0.1, help="Amplitude")
freq = Float(default=0.1, help="Frequency")
phase = Float(default=0, help="Phase")
offset = Float(default=-5, help="Offset")
params = Params(amp=2, freq=3, phase=0.4, offset=1)
A = params.amp
k = params.freq
phi = params.phase
B = params.offset
x = np.linspace(0, 10, 100)
y = A*np.sin(k*x + phi) + B
source = ColumnDataSource(data=dict(x=x, y=y))
plot = figure(tags=[params], y_range=(-10, 10), title="Data models example")
plot.line("x", "y", source=source, line_width=3, line_alpha=0.6)
callback = CustomJS(args=dict(source=source, params=params), code="""
const data = source.data
const A = params.amp
const k = params.freq
const phi = params.phase
const B = params.offset
const x = source.data.x
const y = Array.from(x, (x) => B + A*Math.sin(k*x+phi))
source.data = { x, y }
""")
params.js_on_change("amp", callback)
params.js_on_change("freq", callback)
params.js_on_change("phase", callback)
params.js_on_change("offset", callback)
show(plot)
| Params |
python | great-expectations__great_expectations | great_expectations/exceptions/exceptions.py | {
"start": 12033,
"end": 12274
} | class ____(DataContextError):
def __init__(self, datasource_name: str, message: str) -> None:
self.message = f"Cannot initialize datasource {datasource_name}, error: {message}"
super().__init__(self.message)
| DatasourceError |
python | prompt-toolkit__python-prompt-toolkit | src/prompt_toolkit/input/posix_pipe.py | {
"start": 317,
"end": 1072
} | class ____:
"Wrapper around os.pipe, that ensures we don't double close any end."
def __init__(self) -> None:
self.read_fd, self.write_fd = os.pipe()
self._read_closed = False
self._write_closed = False
def close_read(self) -> None:
"Close read-end if not yet closed."
if self._read_closed:
return
os.close(self.read_fd)
self._read_closed = True
def close_write(self) -> None:
"Close write-end if not yet closed."
if self._write_closed:
return
os.close(self.write_fd)
self._write_closed = True
def close(self) -> None:
"Close both read and write ends."
self.close_read()
self.close_write()
| _Pipe |
python | HypothesisWorks__hypothesis | hypothesis-python/tests/cover/test_type_lookup.py | {
"start": 7477,
"end": 7624
} | class ____(enum.Enum):
pass
@fails_with(InvalidArgument)
@given(st.from_type(EmptyEnum))
def test_error_if_enum_is_empty(x):
pass
| EmptyEnum |
python | pytorch__pytorch | torchgen/_autoheuristic/ah_tree.py | {
"start": 118,
"end": 768
} | class ____:
def __init__(
self,
feature: str | None = None,
threshold: float | None = None,
left: Optional["DecisionTreeNode"] = None,
right: Optional["DecisionTreeNode"] = None,
class_probs: Any = None,
num_samples: int = 0,
node_id: int = 0,
) -> None:
self.feature = feature
self.threshold = threshold
self.left = left
self.right = right
self.class_probs = class_probs
self.num_samples = num_samples
self.id = node_id
def is_leaf(self) -> bool:
return self.left is None or self.right is None
| DecisionTreeNode |
python | Lightning-AI__lightning | tests/tests_pytorch/utilities/test_model_summary.py | {
"start": 1519,
"end": 2062
} | class ____(BoringModel):
"""A model with precalculated total params size in MB for FP16 and FP32."""
def __init__(self, precision: int = 32):
super().__init__()
# 32K params
self.layer = nn.Linear(32, 1000, bias=False)
# 218K params
self.layer1 = nn.Linear(1000, 218, bias=False)
# calculate model size based on precision.
self.pre_calculated_model_size = 1.0 / (32 / precision)
def forward(self, x):
x = self.layer(x)
return self.layer1(x)
| PreCalculatedModel |
python | PrefectHQ__prefect | src/prefect/client/schemas/filters.py | {
"start": 21612,
"end": 21803
} | class ____(PrefectBaseModel):
"""Filter by `BlockType.slug`"""
any_: Optional[List[str]] = Field(
default=None, description="A list of slugs to match"
)
| BlockTypeFilterSlug |
python | pypa__warehouse | warehouse/macaroons/services.py | {
"start": 1561,
"end": 7107
} | class ____:
def __init__(self, db_session):
self.db = db_session
def find_macaroon(self, macaroon_id) -> Macaroon | None:
"""
Returns a macaroon model from the DB by its identifier.
Returns None if no macaroon has the given ID.
"""
try:
uuid.UUID(macaroon_id)
except ValueError:
return None
return (
self.db.query(Macaroon)
.options(
joinedload(Macaroon.user),
joinedload(Macaroon.oidc_publisher),
)
.filter_by(id=macaroon_id)
.one_or_none()
)
def find_userid(self, raw_macaroon: str) -> uuid.UUID | None:
"""
Returns the id of the user associated with the given raw (serialized)
macaroon.
"""
try:
m = deserialize_raw_macaroon(raw_macaroon)
except InvalidMacaroonError:
return None
try:
identifier = m.identifier.decode()
except UnicodeDecodeError:
return None
dm = self.find_macaroon(identifier)
if dm is None:
return None
# This can be None if the macaroon has no associated user
# (e.g., an OIDC-minted macaroon).
if dm.user is None:
return None
return dm.user.id
def find_from_raw(self, raw_macaroon: str) -> Macaroon:
"""
Returns a DB macaroon matching the input, or raises InvalidMacaroonError
"""
m = deserialize_raw_macaroon(raw_macaroon)
try:
identifier = m.identifier.decode()
except UnicodeDecodeError:
raise InvalidMacaroonError("Macaroon not found")
dm = self.find_macaroon(identifier)
if not dm:
raise InvalidMacaroonError("Macaroon not found")
return dm
def verify(self, raw_macaroon: str, request, context, permission) -> bool:
"""
Returns True if the given raw (serialized) macaroon is
valid for the request, context, and requested permission.
Raises InvalidMacaroonError if the macaroon is not valid.
"""
m = deserialize_raw_macaroon(raw_macaroon)
dm = self.find_macaroon(m.identifier.decode())
if dm is None:
raise InvalidMacaroonError("deleted or nonexistent macaroon")
verified = caveats.verify(m, dm.key, request, context, permission)
if verified:
dm.last_used = datetime.datetime.now()
return True
raise InvalidMacaroonError(verified.msg)
def create_macaroon(
self,
location: str,
description: str,
scopes: list[caveats.Caveat],
*,
user_id: uuid.UUID | None = None,
oidc_publisher_id: str | None = None,
additional: dict[str, typing.Any] | None = None,
) -> tuple[str, Macaroon]:
"""
Returns a tuple of a new raw (serialized) macaroon and its DB model.
The description provided is not embedded into the macaroon, only stored
in the DB model.
An associated identity (either a user or macaroon, by ID) must be specified.
"""
if not all(isinstance(c, caveats.Caveat) for c in scopes):
raise TypeError("scopes must be a list of Caveat instances")
# NOTE: This is a bit of a hack: we keep a separate copy of the
# permissions caveat in the DB, so that we can display scope information
# in the UI.
permissions: dict[str, list[str]] | str = {}
for caveat in scopes:
if isinstance(caveat, caveats.ProjectName):
permissions = typing.cast(dict[str, list[str]], permissions)
projects = permissions.setdefault("projects", [])
projects.extend(caveat.normalized_names)
elif isinstance(caveat, caveats.RequestUser):
permissions = "user"
break
dm = Macaroon(
user_id=user_id,
oidc_publisher_id=oidc_publisher_id,
description=description,
permissions_caveat={"permissions": permissions},
additional=additional,
caveats=scopes,
)
self.db.add(dm)
self.db.flush() # flush db now so dm.id is available
m = pymacaroons.Macaroon(
location=location,
identifier=str(dm.id),
key=dm.key,
version=pymacaroons.MACAROON_V2,
)
for caveat in scopes:
m.add_first_party_caveat(caveats.serialize(caveat))
serialized_macaroon = f"pypi-{m.serialize()}"
return serialized_macaroon, dm
def delete_macaroon(self, macaroon_id) -> None:
"""
Deletes a macaroon from the DB by its identifier.
"""
dm = self.find_macaroon(macaroon_id)
self.db.delete(dm) if dm else None
def get_macaroon_by_description(self, user_id, description):
"""
Returns a macaroon model from the DB with the given description,
if one exists for the given user.
Returns None if the user doesn't have a macaroon with this description.
"""
dm = (
self.db.query(Macaroon)
.filter(Macaroon.description == description)
.filter(Macaroon.user_id == user_id)
.one_or_none()
)
return dm
def database_macaroon_factory(context, request):
return DatabaseMacaroonService(request.db)
| DatabaseMacaroonService |
python | wepe__MachineLearning | KMeans/kmeans.py | {
"start": 3515,
"end": 6283
} | class ____(object):
def __init__(self,n_clusters=5):
self.n_clusters = n_clusters
self.centroids = None
self.clusterAssment = None
self.labels = None
self.sse = None
#计算两点的欧式距离
def _distEclud(self, vecA, vecB):
return np.linalg.norm(vecA - vecB)
def fit(self,X):
m = X.shape[0]
self.clusterAssment = np.zeros((m,2))
centroid0 = np.mean(X, axis=0).tolist()
centList =[centroid0]
for j in range(m):#计算每个样本点与质心之间初始的平方误差
self.clusterAssment[j,1] = self._distEclud(np.asarray(centroid0), X[j,:])**2
while (len(centList) < self.n_clusters):
lowestSSE = np.inf
for i in range(len(centList)):#尝试划分每一族,选取使得误差最小的那个族进行划分
ptsInCurrCluster = X[np.nonzero(self.clusterAssment[:,0]==i)[0],:]
clf = KMeans(n_clusters=2)
clf.fit(ptsInCurrCluster)
centroidMat, splitClustAss = clf.centroids, clf.clusterAssment#划分该族后,所得到的质心、分配结果及误差矩阵
sseSplit = sum(splitClustAss[:,1])
sseNotSplit = sum(self.clusterAssment[np.nonzero(self.clusterAssment[:,0]!=i)[0],1])
if (sseSplit + sseNotSplit) < lowestSSE:
bestCentToSplit = i
bestNewCents = centroidMat
bestClustAss = splitClustAss.copy()
lowestSSE = sseSplit + sseNotSplit
#该族被划分成两个子族后,其中一个子族的索引变为原族的索引,另一个子族的索引变为len(centList),然后存入centList
bestClustAss[np.nonzero(bestClustAss[:,0] == 1)[0],0] = len(centList)
bestClustAss[np.nonzero(bestClustAss[:,0] == 0)[0],0] = bestCentToSplit
centList[bestCentToSplit] = bestNewCents[0,:].tolist()
centList.append(bestNewCents[1,:].tolist())
self.clusterAssment[np.nonzero(self.clusterAssment[:,0] == bestCentToSplit)[0],:]= bestClustAss
self.labels = self.clusterAssment[:,0]
self.sse = sum(self.clusterAssment[:,1])
self.centroids = np.asarray(centList)
def predict(self,X):#根据聚类结果,预测新输入数据所属的族
#类型检查
if not isinstance(X,np.ndarray):
try:
X = np.asarray(X)
except:
raise TypeError("numpy.ndarray required for X")
m = X.shape[0]#m代表样本数量
preds = np.empty((m,))
for i in range(m):#将每个样本点分配到离它最近的质心所属的族
minDist = np.inf
for j in range(self.n_clusters):
distJI = self._distEclud(self.centroids[j,:],X[i,:])
if distJI < minDist:
minDist = distJI
preds[i] = j
return preds
| biKMeans |
python | doocs__leetcode | solution/0800-0899/0854.K-Similar Strings/Solution.py | {
"start": 0,
"end": 763
} | class ____:
def kSimilarity(self, s1: str, s2: str) -> int:
def next(s):
i = 0
while s[i] == s2[i]:
i += 1
res = []
for j in range(i + 1, n):
if s[j] == s2[i] and s[j] != s2[j]:
res.append(s2[: i + 1] + s[i + 1 : j] + s[i] + s[j + 1 :])
return res
q = deque([s1])
vis = {s1}
ans, n = 0, len(s1)
while 1:
for _ in range(len(q)):
s = q.popleft()
if s == s2:
return ans
for nxt in next(s):
if nxt not in vis:
vis.add(nxt)
q.append(nxt)
ans += 1
| Solution |
python | PyCQA__isort | isort/exceptions.py | {
"start": 2532,
"end": 2869
} | class ____(ISortError):
"""Raised when a profile is set by the user that doesn't exist"""
def __init__(self, profile: str):
super().__init__(
f"Specified profile of {profile} does not exist. "
f"Available profiles: {','.join(profiles)}."
)
self.profile = profile
| ProfileDoesNotExist |
python | sqlalchemy__sqlalchemy | test/dialect/oracle/test_dialect.py | {
"start": 21151,
"end": 27449
} | class ____(fixtures.TestBase, AssertsCompiledSQL):
def _dialect(self, server_version, **kw):
def server_version_info(conn):
return server_version
dialect = oracle.dialect(
dbapi=Mock(
version="0.0.0",
paramstyle="named",
),
**kw,
)
dialect._get_server_version_info = server_version_info
dialect.get_isolation_level = Mock()
dialect._check_unicode_description = Mock()
dialect._get_default_schema_name = Mock()
dialect._detect_decimal_char = Mock()
dialect.__check_max_identifier_length = Mock()
dialect._get_compat_server_version_info = Mock()
return dialect
def test_ora8_flags(self):
dialect = self._dialect((8, 2, 5))
# before connect, assume modern DB
assert dialect._supports_char_length
assert dialect.use_ansi
assert not dialect._use_nchar_for_unicode
dialect.initialize(Mock())
# oracle 8 / 8i support returning
assert dialect.insert_returning
assert not dialect._supports_char_length
assert not dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50)", dialect=dialect)
self.assert_compile(Unicode(50), "VARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "CLOB", dialect=dialect)
def test_default_flags(self):
"""test with no initialization or server version info"""
dialect = self._dialect(None)
assert dialect._supports_char_length
assert not dialect._use_nchar_for_unicode
assert dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(Unicode(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(UnicodeText(), "CLOB", dialect=dialect)
def test_ora10_flags(self):
dialect = self._dialect((10, 2, 5))
dialect.initialize(Mock())
assert dialect._supports_char_length
assert not dialect._use_nchar_for_unicode
assert dialect.use_ansi
self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(Unicode(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(UnicodeText(), "CLOB", dialect=dialect)
def test_use_nchar(self):
dialect = self._dialect((10, 2, 5), use_nchar_for_unicode=True)
dialect.initialize(Mock())
assert dialect._use_nchar_for_unicode
self.assert_compile(String(50), "VARCHAR2(50 CHAR)", dialect=dialect)
self.assert_compile(Unicode(50), "NVARCHAR2(50)", dialect=dialect)
self.assert_compile(UnicodeText(), "NCLOB", dialect=dialect)
def test_ident_length_in_13_is_30(self):
from sqlalchemy import __version__
m = re.match(r"(\d+)\.(\d+)(?:\.(\d+))?", __version__)
version = tuple(int(x) for x in m.group(1, 2, 3) if x is not None)
if version >= (1, 4):
length = 128
else:
length = 30
eq_(oracle.OracleDialect.max_identifier_length, length)
dialect = self._dialect((12, 2, 0))
conn = mock.Mock(
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=lambda: "12.2.0")
)
)
dialect.initialize(conn)
eq_(dialect.server_version_info, (12, 2, 0))
eq_(
dialect._get_effective_compat_server_version_info(conn), (12, 2, 0)
)
eq_(dialect.max_identifier_length, length)
def test_max_ident_122(self):
dialect = self._dialect((12, 2, 0))
conn = mock.Mock(
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=lambda: "12.2.0")
)
)
dialect.initialize(conn)
eq_(dialect.server_version_info, (12, 2, 0))
eq_(
dialect._get_effective_compat_server_version_info(conn), (12, 2, 0)
)
eq_(
dialect.max_identifier_length,
oracle.OracleDialect.max_identifier_length,
)
def test_max_ident_112(self):
dialect = self._dialect((11, 2, 0))
conn = mock.Mock(
exec_driver_sql=mock.Mock(return_value=mock.Mock(scalar="11.0.0"))
)
dialect.initialize(conn)
eq_(dialect.server_version_info, (11, 2, 0))
eq_(
dialect._get_effective_compat_server_version_info(conn), (11, 2, 0)
)
eq_(dialect.max_identifier_length, 30)
def test_max_ident_122_11compat(self):
dialect = self._dialect((12, 2, 0))
conn = mock.Mock(
exec_driver_sql=mock.Mock(
return_value=mock.Mock(scalar=lambda: "11.0.0")
)
)
dialect.initialize(conn)
eq_(dialect.server_version_info, (12, 2, 0))
eq_(
dialect._get_effective_compat_server_version_info(conn), (11, 0, 0)
)
eq_(dialect.max_identifier_length, 30)
def test_max_ident_122_11compat_vparam_raises(self):
dialect = self._dialect((12, 2, 0))
def c122():
raise exc.DBAPIError(
"statement", None, "no such table", None, None
)
conn = mock.Mock(
exec_driver_sql=mock.Mock(return_value=mock.Mock(scalar=c122))
)
dialect.initialize(conn)
eq_(dialect.server_version_info, (12, 2, 0))
eq_(
dialect._get_effective_compat_server_version_info(conn), (12, 2, 0)
)
eq_(
dialect.max_identifier_length,
oracle.OracleDialect.max_identifier_length,
)
def test_max_ident_122_11compat_vparam_cant_parse(self):
dialect = self._dialect((12, 2, 0))
def c122():
return "12.thisiscrap.0"
conn = mock.Mock(
exec_driver_sql=mock.Mock(return_value=mock.Mock(scalar=c122))
)
dialect.initialize(conn)
eq_(dialect.server_version_info, (12, 2, 0))
eq_(
dialect._get_effective_compat_server_version_info(conn), (12, 2, 0)
)
eq_(
dialect.max_identifier_length,
oracle.OracleDialect.max_identifier_length,
)
| CompatFlagsTest |
python | getsentry__sentry | src/sentry/interfaces/contexts.py | {
"start": 5814,
"end": 5889
} | class ____(ContextType):
type = "default"
@contexttype
| DefaultContextType |
python | sympy__sympy | sympy/polys/polyoptions.py | {
"start": 474,
"end": 900
} | class ____:
"""Base class for all kinds of options. """
option: str | None = None
is_Flag = False
requires: list[str] = []
excludes: list[str] = []
after: list[str] = []
before: list[str] = []
@classmethod
def default(cls):
return None
@classmethod
def preprocess(cls, option):
return None
@classmethod
def postprocess(cls, options):
pass
| Option |
python | doocs__leetcode | solution/0700-0799/0767.Reorganize String/Solution.py | {
"start": 0,
"end": 443
} | class ____:
def reorganizeString(self, s: str) -> str:
n = len(s)
cnt = Counter(s)
mx = max(cnt.values())
if mx > (n + 1) // 2:
return ''
i = 0
ans = [None] * n
for k, v in cnt.most_common():
while v:
ans[i] = k
v -= 1
i += 2
if i >= n:
i = 1
return ''.join(ans)
| Solution |
python | pytest-dev__pytest | doc/en/example/assertion/test_setup_flow_example.py | {
"start": 110,
"end": 1289
} | class ____:
def setup_class(cls):
cls.classcount += 1
def teardown_class(cls):
cls.classcount -= 1
def setup_method(self, method):
self.id = eval(method.__name__[5:])
def test_42(self):
assert self.classcount == 1
assert self.id == 42
def test_23(self):
assert self.classcount == 1
assert self.id == 23
def teardown_module(module):
assert module.TestStateFullThing.classcount == 0
""" For this example the control flow happens as follows::
import test_setup_flow_example
setup_module(test_setup_flow_example)
setup_class(TestStateFullThing)
instance = TestStateFullThing()
setup_method(instance, instance.test_42)
instance.test_42()
setup_method(instance, instance.test_23)
instance.test_23()
teardown_class(TestStateFullThing)
teardown_module(test_setup_flow_example)
Note that ``setup_class(TestStateFullThing)`` is called and not
``TestStateFullThing.setup_class()`` which would require you
to insert ``setup_class = classmethod(setup_class)`` to make
your setup function callable.
"""
| TestStateFullThing |
python | dask__dask | dask/dataframe/dask_expr/_collection.py | {
"start": 168410,
"end": 173404
} | class ____(Series):
"""Index-like Expr Collection.
The constructor takes the expression that represents the query as input. The class
is not meant to be instantiated directly. Instead, use one of the IO connectors from
Dask.
"""
_accessors: ClassVar[set[str]] = set()
_partition_type = pd.Index
_dt_attributes = {
"nanosecond",
"microsecond",
"millisecond",
"dayofyear",
"minute",
"hour",
"day",
"dayofweek",
"second",
"week",
"weekday",
"weekofyear",
"month",
"quarter",
"year",
}
_cat_attributes = {
"known",
"as_known",
"as_unknown",
"add_categories",
"categories",
"remove_categories",
"reorder_categories",
"as_ordered",
"codes",
"remove_unused_categories",
"set_categories",
"as_unordered",
"ordered",
"rename_categories",
}
def __getattr__(self, key):
if (
isinstance(self._meta.dtype, pd.CategoricalDtype)
and key in self._cat_attributes
):
return getattr(self.cat, key)
elif key in self._dt_attributes:
return getattr(self.dt, key)
if hasattr(super(), key): # Doesn't trigger super().__getattr__
# Not a magic attribute. This is a real method or property of Series that
# has been overridden by RaiseAttributeError().
raise AttributeError(
f"{self.__class__.__name__!r} object has no attribute {key!r}"
)
return super().__getattr__(key)
def __repr__(self):
return f"<dask_expr.expr.Index: expr={self.expr}>"
def __array_wrap__(self, array, context=None):
return pd.Index(array, name=self.name)
@derived_from(pd.Index)
def to_series(self, index=None, name=no_default):
if index is not None:
raise NotImplementedError
return new_collection(expr.ToSeriesIndex(self, index=index, name=name))
@derived_from(pd.Index, ua_args=["index"])
def to_frame(self, index=True, name=no_default):
if not index:
raise NotImplementedError
return new_collection(expr.ToFrameIndex(self, index=index, name=name))
@derived_from(pd.Index)
def memory_usage(self, deep=False):
return new_collection(MemoryUsageIndex(self, deep=deep))
def shift(self, periods=1, freq=None):
return new_collection(expr.ShiftIndex(self, periods, freq))
@derived_from(pd.Index)
def map(self, arg, na_action=None, meta=None, is_monotonic=False):
"""
Note that this method clears any known divisions.
If your mapping function is monotonically increasing then use `is_monotonic`
to apply the mapping function to the old divisions and assign the new
divisions to the output.
"""
if isinstance(arg, Series):
if not expr.are_co_aligned(self.expr, arg.expr):
if meta is None:
warnings.warn(meta_warning(meta, method="map"))
return new_collection(
expr.MapIndexAlign(self, arg, na_action, meta, is_monotonic)
)
if meta is None:
meta = expr.emulate(M.map, self, arg, na_action=na_action, udf=True)
warnings.warn(meta_warning(meta, method="map"))
return new_collection(
expr.Map(
self, arg=arg, na_action=na_action, meta=meta, is_monotonic=is_monotonic
)
)
def __dir__(self):
o = set(dir(type(self)))
o.update(self.__dict__)
o.update(set(dir(expr.Expr)))
o.update(self._dt_attributes)
if isinstance(self.dtype, pd.CategoricalDtype):
o.update(self._cat_attributes)
return list(o)
# Methods and properties of Series that are not implemented on Index
def count(self, split_every=False):
return new_collection(IndexCount(self, split_every))
@property # type: ignore[misc]
def index(self):
raise AttributeError("'Index' object has no attribute 'index'")
def sum(self, *args, **kwargs):
raise AttributeError("'Index' object has no attribute 'sum'")
def prod(self, *args, **kwargs):
raise AttributeError("'Index' object has no attribute 'prod'")
def mean(self, *args, **kwargs):
raise AttributeError("'Index' object has no attribute 'mean'")
def std(self, *args, **kwargs):
raise AttributeError("'Index' object has no attribute 'std'")
def var(self, *args, **kwargs):
raise AttributeError("'Index' object has no attribute 'var'")
def idxmax(self, *args, **kwargs):
raise AttributeError("'Index' object has no attribute 'idxmax'")
def idxmin(self, *args, **kwargs):
raise AttributeError("'Index' object has no attribute 'idxmin'")
| Index |
python | ray-project__ray | rllib/env/utils/infinite_lookback_buffer.py | {
"start": 537,
"end": 28798
} | class ____:
def __init__(
self,
data: Optional[Union[List, np.ndarray]] = None,
lookback: int = 0,
space: Optional[gym.Space] = None,
):
self.data = data if data is not None else []
self.lookback = min(lookback, len(self.data))
self.finalized = not isinstance(self.data, list)
self.space = space
def __eq__(
self,
other: "InfiniteLookbackBuffer",
) -> bool:
"""Compares two `InfiniteLookbackBuffers.
Args:
other: Another object. If another `LookbackBuffer` instance all
their attributes are compared.
Returns:
`True`, if `other` is an `InfiniteLookbackBuffer` instance and all
attributes are identical. Otherwise, returns `False`.
"""
return (
isinstance(other, InfiniteLookbackBuffer)
# Todo (mark): Replace `data_equivalence` with ray / rllib implementation similar to `check` without asserts
and data_equivalence(self.data, other.data)
and self.lookback == other.lookback
and self.finalized == other.finalized
and self.space_struct == other.space_struct
and self.space == other.space
)
@property
def space(self):
return self._space
@space.setter
def space(self, value):
self._space = value
self._space_struct = get_base_struct_from_space(value)
@property
def space_struct(self):
return self._space_struct
def get_state(self) -> Dict[str, Any]:
"""Returns the pickable state of a buffer.
The data in the buffer is stored into a dictionary. Note that
buffers can also be generated from pickable states (see
`InfiniteLookbackBuffer.from_state`)
Returns:
A dict containing all the data and metadata from the buffer.
"""
return {
"data": self.data,
"lookback": self.lookback,
"finalized": self.finalized,
"space": gym_space_to_dict(self.space) if self.space else None,
}
@staticmethod
def from_state(state: Dict[str, Any]) -> "InfiniteLookbackBuffer":
"""Creates a new `InfiniteLookbackBuffer` from a state dict.
Args:
state: The state dict, as returned by `self.get_state`.
Returns:
A new `InfiniteLookbackBuffer` instance with the data and metadata
from the state dict.
"""
buffer = InfiniteLookbackBuffer()
buffer.lookback = state["lookback"]
buffer.finalized = state["finalized"]
buffer.space = gym_space_from_dict(state["space"]) if state["space"] else None
# space_struct is set when space is assigned
buffer.data = state["data"]
return buffer
def append(self, item) -> None:
"""Appends the given item to the end of this buffer."""
if self.finalized:
self.data = tree.map_structure(
lambda d, i: np.concatenate([d, [i]], axis=0), self.data, item
)
else:
self.data.append(item)
def extend(self, items) -> None:
"""Appends all items in `items` to the end of this buffer."""
if self.finalized:
# TODO (sven): When extending with a list of structs, we should
# probably rather do: `tree.map_structure(..., self.data,
# tree.map_structure(lambda *s: np.array(*s), *items)`)??
self.data = tree.map_structure(
lambda d, i: np.concatenate([d, i], axis=0),
self.data,
# Note, we could have dictionaries here.
np.array(items) if isinstance(items, list) else items,
)
else:
for item in items:
self.append(item)
def concat(self, other: "InfiniteLookbackBuffer") -> None:
"""Concatenates the data of `other` (w/o its lookback) to `self`.
Args:
other: The other InfiniteLookbackBuffer to be concatenated to self.
"""
self.data.extend(other.get())
def pop(self, index: int = -1) -> None:
"""Removes the item at `index` from this buffer, but does NOT return it.
Args:
index: The index to pop out of this buffer (w/o returning it from this
method).
"""
if self.finalized:
self.data = tree.map_structure(
lambda s: np.delete(s, index, axis=0), self.data
)
else:
self.data.pop(index)
def finalize(self) -> None:
"""Finalizes this buffer by converting internal data lists into numpy arrays.
Thereby, if the individual items in the list are nested structures, the
resulting buffer content will be a nested struct of np.ndarrays (leafs).
"""
if not self.finalized:
self.data = batch(self.data)
self.finalized = True
def get(
self,
indices: Optional[Union[int, slice, List[int]]] = None,
*,
neg_index_as_lookback: bool = False,
fill: Optional[Any] = None,
one_hot_discrete: bool = False,
_ignore_last_ts: bool = False,
_add_last_ts_value: Optional[Any] = None,
) -> Any:
"""Returns data, based on the given args, from this buffer.
Args:
indices: A single int is interpreted as an index, from which to return the
individual data stored at this index.
A list of ints is interpreted as a list of indices from which to gather
individual data in a batch of size len(indices).
A slice object is interpreted as a range of data to be returned.
Thereby, negative indices by default are interpreted as "before the end"
unless the `neg_index_as_lookback=True` option is used, in which case
negative indices are interpreted as "before ts=0", meaning going back
into the lookback buffer.
neg_index_as_lookback: If True, negative values in `indices` are
interpreted as "before ts=0", meaning going back into the lookback
buffer. For example, a buffer with data [4, 5, 6, 7, 8, 9],
where [4, 5, 6] is the lookback buffer range (ts=0 item is 7), will
respond to `get(-1, neg_index_as_lookback=True)` with `6` and to
`get(slice(-2, 1), neg_index_as_lookback=True)` with `[5, 6, 7]`.
fill: An optional float value to use for filling up the returned results at
the boundaries. This filling only happens if the requested index range's
start/stop boundaries exceed the buffer's boundaries (including the
lookback buffer on the left side). This comes in very handy, if users
don't want to worry about reaching such boundaries and want to zero-pad.
For example, a buffer with data [10, 11, 12, 13, 14] and lookback
buffer size of 2 (meaning `10` and `11` are part of the lookback buffer)
will respond to `get(slice(-7, -2), fill=0.0)`
with `[0.0, 0.0, 10, 11, 12]`.
one_hot_discrete: If True, will return one-hot vectors (instead of
int-values) for those sub-components of a (possibly complex) space
that are Discrete or MultiDiscrete. Note that if `fill=0` and the
requested `indices` are out of the range of our data, the returned
one-hot vectors will actually be zero-hot (all slots zero).
_ignore_last_ts: Whether to ignore the last record in our internal
`self.data` when getting the provided indices.
_add_last_ts_value: Whether to add the value of this arg to the end of
the internal `self.data` buffer (just for the duration of this get
operation, not permanently).
"""
if indices is None:
data = self._get_all_data(
one_hot_discrete=one_hot_discrete,
_ignore_last_ts=_ignore_last_ts,
)
elif isinstance(indices, slice):
data = self._get_slice(
indices,
fill=fill,
neg_index_as_lookback=neg_index_as_lookback,
one_hot_discrete=one_hot_discrete,
_ignore_last_ts=_ignore_last_ts,
_add_last_ts_value=_add_last_ts_value,
)
elif isinstance(indices, list):
data = [
self._get_int_index(
idx,
fill=fill,
neg_index_as_lookback=neg_index_as_lookback,
one_hot_discrete=one_hot_discrete,
_ignore_last_ts=_ignore_last_ts,
_add_last_ts_value=_add_last_ts_value,
)
for idx in indices
]
if self.finalized:
data = batch(data)
else:
assert isinstance(indices, int)
data = self._get_int_index(
indices,
fill=fill,
neg_index_as_lookback=neg_index_as_lookback,
one_hot_discrete=one_hot_discrete,
_ignore_last_ts=_ignore_last_ts,
_add_last_ts_value=_add_last_ts_value,
)
return data
def __add__(
self, other: Union[List, "InfiniteLookbackBuffer", int, float, complex]
) -> "InfiniteLookbackBuffer":
"""Adds another InfiniteLookbackBuffer object or list to the end of this one.
Args:
other: Another `InfiniteLookbackBuffer` or a `list` or a number.
If a `InfiniteLookbackBuffer` its data (w/o its lookback buffer) gets
concatenated to self's data. If a `list`, we concat it to self's data.
If a number, we add this number to each element of self (if possible).
Returns:
A new `InfiniteLookbackBuffer` instance `self.data` containing
concatenated data from `self` and `other` (or adding `other` to each element
in self's data).
"""
if self.finalized:
raise RuntimeError(f"Cannot `add` to a finalized {type(self).__name__}.")
else:
# If `other` is an int, simply add it to all our values (if possible) and
# use the result as the underlying data for the returned buffer.
if isinstance(other, (int, float, complex)):
data = [
(d + other) if isinstance(d, (int, float, complex)) else d
for d in self.data
]
# If `other` is a InfiniteLookbackBuffer itself, do NOT include its
# lookback buffer anymore. We assume that `other`'s lookback buffer i
# already at the end of `self`.
elif isinstance(other, InfiniteLookbackBuffer):
data = self.data + other.data[other.lookback :]
# `other` is a list, simply concat the two lists and use the result as
# the underlying data for the returned buffer.
else:
data = self.data + other
return InfiniteLookbackBuffer(
data=data,
lookback=self.lookback,
space=self.space,
)
def __getitem__(self, item):
"""Support squared bracket syntax, e.g. buffer[:5]."""
return self.get(item)
def __setitem__(self, key, value):
self.set(new_data=value, at_indices=key)
def set(
self,
new_data,
*,
at_indices: Optional[Union[int, slice, List[int]]] = None,
neg_index_as_lookback: bool = False,
) -> None:
"""Overwrites all or some of the data in this buffer with the provided data.
Args:
new_data: The new data to overwrite existing records with.
at_indices: A single int is interpreted as an index, at which to overwrite
the individual record stored at this index with `new_data`.
A list of ints is interpreted as a list of indices, which to overwrite
with `new_data`, which must be a batch of size `len(at_indices)`.
A slice object is interpreted as a range, which to overwrite with
`new_data`. Thereby, negative indices by default are interpreted as
"before the end" unless the `neg_index_as_lookback=True` option is
used, in which case negative indices are interpreted as
"before ts=0", meaning going back into the lookback buffer.
neg_index_as_lookback: If True, negative values in `at_indices` are
interpreted as "before ts=0", meaning going back into the lookback
buffer. For example, a buffer with data [4, 5, 6, 7, 8, 9],
where [4, 5, 6] is the lookback buffer range (ts=0 item is 7), will
handle a call `set(99, at_indices=-1, neg_index_as_lookback=True)`
with `6` being replaced by 99 and to `set([98, 99, 100],
at_indices=slice(-2, 1), neg_index_as_lookback=True)` with
`[5, 6, 7]` being replaced by `[98, 99, 100]`.
"""
# `at_indices` is None -> Override all our data (excluding the lookback buffer).
if at_indices is None:
self._set_all_data(new_data)
elif isinstance(at_indices, slice):
self._set_slice(
new_data,
slice_=at_indices,
neg_index_as_lookback=neg_index_as_lookback,
)
elif isinstance(at_indices, list):
for i, idx in enumerate(at_indices):
self._set_int_index(
new_data[i],
idx=idx,
neg_index_as_lookback=neg_index_as_lookback,
)
else:
assert isinstance(at_indices, int)
self._set_int_index(
new_data,
idx=at_indices,
neg_index_as_lookback=neg_index_as_lookback,
)
def __len__(self):
"""Return the length of our data, excluding the lookback buffer."""
len_ = self.len_incl_lookback()
# Only count the data after the lookback.
return max(len_ - self.lookback, 0)
def len_incl_lookback(self):
if self.finalized:
return len(tree.flatten(self.data)[0])
else:
return len(self.data)
def __repr__(self):
return (
f"{type(self).__name__}({self.data[:self.lookback]} <- "
f"lookback({self.lookback}) | {self.data[self.lookback:]})"
)
def _get_all_data(self, one_hot_discrete=False, _ignore_last_ts=False):
data = self[: (None if not _ignore_last_ts else -1)]
if one_hot_discrete:
data = self._one_hot(data, space_struct=self.space_struct)
return data
def _set_all_data(self, new_data):
self._set_slice(new_data, slice(0, None))
def _get_slice(
self,
slice_,
fill=None,
neg_index_as_lookback=False,
one_hot_discrete=False,
_ignore_last_ts=False,
_add_last_ts_value=None,
):
data_to_use = self.data
if _ignore_last_ts:
if self.finalized:
data_to_use = tree.map_structure(lambda s: s[:-1], self.data)
else:
data_to_use = self.data[:-1]
if _add_last_ts_value is not None:
if self.finalized:
data_to_use = tree.map_structure(
lambda s, t: np.append(s, t),
data_to_use.copy(),
_add_last_ts_value,
)
else:
data_to_use = np.append(data_to_use.copy(), _add_last_ts_value)
slice_, slice_len, fill_left_count, fill_right_count = self._interpret_slice(
slice_,
neg_index_as_lookback,
len_self_plus_lookback=(
self.len_incl_lookback()
+ int(_add_last_ts_value is not None)
- int(_ignore_last_ts)
),
)
# Perform the actual slice.
data_slice = None
if slice_len > 0:
if self.finalized:
data_slice = tree.map_structure(lambda s: s[slice_], data_to_use)
else:
data_slice = data_to_use[slice_]
if one_hot_discrete:
data_slice = self._one_hot(data_slice, space_struct=self.space_struct)
# Data is shorter than the range requested -> Fill the rest with `fill` data.
if fill is not None and (fill_right_count > 0 or fill_left_count > 0):
if self.finalized:
if fill_left_count:
if self.space is None:
fill_batch = np.array([fill] * fill_left_count)
else:
fill_batch = get_dummy_batch_for_space(
self.space,
fill_value=fill,
batch_size=fill_left_count,
one_hot_discrete=one_hot_discrete,
)
if data_slice is not None:
data_slice = tree.map_structure(
lambda s0, s: np.concatenate([s0, s]),
fill_batch,
data_slice,
)
else:
data_slice = fill_batch
if fill_right_count:
if self.space is None:
fill_batch = np.array([fill] * fill_right_count)
else:
fill_batch = get_dummy_batch_for_space(
self.space,
fill_value=fill,
batch_size=fill_right_count,
one_hot_discrete=one_hot_discrete,
)
if data_slice is not None:
data_slice = tree.map_structure(
lambda s0, s: np.concatenate([s, s0]),
fill_batch,
data_slice,
)
else:
data_slice = fill_batch
else:
if self.space is None:
fill_batch = [fill]
else:
fill_batch = [
get_dummy_batch_for_space(
self.space,
fill_value=fill,
batch_size=0,
one_hot_discrete=one_hot_discrete,
)
]
data_slice = (
fill_batch * fill_left_count
+ (data_slice if data_slice is not None else [])
+ fill_batch * fill_right_count
)
if data_slice is None:
if self.finalized:
return tree.map_structure(lambda s: s[slice_], data_to_use)
else:
return data_to_use[slice_]
return data_slice
def _set_slice(
self,
new_data,
slice_,
neg_index_as_lookback=False,
):
slice_, _, _, _ = self._interpret_slice(slice_, neg_index_as_lookback)
# Check, whether the setting to new_data changes the length of self
# (it shouldn't). If it does, raise an error.
try:
if self.finalized:
def __set(s, n):
if self.space:
assert self.space.contains(n[0])
assert len(s[slice_]) == len(n)
s[slice_] = n
tree.map_structure(__set, self.data, new_data)
else:
assert len(self.data[slice_]) == len(new_data)
self.data[slice_] = new_data
except AssertionError:
raise IndexError(
f"Cannot `set()` value via at_indices={slice_} (option "
f"neg_index_as_lookback={neg_index_as_lookback})! Slice of data "
"does NOT have the same size as `new_data`."
)
def _get_int_index(
self,
idx: int,
fill=None,
neg_index_as_lookback=False,
one_hot_discrete=False,
_ignore_last_ts=False,
_add_last_ts_value=None,
):
data_to_use = self.data
if _ignore_last_ts:
if self.finalized:
data_to_use = tree.map_structure(lambda s: s[:-1], self.data)
else:
data_to_use = self.data[:-1]
if _add_last_ts_value is not None:
if self.finalized:
data_to_use = tree.map_structure(
lambda s, last: np.append(s, last), data_to_use, _add_last_ts_value
)
else:
data_to_use = data_to_use.copy()
data_to_use.append(_add_last_ts_value)
# If index >= 0 -> Ignore lookback buffer.
# Otherwise, include lookback buffer.
if idx >= 0 or neg_index_as_lookback:
idx = self.lookback + idx
# Negative indices mean: Go to left into lookback buffer starting from idx=0.
# But if we pass the lookback buffer, the index should be invalid and we will
# have to fill, if required. Invalidate the index by setting it to one larger
# than max.
if neg_index_as_lookback and idx < 0:
idx = len(self) + self.lookback - (_ignore_last_ts is True)
try:
if self.finalized:
data = tree.map_structure(lambda s: s[idx], data_to_use)
else:
data = data_to_use[idx]
# Out of range index -> If `fill`, use a fill dummy (B=0), if not, error out.
except IndexError as e:
if fill is not None:
if self.space is None:
return fill
return get_dummy_batch_for_space(
self.space,
fill_value=fill,
batch_size=0,
one_hot_discrete=one_hot_discrete,
)
else:
raise e
# Convert discrete/multi-discrete components to one-hot vectors, if required.
if one_hot_discrete:
data = self._one_hot(data, self.space_struct)
return data
def _set_int_index(self, new_data, idx, neg_index_as_lookback):
actual_idx = idx
# If index >= 0 -> Ignore lookback buffer.
# Otherwise, include lookback buffer.
if actual_idx >= 0 or neg_index_as_lookback:
actual_idx = self.lookback + actual_idx
# Negative indices mean: Go to left into lookback buffer starting from idx=0.
# But if we pass the lookback buffer, the index should be invalid and we will
# have to fill, if required. Invalidate the index by setting it to one larger
# than max.
if neg_index_as_lookback and actual_idx < 0:
actual_idx = len(self) + self.lookback
try:
if self.finalized:
def __set(s, n):
if self.space:
assert self.space.contains(n), n
s[actual_idx] = n
tree.map_structure(__set, self.data, new_data)
else:
self.data[actual_idx] = new_data
except IndexError:
raise IndexError(
f"Cannot `set()` value at index {idx} (option "
f"neg_index_as_lookback={neg_index_as_lookback})! Out of range "
f"of buffer data."
)
def _interpret_slice(
self,
slice_,
neg_index_as_lookback,
len_self_plus_lookback=None,
):
if len_self_plus_lookback is None:
len_self_plus_lookback = len(self) + self.lookback
# Re-interpret slice bounds as absolute positions (>=0) within our
# internal data.
start = slice_.start
stop = slice_.stop
# Start is None -> Exclude lookback buffer.
if start is None:
start = self.lookback
# Start is negative.
elif start < 0:
# `neg_index_as_lookback=True` -> User wants to index into the lookback
# range.
if neg_index_as_lookback:
start = self.lookback + start
# Interpret index as counting "from end".
else:
start = len_self_plus_lookback + start
# Start is 0 or positive -> timestep right after lookback is interpreted as 0.
else:
start = self.lookback + start
# Stop is None -> Set stop to very last index + 1 of our internal data.
if stop is None:
stop = len_self_plus_lookback
# Stop is negative.
elif stop < 0:
# `neg_index_as_lookback=True` -> User wants to index into the lookback
# range. Set to 0 (beginning of lookback buffer) if result is a negative
# index.
if neg_index_as_lookback:
stop = self.lookback + stop
# Interpret index as counting "from end". Set to 0 (beginning of actual
# episode) if result is a negative index.
else:
stop = len_self_plus_lookback + stop
# Stop is positive -> Add lookback range to it.
else:
stop = self.lookback + stop
fill_left_count = fill_right_count = 0
# Both start and stop are on left side.
if start < 0 and stop < 0:
fill_left_count = abs(start - stop)
fill_right_count = 0
start = stop = 0
# Both start and stop are on right side.
elif start >= len_self_plus_lookback and stop >= len_self_plus_lookback:
fill_right_count = abs(start - stop)
fill_left_count = 0
start = stop = len_self_plus_lookback
# Set to 0 (beginning of actual episode) if result is a negative index.
elif start < 0:
fill_left_count = -start
start = 0
elif stop >= len_self_plus_lookback:
fill_right_count = stop - len_self_plus_lookback
stop = len_self_plus_lookback
# Only `stop` might be < 0, when slice has negative step and start is > 0.
elif stop < 0:
if start >= len_self_plus_lookback:
fill_left_count = start - len_self_plus_lookback + 1
start = len_self_plus_lookback - 1
fill_right_count = -stop - 1
stop = -LARGE_INTEGER
assert start >= 0 and (stop >= 0 or stop == -LARGE_INTEGER), (start, stop)
step = slice_.step if slice_.step is not None else 1
slice_ = slice(start, stop, step)
slice_len = max(0, (stop - start + (step - (1 if step > 0 else -1))) // step)
return slice_, slice_len, fill_left_count, fill_right_count
def _one_hot(self, data, space_struct):
if space_struct is None:
raise ValueError(
f"Cannot `one_hot` data in `{type(self).__name__}` if a "
"gym.Space was NOT provided during construction!"
)
def _convert(dat_, space):
if isinstance(space, gym.spaces.Discrete):
return one_hot(dat_, depth=space.n)
elif isinstance(space, gym.spaces.MultiDiscrete):
return one_hot_multidiscrete(dat_, depths=space.nvec)
return dat_
if isinstance(data, list):
data = [
tree.map_structure(_convert, dslice, space_struct) for dslice in data
]
else:
data = tree.map_structure(_convert, data, space_struct)
return data
| InfiniteLookbackBuffer |
python | coleifer__peewee | tests/cockroachdb.py | {
"start": 12316,
"end": 13370
} | class ____(ModelTestCase):
@requires_models(KV, KV2)
def test_compound_select(self):
KV.insert_many([('10', 1), ('40', 4)]).execute()
KV2.insert_many([('20', 2), ('30', 3)]).execute()
lhs = KV.select(KV.k.cast('INT'), KV.v)
rhs = KV2.select(KV2.k2.cast('INT'), KV2.v2)
query = (lhs | rhs).order_by(SQL('1'))
self.assertEqual([(obj.k, obj.v) for obj in query],
[(10, 1), (20, 2), (30, 3), (40, 4)])
@requires_models(Post, PostNote)
def test_primary_key_as_foreign_key(self):
p = Post.create(content='p')
n = PostNote.create(post=p, note='n')
p_db = Post.select().get()
self.assertEqual([n.note for n in p_db.notes], ['n'])
with self.assertQueryCount(1):
query = (PostNote
.select(PostNote, Post)
.join(Post))
self.assertEqual([(n.post.content, n.note) for n in query],
[('p', 'n')])
@skip_unless(IS_CRDB)
| TestCockroachIntegration |
python | apache__airflow | providers/google/tests/unit/google/cloud/operators/test_dataproc.py | {
"start": 88306,
"end": 89716
} | class ____(DataprocClusterTestBase):
@mock.patch(DATAPROC_PATH.format("Cluster.to_dict"))
@mock.patch(DATAPROC_PATH.format("DataprocHook"))
def test_execute(self, mock_hook, mock_to_dict):
cluster = MagicMock()
cluster.status.State.STOPPED = 4
cluster.status.state = 0
mock_hook.return_value.get_cluster.return_value = cluster
op = DataprocStopClusterOperator(
task_id=TASK_ID,
cluster_name=CLUSTER_NAME,
region=GCP_REGION,
project_id=GCP_PROJECT,
request_id=REQUEST_ID,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
gcp_conn_id=GCP_CONN_ID,
impersonation_chain=IMPERSONATION_CHAIN,
)
op.execute(context=self.mock_context)
mock_hook.return_value.get_cluster.assert_called_with(
region=GCP_REGION,
project_id=GCP_PROJECT,
cluster_name=CLUSTER_NAME,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
mock_hook.return_value.stop_cluster.assert_called_once_with(
cluster_name=CLUSTER_NAME,
region=GCP_REGION,
project_id=GCP_PROJECT,
cluster_uuid=None,
retry=RETRY,
timeout=TIMEOUT,
metadata=METADATA,
)
| TestDataprocStopClusterOperator |
python | django-compressor__django-compressor | compressor/tests/test_base.py | {
"start": 17429,
"end": 17854
} | class ____(SimpleTestCase):
def setUp(self):
self.js = """
<script src="/static/js/one.js"></script>
<script src="/static/js/two.js"></script>
"""
def test_js_content(self):
js_node = JsCompressor("js", self.js)
content = js_node.filter_input()
self.assertEqual(content[0], "obj = {};;")
self.assertEqual(content[1], "pollos = {};")
| JSWithParensTestCase |
python | streamlit__streamlit | lib/tests/streamlit/elements/arrow_dataframe_dimensions_test.py | {
"start": 926,
"end": 5832
} | class ____(DeltaGeneratorTestCase):
"""Test the layout configuration in st.dataframe for different dimension options."""
def test_no_dimensions(self):
"""Test default behavior when no dimension parameters are passed."""
df = pd.DataFrame({"A": [1, 2, 3, 4, 5]})
st.dataframe(df)
element = self.get_delta_from_queue().new_element
assert (
element.width_config.WhichOneof("width_spec")
== WidthConfigFields.USE_STRETCH.value
)
assert element.width_config.use_stretch is True
# Should not set height config for default auto behavior
assert element.height_config.WhichOneof("height_spec") is None
@parameterized.expand(
[
# Width only (height remains unset)
(
{"width": 400},
(WidthConfigFields.PIXEL_WIDTH, "pixel_width", 400),
(None, None, None),
),
(
{"width": "stretch"},
(WidthConfigFields.USE_STRETCH, "use_stretch", True),
(None, None, None),
),
(
{"width": "content"},
(WidthConfigFields.USE_CONTENT, "use_content", True),
(None, None, None),
),
# Height only (width gets default "stretch")
(
{"height": 300},
(WidthConfigFields.USE_STRETCH, "use_stretch", True),
(HeightConfigFields.PIXEL_HEIGHT, "pixel_height", 300),
),
(
{"height": "auto"},
(WidthConfigFields.USE_STRETCH, "use_stretch", True),
(None, None, None), # auto doesn't set height config
),
(
{"height": "stretch"},
(WidthConfigFields.USE_STRETCH, "use_stretch", True),
(HeightConfigFields.USE_STRETCH, "use_stretch", True),
),
(
{"height": "content"},
(WidthConfigFields.USE_STRETCH, "use_stretch", True),
(HeightConfigFields.USE_CONTENT, "use_content", True),
),
# Combinations
(
{"width": 200, "height": 250},
(WidthConfigFields.PIXEL_WIDTH, "pixel_width", 200),
(HeightConfigFields.PIXEL_HEIGHT, "pixel_height", 250),
),
]
)
def test_dimension_values(self, kwargs, expected_width, expected_height):
"""Test that width and height values set layout config correctly."""
df = pd.DataFrame({"A": [1, 2, 3, 4, 5]})
st.dataframe(df, **kwargs)
element = self.get_delta_from_queue().new_element
width_field, width_attr, width_value = expected_width
if width_field is not None:
assert element.width_config.WhichOneof("width_spec") == width_field.value
assert getattr(element.width_config, width_attr) == width_value
height_field, height_attr, height_value = expected_height
if height_field is not None:
assert element.height_config.WhichOneof("height_spec") == height_field.value
assert getattr(element.height_config, height_attr) == height_value
else:
assert element.height_config.WhichOneof("height_spec") is None
@parameterized.expand(
[
# use_container_width=True cases - always results in "stretch"
(None, True, WidthConfigFields.USE_STRETCH, "use_stretch", True),
("stretch", True, WidthConfigFields.USE_STRETCH, "use_stretch", True),
("content", True, WidthConfigFields.USE_STRETCH, "use_stretch", True),
(400, True, WidthConfigFields.USE_STRETCH, "use_stretch", True),
# use_container_width=False cases - respects width parameter
(None, False, WidthConfigFields.USE_CONTENT, "use_content", True),
("stretch", False, WidthConfigFields.USE_CONTENT, "use_content", True),
("content", False, WidthConfigFields.USE_CONTENT, "use_content", True),
(400, False, WidthConfigFields.PIXEL_WIDTH, "pixel_width", 400),
]
)
def test_use_container_width_behavior(
self, width_param, use_container_width, expected_field, field_name, field_value
):
"""Test that use_container_width parameter properly overrides width parameter."""
df = pd.DataFrame({"A": [1, 2, 3, 4, 5]})
kwargs = {"use_container_width": use_container_width}
if width_param is not None:
kwargs["width"] = width_param
st.dataframe(df, **kwargs)
element = self.get_delta_from_queue().new_element
assert element.width_config.WhichOneof("width_spec") == expected_field.value
assert getattr(element.width_config, field_name) == field_value
| ArrowDataFrameDimensionsTest |
python | dagster-io__dagster | python_modules/dagster/dagster/_core/execution/backfill.py | {
"start": 2866,
"end": 4534
} | class ____:
"""Filters to use when querying for bulk actions (i.e. backfills) from the BulkActionsTable.
Each field of the BulkActionsFilter represents a logical AND with each other. For
example, if you specify status and created_before, then you will receive only bulk actions
with the specified states AND the created before created_before. If left blank, then
all values will be permitted for that field.
Args:
statuses (Optional[Sequence[BulkActionStatus]]): A list of statuses to filter by.
created_before (Optional[DateTime]): Filter by bulk actions that were created before this datetime. Note that the
create_time for each bulk action is stored in UTC.
created_after (Optional[DateTime]): Filter by bulk actions that were created after this datetime. Note that the
create_time for each bulk action is stored in UTC.
tags (Optional[Dict[str, Union[str, List[str]]]]): A dictionary of tags to query by. All tags specified
here must be present for a given bulk action to pass the filter.
job_name (Optional[str]): Name of the job to query for. If blank, all job_names will be accepted.
backfill_ids (Optional[Sequence[str]]): A list of backfill_ids to filter by. If blank, all backfill_ids will be included
"""
statuses: Optional[Sequence[BulkActionStatus]] = None
created_before: Optional[datetime] = None
created_after: Optional[datetime] = None
tags: Optional[Mapping[str, Union[str, Sequence[str]]]] = None
job_name: Optional[str] = None
backfill_ids: Optional[Sequence[str]] = None
@whitelist_for_serdes
| BulkActionsFilter |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/links/vertex_ai.py | {
"start": 4942,
"end": 5240
} | class ____(BaseGoogleLink):
"""Helper class for constructing Vertex AI HyperparameterTuningJobs Link."""
name = "Hyperparameter Tuning Job List"
key = "hyperparameter_tuning_jobs_conf"
format_str = VERTEX_AI_HYPERPARAMETER_TUNING_JOB_LIST_LINK
| VertexAIHyperparameterTuningJobListLink |
python | huggingface__transformers | src/transformers/models/llama/modeling_llama.py | {
"start": 21398,
"end": 21498
} | class ____(GenericForSequenceClassification, LlamaPreTrainedModel): ...
| LlamaForSequenceClassification |
python | openai__openai-python | src/openai/resources/beta/realtime/sessions.py | {
"start": 21411,
"end": 21650
} | class ____:
def __init__(self, sessions: AsyncSessions) -> None:
self._sessions = sessions
self.create = _legacy_response.async_to_raw_response_wrapper(
sessions.create,
)
| AsyncSessionsWithRawResponse |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-pinecone/llama_index/vector_stores/pinecone/utils.py | {
"start": 712,
"end": 2473
} | class ____(BaseSparseEmbedding):
"""Default Pinecone sparse embedding."""
tokenizer: Callable = Field(
default_factory=get_default_tokenizer,
description="A callable that returns token input ids.",
)
def build_sparse_embeddings(
self, input_batch: List[List[int]]
) -> List[SparseEmbedding]:
"""
Build a list of sparse dictionaries from a batch of input_ids.
NOTE: taken from https://www.pinecone.io/learn/hybrid-search-intro/.
"""
# store a batch of sparse embeddings
sparse_emb_list = []
# iterate through input batch
for token_ids in input_batch:
sparse_emb = {}
# convert the input_ids list to a dictionary of key to frequency values
d = dict(Counter(token_ids))
for idx in d:
sparse_emb[idx] = float(d[idx])
sparse_emb_list.append(sparse_emb)
# return sparse_emb list
return sparse_emb_list
def _get_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query synchronously."""
token_ids = self.tokenizer([query])[0]
return self.build_sparse_embeddings([token_ids])[0]
async def _aget_query_embedding(self, query: str) -> SparseEmbedding:
"""Embed the input query asynchronously."""
return self._get_query_embedding(query)
def _get_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text synchronously."""
return self._get_query_embedding(text)
async def _aget_text_embedding(self, text: str) -> SparseEmbedding:
"""Embed the input text asynchronously."""
return self._get_query_embedding(text)
| DefaultPineconeSparseEmbedding |
python | numba__numba | numba/cuda/cudamath.py | {
"start": 647,
"end": 1172
} | class ____(ConcreteTemplate):
cases = [
signature(types.float64, types.int64),
signature(types.float64, types.uint64),
signature(types.float32, types.float32),
signature(types.float64, types.float64),
]
@infer_global(math.sin)
@infer_global(math.cos)
@infer_global(math.ceil)
@infer_global(math.floor)
@infer_global(math.sqrt)
@infer_global(math.log)
@infer_global(math.log2)
@infer_global(math.log10)
@infer_global(math.exp)
@infer_global(math.fabs)
@infer_global(math.trunc)
| Math_unary |
python | huggingface__transformers | src/transformers/models/mimi/modeling_mimi.py | {
"start": 52627,
"end": 53968
} | class ____(nn.Module):
"""SEANet decoder as used by Mimi."""
def __init__(self, config: MimiConfig):
super().__init__()
scaling = int(2 ** len(config.upsampling_ratios))
model = [MimiConv1d(config, config.hidden_size, scaling * config.num_filters, config.kernel_size)]
# Upsample to raw audio scale
for ratio in config.upsampling_ratios:
current_scale = scaling * config.num_filters
# Add upsampling layers
model += [nn.ELU()]
model += [
MimiConvTranspose1d(config, current_scale, current_scale // 2, kernel_size=ratio * 2, stride=ratio)
]
# Add residual layers
for j in range(config.num_residual_layers):
model += [MimiResnetBlock(config, current_scale // 2, (config.dilation_growth_rate**j, 1))]
scaling //= 2
# Add final layers
model += [nn.ELU()]
model += [MimiConv1d(config, config.num_filters, config.audio_channels, config.last_kernel_size)]
self.layers = nn.ModuleList(model)
# Copied from transformers.models.encodec.modeling_encodec.EncodecDecoder.forward
def forward(self, hidden_states):
for layer in self.layers:
hidden_states = layer(hidden_states)
return hidden_states
| MimiDecoder |
python | apache__airflow | providers/google/src/airflow/providers/google/cloud/triggers/dataproc.py | {
"start": 20751,
"end": 24075
} | class ____(DataprocBaseTrigger):
"""
DataprocDeleteClusterTrigger run on the trigger worker to perform delete cluster operation.
:param cluster_name: The name of the cluster
:param end_time: Time in second left to check the cluster status
:param project_id: The ID of the Google Cloud project the cluster belongs to
:param region: The Cloud Dataproc region in which to handle the request
:param metadata: Additional metadata that is provided to the method
:param gcp_conn_id: The connection ID to use when fetching connection info.
:param impersonation_chain: Optional service account to impersonate using short-term
credentials, or chained list of accounts required to get the access_token
of the last account in the list, which will be impersonated in the request.
If set as a string, the account must grant the originating account
the Service Account Token Creator IAM role.
If set as a sequence, the identities from the list must grant
Service Account Token Creator IAM role to the directly preceding identity, with first
account from the list granting this role to the originating account.
:param polling_interval_seconds: Time in seconds to sleep between checks of cluster status
"""
def __init__(
self,
cluster_name: str,
end_time: float,
metadata: Sequence[tuple[str, str]] = (),
**kwargs: Any,
):
super().__init__(**kwargs)
self.cluster_name = cluster_name
self.end_time = end_time
self.metadata = metadata
def serialize(self) -> tuple[str, dict[str, Any]]:
"""Serialize DataprocDeleteClusterTrigger arguments and classpath."""
return (
"airflow.providers.google.cloud.triggers.dataproc.DataprocDeleteClusterTrigger",
{
"cluster_name": self.cluster_name,
"end_time": self.end_time,
"project_id": self.project_id,
"region": self.region,
"metadata": self.metadata,
"gcp_conn_id": self.gcp_conn_id,
"impersonation_chain": self.impersonation_chain,
"polling_interval_seconds": self.polling_interval_seconds,
},
)
async def run(self) -> AsyncIterator[TriggerEvent]:
"""Wait until cluster is deleted completely."""
try:
while self.end_time > time.time():
cluster = await self.get_async_hook().get_cluster(
region=self.region,
cluster_name=self.cluster_name,
project_id=self.project_id,
metadata=self.metadata,
)
self.log.info(
"Cluster status is %s. Sleeping for %s seconds.",
cluster.status.state,
self.polling_interval_seconds,
)
await asyncio.sleep(self.polling_interval_seconds)
except NotFound:
yield TriggerEvent({"status": "success", "message": ""})
except Exception as e:
yield TriggerEvent({"status": "error", "message": str(e)})
else:
yield TriggerEvent({"status": "error", "message": "Timeout"})
| DataprocDeleteClusterTrigger |
python | sphinx-doc__sphinx | sphinx/ext/autodoc/directive.py | {
"start": 1690,
"end": 3807
} | class ____:
"""A parameters container for Documenters."""
def __init__(
self,
env: BuildEnvironment,
reporter: Reporter | None,
options: Options,
lineno: int,
state: Any,
) -> None:
self.env = env
self._reporter = reporter
self.genopt = options
self.lineno = lineno
self.record_dependencies: set[str] = set()
self.result = StringList()
self.state = state
# Retained: legacy class-based
def process_documenter_options(
documenter: type[Documenter], config: Config, options: dict[str, str]
) -> Options:
"""Recognize options of Documenter from user input."""
default_options = config.autodoc_default_options
for name in AUTODOC_DEFAULT_OPTIONS:
if name not in documenter.option_spec:
continue
negated = options.pop('no-' + name, True) is None
if name in default_options and not negated:
if name in options and isinstance(default_options[name], str):
# take value from options if present or extend it
# with autodoc_default_options if necessary
if name in AUTODOC_EXTENDABLE_OPTIONS:
if options[name] is not None and options[name].startswith('+'):
options[name] = f'{default_options[name]},{options[name][1:]}'
else:
options[name] = default_options[name]
elif options.get(name) is not None:
# remove '+' from option argument if there's nothing to merge it with
options[name] = options[name].lstrip('+')
return Options(assemble_option_dict(options.items(), documenter.option_spec))
# Retained: legacy class-based
def parse_generated_content(
state: RSTState, content: StringList, documenter: Documenter
) -> list[Node]:
"""Parse an item of content generated by Documenter."""
from sphinx.ext.autodoc._directive import parse_generated_content
return parse_generated_content(state, content, documenter.titles_allowed)
# Retained: legacy class-based
| DocumenterBridge |
python | ray-project__ray | rllib/algorithms/cql/torch/default_cql_torch_rl_module.py | {
"start": 581,
"end": 8421
} | class ____(DefaultSACTorchRLModule):
def __init__(self, *args, **kwargs):
catalog_class = kwargs.pop("catalog_class", None)
if catalog_class is None:
catalog_class = SACCatalog
super().__init__(*args, **kwargs, catalog_class=catalog_class)
@override(DefaultSACTorchRLModule)
def _forward_train(self, batch: Dict) -> Dict[str, Any]:
# Call the super method.
fwd_out = super()._forward_train(batch)
# Make sure we perform a "straight-through gradient" pass here,
# ignoring the gradients of the q-net, however, still recording
# the gradients of the policy net (which was used to rsample the actions used
# here). This is different from doing `.detach()` or `with torch.no_grads()`,
# as these two methds would fully block all gradient recordings, including
# the needed policy ones.
all_params = list(self.pi_encoder.parameters()) + list(self.pi.parameters())
# if self.twin_q:
# all_params += list(self.qf_twin.parameters()) + list(
# self.qf_twin_encoder.parameters()
# )
for param in all_params:
param.requires_grad = False
# Compute the repeated actions, action log-probabilites and Q-values for all
# observations.
# First for the random actions (from the mu-distribution as named by Kumar et
# al. (2020)).
low = torch.tensor(
self.action_space.low,
device=fwd_out[QF_PREDS].device,
)
high = torch.tensor(
self.action_space.high,
device=fwd_out[QF_PREDS].device,
)
num_samples = batch[Columns.ACTIONS].shape[0] * self.model_config["num_actions"]
actions_rand_repeat = low + (high - low) * torch.rand(
(num_samples, low.shape[0]), device=fwd_out[QF_PREDS].device
)
# First for the random actions (from the mu-distribution as named in Kumar
# et al. (2020)) using repeated observations.
rand_repeat_out = self._repeat_actions(batch[Columns.OBS], actions_rand_repeat)
(fwd_out["actions_rand_repeat"], fwd_out["q_rand_repeat"]) = (
rand_repeat_out[Columns.ACTIONS],
rand_repeat_out[QF_PREDS],
)
# Sample current and next actions (from the pi distribution as named in Kumar
# et al. (2020)) using repeated observations
# Second for the current observations and the current action distribution.
curr_repeat_out = self._repeat_actions(batch[Columns.OBS])
(
fwd_out["actions_curr_repeat"],
fwd_out["logps_curr_repeat"],
fwd_out["q_curr_repeat"],
) = (
curr_repeat_out[Columns.ACTIONS],
curr_repeat_out[Columns.ACTION_LOGP],
curr_repeat_out[QF_PREDS],
)
# Then, for the next observations and the current action distribution.
next_repeat_out = self._repeat_actions(batch[Columns.NEXT_OBS])
(
fwd_out["actions_next_repeat"],
fwd_out["logps_next_repeat"],
fwd_out["q_next_repeat"],
) = (
next_repeat_out[Columns.ACTIONS],
next_repeat_out[Columns.ACTION_LOGP],
next_repeat_out[QF_PREDS],
)
if self.twin_q:
# First for the random actions from the mu-distribution.
fwd_out["q_twin_rand_repeat"] = rand_repeat_out[QF_TWIN_PREDS]
# Second for the current observations and the current action distribution.
fwd_out["q_twin_curr_repeat"] = curr_repeat_out[QF_TWIN_PREDS]
# Then, for the next observations and the current action distribution.
fwd_out["q_twin_next_repeat"] = next_repeat_out[QF_TWIN_PREDS]
# Reset the gradient requirements for all Q-function parameters.
for param in all_params:
param.requires_grad = True
return fwd_out
def _repeat_tensor(self, tensor: TensorType, repeat: int) -> TensorType:
"""Generates a repeated version of a tensor.
The repetition is done similar `np.repeat` and repeats each value
instead of the complete vector.
Args:
tensor: The tensor to be repeated.
repeat: How often each value in the tensor should be repeated.
Returns:
A tensor holding `repeat` repeated values of the input `tensor`
"""
# Insert the new dimension at axis 1 into the tensor.
t_repeat = tensor.unsqueeze(1)
# Repeat the tensor along the new dimension.
t_repeat = torch.repeat_interleave(t_repeat, repeat, dim=1)
# Stack the repeated values into the batch dimension.
t_repeat = t_repeat.view(-1, *tensor.shape[1:])
# Return the repeated tensor.
return t_repeat
def _repeat_actions(
self, obs: TensorType, actions: Optional[TensorType] = None
) -> Dict[str, TensorType]:
"""Generated actions and Q-values for repeated observations.
The `self.model_config["num_actions"]` define a multiplier
used for generating `num_actions` as many actions as the batch size.
Observations are repeated and then a model forward pass is made.
Args:
obs: A batched observation tensor.
actions: An optional batched actions tensor.
Returns:
A dictionary holding the (sampled or passed-in actions), the log
probabilities (of sampled actions), the Q-values and if available
the twin-Q values.
"""
output = {}
# Receive the batch size.
batch_size = obs.shape[0]
# Receive the number of action to sample.
num_actions = self.model_config["num_actions"]
# Repeat the observations `num_actions` times.
obs_repeat = tree.map_structure(
lambda t: self._repeat_tensor(t, num_actions), obs
)
# Generate a batch for the forward pass.
temp_batch = {Columns.OBS: obs_repeat}
if actions is None:
# TODO (simon): Run the forward pass in inference mode.
# Compute the action logits.
pi_encoder_outs = self.pi_encoder(temp_batch)
action_logits = self.pi(pi_encoder_outs[ENCODER_OUT])
# Generate the squashed Gaussian from the model's logits.
action_dist = self.get_train_action_dist_cls().from_logits(action_logits)
# Sample the actions. Note, we want to make a backward pass through
# these actions.
output[Columns.ACTIONS] = action_dist.rsample()
# Compute the action log-probabilities.
output[Columns.ACTION_LOGP] = action_dist.logp(
output[Columns.ACTIONS]
).view(batch_size, num_actions, 1)
else:
output[Columns.ACTIONS] = actions
# Compute all Q-values.
temp_batch.update(
{
Columns.ACTIONS: output[Columns.ACTIONS],
}
)
output.update(
{
QF_PREDS: self._qf_forward_train_helper(
temp_batch,
self.qf_encoder,
self.qf,
).view(batch_size, num_actions, 1)
}
)
# If we have a twin-Q network, compute its Q-values, too.
if self.twin_q:
output.update(
{
QF_TWIN_PREDS: self._qf_forward_train_helper(
temp_batch,
self.qf_twin_encoder,
self.qf_twin,
).view(batch_size, num_actions, 1)
}
)
del temp_batch
# Return
return output
| DefaultCQLTorchRLModule |
python | plotly__plotly.py | plotly/graph_objs/layout/title/_pad.py | {
"start": 235,
"end": 4380
} | class ____(_BaseLayoutHierarchyType):
_parent_path_str = "layout.title"
_path_str = "layout.title.pad"
_valid_props = {"b", "l", "r", "t"}
@property
def b(self):
"""
The amount of padding (in px) along the bottom of the
component.
The 'b' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["b"]
@b.setter
def b(self, val):
self["b"] = val
@property
def l(self):
"""
The amount of padding (in px) on the left side of the
component.
The 'l' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["l"]
@l.setter
def l(self, val):
self["l"] = val
@property
def r(self):
"""
The amount of padding (in px) on the right side of the
component.
The 'r' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["r"]
@r.setter
def r(self, val):
self["r"] = val
@property
def t(self):
"""
The amount of padding (in px) along the top of the component.
The 't' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["t"]
@t.setter
def t(self, val):
self["t"] = val
@property
def _prop_descriptions(self):
return """\
b
The amount of padding (in px) along the bottom of the
component.
l
The amount of padding (in px) on the left side of the
component.
r
The amount of padding (in px) on the right side of the
component.
t
The amount of padding (in px) along the top of the
component.
"""
def __init__(self, arg=None, b=None, l=None, r=None, t=None, **kwargs):
"""
Construct a new Pad object
Sets the padding of the title. Each padding value only applies
when the corresponding `xanchor`/`yanchor` value is set
accordingly. E.g. for left padding to take effect, `xanchor`
must be set to "left". The same rule applies if
`xanchor`/`yanchor` is determined automatically. Padding is
muted if the respective anchor value is "middle*/*center".
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.layout.title.Pad`
b
The amount of padding (in px) along the bottom of the
component.
l
The amount of padding (in px) on the left side of the
component.
r
The amount of padding (in px) on the right side of the
component.
t
The amount of padding (in px) along the top of the
component.
Returns
-------
Pad
"""
super().__init__("pad")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.layout.title.Pad
constructor must be a dict or
an instance of :class:`plotly.graph_objs.layout.title.Pad`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("b", arg, b)
self._set_property("l", arg, l)
self._set_property("r", arg, r)
self._set_property("t", arg, t)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Pad |
python | mlflow__mlflow | mlflow/webhooks/types.py | {
"start": 6709,
"end": 7331
} | class ____(TypedDict):
"""Payload sent when a tag is set on a prompt.
Example payload:
.. code-block:: python
{
"name": "example_prompt",
"key": "example_key",
"value": "example_value",
}
"""
name: str
"""The name of the prompt."""
key: str
"""The tag key being set."""
value: str
"""The tag value being set."""
@classmethod
def example(cls) -> "PromptTagSetPayload":
return cls(
name="example_prompt",
key="example_key",
value="example_value",
)
| PromptTagSetPayload |
python | gawel__pyquery | tests/test_browser.py | {
"start": 101,
"end": 599
} | class ____(unittest.TestCase, TextExtractionMixin):
def _prepare_dom(self, html):
super()._prepare_dom(html)
self.pq = PyQuery(self.last_html)
def _simple_test(self, html, expected_sq, expected_nosq, **kwargs):
self._prepare_dom(html)
text_sq = self.pq.text(squash_space=True, **kwargs)
text_nosq = self.pq.text(squash_space=False, **kwargs)
self.assertEqual(text_sq, expected_sq)
self.assertEqual(text_nosq, expected_nosq)
| TestInnerText |
python | huggingface__transformers | src/transformers/models/mobilevitv2/modeling_mobilevitv2.py | {
"start": 4386,
"end": 6037
} | class ____(nn.Module):
"""
Inverted residual block (MobileNetv2): https://huggingface.co/papers/1801.04381
"""
def __init__(
self, config: MobileViTV2Config, in_channels: int, out_channels: int, stride: int, dilation: int = 1
) -> None:
super().__init__()
expanded_channels = make_divisible(int(round(in_channels * config.expand_ratio)), 8)
if stride not in [1, 2]:
raise ValueError(f"Invalid stride {stride}.")
self.use_residual = (stride == 1) and (in_channels == out_channels)
self.expand_1x1 = MobileViTV2ConvLayer(
config, in_channels=in_channels, out_channels=expanded_channels, kernel_size=1
)
self.conv_3x3 = MobileViTV2ConvLayer(
config,
in_channels=expanded_channels,
out_channels=expanded_channels,
kernel_size=3,
stride=stride,
groups=expanded_channels,
dilation=dilation,
)
self.reduce_1x1 = MobileViTV2ConvLayer(
config,
in_channels=expanded_channels,
out_channels=out_channels,
kernel_size=1,
use_activation=False,
)
def forward(self, features: torch.Tensor) -> torch.Tensor:
residual = features
features = self.expand_1x1(features)
features = self.conv_3x3(features)
features = self.reduce_1x1(features)
return residual + features if self.use_residual else features
# Copied from transformers.models.mobilevit.modeling_mobilevit.MobileViTMobileNetLayer with MobileViT->MobileViTV2
| MobileViTV2InvertedResidual |
python | pyinstaller__pyinstaller | bootloader/waflib/Tools/glib2.py | {
"start": 8925,
"end": 10021
} | class ____(Task.Task):
run_str = 'rm -f ${GLIB_VALIDATE_SCHEMA_OUTPUT} && ${GLIB_COMPILE_SCHEMAS} --dry-run ${GLIB_COMPILE_SCHEMAS_OPTIONS} && touch ${GLIB_VALIDATE_SCHEMA_OUTPUT}'
color = 'PINK'
@extension('.gresource.xml')
def process_gresource_source(self, node):
if not self.env.GLIB_COMPILE_RESOURCES:
raise Errors.WafError(
"Unable to process GResource file - glib-compile-resources was not found during configure"
)
if 'gresource' in self.features:
return
h_node = node.change_ext('_xml.h')
c_node = node.change_ext('_xml.c')
self.create_task('glib_gresource_source', node, [h_node, c_node])
self.source.append(c_node)
@feature('gresource')
def process_gresource_bundle(self):
for i in self.to_list(self.source):
node = self.path.find_resource(i)
task = self.create_task('glib_gresource_bundle', node, node.change_ext(''))
inst_to = getattr(self, 'install_path', None)
if inst_to:
self.add_install_files(install_to=inst_to, install_from=task.outputs)
| glib_validate_schema |
python | altair-viz__altair | tests/utils/test_schemapi.py | {
"start": 1672,
"end": 2634
} | class ____(_TestSchema):
_schema = {
"$schema": _JSON_SCHEMA_DRAFT_URL,
"definitions": {
"StringMapping": {
"type": "object",
"additionalProperties": {"type": "string"},
},
"StringArray": {"type": "array", "items": {"type": "string"}},
},
"properties": {
"a": {"$ref": "#/definitions/StringMapping"},
"a2": {"type": "object", "additionalProperties": {"type": "number"}},
"b": {"$ref": "#/definitions/StringArray"},
"b2": {"type": "array", "items": {"type": "number"}},
"c": {"type": ["string", "number"]},
"d": {
"anyOf": [
{"$ref": "#/definitions/StringMapping"},
{"$ref": "#/definitions/StringArray"},
]
},
"e": {"items": [{"type": "string"}, {"type": "string"}]},
},
}
| MySchema |
python | ray-project__ray | python/ray/air/tests/execution/test_e2e_train_flow.py | {
"start": 1915,
"end": 8236
} | class ____:
"""This is a Ray Train-like execution flow.
- We want to run 4 actors in total ("trials")
- Each actor runs two init functions
- We train all actors in parallel for 10 iterations
- Errors can come up on actor construction, in the init functions,
or during training
- When an actor fails, restart that actor
- When a task fails, stop actor, and restart
"""
def __init__(
self, actor_manager: RayActorManager, errors: Optional[List[str]] = None
):
self._actor_manager = actor_manager
self._finished = False
self._actors_to_run = 4
self._tracked_actors = []
self._actors_stopped = 0
self._actors_to_replace = set()
self._ready_actors = set()
self._training_barrier = Barrier(
max_results=self._actors_to_run,
on_completion=self.training_barrier_completed,
)
self._restart_training = None
self._training_iter = 0
self._results = []
self._errors = errors
def setup_actors(self):
for actor_id in range(self._actors_to_run):
error_kwargs = {}
if self._errors:
error = random.choice(self._errors)
error_kwargs[error] = True
print("Actor", actor_id, "will be failing with", error_kwargs)
tracked_actor = self._actor_manager.add_actor(
cls=Actor,
kwargs={"id": actor_id, **error_kwargs},
resource_request=ResourceRequest([{"CPU": 1}]),
on_start=self.actor_started,
on_stop=self.actor_stopped,
on_error=self.actor_error,
)
self._tracked_actors.append(tracked_actor)
def actor_started(self, tracked_actor: TrackedActor):
self._actor_manager.schedule_actor_task(
tracked_actor,
"setup_1",
on_error=self.setup_error,
on_result=self.setup_1_result,
)
def actor_stopped(self, tracked_actor: TrackedActor):
self._ready_actors.discard(tracked_actor)
if tracked_actor in self._actors_to_replace:
self._replace_actor(tracked_actor=tracked_actor)
else:
self._actors_stopped += 1
self._finished = self._actors_stopped >= self._actors_to_run
def actor_error(self, tracked_actor: TrackedActor, exception: Exception):
self._ready_actors.discard(tracked_actor)
self._replace_actor(tracked_actor=tracked_actor)
def _replace_actor(self, tracked_actor: TrackedActor):
actor_index = self._tracked_actors.index(tracked_actor)
replacement_actor = self._actor_manager.add_actor(
cls=Actor,
kwargs={"id": actor_index},
resource_request=ResourceRequest([{"CPU": 1}]),
on_start=self.actor_started,
on_stop=self.actor_stopped,
on_error=self.actor_error,
)
self._tracked_actors[actor_index] = replacement_actor
def setup_1_result(self, tracked_actor: TrackedActor, result: Any):
self._actor_manager.schedule_actor_task(
tracked_actor,
"setup_2",
on_error=self.setup_error,
on_result=self.setup_2_result,
)
def setup_2_result(self, tracked_actor: TrackedActor, result: Any):
self._ready_actors.add(tracked_actor)
if len(self._ready_actors) == self._actors_to_run:
self.continue_training()
def setup_error(self, tracked_actor: TrackedActor, exception: Exception):
if isinstance(exception, RayActorError):
return
self._actors_to_replace.add(tracked_actor)
self._actor_manager.remove_actor(tracked_actor)
def continue_training(self):
if self._restart_training:
self._training_iter = self._restart_training
else:
self._training_iter += 1
self._training_barrier.reset()
self._actor_manager.schedule_actor_tasks(
self._tracked_actors,
"train",
args=(self._training_iter,),
on_result=self._training_barrier.arrive,
on_error=self.training_error,
)
def training_barrier_completed(self, barrier: Barrier):
self._results.append([res for _, res in barrier.get_results()])
self._restart_training = None
# If less than 10 epochs, continue training
if self._training_iter < 10:
return self.continue_training()
# Else, training finished
for tracked_actor in self._tracked_actors:
self._actor_manager.remove_actor(tracked_actor)
def training_error(self, tracked_actor: TrackedActor, exception: Exception):
self._restart_training = self._training_iter
if isinstance(exception, RayActorError):
return
self._actors_to_replace.add(tracked_actor)
self._ready_actors.discard(tracked_actor)
self._actor_manager.remove_actor(tracked_actor)
def run(self):
self.setup_actors()
while not self._finished:
self._actor_manager.next()
def get_results(self) -> List[List[float]]:
return self._results
@pytest.mark.parametrize(
"resource_manager_cls", [FixedResourceManager, PlacementGroupResourceManager]
)
@pytest.mark.parametrize(
"errors",
[
None,
"actor_init_kill",
"actor_setup_kill",
"actor_setup_fail",
"actor_train_kill",
"actor_train_fail",
# Chaos - every actor fails somehow, but in different ways
[
"actor_init_kill",
"actor_setup_kill",
"actor_setup_fail",
"actor_train_kill",
"actor_train_fail",
],
],
)
def test_e2e(ray_start_4_cpus, resource_manager_cls, errors):
actor_manager = RayActorManager(resource_manager=resource_manager_cls())
if errors and isinstance(errors, str):
errors = [errors]
flow = TrainFlow(actor_manager=actor_manager, errors=errors)
flow.run()
results = flow.get_results()
assert results == [[i] * 4 for i in range(1, 11)], results
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
| TrainFlow |
python | apache__airflow | providers/google/tests/unit/google/cloud/triggers/test_bigquery.py | {
"start": 30746,
"end": 35872
} | class ____:
def test_table_existence_trigger_serialization(self, table_existence_trigger):
"""
Asserts that the BigQueryTableExistenceTrigger correctly serializes its arguments and classpath.
"""
classpath, kwargs = table_existence_trigger.serialize()
assert classpath == "airflow.providers.google.cloud.triggers.bigquery.BigQueryTableExistenceTrigger"
assert kwargs == {
"dataset_id": TEST_DATASET_ID,
"project_id": TEST_GCP_PROJECT_ID,
"table_id": TEST_TABLE_ID,
"gcp_conn_id": TEST_GCP_CONN_ID,
"impersonation_chain": TEST_IMPERSONATION_CHAIN,
"poll_interval": POLLING_PERIOD_SECONDS,
"hook_params": TEST_HOOK_PARAMS,
}
@pytest.mark.asyncio
@mock.patch(
"airflow.providers.google.cloud.triggers.bigquery.BigQueryTableExistenceTrigger._table_exists"
)
async def test_big_query_table_existence_trigger_success(
self, mock_table_exists, table_existence_trigger
):
"""Tests success case BigQueryTableExistenceTrigger"""
mock_table_exists.return_value = True
generator = table_existence_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "success", "message": "success"}) == actual
@pytest.mark.asyncio
@mock.patch(
"airflow.providers.google.cloud.triggers.bigquery.BigQueryTableExistenceTrigger._table_exists"
)
async def test_table_existence_trigger_pending(self, mock_table_exists, table_existence_trigger):
"""Test that BigQueryTableExistenceTrigger is in loop till the table exist."""
mock_table_exists.return_value = False
task = asyncio.create_task(table_existence_trigger.run().__anext__())
await asyncio.sleep(0.5)
# TriggerEvent was not returned
assert task.done() is False
asyncio.get_event_loop().stop()
@pytest.mark.asyncio
@mock.patch(
"airflow.providers.google.cloud.triggers.bigquery.BigQueryTableExistenceTrigger._table_exists"
)
async def test_table_existence_trigger_exception(self, mock_table_exists, table_existence_trigger):
"""Test BigQueryTableExistenceTrigger throws exception if any error."""
mock_table_exists.side_effect = AsyncMock(side_effect=Exception("Test exception"))
generator = table_existence_trigger.run()
actual = await generator.asend(None)
assert TriggerEvent({"status": "error", "message": "Test exception"}) == actual
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryTableAsyncHook.get_table_client")
async def test_table_exists(self, mock_get_table_client, table_existence_trigger):
"""Test BigQueryTableExistenceTrigger._table_exists async function with mocked value
and mocked return value"""
hook = BigQueryTableAsyncHook()
mock_get_table_client.return_value = AsyncMock(Table)
res = await table_existence_trigger._table_exists(
hook, TEST_DATASET_ID, TEST_TABLE_ID, TEST_GCP_PROJECT_ID
)
assert res is True
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryTableAsyncHook.get_table_client")
async def test_table_exists_exception(self, mock_get_table_client, table_existence_trigger):
"""Test BigQueryTableExistenceTrigger._table_exists async function with exception and return False"""
hook = BigQueryTableAsyncHook()
mock_get_table_client.side_effect = ClientResponseError(
history=(),
request_info=RequestInfo(
headers=CIMultiDict(),
real_url=URL("https://example.com"),
method="GET",
url=URL("https://example.com"),
),
status=404,
message="Not Found",
)
res = await table_existence_trigger._table_exists(
hook, TEST_DATASET_ID, TEST_TABLE_ID, TEST_GCP_PROJECT_ID
)
expected_response = False
assert res == expected_response
@pytest.mark.asyncio
@mock.patch("airflow.providers.google.cloud.hooks.bigquery.BigQueryTableAsyncHook.get_table_client")
async def test_table_exists_raise_exception(self, mock_get_table_client, table_existence_trigger):
"""Test BigQueryTableExistenceTrigger._table_exists async function with raise exception"""
hook = BigQueryTableAsyncHook()
mock_get_table_client.side_effect = ClientResponseError(
history=(),
request_info=RequestInfo(
headers=CIMultiDict(),
real_url=URL("https://example.com"),
method="GET",
url=URL("https://example.com"),
),
status=400,
message="Not Found",
)
with pytest.raises(ClientResponseError):
await table_existence_trigger._table_exists(
hook, TEST_DATASET_ID, TEST_TABLE_ID, TEST_GCP_PROJECT_ID
)
| TestBigQueryTableExistenceTrigger |
python | getsentry__sentry | tests/sentry/users/api/endpoints/test_user_avatar.py | {
"start": 432,
"end": 8536
} | class ____(APITestCase):
def test_get_letter_avatar(self) -> None:
user = self.create_user(email="a@example.com")
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(user.id)
assert response.data["avatar"]["avatarType"] == "letter_avatar"
assert response.data["avatar"]["avatarUuid"] is None
def test_get_gravatar(self) -> None:
user = self.create_user(email="a@example.com")
UserAvatar.objects.create(user=user, avatar_type=UserAvatarType.GRAVATAR)
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(user.id)
assert response.data["avatar"]["avatarType"] == "gravatar"
assert response.data["avatar"]["avatarUuid"] is None
def test_get_upload_control_file(self) -> None:
user = self.create_user(email="a@example.com")
photo = ControlFile.objects.create(name="test.png", type="avatar.file")
photo.putfile(BytesIO(b"test"))
UserAvatar.objects.create(
user=user, control_file_id=photo.id, avatar_type=UserAvatarType.UPLOAD
)
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(user.id)
assert response.data["avatar"]["avatarType"] == "upload"
assert response.data["avatar"]["avatarUuid"]
def test_get_upload_file(self) -> None:
user = self.create_user(email="a@example.com")
with assume_test_silo_mode(SiloMode.REGION):
photo = File.objects.create(name="test.png", type="avatar.file")
photo.putfile(BytesIO(b"test"))
UserAvatar.objects.create(
user=user, control_file_id=photo.id, avatar_type=UserAvatarType.UPLOAD
)
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(user.id)
assert response.data["avatar"]["avatarType"] == "upload"
assert response.data["avatar"]["avatarUuid"]
def test_get_prefers_control_file(self) -> None:
user = self.create_user(email="a@example.com")
with assume_test_silo_mode(SiloMode.REGION):
photo = File.objects.create(name="test.png", type="avatar.file")
photo.putfile(BytesIO(b"test"))
controlphoto = ControlFile.objects.create(name="control_test.png", type="avatar.file")
controlphoto.putfile(BytesIO(b"control test"))
avatar = UserAvatar.objects.create(
user=user,
control_file_id=controlphoto.id,
avatar_type=UserAvatarType.UPLOAD,
)
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.get(url, format="json")
assert response.status_code == 200, response.content
assert response.data["id"] == str(user.id)
assert response.data["avatar"]["avatarType"] == "upload"
assert response.data["avatar"]["avatarUuid"]
assert isinstance(avatar.get_file(), ControlFile)
def test_put_gravatar(self) -> None:
user = self.create_user(email="a@example.com")
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.put(url, data={"avatar_type": "gravatar"}, format="json")
avatar = UserAvatar.objects.get(user=user)
assert response.status_code == 200, response.content
assert avatar.get_avatar_type_display() == "gravatar"
def test_put_upload(self) -> None:
user = self.create_user(email="a@example.com")
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.put(
url,
data={
"avatar_type": "upload",
"avatar_photo": b64encode(self.load_fixture("avatar.jpg")),
},
format="json",
)
avatar = UserAvatar.objects.get(user=user)
assert response.status_code == 200, response.content
assert avatar.get_avatar_type_display() == "upload"
assert avatar.get_file_id()
assert avatar.control_file_id, "new files are control files"
assert ControlFile.objects.filter(id=avatar.control_file_id).exists()
def test_put_upload_saves_to_control_file(self) -> None:
user = self.create_user(email="a@example.com")
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
with self.tasks():
response = self.client.put(
url,
data={
"avatar_type": "upload",
"avatar_photo": b64encode(self.load_fixture("avatar.jpg")),
},
format="json",
)
avatar = UserAvatar.objects.get(user=user)
assert response.status_code == 200, response.content
assert avatar.get_file_id()
assert avatar.control_file_id
assert isinstance(avatar.get_file(), ControlFile)
assert ControlFile.objects.filter(id=avatar.control_file_id).exists()
def test_put_upload_saves_to_control_file_with_separate_storage(self) -> None:
with self.options(
{
"filestore.control.backend": options_store.get("filestore.backend"),
"filestore.control.options": options_store.get("filestore.options"),
}
):
user = self.create_user(email="a@example.com")
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
with self.tasks():
response = self.client.put(
url,
data={
"avatar_type": "upload",
"avatar_photo": b64encode(self.load_fixture("avatar.jpg")),
},
format="json",
)
avatar = UserAvatar.objects.get(user=user)
assert response.status_code == 200, response.content
assert avatar.get_file_id()
assert isinstance(avatar.get_file(), ControlFile)
assert ControlFile.objects.filter(id=avatar.control_file_id).exists()
def test_put_bad(self) -> None:
user = self.create_user(email="a@example.com")
UserAvatar.objects.create(user=user)
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": "me"})
response = self.client.put(url, data={"avatar_type": "upload"}, format="json")
avatar = UserAvatar.objects.get(user=user)
assert response.status_code == 400
assert avatar.get_avatar_type_display() == "letter_avatar"
response = self.client.put(url, data={"avatar_type": "foo"}, format="json")
assert response.status_code == 400
assert avatar.get_avatar_type_display() == "letter_avatar"
def test_put_forbidden(self) -> None:
user = self.create_user(email="a@example.com")
user2 = self.create_user(email="b@example.com")
self.login_as(user=user)
url = reverse("sentry-api-0-user-avatar", kwargs={"user_id": user2.id})
response = self.client.put(url, data={"avatar_type": "gravatar"}, format="json")
assert response.status_code == 403
| UserAvatarTest |
python | django__django | tests/forms_tests/tests/test_formsets.py | {
"start": 1011,
"end": 1067
} | class ____(Form):
name = CharField()
| FavoriteDrinkForm |
python | dagster-io__dagster | python_modules/dagster/dagster/components/core/defs_module.py | {
"start": 14061,
"end": 19104
} | class ____(Component):
"""A Python module containing Dagster definitions or Pythonic
components. Used for implicit loading of Dagster definitions from
Python files in the defs folder.
"""
path: Path
components: Mapping[str, Component]
def build_defs(self, context: ComponentLoadContext) -> Definitions:
from dagster.components.core.decl import PythonFileDecl
module = context.load_defs_relative_python_module(self.path)
def_objects = check.is_list(
list(find_objects_in_module_of_types(module, Definitions)), Definitions
)
lazy_def_objects = check.is_list(
list(find_objects_in_module_of_types(module, LazyDefinitions)), LazyDefinitions
)
if lazy_def_objects and def_objects:
raise DagsterInvalidDefinitionError(
f"Found both @definitions-decorated functions and Definitions objects in {self.path}. "
"At most one may be specified per module."
)
if len(def_objects) == 1:
return next(iter(def_objects))
if len(def_objects) > 1:
raise DagsterInvalidDefinitionError(
f"Found multiple Definitions objects in {self.path}. At most one Definitions object "
"may be specified per module."
)
if len(lazy_def_objects) == 1:
lazy_def = next(iter(lazy_def_objects))
return lazy_def(context)
if len(lazy_def_objects) > 1:
return Definitions.merge(*[lazy_def(context) for lazy_def in lazy_def_objects])
decl = check.inst(context.component_decl, PythonFileDecl)
return Definitions.merge(
*[
context.build_defs_at_path(child_decl.path).with_definition_metadata_update(
lambda metadata: _add_defs_py_metadata(
component=self.components[attr],
metadata=metadata,
)
)
for attr, child_decl in decl.decls.items()
],
load_definitions_from_module(module),
)
def invoke_inline_template_var(context: ComponentDeclLoadContext, tv: Callable) -> Any:
sig = inspect.signature(tv)
if len(sig.parameters) == 1:
return tv(context)
elif len(sig.parameters) == 0:
return tv()
else:
raise ValueError(f"Template var must have 0 or 1 parameters, got {len(sig.parameters)}")
def load_yaml_component_from_path(context: ComponentLoadContext, component_def_path: Path):
from dagster.components.core.decl import build_component_decl_from_yaml_file
decl = build_component_decl_from_yaml_file(context, component_def_path)
return context.load_structural_component_at_path(decl.path)
# When we remove component.yaml, we can remove this function for just a defs.yaml check
def find_defs_or_component_yaml(path: Path) -> Optional[Path]:
# Check for defs.yaml has precedence, component.yaml is deprecated
return next(
(p for p in (path / "defs.yaml", path / "component.yaml") if p.exists()),
None,
)
T = TypeVar("T", bound=ComponentDeclLoadContext)
def context_with_injected_scope(
context: T,
component_cls: type[Component],
template_vars_module: Optional[str],
) -> T:
# Merge backward-compatible get_additional_scope with context-aware static template vars
legacy_scope = component_cls.get_additional_scope()
context_aware_scope = get_context_aware_static_template_vars(component_cls, context)
# Merge scopes, with context-aware taking precedence
merged_scope = {**legacy_scope, **context_aware_scope}
context = context.with_rendering_scope(merged_scope)
if not template_vars_module:
return context
absolute_template_vars_module = (
f"{context.defs_relative_module_name(context.path)}{template_vars_module}"
if template_vars_module.startswith(".")
else template_vars_module
)
module = importlib.import_module(absolute_template_vars_module)
template_var_fns = find_inline_template_vars_in_module(module)
if not template_var_fns:
raise DagsterInvalidDefinitionError(
f"No template vars found in module {absolute_template_vars_module}"
)
return context.with_rendering_scope(
{
**{
name: invoke_inline_template_var(context, tv)
for name, tv in template_var_fns.items()
},
},
)
def asset_post_processor_list_from_post_processing_dict(
resolution_context: ResolutionContext, post_processing: Optional[Mapping[str, Any]]
) -> list[AssetPostProcessor]:
if not post_processing:
return []
post_processing_model = ComponentPostProcessingModel.resolve_from_model(
context=resolution_context,
model=TypeAdapter(ComponentPostProcessingModel.model()).validate_python(post_processing),
)
return list(post_processing_model.assets or [])
| PythonFileComponent |
python | python__mypy | mypy/traverser.py | {
"start": 30070,
"end": 30603
} | class ____(FuncCollectorBase):
def __init__(self) -> None:
super().__init__()
self.in_assignment = False
self.yield_from_expressions: list[tuple[YieldFromExpr, bool]] = []
def visit_assignment_stmt(self, stmt: AssignmentStmt) -> None:
self.in_assignment = True
super().visit_assignment_stmt(stmt)
self.in_assignment = False
def visit_yield_from_expr(self, expr: YieldFromExpr) -> None:
self.yield_from_expressions.append((expr, self.in_assignment))
| YieldFromCollector |
python | facebook__pyre-check | tools/upgrade/commands/tests/strict_default_test.py | {
"start": 606,
"end": 7949
} | class ____(unittest.TestCase):
@patch.object(Configuration, "get_source_paths")
@patch.object(strict_default, "remove_local_mode")
@patch.object(strict_default, "_get_configuration_path", return_value=Path("."))
@patch.object(Configuration, "get_directory")
@patch.object(Configuration, "write")
@patch.object(Configuration, "add_strict")
@patch.object(ErrorSuppressingCommand, "_get_and_suppress_errors")
def test_run_strict_default(
self,
get_and_suppress_errors,
add_strict,
configuration_write,
get_directory,
get_configuration_path,
remove_local_mode,
get_source_paths,
) -> None:
arguments = MagicMock()
arguments.local_configuration = Path("local")
arguments.fixme_threshold = 1
configuration_contents = '{"targets":[]}'
with patch("builtins.open", mock_open(read_data=configuration_contents)):
StrictDefault.from_arguments(arguments, repository).run()
get_and_suppress_errors.assert_called_once()
get_and_suppress_errors.reset_mock()
configuration_contents = '{"targets":[]}'
with patch("builtins.open", mock_open(read_data=configuration_contents)):
StrictDefault.from_arguments(arguments, repository).run()
get_and_suppress_errors.assert_called_once()
# Exceeding error threshold
get_and_suppress_errors.reset_mock()
configuration_contents = '{"targets":[]}'
with patch("builtins.open", mock_open(read_data=configuration_contents)):
StrictDefault.from_arguments(arguments, repository).run()
get_and_suppress_errors.assert_called_once()
@patch.object(Configuration, "get_source_paths")
@patch.object(strict_default, "remove_local_mode")
@patch.object(strict_default, "_get_configuration_path", return_value=Path("."))
@patch.object(Configuration, "get_directory")
@patch.object(Configuration, "write")
@patch.object(Configuration, "add_strict")
@patch.object(ErrorSuppressingCommand, "_get_and_suppress_errors")
def test_remove_strict_headers(
self,
get_and_suppress_errors,
add_strict,
configuration_write,
get_directory,
get_configuration_path,
remove_local_mode,
get_source_paths,
) -> None:
arguments = MagicMock()
arguments.local_configuration = Path("local")
arguments.fixme_threshold = 1
configuration_contents = '{"targets":[]}'
get_source_paths.return_value = {Path("foo.py")}
# Remove strict headers only
arguments.remove_strict_headers = True
arguments.remove_unsafe_headers = False
configuration_contents = '{"source_directories":[]}'
with patch("builtins.open", mock_open(read_data=configuration_contents)):
StrictDefault.from_arguments(arguments, repository).run()
remove_local_mode.assert_called_once_with(
Path("foo.py"), [LocalMode.STRICT]
)
@patch.object(Configuration, "get_source_paths")
@patch.object(strict_default, "remove_local_mode")
@patch.object(strict_default, "_get_configuration_path", return_value=Path("."))
@patch.object(Configuration, "get_directory")
@patch.object(Configuration, "write")
@patch.object(Configuration, "add_strict")
@patch.object(ErrorSuppressingCommand, "_get_and_suppress_errors")
def test_remove_unsafe_headers(
self,
get_and_suppress_errors,
add_strict,
configuration_write,
get_directory,
get_configuration_path,
remove_local_mode,
get_source_paths,
) -> None:
arguments = MagicMock()
arguments.local_configuration = Path("local")
arguments.fixme_threshold = 1
configuration_contents = '{"targets":[]}'
get_source_paths.return_value = {Path("foo.py")}
# Remove unsafe headers only
remove_local_mode.reset_mock()
arguments.remove_strict_headers = False
arguments.remove_unsafe_headers = True
configuration_contents = '{"targets":[]}'
with patch("builtins.open", mock_open(read_data=configuration_contents)):
StrictDefault.from_arguments(arguments, repository).run()
remove_local_mode.assert_called_once_with(
Path("foo.py"), [LocalMode.UNSAFE]
)
@patch.object(Configuration, "get_source_paths")
@patch.object(strict_default, "remove_local_mode")
@patch.object(strict_default, "_get_configuration_path", return_value=Path("."))
@patch.object(Configuration, "get_directory")
@patch.object(Configuration, "write")
@patch.object(Configuration, "add_strict")
@patch.object(ErrorSuppressingCommand, "_get_and_suppress_errors")
def test_remove_strict_and_unsafe_headers(
self,
get_and_suppress_errors,
add_strict,
configuration_write,
get_directory,
get_configuration_path,
remove_local_mode,
get_source_paths,
) -> None:
arguments = MagicMock()
arguments.local_configuration = Path("local")
arguments.fixme_threshold = 1
configuration_contents = '{"targets":[]}'
# Remove unsafe and strict headers
arguments.remove_strict_headers = True
configuration_contents = '{"targets":[]}'
get_source_paths.return_value = {Path("foo.py")}
with patch("builtins.open", mock_open(read_data=configuration_contents)):
StrictDefault.from_arguments(arguments, repository).run()
remove_local_mode.assert_called_once_with(
Path("foo.py"), [LocalMode.STRICT, LocalMode.UNSAFE]
)
@patch.object(Configuration, "get_source_paths")
@patch.object(strict_default, "remove_local_mode")
@patch.object(strict_default, "_get_configuration_path", return_value=Path("."))
@patch.object(Configuration, "get_directory")
@patch.object(Configuration, "write")
@patch.object(Configuration, "add_strict")
@patch.object(ErrorSuppressingCommand, "_get_and_suppress_errors")
def test_dont_remove_strict_or_unsafe_headers(
self,
get_and_suppress_errors,
add_strict,
configuration_write,
get_directory,
get_configuration_path,
remove_local_mode,
get_source_paths,
) -> None:
arguments = MagicMock()
arguments.local_configuration = Path("local")
arguments.fixme_threshold = 1
configuration_contents = '{"targets":[]}'
get_source_paths.return_value = {Path("foo.py")}
# Don't remove unsafe or strict headers
arguments.remove_strict_headers = False
arguments.remove_unsafe_headers = False
configuration_contents = '{"targets":[]}'
with patch("builtins.open", mock_open(read_data=configuration_contents)):
StrictDefault.from_arguments(arguments, repository).run()
remove_local_mode.assert_not_called()
def _ensure_files_exist(root: Path, relatives: Iterable[str]) -> None:
for relative in relatives:
full_path = root / relative
full_path.parent.mkdir(parents=True, exist_ok=True)
full_path.touch(exist_ok=True)
| StrictDefaultTest |
python | realpython__materials | document-python-code-with-chatgpt/circle.py | {
"start": 268,
"end": 1133
} | class ____:
"""
A Circle class to perform some circle-related operations.
Parameters
----------
radius : float
The radius of the circle.
Attributes
----------
radius : float
The radius of the circle.
Methods
-------
calculate_area():
Calculates the area of the circle.
"""
def __init__(self, radius):
"""
Constructs all the necessary attributes for the circle object.
Parameters
----------
radius : float
The radius of the circle.
"""
self.radius = radius
def calculate_area(self):
"""
Calculate the area of the circle.
Returns
-------
float
The area of the circle rounded to 2 decimal places.
"""
return round(math.pi * self.radius**2, 2)
| Circle |
python | openai__openai-python | src/openai/types/responses/response.py | {
"start": 1552,
"end": 1642
} | class ____(BaseModel):
id: str
"""The unique ID of the conversation."""
| Conversation |
python | pytorch__pytorch | torch/distributed/optim/optimizer.py | {
"start": 1016,
"end": 2042
} | class ____(nn.Module):
# TorchScript does not support multithread concurrent compiling.
# request_callback might invoke concurrent compiling, so we
# serialize the compiling with a lock
compile_lock = Lock()
def __init__(self, optim_cls, local_params_rref, *args, **kwargs):
super().__init__()
self._local_params = [rref.local_value() for rref in local_params_rref]
self.optim = optim_cls(self._local_params, *args, **kwargs)
@jit.export
def step(self, autograd_ctx_id: int):
all_local_grads = dist_autograd.get_gradients(autograd_ctx_id)
# apply functional optimizer step with a list of gradients
grads: list[Tensor | None] = [
all_local_grads[p] if p in all_local_grads else None # noqa: SIM401
for p in self._local_params
]
self.optim.step(grads)
# TODO (wanchaol): remove/merge this with ScriptLocalOptimizer once
# we have converted all to functional optimizer in distributed.optim
| _ScriptLocalOptimizer |
python | walkccc__LeetCode | solutions/628. Maximum Product of Three Numbers/628.py | {
"start": 0,
"end": 176
} | class ____:
def maximumProduct(self, nums: list[int]) -> int:
nums.sort()
return max(nums[-1] * nums[0] * nums[1],
nums[-1] * nums[-2] * nums[-3])
| Solution |
python | ray-project__ray | python/ray/_private/thirdparty/pynvml/pynvml.py | {
"start": 106899,
"end": 107619
} | class ____(Structure):
_fields_ = [('isCecAttestationReportPresent', c_uint), # output
('attestationReportSize', c_uint), # output
('cecAttestationReportSize', c_uint), # output
('nonce', c_uint8 * NVML_CC_GPU_CEC_NONCE_SIZE), # input: spdm supports 32 bytes on nonce
('attestationReport', c_uint8 * NVML_CC_GPU_ATTESTATION_REPORT_SIZE), # output
('cecAttestationReport', c_uint8 * NVML_CC_GPU_CEC_ATTESTATION_REPORT_SIZE), # output
]
| c_nvmlConfComputeGpuAttestationReport_t |
python | allegroai__clearml | clearml/backend_api/services/v2_23/tasks.py | {
"start": 307318,
"end": 313016
} | class ____(Request):
"""
Add or update task hyper parameters
:param task: Task ID
:type task: str
:param hyperparams: Task hyper parameters. The new ones will be added and the
already existing ones will be updated
:type hyperparams: Sequence[ParamsItem]
:param replace_hyperparams: Can be set to one of the following: 'all' - all the
hyper parameters will be replaced with the provided ones 'section' - the
sections that present in the new parameters will be replaced with the provided
parameters 'none' (the default value) - only the specific parameters will be
updated or added
:type replace_hyperparams: ReplaceHyperparamsEnum
:param force: If set to True then both new and running task hyper params can be
edited. Otherwise only the new task ones. Default is False
:type force: bool
"""
_service = "tasks"
_action = "edit_hyper_params"
_version = "2.23"
_schema = {
"definitions": {
"params_item": {
"properties": {
"description": {
"description": "The parameter description. Optional",
"type": ["string", "null"],
},
"name": {
"description": "Name of the parameter. The combination of section and name should be unique",
"type": ["string", "null"],
},
"section": {
"description": "Section that the parameter belongs to",
"type": ["string", "null"],
},
"type": {
"description": "Type of the parameter. Optional",
"type": ["string", "null"],
},
"value": {
"description": "Value of the parameter",
"type": ["string", "null"],
},
},
"type": "object",
},
"replace_hyperparams_enum": {
"enum": ["none", "section", "all"],
"type": "string",
},
},
"properties": {
"force": {
"description": (
"If set to True then both new and running task hyper params can be edited. Otherwise only the new"
" task ones. Default is False"
),
"type": "boolean",
},
"hyperparams": {
"description": (
"Task hyper parameters. The new ones will be added and the already existing ones will be updated"
),
"items": {"$ref": "#/definitions/params_item"},
"type": "array",
},
"replace_hyperparams": {
"$ref": "#/definitions/replace_hyperparams_enum",
"description": (
"Can be set to one of the following:\n 'all' - all the hyper parameters will be"
" replaced with the provided ones\n 'section' - the sections that present in"
" the new parameters will be replaced with the provided parameters\n 'none'"
" (the default value) - only the specific parameters will be updated or added"
),
},
"task": {"description": "Task ID", "type": "string"},
},
"required": ["task", "hyperparams"],
"type": "object",
}
def __init__(
self, task, hyperparams, replace_hyperparams=None, force=None, **kwargs
):
super(EditHyperParamsRequest, self).__init__(**kwargs)
self.task = task
self.hyperparams = hyperparams
self.replace_hyperparams = replace_hyperparams
self.force = force
@schema_property("task")
def task(self):
return self._property_task
@task.setter
def task(self, value):
if value is None:
self._property_task = None
return
self.assert_isinstance(value, "task", six.string_types)
self._property_task = value
@schema_property("hyperparams")
def hyperparams(self):
return self._property_hyperparams
@hyperparams.setter
def hyperparams(self, value):
if value is None:
self._property_hyperparams = None
return
self.assert_isinstance(value, "hyperparams", (dict, ParamsItem), is_array=True)
value = [(ParamsItem(**v) if isinstance(v, dict) else v) for v in value]
self._property_hyperparams = value
@schema_property("replace_hyperparams")
def replace_hyperparams(self):
return self._property_replace_hyperparams
@replace_hyperparams.setter
def replace_hyperparams(self, value):
if value is None:
self._property_replace_hyperparams = None
return
if isinstance(value, six.string_types):
try:
value = ReplaceHyperparamsEnum(value)
except ValueError:
pass
else:
self.assert_isinstance(value, "replace_hyperparams", enum.Enum)
self._property_replace_hyperparams = value
@schema_property("force")
def force(self):
return self._property_force
@force.setter
def force(self, value):
if value is None:
self._property_force = None
return
self.assert_isinstance(value, "force", (bool,))
self._property_force = value
| EditHyperParamsRequest |
python | kamyu104__LeetCode-Solutions | Python/recover-a-tree-from-preorder-traversal.py | {
"start": 1041,
"end": 1742
} | class ____(object):
def recoverFromPreorder(self, S):
"""
:type S: str
:rtype: TreeNode
"""
def recoverFromPreorderHelper(S, level, i):
j = i[0]
while j < len(S) and S[j] == '-':
j += 1
if level != j - i[0]:
return None
i[0] = j
while j < len(S) and S[j] != '-':
j += 1
node = TreeNode(int(S[i[0]:j]))
i[0] = j
node.left = recoverFromPreorderHelper(S, level+1, i)
node.right = recoverFromPreorderHelper(S, level+1, i)
return node
return recoverFromPreorderHelper(S, 0, [0])
| Solution2 |
python | scipy__scipy | scipy/sparse/linalg/_dsolve/tests/test_linsolve.py | {
"start": 6814,
"end": 16639
} | class ____:
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_array((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Matrix is exactly singular", MatrixRankWarning)
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_array((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
# should either raise a runtime error or return value
# appropriate for singular input (which yields the warning)
with warnings.catch_warnings():
warnings.filterwarnings(
"ignore", "Matrix is exactly singular", MatrixRankWarning)
x = spsolve(A, b)
assert not np.isfinite(x).any()
except RuntimeError:
pass
@pytest.mark.parametrize('format', ['csc', 'csr'])
@pytest.mark.parametrize('idx_dtype', [np.int32, np.int64])
def test_twodiags(self, format: str, idx_dtype: np.dtype):
A = dia_array(([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1]),
shape=(5, 5)).asformat(format)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
Asp = A.astype(t)
Asp.indices = Asp.indices.astype(idx_dtype, copy=False)
Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False)
x = spsolve(Asp, b)
assert_(norm(b - Asp@x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_array(Adense)
rng = np.random.default_rng(1234)
x = rng.standard_normal(3)
b = As@x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_array(Adense)
rng = np.random.default_rng(1234)
x = rng.standard_normal((3, 4))
Bdense = As.dot(x)
Bs = csc_array(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.toarray())
def test_non_square(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_array(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
def test_example_comparison(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_array((data,(row,col)), shape=(3,3), dtype=float)
M = sM.toarray()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_array((data, (row,col)), shape=(3,3), dtype=float)
N = sN.toarray()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.toarray())
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
use_solver(useUmfpack=True)
A = csc_array([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_array([[1], [6]]),
csr_array([[1], [6]]),
dok_array([[1], [6]]),
bsr_array([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_array([[1., 2., 3.], [6., 8., 10.]]),
csr_array([[1., 2., 3.], [6., 8., 10.]]),
dok_array([[1., 2., 3.], [6., 8., 10.]]),
bsr_array([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_array, csr_array, dok_array, lil_array]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
# interprets also these as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x,
err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x,
err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if issparse(b) and x.ndim > 1:
assert_(issparse(x1), repr((b, spmattype, 1)))
assert_(issparse(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_array((3, 3))
b = csc_array((1, 3))
assert_raises(ValueError, spsolve, A, b)
def test_ndarray_support(self):
with warnings.catch_warnings():
warnings.simplefilter('ignore', SparseEfficiencyWarning)
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = dia_array(((d, 2*d, d[::-1]), (-3, 0, 5)), shape=(N, N))
for container in (csc_array, csr_array):
A = container(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = f"{container!r} {badop!r}"
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(A.format == 'csc'), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(A.format == 'csc'), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(A.format == 'csc'), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_array([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_array([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.toarray(), b.toarray(), atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_array([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_array([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
x = spsolve(A, b)
assert_array_almost_equal(A @ x, b)
| TestLinsolve |
python | PrefectHQ__prefect | src/prefect/infrastructure/provisioners/coiled.py | {
"start": 742,
"end": 8971
} | class ____:
"""
A infrastructure provisioner for Coiled push work pools.
"""
def __init__(self, client: Optional["PrefectClient"] = None):
self._console = Console()
@property
def console(self) -> Console:
return self._console
@console.setter
def console(self, value: Console) -> None:
self._console = value
@staticmethod
def _is_coiled_installed() -> bool:
"""
Checks if the coiled package is installed.
Returns:
True if the coiled package is installed, False otherwise
"""
try:
importlib.import_module("coiled")
return True
except ModuleNotFoundError:
return False
async def _install_coiled(self):
"""
Installs the coiled package.
"""
with Progress(
SpinnerColumn(),
TextColumn("[bold blue]Installing coiled..."),
transient=True,
console=self.console,
) as progress:
task = progress.add_task("coiled install")
progress.start()
global coiled
await ainstall_packages(["coiled"])
coiled = importlib.import_module("coiled")
progress.advance(task)
async def _get_coiled_token(self) -> str:
"""
Gets a Coiled API token from the current Coiled configuration.
"""
import dask.config
return dask.config.get("coiled.token", "")
async def _create_new_coiled_token(self):
"""
Triggers a Coiled login via the browser if no current token. Will create a new token.
"""
await run_process(["coiled", "login"])
async def _create_coiled_credentials_block(
self,
block_document_name: str,
coiled_token: str,
client: "PrefectClient",
) -> BlockDocument:
"""
Creates a CoiledCredentials block containing the provided token.
Args:
block_document_name: The name of the block document to create
coiled_token: The Coiled API token
Returns:
The ID of the created block
"""
assert client is not None, "client injection failed"
try:
credentials_block_type = await client.read_block_type_by_slug(
"coiled-credentials"
)
except ObjectNotFound:
# Shouldn't happen, but just in case
raise RuntimeError(
"Unable to find CoiledCredentials block type. Please ensure you are"
" using Prefect Cloud."
)
credentials_block_schema = (
await client.get_most_recent_block_schema_for_block_type(
block_type_id=credentials_block_type.id
)
)
assert credentials_block_schema is not None, (
f"Unable to find schema for block type {credentials_block_type.slug}"
)
block_doc = await client.create_block_document(
block_document=BlockDocumentCreate(
name=block_document_name,
data={
"api_token": coiled_token,
},
block_type_id=credentials_block_type.id,
block_schema_id=credentials_block_schema.id,
)
)
return block_doc
@inject_client
async def provision(
self,
work_pool_name: str,
base_job_template: Dict[str, Any],
client: Optional["PrefectClient"] = None,
) -> Dict[str, Any]:
"""
Provisions resources necessary for a Coiled push work pool.
Provisioned resources:
- A CoiledCredentials block containing a Coiled API token
Args:
work_pool_name: The name of the work pool to provision resources for
base_job_template: The base job template to update
Returns:
A copy of the provided base job template with the provisioned resources
"""
credentials_block_name = f"{work_pool_name}-coiled-credentials"
base_job_template_copy = deepcopy(base_job_template)
assert client is not None, "client injection failed"
try:
block_doc = await client.read_block_document_by_name(
credentials_block_name, "coiled-credentials"
)
self.console.print(
f"Work pool [blue]{work_pool_name!r}[/] will reuse the existing Coiled"
f" credentials block [blue]{credentials_block_name!r}[/blue]"
)
except ObjectNotFound:
if self._console.is_interactive and not Confirm.ask(
(
"\n"
"To configure your Coiled push work pool we'll need to store a Coiled"
" API token with Prefect Cloud as a block. We'll pull the token from"
" your local Coiled configuration or create a new token if we"
" can't find one.\n"
"\n"
"Would you like to continue?"
),
console=self.console,
default=True,
):
self.console.print(
"No problem! You can always configure your Coiled push work pool"
" later via the Prefect UI."
)
return base_job_template
if not self._is_coiled_installed():
if self.console.is_interactive and Confirm.ask(
(
"The [blue]coiled[/] package is required to configure"
" authentication for your work pool.\n"
"\n"
"Would you like to install it now?"
),
console=self.console,
default=True,
):
await self._install_coiled()
if not self._is_coiled_installed():
raise RuntimeError(
"The coiled package is not installed.\n\nPlease try installing coiled,"
" or you can use the Prefect UI to create your Coiled push work pool."
)
# Get the current Coiled API token
coiled_api_token = await self._get_coiled_token()
if not coiled_api_token:
# Create a new token one wasn't found
if self.console.is_interactive and Confirm.ask(
"Coiled credentials not found. Would you like to create a new token?",
console=self.console,
default=True,
):
await self._create_new_coiled_token()
coiled_api_token = await self._get_coiled_token()
else:
raise RuntimeError(
"Coiled credentials not found. Please create a new token by"
" running [blue]coiled login[/] and try again."
)
# Create the credentials block
with Progress(
SpinnerColumn(),
TextColumn("[bold blue]Saving Coiled credentials..."),
transient=True,
console=self.console,
) as progress:
task = progress.add_task("create coiled credentials block")
progress.start()
block_doc = await self._create_coiled_credentials_block(
credentials_block_name,
coiled_api_token,
client=client,
)
progress.advance(task)
base_job_template_copy["variables"]["properties"]["credentials"]["default"] = {
"$ref": {"block_document_id": str(block_doc.id)}
}
if "image" in base_job_template_copy["variables"]["properties"]:
base_job_template_copy["variables"]["properties"]["image"]["default"] = ""
self.console.print(
f"Successfully configured Coiled push work pool {work_pool_name!r}!",
style="green",
)
return base_job_template_copy
| CoiledPushProvisioner |
python | fabric__fabric | fabric/exceptions.py | {
"start": 562,
"end": 698
} | class ____(Exception):
"""
Raised when attempting to import a Fabric 1 ``env`` which is missing data.
"""
pass
| InvalidV1Env |
python | kubernetes-client__python | kubernetes/client/models/v1_pod_failure_policy.py | {
"start": 383,
"end": 4278
} | class ____(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'rules': 'list[V1PodFailurePolicyRule]'
}
attribute_map = {
'rules': 'rules'
}
def __init__(self, rules=None, local_vars_configuration=None): # noqa: E501
"""V1PodFailurePolicy - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._rules = None
self.discriminator = None
self.rules = rules
@property
def rules(self):
"""Gets the rules of this V1PodFailurePolicy. # noqa: E501
A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed. # noqa: E501
:return: The rules of this V1PodFailurePolicy. # noqa: E501
:rtype: list[V1PodFailurePolicyRule]
"""
return self._rules
@rules.setter
def rules(self, rules):
"""Sets the rules of this V1PodFailurePolicy.
A list of pod failure policy rules. The rules are evaluated in order. Once a rule matches a Pod failure, the remaining of the rules are ignored. When no rule matches the Pod failure, the default handling applies - the counter of pod failures is incremented and it is checked against the backoffLimit. At most 20 elements are allowed. # noqa: E501
:param rules: The rules of this V1PodFailurePolicy. # noqa: E501
:type: list[V1PodFailurePolicyRule]
"""
if self.local_vars_configuration.client_side_validation and rules is None: # noqa: E501
raise ValueError("Invalid value for `rules`, must not be `None`") # noqa: E501
self._rules = rules
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PodFailurePolicy):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PodFailurePolicy):
return True
return self.to_dict() != other.to_dict()
| V1PodFailurePolicy |
python | has2k1__plotnine | plotnine/scales/scale_color.py | {
"start": 6147,
"end": 6919
} | class ____(_scale_color_continuous):
"""
Create a desaturated color gradient
See Also
--------
mizani.palettes.desaturate_pal : The palette class that generates
the desaturated colours.
"""
color: InitVar[str] = "red"
"""
Color to desaturate
"""
prop: InitVar[float] = 0
"""
Saturation channel of color will be multiplied by this value.
"""
reverse: InitVar[bool] = False
"""
Whether to go from color to desaturated color or desaturated color
to color.
"""
def __post_init__(self, color, prop, reverse):
from mizani.palettes import desaturate_pal
super().__post_init__()
self.palette = desaturate_pal(color, prop, reverse)
@dataclass
| scale_color_desaturate |
python | google__pytype | pytype/abstract/_instances.py | {
"start": 2660,
"end": 5842
} | class ____(_instance_base.Instance, mixin.LazyMembers, types.Module):
"""Represents an (imported) module."""
def __init__(
self,
ctx: "context.Context",
name: str,
member_map: dict[str, cfg.Variable],
ast: pytd.TypeDeclUnit,
) -> None:
super().__init__(ctx.convert.module_type, ctx)
self.name = name
self.ast = ast
mixin.LazyMembers.init_mixin(self, member_map)
def _convert_member(self, name: str, member, subst=None):
"""Called to convert the items in _member_map to cfg.Variable."""
if isinstance(member, pytd.Alias) and isinstance(member.type, pytd.Module):
module = self.ctx.vm.import_module(
member.type.module_name, member.type.module_name, 0
)
if not module:
raise abstract_utils.ModuleLoadError()
return module.to_variable(self.ctx.root_node)
var = self.ctx.convert.constant_to_var(member)
for value in var.data:
# Only do this if this is a class which isn't already part of a module, or
# is a module itself.
# (This happens if e.g. foo.py does "from bar import x" and we then
# do "from foo import x".)
if not value.module and not isinstance(value, Module):
value.module = self.name
return var
@property
def module(self) -> None:
return None
@module.setter
def module(self, m: str) -> None:
assert m is None or m == self.ast.name, (m, self.ast.name)
@property
def full_name(self) -> str:
return self.ast.name
def has_getattr(self) -> bool:
"""Does this module have a module-level __getattr__?
We allow __getattr__ on the module level to specify that this module doesn't
have any contents. The typical syntax is
def __getattr__(name) -> Any
.
See https://www.python.org/dev/peps/pep-0484/#stub-files
Returns:
True if we have __getattr__.
"""
f = self._member_map.get("__getattr__")
if f:
if isinstance(f, pytd.Function):
if len(f.signatures) != 1:
log.warning("overloaded module-level __getattr__ (in %s)", self.name)
elif f.signatures[0].return_type != pytd.AnythingType():
log.warning(
"module-level __getattr__ doesn't return Any (in %s)", self.name
)
return True
else:
log.warning("__getattr__ in %s is not a function", self.name)
return False
def get_submodule(self, node: cfg.CFGNode, name: str) -> cfg.Variable | None:
full_name = self.name + "." + name
mod = self.ctx.vm.import_module(
full_name, full_name, 0
) # 0: absolute import
if mod is not None:
return mod.to_variable(node)
elif self.has_getattr():
return self.ctx.new_unsolvable(node)
else:
log.warning("Couldn't find attribute / module %r", full_name)
return None
def items(self) -> list[tuple[str, cfg.Variable]]:
for name in self._member_map:
self.load_lazy_attribute(name)
return list(self.members.items())
def get_fullhash(self, seen: set[int] | None = None) -> int:
"""Hash the set of member names."""
return hash((type(self), self.full_name) + tuple(sorted(self._member_map)))
| Module |
python | getsentry__sentry | src/sentry/hybridcloud/services/tombstone/service.py | {
"start": 1363,
"end": 1976
} | class ____(RpcService):
key = "region_tombstone"
local_mode = SiloMode.REGION
@classmethod
def get_local_implementation(cls) -> RpcService:
from .impl import DatabaseBackedRegionTombstoneService
return DatabaseBackedRegionTombstoneService()
@regional_rpc_method(resolve=ByRegionName())
@abstractmethod
def record_remote_tombstone(self, *, region_name: str, tombstone: RpcTombstone) -> None:
pass
region_tombstone_service = RegionTombstoneService.create_delegation()
control_tombstone_service = ControlTombstoneService.create_delegation()
| RegionTombstoneService |
python | getsentry__sentry | tests/sentry/api/serializers/test_incident_activity.py | {
"start": 402,
"end": 2236
} | class ____(TestCase, SnubaTestCase):
def test_simple(self) -> None:
activity = create_incident_activity(
incident=self.create_incident(),
activity_type=IncidentActivityType.CREATED,
)
result = serialize(activity)
assert result["id"] == str(activity.id)
assert result["incidentIdentifier"] == str(activity.incident.identifier)
assert result["type"] == activity.type
assert result["value"] is None
assert result["previousValue"] is None
assert result["dateCreated"] == activity.date_added
def test_event_stats(self) -> None:
now = datetime.now()
with freeze_time((now - timedelta(days=1)).replace(hour=12, minute=30, second=25)):
for _ in range(2):
self.store_event(
data={
"event_id": uuid4().hex,
"fingerprint": ["group1"],
"timestamp": before_now(seconds=1).isoformat(),
},
project_id=self.project.id,
)
incident = self.create_incident(
date_started=timezone.now() - timedelta(hours=2), projects=[self.project], query=""
)
activity = create_incident_activity(
incident=incident,
activity_type=IncidentActivityType.CREATED,
)
result = serialize(activity)
assert result["id"] == str(activity.id)
assert result["incidentIdentifier"] == str(activity.incident.identifier)
assert result["type"] == activity.type
assert result["value"] is None
assert result["previousValue"] is None
assert result["dateCreated"] == activity.date_added
| IncidentActivitySerializerTest |
python | walkccc__LeetCode | solutions/2115. Find All Possible Recipes from Given Supplies/2115.py | {
"start": 0,
"end": 823
} | class ____:
def findAllRecipes(
self,
recipes: list[str],
ingredients: list[list[str]],
supplies: list[str],
) -> list[str]:
ans = []
supplies = set(supplies)
graph = collections.defaultdict(list)
inDegrees = collections.Counter()
q = collections.deque()
# Build the graph.
for i, recipe in enumerate(recipes):
for ingredient in ingredients[i]:
if ingredient not in supplies:
graph[ingredient].append(recipe)
inDegrees[recipe] += 1
# Perform topological sorting.
for recipe in recipes:
if inDegrees[recipe] == 0:
q.append(recipe)
while q:
u = q.popleft()
ans.append(u)
for v in graph[u]:
inDegrees[v] -= 1
if inDegrees[v] == 0:
q.append(v)
return ans
| Solution |
python | pydantic__pydantic | pydantic/warnings.py | {
"start": 4154,
"end": 4262
} | class ____(UserWarning):
"""A warning raised during core schema generation."""
| CoreSchemaGenerationWarning |
python | networkx__networkx | networkx/algorithms/isomorphism/isomorphvf2.py | {
"start": 39801,
"end": 43048
} | class ____:
"""Internal representation of state for the GraphMatcher class.
This class is used internally by the GraphMatcher class. It is used
only to store state specific data. There will be at most G2.order() of
these objects in memory at a time, due to the depth-first search
strategy employed by the VF2 algorithm.
"""
def __init__(self, GM, G1_node=None, G2_node=None):
"""Initializes GMState object.
Pass in the GraphMatcher to which this GMState belongs and the
new node pair that will be added to the GraphMatcher's current
isomorphism mapping.
"""
self.GM = GM
# Initialize the last stored node pair.
self.G1_node = None
self.G2_node = None
self.depth = len(GM.core_1)
if G1_node is None or G2_node is None:
# Then we reset the class variables
GM.core_1 = {}
GM.core_2 = {}
GM.inout_1 = {}
GM.inout_2 = {}
# Watch out! G1_node == 0 should evaluate to True.
if G1_node is not None and G2_node is not None:
# Add the node pair to the isomorphism mapping.
GM.core_1[G1_node] = G2_node
GM.core_2[G2_node] = G1_node
# Store the node that was added last.
self.G1_node = G1_node
self.G2_node = G2_node
# Now we must update the other two vectors.
# We will add only if it is not in there already!
self.depth = len(GM.core_1)
# First we add the new nodes...
if G1_node not in GM.inout_1:
GM.inout_1[G1_node] = self.depth
if G2_node not in GM.inout_2:
GM.inout_2[G2_node] = self.depth
# Now we add every other node...
# Updates for T_1^{inout}
new_nodes = set()
for node in GM.core_1:
new_nodes.update(
[neighbor for neighbor in GM.G1[node] if neighbor not in GM.core_1]
)
for node in new_nodes:
if node not in GM.inout_1:
GM.inout_1[node] = self.depth
# Updates for T_2^{inout}
new_nodes = set()
for node in GM.core_2:
new_nodes.update(
[neighbor for neighbor in GM.G2[node] if neighbor not in GM.core_2]
)
for node in new_nodes:
if node not in GM.inout_2:
GM.inout_2[node] = self.depth
def restore(self):
"""Deletes the GMState object and restores the class variables."""
# First we remove the node that was added from the core vectors.
# Watch out! G1_node == 0 should evaluate to True.
if self.G1_node is not None and self.G2_node is not None:
del self.GM.core_1[self.G1_node]
del self.GM.core_2[self.G2_node]
# Now we revert the other two vectors.
# Thus, we delete all entries which have this depth level.
for vector in (self.GM.inout_1, self.GM.inout_2):
for node in list(vector.keys()):
if vector[node] == self.depth:
del vector[node]
| GMState |
python | joerick__pyinstrument | pyinstrument/renderers/console.py | {
"start": 501,
"end": 11913
} | class ____(FrameRenderer):
"""
Produces text-based output, suitable for text files or ANSI-compatible
consoles.
"""
def __init__(
self,
show_all: bool = False,
timeline: bool = False,
processor_options: dict[str, Any] | None = None,
unicode: bool = False,
color: bool = False,
flat: bool = False,
time: LiteralStr["seconds", "percent_of_total"] = "seconds",
flat_time: FlatTimeMode = "self",
short_mode: bool = False,
) -> None:
"""
:param unicode: Use unicode, like box-drawing characters in the output.
:param color: Enable color support, using ANSI color sequences.
:param flat: Display a flat profile instead of a call graph.
:param time: How to display the duration of each frame - ``'seconds'`` or ``'percent_of_total'``
:param flat_time: Show ``'self'`` time or ``'total'`` time (including children) in flat profile.
:param short_mode: Display a short version of the output.
:param show_all: See :class:`FrameRenderer`.
:param timeline: See :class:`FrameRenderer`.
:param processor_options: See :class:`FrameRenderer`.
"""
super().__init__(show_all=show_all, timeline=timeline, processor_options=processor_options)
self.unicode = unicode
self.color = color
self.flat = flat
self.time = time
self.flat_time = flat_time
self.short_mode = short_mode
if self.flat and self.timeline:
raise Renderer.MisconfigurationError("Cannot use timeline and flat options together.")
self.colors = self.colors_enabled if color else self.colors_disabled
def render(self, session: Session) -> str:
result = self.render_preamble(session)
frame = self.preprocess(session.root_frame())
indent = ". " if self.short_mode else ""
precision = math.ceil(-math.log10(min(max(1e-9, session.max_interval), 1)))
if frame is None:
result += f"{indent}No samples were recorded.\n"
else:
self.root_frame = frame
if self.flat:
result += self.render_frame_flat(self.root_frame, precision=precision)
else:
result += self.render_frame(
self.root_frame, precision=precision, indent=indent, child_indent=indent
)
result += f"{indent}\n"
if self.short_mode:
result += "." * 53 + "\n\n"
return result
# pylint: disable=W1401
def render_preamble(self, session: Session) -> str:
if self.short_mode:
return textwrap.dedent(
f"""
pyinstrument ........................................
.
. {session.target_description}
.
"""
)
lines = [
r"",
r" _ ._ __/__ _ _ _ _ _/_ ",
r" /_//_/// /_\ / //_// / //_'/ // ",
r"/ _/ {:>20}".format("v" + pyinstrument.__version__),
]
lines[1] += " Recorded: {:<9}".format(
time.strftime("%X", time.localtime(session.start_time))
)
lines[2] += f" Duration: {session.duration:<9.3f}"
lines[1] += f" Samples: {session.sample_count}"
lines[2] += f" CPU time: {session.cpu_time:.3f}"
lines.append("")
lines.append(session.target_description)
lines.append("")
lines.append("")
return "\n".join(lines)
def should_render_frame(self, frame: Frame) -> bool:
if frame.group and not self.should_ignore_group(frame.group):
return self.should_render_frame_in_group(frame)
return True
def should_render_frame_in_group(self, frame: Frame) -> bool:
# Only render the root frame, or frames that are significant
assert frame.group
return (
frame.group.root == frame
or frame.total_self_time > 0.2 * self.root_frame.time
or frame in frame.group.exit_frames
)
def should_ignore_group(self, group: FrameGroup) -> bool:
"""
If a group is ignored, its frames are all printed - they're not hidden.
"""
hidden_frames = [f for f in group.frames if not self.should_render_frame_in_group(f)]
# don't bother printing groups with one/zero hidden frames
return len(hidden_frames) < 2
def group_description(self, group: FrameGroup) -> str:
hidden_frames = [f for f in group.frames if not self.should_render_frame(f)]
libraries = self.libraries_for_frames(hidden_frames)
return "[{count} frames hidden] {c.faint}{libraries}{c.end}\n".format(
count=len(hidden_frames),
libraries=truncate(", ".join(libraries), 40),
c=self.colors,
)
def libraries_for_frames(self, frames: list[Frame]) -> list[str]:
libraries: list[str] = []
for frame in frames:
if frame.file_path_short:
library = re.split(r"[\\/\.]", frame.file_path_short, maxsplit=1)[0]
if library and library not in libraries:
libraries.append(library)
return libraries
def render_frame(
self, frame: Frame, precision: int, indent: str = "", child_indent: str = ""
) -> str:
if self.should_render_frame(frame):
result = f"{indent}{self.frame_description(frame, precision=precision)}\n"
if self.unicode:
indents = {"├": "├─ ", "│": "│ ", "└": "└─ ", " ": " "}
else:
indents = {"├": "|- ", "│": "| ", "└": "`- ", " ": " "}
if (
frame.group
and frame.group.root == frame
and not self.should_ignore_group(frame.group)
):
result += f"{child_indent} {self.group_description(frame.group)}"
for key in indents:
indents[key] = " "
else:
result = ""
indents = {"├": "", "│": "", "└": "", " ": ""}
if frame.children:
children_to_be_rendered_indices = [
i for i, f in enumerate(frame.children) if self.should_render_frame(f)
]
last_rendered_child_index = (
children_to_be_rendered_indices[-1] if children_to_be_rendered_indices else -1
)
for i, child in enumerate(frame.children):
if i < last_rendered_child_index:
c_indent = child_indent + indents["├"]
cc_indent = child_indent + indents["│"]
else:
c_indent = child_indent + indents["└"]
cc_indent = child_indent + indents[" "]
result += self.render_frame(
child, precision=precision, indent=c_indent, child_indent=cc_indent
)
return result
def render_frame_flat(self, frame: Frame, precision: int) -> str:
def walk(frame: Frame):
frame_id_to_time[frame.identifier] = (
frame_id_to_time.get(frame.identifier, 0) + frame.total_self_time
if self.flat_time == "self"
else frame.time
)
frame_id_to_frame[frame.identifier] = frame
for child in frame.children:
walk(child)
frame_id_to_time: Dict[str, float] = {}
frame_id_to_frame: Dict[str, Frame] = {}
walk(frame)
id_time_pairs: List[Tuple[str, float]] = sorted(
frame_id_to_time.items(), key=(lambda item: item[1]), reverse=True
)
if not self.show_all:
# remove nodes that represent less than 0.1% of the total time
id_time_pairs = [
pair for pair in id_time_pairs if pair[1] / self.root_frame.time > 0.001
]
result = ""
for frame_id, self_time in id_time_pairs:
result += self.frame_description(
frame_id_to_frame[frame_id], precision=precision, override_time=self_time
)
result += "\n"
return result
def frame_description(
self, frame: Frame, *, precision: int = 3, override_time: float | None = None
) -> str:
time = override_time if override_time is not None else frame.time
time_color = self._ansi_color_for_time(time)
if self.time == "percent_of_total":
time_str = f"{self.frame_proportion_of_total_time(time) * 100:.1f}%"
else:
time_str = f"{time:.{precision}f}"
value_str = f"{time_color}{time_str}{self.colors.end}"
class_name = frame.class_name
if class_name:
function_name = f"{class_name}.{frame.function}"
else:
function_name = frame.function
function_color = self._ansi_color_for_name(frame)
function_str = f"{function_color}{function_name}{self.colors.end}"
code_position_short = frame.code_position_short()
if code_position_short:
code_position_str = f"{self.colors.faint}{code_position_short}{self.colors.end}"
else:
code_position_str = ""
return f"{value_str} {function_str} {code_position_str}"
def frame_proportion_of_total_time(self, time: float) -> float:
if self.root_frame.time == 0:
return 1
return time / self.root_frame.time
def _ansi_color_for_time(self, time: float) -> str:
proportion_of_total = self.frame_proportion_of_total_time(time)
if proportion_of_total > 0.6:
return self.colors.red
elif proportion_of_total > 0.2:
return self.colors.yellow
elif proportion_of_total > 0.05:
return self.colors.green
else:
return self.colors.bright_green + self.colors.faint
def _ansi_color_for_name(self, frame: Frame) -> str:
if frame.is_application_code:
return self.colors.bg_dark_blue_255 + self.colors.white_255
else:
return ""
def default_processors(self) -> ProcessorList:
return [
processors.remove_importlib,
processors.remove_tracebackhide,
processors.merge_consecutive_self_time,
processors.aggregate_repeated_calls,
processors.remove_irrelevant_nodes,
processors.remove_unnecessary_self_time_nodes,
processors.remove_first_pyinstrument_frames_processor,
processors.group_library_frames_processor,
]
class colors_enabled:
red = "\033[31m"
green = "\033[32m"
yellow = "\033[33m"
blue = "\033[34m"
cyan = "\033[36m"
bright_green = "\033[92m"
white = "\033[37m\033[97m"
bg_dark_blue_255 = "\033[48;5;24m"
white_255 = "\033[38;5;15m"
bold = "\033[1m"
faint = "\033[2m"
end = "\033[0m"
class colors_disabled:
red = ""
green = ""
yellow = ""
blue = ""
cyan = ""
bright_green = ""
white = ""
bg_dark_blue_255 = ""
white_255 = ""
bold = ""
faint = ""
end = ""
| ConsoleRenderer |
python | PyCQA__pylint | pylint/message/message.py | {
"start": 480,
"end": 2165
} | class ____: # pylint: disable=too-many-instance-attributes
"""This class represent a message to be issued by the reporters."""
msg_id: str
symbol: str
msg: str
C: str
category: str
confidence: Confidence
abspath: str
path: str
module: str
obj: str
line: int
column: int
end_line: int | None
end_column: int | None
def __init__(
self,
msg_id: str,
symbol: str,
location: MessageLocationTuple,
msg: str,
confidence: Confidence | None,
) -> None:
self.msg_id = msg_id
self.symbol = symbol
self.msg = msg
self.C = msg_id[0]
self.category = MSG_TYPES[msg_id[0]]
self.confidence = confidence or UNDEFINED
self.abspath = location.abspath
self.path = location.path
self.module = location.module
self.obj = location.obj
self.line = location.line
self.column = location.column
self.end_line = location.end_line
self.end_column = location.end_column
def format(self, template: str) -> str:
"""Format the message according to the given template.
The template format is the one of the format method :
cf. https://docs.python.org/2/library/string.html#formatstrings
"""
return template.format(**asdict(self))
@property
def location(self) -> MessageLocationTuple:
return MessageLocationTuple(
self.abspath,
self.path,
self.module,
self.obj,
self.line,
self.column,
self.end_line,
self.end_column,
)
| Message |
python | langchain-ai__langchain | libs/core/langchain_core/example_selectors/semantic_similarity.py | {
"start": 802,
"end": 3243
} | class ____(BaseExampleSelector, BaseModel, ABC):
"""Example selector that selects examples based on SemanticSimilarity."""
vectorstore: VectorStore
"""VectorStore that contains information about examples."""
k: int = 4
"""Number of examples to select."""
example_keys: list[str] | None = None
"""Optional keys to filter examples to."""
input_keys: list[str] | None = None
"""Optional keys to filter input to. If provided, the search is based on
the input variables instead of all variables."""
vectorstore_kwargs: dict[str, Any] | None = None
"""Extra arguments passed to similarity_search function of the `VectorStore`."""
model_config = ConfigDict(
arbitrary_types_allowed=True,
extra="forbid",
)
@staticmethod
def _example_to_text(example: dict[str, str], input_keys: list[str] | None) -> str:
if input_keys:
return " ".join(sorted_values({key: example[key] for key in input_keys}))
return " ".join(sorted_values(example))
def _documents_to_examples(self, documents: list[Document]) -> list[dict]:
# Get the examples from the metadata.
# This assumes that examples are stored in metadata.
examples = [dict(e.metadata) for e in documents]
# If example keys are provided, filter examples to those keys.
if self.example_keys:
examples = [{k: eg[k] for k in self.example_keys} for eg in examples]
return examples
def add_example(self, example: dict[str, str]) -> str:
"""Add a new example to vectorstore.
Args:
example: A dictionary with keys as input variables
and values as their values.
Returns:
The ID of the added example.
"""
ids = self.vectorstore.add_texts(
[self._example_to_text(example, self.input_keys)], metadatas=[example]
)
return ids[0]
async def aadd_example(self, example: dict[str, str]) -> str:
"""Async add new example to vectorstore.
Args:
example: A dictionary with keys as input variables
and values as their values.
Returns:
The ID of the added example.
"""
ids = await self.vectorstore.aadd_texts(
[self._example_to_text(example, self.input_keys)], metadatas=[example]
)
return ids[0]
| _VectorStoreExampleSelector |
python | pallets__werkzeug | examples/cupoftee/pages.py | {
"start": 1451,
"end": 1670
} | class ____(Page):
url_rule = "/server/<id>"
def process(self, id):
try:
self.server = self.cup.server_browser.servers[id]
except KeyError:
raise NotFound() from None
| Server |
python | weaviate__weaviate-python-client | weaviate/collections/queries/base_executor.py | {
"start": 1646,
"end": 24318
} | class ____(Generic[ConnectionType]):
def __init__(
self,
connection: ConnectionType,
name: str,
consistency_level: Optional[ConsistencyLevel],
tenant: Optional[str],
properties: Optional[Type[WeaviateProperties]],
references: Optional[Type[Optional[Mapping[str, Any]]]],
validate_arguments: bool,
) -> None:
self._connection = connection
self._name = name
self.__tenant = tenant
self.__consistency_level = consistency_level
self._properties = properties
self._references = references
self._validate_arguments = validate_arguments
self.__uses_125_api = connection._weaviate_version.is_at_least(1, 25, 0)
self.__uses_127_api = connection._weaviate_version.is_at_least(1, 27, 0)
self._query = _QueryGRPC(
connection._weaviate_version,
self._name,
self.__tenant,
self.__consistency_level,
validate_arguments=self._validate_arguments,
uses_125_api=self.__uses_125_api,
uses_127_api=self.__uses_127_api,
)
def __retrieve_timestamp(
self,
timestamp: int,
) -> datetime.datetime:
# Handle the case in which last_update_time_unix is in nanoseconds or milliseconds, issue #958
if len(str(timestamp)) <= 13:
return datetime.datetime.fromtimestamp(timestamp / 1000, tz=datetime.timezone.utc)
else:
return datetime.datetime.fromtimestamp(timestamp / 1e9, tz=datetime.timezone.utc)
def __extract_metadata_for_object(
self,
add_props: "search_get_pb2.MetadataResult",
) -> MetadataReturn:
meta = MetadataReturn(
distance=add_props.distance if add_props.distance_present else None,
certainty=add_props.certainty if add_props.certainty_present else None,
creation_time=(
self.__retrieve_timestamp(add_props.creation_time_unix)
if add_props.creation_time_unix_present
else None
),
last_update_time=(
self.__retrieve_timestamp(add_props.last_update_time_unix)
if add_props.last_update_time_unix_present
else None
),
score=add_props.score if add_props.score_present else None,
explain_score=(add_props.explain_score if add_props.explain_score_present else None),
is_consistent=(add_props.is_consistent if add_props.is_consistent_present else None),
rerank_score=(add_props.rerank_score if add_props.rerank_score_present else None),
)
return meta
def __extract_metadata_for_group_by_object(
self,
add_props: "search_get_pb2.MetadataResult",
) -> GroupByMetadataReturn:
meta = GroupByMetadataReturn(
distance=add_props.distance if add_props.distance_present else None,
)
return meta
def __extract_id_for_object(
self,
add_props: "search_get_pb2.MetadataResult",
) -> uuid_lib.UUID:
return _WeaviateUUIDInt(int.from_bytes(add_props.id_as_bytes, byteorder="big"))
def __extract_vector_for_object(
self,
add_props: "search_get_pb2.MetadataResult",
) -> Dict[str, Union[List[float], List[List[float]]]]:
if (
len(add_props.vector_bytes) == 0
and len(add_props.vector) == 0
and len(add_props.vectors) == 0
):
return {}
if len(add_props.vector_bytes) > 0:
vec = _ByteOps.decode_float32s(add_props.vector_bytes)
return {"default": vec}
vecs: Dict[str, Union[List[float], List[List[float]]]] = {}
for vec in add_props.vectors:
if vec.type == base_pb2.Vectors.VECTOR_TYPE_SINGLE_FP32:
vecs[vec.name] = _Unpack.single(vec.vector_bytes)
elif vec.type == base_pb2.Vectors.VECTOR_TYPE_MULTI_FP32:
vecs[vec.name] = _Unpack.multi(vec.vector_bytes)
else:
vecs[vec.name] = _Unpack.single(vec.vector_bytes)
return vecs
def __extract_generated_from_metadata(
self,
add_props: search_get_pb2.MetadataResult,
) -> Optional[str]:
return add_props.generative if add_props.generative_present else None
def __extract_generated_from_generative(
self, generative: generative_pb2.GenerativeResult
) -> Optional[str]:
return generative.values[0].result if len(generative.values) > 0 else None
def __extract_generated_from_reply(self, res: search_get_pb2.SearchReply) -> Optional[str]:
if (
res.generative_grouped_result != ""
): # for BC, is deprecated in favour of generative_grouped_results
return res.generative_grouped_result
if len(res.generative_grouped_results.values) > 0:
return res.generative_grouped_results.values[0].result
return None
def __extract_generative_metadata(
self, metadata: generative_pb2.GenerativeMetadata
) -> Optional[GenerativeMetadata]:
if metadata.HasField("anthropic"):
return metadata.anthropic
if metadata.HasField("anyscale"):
return metadata.anyscale
if metadata.HasField("aws"):
return metadata.aws
if metadata.HasField("cohere"):
return metadata.cohere
if metadata.HasField("databricks"):
return metadata.databricks
if metadata.HasField("dummy"):
return metadata.dummy
if metadata.HasField("friendliai"):
return metadata.friendliai
if metadata.HasField("google"):
return metadata.google
if metadata.HasField("mistral"):
return metadata.mistral
if metadata.HasField("nvidia"):
return metadata.nvidia
if metadata.HasField("ollama"):
return metadata.ollama
if metadata.HasField("openai"):
return metadata.openai
return None
def __extract_generative_single_from_generative(
self, result: generative_pb2.GenerativeResult
) -> Optional[GenerativeSingle]:
if len(vs := result.values) > 0:
generative = vs[0]
return GenerativeSingle(
debug=generative.debug if generative.debug.full_prompt != "" else None,
metadata=self.__extract_generative_metadata(generative.metadata),
text=generative.result,
)
return None
def __extract_generative_grouped_from_generative(
self, result: generative_pb2.GenerativeResult
) -> Optional[GenerativeGrouped]:
if len(vs := result.values) > 0:
generative = vs[0]
return GenerativeGrouped(
metadata=self.__extract_generative_metadata(generative.metadata),
text=generative.result,
)
return None
def __deserialize_list_value_prop_125(
self, value: properties_pb2.ListValue
) -> Optional[List[Any]]:
if value.HasField("bool_values"):
return list(value.bool_values.values)
if value.HasField("date_values"):
return [_datetime_from_weaviate_str(val) for val in value.date_values.values]
if value.HasField("int_values"):
return _ByteOps.decode_int64s(value.int_values.values)
if value.HasField("number_values"):
return _ByteOps.decode_float64s(value.number_values.values)
if value.HasField("text_values"):
return list(value.text_values.values)
if value.HasField("uuid_values"):
return [uuid_lib.UUID(val) for val in value.uuid_values.values]
if value.HasField("object_values"):
return [
self.__parse_nonref_properties_result(val) for val in value.object_values.values
]
_Warnings.unknown_type_encountered(value.WhichOneof("value"))
return None
def __deserialize_non_ref_prop(self, value: properties_pb2.Value) -> Any:
if value.HasField("uuid_value"):
return uuid_lib.UUID(value.uuid_value)
if value.HasField("date_value"):
return _datetime_from_weaviate_str(value.date_value)
if value.HasField("text_value"):
return str(value.text_value)
if value.HasField("int_value"):
return int(value.int_value)
if value.HasField("number_value"):
return float(value.number_value)
if value.HasField("bool_value"):
return bool(value.bool_value)
if value.HasField("list_value"):
return self.__deserialize_list_value_prop_125(value.list_value)
if value.HasField("object_value"):
return self.__parse_nonref_properties_result(value.object_value)
if value.HasField("geo_value"):
return GeoCoordinate(
latitude=value.geo_value.latitude, longitude=value.geo_value.longitude
)
if value.HasField("blob_value"):
return value.blob_value
if value.HasField("phone_value"):
return _PhoneNumber(
country_code=value.phone_value.country_code,
default_country=value.phone_value.default_country,
international_formatted=value.phone_value.international_formatted,
national=value.phone_value.national,
national_formatted=value.phone_value.national_formatted,
number=value.phone_value.input,
valid=value.phone_value.valid,
)
if value.HasField("null_value"):
return None
_Warnings.unknown_type_encountered(value.WhichOneof("value"))
return None
def __parse_nonref_properties_result(
self,
properties: properties_pb2.Properties,
) -> dict:
return {
name: self.__deserialize_non_ref_prop(value)
for name, value in properties.fields.items()
}
def __parse_ref_properties_result(
self,
properties: search_get_pb2.PropertiesResult,
) -> Optional[dict]:
if len(properties.ref_props) == 0:
return {} if properties.ref_props_requested else None
return {
ref_prop.prop_name: _CrossReference._from(
[
self.__result_to_query_object(
prop,
prop.metadata,
_QueryOptions(True, True, True, True, False),
)
for prop in ref_prop.properties
]
)
for ref_prop in properties.ref_props
}
def __result_to_query_object(
self,
props: search_get_pb2.PropertiesResult,
meta: search_get_pb2.MetadataResult,
options: _QueryOptions,
) -> Object[Any, Any]:
return Object(
collection=props.target_collection,
properties=(
self.__parse_nonref_properties_result(props.non_ref_props)
if options.include_properties
else {}
),
metadata=(
self.__extract_metadata_for_object(meta)
if options.include_metadata
else MetadataReturn()
),
references=(
self.__parse_ref_properties_result(props) if options.include_references else None
),
uuid=self.__extract_id_for_object(meta),
vector=(self.__extract_vector_for_object(meta) if options.include_vector else {}),
)
def __result_to_generative_object(
self,
props: search_get_pb2.PropertiesResult,
meta: search_get_pb2.MetadataResult,
gen: generative_pb2.GenerativeResult,
options: _QueryOptions,
) -> GenerativeObject[Any, Any]:
return GenerativeObject(
collection=props.target_collection,
properties=(
self.__parse_nonref_properties_result(props.non_ref_props)
if options.include_properties
else {}
),
metadata=(
self.__extract_metadata_for_object(meta)
if options.include_metadata
else MetadataReturn()
),
references=(
self.__parse_ref_properties_result(props) if options.include_references else None
),
uuid=self.__extract_id_for_object(meta),
vector=(self.__extract_vector_for_object(meta) if options.include_vector else {}),
generated=(
self.__extract_generated_from_generative(gen)
if self.__uses_127_api
else self.__extract_generated_from_metadata(meta)
),
generative=self.__extract_generative_single_from_generative(gen),
)
def __result_to_group(
self,
res: search_get_pb2.GroupByResult,
options: _QueryOptions,
) -> Group[Any, Any]:
return Group(
objects=[
self.__result_to_group_by_object(obj.properties, obj.metadata, options, res.name)
for obj in res.objects
],
name=res.name,
number_of_objects=res.number_of_objects,
min_distance=res.min_distance,
max_distance=res.max_distance,
rerank_score=res.rerank.score if res.rerank is not None else None,
)
def __result_to_generative_group(
self,
res: search_get_pb2.GroupByResult,
options: _QueryOptions,
) -> GenerativeGroup[Any, Any]:
return GenerativeGroup(
objects=[
self.__result_to_group_by_object(obj.properties, obj.metadata, options, res.name)
for obj in res.objects
],
name=res.name,
number_of_objects=res.number_of_objects,
min_distance=res.min_distance,
max_distance=res.max_distance,
rerank_score=res.rerank.score if res.rerank is not None else None,
generated=res.generative.result if res.generative is not None else None,
)
def __result_to_group_by_object(
self,
props: search_get_pb2.PropertiesResult,
meta: search_get_pb2.MetadataResult,
options: _QueryOptions,
group_name: str,
) -> GroupByObject[Any, Any]:
return GroupByObject(
collection=props.target_collection,
properties=(
self.__parse_nonref_properties_result(props.non_ref_props)
if options.include_properties
else {}
),
metadata=(
self.__extract_metadata_for_group_by_object(meta)
if options.include_metadata
else GroupByMetadataReturn()
),
references=(
self.__parse_ref_properties_result(props) if options.include_references else None
),
uuid=self.__extract_id_for_object(meta),
vector=(self.__extract_vector_for_object(meta) if options.include_vector else {}),
belongs_to_group=group_name,
)
def _result_to_query_return(
self,
res: search_get_pb2.SearchReply,
options: _QueryOptions,
) -> QueryReturn[WeaviateProperties, CrossReferences]:
return QueryReturn(
objects=[
self.__result_to_query_object(obj.properties, obj.metadata, options)
for obj in res.results
]
)
def _result_to_generative_query_return(
    self,
    res: search_get_pb2.SearchReply,
    options: _QueryOptions,
) -> GenerativeReturn[WeaviateProperties, CrossReferences]:
    """Convert a gRPC ``SearchReply`` into a flat ``GenerativeReturn``.

    Parses each per-object generative result alongside the object itself, and
    additionally extracts the single grouped-task generative output
    (``res.generative_grouped_results``) for the whole reply.
    """
    return GenerativeReturn(
        generated=self.__extract_generated_from_reply(res),
        objects=[
            self.__result_to_generative_object(
                obj.properties, obj.metadata, obj.generative, options
            )
            for obj in res.results
        ],
        generative=self.__extract_generative_grouped_from_generative(
            res.generative_grouped_results
        ),
    )
def _result_to_generative_return(
    self,
    res: search_get_pb2.SearchReply,
    options: _QueryOptions,
) -> Union[
    GenerativeReturn[WeaviateProperties, CrossReferences],
    GenerativeGroupByReturn[WeaviateProperties, CrossReferences],
]:
    """Route a generative reply to the flat or the grouped parser."""
    if options.is_group_by is False:
        # plain (non-grouped) generative query
        return self._result_to_generative_query_return(res, options)
    # grouped generative query
    return self._result_to_generative_groupby_return(res, options)
def _result_to_groupby_return(
    self,
    res: search_get_pb2.SearchReply,
    options: _QueryOptions,
) -> GroupByReturn[WeaviateProperties, CrossReferences]:
    """Convert a gRPC ``SearchReply`` into a ``GroupByReturn``.

    Returns both the per-group mapping and a flattened list of all objects
    across every group.
    """
    groups = {}
    for raw_group in res.group_by_results:
        groups[raw_group.name] = self.__result_to_group(raw_group, options)
    flattened: List[GroupByObject] = []
    for parsed_group in groups.values():
        flattened.extend(parsed_group.objects)
    return GroupByReturn(objects=flattened, groups=groups)
def _result_to_generative_groupby_return(
    self,
    res: search_get_pb2.SearchReply,
    options: _QueryOptions,
) -> GenerativeGroupByReturn[WeaviateProperties, CrossReferences]:
    """Convert a gRPC ``SearchReply`` into a ``GenerativeGroupByReturn``.

    Builds the per-group mapping first, then re-wraps every group member as a
    plain ``GroupByObject`` for the flattened ``objects`` list.
    """
    groups = {
        group.name: self.__result_to_generative_group(group, options)
        for group in res.group_by_results
    }
    # Flatten: copy each parsed object field-by-field into a non-generative
    # GroupByObject, tagged with the name of the group it came from.
    objects_group_by: List[GroupByObject] = [
        GroupByObject(
            collection=obj.collection,
            properties=obj.properties,
            references=obj.references,
            metadata=obj.metadata,
            belongs_to_group=group.name,
            uuid=obj.uuid,
            vector=obj.vector,
        )
        for group in groups.values()
        for obj in group.objects
    ]
    return GenerativeGroupByReturn(
        objects=objects_group_by,
        groups=groups,
        # NOTE(review): this reads `generative_grouped_result` (singular) while
        # the flat parser reads `generative_grouped_results` (plural) — confirm
        # both proto fields exist and that the singular one is intended here.
        generated=(
            res.generative_grouped_result if res.generative_grouped_result != "" else None
        ),
    )
def _result_to_query_or_groupby_return(
    self,
    res: search_get_pb2.SearchReply,
    options: _QueryOptions,
) -> Union[
    QueryReturn[WeaviateProperties, CrossReferences],
    GroupByReturn[WeaviateProperties, CrossReferences],
]:
    """Route a non-generative reply to the flat or the grouped parser."""
    if options.is_group_by:
        return self._result_to_groupby_return(res, options)
    return self._result_to_query_return(res, options)
def _parse_return_properties(
    self,
    return_properties: Optional[ReturnProperties[WeaviateProperties]],
) -> Union[PROPERTIES, bool, None]:
    """Normalize the user-supplied ``return_properties`` argument.

    Resolution order:
      1. A falsy-but-not-None value (``[]`` or ``False``) means "no properties".
      2. Explicit property selections (sequence / str / ``QueryNested``) pass
         through unchanged; so do ``None``/``True`` when the collection has no
         generic properties model to fall back on.
      3. ``None``/``True`` with a collection-level generic: extract the
         property names from that TypedDict (or pass through if it is not one).
      4. Anything else must be a query-level TypedDict to extract from.

    :raises WeaviateInvalidInputError: if a non-TypedDict object is supplied
        where a TypedDict data model is required (case 4).
    """
    if (
        return_properties is not None and not return_properties
    ):  # fast way to check if it is an empty list or False
        return []
    if (
        isinstance(return_properties, Sequence)
        or isinstance(return_properties, str)
        or isinstance(return_properties, QueryNested)
        or (
            (return_properties is None or return_properties is True)
            and self._properties is None
        )
    ):
        return cast(
            Union[PROPERTIES, bool, None], return_properties
        )  # is not sourced from any generic
    elif (
        return_properties is None or return_properties is True
    ) and self._properties is not None:
        if not is_typeddict(self._properties):
            return return_properties
        return _extract_properties_from_data_model(
            self._properties
        )  # is sourced from collection-specific generic
    else:
        # Only non-sequence, non-None, non-True values reach here.
        assert return_properties is not None
        assert return_properties is not True
        if not is_typeddict(return_properties):
            raise WeaviateInvalidInputError(
                f"return_properties must only be a TypedDict or PROPERTIES within this context but is {type(return_properties)}"
            )
        return _extract_properties_from_data_model(
            return_properties
        )  # is sourced from query-specific generic
def _parse_return_metadata(
    self, return_metadata: Optional[METADATA], include_vector: INCLUDE_VECTOR
) -> Optional[_MetadataQuery]:
    """Normalize ``return_metadata`` into the internal ``_MetadataQuery`` form.

    Accepts ``None``, a ``MetadataQuery`` instance, or a sequence of metadata
    field names (each name is switched on via a keyword argument). The
    ``include_vector`` flag is folded in by ``_MetadataQuery.from_public``.

    :raises: input validation errors from ``_validate_input`` when argument
        validation is enabled on this instance.
    """
    if self._validate_arguments:
        _validate_input(
            [
                _ValidateArgument(
                    [Sequence[str], MetadataQuery, None],
                    "return_metadata",
                    return_metadata,
                ),
                _ValidateArgument([bool, str, Sequence], "include_vector", include_vector),
            ]
        )
    if return_metadata is None:
        ret_md = None
    elif hasattr(return_metadata, "creation_time"):
        # cheaper than isinstance(), needs to be MetadataQuery
        ret_md = cast(MetadataQuery, return_metadata)
    else:
        # sequence of field names -> enable each named flag on a fresh query
        ret_md = MetadataQuery(**{str(prop): True for prop in return_metadata})
    return _MetadataQuery.from_public(ret_md, include_vector)
def _parse_return_references(
    self, return_references: Optional[ReturnReferences[TReferences]]
) -> Optional[REFERENCES]:
    """Normalize the user-supplied ``return_references`` argument.

    Mirrors ``_parse_return_properties``: explicit selections (sequence or
    ``_QueryReference``) pass through, ``None`` falls back to the
    collection-level generic references model (extracting from it when it is
    a TypedDict), and anything else must itself be a TypedDict to extract
    from.

    :raises WeaviateInvalidInputError: if a non-TypedDict object is supplied
        where a TypedDict data model is required.
    """
    if (
        (return_references is None and self._references is None)
        or isinstance(return_references, Sequence)
        or isinstance(return_references, _QueryReference)
    ):
        # pass through unchanged: either nothing requested and no generic to
        # fall back on, or an explicit selection was given
        return return_references
    elif return_references is None and self._references is not None:
        if not is_typeddict(self._references):
            return return_references
        refs = _extract_references_from_data_model(self._references)
        return refs
    else:
        # only non-sequence, non-None values reach here
        assert return_references is not None
        if not is_typeddict(return_references):
            raise WeaviateInvalidInputError(
                f"return_references must only be a TypedDict or ReturnReferences within this context but is {type(return_references)}"
            )
        return _extract_references_from_data_model(return_references)
| _BaseExecutor |
python | walkccc__LeetCode | solutions/103. Binary Tree Zigzag Level Order Traversal/103-2.py | {
"start": 0,
"end": 591
} | class ____:
def zigzagLevelOrder(self, root: TreeNode | None) -> list[list[int]]:
if not root:
return []
ans = []
q = collections.deque([root])
isLeftToRight = True
while q:
size = len(q)
currLevel = [0] * size
for i in range(size):
node = q.popleft()
index = i if isLeftToRight else size - i - 1
currLevel[index] = node.val
if node.left:
q.append(node.left)
if node.right:
q.append(node.right)
ans.append(currLevel)
isLeftToRight = not isLeftToRight
return ans
| Solution |
python | python-openxml__python-docx | src/docx/oxml/table.py | {
"start": 12568,
"end": 12863
} | class ____(BaseOxmlElement):
"""`w:tblPrEx` element, exceptions to table-properties.
Applied at a lower level, like a `w:tr` to modify the appearance. Possibly used when
two tables are merged. For more see:
http://officeopenxml.com/WPtablePropertyExceptions.php
"""
| CT_TblPrEx |
python | dagster-io__dagster | python_modules/libraries/dagster-fivetran/dagster_fivetran/translator.py | {
"start": 5077,
"end": 5681
} | class ____:
"""Represents a Fivetran table, based on data as returned from the API."""
enabled: bool
name_in_destination: str
# We keep the raw data for columns to add it as `column_info` in the metadata.
columns: Optional[Mapping[str, Any]]
@classmethod
def from_table_details(cls, table_details: Mapping[str, Any]) -> "FivetranTable":
return cls(
enabled=table_details["enabled"],
name_in_destination=table_details["name_in_destination"],
columns=table_details.get("columns"),
)
@whitelist_for_serdes
@record
| FivetranTable |
python | Lightning-AI__lightning | tests/tests_pytorch/models/test_hparams.py | {
"start": 10382,
"end": 10591
} | class ____(CustomBoringModel):
any_other_loss = torch.nn.CrossEntropyLoss()
def __init__(self, *args, subclass_arg=1200, **kwargs):
super().__init__(*args, **kwargs)
| NonSavingSubClassBoringModel |
python | RaRe-Technologies__gensim | gensim/similarities/docsim.py | {
"start": 43854,
"end": 52781
} | class ____(interfaces.SimilarityABC):
"""Compute cosine similarity against a corpus of documents by storing the index matrix in memory.
Examples
--------
Here is how you would index and query a corpus of documents in the bag-of-words format using the
cosine similarity:
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.similarities import SparseMatrixSimilarity
>>> from gensim.test.utils import common_texts as corpus
>>>
>>> dictionary = Dictionary(corpus) # fit dictionary
>>> bow_corpus = [dictionary.doc2bow(line) for line in corpus] # convert corpus to BoW format
>>> index = SparseMatrixSimilarity(bm25_corpus, num_docs=len(corpus), num_terms=len(dictionary))
>>>
>>> query = 'graph trees computer'.split() # make a query
>>> bow_query = dictionary.doc2bow(query)
>>> similarities = index[bow_query] # calculate similarity of query to each doc from bow_corpus
Here is how you would index and query a corpus of documents using the Okapi BM25 scoring
function:
.. sourcecode:: pycon
>>> from gensim.corpora import Dictionary
>>> from gensim.models import TfidfModel, OkapiBM25Model
>>> from gensim.similarities import SparseMatrixSimilarity
>>> from gensim.test.utils import common_texts as corpus
>>>
>>> dictionary = Dictionary(corpus) # fit dictionary
>>> query_model = TfidfModel(dictionary=dictionary, smartirs='bnn') # enforce binary weights
>>> document_model = OkapiBM25Model(dictionary=dictionary) # fit bm25 model
>>>
>>> bow_corpus = [dictionary.doc2bow(line) for line in corpus] # convert corpus to BoW format
>>> bm25_corpus = document_model[bow_corpus]
>>> index = SparseMatrixSimilarity(bm25_corpus, num_docs=len(corpus), num_terms=len(dictionary),
... normalize_queries=False, normalize_documents=False)
>>>
>>>
>>> query = 'graph trees computer'.split() # make a query
>>> bow_query = dictionary.doc2bow(query)
>>> bm25_query = query_model[bow_query]
>>> similarities = index[bm25_query] # calculate similarity of query to each doc from bow_corpus
Notes
-----
Use this if your input corpus contains sparse vectors (such as TF-IDF documents) and fits into RAM.
The matrix is internally stored as a :class:`scipy.sparse.csr_matrix` matrix. Unless the entire
matrix fits into main memory, use :class:`~gensim.similarities.docsim.Similarity` instead.
Takes an optional `maintain_sparsity` argument, setting this to True
causes `get_similarities` to return a sparse matrix instead of a
dense representation if possible.
See also
--------
:class:`~gensim.similarities.docsim.Similarity`
Index similarity (wrapper for other inheritors of :class:`~gensim.interfaces.SimilarityABC`).
:class:`~gensim.similarities.docsim.MatrixSimilarity`
Index similarity (dense with cosine distance).
"""
def __init__(self, corpus, num_features=None, num_terms=None, num_docs=None, num_nnz=None,
num_best=None, chunksize=500, dtype=numpy.float32, maintain_sparsity=False,
normalize_queries=True, normalize_documents=True):
"""
Parameters
----------
corpus: iterable of list of (int, float)
A list of documents in the BoW format.
num_features : int, optional
Size of the dictionary. Must be either specified, or present in `corpus.num_terms`.
num_terms : int, optional
Alias for `num_features`, you can use either.
num_docs : int, optional
Number of documents in `corpus`. Will be calculated if not provided.
num_nnz : int, optional
Number of non-zero elements in `corpus`. Will be calculated if not provided.
num_best : int, optional
If set, return only the `num_best` most similar documents, always leaving out documents with similarity = 0.
Otherwise, return a full vector with one float for every document in the index.
chunksize : int, optional
Size of query chunks. Used internally when the query is an entire corpus.
dtype : numpy.dtype, optional
Data type of the internal matrix.
maintain_sparsity : bool, optional
Return sparse arrays from :meth:`~gensim.similarities.docsim.SparseMatrixSimilarity.get_similarities`?
normalize_queries : bool, optional
If queries are in bag-of-words (int, float) format, as opposed to a sparse or dense
2D arrays, they will be L2-normalized. Default is True.
normalize_documents : bool, optional
If `corpus` is in bag-of-words (int, float) format, as opposed to a sparse or dense
2D arrays, it will be L2-normalized. Default is True.
"""
self.num_best = num_best
self.normalize = normalize_queries
self.chunksize = chunksize
self.maintain_sparsity = maintain_sparsity
if corpus is not None:
logger.info("creating sparse index")
# iterate over input corpus, populating the sparse index matrix
try:
# use the more efficient corpus generation version, if the input
# `corpus` is MmCorpus-like (knows its shape and number of non-zeroes).
num_terms, num_docs, num_nnz = corpus.num_terms, corpus.num_docs, corpus.num_nnz
logger.debug("using efficient sparse index creation")
except AttributeError:
# no MmCorpus, use the slower version (or maybe user supplied the
# num_* params in constructor)
pass
if num_features is not None:
# num_terms is just an alias for num_features, for compatibility with MatrixSimilarity
num_terms = num_features
if num_terms is None:
raise ValueError("refusing to guess the number of sparse features: specify num_features explicitly")
corpus = (matutils.scipy2sparse(v) if scipy.sparse.issparse(v) else
(matutils.full2sparse(v) if isinstance(v, numpy.ndarray) else
matutils.unitvec(v) if normalize_documents else v) for v in corpus)
self.index = matutils.corpus2csc(
corpus, num_terms=num_terms, num_docs=num_docs, num_nnz=num_nnz,
dtype=dtype, printprogress=10000,
).T
# convert to Compressed Sparse Row for efficient row slicing and multiplications
self.index = self.index.tocsr() # currently no-op, CSC.T is already CSR
logger.info("created %r", self.index)
def __len__(self):
"""Get size of index."""
return self.index.shape[0]
def get_similarities(self, query):
"""Get similarity between `query` and this index.
Warnings
--------
Do not use this function directly; use the `self[query]` syntax instead.
Parameters
----------
query : {list of (int, number), iterable of list of (int, number), :class:`scipy.sparse.csr_matrix`}
Document or collection of documents.
Return
------
:class:`numpy.ndarray`
Similarity matrix (if maintain_sparsity=False) **OR**
:class:`scipy.sparse.csc`
otherwise
"""
is_corpus, query = utils.is_corpus(query)
if is_corpus:
query = matutils.corpus2csc(query, self.index.shape[1], dtype=self.index.dtype)
else:
if scipy.sparse.issparse(query):
query = query.T # convert documents=rows to documents=columns
elif isinstance(query, numpy.ndarray):
if query.ndim == 1:
query.shape = (1, len(query))
query = scipy.sparse.csr_matrix(query, dtype=self.index.dtype).T
else:
# default case: query is a single vector, in sparse gensim format
query = matutils.corpus2csc([query], self.index.shape[1], dtype=self.index.dtype)
# compute cosine similarity against every other document in the collection
result = self.index * query.tocsc() # N x T * T x C = N x C
if result.shape[1] == 1 and not is_corpus:
# for queries of one document, return a 1d array
result = result.toarray().flatten()
elif self.maintain_sparsity:
# avoid converting to dense array if maintaining sparsity
result = result.T
else:
# otherwise, return a 2d matrix (#queries x #index)
result = result.toarray().T
return result
| SparseMatrixSimilarity |
python | pytorch__pytorch | torch/_dynamo/package.py | {
"start": 11461,
"end": 14517
} | class ____:
"""
System information including Python, PyTorch, and GPU details.
This information is used to ensure compiled artifacts can only be loaded
with compatible system configurations.
"""
python_version: str
torch_version: str
toolkit_version: Optional[str]
triton_version: Optional[tuple[int, int]]
gpu_name: Optional[str]
CHECK_GPUS = ("cuda", "xpu")
@classmethod
def current(cls) -> "SystemInfo":
"""Create a SystemInfo instance with current system information."""
# Get GPU name if CUDA or XPU is available
gpu_name = None
from torch.utils._triton import get_triton_version
gpu_name, toolkit_version = None, None
for device_type in cls.CHECK_GPUS:
if getattr(torch, device_type).is_available():
try:
gpu_name = getattr(torch, device_type).get_device_name()
toolkit_version = getattr(torch.version, device_type)
break
except Exception:
pass
return cls(
python_version=platform.python_version(),
torch_version=torch.__version__,
toolkit_version=toolkit_version,
triton_version=get_triton_version((0, 0)),
gpu_name=gpu_name,
)
def check_compatibility(
self, other: "SystemInfo", device_type: str = "cpu"
) -> None:
"""
Check if this SystemInfo is compatible with another SystemInfo.
Raises RuntimeError if incompatible.
"""
if self.python_version != other.python_version:
raise RuntimeError(
f"Compile package was created with a different Python version: {self.python_version}"
)
if self.torch_version != other.torch_version:
raise RuntimeError(
f"Compile package was created with a different PyTorch version: {self.torch_version}"
)
if device_type in self.CHECK_GPUS:
if not getattr(torch, device_type).is_available():
raise RuntimeError(f"{device_type} is not available")
if self.toolkit_version != other.toolkit_version:
raise RuntimeError(
f"Compile package was created with a different toolkit version: {self.toolkit_version}"
)
if (
other.triton_version != (0, 0)
and self.triton_version != other.triton_version
):
raise RuntimeError(
f"Compile package was created with a different Triton version: {self.triton_version}"
)
# Check GPU name if CUDA/XPU was used
if other.gpu_name is not None and self.gpu_name != other.gpu_name:
raise RuntimeError(
f"Compile package was created with different GPU: "
f"cached={self.gpu_name}, current={other.gpu_name}"
)
@dataclasses.dataclass
| SystemInfo |
python | python-poetry__poetry | src/poetry/repositories/cached_repository.py | {
"start": 617,
"end": 2461
} | class ____(Repository, ABC):
CACHE_VERSION = parse_constraint("2.0.0")
def __init__(
self, name: str, *, disable_cache: bool = False, config: Config | None = None
) -> None:
super().__init__(name)
self._disable_cache = disable_cache
self._cache_dir = (config or Config.create()).repository_cache_directory / name
self._release_cache: FileCache[dict[str, Any]] = FileCache(path=self._cache_dir)
@abstractmethod
def _get_release_info(
self, name: NormalizedName, version: Version
) -> dict[str, Any]: ...
def get_release_info(self, name: NormalizedName, version: Version) -> PackageInfo:
"""
Return the release information given a package name and a version.
The information is returned from the cache if it exists
or retrieved from the remote server.
"""
from poetry.inspection.info import PackageInfo
if self._disable_cache:
return PackageInfo.load(self._get_release_info(name, version))
cached = self._release_cache.remember(
f"{name}:{version}", lambda: self._get_release_info(name, version)
)
cache_version = cached.get("_cache_version", "0.0.0")
if parse_constraint(cache_version) != self.CACHE_VERSION:
# The cache must be updated
self._log(
f"The cache for {name} {version} is outdated. Refreshing.",
level="debug",
)
cached = self._get_release_info(name, version)
self._release_cache.put(f"{name}:{version}", cached)
return PackageInfo.load(cached)
def package(self, name: str, version: Version) -> Package:
return self.get_release_info(canonicalize_name(name), version).to_package(
name=name
)
| CachedRepository |
python | charliermarsh__ruff | crates/ruff_benchmark/resources/tomllib/_parser.py | {
"start": 1555,
"end": 4175
} | class ____(ValueError):
"""An error raised if a document is not valid TOML."""
def load(fp: BinaryIO, /, *, parse_float: ParseFloat = float) -> dict[str, Any]:
"""Parse TOML from a binary file object."""
b = fp.read()
try:
s = b.decode()
except AttributeError:
raise TypeError(
"File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`"
) from None
return loads(s, parse_float=parse_float)
def loads(s: str, /, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901
"""Parse TOML from a string."""
# The spec allows converting "\r\n" to "\n", even in string
# literals. Let's do so to simplify parsing.
src = s.replace("\r\n", "\n")
pos = 0
out = Output(NestedDict(), Flags())
header: Key = ()
parse_float = make_safe_parse_float(parse_float)
# Parse one statement at a time
# (typically means one line in TOML source)
while True:
# 1. Skip line leading whitespace
pos = skip_chars(src, pos, TOML_WS)
# 2. Parse rules. Expect one of the following:
# - end of file
# - end of line
# - comment
# - key/value pair
# - append dict to list (and move to its namespace)
# - create dict (and move to its namespace)
# Skip trailing whitespace when applicable.
try:
char = src[pos]
except IndexError:
break
if char == "\n":
pos += 1
continue
if char in KEY_INITIAL_CHARS:
pos = key_value_rule(src, pos, out, header, parse_float)
pos = skip_chars(src, pos, TOML_WS)
elif char == "[":
try:
second_char: str | None = src[pos + 1]
except IndexError:
second_char = None
out.flags.finalize_pending()
if second_char == "[":
pos, header = create_list_rule(src, pos, out)
else:
pos, header = create_dict_rule(src, pos, out)
pos = skip_chars(src, pos, TOML_WS)
elif char != "#":
raise suffixed_err(src, pos, "Invalid statement")
# 3. Skip comment
pos = skip_comment(src, pos)
# 4. Expect end of line or end of file
try:
char = src[pos]
except IndexError:
break
if char != "\n":
raise suffixed_err(
src, pos, "Expected newline or end of document after a statement"
)
pos += 1
return out.data.dict
| TOMLDecodeError |
python | huggingface__transformers | tests/models/dpt/test_modeling_dpt_auto_backbone.py | {
"start": 8434,
"end": 12366
} | class ____(unittest.TestCase):
def test_inference_depth_estimation_dinov2(self):
image_processor = DPTImageProcessor.from_pretrained("facebook/dpt-dinov2-small-kitti")
model = DPTForDepthEstimation.from_pretrained("facebook/dpt-dinov2-small-kitti").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 576, 736))
self.assertEqual(predicted_depth.shape, expected_shape)
expectations = Expectations(
{
(None, None): [[6.0336, 7.1502, 7.4130], [6.8977, 7.2383, 7.2268], [7.9180, 8.0525, 8.0134]],
("cuda", 8): [[6.0350, 7.1518, 7.4144], [6.8992, 7.2396, 7.2280], [7.9194, 8.0538, 8.0145]],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
def test_inference_depth_estimation_beit(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-beit-base-384")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-beit-base-384").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 384, 384))
self.assertEqual(predicted_depth.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[2669.7061, 2663.7144, 2674.9399],
[2633.9326, 2650.9092, 2665.4270],
[2621.8271, 2632.0129, 2637.2290],
],
("cuda", 8): [
[2669.4292, 2663.4121, 2674.6233],
[2633.7400, 2650.7026, 2665.2085],
[2621.6572, 2631.8452, 2637.0525],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
def test_inference_depth_estimation_swinv2(self):
image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-swinv2-tiny-256")
model = DPTForDepthEstimation.from_pretrained("Intel/dpt-swinv2-tiny-256").to(torch_device)
image = prepare_img()
inputs = image_processor(images=image, return_tensors="pt").to(torch_device)
# forward pass
with torch.no_grad():
outputs = model(**inputs)
predicted_depth = outputs.predicted_depth
# verify the predicted depth
expected_shape = torch.Size((1, 256, 256))
self.assertEqual(predicted_depth.shape, expected_shape)
expectations = Expectations(
{
(None, None): [
[1032.7719, 1025.1886, 1030.2661],
[1023.7619, 1021.0075, 1024.9121],
[1022.5667, 1018.8522, 1021.4145],
],
("cuda", 8): [
[1032.7170, 1025.0629, 1030.1941],
[1023.7309, 1020.9786, 1024.8594],
[1022.5233, 1018.8235, 1021.3312],
],
}
)
expected_slice = torch.tensor(expectations.get_expectation()).to(torch_device)
torch.testing.assert_close(outputs.predicted_depth[0, :3, :3], expected_slice, rtol=2e-4, atol=2e-4)
| DPTModelIntegrationTest |
python | pdm-project__pdm | src/pdm/cli/commands/info.py | {
"start": 238,
"end": 3444
} | class ____(BaseCommand):
"""Show the project information"""
def add_arguments(self, parser: argparse.ArgumentParser) -> None:
venv_option.add_to_parser(parser)
group = ArgumentGroup("fields", is_mutually_exclusive=True)
group.add_argument("--python", action="store_true", help="Show the interpreter path")
group.add_argument(
"--where",
dest="where",
action="store_true",
help="Show the project root path",
)
group.add_argument("--packages", action="store_true", help="Show the local packages root")
group.add_argument("--env", action="store_true", help="Show PEP 508 environment markers")
group.add_argument("--json", action="store_true", help="Dump the information in JSON")
group.add_to_parser(parser)
def handle(self, project: Project, options: argparse.Namespace) -> None:
check_project_file(project)
interpreter = project.environment.interpreter
packages_path = ""
if project.environment.is_local:
packages_path = project.environment.packages_path # type: ignore[attr-defined]
else:
# For virtual environments and other non-local environments,
# show the site-packages path (purelib is the standard location)
paths = project.environment.get_paths()
packages_path = paths.get("purelib", "")
if options.python:
project.core.ui.echo(str(interpreter.executable))
elif options.where:
project.core.ui.echo(str(project.root))
elif options.packages:
project.core.ui.echo(str(packages_path))
elif options.env:
project.core.ui.echo(json.dumps(project.environment.spec.markers_with_defaults(), indent=2))
elif options.json:
print_json(
data={
"pdm": {"version": project.core.version},
"python": {
"interpreter": str(interpreter.executable),
"version": interpreter.identifier,
"markers": project.environment.spec.markers_with_defaults(),
},
"project": {
"root": str(project.root),
"pypackages": str(packages_path),
},
}
)
else:
for name, value in zip(
[
f"[primary]{key}[/]:"
for key in [
"PDM version",
f"{'Global ' if project.is_global else ''}Python Interpreter",
f"{'Global ' if project.is_global else ''}Project Root",
f"{'Global ' if project.is_global else ''}Local Packages",
]
],
[
project.core.version,
f"{interpreter.executable} ({interpreter.identifier})",
project.root.as_posix(),
str(packages_path),
],
):
project.core.ui.echo(f"{name}\n {value}")
| Command |
python | ansible__ansible | test/lib/ansible_test/_internal/provider/source/unsupported.py | {
"start": 138,
"end": 605
} | class ____(SourceProvider):
"""Source provider to use when the layout is unsupported."""
sequence = 0 # disable automatic detection
@staticmethod
def is_content_root(path: str) -> bool:
"""Return True if the given path is a content root for this provider."""
return False
def get_paths(self, path: str) -> list[str]:
"""Return the list of available content paths under the given path."""
return []
| UnsupportedSource |
python | pyca__cryptography | tests/hazmat/primitives/test_aead.py | {
"start": 9601,
"end": 19032
} | class ____:
@pytest.mark.skipif(
sys.platform not in {"linux", "darwin"} or sys.maxsize < 2**31,
reason="mmap and 64-bit platform required",
)
def test_data_too_large(self):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = b"0" * 12
large_data = large_mmap()
with pytest.raises(OverflowError):
aesccm.encrypt(nonce, large_data, b"")
with pytest.raises(OverflowError):
aesccm.encrypt(nonce, b"", large_data)
def test_default_tag_length(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = os.urandom(12)
pt = b"hello"
ct = aesccm.encrypt(nonce, pt, None)
assert len(ct) == len(pt) + 16
def test_invalid_tag_length(self, backend):
key = AESCCM.generate_key(128)
with pytest.raises(ValueError):
AESCCM(key, tag_length=7)
with pytest.raises(ValueError):
AESCCM(key, tag_length=2)
with pytest.raises(TypeError):
AESCCM(key, tag_length="notanint") # type:ignore[arg-type]
def test_invalid_nonce_length(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
pt = b"hello"
nonce = os.urandom(14)
with pytest.raises(ValueError):
aesccm.encrypt(nonce, pt, None)
with pytest.raises(ValueError):
aesccm.encrypt(nonce[:6], pt, None)
with pytest.raises(ValueError):
buf = bytearray(16)
aesccm.decrypt_into(nonce, b"x" * 20, None, buf)
with pytest.raises(ValueError):
buf = bytearray(16)
aesccm.decrypt_into(nonce[:6], b"x" * 20, None, buf)
def test_vectors(self, subtests, backend):
vectors = _load_all_params(
os.path.join("ciphers", "AES", "CCM"),
[
"DVPT128.rsp",
"DVPT192.rsp",
"DVPT256.rsp",
"VADT128.rsp",
"VADT192.rsp",
"VADT256.rsp",
"VNT128.rsp",
"VNT192.rsp",
"VNT256.rsp",
"VPT128.rsp",
"VPT192.rsp",
"VPT256.rsp",
],
load_nist_ccm_vectors,
)
for vector in vectors:
with subtests.test():
key = binascii.unhexlify(vector["key"])
nonce = binascii.unhexlify(vector["nonce"])
adata = binascii.unhexlify(vector["adata"])[: vector["alen"]]
ct = binascii.unhexlify(vector["ct"])
pt = binascii.unhexlify(vector["payload"])[: vector["plen"]]
aesccm = AESCCM(key, vector["tlen"])
if vector.get("fail"):
with pytest.raises(InvalidTag):
aesccm.decrypt(nonce, ct, adata)
else:
computed_pt = aesccm.decrypt(nonce, ct, adata)
assert computed_pt == pt
assert aesccm.encrypt(nonce, pt, adata) == ct
def test_roundtrip(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
pt = b"encrypt me"
ad = b"additional"
nonce = os.urandom(12)
ct = aesccm.encrypt(nonce, pt, ad)
computed_pt = aesccm.decrypt(nonce, ct, ad)
assert computed_pt == pt
def test_nonce_too_long(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
pt = b"encrypt me" * 6600
# pt can be no more than 65536 bytes when nonce is 13 bytes
nonce = os.urandom(13)
with pytest.raises(ValueError):
aesccm.encrypt(nonce, pt, None)
with pytest.raises(ValueError):
aesccm.decrypt(nonce, pt, None)
@pytest.mark.parametrize(
("nonce", "data", "associated_data"),
[
[object(), b"data", b""],
[b"0" * 12, object(), b""],
[b"0" * 12, b"data", object()],
],
)
def test_params_not_bytes(self, nonce, data, associated_data, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
with pytest.raises(TypeError):
aesccm.encrypt(nonce, data, associated_data)
def test_bad_key(self, backend):
with pytest.raises(TypeError):
AESCCM(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESCCM(b"0" * 31)
def test_bad_generate_key(self, backend):
with pytest.raises(TypeError):
AESCCM.generate_key(object()) # type:ignore[arg-type]
with pytest.raises(ValueError):
AESCCM.generate_key(129)
def test_associated_data_none_equal_to_empty_bytestring(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = os.urandom(12)
ct1 = aesccm.encrypt(nonce, b"some_data", None)
ct2 = aesccm.encrypt(nonce, b"some_data", b"")
assert ct1 == ct2
pt1 = aesccm.decrypt(nonce, ct1, None)
pt2 = aesccm.decrypt(nonce, ct2, b"")
assert pt1 == pt2
def test_decrypt_data_too_short(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
with pytest.raises(InvalidTag):
aesccm.decrypt(b"0" * 12, b"0", None)
with pytest.raises(InvalidTag):
buf = bytearray(16)
aesccm.decrypt_into(b"0" * 12, b"0", None, buf)
def test_buffer_protocol(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
pt = b"encrypt me"
ad = b"additional"
nonce = os.urandom(12)
ct = aesccm.encrypt(nonce, pt, ad)
computed_pt = aesccm.decrypt(nonce, ct, ad)
assert computed_pt == pt
aesccm2 = AESCCM(bytearray(key))
ct2 = aesccm2.encrypt(bytearray(nonce), pt, ad)
assert ct2 == ct
computed_pt2 = aesccm2.decrypt(bytearray(nonce), ct2, ad)
assert computed_pt2 == pt
def test_max_data_length(self):
plaintext = b"A" * 65535
aad = b"authenticated but unencrypted data"
aesccm = AESCCM(AESCCM.generate_key(128))
nonce = os.urandom(13)
ciphertext = aesccm.encrypt(nonce, plaintext, aad)
decrypted_data = aesccm.decrypt(nonce, ciphertext, aad)
assert decrypted_data == plaintext
def test_encrypt_into(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = os.urandom(12)
pt = b"encrypt me"
ad = b"additional"
buf = bytearray(len(pt) + 16)
n = aesccm.encrypt_into(nonce, pt, ad, buf)
assert n == len(pt) + 16
ct = aesccm.encrypt(nonce, pt, ad)
assert buf == ct
@pytest.mark.parametrize(
("ptlen", "buflen"), [(10, 25), (10, 27), (15, 30), (20, 37)]
)
def test_encrypt_into_buffer_incorrect_size(self, ptlen, buflen, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = os.urandom(12)
pt = b"x" * ptlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aesccm.encrypt_into(nonce, pt, None, buf)
def test_decrypt_into(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = os.urandom(12)
pt = b"decrypt me"
ad = b"additional"
ct = aesccm.encrypt(nonce, pt, ad)
buf = bytearray(len(pt))
n = aesccm.decrypt_into(nonce, ct, ad, buf)
assert n == len(pt)
assert buf == pt
@pytest.mark.parametrize(
("ctlen", "buflen"), [(26, 9), (26, 11), (31, 14), (36, 21)]
)
def test_decrypt_into_buffer_incorrect_size(self, ctlen, buflen, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = os.urandom(12)
ct = b"x" * ctlen
buf = bytearray(buflen)
with pytest.raises(ValueError, match="buffer must be"):
aesccm.decrypt_into(nonce, ct, None, buf)
def test_decrypt_into_invalid_tag(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
nonce = os.urandom(12)
pt = b"some data"
ad = b"additional"
ct = aesccm.encrypt(nonce, pt, ad)
# Corrupt the ciphertext
corrupted_ct = bytearray(ct)
corrupted_ct[0] ^= 1
buf = bytearray(len(pt))
with pytest.raises(InvalidTag):
aesccm.decrypt_into(nonce, bytes(corrupted_ct), ad, buf)
def test_decrypt_into_nonce_too_long(self, backend):
key = AESCCM.generate_key(128)
aesccm = AESCCM(key)
pt = b"encrypt me" * 6600
nonce = os.urandom(13)
buf = bytearray(len(pt))
with pytest.raises(ValueError, match="Data too long for nonce"):
aesccm.decrypt_into(nonce, pt, None, buf)
def _load_gcm_vectors():
vectors = _load_all_params(
os.path.join("ciphers", "AES", "GCM"),
[
"gcmDecrypt128.rsp",
"gcmDecrypt192.rsp",
"gcmDecrypt256.rsp",
"gcmEncryptExtIV128.rsp",
"gcmEncryptExtIV192.rsp",
"gcmEncryptExtIV256.rsp",
],
load_nist_vectors,
)
return [x for x in vectors if len(x["tag"]) == 32 and len(x["iv"]) >= 16]
| TestAESCCM |
python | jmcnamara__XlsxWriter | xlsxwriter/test/comparison/test_chart_axis50.py | {
"start": 315,
"end": 1387
} | class ____(ExcelComparisonTest):
"""
Test file created by XlsxWriter against a file created by Excel.
"""
def setUp(self):
self.set_filename("chart_axis50.xlsx")
def test_create_file(self):
"""Test the creation of a simple XlsxWriter file."""
workbook = Workbook(self.got_filename)
worksheet = workbook.add_worksheet()
chart = workbook.add_chart({"type": "column"})
chart.axis_ids = [47711360, 47712896]
data = [
[1, 2, 3, 4, 5],
[2, 4, 6, 8, 10],
[3, 6, 9, 12, 15],
]
worksheet.write_column("A1", data[0])
worksheet.write_column("B1", data[1])
worksheet.write_column("C1", data[2])
chart.add_series({"values": "=Sheet1!$A$1:$A$5"})
chart.add_series({"values": "=Sheet1!$B$1:$B$5"})
chart.add_series({"values": "=Sheet1!$C$1:$C$5"})
chart.set_y_axis({"visible": 0})
worksheet.insert_chart("E9", chart)
workbook.close()
self.assertExcelEqual()
| TestCompareXLSXFiles |
python | keras-team__keras | keras/src/constraints/constraints_test.py | {
"start": 309,
"end": 3513
} | class ____(testing.TestCase):
def test_max_norm(self):
constraint_fn = constraints.MaxNorm(2.0)
x = np.array([[0, 0, 0], [1.0, 0, 0], [3, 0, 0], [3, 3, 3]]).T
target = np.array(
[
[0, 0, 0],
[1.0, 0, 0],
[2.0, 0, 0],
[2.0 / np.sqrt(3), 2.0 / np.sqrt(3), 2.0 / np.sqrt(3)],
]
).T
output = constraint_fn(x)
self.assertAllClose(target, output)
def test_non_neg(self):
constraint_fn = constraints.NonNeg()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
self.assertTrue((np.min(output, axis=1) >= 0.0).all())
def test_unit_norm(self):
constraint_fn = constraints.UnitNorm()
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
self.assertAllClose(l2, 1.0)
def test_min_max_norm(self):
constraint_fn = constraints.MinMaxNorm(min_value=0.2, max_value=0.5)
output = constraint_fn(get_example_array())
output = backend.convert_to_numpy(output)
l2 = np.sqrt(np.sum(np.square(output), axis=0))
self.assertTrue(np.all(l2 >= 0.2))
self.assertTrue(np.all(l2 <= 0.5 + 1e-6))
def test_get_method(self):
obj = constraints.get("unit_norm")
self.assertTrue(obj, constraints.UnitNorm)
obj = constraints.get(None)
self.assertEqual(obj, None)
with self.assertRaises(ValueError):
constraints.get("typo")
def test_default_constraint_call(self):
constraint_fn = constraints.Constraint()
x = np.array([1.0, 2.0, 3.0])
output = constraint_fn(x)
self.assertAllClose(x, output)
def test_constraint_get_config(self):
constraint_fn = constraints.Constraint()
config = constraint_fn.get_config()
self.assertEqual(config, {})
def test_constraint_from_config(self):
constraint_fn = constraints.Constraint()
config = constraint_fn.get_config()
recreated_constraint_fn = constraints.Constraint.from_config(config)
self.assertIsInstance(recreated_constraint_fn, constraints.Constraint)
def test_max_norm_get_config(self):
constraint_fn = constraints.MaxNorm(max_value=3.0, axis=1)
config = constraint_fn.get_config()
expected_config = {"max_value": 3.0, "axis": 1}
self.assertEqual(config, expected_config)
def test_unit_norm_get_config(self):
constraint_fn = constraints.UnitNorm(axis=1)
config = constraint_fn.get_config()
expected_config = {"axis": 1}
self.assertEqual(config, expected_config)
def test_min_max_norm_get_config(self):
constraint_fn = constraints.MinMaxNorm(
min_value=0.5, max_value=2.0, rate=0.7, axis=1
)
config = constraint_fn.get_config()
expected_config = {
"min_value": 0.5,
"max_value": 2.0,
"rate": 0.7,
"axis": 1,
}
self.assertEqual(config, expected_config)
| ConstraintsTest |
python | doocs__leetcode | solution/1900-1999/1940.Longest Common Subsequence Between Sorted Arrays/Solution.py | {
"start": 0,
"end": 265
} | class ____:
def longestCommonSubsequence(self, arrays: List[List[int]]) -> List[int]:
cnt = [0] * 101
for row in arrays:
for x in row:
cnt[x] += 1
return [x for x, v in enumerate(cnt) if v == len(arrays)]
| Solution |
python | apache__airflow | providers/amazon/tests/unit/amazon/aws/triggers/test_base.py | {
"start": 1437,
"end": 4308
} | class ____:
def setup_method(self):
self.trigger = TestImplem(
serialized_fields={},
waiter_name="",
waiter_args={},
failure_message="",
status_message="",
status_queries=[],
return_value=None,
waiter_delay=0,
waiter_max_attempts=0,
aws_conn_id="",
)
def test_region_serialized(self):
self.trigger.region_name = "my_region"
_, args = self.trigger.serialize()
assert "region_name" in args
assert args["region_name"] == "my_region"
@pytest.mark.parametrize("verify", [True, False, pytest.param("/foo/bar.pem", id="path")])
def test_verify_serialized(self, verify):
self.trigger.verify = verify
_, args = self.trigger.serialize()
assert "verify" in args
assert args["verify"] == verify
@pytest.mark.parametrize(
"botocore_config",
[
pytest.param({"read_timeout": 10, "connect_timeout": 42, "keepalive": True}, id="non-empty-dict"),
pytest.param({}, id="empty-dict"),
],
)
def test_botocore_config_serialized(self, botocore_config):
self.trigger.botocore_config = botocore_config
_, args = self.trigger.serialize()
assert "botocore_config" in args
assert args["botocore_config"] == botocore_config
@pytest.mark.parametrize("param_name", ["region_name", "verify", "botocore_config"])
def test_hooks_args_not_serialized_if_omitted(self, param_name):
_, args = self.trigger.serialize()
assert param_name not in args
def test_region_name_not_serialized_if_empty_string(self):
"""
Compatibility with previous behaviour when empty string region name not serialised.
It would evaluate as None, however empty string it is not valid region name in boto3.
"""
self.trigger.region_name = ""
_, args = self.trigger.serialize()
assert "region_name" not in args
def test_serialize_extra_fields(self):
self.trigger.serialized_fields = {"foo": "bar", "foz": "baz"}
_, args = self.trigger.serialize()
assert "foo" in args
assert args["foo"] == "bar"
assert "foz" in args
assert args["foz"] == "baz"
@pytest.mark.asyncio
@mock.patch("airflow.providers.amazon.aws.triggers.base.async_wait")
async def test_run(self, wait_mock: MagicMock):
self.trigger.return_key = "hello"
self.trigger.return_value = "world"
generator = self.trigger.run()
res: TriggerEvent = await generator.asend(None)
wait_mock.assert_called_once()
assert isinstance(res.payload, dict)
assert res.payload["status"] == "success"
assert res.payload["hello"] == "world"
| TestAwsBaseWaiterTrigger |
python | getsentry__sentry | src/sentry/api/bases/organization_request_change.py | {
"start": 694,
"end": 829
} | class ____(OrganizationEndpoint):
permission_classes = (OrganizationRequestChangeEndpointPermission,)
| OrganizationRequestChangeEndpoint |
python | tensorflow__tensorflow | tensorflow/python/ops/quantized_ops_test.py | {
"start": 948,
"end": 4017
} | class ____(test.TestCase):
def __init__(self, method_name="runTest"):
super(QuantizedOpsTest, self).__init__(method_name)
def testQuantizeOp(self):
expected_output = [1, 1, 2, 127, 255, 255]
with self.session(use_gpu=False) as sess:
x = constant_op.constant(
[1.0, 1.25, 1.75, 127.0, 255.0, 500.0],
shape=[6],
dtype=dtypes.float32)
x_min = 0.0
x_max = 255.0
op = array_ops.quantize(x, x_min, x_max, dtypes.quint8, mode="MIN_FIRST")
value = self.evaluate(op)
self.assertArrayNear(expected_output, value.output, 0.1)
def testDequantizeOp(self):
expected_output = [1.0, 2.0, 4.0, 8.0, 16.0, 255.0]
inp = np.array([1, 2, 4, 8, 16, 255]).astype(np.uint8)
with self.session(use_gpu=False) as sess:
x = constant_op.constant(inp, shape=[6], dtype=dtypes.quint8)
x_min = 0.0
x_max = 255.0
op = array_ops.dequantize(x, x_min, x_max, mode="MIN_FIRST")
value = self.evaluate(op)
self.assertArrayNear(expected_output, value, 0.1)
def testAxis(self):
# Generates a tensor of the specified `shape` using values from `values`
# scaled by (slice_idx + 1) along `axis` dimension.
def scale_per_slice(shape, axis, values):
# Note: repeats the values if the shape is larger than values.
out = np.take(values, np.remainder(np.arange(np.prod(shape)),
len(values))).reshape(shape)
if axis is not None:
scale_shape = [1] * len(shape)
scale_shape[axis] = shape[axis]
out *= np.arange(1, shape[axis] + 1).reshape(scale_shape)
return out
shape = np.array([2, 3, 4, 5])
values = np.array([-1, -0.5, 0, 0.3, 0.8, 0.555, 0.5], dtype=np.float32)
quant_values = np.array([-128, -64, 0, 38, 102, 71, 64], dtype=np.int32)
for axis in [None, 0, 1, 2, 3]:
inputs = constant_op.constant(scale_per_slice(shape, axis, values))
expected_quantized = scale_per_slice(shape, None, quant_values)
if axis is None:
min_range, max_range = -1.0, 0.8
else:
num_slices = shape[axis]
min_range, max_range = [], []
for slice_idx in range(num_slices):
min_range.append(-1.0 * (slice_idx + 1))
max_range.append(0.8 * (slice_idx + 1))
quantized = self.evaluate(
array_ops.quantize(
inputs,
min_range,
max_range,
T=dtypes.qint8,
mode="SCALED",
round_mode="HALF_TO_EVEN",
axis=axis)).output
self.assertAllEqual(quantized, expected_quantized)
if axis is not None:
quantized = self.evaluate(
array_ops.quantize(
inputs,
min_range,
max_range,
T=dtypes.qint8,
mode="SCALED",
round_mode="HALF_TO_EVEN",
axis=(axis - 4))).output
self.assertAllClose(quantized, expected_quantized)
if __name__ == "__main__":
test.main()
| QuantizedOpsTest |
python | qdrant__qdrant-client | qdrant_client/http/models/models.py | {
"start": 99134,
"end": 99635
} | class ____(BaseModel, extra="forbid"):
positive: Optional[List["VectorInput"]] = Field(
default=None, description="Look for vectors closest to the vectors from these points"
)
negative: Optional[List["VectorInput"]] = Field(
default=None, description="Try to avoid vectors like the vector from these points"
)
strategy: Optional["RecommendStrategy"] = Field(
default=None, description="How to use the provided vectors to find the results"
)
| RecommendInput |
python | ZoranPandovski__al-go-rithms | games/Python/Pong Game/scoreboard.py | {
"start": 28,
"end": 690
} | class ____(Turtle):
def __init__(self):
super().__init__()
self.penup()
self.color("white")
self.hideturtle()
self.l_score=0
self.r_score=0
self.update_scoreboard()
def update_scoreboard(self):
self.clear()
self.goto(-90,200)
self.write(self.l_score,align="center",font=("Courier",70,"normal"))
self.goto(90,200)
self.write(self.r_score,align="center",font=("Courier",70,"normal"))
def l_point(self):
self.l_score += 1
self.update_scoreboard()
def r_point(self):
self.r_score += 1
self.update_scoreboard() | ScoreBoard |
python | sphinx-doc__sphinx | sphinx/util/docfields.py | {
"start": 11645,
"end": 18282
} | class ____:
"""Transforms field lists in "doc field" syntax into better-looking
equivalents, using the field type definitions given on a domain.
"""
typemap: dict[str, tuple[Field, bool]]
def __init__(self, directive: ObjectDescription[ObjDescT]) -> None:
self.directive = directive
self.typemap = directive.get_field_type_map()
def transform_all(self, node: addnodes.desc_content) -> None:
"""Transform all field list children of a node."""
# don't traverse, only handle field lists that are immediate children
for child in node:
if isinstance(child, nodes.field_list):
self.transform(child)
def transform(self, node: nodes.field_list) -> None:
"""Transform a single field list *node*."""
entries: list[nodes.field | _EntriesTriple] = []
groupindices: dict[str, int] = {}
types: dict[str, _FieldTypes] = {}
# step 1: traverse all fields and collect field types and content
for field in cast('list[nodes.field]', node):
self._transform_step_1(field, entries, types, groupindices)
new_list = self._transform_step_2(entries, types)
node.replace_self(new_list)
def _transform_step_1(
self,
field: nodes.field,
entries: list[nodes.field | _EntriesTriple],
types: dict[str, _FieldTypes],
group_indices: dict[str, int],
) -> None:
assert len(field) == 2
field_name = cast('nodes.field_name', field[0])
field_body = cast('nodes.field_body', field[1])
try:
# split into field type and argument
fieldtype_name, fieldarg = field_name.astext().split(None, maxsplit=1)
except ValueError:
# maybe an argument-less field type?
fieldtype_name, fieldarg = field_name.astext(), ''
typedesc, is_typefield = self.typemap.get(fieldtype_name, (None, None))
# collect the content, trying not to keep unnecessary paragraphs
if _is_single_paragraph(field_body):
paragraph = cast('nodes.paragraph', field_body[0])
content = paragraph.children
else:
content = field_body.children
# sort out unknown fields
if typedesc is None or typedesc.has_arg != bool(fieldarg):
# either the field name is unknown, or the argument doesn't
# match the spec; capitalize field name and be done with it
new_fieldname = fieldtype_name[0:1].upper() + fieldtype_name[1:]
if fieldarg:
new_fieldname += ' ' + fieldarg
field_name[0] = nodes.Text(new_fieldname)
entries.append(field)
# but if this has a type then we can at least link it
if (
typedesc
and is_typefield
and content
and len(content) == 1
and isinstance(content[0], nodes.Text)
):
typed_field = cast('TypedField', typedesc)
target = content[0].astext()
xrefs = typed_field.make_xrefs(
typed_field.typerolename,
self.directive.domain or '',
target,
contnode=content[0],
env=self.directive.env,
)
if _is_single_paragraph(field_body):
paragraph = cast('nodes.paragraph', field_body[0])
paragraph.clear()
paragraph.extend(xrefs)
else:
field_body.clear()
field_body += nodes.paragraph('', '', *xrefs)
return
typename = typedesc.name
# if the field specifies a type, put it in the types collection
if is_typefield:
# filter out only inline nodes; others will result in invalid
# markup being written out
content = [n for n in content if isinstance(n, (nodes.Inline, nodes.Text))]
if content:
types.setdefault(typename, {})[fieldarg] = content
return
# also support syntax like ``:param type name:``
if typedesc.is_typed:
try:
argtype, argname = fieldarg.rsplit(None, 1)
except ValueError:
pass
else:
types.setdefault(typename, {})[argname] = [nodes.Text(argtype)]
fieldarg = argname
translatable_content = nodes.inline(field_body.rawsource, translatable=True)
translatable_content.document = field_body.parent.document
translatable_content.source = field_body.parent.source
translatable_content.line = field_body.parent.line
translatable_content += content
# grouped entries need to be collected in one entry, while others
# get one entry per field
if typedesc.is_grouped:
if typename in group_indices:
group = cast(
'tuple[Field, list[_FieldEntry], Node]',
entries[group_indices[typename]],
)
else:
group_indices[typename] = len(entries)
group = (typedesc, [], field)
entries.append(group)
new_entry = typedesc.make_entry(fieldarg, [translatable_content])
group[1].append(new_entry)
else:
new_entry = typedesc.make_entry(fieldarg, [translatable_content])
entries.append((typedesc, new_entry, field))
def _transform_step_2(
self,
entries: list[nodes.field | _EntriesTriple],
types: dict[str, _FieldTypes],
) -> nodes.field_list:
# step 2: all entries are collected, construct the new field list
new_list = nodes.field_list()
for entry in entries:
if isinstance(entry, nodes.field):
# pass-through old field
new_list += entry
else:
fieldtype, items, location = entry
fieldtypes = types.get(fieldtype.name, {})
env = self.directive.env
inliner = self.directive.state.inliner
domain = self.directive.domain or ''
new_list += fieldtype.make_field(
fieldtypes,
domain,
items, # type: ignore[arg-type]
env=env,
inliner=inliner,
location=location,
)
return new_list
| DocFieldTransformer |
python | doocs__leetcode | solution/1100-1199/1119.Remove Vowels from a String/Solution.py | {
"start": 0,
"end": 116
} | class ____:
def removeVowels(self, s: str) -> str:
return "".join(c for c in s if c not in "aeiou")
| Solution |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.