language stringclasses 1 value | repo stringclasses 346 values | path stringlengths 6 201 | class_span dict | source stringlengths 21 2.38M | target stringlengths 1 96 |
|---|---|---|---|---|---|
python | matplotlib__matplotlib | tools/stubtest.py | {
"start": 183,
"end": 4459
} | class ____(ast.NodeVisitor):
def __init__(self, filepath, output, existing_allowed):
self.filepath = filepath
self.context = list(filepath.with_suffix("").relative_to(lib).parts)
self.output = output
self.existing_allowed = existing_allowed
def _is_already_allowed(self, parts):
# Skip outputting a path if it's already allowed before.
candidates = ['.'.join(parts[:s]) for s in range(1, len(parts))]
for allow in self.existing_allowed:
if any(allow.fullmatch(path) for path in candidates):
return True
return False
def visit_FunctionDef(self, node):
# delete_parameter adds a private sentinel value that leaks
# we do not want that sentinel value in the type hints but it breaks typing
# Does not apply to variadic arguments (args/kwargs)
for dec in node.decorator_list:
if "delete_parameter" in ast.unparse(dec):
deprecated_arg = dec.args[1].value
if (
node.args.vararg is not None
and node.args.vararg.arg == deprecated_arg
):
continue
if (
node.args.kwarg is not None
and node.args.kwarg.arg == deprecated_arg
):
continue
parents = []
if hasattr(node, "parent"):
parent = node.parent
while hasattr(parent, "parent") and not isinstance(
parent, ast.Module
):
parents.insert(0, parent.name)
parent = parent.parent
parts = [*self.context, *parents, node.name]
if not self._is_already_allowed(parts):
self.output.write("\\.".join(parts) + "\n")
break
def visit_ClassDef(self, node):
for dec in node.decorator_list:
if "define_aliases" in ast.unparse(dec):
parents = []
if hasattr(node, "parent"):
parent = node.parent
while hasattr(parent, "parent") and not isinstance(
parent, ast.Module
):
parents.insert(0, parent.name)
parent = parent.parent
aliases = ast.literal_eval(dec.args[0])
# Written as a regex rather than two lines to avoid unused entries
# for setters on items with only a getter
for substitutions in aliases.values():
parts = self.context + parents + [node.name]
for a in substitutions:
if not (self._is_already_allowed([*parts, f"get_{a}"]) and
self._is_already_allowed([*parts, f"set_{a}"])):
self.output.write("\\.".join([*parts, f"[gs]et_{a}\n"]))
for child in ast.iter_child_nodes(node):
self.visit(child)
existing_allowed = []
with (root / 'ci/mypy-stubtest-allowlist.txt').open() as f:
for line in f:
line, _, _ = line.partition('#')
line = line.strip()
if line:
existing_allowed.append(re.compile(line))
with tempfile.TemporaryDirectory() as d:
p = pathlib.Path(d) / "allowlist.txt"
with p.open("wt") as f:
for path in mpl.glob("**/*.py"):
v = Visitor(path, f, existing_allowed)
tree = ast.parse(path.read_text())
# Assign parents to tree so they can be backtraced
for node in ast.walk(tree):
for child in ast.iter_child_nodes(node):
child.parent = node
v.visit(tree)
proc = subprocess.run(
[
"stubtest",
"--mypy-config-file=pyproject.toml",
"--ignore-disjoint-bases",
"--allowlist=ci/mypy-stubtest-allowlist.txt",
f"--allowlist={p}",
"matplotlib",
],
cwd=root,
env=os.environ | {"MPLBACKEND": "agg"},
)
try:
os.unlink(f.name)
except OSError:
pass
sys.exit(proc.returncode)
| Visitor |
python | networkx__networkx | networkx/classes/reportviews.py | {
"start": 38504,
"end": 41643
} | class ____(OutEdgeView):
"""A EdgeView class for edges of a Graph
This densely packed View allows iteration over edges, data lookup
like a dict and set operations on edges represented by node-tuples.
In addition, edge data can be controlled by calling this object
possibly creating an EdgeDataView. Typically edges are iterated over
and reported as `(u, v)` node tuples or `(u, v, key)` node/key tuples
for multigraphs. Those edge representations can also be using to
lookup the data dict for any edge. Set operations also are available
where those tuples are the elements of the set.
Calling this object with optional arguments `data`, `default` and `keys`
controls the form of the tuple (see EdgeDataView). Optional argument
`nbunch` allows restriction to edges only involving certain nodes.
If `data is False` (the default) then iterate over 2-tuples `(u, v)`.
If `data is True` iterate over 3-tuples `(u, v, datadict)`.
Otherwise iterate over `(u, v, datadict.get(data, default))`.
For Multigraphs, if `keys is True`, replace `u, v` with `u, v, key` above.
Parameters
==========
graph : NetworkX graph-like class
nbunch : (default= all nodes in graph) only report edges with these nodes
keys : (only for MultiGraph. default=False) report edge key in tuple
data : bool or string (default=False) see above
default : object (default=None)
Examples
========
>>> G = nx.path_graph(4)
>>> EV = G.edges()
>>> (2, 3) in EV
True
>>> for u, v in EV:
... print((u, v))
(0, 1)
(1, 2)
(2, 3)
>>> assert EV & {(1, 2), (3, 4)} == {(1, 2)}
>>> EVdata = G.edges(data="color", default="aqua")
>>> G.add_edge(2, 3, color="blue")
>>> assert (2, 3, "blue") in EVdata
>>> for u, v, c in EVdata:
... print(f"({u}, {v}) has color: {c}")
(0, 1) has color: aqua
(1, 2) has color: aqua
(2, 3) has color: blue
>>> EVnbunch = G.edges(nbunch=2)
>>> assert (2, 3) in EVnbunch
>>> assert (0, 1) not in EVnbunch
>>> for u, v in EVnbunch:
... assert u == 2 or v == 2
>>> MG = nx.path_graph(4, create_using=nx.MultiGraph)
>>> EVmulti = MG.edges(keys=True)
>>> (2, 3, 0) in EVmulti
True
>>> (2, 3) in EVmulti # 2-tuples work even when keys is True
True
>>> key = MG.add_edge(2, 3)
>>> for u, v, k in EVmulti:
... print((u, v, k))
(0, 1, 0)
(1, 2, 0)
(2, 3, 0)
(2, 3, 1)
"""
__slots__ = ()
dataview = EdgeDataView
def __len__(self):
num_nbrs = (len(nbrs) + (n in nbrs) for n, nbrs in self._nodes_nbrs())
return sum(num_nbrs) // 2
def __iter__(self):
seen = {}
for n, nbrs in self._nodes_nbrs():
for nbr in list(nbrs):
if nbr not in seen:
yield (n, nbr)
seen[n] = 1
del seen
def __contains__(self, e):
try:
u, v = e[:2]
return v in self._adjdict[u] or u in self._adjdict[v]
except (KeyError, ValueError):
return False
| EdgeView |
python | encode__django-rest-framework | tests/test_negotiation.py | {
"start": 3218,
"end": 3680
} | class ____(TestCase):
def setUp(self):
self.negotiator = BaseContentNegotiation()
def test_raise_error_for_abstract_select_parser_method(self):
with pytest.raises(NotImplementedError):
self.negotiator.select_parser(None, None)
def test_raise_error_for_abstract_select_renderer_method(self):
with pytest.raises(NotImplementedError):
self.negotiator.select_renderer(None, None)
| BaseContentNegotiationTests |
python | catalyst-team__catalyst | tests/catalyst/callbacks/test_control_flow.py | {
"start": 155,
"end": 284
} | class ____:
def __init__(self, loader_key, epoch):
self.loader_key = loader_key
self.epoch_step = epoch
| _Runner |
python | gevent__gevent | src/gevent/testing/flaky.py | {
"start": 1662,
"end": 1795
} | class ____(FlakyTest):
"""
Use this when the flaky test is definitely caused by a race condition.
"""
| FlakyTestRaceCondition |
python | coleifer__peewee | tests/cockroachdb.py | {
"start": 879,
"end": 11800
} | class ____(ModelTestCase):
@requires_models(KV)
def test_retry_transaction_ok(self):
@self.database.retry_transaction()
def succeeds(db):
k1 = KV.create(k='k1', v=1)
k2 = KV.create(k='k2', v=2)
return [k1.id, k2.id]
id_list = succeeds()
self.assertEqual(KV.select().count(), 2)
kv_list = [kv.id for kv in KV.select().order_by(KV.k)]
self.assertEqual(kv_list, id_list)
@requires_models(KV)
def test_retry_transfer_example(self):
k1 = KV.create(k='k1', v=100)
k2 = KV.create(k='k2', v=1)
def transfer_funds(from_k, to_k, amt):
query = KV.select().where(KV.k.in_((from_k, to_k)))
ka, kb = list(query)
if from_k != ka.k:
ka, kb = kb, ka # Swap order.
if ka.v < amt:
return False, ka.v, kb.v
from_v, = (KV
.update(v=KV.v - amt)
.where(KV.k == from_k)
.returning(KV.v)
.execute())
to_v, = (KV
.update(v=KV.v + amt)
.where(KV.k == to_k)
.returning(KV.v)
.execute())
return True, from_v.v, to_v.v
def thunk(db_ref):
return transfer_funds('k1', 'k2', 90)
self.assertEqual(run_transaction(self.database, thunk), (True, 10, 91))
def thunk(db_ref):
return transfer_funds('k1', 'k2', 5)
self.assertEqual(run_transaction(self.database, thunk), (True, 5, 96))
def thunk(db_ref):
return transfer_funds('k1', 'k2', 6)
self.assertEqual(run_transaction(self.database, thunk), (False, 5, 96))
@requires_models(KV)
def test_retry_transfer_example2(self):
k1 = KV.create(k='k1', v=100)
k2 = KV.create(k='k2', v=1)
def transfer_funds(from_k, to_k, amount):
def thunk(db_ref):
src, dest = KV.select().where(KV.k.in_([from_k, to_k]))
if src.k != from_k:
src, dest = dest, src
if src.v < amount:
return False, src.v, dest.v
src, = (KV
.update(v=KV.v - amount)
.where(KV.k == from_k)
.returning(KV.v)
.execute())
dest, = (KV
.update(v=KV.v + amount)
.where(KV.k == to_k)
.returning(KV.v)
.execute())
return True, src.v, dest.v
return run_transaction(self.database, thunk, max_attempts=10)
self.assertEqual(transfer_funds('k1', 'k2', 90), (True, 10, 91))
self.assertEqual(transfer_funds('k1', 'k2', 11), (False, 10, 91))
self.assertEqual(transfer_funds('k1', 'k2', 10), (True, 0, 101))
@requires_models(KV)
def test_retry_transaction_integrityerror(self):
KV.create(k='kx', v=0)
@self.database.retry_transaction()
def fails(db):
KV.create(k='k1', v=1)
KV.create(k='kx', v=1)
with self.assertRaises(IntegrityError):
fails()
self.assertEqual(KV.select().count(), 1)
kv = KV.get(KV.k == 'kx')
self.assertEqual(kv.v, 0)
@requires_models(KV)
def test_run_transaction_helper(self):
def succeeds(db):
KV.insert_many([('k%s' % i, i) for i in range(10)]).execute()
run_transaction(self.database, succeeds)
self.assertEqual([(kv.k, kv.v) for kv in KV.select().order_by(KV.k)],
[('k%s' % i, i) for i in range(10)])
@requires_models(KV)
def test_cannot_nest_run_transaction(self):
def insert_row(db):
KV.create(k='k1', v=1)
with self.database.atomic():
self.assertRaises(Exception, run_transaction,
self.database, insert_row)
self.assertEqual(KV.select().count(), 0)
@requires_models(User)
def test_retry_transaction_docs_example(self):
def create_user(username):
def thunk(db_ref):
return User.create(username=username)
return self.database.run_transaction(thunk, max_attempts=5)
users = [create_user(u) for u in 'abc']
self.assertEqual([u.username for u in users], ['a', 'b', 'c'])
query = User.select().order_by(User.username)
self.assertEqual([u.username for u in query], ['a', 'b', 'c'])
@requires_models(KV)
def test_retry_transaction_decorator(self):
@self.database.retry_transaction()
def retry_decorator(db):
content = []
for i in range(5):
kv = KV.create(k='k%s' % i, v=i)
content.append(kv.k)
return content
self.assertEqual(retry_decorator(), ['k0', 'k1', 'k2', 'k3', 'k4'])
@requires_models(Arr)
def test_array_field(self):
a1 = Arr.create(title='a1', tags=['t1', 't2'])
a2 = Arr.create(title='a2', tags=['t2', 't3'])
# Ensure we can read an array back.
a1_db = Arr.get(Arr.title == 'a1')
self.assertEqual(a1_db.tags, ['t1', 't2'])
# Ensure we can filter on arrays.
a2_db = Arr.get(Arr.tags == ['t2', 't3'])
self.assertEqual(a2_db.id, a2.id)
# Item lookups.
a1_db = Arr.get(Arr.tags[1] == 't2')
self.assertEqual(a1_db.id, a1.id)
self.assertRaises(Arr.DoesNotExist, Arr.get, Arr.tags[2] == 'x')
@requires_models(Arr)
def test_array_field_search(self):
def assertAM(where, id_list):
query = Arr.select().where(where).order_by(Arr.title)
self.assertEqual([a.id for a in query], id_list)
data = (
('a1', ['t1', 't2']),
('a2', ['t2', 't3']),
('a3', ['t3', 't4']))
id_list = Arr.insert_many(data).execute()
a1, a2, a3 = [pk for pk, in id_list]
assertAM(Value('t2') == fn.ANY(Arr.tags), [a1, a2])
assertAM(Value('t1') == fn.Any(Arr.tags), [a1])
assertAM(Value('tx') == fn.Any(Arr.tags), [])
# Use the contains operator explicitly.
assertAM(SQL("tags::text[] @> ARRAY['t2']"), [a1, a2])
# Use the porcelain.
assertAM(Arr.tags.contains('t2'), [a1, a2])
assertAM(Arr.tags.contains('t3'), [a2, a3])
assertAM(Arr.tags.contains('t1', 't2'), [a1])
assertAM(Arr.tags.contains('t3', 't4'), [a3])
assertAM(Arr.tags.contains('t2', 't3', 't4'), [])
assertAM(Arr.tags.contains_any('t2'), [a1, a2])
assertAM(Arr.tags.contains_any('t3'), [a2, a3])
assertAM(Arr.tags.contains_any('t1', 't2'), [a1, a2])
assertAM(Arr.tags.contains_any('t3', 't4'), [a2, a3])
assertAM(Arr.tags.contains_any('t2', 't3', 't4'), [a1, a2, a3])
@requires_models(Arr)
def test_array_field_index(self):
a1 = Arr.create(title='a1', tags=['a1', 'a2'])
a2 = Arr.create(title='a2', tags=['a2', 'a3', 'a4', 'a5'])
# NOTE: CRDB does not support array slicing.
query = (Arr
.select(Arr.tags[1].alias('st'))
.order_by(Arr.title))
self.assertEqual([a.st for a in query], ['a2', 'a3'])
@requires_models(UID)
def test_uuid_key_field(self):
# UUID primary-key is automatically populated and returned, and is of
# the correct type.
u1 = UID.create(title='u1')
self.assertTrue(u1.id is not None)
self.assertTrue(isinstance(u1.id, uuid.UUID))
# Bulk-insert works as expected.
id_list = UID.insert_many([('u2',), ('u3',)]).execute()
u2_id, u3_id = [pk for pk, in id_list]
self.assertTrue(isinstance(u2_id, uuid.UUID))
# We can perform lookups using UUID() type.
u2 = UID.get(UID.id == u2_id)
self.assertEqual(u2.title, 'u2')
# Get the UUID hex and query using that.
u3 = UID.get(UID.id == u3_id.hex)
self.assertEqual(u3.title, 'u3')
@requires_models(RID)
def test_rowid_field(self):
r1 = RID.create(title='r1')
self.assertTrue(r1.id is not None)
# Bulk-insert works as expected.
id_list = RID.insert_many([('r2',), ('r3',)]).execute()
r2_id, r3_id = [pk for pk, in id_list]
r2 = RID.get(RID.id == r2_id)
self.assertEqual(r2.title, 'r2')
@requires_models(KV)
def test_readonly_transaction(self):
kv = KV.create(k='k1', v=1)
# Table doesn't exist yet.
with self.assertRaises((ProgrammingError, InternalError)):
with self.database.atomic('-10s'):
kv_db = KV.get(KV.k == 'k1')
# Cannot write in a read-only transaction
with self.assertRaises((ProgrammingError, InternalError)):
with self.database.atomic(datetime.datetime.now()):
KV.create(k='k2', v=2)
# Without system time there are no issues.
with self.database.atomic():
kv_db = KV.get(KV.k == 'k1')
self.assertEqual(kv.id, kv_db.id)
@requires_models(KV)
def test_transaction_priority(self):
with self.database.atomic(priority='HIGH'):
KV.create(k='k1', v=1)
with self.assertRaises(IntegrityError):
with self.database.atomic(priority='LOW'):
KV.create(k='k1', v=2)
with self.assertRaises(ValueError):
with self.database.atomic(priority='HUH'):
KV.create(k='k2', v=2)
self.assertEqual(KV.select().count(), 1)
kv = KV.get()
self.assertEqual((kv.k, kv.v), ('k1', 1))
@requires_models(UID, UIDNote)
def test_uuid_key_as_fk(self):
# This is covered thoroughly elsewhere, but added here just for fun.
u1, u2, u3 = [UID.create(title='u%s' % i) for i in (1, 2, 3)]
UIDNote.create(uid=u1, note='u1-1')
UIDNote.create(uid=u2, note='u2-1')
UIDNote.create(uid=u2, note='u2-2')
with self.assertQueryCount(1):
query = (UIDNote
.select(UIDNote, UID)
.join(UID)
.where(UID.title == 'u2')
.order_by(UIDNote.note))
self.assertEqual([(un.note, un.uid.title) for un in query],
[('u2-1', 'u2'), ('u2-2', 'u2')])
query = (UID
.select(UID, fn.COUNT(UIDNote.id).alias('note_count'))
.join(UIDNote, JOIN.LEFT_OUTER)
.group_by(UID)
.order_by(fn.COUNT(UIDNote.id).desc()))
self.assertEqual([(u.title, u.note_count) for u in query],
[('u2', 2), ('u1', 1), ('u3', 0)])
@skip_unless(IS_CRDB)
| TestCockroachDatabase |
python | pypa__setuptools | setuptools/_vendor/zipp/__init__.py | {
"start": 1412,
"end": 1832
} | class ____:
"""
Mix-in to save the initialization state for pickling.
"""
def __init__(self, *args, **kwargs):
self.__args = args
self.__kwargs = kwargs
super().__init__(*args, **kwargs)
def __getstate__(self):
return self.__args, self.__kwargs
def __setstate__(self, state):
args, kwargs = state
super().__init__(*args, **kwargs)
| InitializedState |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/asset_health.py | {
"start": 5572,
"end": 5757
} | class ____(graphene.ObjectType):
lastMaterializedTimestamp = graphene.Field(graphene.Float)
class Meta:
name = "AssetHealthFreshnessMeta"
| GrapheneAssetHealthFreshnessMeta |
python | tensorflow__tensorflow | tensorflow/core/function/trace_type/serialization_test.py | {
"start": 1456,
"end": 2162
} | class ____(serialization.Serializable):
def __init__(self, *elements):
self.elements = elements
@classmethod
def experimental_type_proto(cls):
return serialization_test_pb2.MyCompositeRepresentation
@classmethod
def experimental_from_proto(cls, proto):
return MyCompositeClass(
*[serialization.deserialize(element) for element in proto.elements])
def experimental_as_proto(self):
serialized_elements = [
serialization.serialize(element) for element in self.elements
]
proto = serialization_test_pb2.MyCompositeRepresentation(
elements=serialized_elements)
return proto
serialization.register_serializable(MyCompositeClass)
| MyCompositeClass |
python | pydantic__pydantic | tests/mypy/modules/plugin_success.py | {
"start": 6695,
"end": 6794
} | class ____(BaseModel):
my_field: str = Field(alias='my_alias')
m4 = Model4(my_alias='foo')
| Model4 |
python | apache__airflow | providers/amazon/src/airflow/providers/amazon/aws/operators/rds.py | {
"start": 24776,
"end": 29639
} | class ____(RdsBaseOperator):
"""
Creates an RDS DB instance.
.. seealso::
For more information on how to use this operator, take a look at the guide:
:ref:`howto/operator:RdsCreateDbInstanceOperator`
:param db_instance_identifier: The DB instance identifier, must start with a letter and
contain from 1 to 63 letters, numbers, or hyphens
:param db_instance_class: The compute and memory capacity of the DB instance, for example db.m5.large
:param engine: The name of the database engine to be used for this instance
:param rds_kwargs: Named arguments to pass to boto3 RDS client function ``create_db_instance``
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/rds.html#RDS.Client.create_db_instance
:param wait_for_completion: If True, waits for creation of the DB instance to complete. (default: True)
:param waiter_delay: Time (in seconds) to wait between two consecutive calls to check DB instance state
:param waiter_max_attempts: The maximum number of attempts to check DB instance state
:param deferrable: If True, the operator will wait asynchronously for the DB instance to be created.
This implies waiting for completion. This mode requires aiobotocore module to be installed.
(default: False)
:param region_name: AWS region_name. If not specified then the default boto3 behaviour is used.
:param verify: Whether or not to verify SSL certificates. See:
https://boto3.amazonaws.com/v1/documentation/api/latest/reference/core/session.html
:param botocore_config: Configuration dictionary (key-values) for botocore client. See:
https://botocore.amazonaws.com/v1/documentation/api/latest/reference/config.html
"""
template_fields = aws_template_fields(
"db_instance_identifier", "db_instance_class", "engine", "rds_kwargs"
)
def __init__(
self,
*,
db_instance_identifier: str,
db_instance_class: str,
engine: str,
rds_kwargs: dict | None = None,
wait_for_completion: bool = True,
deferrable: bool = conf.getboolean("operators", "default_deferrable", fallback=False),
waiter_delay: int = 30,
waiter_max_attempts: int = 60,
**kwargs,
):
super().__init__(**kwargs)
self.db_instance_identifier = db_instance_identifier
self.db_instance_class = db_instance_class
self.engine = engine
self.rds_kwargs = rds_kwargs or {}
self.wait_for_completion = False if deferrable else wait_for_completion
self.deferrable = deferrable
self.waiter_delay = waiter_delay
self.waiter_max_attempts = waiter_max_attempts
def execute(self, context: Context) -> str:
self.log.info("Creating new DB instance %s", self.db_instance_identifier)
create_db_instance = self.hook.conn.create_db_instance(
DBInstanceIdentifier=self.db_instance_identifier,
DBInstanceClass=self.db_instance_class,
Engine=self.engine,
**self.rds_kwargs,
)
if self.deferrable:
self.defer(
trigger=RdsDbAvailableTrigger(
db_identifier=self.db_instance_identifier,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
aws_conn_id=self.aws_conn_id,
region_name=self.region_name,
# ignoring type because create_db_instance is a dict
response=create_db_instance, # type: ignore[arg-type]
db_type=RdsDbType.INSTANCE,
),
method_name="execute_complete",
timeout=timedelta(seconds=self.waiter_delay * self.waiter_max_attempts),
)
if self.wait_for_completion:
waiter = self.hook.conn.get_waiter("db_instance_available")
wait(
waiter=waiter,
waiter_delay=self.waiter_delay,
waiter_max_attempts=self.waiter_max_attempts,
args={"DBInstanceIdentifier": self.db_instance_identifier},
failure_message="DB instance creation failed",
status_message="DB Instance status is",
status_args=["DBInstances[0].DBInstanceStatus"],
)
return json.dumps(create_db_instance, default=str)
def execute_complete(self, context: Context, event: dict[str, Any] | None = None) -> str:
validated_event = validate_execute_complete_event(event)
if validated_event["status"] != "success":
raise AirflowException(f"DB instance creation failed: {validated_event}")
return json.dumps(validated_event["response"], default=str)
| RdsCreateDbInstanceOperator |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/generated/sources.py | {
"start": 42289,
"end": 42894
} | class ____(GeneratedAirbyteSource):
@public
def __init__(self, name: str, docker_username: str):
"""Airbyte Source for Dockerhub.
Documentation can be found at https://docs.airbyte.com/integrations/sources/dockerhub
Args:
name (str): The name of the destination.
docker_username (str): Username of DockerHub person or organization (for https://hub.docker.com/v2/repositories/USERNAME/ API call)
"""
self.docker_username = check.str_param(docker_username, "docker_username")
super().__init__("Dockerhub", name)
| DockerhubSource |
python | getsentry__sentry | tests/sentry/workflow_engine/handlers/condition/test_event_frequency_handlers.py | {
"start": 11059,
"end": 11495
} | class ____(TestEventFrequencyCountCondition):
def setUp(self) -> None:
super().setUp()
self.condition = Condition.EVENT_UNIQUE_USER_FREQUENCY_COUNT
self.payload: dict[str, str | int | float] = {
"interval": "1h",
"id": EventUniqueUserFrequencyCondition.id,
"value": 50,
"comparisonType": ComparisonType.COUNT,
}
| TestEventUniqueUserFrequencyCountCondition |
python | sqlalchemy__sqlalchemy | lib/sqlalchemy/testing/suite/test_insert.py | {
"start": 783,
"end": 2573
} | class ____(fixtures.TablesTest):
run_deletes = "each"
__backend__ = True
__requires__ = "implements_get_lastrowid", "autoincrement_insert"
@classmethod
def define_tables(cls, metadata):
Table(
"autoinc_pk",
metadata,
Column(
"id", Integer, primary_key=True, test_needs_autoincrement=True
),
Column("data", String(50)),
implicit_returning=False,
)
Table(
"manual_pk",
metadata,
Column("id", Integer, primary_key=True, autoincrement=False),
Column("data", String(50)),
implicit_returning=False,
)
def _assert_round_trip(self, table, conn):
row = conn.execute(table.select()).first()
eq_(
row,
(
conn.dialect.default_sequence_base,
"some data",
),
)
def test_autoincrement_on_insert(self, connection):
connection.execute(
self.tables.autoinc_pk.insert(), dict(data="some data")
)
self._assert_round_trip(self.tables.autoinc_pk, connection)
def test_last_inserted_id(self, connection):
r = connection.execute(
self.tables.autoinc_pk.insert(), dict(data="some data")
)
pk = connection.scalar(select(self.tables.autoinc_pk.c.id))
eq_(r.inserted_primary_key, (pk,))
@requirements.dbapi_lastrowid
def test_native_lastrowid_autoinc(self, connection):
r = connection.execute(
self.tables.autoinc_pk.insert(), dict(data="some data")
)
lastrowid = r.lastrowid
pk = connection.scalar(select(self.tables.autoinc_pk.c.id))
eq_(lastrowid, pk)
| LastrowidTest |
python | wandb__wandb | wandb/sdk/interface/summary_record.py | {
"start": 157,
"end": 1044
} | class ____:
"""Encodes a diff -- analogous to the SummaryRecord protobuf message."""
update: t.List["SummaryItem"]
remove: t.List["SummaryItem"]
def __init__(self):
self.update = []
self.remove = []
def __str__(self):
s = "SummaryRecord:\n Update:\n "
s += "\n ".join([str(item) for item in self.update])
s += "\n Remove:\n "
s += "\n ".join([str(item) for item in self.remove])
s += "\n"
return s
__repr__ = __str__
def _add_next_parent(self, parent_key):
with_next_parent = SummaryRecord()
with_next_parent.update = [
item._add_next_parent(parent_key) for item in self.update
]
with_next_parent.remove = [
item._add_next_parent(parent_key) for item in self.remove
]
return with_next_parent
| SummaryRecord |
python | huggingface__transformers | src/transformers/models/omdet_turbo/modeling_omdet_turbo.py | {
"start": 19357,
"end": 20260
} | class ____(nn.Module):
"""
RepVGG architecture block introduced by the work "RepVGG: Making VGG-style ConvNets Great Again".
"""
def __init__(self, config: OmDetTurboConfig):
super().__init__()
activation = config.csp_activation
hidden_channels = int(config.encoder_hidden_dim * config.hidden_expansion)
self.conv1 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 3, 1, padding=1)
self.conv2 = OmDetTurboConvNormLayer(config, hidden_channels, hidden_channels, 1, 1, padding=0)
self.activation = nn.Identity() if activation is None else ACT2CLS[activation]()
def forward(self, x):
y = self.conv1(x) + self.conv2(x)
return self.activation(y)
# Copied from transformers.models.rt_detr.modeling_rt_detr.RTDetrCSPRepLayer with RTDetr->OmDetTurbo, activation_function->csp_activation
| OmDetTurboRepVggBlock |
python | huggingface__transformers | tests/models/mllama/test_processing_mllama.py | {
"start": 957,
"end": 17671
} | class ____(ProcessorTesterMixin, unittest.TestCase):
processor_class = MllamaProcessor
model_id = "hf-internal-testing/mllama-11b"
@classmethod
def _setup_test_attributes(cls, processor):
cls.image1 = Image.new("RGB", (224, 220))
cls.image2 = Image.new("RGB", (512, 128))
cls.image_token = processor.image_token
cls.image_token_id = processor.image_token_id
cls.pad_token_id = processor.tokenizer.pad_token_id
cls.bos_token = processor.bos_token
cls.bos_token_id = processor.tokenizer.bos_token_id
@staticmethod
def prepare_processor_dict():
return {"chat_template": "{% for message in messages %}{% if loop.index0 == 0 %}{{ bos_token }}{% endif %}{{ '<|start_header_id|>' + message['role'] + '<|end_header_id|>\n\n' }}{% if message['content'] is string %}{{ message['content'] }}{% else %}{% for content in message['content'] %}{% if content['type'] == 'image' %}{{ '<|image|>' }}{% elif content['type'] == 'text' %}{{ content['text'] }}{% endif %}{% endfor %}{% endif %}{{ '<|eot_id|>' }}{% endfor %}{% if add_generation_prompt %}{{ '<|start_header_id|>assistant<|end_header_id|>\n\n' }}{% endif %}"} # fmt: skip
@unittest.skip("MllamaProcessor does not return tensors")
def test_image_processor_defaults(self):
pass
@unittest.skip("MllamaProcessor modifies input text")
def test_tokenizer_defaults(self):
pass
# Override as Mllama needs images to be an explicitly nested batch
def prepare_image_inputs(self, batch_size: int | None = None):
"""This function prepares a list of PIL images for testing"""
images = super().prepare_image_inputs(batch_size)
if isinstance(images, (list, tuple)):
images = [[image] for image in images]
return images
def test_chat_template_is_saved(self):
processor_loaded = self.processor_class.from_pretrained(self.tmpdirname)
processor_dict_loaded = json.loads(processor_loaded.to_json_string())
# chat templates aren't serialized to json in processors
self.assertFalse("chat_template" in processor_dict_loaded)
# they have to be saved as separate file and loaded back from that file
# so we check if the same template is loaded
processor_dict = self.prepare_processor_dict()
self.assertTrue(processor_loaded.chat_template == processor_dict.get("chat_template", None))
def test_apply_chat_template(self):
# Message contains content which a mix of lists with images and image urls and string
messages = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "image"},
{"type": "text", "text": "What do these images show?"},
],
},
{
"role": "assistant",
"content": [
{"type": "text", "text": "The first image shows the statue of Liberty in New York."},
],
},
{
"role": "user",
"content": [
{"type": "text", "text": "And who is that?"},
],
},
]
processor = MllamaProcessor.from_pretrained(self.tmpdirname)
rendered = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
expected_rendered = (
"<|begin_of_text|>"
"<|start_header_id|>user<|end_header_id|>\n\n"
"<|image|><|image|>What do these images show?"
"<|eot_id|>"
"<|start_header_id|>assistant<|end_header_id|>\n\n"
"The first image shows the statue of Liberty in New York."
"<|eot_id|>"
"<|start_header_id|>user<|end_header_id|>\n\n"
"And who is that?"
"<|eot_id|>"
"<|start_header_id|>assistant<|end_header_id|>\n\n"
)
self.assertEqual(rendered, expected_rendered)
messages = [
{
"role": "system",
"content": [
{"type": "text", "text": "This is a test sentence."},
],
},
{
"role": "user",
"content": [
{"type": "text", "text": "This is a response."},
],
},
]
input_ids = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
expected_ids = [
[
128000, # <|begin_of_text|>
128006, # <|start_header_id|>
9125, # "system"
128007, # <|end_of_header|>
271, # "\n\n"
2028,
374,
264,
1296,
11914,
13, # "This is a test sentence."
128009, # <|eot_id|>
128006, # <|start_header_id|>
882, # "user"
128007, # <|end_of_header|>
271, # "\n\n"
2028,
374,
264,
2077,
13, # "This is a response.",
128009, # <|eot_id|>
128006, # <|start_header_id|>
78191, # "assistant"
128007, # <|end_of_header|>
271, # "\n\n"
]
]
self.assertEqual(input_ids, expected_ids)
# test image in multiple locations
messages = [
{
"role": "user",
"content": [
{"type": "text", "text": "Describe this image in two sentences"},
{
"type": "image",
"url": url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
},
{"type": "text", "text": " Test sentence "},
{
"type": "image",
"url": url_to_local_path(
"https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg"
),
},
{"type": "text", "text": "ok\n"},
],
}
]
rendered = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
expected_rendered = (
"<|begin_of_text|><|start_header_id|>user<|end_header_id|>\n\n"
"Describe this image in two sentences<|image|> Test sentence <|image|>ok\n<|eot_id|>"
"<|start_header_id|>assistant<|end_header_id|>\n\n"
)
self.assertEqual(rendered, expected_rendered)
input_ids = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=True)
# fmt: off
expected_ids = [[
128000, 128006, 882, 128007, 271, 75885, 420, 2217, 304, 1403, 23719, 128256,
3475, 11914, 262, 128256, 564, 198, 128009, 128006, 78191, 128007, 271,
]]
# fmt: on
self.assertEqual(input_ids, expected_ids)
# text format for content
messages_list = [
{
"role": "user",
"content": [
{"type": "image"},
{"type": "text", "text": "Describe this image in two sentences"},
],
}
]
messages_str = [
{
"role": "user",
"content": "<|image|>Describe this image in two sentences",
}
]
rendered_list = processor.apply_chat_template(messages_list, add_generation_prompt=True, tokenize=False)
rendered_str = processor.apply_chat_template(messages_str, add_generation_prompt=True, tokenize=False)
self.assertEqual(rendered_list, rendered_str)
def test_process_interleaved_images_prompts_image_splitting(self):
processor = MllamaProcessor.from_pretrained(self.tmpdirname)
# Test that a single image is processed correctly
inputs = processor(images=self.image2, size={"width": 224, "height": 224})
self.assertEqual(inputs["pixel_values"].shape, (1, 1, 4, 3, 224, 224))
# Test that text is processed correctly
text = "<|begin_of_text|>This is a test sentence.<|end_of_text|>"
inputs = processor(text=text)
expected_ids = [128000, 2028, 374, 264, 1296, 11914, 13, 128001]
self.assertEqual(inputs["input_ids"][0], expected_ids)
self.assertEqual(inputs["attention_mask"][0], [1] * len(expected_ids))
self.assertEqual(inputs.get("cross_attention_mask"), None)
# Test a single sample with image and text
image_str = "<|image|>"
text_str = "This is a test sentence."
text = image_str + text_str
inputs = processor(
text=text,
images=self.image1,
size={"width": 128, "height": 128},
)
expected_ids = [self.image_token_id, self.bos_token_id] + [2028, 374, 264, 1296, 11914, 13]
self.assertEqual(inputs["pixel_values"].shape, (1, 1, 4, 3, 128, 128))
self.assertEqual(inputs["input_ids"][0], expected_ids)
self.assertEqual(inputs["attention_mask"][0], [1] * len(expected_ids))
cross_attention_mask = inputs["cross_attention_mask"]
self.assertEqual(cross_attention_mask.shape, (1, 8, 1, 4))
self.assertTrue(
np.all(cross_attention_mask == 1), f"Cross attention mask is not all ones: {cross_attention_mask}"
)
# Test batch
text = [
"<|image|>This is a test sentence.",
"This is a test sentence.<|image|><|image|>This is a test sentence.",
]
# fmt: off
expected_ids = [
[self.image_token_id, self.bos_token_id, 2028, 374, 264, 1296, 11914, 13],
[self.bos_token_id, 2028, 374, 264, 1296, 11914, 13, self.image_token_id, self.image_token_id, 2028, 374, 264, 1296, 11914, 13],
]
# fmt: on
images = [[self.image1], [self.image1, self.image2]]
inputs = processor(text=text, images=images, padding=True, size={"width": 256, "height": 256})
self.assertEqual(inputs["pixel_values"].shape, (2, 2, 4, 3, 256, 256))
for input_ids_i, attention_mask_i, expected_ids_i in zip(
inputs["input_ids"], inputs["attention_mask"], expected_ids
):
pad_ids = [id for id, m in zip(input_ids_i, attention_mask_i) if m == 0]
input_ids = [id for id, m in zip(input_ids_i, attention_mask_i) if m == 1]
self.assertEqual(input_ids, expected_ids_i)
self.assertEqual(pad_ids, [self.pad_token_id] * len(pad_ids))
cross_attention_mask = inputs["cross_attention_mask"]
self.assertEqual(cross_attention_mask.shape, (2, 15, 2, 4))
# Check that only first tile of first sample is attended to all text tokens
first_sample_mask = cross_attention_mask[0].copy()
first_image_first_tile_attention = first_sample_mask[:, :1, :1] # text tokens, images, tiles
self.assertTrue(
np.all(first_image_first_tile_attention == 1),
f"Cross attention mask is not all ones: {first_image_first_tile_attention}",
)
# zero out first tile of first image
first_image_first_tile_attention[:, :1, :1] = 0
self.assertTrue(
np.all(first_image_first_tile_attention == 0),
f"Cross attention mask is not all zeros: {first_image_first_tile_attention}",
)
# second sample
second_sample_mask = cross_attention_mask[1].copy()
first_image_first_tile_attention = second_sample_mask[7:, :1, :1] # text tokens, images, tiles
self.assertTrue(
np.all(first_image_first_tile_attention == 1),
f"Cross attention mask is not all ones: {first_image_first_tile_attention}",
)
second_image_two_tiles_attention = second_sample_mask[8:, 1:2, :2] # text tokens, images, tiles
self.assertTrue(
np.all(second_image_two_tiles_attention == 1),
f"Cross attention mask is not all ones: {second_image_two_tiles_attention}",
)
# zero out both images masks
second_sample_mask[7:, :1, :1] = 0
second_sample_mask[8:, 1:2, :2] = 0
self.assertTrue(
np.all(second_sample_mask == 0), f"Cross attention mask is not all zeros: {second_sample_mask}"
)
def test_process_interleaved_images_prompts_image_error(self):
text = [
"This is a test sentence.",
"In this other sentence we try some good things",
]
processor = MllamaProcessor.from_pretrained(self.tmpdirname)
inputs = processor(text=text, images=None, padding=True)
self.assertIsNotNone(inputs["input_ids"])
text = [
"This is a test sentence.<|image|>",
"In this other sentence we try some good things",
]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
images = [[self.image1], []]
with self.assertRaises(ValueError):
processor(text=text, images=images, padding=True)
text = [
"This is a test sentence.<|image|>",
"In this other sentence we try some good things<|image|>",
]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
text = [
"This is a test sentence.<|image|>",
"In this other sentence we try some good things<|image|>",
]
images = [[self.image1], [self.image2]]
inputs = processor(text=text, images=images, padding=True)
images = [[self.image1, self.image2], []]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
# see https://github.com/huggingface/transformers/pull/35934
images = [self.image1, self.image2]
with self.assertRaises(ValueError):
processor(text=text, images=None, padding=True)
def test_unstructured_kwargs_batched(self):
# Overridden because Mllama expects images in nested format. For 2 images it can't infer
# the correct nesting, so we better throw an error
if "image_processor" not in self.processor_class.get_attributes():
self.skipTest(f"image_processor attribute not present in {self.processor_class}")
processor_components = self.prepare_components()
processor_kwargs = self.prepare_processor_dict()
processor = self.processor_class(**processor_components, **processor_kwargs)
self.skip_processor_without_typed_kwargs(processor)
input_str = self.prepare_text_inputs(batch_size=2, modalities="image")
image_input = self.prepare_image_inputs(batch_size=2)
inputs = processor(
text=input_str,
images=image_input,
return_tensors="pt",
do_rescale=True,
rescale_factor=-1.0,
padding="longest",
max_length=76,
)
self.assertLessEqual(inputs[self.images_input_name][0][0].mean(), 0)
self.assertTrue(
len(inputs[self.text_input_name][0]) == len(inputs[self.text_input_name][1])
and len(inputs[self.text_input_name][1]) < 76
)
def test_special_mm_token_truncation(self):
"""Tests that special vision tokens do not get truncated when `truncation=True` is set."""
processor = self.get_processor()
input_str = self.prepare_text_inputs(batch_size=2, modalities="image")
image_input = self.prepare_image_inputs(batch_size=2)
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=None,
padding=True,
)
with self.assertRaises(ValueError):
_ = processor(
text=input_str,
images=image_input,
return_tensors="pt",
truncation=True,
padding=True,
max_length=3,
)
@unittest.skip("Mllama can't process inputs with no image ttogether with multimodal inputs")
def test_processor_text_has_no_visual(self):
pass
| MllamaProcessorTest |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 22109,
"end": 22417
} | class ____(sgqlc.types.Enum):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__choices__ = (
"GIST",
"ISSUE",
"ORGANIZATION",
"PROJECT",
"PULL_REQUEST",
"REPOSITORY",
"TEAM",
"USER",
)
| PinnableItemType |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 4954,
"end": 5345
} | class ____(BaseParam[bool]):
"""Filter on is_stale."""
def to_orm(self, select: Select) -> Select:
if self.value and self.skip_none:
return select.where(DagModel.is_stale != self.value)
return select
@classmethod
def depends(cls, exclude_stale: bool = True) -> _ExcludeStaleFilter:
return cls().set_value(exclude_stale)
| _ExcludeStaleFilter |
python | kamyu104__LeetCode-Solutions | Python/twisted-mirror-path-count.py | {
"start": 46,
"end": 718
} | class ____(object):
def uniquePaths(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
MOD = 10**9+7
def get(r, c):
return grid[r][c] if len(grid) > len(grid[0]) else grid[c][r]
dp = [[0]*2 for _ in xrange(min(len(grid), len(grid[0]))+1)]
dp[1] = [1]*2
for r in xrange(max(len(grid), len(grid[0]))):
for c in xrange(len(dp)-1):
if get(r, c):
dp[c+1] = [dp[c+1][1], dp[c][0]]
else:
dp[c+1] = [(dp[c+1][1]+dp[c][0])%MOD]*2
return dp[-1][0]
# Time: O(m * n)
# Space: O(n)
# dp
| Solution |
python | ray-project__ray | rllib/algorithms/algorithm_config.py | {
"start": 3578,
"end": 310942
} | class ____(_Config):
"""A RLlib AlgorithmConfig builds an RLlib Algorithm from a given configuration.
.. testcode::
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.algorithms.callbacks import MemoryTrackingCallbacks
# Construct a generic config object, specifying values within different
# sub-categories, e.g. "training".
config = (
PPOConfig()
.training(gamma=0.9, lr=0.01)
.environment(env="CartPole-v1")
.env_runners(num_env_runners=0)
.callbacks(MemoryTrackingCallbacks)
)
# A config object can be used to construct the respective Algorithm.
rllib_algo = config.build()
.. testcode::
from ray.rllib.algorithms.ppo import PPOConfig
from ray import tune
# In combination with a tune.grid_search:
config = PPOConfig()
config.training(lr=tune.grid_search([0.01, 0.001]))
# Use `to_dict()` method to get the legacy plain python config dict
# for usage with `tune.Tuner().fit()`.
tune.Tuner("PPO", param_space=config.to_dict())
"""
@staticmethod
def DEFAULT_AGENT_TO_MODULE_MAPPING_FN(agent_id, episode):
# The default agent ID to module ID mapping function to use in the multi-agent
# case if None is provided.
# Map any agent ID to "default_policy".
return DEFAULT_MODULE_ID
# @OldAPIStack
# TODO (sven): Deprecate in new API stack.
@staticmethod
def DEFAULT_POLICY_MAPPING_FN(aid, episode, worker, **kwargs):
# The default policy mapping function to use if None provided.
# Map any agent ID to "default_policy".
return DEFAULT_POLICY_ID
@classmethod
def from_dict(cls, config_dict: dict) -> Self:
"""Creates an AlgorithmConfig from a legacy python config dict.
.. testcode::
from ray.rllib.algorithms.ppo.ppo import PPOConfig
# pass a RLlib config dict
ppo_config = PPOConfig.from_dict({})
ppo = ppo_config.build(env="Pendulum-v1")
Args:
config_dict: The legacy formatted python config dict for some algorithm.
Returns:
A new AlgorithmConfig object that matches the given python config dict.
"""
# Create a default config object of this class.
config_obj = cls()
# Remove `_is_frozen` flag from config dict in case the AlgorithmConfig that
# the dict was derived from was already frozen (we don't want to copy the
# frozenness).
config_dict.pop("_is_frozen", None)
config_obj.update_from_dict(config_dict)
return config_obj
@classmethod
def overrides(cls, **kwargs):
"""Generates and validates a set of config key/value pairs (passed via kwargs).
Validation whether given config keys are valid is done immediately upon
construction (by comparing against the properties of a default AlgorithmConfig
object of this class).
Allows combination with a full AlgorithmConfig object to yield a new
AlgorithmConfig object.
Used anywhere, we would like to enable the user to only define a few config
settings that would change with respect to some main config, e.g. in multi-agent
setups and evaluation configs.
.. testcode::
from ray.rllib.algorithms.ppo import PPOConfig
from ray.rllib.policy.policy import PolicySpec
config = (
PPOConfig()
.multi_agent(
policies={
"pol0": PolicySpec(config=PPOConfig.overrides(lambda_=0.95))
},
)
)
.. testcode::
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
from ray.rllib.algorithms.ppo import PPOConfig
config = (
PPOConfig()
.evaluation(
evaluation_num_env_runners=1,
evaluation_interval=1,
evaluation_config=AlgorithmConfig.overrides(explore=False),
)
)
Returns:
A dict mapping valid config property-names to values.
Raises:
KeyError: In case a non-existing property name (kwargs key) is being
passed in. Valid property names are taken from a default
AlgorithmConfig object of `cls`.
"""
default_config = cls()
config_overrides = {}
for key, value in kwargs.items():
if not hasattr(default_config, key):
raise KeyError(
f"Invalid property name {key} for config class {cls.__name__}!"
)
# Allow things like "lambda" as well.
key = cls._translate_special_keys(key, warn_deprecated=True)
config_overrides[key] = value
return config_overrides
def __init__(self, algo_class: Optional[type] = None):
"""Initializes an AlgorithmConfig instance.
Args:
algo_class: An optional Algorithm class that this config class belongs to.
Used (if provided) to build a respective Algorithm instance from this
config.
"""
# Define all settings and their default values.
# Define the default RLlib Algorithm class that this AlgorithmConfig is applied
# to.
self.algo_class = algo_class
# `self.python_environment()`
self.extra_python_environs_for_driver = {}
self.extra_python_environs_for_worker = {}
# `self.resources()`
self.placement_strategy = "PACK"
self.num_gpus = 0 # @OldAPIStack
self._fake_gpus = False # @OldAPIStack
self.num_cpus_for_main_process = 1
# `self.framework()`
self.framework_str = "torch"
self.eager_tracing = True
self.eager_max_retraces = 20
self.tf_session_args = {
# note: overridden by `local_tf_session_args`
"intra_op_parallelism_threads": 2,
"inter_op_parallelism_threads": 2,
"gpu_options": {
"allow_growth": True,
},
"log_device_placement": False,
"device_count": {"CPU": 1},
# Required by multi-GPU (num_gpus > 1).
"allow_soft_placement": True,
}
self.local_tf_session_args = {
# Allow a higher level of parallelism by default, but not unlimited
# since that can cause crashes with many concurrent drivers.
"intra_op_parallelism_threads": 8,
"inter_op_parallelism_threads": 8,
}
# Torch compile settings
self.torch_compile_learner = False
self.torch_compile_learner_what_to_compile = (
TorchCompileWhatToCompile.FORWARD_TRAIN
)
# AOT Eager is a dummy backend and doesn't result in speedups.
self.torch_compile_learner_dynamo_backend = (
"aot_eager" if sys.platform == "darwin" else "inductor"
)
self.torch_compile_learner_dynamo_mode = None
self.torch_compile_worker = False
# AOT Eager is a dummy backend and doesn't result in speedups.
self.torch_compile_worker_dynamo_backend = (
"aot_eager" if sys.platform == "darwin" else "onnxrt"
)
self.torch_compile_worker_dynamo_mode = None
# Default kwargs for `torch.nn.parallel.DistributedDataParallel`.
self.torch_ddp_kwargs = {}
# Default setting for skipping `nan` gradient updates.
self.torch_skip_nan_gradients = False
# `self.environment()`
self.env = None
self.env_config = {}
self.observation_space = None
self.action_space = None
self.clip_rewards = None
self.normalize_actions = True
self.clip_actions = False
self._is_atari = None
self.disable_env_checking = False
# Deprecated settings:
self.render_env = False
self.action_mask_key = "action_mask"
# `self.env_runners()`
self.env_runner_cls = None
self.num_env_runners = 0
self.create_local_env_runner = True
self.num_envs_per_env_runner = 1
# TODO (sven): Once new ormsgpack system in place, replace the string
# with proper `gym.envs.registration.VectorizeMode.SYNC`.
self.gym_env_vectorize_mode = "sync"
self.num_cpus_per_env_runner = 1
self.num_gpus_per_env_runner = 0
self.custom_resources_per_env_runner = {}
self.validate_env_runners_after_construction = True
self.episodes_to_numpy = True
self.max_requests_in_flight_per_env_runner = 1
self.sample_timeout_s = 60.0
self.create_env_on_local_worker = False
self._env_to_module_connector = None
self.add_default_connectors_to_env_to_module_pipeline = True
self._module_to_env_connector = None
self.add_default_connectors_to_module_to_env_pipeline = True
self.merge_env_runner_states = "training_only"
self.broadcast_env_runner_states = True
self.episode_lookback_horizon = 1
# TODO (sven): Rename into `sample_timesteps` (or `sample_duration`
# and `sample_duration_unit` (replacing batch_mode), like we do it
# in the evaluation config).
self.rollout_fragment_length = 200
# TODO (sven): Rename into `sample_mode`.
self.batch_mode = "truncate_episodes"
self.compress_observations = False
# @OldAPIStack
self.remote_worker_envs = False
self.remote_env_batch_wait_ms = 0
self.enable_tf1_exec_eagerly = False
self.sample_collector = SimpleListCollector
self.preprocessor_pref = "deepmind"
self.observation_filter = "NoFilter"
self.update_worker_filter_stats = True
self.use_worker_filter_stats = True
self.sampler_perf_stats_ema_coef = None
self._is_online = True
# `self.learners()`
self.num_learners = 0
self.num_gpus_per_learner = 0
self.num_cpus_per_learner = "auto"
self.num_aggregator_actors_per_learner = 0
self.max_requests_in_flight_per_aggregator_actor = 3
self.local_gpu_idx = 0
# TODO (sven): This probably works even without any restriction
# (allowing for any arbitrary number of requests in-flight). Test with
# 3 first, then with unlimited, and if both show the same behavior on
# an async algo, remove this restriction entirely.
self.max_requests_in_flight_per_learner = 3
# `self.training()`
self.gamma = 0.99
self.lr = 0.001
self.grad_clip = None
self.grad_clip_by = "global_norm"
# Simple logic for now: If None, use `train_batch_size`.
self._train_batch_size_per_learner = None
self.train_batch_size = 32 # @OldAPIStack
# These setting have been adopted from the original PPO batch settings:
# num_sgd_iter, minibatch_size, and shuffle_sequences.
self.num_epochs = 1
self.minibatch_size = None
self.shuffle_batch_per_epoch = False
# TODO (sven): Unsolved problem with RLModules sometimes requiring settings from
# the main AlgorithmConfig. We should not require the user to provide those
# settings in both, the AlgorithmConfig (as property) AND the model config
# dict. We should generally move to a world, in which there exists an
# AlgorithmConfig that a) has-a user provided model config object and b)
# is given a chance to compile a final model config (dict or object) that is
# then passed into the RLModule/Catalog. This design would then match our
# "compilation" pattern, where we compile automatically those settings that
# should NOT be touched by the user.
# In case, an Algorithm already uses the above described pattern (and has
# `self.model` as a @property, ignore AttributeError (for trying to set this
# property).
try:
self.model = copy.deepcopy(MODEL_DEFAULTS)
except AttributeError:
pass
self._learner_connector = None
self.add_default_connectors_to_learner_pipeline = True
self.learner_config_dict = {}
self.optimizer = {} # @OldAPIStack
self._learner_class = None
# `self.callbacks()`
# TODO (sven): Set this default to None, once the old API stack has been
# deprecated.
self.callbacks_class = RLlibCallback
self.callbacks_on_algorithm_init = None
self.callbacks_on_env_runners_recreated = None
self.callbacks_on_offline_eval_runners_recreated = None
self.callbacks_on_checkpoint_loaded = None
self.callbacks_on_environment_created = None
self.callbacks_on_episode_created = None
self.callbacks_on_episode_start = None
self.callbacks_on_episode_step = None
self.callbacks_on_episode_end = None
self.callbacks_on_evaluate_start = None
self.callbacks_on_evaluate_end = None
self.callbacks_on_evaluate_offline_start = None
self.callbacks_on_evaluate_offline_end = None
self.callbacks_on_sample_end = None
self.callbacks_on_train_result = None
# `self.explore()`
self.explore = True
# This is not compatible with RLModules, which have a method
# `forward_exploration` to specify custom exploration behavior.
if not hasattr(self, "exploration_config"):
# Helper to keep track of the original exploration config when dis-/enabling
# rl modules.
self._prior_exploration_config = None
self.exploration_config = {}
# `self.api_stack()`
self.enable_rl_module_and_learner = True
self.enable_env_runner_and_connector_v2 = True
self.api_stack(
enable_rl_module_and_learner=True,
enable_env_runner_and_connector_v2=True,
)
# `self.multi_agent()`
# TODO (sven): Prepare multi-agent setup for logging each agent's and each
# RLModule's steps taken thus far (and passing this information into the
# EnvRunner metrics and the RLModule's forward pass). Thereby, deprecate the
# `count_steps_by` config setting AND - at the same time - allow users to
# specify the batch size unit instead (agent- vs env steps).
self.count_steps_by = "env_steps"
# self.agent_to_module_mapping_fn = self.DEFAULT_AGENT_TO_MODULE_MAPPING_FN
# Soon to be Deprecated.
self.policies = {DEFAULT_POLICY_ID: PolicySpec()}
self.policy_map_capacity = 100
self.policy_mapping_fn = self.DEFAULT_POLICY_MAPPING_FN
self.policies_to_train = None
self.policy_states_are_swappable = False
self.observation_fn = None
# `self.offline_data()`
self.input_ = "sampler"
self.offline_data_class = None
self.offline_data_class = None
self.input_read_method = "read_parquet"
self.input_read_method_kwargs = {}
self.input_read_schema = {}
self.input_read_episodes = False
self.input_read_sample_batches = False
self.input_read_batch_size = None
self.input_filesystem = None
self.input_filesystem_kwargs = {}
self.input_compress_columns = [Columns.OBS, Columns.NEXT_OBS]
self.input_spaces_jsonable = True
self.materialize_data = False
self.materialize_mapped_data = True
self.map_batches_kwargs = {}
self.iter_batches_kwargs = {}
# Use always the final observation until the user explicitly ask
# to ignore it.
self.ignore_final_observation = False
self.prelearner_class = None
self.prelearner_buffer_class = None
self.prelearner_buffer_kwargs = {}
self.prelearner_module_synch_period = 10
self.dataset_num_iters_per_learner = None
self.input_config = {}
self.actions_in_input_normalized = False
self.postprocess_inputs = False
self.shuffle_buffer_size = 0
self.output = None
self.output_config = {}
self.output_compress_columns = [Columns.OBS, Columns.NEXT_OBS]
self.output_max_file_size = 64 * 1024 * 1024
self.output_max_rows_per_file = None
self.output_write_remaining_data = False
self.output_write_method = "write_parquet"
self.output_write_method_kwargs = {}
self.output_filesystem = None
self.output_filesystem_kwargs = {}
self.output_write_episodes = True
self.offline_sampling = False
# `self.evaluation()`
self.evaluation_interval = None
self.evaluation_duration = 10
self.evaluation_duration_unit = "episodes"
self.evaluation_sample_timeout_s = 120.0
self.evaluation_auto_duration_min_env_steps_per_sample = 100
self.evaluation_auto_duration_max_env_steps_per_sample = 2000
self.evaluation_parallel_to_training = False
self.evaluation_force_reset_envs_before_iteration = True
self.evaluation_config = None
self.off_policy_estimation_methods = {}
self.ope_split_batch_by_episode = True
self.evaluation_num_env_runners = 0
self.custom_evaluation_function = None
# TODO: Set this flag still in the config or - much better - in the
# RolloutWorker as a property.
self.in_evaluation = False
# TODO (sven): Deprecate this setting (it's not user-accessible right now any
# way). Replace by logic within `training_step` to merge and broadcast the
# EnvRunner (connector) states.
self.sync_filters_on_rollout_workers_timeout_s = 10.0
# Offline evaluation.
self.offline_evaluation_interval = None
self.num_offline_eval_runners = 0
self.offline_evaluation_type: str = None
self.offline_eval_runner_class = None
# TODO (simon): Only `_offline_evaluate_with_fixed_duration` works. Also,
# decide, if we use `offline_evaluation_duration` or
# `dataset_num_iters_per_offline_eval_runner`. Should the user decide here?
# The latter will be much faster, but runs per runner call all evaluation.
self.offline_loss_for_module_fn = None
self.offline_evaluation_duration = 1
self.offline_evaluation_parallel_to_training = False
self.offline_evaluation_timeout_s = 120.0
self.num_cpus_per_offline_eval_runner = 1
self.num_gpus_per_offline_eval_runner = 0
self.custom_resources_per_offline_eval_runner = {}
self.restart_failed_offline_eval_runners = True
self.ignore_offline_eval_runner_failures = False
self.max_num_offline_eval_runner_restarts = 1000
self.offline_eval_runner_restore_timeout_s = 1800.0
self.max_requests_in_flight_per_offline_eval_runner = 1
self.validate_offline_eval_runners_after_construction = True
self.offline_eval_runner_health_probe_timeout_s = 30.0
self.offline_eval_rl_module_inference_only = False
self.broadcast_offline_eval_runner_states = False
self.offline_eval_batch_size_per_runner = 256
self.dataset_num_iters_per_eval_runner = 1
# `self.reporting()`
self.keep_per_episode_custom_metrics = False
self.metrics_episode_collection_timeout_s = 60.0
self.metrics_num_episodes_for_smoothing = 100
self.min_time_s_per_iteration = None
self.min_train_timesteps_per_iteration = 0
self.min_sample_timesteps_per_iteration = 0
self.log_gradients = False
# `self.checkpointing()`
self.export_native_model_files = False
self.checkpoint_trainable_policies_only = False
# `self.debugging()`
self.logger_creator = None
self.logger_config = None
self.log_level = "WARN"
self.log_sys_usage = True
self.fake_sampler = False
self.seed = None
# `self.fault_tolerance()`
self.restart_failed_env_runners = True
self.ignore_env_runner_failures = False
# By default, restart failed worker a thousand times.
# This should be enough to handle normal transient failures.
# This also prevents infinite number of restarts in case the worker or env has
# a bug.
self.max_num_env_runner_restarts = 1000
# Small delay between worker restarts. In case EnvRunners or eval EnvRunners
# have remote dependencies, this delay can be adjusted to make sure we don't
# flood them with re-connection requests, and allow them enough time to recover.
# This delay also gives Ray time to stream back error logging and exceptions.
self.delay_between_env_runner_restarts_s = 60.0
self.restart_failed_sub_environments = False
self.num_consecutive_env_runner_failures_tolerance = 100
self.env_runner_health_probe_timeout_s = 30.0
self.env_runner_restore_timeout_s = 1800.0
# `self.rl_module()`
self._model_config = {}
self._rl_module_spec = None
# Module ID specific config overrides.
self.algorithm_config_overrides_per_module = {}
# Cached, actual AlgorithmConfig objects derived from
# `self.algorithm_config_overrides_per_module`.
self._per_module_overrides: Dict[ModuleID, "AlgorithmConfig"] = {}
# `self.experimental()`
self._validate_config = True
self._use_msgpack_checkpoints = False
self._torch_grad_scaler_class = None
self._torch_lr_scheduler_classes = None
self._tf_policy_handles_more_than_one_loss = False
self._disable_preprocessor_api = False
self._disable_action_flattening = False
self._disable_initialize_loss_from_dummy_batch = False
self._dont_auto_sync_env_runner_states = False
# Has this config object been frozen (cannot alter its attributes anymore).
self._is_frozen = False
# TODO: Remove, once all deprecation_warning calls upon using these keys
# have been removed.
# === Deprecated keys ===
self.env_task_fn = DEPRECATED_VALUE
self.enable_connectors = DEPRECATED_VALUE
self.simple_optimizer = DEPRECATED_VALUE
self.monitor = DEPRECATED_VALUE
self.evaluation_num_episodes = DEPRECATED_VALUE
self.metrics_smoothing_episodes = DEPRECATED_VALUE
self.timesteps_per_iteration = DEPRECATED_VALUE
self.min_iter_time_s = DEPRECATED_VALUE
self.collect_metrics_timeout = DEPRECATED_VALUE
self.min_time_s_per_reporting = DEPRECATED_VALUE
self.min_train_timesteps_per_reporting = DEPRECATED_VALUE
self.min_sample_timesteps_per_reporting = DEPRECATED_VALUE
self.input_evaluation = DEPRECATED_VALUE
self.policy_map_cache = DEPRECATED_VALUE
self.worker_cls = DEPRECATED_VALUE
self.synchronize_filters = DEPRECATED_VALUE
self.enable_async_evaluation = DEPRECATED_VALUE
self.custom_async_evaluation_function = DEPRECATED_VALUE
self._enable_rl_module_api = DEPRECATED_VALUE
self.auto_wrap_old_gym_envs = DEPRECATED_VALUE
self.always_attach_evaluation_results = DEPRECATED_VALUE
# The following values have moved because of the new ReplayBuffer API
self.buffer_size = DEPRECATED_VALUE
self.prioritized_replay = DEPRECATED_VALUE
self.learning_starts = DEPRECATED_VALUE
self.replay_batch_size = DEPRECATED_VALUE
# -1 = DEPRECATED_VALUE is a valid value for replay_sequence_length
self.replay_sequence_length = None
self.replay_mode = DEPRECATED_VALUE
self.prioritized_replay_alpha = DEPRECATED_VALUE
self.prioritized_replay_beta = DEPRECATED_VALUE
self.prioritized_replay_eps = DEPRECATED_VALUE
self.min_time_s_per_reporting = DEPRECATED_VALUE
self.min_train_timesteps_per_reporting = DEPRECATED_VALUE
self.min_sample_timesteps_per_reporting = DEPRECATED_VALUE
self._disable_execution_plan_api = DEPRECATED_VALUE
def to_dict(self) -> AlgorithmConfigDict:
"""Converts all settings into a legacy config dict for backward compatibility.
Returns:
A complete AlgorithmConfigDict, usable in backward-compatible Tune/RLlib
use cases.
"""
config = copy.deepcopy(vars(self))
config.pop("algo_class")
config.pop("_is_frozen")
# Worst naming convention ever: NEVER EVER use reserved key-words...
if "lambda_" in config:
assert hasattr(self, "lambda_")
config["lambda"] = self.lambda_
config.pop("lambda_")
if "input_" in config:
assert hasattr(self, "input_")
config["input"] = self.input_
config.pop("input_")
# Convert `policies` (PolicySpecs?) into dict.
# Convert policies dict such that each policy ID maps to a old-style.
# 4-tuple: class, obs-, and action space, config.
if "policies" in config and isinstance(config["policies"], dict):
policies_dict = {}
for policy_id, policy_spec in config.pop("policies").items():
if isinstance(policy_spec, PolicySpec):
policies_dict[policy_id] = policy_spec.get_state()
else:
policies_dict[policy_id] = policy_spec
config["policies"] = policies_dict
# Switch out deprecated vs new config keys.
config["callbacks"] = config.pop("callbacks_class", None)
config["create_env_on_driver"] = config.pop("create_env_on_local_worker", 1)
config["custom_eval_function"] = config.pop("custom_evaluation_function", None)
config["framework"] = config.pop("framework_str", None)
# Simplify: Remove all deprecated keys that have as value `DEPRECATED_VALUE`.
# These would be useless in the returned dict anyways.
for dep_k in [
"monitor",
"evaluation_num_episodes",
"metrics_smoothing_episodes",
"timesteps_per_iteration",
"min_iter_time_s",
"collect_metrics_timeout",
"buffer_size",
"prioritized_replay",
"learning_starts",
"replay_batch_size",
"replay_mode",
"prioritized_replay_alpha",
"prioritized_replay_beta",
"prioritized_replay_eps",
"min_time_s_per_reporting",
"min_train_timesteps_per_reporting",
"min_sample_timesteps_per_reporting",
"input_evaluation",
"_enable_new_api_stack",
]:
if config.get(dep_k) == DEPRECATED_VALUE:
config.pop(dep_k, None)
return config
def update_from_dict(
    self,
    config_dict: PartialAlgorithmConfigDict,
) -> Self:
    """Modifies this AlgorithmConfig via the provided python config dict.

    Warns if `config_dict` contains deprecated keys.
    Silently sets even properties of `self` that do NOT exist. This way, this method
    may be used to configure custom Policies which do not have their own specific
    AlgorithmConfig classes, e.g.
    `ray.rllib.examples.policy.random_policy::RandomPolicy`.

    Args:
        config_dict: The old-style python config dict (PartialAlgorithmConfigDict)
            to use for overriding some properties defined in there.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Collects all `evaluation_*` kwargs; they are applied in a single
    # `self.evaluation()` call at the very end.
    eval_call = {}
    # We deal with this special key before all others because it may influence
    # stuff like "exploration_config".
    # Namely, we want to re-instantiate the exploration config this config had
    # inside `self.experimental()` before potentially overwriting it in the
    # following.
    enable_new_api_stack = config_dict.get(
        "enable_rl_module_and_learner",
        config_dict.get("enable_env_runner_and_connector_v2"),
    )
    if enable_new_api_stack is not None:
        self.api_stack(
            enable_rl_module_and_learner=enable_new_api_stack,
            enable_env_runner_and_connector_v2=enable_new_api_stack,
        )
    # Modify our properties one by one.
    for key, value in config_dict.items():
        # Map deprecated key names onto their current equivalents (silently).
        key = self._translate_special_keys(key, warn_deprecated=False)
        # Ray Tune saves additional data under this magic keyword.
        # This should not get treated as AlgorithmConfig field.
        if key == TRIAL_INFO:
            continue
        if key in ["_enable_new_api_stack"]:
            # We've dealt with this above.
            continue
        # Set our multi-agent settings.
        elif key == "multiagent":
            # Only forward the sub-keys that `multi_agent()` understands.
            kwargs = {
                k: value[k]
                for k in [
                    "policies",
                    "policy_map_capacity",
                    "policy_mapping_fn",
                    "policies_to_train",
                    "policy_states_are_swappable",
                    "observation_fn",
                    "count_steps_by",
                ]
                if k in value
            }
            self.multi_agent(**kwargs)
        # Some keys specify config sub-dicts and therefore should go through the
        # correct methods to properly `.update()` those from given config dict
        # (to not lose any sub-keys).
        elif key == "callbacks_class" and value != NOT_SERIALIZABLE:
            # For backward compatibility reasons, only resolve possible
            # classpath if value is a str type.
            if isinstance(value, str):
                value = deserialize_type(value, error=True)
            self.callbacks(callbacks_class=value)
        elif key == "env_config":
            self.environment(env_config=value)
        elif key.startswith("evaluation_"):
            # Defer: applied in one `self.evaluation()` call below.
            eval_call[key] = value
        elif key == "exploration_config":
            # On the new API stack, exploration_config is set directly (the
            # `env_runners()` path would reject it).
            if enable_new_api_stack:
                self.exploration_config = value
                continue
            if isinstance(value, dict) and "type" in value:
                value["type"] = deserialize_type(value["type"])
            self.env_runners(exploration_config=value)
        elif key == "model":
            # Resolve possible classpath.
            if isinstance(value, dict) and value.get("custom_model"):
                value["custom_model"] = deserialize_type(value["custom_model"])
            self.training(**{key: value})
        elif key == "optimizer":
            self.training(**{key: value})
        elif key == "replay_buffer_config":
            if isinstance(value, dict) and "type" in value:
                value["type"] = deserialize_type(value["type"])
            self.training(**{key: value})
        elif key == "sample_collector":
            # Resolve possible classpath.
            value = deserialize_type(value)
            self.env_runners(sample_collector=value)
        # Set the property named `key` to `value`.
        else:
            setattr(self, key, value)
    # Apply all collected `evaluation_*` settings at once.
    self.evaluation(**eval_call)
    return self
def get_state(self) -> Dict[str, Any]:
    """Returns a dict state that can be pickled.

    Returns:
        A dictionary containing all attributes of the instance.
    """
    state = dict(self.__dict__)
    # Record the concrete config class so `from_state` can reconstruct it.
    state["class"] = type(self)
    del state["algo_class"]
    del state["_is_frozen"]
    # Drop all entries that still carry the deprecation sentinel value.
    state = {key: val for key, val in state.items() if val != DEPRECATED_VALUE}
    # Old API stack: serialize any PolicySpec objects into plain state dicts.
    # TODO (simon, sven): Remove when deprecating old stack.
    policies = state.get("policies")
    if isinstance(policies, dict):
        state["policies"] = {
            pid: (spec.get_state() if isinstance(spec, PolicySpec) else spec)
            for pid, spec in policies.items()
        }
    return state
@classmethod
def from_state(cls, state: Dict[str, Any]) -> Union[Self, Any]:
"""Returns an instance constructed from the state.
Args:
state: A dictionary containing the state of an `AlgorithmConfig`.
See `AlgorithmConfig.get_state` for creating a state.
The constructed class will be of ``state["class"]``.
Returns:
An `AlgorithmConfig` instance with attributes from the `state`.
"""
# As ctor could be any other class add Any to the return type to indicate this.
ctor = state["class"]
config = ctor()
config.__dict__.update(state)
return config
# TODO(sven): We might want to have a `deserialize` method as well. Right now,
# simply using the from_dict() API works in this same (deserializing) manner,
# whether the dict used is actually code-free (already serialized) or not
# (i.e. a classic RLlib config dict with e.g. "callbacks" key still pointing to
# a class).
def serialize(self) -> Dict[str, Any]:
    """Returns a mapping from str to JSON'able values representing this config.

    The resulting values don't have any code in them.
    Classes (such as `callbacks_class`) are converted to their full
    classpath, e.g. `ray.rllib.callbacks.callbacks.RLlibCallback`.
    Actual code such as lambda functions are written as their source
    code (str) plus any closure information for properly restoring the
    code inside the AlgorithmConfig object made from the returned dict data.
    Dataclass objects get converted to dicts.

    Returns:
        A dict mapping from str to JSON'able values.
    """
    # First flatten into a legacy config dict, then strip/convert all code.
    return self._serialize_dict(self.to_dict())
def copy(self, copy_frozen: Optional[bool] = None) -> Self:
    """Creates a deep copy of this config and (un)freezes if necessary.

    Args:
        copy_frozen: Whether the created deep copy is frozen or not. If None,
            keep the same frozen status that `self` currently has.

    Returns:
        A deep copy of `self` that is (un)frozen.
    """
    new_config = copy.deepcopy(self)
    if copy_frozen is True:
        new_config.freeze()
    elif copy_frozen is False:
        new_config._is_frozen = False
        # Also unfreeze the nested evaluation config, if it is a full
        # AlgorithmConfig object (rather than a plain dict).
        eval_cfg = new_config.evaluation_config
        if isinstance(eval_cfg, AlgorithmConfig):
            eval_cfg._is_frozen = False
    return new_config
def freeze(self) -> None:
    """Freezes this config object, such that no attributes can be set anymore.

    Algorithms should use this method to make sure that their config objects
    remain read-only after this.
    """
    # Already frozen -> nothing to do (also stops the recursion below).
    if self._is_frozen:
        return
    self._is_frozen = True
    # Propagate the freeze to the underlying eval config, if applicable.
    if isinstance(self.evaluation_config, AlgorithmConfig):
        self.evaluation_config.freeze()
    # TODO: Flip out all set/dict/list values into frozen versions
    #  of themselves? This way, users won't even be able to alter those values
    #  directly anymore.
@OverrideToImplementCustomLogic_CallToSuperRecommended
def validate(self) -> None:
    """Validates all values in this config."""
    # Validation is blocked.
    if not self._validate_config:
        return
    # Run the per-settings-group validators. NOTE(review): later validators
    # may rely on earlier ones having normalized settings -- keep this call
    # order as-is.
    self._validate_env_runner_settings()
    self._validate_callbacks_settings()
    self._validate_framework_settings()
    self._validate_resources_settings()
    self._validate_multi_agent_settings()
    self._validate_input_settings()
    self._validate_evaluation_settings()
    self._validate_offline_settings()
    self._validate_new_api_stack_settings()
    self._validate_to_be_deprecated_settings()
def build_algo(
    self,
    env: Optional[Union[str, EnvType]] = None,
    logger_creator: Optional[Callable[[], Logger]] = None,
    use_copy: bool = True,
) -> "Algorithm":
    """Builds an Algorithm from this AlgorithmConfig (or a copy thereof).

    Args:
        env: Name of the environment to use (e.g. a gym-registered str),
            a full class path (e.g.
            "ray.rllib.examples.envs.classes.random_env.RandomEnv"), or an Env
            class directly. Note that this arg can also be specified via
            the "env" key in `config`.
        logger_creator: Callable that creates a ray.tune.Logger
            object. If unspecified, a default logger is created.
        use_copy: Whether to deepcopy `self` and pass the copy to the Algorithm
            (instead of `self`) as config. This is useful in case you would like
            to recycle the same AlgorithmConfig over and over, e.g. in a test
            case, in which we loop over different DL-frameworks.

    Returns:
        A ray.rllib.algorithms.algorithm.Algorithm object.
    """
    if env is not None:
        self.env = env
        # Keep the evaluation config's env in sync.
        if self.evaluation_config is not None:
            self.evaluation_config["env"] = env
    if logger_creator is not None:
        self.logger_creator = logger_creator

    # Resolve a registered algorithm name (str) into the trainable class.
    algo_cls = self.algo_class
    if isinstance(algo_cls, str):
        algo_cls = get_trainable_cls(algo_cls)

    cfg = copy.deepcopy(self) if use_copy else self
    return algo_cls(config=cfg, logger_creator=self.logger_creator)
def build_env_to_module_connector(
    self,
    env=None,
    spaces=None,
    device=None,
) -> ConnectorV2:
    """Builds the env-to-module connector pipeline for this config.

    Starts from any user-defined connector(s) (set via
    `config.env_runners(env_to_module_connector=...)`) and, unless
    `self.add_default_connectors_to_env_to_module_pipeline` is False, appends
    RLlib's default env-to-module pieces.

    Args:
        env: The (vectorized) gym env to extract spaces from (may be None).
        spaces: Optional dict of spaces; the `INPUT_ENV_SINGLE_SPACES` key maps
            to an (obs-space, act-space) tuple of the single (non-vectorized)
            env.
        device: Optional (torch) device for the final `NumpyToTensor` piece.

    Returns:
        The assembled `EnvToModulePipeline` (a ConnectorV2).
    """
    from ray.rllib.connectors.env_to_module import (
        AddObservationsFromEpisodesToBatch,
        AddStatesFromEpisodesToBatch,
        AddTimeDimToBatchAndZeroPad,
        AgentToModuleMapping,
        BatchIndividualItems,
        EnvToModulePipeline,
        NumpyToTensor,
    )
    custom_connectors = []
    # Create an env-to-module connector pipeline (including RLlib's default
    # env->module connector piece) and return it.
    if self._env_to_module_connector is not None:
        try:
            val_ = self._env_to_module_connector(env, spaces, device)
        # Try deprecated signature, if necessary.
        except TypeError as e:
            if "positional argument" in e.args[0]:
                if log_once("env-to-module-wrong-signature"):
                    logger.error(
                        "Your `config.env_to_module_connector` function seems to "
                        "have a wrong or outdated signature! It should be: "
                        "`def myfunc(env, spaces, device): ...`, where any of "
                        "these arguments are optional and may be None.\n"
                        "`env` is the (vectorized) gym env.\n"
                        "`spaces` is a dict of structure `{'__env__': (["
                        "vectorized env obs. space, vectorized env act. space]),"
                        "'__env_single__': ([env obs. space, env act. space])}`.\n"
                        "`device` is a (torch) device.\n"
                    )
                # Retry with the deprecated 1-arg signature.
                val_ = self._env_to_module_connector(env)
            else:
                # Unrelated TypeError -> re-raise.
                raise e
        # ConnectorV2 (piece or pipeline).
        if isinstance(val_, ConnectorV2):
            custom_connectors = [val_]
        # Sequence of individual ConnectorV2 pieces.
        elif isinstance(val_, (list, tuple)):
            custom_connectors = list(val_)
        # Unsupported return value.
        else:
            raise ValueError(
                "`AlgorithmConfig.env_runners(env_to_module_connector=..)` must "
                "return a ConnectorV2 object or a list thereof to be added to a "
                f"connector pipeline! Your function returned {val_}."
            )
    # Infer the observation space: env -> `spaces` arg -> `self`.
    if env is not None:
        obs_space = getattr(env, "single_observation_space", env.observation_space)
    elif spaces is not None and INPUT_ENV_SINGLE_SPACES in spaces:
        obs_space = spaces[INPUT_ENV_SINGLE_SPACES][0]
    else:
        obs_space = self.observation_space
    # Multi-agent fallback: build a Dict space from the per-agent spaces.
    # NOTE(review): `env.envs[0]` assumes a vectorized env here; if `env` is
    # None as well, this raises -- TODO confirm intended.
    if obs_space is None and self.is_multi_agent:
        obs_space = gym.spaces.Dict(
            {
                aid: env.envs[0].unwrapped.get_observation_space(aid)
                for aid in env.envs[0].unwrapped.possible_agents
            }
        )
    # Infer the action space analogously.
    if env is not None:
        act_space = getattr(env, "single_action_space", env.action_space)
    elif spaces is not None and INPUT_ENV_SINGLE_SPACES in spaces:
        act_space = spaces[INPUT_ENV_SINGLE_SPACES][1]
    else:
        act_space = self.action_space
    if act_space is None and self.is_multi_agent:
        act_space = gym.spaces.Dict(
            {
                aid: env.envs[0].unwrapped.get_action_space(aid)
                for aid in env.envs[0].unwrapped.possible_agents
            }
        )
    pipeline = EnvToModulePipeline(
        input_observation_space=obs_space,
        input_action_space=act_space,
        connectors=custom_connectors,
    )
    if self.add_default_connectors_to_env_to_module_pipeline:
        # Append OBS handling.
        pipeline.append(AddObservationsFromEpisodesToBatch())
        # Append time-rank handler.
        pipeline.append(AddTimeDimToBatchAndZeroPad())
        # Append STATE_IN/STATE_OUT handler.
        pipeline.append(AddStatesFromEpisodesToBatch())
        # If multi-agent -> Map from AgentID-based data to ModuleID based data.
        if self.is_multi_agent:
            pipeline.append(
                AgentToModuleMapping(
                    rl_module_specs=(
                        self.rl_module_spec.rl_module_specs
                        if isinstance(self.rl_module_spec, MultiRLModuleSpec)
                        else set(self.policies)
                    ),
                    agent_to_module_mapping_fn=self.policy_mapping_fn,
                )
            )
        # Batch all data.
        pipeline.append(BatchIndividualItems(multi_agent=self.is_multi_agent))
        # Convert to Tensors.
        pipeline.append(NumpyToTensor(device=device))
    return pipeline
def build_module_to_env_connector(self, env=None, spaces=None) -> ConnectorV2:
    """Builds the module-to-env connector pipeline for this config.

    Starts from any user-defined connector(s) (set via
    `config.env_runners(module_to_env_connector=...)`) and, unless
    `self.add_default_connectors_to_module_to_env_pipeline` is False, adds
    RLlib's default module-to-env pieces around them.

    Args:
        env: The (vectorized) gym env to extract spaces from (may be None).
        spaces: Optional dict of spaces; the `INPUT_ENV_SINGLE_SPACES` key maps
            to an (obs-space, act-space) tuple of the single (non-vectorized)
            env.

    Returns:
        The assembled `ModuleToEnvPipeline` (a ConnectorV2).
    """
    from ray.rllib.connectors.module_to_env import (
        GetActions,
        ListifyDataForVectorEnv,
        ModuleToAgentUnmapping,
        ModuleToEnvPipeline,
        NormalizeAndClipActions,
        RemoveSingleTsTimeRankFromBatch,
        TensorToNumpy,
        UnBatchToIndividualItems,
    )
    custom_connectors = []
    # Create a module-to-env connector pipeline (including RLlib's default
    # module->env connector piece) and return it.
    if self._module_to_env_connector is not None:
        try:
            val_ = self._module_to_env_connector(env, spaces)
        # Try deprecated signature, if necessary.
        except TypeError as e:
            if "positional argument" in e.args[0]:
                if log_once("module-to-env-wrong-signature"):
                    logger.error(
                        "Your `config.module_to_env_connector` function seems to "
                        "have a wrong or outdated signature! It should be: "
                        "`def myfunc(env, spaces): ...`, where any of "
                        "these arguments are optional and may be None.\n"
                        "`env` is the (vectorized) gym env.\n"
                        "`spaces` is a dict of structure `{'__env__': (["
                        "vectorized env obs. space, vectorized env act. space]),"
                        "'__env_single__': ([env obs. space, env act. space])}`.\n"
                    )
                # Retry with the deprecated 1-arg signature.
                val_ = self._module_to_env_connector(env)
            else:
                # FIX: An unrelated TypeError must be re-raised. Previously it
                # was silently swallowed here, leaving `val_` unbound and
                # causing a confusing UnboundLocalError below (the sibling
                # `build_env_to_module_connector` already does this).
                raise e
        # ConnectorV2 (piece or pipeline).
        if isinstance(val_, ConnectorV2):
            custom_connectors = [val_]
        # Sequence of individual ConnectorV2 pieces.
        elif isinstance(val_, (list, tuple)):
            custom_connectors = list(val_)
        # Unsupported return value.
        else:
            raise ValueError(
                "`AlgorithmConfig.env_runners(module_to_env_connector=..)` must "
                "return a ConnectorV2 object or a list thereof to be added to a "
                f"connector pipeline! Your function returned {val_}."
            )
    # Infer the observation space: env -> `spaces` arg -> `self`.
    if env is not None:
        obs_space = getattr(env, "single_observation_space", env.observation_space)
    elif spaces is not None and INPUT_ENV_SINGLE_SPACES in spaces:
        obs_space = spaces[INPUT_ENV_SINGLE_SPACES][0]
    else:
        obs_space = self.observation_space
    # Multi-agent fallback: build a Dict space from the per-agent spaces.
    # NOTE(review): `env.envs[0]` assumes a vectorized env here; if `env` is
    # None as well, this raises -- TODO confirm intended.
    if obs_space is None and self.is_multi_agent:
        obs_space = gym.spaces.Dict(
            {
                aid: env.envs[0].unwrapped.get_observation_space(aid)
                for aid in env.envs[0].unwrapped.possible_agents
            }
        )
    # Infer the action space analogously.
    if env is not None:
        act_space = getattr(env, "single_action_space", env.action_space)
    elif spaces is not None and INPUT_ENV_SINGLE_SPACES in spaces:
        act_space = spaces[INPUT_ENV_SINGLE_SPACES][1]
    else:
        act_space = self.action_space
    if act_space is None and self.is_multi_agent:
        act_space = gym.spaces.Dict(
            {
                aid: env.envs[0].unwrapped.get_action_space(aid)
                for aid in env.envs[0].unwrapped.possible_agents
            }
        )
    pipeline = ModuleToEnvPipeline(
        input_observation_space=obs_space,
        input_action_space=act_space,
        connectors=custom_connectors,
    )
    if self.add_default_connectors_to_module_to_env_pipeline:
        # Prepend: Anything that has to do with plain data processing (not
        # particularly with the actions).
        # Remove extra time-rank, if applicable.
        pipeline.prepend(RemoveSingleTsTimeRankFromBatch())
        # If multi-agent -> Map from ModuleID-based data to AgentID based data.
        if self.is_multi_agent:
            pipeline.prepend(ModuleToAgentUnmapping())
        # Unbatch all data.
        pipeline.prepend(UnBatchToIndividualItems())
        # Convert to numpy.
        pipeline.prepend(TensorToNumpy())
        # Sample actions from ACTION_DIST_INPUTS (if ACTIONS not present).
        pipeline.prepend(GetActions())
        # Append: Anything that has to do with action sampling.
        # Unsquash/clip actions based on config and action space.
        pipeline.append(
            NormalizeAndClipActions(
                normalize_actions=self.normalize_actions,
                clip_actions=self.clip_actions,
            )
        )
        # Listify data from ConnectorV2-data format to normal lists that we can
        # index into by env vector index. These lists contain individual items
        # for single-agent and multi-agent dicts for multi-agent.
        pipeline.append(ListifyDataForVectorEnv())
    return pipeline
def build_learner_connector(
    self,
    input_observation_space,
    input_action_space,
    device=None,
) -> ConnectorV2:
    """Builds the Learner connector pipeline for this config.

    Starts from any user-defined connector(s) (set via
    `config.learners(learner_connector=...)`) and, unless
    `self.add_default_connectors_to_learner_pipeline` is False, appends
    RLlib's default learner connector pieces.

    Args:
        input_observation_space: Observation space the pipeline receives.
        input_action_space: Action space the pipeline receives.
        device: Optional (torch) device for the final `NumpyToTensor` piece.

    Returns:
        The assembled `LearnerConnectorPipeline` (a ConnectorV2).
    """
    from ray.rllib.connectors.learner import (
        AddColumnsFromEpisodesToTrainBatch,
        AddObservationsFromEpisodesToBatch,
        AddStatesFromEpisodesToBatch,
        AddTimeDimToBatchAndZeroPad,
        AgentToModuleMapping,
        BatchIndividualItems,
        LearnerConnectorPipeline,
        NumpyToTensor,
    )
    custom_connectors = []
    # Create a learner connector pipeline (including RLlib's default
    # learner connector piece) and return it.
    if self._learner_connector is not None:
        val_ = self._learner_connector(
            input_observation_space,
            input_action_space,
            # device,  # TODO (sven): Also pass device into custom builder.
        )
        # ConnectorV2 (piece or pipeline).
        if isinstance(val_, ConnectorV2):
            custom_connectors = [val_]
        # Sequence of individual ConnectorV2 pieces.
        elif isinstance(val_, (list, tuple)):
            custom_connectors = list(val_)
        # Unsupported return value.
        else:
            raise ValueError(
                "`AlgorithmConfig.learners(learner_connector=..)` must return "
                "a ConnectorV2 object or a list thereof to be added to a connector "
                f"pipeline! Your function returned {val_}."
            )
    pipeline = LearnerConnectorPipeline(
        connectors=custom_connectors,
        input_observation_space=input_observation_space,
        input_action_space=input_action_space,
    )
    if self.add_default_connectors_to_learner_pipeline:
        # Append OBS handling.
        pipeline.append(
            AddObservationsFromEpisodesToBatch(as_learner_connector=True)
        )
        # Append all other columns handling.
        pipeline.append(AddColumnsFromEpisodesToTrainBatch())
        # Append time-rank handler.
        pipeline.append(AddTimeDimToBatchAndZeroPad(as_learner_connector=True))
        # Append STATE_IN/STATE_OUT handler.
        pipeline.append(AddStatesFromEpisodesToBatch(as_learner_connector=True))
        # If multi-agent -> Map from AgentID-based data to ModuleID based data.
        if self.is_multi_agent:
            pipeline.append(
                AgentToModuleMapping(
                    rl_module_specs=(
                        self.rl_module_spec.rl_module_specs
                        if isinstance(self.rl_module_spec, MultiRLModuleSpec)
                        else set(self.policies)
                    ),
                    agent_to_module_mapping_fn=self.policy_mapping_fn,
                )
            )
        # Batch all data.
        pipeline.append(BatchIndividualItems(multi_agent=self.is_multi_agent))
        # Convert to Tensors.
        pipeline.append(NumpyToTensor(as_learner_connector=True, device=device))
    return pipeline
def build_learner_group(
    self,
    *,
    env: Optional[EnvType] = None,
    spaces: Optional[Dict[ModuleID, Tuple[gym.Space, gym.Space]]] = None,
    rl_module_spec: Optional[RLModuleSpecType] = None,
    placement_group: Optional["PlacementGroup"] = None,
) -> "LearnerGroup":
    """Builds and returns a new LearnerGroup object based on settings in `self`.

    Args:
        env: An optional EnvType object (e.g. a gym.Env) useful for extracting
            space information for the to-be-constructed RLModule inside the
            LearnerGroup's Learner workers. If RLlib cannot infer any space
            information from this arg, from `spaces`, or from `self`, the
            LearnerGroup cannot be created.
        spaces: An optional dict mapping ModuleIDs to
            (observation-space, action-space)-tuples for the to-be-constructed
            RLModule inside the LearnerGroup's Learner workers. If RLlib cannot
            infer any space information from this arg, from `env`, or from
            `self`, the LearnerGroup cannot be created.
        rl_module_spec: An optional (single-agent or multi-agent) RLModuleSpec
            to use for the constructed LearnerGroup. If None, RLlib tries to
            infer the RLModuleSpec using the other information given and stored
            in this `AlgorithmConfig` object.
        placement_group: An optional PlacementGroup for the LearnerGroup's
            actors.

    Returns:
        The newly created `LearnerGroup` object.
    """
    from ray.rllib.core.learner.learner_group import LearnerGroup

    # No spec given -> infer a MultiRLModuleSpec from `env`/`spaces`/`self`.
    if rl_module_spec is None:
        rl_module_spec = self.get_multi_rl_module_spec(env=env, spaces=spaces)

    # Construct the actual LearnerGroup (on a frozen copy of this config).
    return LearnerGroup(
        config=self.copy(),
        module_spec=rl_module_spec,
        placement_group=placement_group,
    )
def build_learner(
    self,
    *,
    env: Optional[EnvType] = None,
    spaces: Optional[Dict[PolicyID, Tuple[gym.Space, gym.Space]]] = None,
) -> "Learner":
    """Builds and returns a new Learner object based on settings in `self`.

    This Learner object already has its `build()` method called, meaning
    its RLModule is already constructed.

    Args:
        env: An optional EnvType object (e.g. a gym.Env) useful for extracting
            space information for the to-be-constructed RLModule inside the
            Learner. If RLlib cannot infer any space information from this arg,
            from `spaces`, or from `self`, the Learner cannot be created.
        spaces: An optional dict mapping ModuleIDs to
            (observation-space, action-space)-tuples for the to-be-constructed
            RLModule inside the Learner. If RLlib cannot infer any space
            information from this arg, from `env`, or from `self`, the Learner
            cannot be created.

    Returns:
        The newly created (and already built) Learner object.
    """
    # Only infer a MultiRLModuleSpec if any space information was provided.
    module_spec = (
        self.get_multi_rl_module_spec(env=env, spaces=spaces)
        if env is not None or spaces is not None
        else None
    )
    learner = self.learner_class(config=self, module_spec=module_spec)
    # `build()` the Learner (internal structures such as RLModule, etc..).
    learner.build()
    return learner
def get_config_for_module(self, module_id: ModuleID) -> Self:
    """Returns an AlgorithmConfig object, specific to the given module ID.

    In a multi-agent setup, individual modules might override one or more
    AlgorithmConfig properties (e.g. `train_batch_size`, `lr`) using the
    `overrides()` method.

    In order to retrieve a full AlgorithmConfig instance (with all these
    overrides already translated and built-in), users can call this method with
    the respective module ID.

    Args:
        module_id: The module ID for which to get the final AlgorithmConfig
            object.

    Returns:
        A new AlgorithmConfig object for the specific module ID.
    """
    cache = self._per_module_overrides
    # Cached already -> return the cached per-module config.
    if module_id in cache:
        return cache[module_id]
    # Overrides exist for this ModuleID -> build, cache, and return a new
    # per-module config object.
    if module_id in self.algorithm_config_overrides_per_module:
        cache[module_id] = self.copy().update_from_dict(
            self.algorithm_config_overrides_per_module[module_id]
        )
        return cache[module_id]
    # No overrides for ModuleID -> `self` applies as-is.
    return self
def python_environment(
    self,
    *,
    extra_python_environs_for_driver: Optional[dict] = NotProvided,
    extra_python_environs_for_worker: Optional[dict] = NotProvided,
) -> Self:
    """Sets the config's python environment settings.

    Args:
        extra_python_environs_for_driver: Any extra python env vars to set in
            the algorithm's process, e.g., {"OMP_NUM_THREADS": "16"}.
        extra_python_environs_for_worker: The extra python environments need to
            set for worker processes.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Each setting maps 1:1 onto the attribute of the same name; only apply
    # those that the user actually provided.
    for attr, val in (
        ("extra_python_environs_for_driver", extra_python_environs_for_driver),
        ("extra_python_environs_for_worker", extra_python_environs_for_worker),
    ):
        if val is not NotProvided:
            setattr(self, attr, val)
    return self
def resources(
    self,
    *,
    num_cpus_for_main_process: Optional[int] = NotProvided,
    num_gpus: Optional[Union[float, int]] = NotProvided,  # @OldAPIStack
    _fake_gpus: Optional[bool] = NotProvided,  # @OldAPIStack
    placement_strategy: Optional[str] = NotProvided,
    # Deprecated args.
    num_cpus_per_worker=DEPRECATED_VALUE,  # moved to `env_runners`
    num_gpus_per_worker=DEPRECATED_VALUE,  # moved to `env_runners`
    custom_resources_per_worker=DEPRECATED_VALUE,  # moved to `env_runners`
    num_learner_workers=DEPRECATED_VALUE,  # moved to `learners`
    num_cpus_per_learner_worker=DEPRECATED_VALUE,  # moved to `learners`
    num_gpus_per_learner_worker=DEPRECATED_VALUE,  # moved to `learners`
    local_gpu_idx=DEPRECATED_VALUE,  # moved to `learners`
    num_cpus_for_local_worker=DEPRECATED_VALUE,
) -> Self:
    """Specifies resources allocated for an Algorithm and its ray actors/workers.

    Args:
        num_cpus_for_main_process: Number of CPUs to allocate for the main
            algorithm process that runs `Algorithm.training_step()`.
            Note: This is only relevant when running RLlib through Tune.
            Otherwise, `Algorithm.training_step()` runs in the main program
            (driver).
        num_gpus: Number of GPUs to allocate to the algorithm process.
            Note that not all algorithms can take advantage of GPUs.
            Support for multi-GPU is currently only available for
            tf-[PPO/IMPALA/DQN/PG]. This can be fractional (e.g., 0.3 GPUs).
        _fake_gpus: Set to True for debugging (multi-)?GPU functionality on a
            CPU machine. GPU towers are simulated by graphs located on
            CPUs in this case. Use `num_gpus` to test for different numbers of
            fake GPUs.
        placement_strategy: The strategy for the placement group factory
            returned by `Algorithm.default_resource_request()`. A
            PlacementGroup defines, which devices (resources) should always be
            co-located on the same node. For example, an Algorithm with 2
            EnvRunners and 1 Learner (with 1 GPU) requests a placement group
            with the bundles: [{"cpu": 1}, {"gpu": 1, "cpu": 1}, {"cpu": 1},
            {"cpu": 1}], where the first bundle is for the local (main
            Algorithm) process, the second one for the 1 Learner worker and the
            last 2 bundles are for the two EnvRunners. These bundles can now be
            "placed" on the same or different nodes depending on the value of
            `placement_strategy`:
            "PACK": Packs bundles into as few nodes as possible.
            "SPREAD": Places bundles across distinct nodes as even as possible.
            "STRICT_PACK": Packs bundles into one node. The group is not
            allowed to span multiple nodes.
            "STRICT_SPREAD": Packs bundles across distinct nodes.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Deprecated args: (value, old location, new location, target attribute).
    # Each one that was actually passed emits a deprecation warning and is
    # forwarded to its new attribute.
    deprecated_args = (
        (
            num_cpus_per_worker,
            "resources(num_cpus_per_worker)",
            "env_runners(num_cpus_per_env_runner)",
            "num_cpus_per_env_runner",
        ),
        (
            num_gpus_per_worker,
            "resources(num_gpus_per_worker)",
            "env_runners(num_gpus_per_env_runner)",
            "num_gpus_per_env_runner",
        ),
        (
            custom_resources_per_worker,
            "resources(custom_resources_per_worker)",
            "env_runners(custom_resources_per_env_runner)",
            "custom_resources_per_env_runner",
        ),
        (
            num_learner_workers,
            "resources(num_learner_workers)",
            "learners(num_learner)",
            "num_learners",
        ),
        (
            num_cpus_per_learner_worker,
            "resources(num_cpus_per_learner_worker)",
            "learners(num_cpus_per_learner)",
            "num_cpus_per_learner",
        ),
        (
            num_gpus_per_learner_worker,
            "resources(num_gpus_per_learner_worker)",
            "learners(num_gpus_per_learner)",
            "num_gpus_per_learner",
        ),
        (
            local_gpu_idx,
            "resources(local_gpu_idx)",
            "learners(local_gpu_idx)",
            "local_gpu_idx",
        ),
        (
            num_cpus_for_local_worker,
            "resources(num_cpus_for_local_worker)",
            "resources(num_cpus_for_main_process)",
            "num_cpus_for_main_process",
        ),
    )
    for val, old_loc, new_loc, attr in deprecated_args:
        if val != DEPRECATED_VALUE:
            deprecation_warning(
                old=f"AlgorithmConfig.{old_loc}",
                new=f"AlgorithmConfig.{new_loc}",
                error=False,
            )
            setattr(self, attr, val)

    # Current (non-deprecated) settings; only apply those actually provided.
    # NOTE: Applied after the deprecated args, so `num_cpus_for_main_process`
    # takes precedence over a simultaneously passed `num_cpus_for_local_worker`.
    if num_cpus_for_main_process is not NotProvided:
        self.num_cpus_for_main_process = num_cpus_for_main_process
    if num_gpus is not NotProvided:
        self.num_gpus = num_gpus
    if _fake_gpus is not NotProvided:
        self._fake_gpus = _fake_gpus
    if placement_strategy is not NotProvided:
        self.placement_strategy = placement_strategy

    return self
def framework(
    self,
    framework: Optional[str] = NotProvided,
    *,
    eager_tracing: Optional[bool] = NotProvided,
    eager_max_retraces: Optional[int] = NotProvided,
    tf_session_args: Optional[Dict[str, Any]] = NotProvided,
    local_tf_session_args: Optional[Dict[str, Any]] = NotProvided,
    torch_compile_learner: Optional[bool] = NotProvided,
    torch_compile_learner_what_to_compile: Optional[str] = NotProvided,
    torch_compile_learner_dynamo_mode: Optional[str] = NotProvided,
    torch_compile_learner_dynamo_backend: Optional[str] = NotProvided,
    torch_compile_worker: Optional[bool] = NotProvided,
    torch_compile_worker_dynamo_backend: Optional[str] = NotProvided,
    torch_compile_worker_dynamo_mode: Optional[str] = NotProvided,
    torch_ddp_kwargs: Optional[Dict[str, Any]] = NotProvided,
    torch_skip_nan_gradients: Optional[bool] = NotProvided,
) -> Self:
    """Sets the config's DL framework settings.

    Args:
        framework: torch: PyTorch; tf2: TensorFlow 2.x (eager execution or
            traced if eager_tracing=True); tf: TensorFlow (static-graph);
        eager_tracing: Enable tracing in eager mode. This greatly improves
            performance (speedup ~2x), but makes it slightly harder to debug
            since Python code won't be evaluated after the initial eager pass.
            Only possible if framework=tf2.
        eager_max_retraces: Maximum number of tf.function re-traces before a
            runtime error is raised. This is to prevent unnoticed retraces of
            methods inside the `..._eager_traced` Policy, which could slow down
            execution by a factor of 4, without the user noticing what the root
            cause for this slowdown could be. Only necessary for framework=tf2.
            Set to None to ignore the re-trace count and never throw an error.
        tf_session_args: Configures TF for single-process operation by default.
        local_tf_session_args: Override the following tf session args on the
            local worker.
        torch_compile_learner: If True, forward_train methods on TorchRLModules
            on the learner are compiled. If not specified, the default is to
            compile forward train on the learner.
        torch_compile_learner_what_to_compile: A TorchCompileWhatToCompile
            mode specifying what to compile on the learner side if
            torch_compile_learner is True. See TorchCompileWhatToCompile for
            details and advice on its usage.
        torch_compile_learner_dynamo_backend: The torch dynamo backend to use
            on the learner.
        torch_compile_learner_dynamo_mode: The torch dynamo mode to use on the
            learner.
        torch_compile_worker: If True, forward exploration and inference
            methods on TorchRLModules on the workers are compiled. If not
            specified, the default is to not compile forward methods on the
            workers because retracing can be expensive.
        torch_compile_worker_dynamo_backend: The torch dynamo backend to use on
            the workers.
        torch_compile_worker_dynamo_mode: The torch dynamo mode to use on the
            workers.
        torch_ddp_kwargs: The kwargs to pass into
            `torch.nn.parallel.DistributedDataParallel` when using
            `num_learners > 1`. This is specifically helpful when searching for
            unused parameters that are not used in the backward pass. This can
            give hints for errors in custom models where some parameters do not
            get touched in the backward pass although they should.
        torch_skip_nan_gradients: If updates with `nan` gradients should be
            entirely skipped. This skips updates in the optimizer entirely if
            they contain any `nan` gradient. This can help to avoid biasing
            moving-average based optimizers - like Adam. This can help in
            training phases where policy updates can be highly unstable such as
            during the early stages of training or with highly exploratory
            policies. In such phases many gradients might turn `nan` and
            setting them to zero could corrupt the optimizer's internal state.
            The default is `False` and turns `nan` gradients to zero. If many
            `nan` gradients are encountered consider (a) monitoring gradients
            by setting `log_gradients` in `AlgorithmConfig` to `True`, (b) use
            proper weight initialization (e.g. Xavier, Kaiming) via the
            `model_config_dict` in `AlgorithmConfig.rl_module` and/or (c)
            gradient clipping via `grad_clip` in `AlgorithmConfig.training`.

    Returns:
        This updated AlgorithmConfig object.
    """
    if framework is not NotProvided:
        # "tfe" was hard-deprecated in favor of "tf2" (raises).
        if framework == "tfe":
            deprecation_warning(
                old="AlgorithmConfig.framework('tfe')",
                new="AlgorithmConfig.framework('tf2')",
                error=True,
            )
        self.framework_str = framework

    # All remaining settings map 1:1 onto the attribute of the same name;
    # only apply those the user actually provided.
    for attr, val in (
        ("eager_tracing", eager_tracing),
        ("eager_max_retraces", eager_max_retraces),
        ("tf_session_args", tf_session_args),
        ("local_tf_session_args", local_tf_session_args),
        ("torch_compile_learner", torch_compile_learner),
        (
            "torch_compile_learner_dynamo_backend",
            torch_compile_learner_dynamo_backend,
        ),
        ("torch_compile_learner_dynamo_mode", torch_compile_learner_dynamo_mode),
        (
            "torch_compile_learner_what_to_compile",
            torch_compile_learner_what_to_compile,
        ),
        ("torch_compile_worker", torch_compile_worker),
        (
            "torch_compile_worker_dynamo_backend",
            torch_compile_worker_dynamo_backend,
        ),
        ("torch_compile_worker_dynamo_mode", torch_compile_worker_dynamo_mode),
        ("torch_ddp_kwargs", torch_ddp_kwargs),
        ("torch_skip_nan_gradients", torch_skip_nan_gradients),
    ):
        if val is not NotProvided:
            setattr(self, attr, val)

    return self
def api_stack(
self,
enable_rl_module_and_learner: Optional[bool] = NotProvided,
enable_env_runner_and_connector_v2: Optional[bool] = NotProvided,
) -> Self:
"""Sets the config's API stack settings.
Args:
enable_rl_module_and_learner: Enables the usage of `RLModule` (instead of
`ModelV2`) and Learner (instead of the training-related parts of
`Policy`). Must be used with `enable_env_runner_and_connector_v2=True`.
Together, these two settings activate the "new API stack" of RLlib.
enable_env_runner_and_connector_v2: Enables the usage of EnvRunners
(SingleAgentEnvRunner and MultiAgentEnvRunner) and ConnectorV2.
When setting this to True, `enable_rl_module_and_learner` must be True
as well. Together, these two settings activate the "new API stack" of
RLlib.
Returns:
This updated AlgorithmConfig object.
"""
if enable_rl_module_and_learner is not NotProvided:
self.enable_rl_module_and_learner = enable_rl_module_and_learner
if enable_rl_module_and_learner is True and self.exploration_config:
self._prior_exploration_config = self.exploration_config
self.exploration_config = {}
elif enable_rl_module_and_learner is False and not self.exploration_config:
if self._prior_exploration_config is not None:
self.exploration_config = self._prior_exploration_config
self._prior_exploration_config = None
else:
logger.warning(
"config.enable_rl_module_and_learner was set to False, but no "
"prior exploration config was found to be restored."
)
if enable_env_runner_and_connector_v2 is not NotProvided:
self.enable_env_runner_and_connector_v2 = enable_env_runner_and_connector_v2
return self
def environment(
self,
env: Optional[Union[str, EnvType]] = NotProvided,
*,
env_config: Optional[EnvConfigDict] = NotProvided,
observation_space: Optional[gym.Space] = NotProvided,
action_space: Optional[gym.Space] = NotProvided,
render_env: Optional[bool] = NotProvided,
clip_rewards: Optional[Union[bool, float]] = NotProvided,
normalize_actions: Optional[bool] = NotProvided,
clip_actions: Optional[bool] = NotProvided,
disable_env_checking: Optional[bool] = NotProvided,
is_atari: Optional[bool] = NotProvided,
action_mask_key: Optional[str] = NotProvided,
# Deprecated args.
env_task_fn=DEPRECATED_VALUE,
) -> Self:
"""Sets the config's RL-environment settings.
Args:
env: The environment specifier. This can either be a tune-registered env,
via `tune.register_env([name], lambda env_ctx: [env object])`,
or a string specifier of an RLlib supported type. In the latter case,
RLlib tries to interpret the specifier as either an Farama-Foundation
gymnasium env, a PyBullet env, or a fully qualified classpath to an Env
class, e.g. "ray.rllib.examples.envs.classes.random_env.RandomEnv".
env_config: Arguments dict passed to the env creator as an EnvContext
object (which is a dict plus the properties: `num_env_runners`,
`worker_index`, `vector_index`, and `remote`).
observation_space: The observation space for the Policies of this Algorithm.
action_space: The action space for the Policies of this Algorithm.
render_env: If True, try to render the environment on the local worker or on
worker 1 (if num_env_runners > 0). For vectorized envs, this usually
means that only the first sub-environment is rendered.
In order for this to work, your env has to implement the
`render()` method which either:
a) handles window generation and rendering itself (returning True) or
b) returns a numpy uint8 image of shape [height x width x 3 (RGB)].
clip_rewards: Whether to clip rewards during Policy's postprocessing.
None (default): Clip for Atari only (r=sign(r)).
True: r=sign(r): Fixed rewards -1.0, 1.0, or 0.0.
False: Never clip.
[float value]: Clip at -value and + value.
Tuple[value1, value2]: Clip at value1 and value2.
normalize_actions: If True, RLlib learns entirely inside a normalized
action space (0.0 centered with small stddev; only affecting Box
components). RLlib unsquashes actions (and clip, just in case) to the
bounds of the env's action space before sending actions back to the env.
clip_actions: If True, the RLlib default ModuleToEnv connector clips
actions according to the env's bounds (before sending them into the
`env.step()` call).
disable_env_checking: Disable RLlib's env checks after a gymnasium.Env
instance has been constructed in an EnvRunner. Note that the checks
include an `env.reset()` and `env.step()` (with a random action), which
might tinker with your env's logic and behavior and thus negatively
influence sample collection- and/or learning behavior.
is_atari: This config can be used to explicitly specify whether the env is
an Atari env or not. If not specified, RLlib tries to auto-detect
this.
action_mask_key: If observation is a dictionary, expect the value by
the key `action_mask_key` to contain a valid actions mask (`numpy.int8`
array of zeros and ones). Defaults to "action_mask".
Returns:
This updated AlgorithmConfig object.
"""
if env_task_fn != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.environment(env_task_fn=..)",
error=True,
)
if env is not NotProvided:
self.env = env
if env_config is not NotProvided:
deep_update(self.env_config, env_config, True)
if observation_space is not NotProvided:
self.observation_space = observation_space
if action_space is not NotProvided:
self.action_space = action_space
if render_env is not NotProvided:
self.render_env = render_env
if clip_rewards is not NotProvided:
self.clip_rewards = clip_rewards
if normalize_actions is not NotProvided:
self.normalize_actions = normalize_actions
if clip_actions is not NotProvided:
self.clip_actions = clip_actions
if disable_env_checking is not NotProvided:
self.disable_env_checking = disable_env_checking
if is_atari is not NotProvided:
self._is_atari = is_atari
if action_mask_key is not NotProvided:
self.action_mask_key = action_mask_key
return self
def env_runners(
self,
*,
env_runner_cls: Optional[type] = NotProvided,
num_env_runners: Optional[int] = NotProvided,
create_local_env_runner: Optional[bool] = NotProvided,
create_env_on_local_worker: Optional[bool] = NotProvided,
num_envs_per_env_runner: Optional[int] = NotProvided,
gym_env_vectorize_mode: Optional[Union[str, gym.VectorizeMode]] = NotProvided,
num_cpus_per_env_runner: Optional[int] = NotProvided,
num_gpus_per_env_runner: Optional[Union[float, int]] = NotProvided,
custom_resources_per_env_runner: Optional[dict] = NotProvided,
validate_env_runners_after_construction: Optional[bool] = NotProvided,
sample_timeout_s: Optional[float] = NotProvided,
max_requests_in_flight_per_env_runner: Optional[int] = NotProvided,
env_to_module_connector: Optional[
Callable[[EnvType], Union["ConnectorV2", List["ConnectorV2"]]]
] = NotProvided,
module_to_env_connector: Optional[
Callable[[EnvType, "RLModule"], Union["ConnectorV2", List["ConnectorV2"]]]
] = NotProvided,
add_default_connectors_to_env_to_module_pipeline: Optional[bool] = NotProvided,
add_default_connectors_to_module_to_env_pipeline: Optional[bool] = NotProvided,
episode_lookback_horizon: Optional[int] = NotProvided,
merge_env_runner_states: Optional[Union[str, bool]] = NotProvided,
broadcast_env_runner_states: Optional[bool] = NotProvided,
compress_observations: Optional[bool] = NotProvided,
rollout_fragment_length: Optional[Union[int, str]] = NotProvided,
batch_mode: Optional[str] = NotProvided,
explore: Optional[bool] = NotProvided,
episodes_to_numpy: Optional[bool] = NotProvided,
# @OldAPIStack settings.
use_worker_filter_stats: Optional[bool] = NotProvided,
update_worker_filter_stats: Optional[bool] = NotProvided,
exploration_config: Optional[dict] = NotProvided, # @OldAPIStack
sample_collector: Optional[Type[SampleCollector]] = NotProvided, # @OldAPIStack
remote_worker_envs: Optional[bool] = NotProvided, # @OldAPIStack
remote_env_batch_wait_ms: Optional[float] = NotProvided, # @OldAPIStack
preprocessor_pref: Optional[str] = NotProvided, # @OldAPIStack
observation_filter: Optional[str] = NotProvided, # @OldAPIStack
enable_tf1_exec_eagerly: Optional[bool] = NotProvided, # @OldAPIStack
sampler_perf_stats_ema_coef: Optional[float] = NotProvided, # @OldAPIStack
# Deprecated args.
num_rollout_workers=DEPRECATED_VALUE,
num_envs_per_worker=DEPRECATED_VALUE,
validate_workers_after_construction=DEPRECATED_VALUE,
ignore_worker_failures=DEPRECATED_VALUE,
recreate_failed_workers=DEPRECATED_VALUE,
restart_failed_sub_environments=DEPRECATED_VALUE,
num_consecutive_worker_failures_tolerance=DEPRECATED_VALUE,
worker_health_probe_timeout_s=DEPRECATED_VALUE,
worker_restore_timeout_s=DEPRECATED_VALUE,
synchronize_filter=DEPRECATED_VALUE,
enable_connectors=DEPRECATED_VALUE,
) -> Self:
"""Sets the rollout worker configuration.
Args:
env_runner_cls: The EnvRunner class to use for environment rollouts (data
collection).
num_env_runners: Number of EnvRunner actors to create for parallel sampling.
Setting this to 0 forces sampling to be done in the local
EnvRunner (main process or the Algorithm's actor when using Tune).
num_envs_per_env_runner: Number of environments to step through
(vector-wise) per EnvRunner. This enables batching when computing
actions through RLModule inference, which can improve performance
for inference-bottlenecked workloads.
gym_env_vectorize_mode: The gymnasium vectorization mode for vector envs.
Must be a `gymnasium.VectorizeMode` (enum) value.
Default is SYNC. Set this to ASYNC to parallelize the individual sub
environments within the vector. This can speed up your EnvRunners
significantly when using heavier environments. Set this to
VECTOR_ENTRY_POINT in case your env creator, also known as
"gym entry point", already returns a gym.vector.VectorEnv and you
don't need RLlib to vectorize the environments for the runners.
num_cpus_per_env_runner: Number of CPUs to allocate per EnvRunner.
num_gpus_per_env_runner: Number of GPUs to allocate per EnvRunner. This can
be fractional. This is usually needed only if your env itself requires a
GPU (i.e., it is a GPU-intensive video game), or model inference is
unusually expensive.
custom_resources_per_env_runner: Any custom Ray resources to allocate per
EnvRunner.
sample_timeout_s: The timeout in seconds for calling `sample()` on remote
EnvRunner workers. Results (episode list) from workers that take longer
than this time are discarded. Only used by algorithms that sample
synchronously in turn with their update step (e.g., PPO or DQN). Not
relevant for any algos that sample asynchronously, such as APPO or
IMPALA.
max_requests_in_flight_per_env_runner: Max number of in-flight requests
to each EnvRunner (actor)). See the
`ray.rllib.utils.actor_manager.FaultTolerantActorManager` class for more
details.
Tuning these values is important when running experiments with
large sample batches, where there is the risk that the object store may
fill up, causing spilling of objects to disk. This can cause any
asynchronous requests to become very slow, making your experiment run
slowly as well. You can inspect the object store during your experiment
through a call to `ray memory` on your head node, and by using the Ray
dashboard. If you're seeing that the object store is filling up,
turn down the number of remote requests in flight or enable compression
or increase the object store memory through, for example:
`ray.init(object_store_memory=10 * 1024 * 1024 * 1024) # =10 GB`
sample_collector: For the old API stack only. The SampleCollector class to
be used to collect and retrieve environment-, model-, and sampler data.
Override the SampleCollector base class to implement your own
collection/buffering/retrieval logic.
create_local_env_runner: If True, create a local EnvRunner instance, besides
the `num_env_runners` remote EnvRunner actors. If `num_env_runners` is
0, this setting is ignored and one local EnvRunner is created
regardless.
create_env_on_local_worker: When `num_env_runners` > 0, the driver
(local_worker; worker-idx=0) does not need an environment. This is
because it doesn't have to sample (done by remote_workers;
worker_indices > 0) nor evaluate (done by evaluation workers;
see below).
env_to_module_connector: A callable taking an Env as input arg and returning
an env-to-module ConnectorV2 (might be a pipeline) object.
module_to_env_connector: A callable taking an Env and an RLModule as input
args and returning a module-to-env ConnectorV2 (might be a pipeline)
object.
add_default_connectors_to_env_to_module_pipeline: If True (default), RLlib's
EnvRunners automatically add the default env-to-module ConnectorV2
pieces to the EnvToModulePipeline. These automatically perform adding
observations and states (in case of stateful Module(s)), agent-to-module
mapping, batching, and conversion to tensor data. Only if you know
exactly what you are doing, you should set this setting to False.
Note that this setting is only relevant if the new API stack is used
(including the new EnvRunner classes).
add_default_connectors_to_module_to_env_pipeline: If True (default), RLlib's
EnvRunners automatically add the default module-to-env ConnectorV2
pieces to the ModuleToEnvPipeline. These automatically perform removing
the additional time-rank (if applicable, in case of stateful
Module(s)), module-to-agent unmapping, un-batching (to lists), and
conversion from tensor data to numpy. Only if you know exactly what you
are doing, you should set this setting to False.
Note that this setting is only relevant if the new API stack is used
(including the new EnvRunner classes).
episode_lookback_horizon: The amount of data (in timesteps) to keep from the
preceeding episode chunk when a new chunk (for the same episode) is
generated to continue sampling at a later time. The larger this value,
the more an env-to-module connector can look back in time
and compile RLModule input data from this information. For example, if
your custom env-to-module connector (and your custom RLModule) requires
the previous 10 rewards as inputs, you must set this to at least 10.
merge_env_runner_states: True, if remote EnvRunner actor states should be
merged into central connector pipelines. Use "training_only" (default)
for only doing this for the training EnvRunners, NOT for the evaluation
EnvRunners.
broadcast_env_runner_states: True, if merged EnvRunner states (from the
central connector pipelines) should be broadcast back to all remote
EnvRunner actors.
use_worker_filter_stats: Whether to use the workers in the EnvRunnerGroup to
update the central filters (held by the local worker). If False, stats
from the workers aren't used and are discarded.
update_worker_filter_stats: Whether to push filter updates from the central
filters (held by the local worker) to the remote workers' filters.
Setting this to True might be useful within the evaluation config in
order to disable the usage of evaluation trajectories for synching
the central filter (used for training).
rollout_fragment_length: Divide episodes into fragments of this many steps
each during sampling. Trajectories of this size are collected from
EnvRunners and combined into a larger batch of `train_batch_size`
for learning.
For example, given rollout_fragment_length=100 and
train_batch_size=1000:
1. RLlib collects 10 fragments of 100 steps each from rollout workers.
2. These fragments are concatenated and we perform an epoch of SGD.
When using multiple envs per worker, the fragment size is multiplied by
`num_envs_per_env_runner`. This is since we are collecting steps from
multiple envs in parallel. For example, if num_envs_per_env_runner=5,
then EnvRunners return experiences in chunks of 5*100 = 500 steps.
The dataflow here can vary per algorithm. For example, PPO further
divides the train batch into minibatches for multi-epoch SGD.
Set `rollout_fragment_length` to "auto" to have RLlib compute an exact
value to match the given batch size.
batch_mode: How to build individual batches with the EnvRunner(s). Batches
coming from distributed EnvRunners are usually concat'd to form the
train batch. Note that "steps" below can mean different things (either
env- or agent-steps) and depends on the `count_steps_by` setting,
adjustable via `AlgorithmConfig.multi_agent(count_steps_by=..)`:
1) "truncate_episodes": Each call to `EnvRunner.sample()` returns a
batch of at most `rollout_fragment_length * num_envs_per_env_runner` in
size. The batch is exactly `rollout_fragment_length * num_envs`
in size if postprocessing does not change batch sizes. Episodes
may be truncated in order to meet this size requirement.
This mode guarantees evenly sized batches, but increases
variance as the future return must now be estimated at truncation
boundaries.
2) "complete_episodes": Each call to `EnvRunner.sample()` returns a
batch of at least `rollout_fragment_length * num_envs_per_env_runner` in
size. Episodes aren't truncated, but multiple episodes
may be packed within one batch to meet the (minimum) batch size.
Note that when `num_envs_per_env_runner > 1`, episode steps are
buffered until the episode completes, and hence batches may contain
significant amounts of off-policy data.
explore: Default exploration behavior, iff `explore=None` is passed into
compute_action(s). Set to False for no exploration behavior (e.g.,
for evaluation).
episodes_to_numpy: Whether to numpy'ize episodes before
returning them from an EnvRunner. False by default. If True, EnvRunners
call `to_numpy()` on those episode (chunks) to be returned by
`EnvRunners.sample()`.
exploration_config: A dict specifying the Exploration object's config.
remote_worker_envs: If using num_envs_per_env_runner > 1, whether to create
those new envs in remote processes instead of in the same worker.
This adds overheads, but can make sense if your envs can take much
time to step / reset (e.g., for StarCraft). Use this cautiously;
overheads are significant.
remote_env_batch_wait_ms: Timeout that remote workers are waiting when
polling environments. 0 (continue when at least one env is ready) is
a reasonable default, but optimal value could be obtained by measuring
your environment step / reset and model inference perf.
validate_env_runners_after_construction: Whether to validate that each
created remote EnvRunner is healthy after its construction process.
preprocessor_pref: Whether to use "rllib" or "deepmind" preprocessors by
default. Set to None for using no preprocessor. In this case, the
model has to handle possibly complex observations from the
environment.
observation_filter: Element-wise observation filter, either "NoFilter"
or "MeanStdFilter".
compress_observations: Whether to LZ4 compress individual observations
in the SampleBatches collected during rollouts.
enable_tf1_exec_eagerly: Explicitly tells the rollout worker to enable
TF eager execution. This is useful for example when framework is
"torch", but a TF2 policy needs to be restored for evaluation or
league-based purposes.
sampler_perf_stats_ema_coef: If specified, perf stats are in EMAs. This
is the coeff of how much new data points contribute to the averages.
Default is None, which uses simple global average instead.
The EMA update rule is: updated = (1 - ema_coef) * old + ema_coef * new
Returns:
This updated AlgorithmConfig object.
"""
if enable_connectors != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(enable_connectors=...)",
error=False,
)
if num_rollout_workers != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(num_rollout_workers)",
new="AlgorithmConfig.env_runners(num_env_runners)",
error=True,
)
if num_envs_per_worker != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(num_envs_per_worker)",
new="AlgorithmConfig.env_runners(num_envs_per_env_runner)",
error=True,
)
if validate_workers_after_construction != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(validate_workers_after_construction)",
new="AlgorithmConfig.env_runners(validate_env_runners_after_"
"construction)",
error=True,
)
if env_runner_cls is not NotProvided:
self.env_runner_cls = env_runner_cls
if num_env_runners is not NotProvided:
self.num_env_runners = num_env_runners
if num_envs_per_env_runner is not NotProvided:
if num_envs_per_env_runner <= 0:
raise ValueError(
f"`num_envs_per_env_runner` ({num_envs_per_env_runner}) must be "
"larger 0!"
)
self.num_envs_per_env_runner = num_envs_per_env_runner
if gym_env_vectorize_mode is not NotProvided:
self.gym_env_vectorize_mode = gym_env_vectorize_mode
if num_cpus_per_env_runner is not NotProvided:
self.num_cpus_per_env_runner = num_cpus_per_env_runner
if num_gpus_per_env_runner is not NotProvided:
self.num_gpus_per_env_runner = num_gpus_per_env_runner
if custom_resources_per_env_runner is not NotProvided:
self.custom_resources_per_env_runner = custom_resources_per_env_runner
if sample_timeout_s is not NotProvided:
self.sample_timeout_s = sample_timeout_s
if max_requests_in_flight_per_env_runner is not NotProvided:
self.max_requests_in_flight_per_env_runner = (
max_requests_in_flight_per_env_runner
)
if sample_collector is not NotProvided:
self.sample_collector = sample_collector
if create_local_env_runner is not NotProvided:
self.create_local_env_runner = create_local_env_runner
if create_env_on_local_worker is not NotProvided:
self.create_env_on_local_worker = create_env_on_local_worker
if env_to_module_connector is not NotProvided:
self._env_to_module_connector = env_to_module_connector
if module_to_env_connector is not NotProvided:
self._module_to_env_connector = module_to_env_connector
if add_default_connectors_to_env_to_module_pipeline is not NotProvided:
self.add_default_connectors_to_env_to_module_pipeline = (
add_default_connectors_to_env_to_module_pipeline
)
if add_default_connectors_to_module_to_env_pipeline is not NotProvided:
self.add_default_connectors_to_module_to_env_pipeline = (
add_default_connectors_to_module_to_env_pipeline
)
if episode_lookback_horizon is not NotProvided:
self.episode_lookback_horizon = episode_lookback_horizon
if merge_env_runner_states is not NotProvided:
self.merge_env_runner_states = merge_env_runner_states
if broadcast_env_runner_states is not NotProvided:
self.broadcast_env_runner_states = broadcast_env_runner_states
if use_worker_filter_stats is not NotProvided:
self.use_worker_filter_stats = use_worker_filter_stats
if update_worker_filter_stats is not NotProvided:
self.update_worker_filter_stats = update_worker_filter_stats
if rollout_fragment_length is not NotProvided:
if not (
(
isinstance(rollout_fragment_length, int)
and rollout_fragment_length > 0
)
or rollout_fragment_length == "auto"
):
raise ValueError("`rollout_fragment_length` must be int >0 or 'auto'!")
self.rollout_fragment_length = rollout_fragment_length
if batch_mode is not NotProvided:
if batch_mode not in ["truncate_episodes", "complete_episodes"]:
raise ValueError(
f"`batch_mode` ({batch_mode}) must be one of [truncate_episodes|"
"complete_episodes]!"
)
self.batch_mode = batch_mode
if explore is not NotProvided:
self.explore = explore
if episodes_to_numpy is not NotProvided:
self.episodes_to_numpy = episodes_to_numpy
# @OldAPIStack
if exploration_config is not NotProvided:
# Override entire `exploration_config` if `type` key changes.
# Update, if `type` key remains the same or is not specified.
new_exploration_config = deep_update(
{"exploration_config": self.exploration_config},
{"exploration_config": exploration_config},
False,
["exploration_config"],
["exploration_config"],
)
self.exploration_config = new_exploration_config["exploration_config"]
if remote_worker_envs is not NotProvided:
self.remote_worker_envs = remote_worker_envs
if remote_env_batch_wait_ms is not NotProvided:
self.remote_env_batch_wait_ms = remote_env_batch_wait_ms
if validate_env_runners_after_construction is not NotProvided:
self.validate_env_runners_after_construction = (
validate_env_runners_after_construction
)
if preprocessor_pref is not NotProvided:
self.preprocessor_pref = preprocessor_pref
if observation_filter is not NotProvided:
self.observation_filter = observation_filter
if synchronize_filter is not NotProvided:
self.synchronize_filters = synchronize_filter
if compress_observations is not NotProvided:
self.compress_observations = compress_observations
if enable_tf1_exec_eagerly is not NotProvided:
self.enable_tf1_exec_eagerly = enable_tf1_exec_eagerly
if sampler_perf_stats_ema_coef is not NotProvided:
self.sampler_perf_stats_ema_coef = sampler_perf_stats_ema_coef
# Deprecated settings.
if synchronize_filter != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(synchronize_filter=..)",
new="AlgorithmConfig.env_runners(update_worker_filter_stats=..)",
error=True,
)
if ignore_worker_failures != DEPRECATED_VALUE:
deprecation_warning(
old="ignore_worker_failures is deprecated, and will soon be a no-op",
error=True,
)
if recreate_failed_workers != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(recreate_failed_workers=..)",
new="AlgorithmConfig.fault_tolerance(recreate_failed_workers=..)",
error=True,
)
if restart_failed_sub_environments != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(restart_failed_sub_environments=..)",
new=(
"AlgorithmConfig.fault_tolerance("
"restart_failed_sub_environments=..)"
),
error=True,
)
if num_consecutive_worker_failures_tolerance != DEPRECATED_VALUE:
deprecation_warning(
old=(
"AlgorithmConfig.env_runners("
"num_consecutive_worker_failures_tolerance=..)"
),
new=(
"AlgorithmConfig.fault_tolerance("
"num_consecutive_worker_failures_tolerance=..)"
),
error=True,
)
if worker_health_probe_timeout_s != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(worker_health_probe_timeout_s=..)",
new="AlgorithmConfig.fault_tolerance(worker_health_probe_timeout_s=..)",
error=True,
)
if worker_restore_timeout_s != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.env_runners(worker_restore_timeout_s=..)",
new="AlgorithmConfig.fault_tolerance(worker_restore_timeout_s=..)",
error=True,
)
return self
def learners(
self,
*,
num_learners: Optional[int] = NotProvided,
num_cpus_per_learner: Optional[Union[str, float, int]] = NotProvided,
num_gpus_per_learner: Optional[Union[float, int]] = NotProvided,
num_aggregator_actors_per_learner: Optional[int] = NotProvided,
max_requests_in_flight_per_aggregator_actor: Optional[float] = NotProvided,
local_gpu_idx: Optional[int] = NotProvided,
max_requests_in_flight_per_learner: Optional[int] = NotProvided,
) -> Self:
"""Sets LearnerGroup and Learner worker related configurations.
Args:
num_learners: Number of Learner workers used for updating the RLModule.
A value of 0 means training takes place on a local Learner on main
process CPUs or 1 GPU (determined by `num_gpus_per_learner`).
For multi-gpu training, you have to set `num_learners` to > 1 and set
`num_gpus_per_learner` accordingly (e.g., 4 GPUs total and model fits on
1 GPU: `num_learners=4; num_gpus_per_learner=1` OR 4 GPUs total and
model requires 2 GPUs: `num_learners=2; num_gpus_per_learner=2`).
num_cpus_per_learner: Number of CPUs allocated per Learner worker.
If "auto" (default), use 1 if `num_gpus_per_learner=0`, otherwise 0.
Only necessary for custom processing pipeline inside each Learner
requiring multiple CPU cores.
If `num_learners=0`, RLlib creates only one local Learner instance and
the number of CPUs on the main process is
`max(num_cpus_per_learner, num_cpus_for_main_process)`.
num_gpus_per_learner: Number of GPUs allocated per Learner worker. If
`num_learners=0`, any value greater than 0 runs the
training on a single GPU on the main process, while a value of 0 runs
the training on main process CPUs.
num_aggregator_actors_per_learner: The number of aggregator actors per
Learner (if num_learners=0, one local learner is created). Must be at
least 1. Aggregator actors perform the task of a) converting episodes
into a train batch and b) move that train batch to the same GPU that
the corresponding learner is located on. Good values are 1 or 2, but
this strongly depends on your setup and `EnvRunner` throughput.
max_requests_in_flight_per_aggregator_actor: How many in-flight requests
are allowed per aggregator actor before new requests are dropped?
local_gpu_idx: If `num_gpus_per_learner` > 0, and
`num_learners` < 2, then RLlib uses this GPU index for training. This is
an index into the available
CUDA devices. For example if `os.environ["CUDA_VISIBLE_DEVICES"] = "1"`
and `local_gpu_idx=0`, RLlib uses the GPU with ID=1 on the node.
max_requests_in_flight_per_learner: Max number of in-flight requests
to each Learner (actor). You normally do not have to tune this setting
(default is 3), however, for asynchronous algorithms, this determines
the "queue" size for incoming batches (or lists of episodes) into each
Learner worker, thus also determining, how much off-policy'ness would be
acceptable. The off-policy'ness is the difference between the numbers of
updates a policy has undergone on the Learner vs the EnvRunners.
See the `ray.rllib.utils.actor_manager.FaultTolerantActorManager` class
for more details.
Returns:
This updated AlgorithmConfig object.
"""
if num_learners is not NotProvided:
self.num_learners = num_learners
if num_cpus_per_learner is not NotProvided:
self.num_cpus_per_learner = num_cpus_per_learner
if num_gpus_per_learner is not NotProvided:
self.num_gpus_per_learner = num_gpus_per_learner
if num_aggregator_actors_per_learner is not NotProvided:
self.num_aggregator_actors_per_learner = num_aggregator_actors_per_learner
if max_requests_in_flight_per_aggregator_actor is not NotProvided:
self.max_requests_in_flight_per_aggregator_actor = (
max_requests_in_flight_per_aggregator_actor
)
if local_gpu_idx is not NotProvided:
self.local_gpu_idx = local_gpu_idx
if max_requests_in_flight_per_learner is not NotProvided:
self.max_requests_in_flight_per_learner = max_requests_in_flight_per_learner
return self
    def training(
        self,
        *,
        gamma: Optional[float] = NotProvided,
        lr: Optional[LearningRateOrSchedule] = NotProvided,
        grad_clip: Optional[float] = NotProvided,
        grad_clip_by: Optional[str] = NotProvided,
        train_batch_size: Optional[int] = NotProvided,
        train_batch_size_per_learner: Optional[int] = NotProvided,
        num_epochs: Optional[int] = NotProvided,
        minibatch_size: Optional[int] = NotProvided,
        shuffle_batch_per_epoch: Optional[bool] = NotProvided,
        model: Optional[dict] = NotProvided,
        optimizer: Optional[dict] = NotProvided,
        # Deprecated args.
        num_aggregator_actors_per_learner=DEPRECATED_VALUE,
        max_requests_in_flight_per_aggregator_actor=DEPRECATED_VALUE,
        num_sgd_iter=DEPRECATED_VALUE,
        max_requests_in_flight_per_sampler_worker=DEPRECATED_VALUE,
        # Moved to `learners()` method.
        learner_class: Optional[Type["Learner"]] = NotProvided,
        learner_connector: Optional[
            Callable[
                [gym.spaces.Space, gym.spaces.Space],
                Union["ConnectorV2", List["ConnectorV2"]],
            ]
        ] = NotProvided,
        add_default_connectors_to_learner_pipeline: Optional[bool] = NotProvided,
        learner_config_dict: Optional[Dict[str, Any]] = NotProvided,
    ) -> Self:
        """Sets the training related configuration.

        Only arguments explicitly passed (i.e. not left at the `NotProvided`
        sentinel) are written to `self`; all other settings keep their current
        values. Deprecated arguments emit a `deprecation_warning` and are
        forwarded to their replacement setting.

        Args:
            gamma: Float specifying the discount factor of the Markov Decision process.
            lr: The learning rate (float) or learning rate schedule in the format of
                [[timestep, lr-value], [timestep, lr-value], ...]
                In case of a schedule, intermediary timesteps are assigned to
                linearly interpolated learning rate values. A schedule config's first
                entry must start with timestep 0, i.e.: [[0, initial_value], [...]].
                Note: If you require a) more than one optimizer (per RLModule),
                b) optimizer types that are not Adam, c) a learning rate schedule that
                is not a linearly interpolated, piecewise schedule as described above,
                or d) specifying c'tor arguments of the optimizer that are not the
                learning rate (e.g. Adam's epsilon), then you must override your
                Learner's `configure_optimizer_for_module()` method and handle
                lr-scheduling yourself.
            grad_clip: If None, no gradient clipping is applied. Otherwise,
                depending on the setting of `grad_clip_by`, the (float) value of
                `grad_clip` has the following effect:
                If `grad_clip_by=value`: Clips all computed gradients individually
                inside the interval [-`grad_clip`, +`grad_clip`].
                If `grad_clip_by=norm`, computes the L2-norm of each weight/bias
                gradient tensor individually and then clip all gradients such that these
                L2-norms do not exceed `grad_clip`. The L2-norm of a tensor is computed
                via: `sqrt(SUM(w0^2, w1^2, ..., wn^2))` where w[i] are the elements of
                the tensor (no matter what the shape of this tensor is).
                If `grad_clip_by=global_norm`, computes the square of the L2-norm of
                each weight/bias gradient tensor individually, sum up all these squared
                L2-norms across all given gradient tensors (e.g. the entire module to
                be updated), square root that overall sum, and then clip all gradients
                such that this global L2-norm does not exceed the given value.
                The global L2-norm over a list of tensors (e.g. W and V) is computed
                via:
                `sqrt[SUM(w0^2, w1^2, ..., wn^2) + SUM(v0^2, v1^2, ..., vm^2)]`, where
                w[i] and v[j] are the elements of the tensors W and V (no matter what
                the shapes of these tensors are).
            grad_clip_by: See `grad_clip` for the effect of this setting on gradient
                clipping. Allowed values are `value`, `norm`, and `global_norm`.
            train_batch_size_per_learner: Train batch size per individual Learner
                worker. This setting only applies to the new API stack. The number
                of Learner workers can be set via `config.resources(
                num_learners=...)`. The total effective batch size is then
                `num_learners` x `train_batch_size_per_learner` and you can
                access it with the property `AlgorithmConfig.total_train_batch_size`.
            train_batch_size: Training batch size, if applicable. When on the new API
                stack, this setting should no longer be used. Instead, use
                `train_batch_size_per_learner` (in combination with
                `num_learners`).
            num_epochs: The number of complete passes over the entire train batch (per
                Learner). Each pass might be further split into n minibatches (if
                `minibatch_size` provided).
            minibatch_size: The size of minibatches to use to further split the train
                batch into.
            shuffle_batch_per_epoch: Whether to shuffle the train batch once per epoch.
                If the train batch has a time rank (axis=1), shuffling only takes
                place along the batch axis to not disturb any intact (episode)
                trajectories.
            model: Arguments passed into the policy model. See models/catalog.py for a
                full list of the available model options.
                TODO: Provide ModelConfig objects instead of dicts.
            optimizer: Arguments to pass to the policy optimizer. This setting is not
                used when `enable_rl_module_and_learner=True`.
            num_aggregator_actors_per_learner: Deprecated; use
                `config.learners(num_aggregator_actors_per_learner=..)` instead.
            max_requests_in_flight_per_aggregator_actor: Deprecated; use
                `config.learners(max_requests_in_flight_per_aggregator_actor=..)`
                instead.
            num_sgd_iter: Deprecated; use `num_epochs` instead.
            max_requests_in_flight_per_sampler_worker: Deprecated; use
                `config.env_runners(max_requests_in_flight_per_env_runner=..)`
                instead.
            learner_class: Moved; use `config.learners(learner_class=..)` instead.
            learner_connector: Moved; use `config.learners(learner_connector=..)`
                instead.
            add_default_connectors_to_learner_pipeline: Moved; use
                `config.learners(add_default_connectors_to_learner_pipeline=..)`
                instead.
            learner_config_dict: Moved; use
                `config.learners(learner_config_dict=..)` instead.

        Returns:
            This updated AlgorithmConfig object.
        """
        # Handle args that have moved to `config.learners()`: warn (non-error),
        # then still apply them here for backward compatibility.
        if learner_class is not NotProvided:
            deprecation_warning(
                old="config.training(learner_class=..)",
                new="config.learners(learner_class=..)",
                error=False,
            )
            self._learner_class = learner_class
        if learner_connector is not NotProvided:
            deprecation_warning(
                old="config.training(learner_connector=..)",
                new="config.learners(learner_connector=..)",
                error=False,
            )
            self._learner_connector = learner_connector
        if add_default_connectors_to_learner_pipeline is not NotProvided:
            deprecation_warning(
                old="config.training(add_default_connectors_to_learner_pipeline=..)",
                new="config.learners(add_default_connectors_to_learner_pipeline=..)",
                error=False,
            )
            self.add_default_connectors_to_learner_pipeline = (
                add_default_connectors_to_learner_pipeline
            )
        if learner_config_dict is not NotProvided:
            deprecation_warning(
                old="config.training(learner_config_dict=..)",
                new="config.learners(learner_config_dict=..)",
                error=False,
            )
            # Note: updates (merges into) the existing dict rather than replacing it.
            self.learner_config_dict.update(learner_config_dict)
        # Deprecated args use the DEPRECATED_VALUE sentinel (not NotProvided).
        if num_aggregator_actors_per_learner != DEPRECATED_VALUE:
            deprecation_warning(
                old="config.training(num_aggregator_actors_per_learner=..)",
                new="config.learners(num_aggregator_actors_per_learner=..)",
                error=False,
            )
            self.num_aggregator_actors_per_learner = num_aggregator_actors_per_learner
        if max_requests_in_flight_per_aggregator_actor != DEPRECATED_VALUE:
            deprecation_warning(
                old="config.training(max_requests_in_flight_per_aggregator_actor=..)",
                new="config.learners(max_requests_in_flight_per_aggregator_actor=..)",
                error=False,
            )
            self.max_requests_in_flight_per_aggregator_actor = (
                max_requests_in_flight_per_aggregator_actor
            )
        if num_sgd_iter != DEPRECATED_VALUE:
            deprecation_warning(
                old="config.training(num_sgd_iter=..)",
                new="config.training(num_epochs=..)",
                error=False,
            )
            # Rebind the local `num_epochs` so the regular `num_epochs` branch
            # below picks up the deprecated value. Must stay above that branch.
            num_epochs = num_sgd_iter
        if max_requests_in_flight_per_sampler_worker != DEPRECATED_VALUE:
            deprecation_warning(
                old="AlgorithmConfig.training("
                "max_requests_in_flight_per_sampler_worker=...)",
                new="AlgorithmConfig.env_runners("
                "max_requests_in_flight_per_env_runner=...)",
                error=False,
            )
            # Forward to the replacement setting on the env-runners config.
            self.env_runners(
                max_requests_in_flight_per_env_runner=(
                    max_requests_in_flight_per_sampler_worker
                ),
            )
        # Regular (non-deprecated) settings: only assign what was provided.
        if gamma is not NotProvided:
            self.gamma = gamma
        if lr is not NotProvided:
            self.lr = lr
        if grad_clip is not NotProvided:
            self.grad_clip = grad_clip
        if grad_clip_by is not NotProvided:
            # Validate eagerly; an invalid mode would otherwise only surface
            # much later, inside the Learner.
            if grad_clip_by not in ["value", "norm", "global_norm"]:
                raise ValueError(
                    f"`grad_clip_by` ({grad_clip_by}) must be one of: 'value', 'norm', "
                    "or 'global_norm'!"
                )
            self.grad_clip_by = grad_clip_by
        if train_batch_size_per_learner is not NotProvided:
            self._train_batch_size_per_learner = train_batch_size_per_learner
        if train_batch_size is not NotProvided:
            self.train_batch_size = train_batch_size
        if num_epochs is not NotProvided:
            self.num_epochs = num_epochs
        if minibatch_size is not NotProvided:
            self.minibatch_size = minibatch_size
        if shuffle_batch_per_epoch is not NotProvided:
            self.shuffle_batch_per_epoch = shuffle_batch_per_epoch
        if model is not NotProvided:
            # Merge into the existing model dict (does not replace it).
            self.model.update(model)
            # NOTE(review): `model` is merged into `self.model` *before* this
            # deprecation check, so a `_use_default_native_models` key ends up in
            # `self.model` even when the call errors out below — confirm intended.
            if (
                model.get("_use_default_native_models", DEPRECATED_VALUE)
                != DEPRECATED_VALUE
            ):
                deprecation_warning(
                    old="AlgorithmConfig.training(_use_default_native_models=True)",
                    help="_use_default_native_models is not supported "
                    "anymore. To get rid of this error, set `config.api_stack("
                    "enable_rl_module_and_learner=True)`. Native models will "
                    "be better supported by the upcoming RLModule API.",
                    # Error out if user tries to enable this.
                    error=model["_use_default_native_models"],
                )
        if optimizer is not NotProvided:
            self.optimizer = merge_dicts(self.optimizer, optimizer)
        return self
def callbacks(
self,
callbacks_class: Optional[
Union[Type[RLlibCallback], List[Type[RLlibCallback]]]
] = NotProvided,
*,
on_algorithm_init: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_train_result: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_evaluate_start: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_evaluate_end: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_evaluate_offline_start: Optional[
Union[Callable, List[Callable]]
] = NotProvided,
on_evaluate_offline_end: Optional[
Union[Callable, List[Callable]]
] = NotProvided,
on_env_runners_recreated: Optional[
Union[Callable, List[Callable]]
] = NotProvided,
on_offline_eval_runners_recreated: Optional[
Union[Callable, List[Callable]]
] = NotProvided,
on_checkpoint_loaded: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_environment_created: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_episode_created: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_episode_start: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_episode_step: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_episode_end: Optional[Union[Callable, List[Callable]]] = NotProvided,
on_sample_end: Optional[Union[Callable, List[Callable]]] = NotProvided,
) -> Self:
"""Sets the callbacks configuration.
Args:
callbacks_class: RLlibCallback class, whose methods are called during
various phases of training and RL environment sample collection.
TODO (sven): Change the link to new rst callbacks page.
See the `RLlibCallback` class and
`examples/metrics/custom_metrics_and_callbacks.py` for more information.
on_algorithm_init: A callable or a list of callables. If a list, RLlib calls
the items in the same sequence. `on_algorithm_init` methods overridden
in `callbacks_class` take precedence and are called first.
See
:py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_algorithm_init` # noqa
for more information.
on_evaluate_start: A callable or a list of callables. If a list, RLlib calls
the items in the same sequence. `on_evaluate_start` methods overridden
in `callbacks_class` take precedence and are called first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_evaluate_start` # noqa
for more information.
on_evaluate_end: A callable or a list of callables. If a list, RLlib calls
the items in the same sequence. `on_evaluate_end` methods overridden
in `callbacks_class` take precedence and are called first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_evaluate_end` # noqa
for more information.
on_env_runners_recreated: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_env_runners_recreated`
methods overridden in `callbacks_class` take precedence and are called
first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_env_runners_recreated` # noqa
for more information.
on_checkpoint_loaded: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_checkpoint_loaded`
methods overridden in `callbacks_class` take precedence and are called
first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_checkpoint_loaded` # noqa
for more information.
on_environment_created: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_environment_created`
methods overridden in `callbacks_class` take precedence and are called
first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_environment_created` # noqa
for more information.
on_episode_created: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_episode_created` methods
overridden in `callbacks_class` take precedence and are called first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_episode_created` # noqa
for more information.
on_episode_start: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_episode_start` methods
overridden in `callbacks_class` take precedence and are called first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_episode_start` # noqa
for more information.
on_episode_step: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_episode_step` methods
overridden in `callbacks_class` take precedence and are called first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_episode_step` # noqa
for more information.
on_episode_end: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_episode_end` methods
overridden in `callbacks_class` take precedence and are called first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_episode_end` # noqa
for more information.
on_sample_end: A callable or a list of callables. If a list,
RLlib calls the items in the same sequence. `on_sample_end` methods
overridden in `callbacks_class` take precedence and are called first.
See :py:meth:`~ray.rllib.callbacks.callbacks.RLlibCallback.on_sample_end` # noqa
for more information.
Returns:
This updated AlgorithmConfig object.
"""
if callbacks_class is None:
callbacks_class = RLlibCallback
if callbacks_class is not NotProvided:
# Check, whether given `callbacks` is a callable.
# TODO (sven): Once the old API stack is deprecated, this can also be None
# (which should then become the default value for this attribute).
to_check = force_list(callbacks_class)
if not all(callable(c) for c in to_check):
raise ValueError(
"`config.callbacks_class` must be a callable or list of callables that "
"returns a subclass of DefaultCallbacks, got "
f"{callbacks_class}!"
)
self.callbacks_class = callbacks_class
if on_algorithm_init is not NotProvided:
self.callbacks_on_algorithm_init = on_algorithm_init
if on_train_result is not NotProvided:
self.callbacks_on_train_result = on_train_result
if on_evaluate_start is not NotProvided:
self.callbacks_on_evaluate_start = on_evaluate_start
if on_evaluate_end is not NotProvided:
self.callbacks_on_evaluate_end = on_evaluate_end
if on_evaluate_offline_start is not NotProvided:
self.callbacks_on_evaluate_offline_start = on_evaluate_offline_start
if on_evaluate_offline_end is not NotProvided:
self.callbacks_on_evaluate_offline_end = on_evaluate_offline_end
if on_env_runners_recreated is not NotProvided:
self.callbacks_on_env_runners_recreated = on_env_runners_recreated
if on_offline_eval_runners_recreated is not NotProvided:
self.callbacks_on_offline_eval_runners_recreated = (
on_offline_eval_runners_recreated
)
if on_checkpoint_loaded is not NotProvided:
self.callbacks_on_checkpoint_loaded = on_checkpoint_loaded
if on_environment_created is not NotProvided:
self.callbacks_on_environment_created = on_environment_created
if on_episode_created is not NotProvided:
self.callbacks_on_episode_created = on_episode_created
if on_episode_start is not NotProvided:
self.callbacks_on_episode_start = on_episode_start
if on_episode_step is not NotProvided:
self.callbacks_on_episode_step = on_episode_step
if on_episode_end is not NotProvided:
self.callbacks_on_episode_end = on_episode_end
if on_sample_end is not NotProvided:
self.callbacks_on_sample_end = on_sample_end
return self
def evaluation(
self,
*,
evaluation_interval: Optional[int] = NotProvided,
evaluation_duration: Optional[Union[int, str]] = NotProvided,
evaluation_duration_unit: Optional[str] = NotProvided,
evaluation_auto_duration_min_env_steps_per_sample: Optional[int] = NotProvided,
evaluation_auto_duration_max_env_steps_per_sample: Optional[int] = NotProvided,
evaluation_sample_timeout_s: Optional[float] = NotProvided,
evaluation_parallel_to_training: Optional[bool] = NotProvided,
evaluation_force_reset_envs_before_iteration: Optional[bool] = NotProvided,
evaluation_config: Optional[
Union["AlgorithmConfig", PartialAlgorithmConfigDict]
] = NotProvided,
off_policy_estimation_methods: Optional[Dict] = NotProvided,
ope_split_batch_by_episode: Optional[bool] = NotProvided,
evaluation_num_env_runners: Optional[int] = NotProvided,
custom_evaluation_function: Optional[Callable] = NotProvided,
# Offline evaluation.
offline_evaluation_interval: Optional[int] = NotProvided,
num_offline_eval_runners: Optional[int] = NotProvided,
offline_evaluation_type: Optional[Callable] = NotProvided,
offline_eval_runner_class: Optional[Callable] = NotProvided,
offline_loss_for_module_fn: Optional[Callable] = NotProvided,
offline_eval_batch_size_per_runner: Optional[int] = NotProvided,
dataset_num_iters_per_offline_eval_runner: Optional[int] = NotProvided,
offline_eval_rl_module_inference_only: Optional[bool] = NotProvided,
num_cpus_per_offline_eval_runner: Optional[int] = NotProvided,
num_gpus_per_offline_eval_runner: Optional[int] = NotProvided,
custom_resources_per_offline_eval_runner: Optional[
Dict[str, Any]
] = NotProvided,
offline_evaluation_timeout_s: Optional[float] = NotProvided,
max_requests_in_flight_per_offline_eval_runner: Optional[int] = NotProvided,
broadcast_offline_eval_runner_states: Optional[bool] = NotProvided,
validate_offline_eval_runners_after_construction: Optional[bool] = NotProvided,
restart_failed_offline_eval_runners: Optional[bool] = NotProvided,
ignore_offline_eval_runner_failures: Optional[bool] = NotProvided,
max_num_offline_eval_runner_restarts: Optional[int] = NotProvided,
offline_eval_runner_health_probe_timeout_s: Optional[float] = NotProvided,
offline_eval_runner_restore_timeout_s: Optional[float] = NotProvided,
# Deprecated args.
always_attach_evaluation_results=DEPRECATED_VALUE,
evaluation_num_workers=DEPRECATED_VALUE,
) -> Self:
"""Sets the config's evaluation settings.
Args:
evaluation_interval: Evaluate with every `evaluation_interval` training
iterations. The evaluation stats are reported under the "evaluation"
metric key. Set to None (or 0) for no evaluation.
evaluation_duration: Duration for which to run evaluation each
`evaluation_interval`. The unit for the duration can be set via
`evaluation_duration_unit` to either "episodes" (default) or
"timesteps". If using multiple evaluation workers (EnvRunners) in the
`evaluation_num_env_runners > 1` setting, the amount of
episodes/timesteps to run are split amongst these.
A special value of "auto" can be used in case
`evaluation_parallel_to_training=True`. This is the recommended way when
trying to save as much time on evaluation as possible. The Algorithm
then runs as many timesteps via the evaluation workers as possible,
while not taking longer than the parallely running training step and
thus, never wasting any idle time on either training- or evaluation
workers. When using this setting (`evaluation_duration="auto"`), it is
strongly advised to set `evaluation_interval=1` and
`evaluation_force_reset_envs_before_iteration=True` at the same time.
evaluation_duration_unit: The unit, with which to count the evaluation
duration. Either "episodes" (default) or "timesteps". Note that this
setting is ignored if `evaluation_duration="auto"`.
evaluation_auto_duration_min_env_steps_per_sample: If `evaluation_duration`
is "auto" (in which case `evaluation_duration_unit` is always
"timesteps"), at least how many timesteps should be done per remote
`sample()` call.
evaluation_auto_duration_max_env_steps_per_sample: If `evaluation_duration`
is "auto" (in which case `evaluation_duration_unit` is always
"timesteps"), at most how many timesteps should be done per remote
`sample()` call.
evaluation_sample_timeout_s: The timeout (in seconds) for evaluation workers
to sample a complete episode in the case your config settings are:
`evaluation_duration != auto` and `evaluation_duration_unit=episode`.
After this time, the user receives a warning and instructions on how
to fix the issue.
evaluation_parallel_to_training: Whether to run evaluation in parallel to
the `Algorithm.training_step()` call, using threading. Default=False.
E.g. for evaluation_interval=1 -> In every call to `Algorithm.train()`,
the `Algorithm.training_step()` and `Algorithm.evaluate()` calls
run in parallel. Note that this setting - albeit extremely efficient b/c
it wastes no extra time for evaluation - causes the evaluation results
to lag one iteration behind the rest of the training results. This is
important when picking a good checkpoint. For example, if iteration 42
reports a good evaluation `episode_return_mean`, be aware that these
results were achieved on the weights trained in iteration 41, so you
should probably pick the iteration 41 checkpoint instead.
evaluation_force_reset_envs_before_iteration: Whether all environments
should be force-reset (even if they are not done yet) right before
the evaluation step of the iteration begins. Setting this to True
(default) makes sure that the evaluation results aren't polluted with
episode statistics that were actually (at least partially) achieved with
an earlier set of weights. Note that this setting is only
supported on the new API stack w/ EnvRunners and ConnectorV2
(`config.enable_rl_module_and_learner=True` AND
`config.enable_env_runner_and_connector_v2=True`).
evaluation_config: Typical usage is to pass extra args to evaluation env
creator and to disable exploration by computing deterministic actions.
IMPORTANT NOTE: Policy gradient algorithms are able to find the optimal
policy, even if this is a stochastic one. Setting "explore=False" here
results in the evaluation workers not using this optimal policy!
off_policy_estimation_methods: Specify how to evaluate the current policy,
along with any optional config parameters. This only has an effect when
reading offline experiences ("input" is not "sampler").
Available keys:
{ope_method_name: {"type": ope_type, ...}} where `ope_method_name`
is a user-defined string to save the OPE results under, and
`ope_type` can be any subclass of OffPolicyEstimator, e.g.
ray.rllib.offline.estimators.is::ImportanceSampling
or your own custom subclass, or the full class path to the subclass.
You can also add additional config arguments to be passed to the
OffPolicyEstimator in the dict, e.g.
{"qreg_dr": {"type": DoublyRobust, "q_model_type": "qreg", "k": 5}}
ope_split_batch_by_episode: Whether to use SampleBatch.split_by_episode() to
split the input batch to episodes before estimating the ope metrics. In
case of bandits you should make this False to see improvements in ope
evaluation speed. In case of bandits, it is ok to not split by episode,
since each record is one timestep already. The default is True.
evaluation_num_env_runners: Number of parallel EnvRunners to use for
evaluation. Note that this is set to zero by default, which means
evaluation is run in the algorithm process (only if
`evaluation_interval` is not 0 or None). If you increase this, also
increases the Ray resource usage of the algorithm since evaluation
workers are created separately from those EnvRunners used to sample data
for training.
custom_evaluation_function: Customize the evaluation method. This must be a
function of signature (algo: Algorithm, eval_workers: EnvRunnerGroup) ->
(metrics: dict, env_steps: int, agent_steps: int) (metrics: dict if
`enable_env_runner_and_connector_v2=True`), where `env_steps` and
`agent_steps` define the number of sampled steps during the evaluation
iteration. See the Algorithm.evaluate() method to see the default
implementation. The Algorithm guarantees all eval workers have the
latest policy state before this function is called.
offline_evaluation_interval: Evaluate offline with every
`offline_evaluation_interval` training iterations. The offline evaluation
stats are reported under the "evaluation/offline_evaluation" metric key. Set
to None (or 0) for no offline evaluation.
num_offline_eval_runners: Number of OfflineEvaluationRunner actors to create
for parallel evaluation. Setting this to 0 forces sampling to be done in the
local OfflineEvaluationRunner (main process or the Algorithm's actor when
using Tune).
offline_evaluation_type: Type of offline evaluation to run. Either `"eval_loss"`
for evaluating the validation loss of the policy, `"is"` for importance
sampling, or `"pdis"` for per-decision importance sampling. If you want to
implement your own offline evaluation method write an `OfflineEvaluationRunner`
and use the `AlgorithmConfig.offline_eval_runner_class`.
offline_eval_runner_class: An `OfflineEvaluationRunner` class that implements
custom offline evaluation logic.
offline_loss_for_module_fn: A callable to compute the loss per `RLModule` in
offline evaluation. If not provided the training loss function (
`Learner.compute_loss_for_module`) is used. The signature must be (
runner: OfflineEvaluationRunner, module_id: ModuleID, config: AlgorithmConfig,
batch: Dict[str, Any], fwd_out: Dict[str, TensorType]).
offline_eval_batch_size_per_runner: Evaluation batch size per individual
OfflineEvaluationRunner worker. This setting only applies to the new API
stack. The number of OfflineEvaluationRunner workers can be set via
`config.evaluation(num_offline_eval_runners=...)`. The total effective batch
size is then `num_offline_eval_runners` x
`offline_eval_batch_size_per_runner`.
dataset_num_iters_per_offline_eval_runner: Number of batches to evaluate in each
OfflineEvaluationRunner during a single evaluation. If None, each learner runs a
complete epoch over its data block (the dataset is partitioned into
at least as many blocks as there are runners). The default is `1`.
offline_eval_rl_module_inference_only: If `True`, the module spec is used in an
inference-only setting (no-loss) and the RLModule can thus be built in
its light version (if available). For example, the `inference_only`
version of an RLModule might only contain the networks required for
computing actions, but misses additional target- or critic networks.
Also, if `True`, the module does NOT contain those (sub) RLModules that have
their `learner_only` flag set to True.
num_cpus_per_offline_eval_runner: Number of CPUs to allocate per
OfflineEvaluationRunner.
num_gpus_per_offline_eval_runner: Number of GPUs to allocate per
OfflineEvaluationRunner. This can be fractional. This is usually needed only if
your (custom) loss function itself requires a GPU (i.e., it contains GPU-
intensive computations), or model inference is unusually expensive.
custom_resources_per_eval_runner: Any custom Ray resources to allocate per
OfflineEvaluationRunner.
offline_evaluation_timeout_s: The timeout in seconds for calling `run()` on remote
OfflineEvaluationRunner workers. Results (episode list) from workers that take
longer than this time are discarded.
max_requests_in_flight_per_offline_eval_runner: Max number of in-flight requests
to each OfflineEvaluationRunner (actor)). See the
`ray.rllib.utils.actor_manager.FaultTolerantActorManager` class for more
details.
Tuning these values is important when running experiments with
large evaluation batches, where there is the risk that the object store may
fill up, causing spilling of objects to disk. This can cause any
asynchronous requests to become very slow, making your experiment run
slowly as well. You can inspect the object store during your experiment
through a call to `ray memory` on your head node, and by using the Ray
dashboard. If you're seeing that the object store is filling up,
turn down the number of remote requests in flight or enable compression
or increase the object store memory through, for example:
`ray.init(object_store_memory=10 * 1024 * 1024 * 1024) # =10 GB`.
broadcast_offline_eval_runner_states: True, if merged OfflineEvaluationRunner
states (from the central connector pipelines) should be broadcast back to
all remote OfflineEvaluationRunner actors.
validate_offline_eval_runners_after_construction: Whether to validate that each
created remote OfflineEvaluationRunner is healthy after its construction process.
restart_failed_offline_eval_runners: Whether - upon an OfflineEvaluationRunner
failure - RLlib tries to restart the lost OfflineEvaluationRunner(s) as an
identical copy of the failed one(s). You should set this to True when training
on SPOT instances that may preempt any time and/or if you need to evaluate always a
complete dataset b/c OfflineEvaluationRunner(s) evaluate through streaming split
iterators on disjoint batches. The new, recreated OfflineEvaluationRunner(s) only
differ from the failed one in their `self.recreated_worker=True` property value
and have the same `worker_index` as the original(s). If this setting is True, the
value of the `ignore_offline_eval_runner_failures` setting is ignored.
ignore_offline_eval_runner_failures: Whether to ignore any OfflineEvalautionRunner
failures and continue running with the remaining OfflineEvaluationRunners. This
setting is ignored, if `restart_failed_offline_eval_runners=True`.
max_num_offline_eval_runner_restarts: The maximum number of times any
OfflineEvaluationRunner is allowed to be restarted (if
`restart_failed_offline_eval_runners` is True).
offline_eval_runner_health_probe_timeout_s: Max amount of time in seconds, we should
spend waiting for OfflineEvaluationRunner health probe calls
(`OfflineEvaluationRunner.ping.remote()`) to respond. Health pings are very cheap,
however, we perform the health check via a blocking `ray.get()`, so the
default value should not be too large.
offline_eval_runner_restore_timeout_s: Max amount of time we should wait to restore
states on recovered OfflineEvaluationRunner actors. Default is 30 mins.
Returns:
This updated AlgorithmConfig object.
"""
if always_attach_evaluation_results != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.evaluation(always_attach_evaluation_results=..)",
help="This setting is no longer needed, b/c Tune does not error "
"anymore (only warns) when a metrics key can't be found in the "
"results.",
error=True,
)
if evaluation_num_workers != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.evaluation(evaluation_num_workers=..)",
new="AlgorithmConfig.evaluation(evaluation_num_env_runners=..)",
error=False,
)
self.evaluation_num_env_runners = evaluation_num_workers
if evaluation_interval is not NotProvided:
self.evaluation_interval = evaluation_interval
if evaluation_duration is not NotProvided:
self.evaluation_duration = evaluation_duration
if evaluation_duration_unit is not NotProvided:
self.evaluation_duration_unit = evaluation_duration_unit
if evaluation_auto_duration_min_env_steps_per_sample is not NotProvided:
self.evaluation_auto_duration_min_env_steps_per_sample = (
evaluation_auto_duration_min_env_steps_per_sample
)
if evaluation_auto_duration_max_env_steps_per_sample is not NotProvided:
self.evaluation_auto_duration_max_env_steps_per_sample = (
evaluation_auto_duration_max_env_steps_per_sample
)
if evaluation_sample_timeout_s is not NotProvided:
self.evaluation_sample_timeout_s = evaluation_sample_timeout_s
if evaluation_parallel_to_training is not NotProvided:
self.evaluation_parallel_to_training = evaluation_parallel_to_training
if evaluation_force_reset_envs_before_iteration is not NotProvided:
self.evaluation_force_reset_envs_before_iteration = (
evaluation_force_reset_envs_before_iteration
)
if evaluation_config is not NotProvided:
# If user really wants to set this to None, we should allow this here,
# instead of creating an empty dict.
if evaluation_config is None:
self.evaluation_config = None
# Update (don't replace) the existing overrides with the provided ones.
else:
from ray.rllib.algorithms.algorithm import Algorithm
self.evaluation_config = deep_update(
self.evaluation_config or {},
evaluation_config,
True,
Algorithm._allow_unknown_subkeys,
Algorithm._override_all_subkeys_if_type_changes,
Algorithm._override_all_key_list,
)
if off_policy_estimation_methods is not NotProvided:
self.off_policy_estimation_methods = off_policy_estimation_methods
if evaluation_num_env_runners is not NotProvided:
self.evaluation_num_env_runners = evaluation_num_env_runners
if custom_evaluation_function is not NotProvided:
self.custom_evaluation_function = custom_evaluation_function
if ope_split_batch_by_episode is not NotProvided:
self.ope_split_batch_by_episode = ope_split_batch_by_episode
# Offline evaluation.
if offline_evaluation_interval is not NotProvided:
self.offline_evaluation_interval = offline_evaluation_interval
if num_offline_eval_runners is not NotProvided:
self.num_offline_eval_runners = num_offline_eval_runners
if offline_evaluation_type is not NotProvided:
self.offline_evaluation_type = offline_evaluation_type
if offline_eval_runner_class is not NotProvided:
self.offline_eval_runner_class = offline_eval_runner_class
if offline_loss_for_module_fn is not NotProvided:
self.offline_loss_for_module_fn = offline_loss_for_module_fn
if offline_eval_batch_size_per_runner is not NotProvided:
self.offline_eval_batch_size_per_runner = offline_eval_batch_size_per_runner
if dataset_num_iters_per_offline_eval_runner is not NotProvided:
self.dataset_num_iters_per_eval_runner = (
dataset_num_iters_per_offline_eval_runner
)
if offline_eval_rl_module_inference_only is not NotProvided:
self.offline_eval_rl_module_inference_only = (
offline_eval_rl_module_inference_only
)
if num_cpus_per_offline_eval_runner is not NotProvided:
self.num_cpus_per_offline_eval_runner = num_cpus_per_offline_eval_runner
if num_gpus_per_offline_eval_runner is not NotProvided:
self.num_gpus_per_offline_eval_runner = num_gpus_per_offline_eval_runner
if custom_resources_per_offline_eval_runner is not NotProvided:
self.custom_resources_per_offline_eval_runner = (
custom_resources_per_offline_eval_runner
)
if offline_evaluation_timeout_s is not NotProvided:
self.offline_evaluation_timeout_s = offline_evaluation_timeout_s
if max_requests_in_flight_per_offline_eval_runner is not NotProvided:
self.max_requests_in_flight_per_offline_eval_runner = (
max_requests_in_flight_per_offline_eval_runner
)
if broadcast_offline_eval_runner_states is not NotProvided:
self.broadcast_offline_eval_runner_states = (
broadcast_offline_eval_runner_states
)
if validate_offline_eval_runners_after_construction is not NotProvided:
self.validate_offline_eval_runners_after_construction = (
validate_offline_eval_runners_after_construction
)
if restart_failed_offline_eval_runners is not NotProvided:
self.restart_failed_offline_eval_runners = (
restart_failed_offline_eval_runners
)
if ignore_offline_eval_runner_failures is not NotProvided:
self.ignore_offline_eval_runner_failures = (
ignore_offline_eval_runner_failures
)
if max_num_offline_eval_runner_restarts is not NotProvided:
self.max_num_offline_eval_runner_restarts = (
max_num_offline_eval_runner_restarts
)
if offline_eval_runner_health_probe_timeout_s is not NotProvided:
self.offline_eval_runner_health_probe_timeout_s = (
offline_eval_runner_health_probe_timeout_s
)
if offline_eval_runner_restore_timeout_s is not NotProvided:
self.offline_eval_runner_restore_timeout_s = (
offline_eval_runner_restore_timeout_s
)
return self
    def offline_data(
        self,
        *,
        input_: Optional[Union[str, Callable[[IOContext], InputReader]]] = NotProvided,
        offline_data_class: Optional[Type] = NotProvided,
        input_read_method: Optional[Union[str, Callable]] = NotProvided,
        input_read_method_kwargs: Optional[Dict] = NotProvided,
        input_read_schema: Optional[Dict[str, str]] = NotProvided,
        input_read_episodes: Optional[bool] = NotProvided,
        input_read_sample_batches: Optional[bool] = NotProvided,
        input_read_batch_size: Optional[int] = NotProvided,
        input_filesystem: Optional[str] = NotProvided,
        input_filesystem_kwargs: Optional[Dict] = NotProvided,
        input_compress_columns: Optional[List[str]] = NotProvided,
        materialize_data: Optional[bool] = NotProvided,
        materialize_mapped_data: Optional[bool] = NotProvided,
        map_batches_kwargs: Optional[Dict] = NotProvided,
        iter_batches_kwargs: Optional[Dict] = NotProvided,
        ignore_final_observation: Optional[bool] = NotProvided,
        prelearner_class: Optional[Type] = NotProvided,
        prelearner_buffer_class: Optional[Type] = NotProvided,
        prelearner_buffer_kwargs: Optional[Dict] = NotProvided,
        prelearner_module_synch_period: Optional[int] = NotProvided,
        dataset_num_iters_per_learner: Optional[int] = NotProvided,
        input_config: Optional[Dict] = NotProvided,
        actions_in_input_normalized: Optional[bool] = NotProvided,
        postprocess_inputs: Optional[bool] = NotProvided,
        shuffle_buffer_size: Optional[int] = NotProvided,
        output: Optional[str] = NotProvided,
        output_config: Optional[Dict] = NotProvided,
        output_compress_columns: Optional[List[str]] = NotProvided,
        output_max_file_size: Optional[float] = NotProvided,
        output_max_rows_per_file: Optional[int] = NotProvided,
        output_write_remaining_data: Optional[bool] = NotProvided,
        output_write_method: Optional[str] = NotProvided,
        output_write_method_kwargs: Optional[Dict] = NotProvided,
        output_filesystem: Optional[str] = NotProvided,
        output_filesystem_kwargs: Optional[Dict] = NotProvided,
        output_write_episodes: Optional[bool] = NotProvided,
        offline_sampling: Optional[str] = NotProvided,
    ) -> Self:
        """Sets the config's offline data settings.

        Each argument defaults to the ``NotProvided`` sentinel; only arguments
        explicitly passed by the caller are copied onto ``self``, so this method
        can be called repeatedly to update individual settings.

        Args:
            input_: Specify how to generate experiences:
                - "sampler": Generate experiences via online (env) simulation (default).
                - A local directory or file glob expression (e.g., "/tmp/*.json").
                - A list of individual file paths/URIs (e.g., ["/tmp/1.json",
                  "s3://bucket/2.json"]).
                - A dict with string keys and sampling probabilities as values (e.g.,
                  {"sampler": 0.4, "/tmp/*.json": 0.4, "s3://bucket/expert.json": 0.2}).
                - A callable that takes an `IOContext` object as only arg and returns a
                  `ray.rllib.offline.InputReader`.
                - A string key that indexes a callable with
                  `tune.registry.register_input`
            offline_data_class: An optional `OfflineData` class that is used to define
                the offline data pipeline, including the dataset and the sampling
                methodology. Override the `OfflineData` class and pass your derived
                class here, if you need some primer transformations specific to your
                data or your loss. Usually overriding the `OfflinePreLearner` and using
                the resulting customization via `prelearner_class` suffices for most
                cases. The default is `None` which uses the base `OfflineData` defined
                in `ray.rllib.offline.offline_data.OfflineData`.
            input_read_method: Read method for the `ray.data.Dataset` to read in the
                offline data from `input_`. The default is `read_parquet` for Parquet
                files. See https://docs.ray.io/en/latest/data/api/input_output.html for
                more info about available read methods in `ray.data`.
            input_read_method_kwargs: Keyword args for `input_read_method`. These
                are passed by RLlib into the read method without checking. Use these
                keyword args together with `map_batches_kwargs` and
                `iter_batches_kwargs` to tune the performance of the data pipeline.
                It is strongly recommended to rely on Ray Data's automatic read
                performance tuning.
            input_read_schema: Table schema for converting offline data to episodes.
                This schema maps the offline data columns to
                ray.rllib.core.columns.Columns:
                `{Columns.OBS: 'o_t', Columns.ACTIONS: 'a_t', ...}`. Columns in
                the data set that are not mapped via this schema are sorted into
                episodes' `extra_model_outputs`. If no schema is passed in the default
                schema used is `ray.rllib.offline.offline_data.SCHEMA`. If your data set
                contains already the names in this schema, no `input_read_schema` is
                needed. The same applies if the data is in RLlib's `EpisodeType` or its
                old `SampleBatch` format.
            input_read_episodes: Whether offline data is already stored in RLlib's
                `EpisodeType` format, i.e. `ray.rllib.env.SingleAgentEpisode` (multi
                -agent is planned but not supported, yet). Reading episodes directly
                avoids additional transform steps and is usually faster and
                therefore the recommended format when your application remains fully
                inside of RLlib's schema. The other format is a columnar format and is
                agnostic to the RL framework used. Use the latter format, if you are
                unsure when to use the data or in which RL framework. The default is
                to read column data, i.e. `False`. `input_read_episodes`, and
                `input_read_sample_batches` can't be `True` at the same time. See
                also `output_write_episodes` to define the output data format when
                recording.
            input_read_sample_batches: Whether offline data is stored in RLlib's old
                stack `SampleBatch` type. This is usually the case for older data
                recorded with RLlib in JSON line format. Reading in `SampleBatch`
                data needs extra transforms and might not concatenate episode chunks
                contained in different `SampleBatch`es in the data. If possible avoid
                to read `SampleBatch`es and convert them in a controlled form into
                RLlib's `EpisodeType` (i.e. `SingleAgentEpisode`). The default is
                `False`. `input_read_episodes`, and `input_read_sample_batches` can't
                be `True` at the same time.
            input_read_batch_size: Batch size to pull from the data set. This could
                differ from the `train_batch_size_per_learner`, if a dataset holds
                `EpisodeType` (i.e., `SingleAgentEpisode`) or `SampleBatch`, or any
                other data type that contains multiple timesteps in a single row of
                the dataset. In such cases a single batch of size
                `train_batch_size_per_learner` will potentially pull a multiple of
                `train_batch_size_per_learner` timesteps from the offline dataset. The
                default is `None`, in which case `train_batch_size_per_learner` rows
                are pulled.
            input_filesystem: A cloud filesystem to handle access to cloud storage when
                reading experiences. Can be either "gcs" for Google Cloud Storage,
                "s3" for AWS S3 buckets, "abs" for Azure Blob Storage, or any
                filesystem supported by PyArrow. In general the file path is sufficient
                for accessing data from public or local storage systems. See
                https://arrow.apache.org/docs/python/filesystems.html for details.
            input_filesystem_kwargs: A dictionary holding the kwargs for the filesystem
                given by `input_filesystem`. See `gcsfs.GCSFilesystem` for GCS,
                `pyarrow.fs.S3FileSystem`, for S3, and `ablfs.AzureBlobFilesystem` for
                ABS filesystem arguments.
            input_compress_columns: What input columns are compressed with LZ4 in the
                input data. If data is stored in RLlib's `SingleAgentEpisode` (
                `MultiAgentEpisode` not supported, yet). Note the providing
                `rllib.core.columns.Columns.OBS` also tries to decompress
                `rllib.core.columns.Columns.NEXT_OBS`.
            materialize_data: Whether the raw data should be materialized in memory.
                This boosts performance, but requires enough memory to avoid an OOM, so
                make sure that your cluster has the resources available. For very large
                data you might want to switch to streaming mode by setting this to
                `False` (default). If your algorithm does not need the RLModule in the
                Learner connector pipeline or all (learner) connectors are stateless
                you should consider setting `materialize_mapped_data` to `True`
                instead (and set `materialize_data` to `False`). If your data does not
                fit into memory and your Learner connector pipeline requires an RLModule
                or is stateful, set both `materialize_data` and
                `materialize_mapped_data` to `False`.
            materialize_mapped_data: Whether the data should be materialized after
                running it through the Learner connector pipeline (i.e. after running
                the `OfflinePreLearner`). This improves performance, but should only be
                used in case the (learner) connector pipeline does not require an
                RLModule and the (learner) connector pipeline is stateless. For example,
                MARWIL's Learner connector pipeline requires the RLModule for value
                function predictions and training batches would become stale after some
                iterations causing learning degradation or divergence. Also ensure that
                your cluster has enough memory available to avoid an OOM. If set to
                `True`, make sure that `materialize_data` is set to `False` to
                avoid materialization of two datasets. If your data does not fit into
                memory and your Learner connector pipeline requires an RLModule or is
                stateful, set both `materialize_data` and `materialize_mapped_data` to
                `False`.
            map_batches_kwargs: Keyword args for the `map_batches` method. These are
                passed into the `ray.data.Dataset.map_batches` method when sampling
                without checking. If no arguments are passed in, the default arguments
                `{'concurrency': max(2, num_learners), 'zero_copy_batch': True}` are
                used. Use these keyword args together with `input_read_method_kwargs`
                and `iter_batches_kwargs` to tune the performance of the data pipeline.
            iter_batches_kwargs: Keyword args for the `iter_batches` method. These are
                passed into the `ray.data.Dataset.iter_batches` method when sampling
                without checking. If no arguments are passed in, the default argument
                `{'prefetch_batches': 2}` is used. Use these keyword args
                together with `input_read_method_kwargs` and `map_batches_kwargs` to
                tune the performance of the data pipeline.
            ignore_final_observation: If the final observation in an episode chunk should
                be ignored. This concerns mainly column-based data and instead of using a
                user-provided `NEXT_OBS` sets final observations to zero. This should be
                used with BC only, as in true Offline RL algorithms the final observation
                is important.
            prelearner_class: An optional `OfflinePreLearner` class that is used to
                transform data batches in `ray.data.map_batches` used in the
                `OfflineData` class to transform data from columns to batches that can
                be used in the `Learner.update...()` methods. Override the
                `OfflinePreLearner` class and pass your derived class in here, if you
                need to make some further transformations specific for your data or
                loss. The default is None which uses the base `OfflinePreLearner`
                defined in `ray.rllib.offline.offline_prelearner`.
            prelearner_buffer_class: An optional `EpisodeReplayBuffer` class that RLlib
                uses to buffer experiences when data is in `EpisodeType` or
                RLlib's previous `SampleBatch` type format. In this case, a single
                data row may contain multiple timesteps and the buffer serves two
                purposes: (a) to store intermediate data in memory, and (b) to ensure
                that RLlib samples exactly `train_batch_size_per_learner` experiences
                per batch. The default is RLlib's `EpisodeReplayBuffer`.
            prelearner_buffer_kwargs: Optional keyword arguments for initializing the
                `EpisodeReplayBuffer`. In most cases this value is simply the `capacity`
                for the default buffer that RLlib uses (`EpisodeReplayBuffer`), but it
                may differ if the `prelearner_buffer_class` uses a custom buffer.
            prelearner_module_synch_period: The period (number of batches converted)
                after which the `RLModule` held by the `PreLearner` should sync weights.
                The `PreLearner` is used to preprocess batches for the learners. The
                higher this value, the more off-policy the `PreLearner`'s module is.
                Values too small force the `PreLearner` to sync more frequently
                and thus might slow down the data pipeline. The default value chosen
                by the `OfflinePreLearner` is 10.
            dataset_num_iters_per_learner: Number of updates to run in each learner
                during a single training iteration. If None, each learner runs a
                complete epoch over its data block (the dataset is partitioned into
                at least as many blocks as there are learners). The default is `None`.
                This value must be set to `1`, if RLlib uses a single (local) learner.
            input_config: Arguments that describe the settings for reading the input.
                If input is "sample", this is the environment configuration, e.g.
                `env_name` and `env_config`, etc. See `EnvContext` for more info.
                If the input is "dataset", this contains e.g. `format`, `path`.
            actions_in_input_normalized: True, if the actions in a given offline "input"
                are already normalized (between -1.0 and 1.0). This is usually the case
                when the offline file has been generated by another RLlib algorithm
                (e.g. PPO or SAC), while "normalize_actions" was set to True.
            postprocess_inputs: Whether to run postprocess_trajectory() on the
                trajectory fragments from offline inputs. Note that postprocessing is
                done using the *current* policy, not the *behavior* policy, which
                is typically undesirable for on-policy algorithms.
            shuffle_buffer_size: If positive, input batches are shuffled via a
                sliding window buffer of this number of batches. Use this if the input
                data is not in random enough order. Input is delayed until the shuffle
                buffer is filled.
            output: Specify where experiences should be saved:
                - None: don't save any experiences
                - "logdir" to save to the agent log dir
                - a path/URI to save to a custom output directory (e.g., "s3://bckt/")
                - a function that returns a rllib.offline.OutputWriter
            output_config: Arguments accessible from the IOContext for configuring
                custom output.
            output_compress_columns: What sample batch columns to LZ4 compress in the
                output data. Note that providing `rllib.core.columns.Columns.OBS` also
                compresses `rllib.core.columns.Columns.NEXT_OBS`.
            output_max_file_size: Max output file size (in bytes) before rolling over
                to a new file.
            output_max_rows_per_file: Max output row numbers before rolling over to a
                new file.
            output_write_remaining_data: Determines whether any remaining data in the
                recording buffers should be stored to disk. It is only applicable if
                `output_max_rows_per_file` is defined. When sampling data, it is
                buffered until the threshold specified by `output_max_rows_per_file`
                is reached. Only complete multiples of `output_max_rows_per_file` are
                written to disk, while any leftover data remains in the buffers. If a
                recording session is stopped, residual data may still reside in these
                buffers. Setting `output_write_remaining_data` to `True` ensures this
                data is flushed to disk. By default, this attribute is set to `False`.
            output_write_method: Write method for the `ray.data.Dataset` to write the
                offline data to `output`. The default is `write_parquet` for Parquet
                files. See https://docs.ray.io/en/latest/data/api/input_output.html for
                more info about available write methods in `ray.data`.
            output_write_method_kwargs: `kwargs` for the `output_write_method`. These
                are passed into the write method without checking.
            output_filesystem: A cloud filesystem to handle access to cloud storage when
                writing experiences. Should be either "gcs" for Google Cloud Storage,
                "s3" for AWS S3 buckets, or "abs" for Azure Blob Storage.
            output_filesystem_kwargs: A dictionary holding the kwargs for the filesystem
                given by `output_filesystem`. See `gcsfs.GCSFilesystem` for GCS,
                `pyarrow.fs.S3FileSystem`, for S3, and `ablfs.AzureBlobFilesystem` for
                ABS filesystem arguments.
            output_write_episodes: If RLlib should record data in its RLlib's
                `EpisodeType` format (that is, `SingleAgentEpisode` objects). Use this
                format, if you need RLlib to order data in time and directly group by
                episodes for example to train stateful modules or if you plan to use
                recordings exclusively in RLlib. Otherwise RLlib records data in tabular
                (columnar) format. Default is `True`.
            offline_sampling: Whether sampling for the Algorithm happens via
                reading from offline data. If True, EnvRunners don't limit the number
                of collected batches within the same `sample()` call based on
                the number of sub-environments within the worker (no sub-environments
                present).

        Returns:
            This updated AlgorithmConfig object.
        """
        # Only copy explicitly-provided args onto `self` (NotProvided sentinel
        # marks "leave the current setting untouched").
        if input_ is not NotProvided:
            self.input_ = input_
        if offline_data_class is not NotProvided:
            self.offline_data_class = offline_data_class
        if input_read_method is not NotProvided:
            self.input_read_method = input_read_method
        if input_read_method_kwargs is not NotProvided:
            self.input_read_method_kwargs = input_read_method_kwargs
        if input_read_schema is not NotProvided:
            self.input_read_schema = input_read_schema
        if input_read_episodes is not NotProvided:
            self.input_read_episodes = input_read_episodes
        if input_read_sample_batches is not NotProvided:
            self.input_read_sample_batches = input_read_sample_batches
        if input_read_batch_size is not NotProvided:
            self.input_read_batch_size = input_read_batch_size
        if input_filesystem is not NotProvided:
            self.input_filesystem = input_filesystem
        if input_filesystem_kwargs is not NotProvided:
            self.input_filesystem_kwargs = input_filesystem_kwargs
        if input_compress_columns is not NotProvided:
            self.input_compress_columns = input_compress_columns
        if materialize_data is not NotProvided:
            self.materialize_data = materialize_data
        if materialize_mapped_data is not NotProvided:
            self.materialize_mapped_data = materialize_mapped_data
        if map_batches_kwargs is not NotProvided:
            self.map_batches_kwargs = map_batches_kwargs
        if iter_batches_kwargs is not NotProvided:
            self.iter_batches_kwargs = iter_batches_kwargs
        if ignore_final_observation is not NotProvided:
            self.ignore_final_observation = ignore_final_observation
        if prelearner_class is not NotProvided:
            self.prelearner_class = prelearner_class
        if prelearner_buffer_class is not NotProvided:
            self.prelearner_buffer_class = prelearner_buffer_class
        if prelearner_buffer_kwargs is not NotProvided:
            self.prelearner_buffer_kwargs = prelearner_buffer_kwargs
        if prelearner_module_synch_period is not NotProvided:
            self.prelearner_module_synch_period = prelearner_module_synch_period
        if dataset_num_iters_per_learner is not NotProvided:
            self.dataset_num_iters_per_learner = dataset_num_iters_per_learner
        if input_config is not NotProvided:
            if not isinstance(input_config, dict):
                raise ValueError(
                    f"input_config must be a dict, got {type(input_config)}."
                )
            # TODO (Kourosh) Once we use a complete separation between rollout worker
            # and input dataset reader we can remove this.
            # For now Error out if user attempts to set these parameters.
            msg = "{} should not be set in the input_config. RLlib uses {} instead."
            if input_config.get("num_cpus_per_read_task") is not None:
                raise ValueError(
                    msg.format(
                        "num_cpus_per_read_task",
                        "config.env_runners(num_cpus_per_env_runner=..)",
                    )
                )
            if input_config.get("parallelism") is not None:
                if self.in_evaluation:
                    raise ValueError(
                        msg.format(
                            "parallelism",
                            "config.evaluation(evaluation_num_env_runners=..)",
                        )
                    )
                else:
                    raise ValueError(
                        msg.format(
                            "parallelism", "config.env_runners(num_env_runners=..)"
                        )
                    )
            self.input_config = input_config
        if actions_in_input_normalized is not NotProvided:
            self.actions_in_input_normalized = actions_in_input_normalized
        if postprocess_inputs is not NotProvided:
            self.postprocess_inputs = postprocess_inputs
        if shuffle_buffer_size is not NotProvided:
            self.shuffle_buffer_size = shuffle_buffer_size
        # TODO (simon): Enable storing to general log-directory.
        if output is not NotProvided:
            self.output = output
        if output_config is not NotProvided:
            self.output_config = output_config
        if output_compress_columns is not NotProvided:
            self.output_compress_columns = output_compress_columns
        if output_max_file_size is not NotProvided:
            self.output_max_file_size = output_max_file_size
        if output_max_rows_per_file is not NotProvided:
            self.output_max_rows_per_file = output_max_rows_per_file
        if output_write_remaining_data is not NotProvided:
            self.output_write_remaining_data = output_write_remaining_data
        if output_write_method is not NotProvided:
            self.output_write_method = output_write_method
        if output_write_method_kwargs is not NotProvided:
            self.output_write_method_kwargs = output_write_method_kwargs
        if output_filesystem is not NotProvided:
            self.output_filesystem = output_filesystem
        if output_filesystem_kwargs is not NotProvided:
            self.output_filesystem_kwargs = output_filesystem_kwargs
        if output_write_episodes is not NotProvided:
            self.output_write_episodes = output_write_episodes
        if offline_sampling is not NotProvided:
            self.offline_sampling = offline_sampling
        return self
def multi_agent(
self,
*,
policies: Optional[
Union[MultiAgentPolicyConfigDict, Collection[PolicyID]]
] = NotProvided,
policy_map_capacity: Optional[int] = NotProvided,
policy_mapping_fn: Optional[
Callable[[AgentID, "EpisodeType"], PolicyID]
] = NotProvided,
policies_to_train: Optional[
Union[Collection[PolicyID], Callable[[PolicyID, SampleBatchType], bool]]
] = NotProvided,
policy_states_are_swappable: Optional[bool] = NotProvided,
observation_fn: Optional[Callable] = NotProvided,
count_steps_by: Optional[str] = NotProvided,
# Deprecated args:
algorithm_config_overrides_per_module=DEPRECATED_VALUE,
replay_mode=DEPRECATED_VALUE,
# Now done via Ray object store, which has its own cloud-supported
# spillover mechanism.
policy_map_cache=DEPRECATED_VALUE,
) -> Self:
"""Sets the config's multi-agent settings.
Validates the new multi-agent settings and translates everything into
a unified multi-agent setup format. For example a `policies` list or set
of IDs is properly converted into a dict mapping these IDs to PolicySpecs.
Args:
policies: Map of type MultiAgentPolicyConfigDict from policy ids to either
4-tuples of (policy_cls, obs_space, act_space, config) or PolicySpecs.
These tuples or PolicySpecs define the class of the policy, the
observation- and action spaces of the policies, and any extra config.
policy_map_capacity: Keep this many policies in the "policy_map" (before
writing least-recently used ones to disk/S3).
policy_mapping_fn: Function mapping agent ids to policy ids. The signature
is: `(agent_id, episode, worker, **kwargs) -> PolicyID`.
policies_to_train: Determines those policies that should be updated.
Options are:
- None, for training all policies.
- An iterable of PolicyIDs that should be trained.
- A callable, taking a PolicyID and a SampleBatch or MultiAgentBatch
and returning a bool (indicating whether the given policy is trainable
or not, given the particular batch). This allows you to have a policy
trained only on certain data (e.g. when playing against a certain
opponent).
policy_states_are_swappable: Whether all Policy objects in this map can be
"swapped out" via a simple `state = A.get_state(); B.set_state(state)`,
where `A` and `B` are policy instances in this map. You should set
this to True for significantly speeding up the PolicyMap's cache lookup
times, iff your policies all share the same neural network
architecture and optimizer types. If True, the PolicyMap doesn't
have to garbage collect old, least recently used policies, but instead
keeps them in memory and simply override their state with the state of
the most recently accessed one.
For example, in a league-based training setup, you might have 100s of
the same policies in your map (playing against each other in various
combinations), but all of them share the same state structure
(are "swappable").
observation_fn: Optional function that can be used to enhance the local
agent observations to include more state. See
rllib/evaluation/observation_function.py for more info.
count_steps_by: Which metric to use as the "batch size" when building a
MultiAgentBatch. The two supported values are:
"env_steps": Count each time the env is "stepped" (no matter how many
multi-agent actions are passed/how many multi-agent observations
have been returned in the previous step).
"agent_steps": Count each individual agent step as one step.
Returns:
This updated AlgorithmConfig object.
"""
if policies is not NotProvided:
# Make sure our Policy IDs are ok (this should work whether `policies`
# is a dict or just any Sequence).
for pid in policies:
validate_module_id(pid, error=True)
# Collection: Convert to dict.
if isinstance(policies, (set, tuple, list)):
policies = {p: PolicySpec() for p in policies}
# Validate each policy spec in a given dict.
if isinstance(policies, dict):
for pid, spec in policies.items():
# If not a PolicySpec object, values must be lists/tuples of len 4.
if not isinstance(spec, PolicySpec):
if not isinstance(spec, (list, tuple)) or len(spec) != 4:
raise ValueError(
"Policy specs must be tuples/lists of "
"(cls or None, obs_space, action_space, config), "
f"got {spec} for PolicyID={pid}"
)
# TODO: Switch from dict to AlgorithmConfigOverride, once available.
# Config not a dict.
elif (
not isinstance(spec.config, (AlgorithmConfig, dict))
and spec.config is not None
):
raise ValueError(
f"Multi-agent policy config for {pid} must be a dict or "
f"AlgorithmConfig object, but got {type(spec.config)}!"
)
self.policies = policies
else:
raise ValueError(
"`policies` must be dict mapping PolicyID to PolicySpec OR a "
"set/tuple/list of PolicyIDs!"
)
if algorithm_config_overrides_per_module != DEPRECATED_VALUE:
deprecation_warning(old="", error=False)
self.rl_module(
algorithm_config_overrides_per_module=(
algorithm_config_overrides_per_module
)
)
if policy_map_capacity is not NotProvided:
self.policy_map_capacity = policy_map_capacity
if policy_mapping_fn is not NotProvided:
# Create `policy_mapping_fn` from a config dict.
# Helpful if users would like to specify custom callable classes in
# yaml files.
if isinstance(policy_mapping_fn, dict):
policy_mapping_fn = from_config(policy_mapping_fn)
self.policy_mapping_fn = policy_mapping_fn
if observation_fn is not NotProvided:
self.observation_fn = observation_fn
if policy_map_cache != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.multi_agent(policy_map_cache=..)",
error=True,
)
if replay_mode != DEPRECATED_VALUE:
deprecation_warning(
old="AlgorithmConfig.multi_agent(replay_mode=..)",
new="AlgorithmConfig.training("
"replay_buffer_config={'replay_mode': ..})",
error=True,
)
if count_steps_by is not NotProvided:
if count_steps_by not in ["env_steps", "agent_steps"]:
raise ValueError(
"config.multi_agent(count_steps_by=..) must be one of "
f"[env_steps|agent_steps], not {count_steps_by}!"
)
self.count_steps_by = count_steps_by
if policies_to_train is not NotProvided:
assert (
isinstance(policies_to_train, (list, set, tuple))
or callable(policies_to_train)
or policies_to_train is None
), (
"ERROR: `policies_to_train` must be a [list|set|tuple] or a "
"callable taking PolicyID and SampleBatch and returning "
"True|False (trainable or not?) or None (for always training all "
"policies)."
)
# Check `policies_to_train` for invalid entries.
if isinstance(policies_to_train, (list, set, tuple)):
if len(policies_to_train) == 0:
logger.warning(
"`config.multi_agent(policies_to_train=..)` is empty! "
"Make sure - if you would like to learn at least one policy - "
"to add its ID to that list."
)
self.policies_to_train = policies_to_train
if policy_states_are_swappable is not NotProvided:
self.policy_states_are_swappable = policy_states_are_swappable
return self
def reporting(
    self,
    *,
    keep_per_episode_custom_metrics: Optional[bool] = NotProvided,
    metrics_episode_collection_timeout_s: Optional[float] = NotProvided,
    metrics_num_episodes_for_smoothing: Optional[int] = NotProvided,
    min_time_s_per_iteration: Optional[float] = NotProvided,
    min_train_timesteps_per_iteration: Optional[int] = NotProvided,
    min_sample_timesteps_per_iteration: Optional[int] = NotProvided,
    log_gradients: Optional[bool] = NotProvided,
) -> Self:
    """Sets the config's reporting settings.

    Args:
        keep_per_episode_custom_metrics: Store raw custom metrics without
            computing max, min, or mean over them.
        metrics_episode_collection_timeout_s: Wait for metric batches for at
            most this many seconds. Batches that have not returned in time
            are collected in the next train iteration.
        metrics_num_episodes_for_smoothing: Smooth rollout metrics over this
            many episodes, if possible. Right after sampling starts, there
            may be fewer episodes in the buffer, in which case metrics are
            computed over the smaller number of available episodes. If a
            single training iteration collects more than this many episodes,
            all of them are used (no "excess" episodes are ever cut). Set to
            1 to disable smoothing and always report only the most recently
            collected episode's return.
        min_time_s_per_iteration: Minimum time (in sec) to accumulate within
            a single `Algorithm.train()` call. This value does not affect
            learning, only how often `Algorithm.training_step()` is called by
            `Algorithm.train()`. If - after one such step attempt - the time
            taken has not reached `min_time_s_per_iteration`, n more
            `Algorithm.training_step()` calls are performed until the minimum
            time has been consumed. Set to 0 or None for no minimum time.
        min_train_timesteps_per_iteration: Minimum training timesteps to
            accumulate within a single `train()` call. This value does not
            affect learning, only how often `Algorithm.training_step()` is
            called by `Algorithm.train()`. If - after one such step attempt -
            the training timestep count has not been reached, n more
            `training_step()` calls are performed until the minimum timesteps
            have been executed. Set to 0 or None for no minimum timesteps.
        min_sample_timesteps_per_iteration: Minimum env sampling timesteps to
            accumulate within a single `train()` call. This value does not
            affect learning, only how often `Algorithm.training_step()` is
            called by `Algorithm.train()`. If - after one such step attempt -
            the env sampling timestep count has not been reached, n more
            `training_step()` calls are performed until the minimum timesteps
            have been executed. Set to 0 or None for no minimum timesteps.
        log_gradients: Log gradients to results. If this is `True`, the
            global norm of the gradients dictionary for each optimizer is
            logged to results. The default is `False`.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Only overwrite attributes whose corresponding argument the caller
    # actually passed (sentinel `NotProvided` marks "not passed").
    requested = {
        "keep_per_episode_custom_metrics": keep_per_episode_custom_metrics,
        "metrics_episode_collection_timeout_s": (
            metrics_episode_collection_timeout_s
        ),
        "metrics_num_episodes_for_smoothing": (
            metrics_num_episodes_for_smoothing
        ),
        "min_time_s_per_iteration": min_time_s_per_iteration,
        "min_train_timesteps_per_iteration": min_train_timesteps_per_iteration,
        "min_sample_timesteps_per_iteration": (
            min_sample_timesteps_per_iteration
        ),
        "log_gradients": log_gradients,
    }
    for attr_name, value in requested.items():
        if value is not NotProvided:
            setattr(self, attr_name, value)
    return self
def checkpointing(
    self,
    export_native_model_files: Optional[bool] = NotProvided,
    checkpoint_trainable_policies_only: Optional[bool] = NotProvided,
) -> Self:
    """Sets the config's checkpointing settings.

    Args:
        export_native_model_files: Whether individual Policy- or Algorithm
            checkpoints also contain (tf or torch) native model files. These
            can be used to restore just the NN models from the files without
            requiring RLlib, and are generated by calling the tf- or torch-
            built-in saving utility methods on the actual models.
        checkpoint_trainable_policies_only: Whether to only add Policies to
            the Algorithm checkpoint (in sub-directory "policies/") that are
            trainable according to the `is_trainable_policy` callable of the
            local worker.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Leave each attribute untouched unless the caller explicitly provided
    # a value for it.
    for attr_name, value in (
        ("export_native_model_files", export_native_model_files),
        (
            "checkpoint_trainable_policies_only",
            checkpoint_trainable_policies_only,
        ),
    ):
        if value is not NotProvided:
            setattr(self, attr_name, value)
    return self
def debugging(
    self,
    *,
    logger_creator: Optional[Callable[[], Logger]] = NotProvided,
    logger_config: Optional[dict] = NotProvided,
    log_level: Optional[str] = NotProvided,
    log_sys_usage: Optional[bool] = NotProvided,
    fake_sampler: Optional[bool] = NotProvided,
    seed: Optional[int] = NotProvided,
) -> Self:
    """Sets the config's debugging settings.

    Args:
        logger_creator: Callable that creates a ray.tune.Logger object. If
            unspecified, a default logger is created.
        logger_config: Logger-specific configuration to be used inside the
            Logger. The default value None allows overwriting with nested
            dicts.
        log_level: The ray.rllib.* log level for the agent process and its
            workers. Should be one of DEBUG, INFO, WARN, or ERROR. The DEBUG
            level also periodically prints out summaries of relevant internal
            dataflow (this is also printed out once at startup at the INFO
            level).
        log_sys_usage: Log system resource metrics to results. This requires
            `psutil` to be installed for sys stats, and `gputil` for GPU
            metrics.
        fake_sampler: Use a fake (infinite speed) sampler. For testing only.
        seed: This argument, in conjunction with worker_index, sets the
            random seed of each worker, so that identically configured trials
            have identical results. This makes experiments reproducible.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Assign only those settings the caller explicitly provided.
    for attr_name, value in (
        ("logger_creator", logger_creator),
        ("logger_config", logger_config),
        ("log_level", log_level),
        ("log_sys_usage", log_sys_usage),
        ("fake_sampler", fake_sampler),
        ("seed", seed),
    ):
        if value is not NotProvided:
            setattr(self, attr_name, value)
    return self
def fault_tolerance(
    self,
    *,
    restart_failed_env_runners: Optional[bool] = NotProvided,
    ignore_env_runner_failures: Optional[bool] = NotProvided,
    max_num_env_runner_restarts: Optional[int] = NotProvided,
    delay_between_env_runner_restarts_s: Optional[float] = NotProvided,
    restart_failed_sub_environments: Optional[bool] = NotProvided,
    num_consecutive_env_runner_failures_tolerance: Optional[int] = NotProvided,
    env_runner_health_probe_timeout_s: Optional[float] = NotProvided,
    env_runner_restore_timeout_s: Optional[float] = NotProvided,
    # Deprecated args.
    recreate_failed_env_runners=DEPRECATED_VALUE,
    ignore_worker_failures=DEPRECATED_VALUE,
    recreate_failed_workers=DEPRECATED_VALUE,
    max_num_worker_restarts=DEPRECATED_VALUE,
    delay_between_worker_restarts_s=DEPRECATED_VALUE,
    num_consecutive_worker_failures_tolerance=DEPRECATED_VALUE,
    worker_health_probe_timeout_s=DEPRECATED_VALUE,
    worker_restore_timeout_s=DEPRECATED_VALUE,
) -> Self:
    """Sets the config's fault tolerance settings.

    Args:
        restart_failed_env_runners: Whether - upon an EnvRunner failure -
            RLlib tries to restart the lost EnvRunner(s) as an identical copy
            of the failed one(s). You should set this to True when training
            on SPOT instances that may preempt any time. The new, recreated
            EnvRunner(s) only differ from the failed one in their
            `self.recreated_worker=True` property value and have the same
            `worker_index` as the original(s). If this setting is True, the
            value of the `ignore_env_runner_failures` setting is ignored.
        ignore_env_runner_failures: Whether to ignore any EnvRunner failures
            and continue running with the remaining EnvRunners. This setting
            is ignored, if `restart_failed_env_runners=True`.
        max_num_env_runner_restarts: The maximum number of times any
            EnvRunner is allowed to be restarted (if
            `restart_failed_env_runners` is True).
        delay_between_env_runner_restarts_s: The delay (in seconds) between
            two consecutive EnvRunner restarts (if
            `restart_failed_env_runners` is True).
        restart_failed_sub_environments: If True and any sub-environment
            (within a vectorized env) throws any error during env stepping,
            the EnvRunner tries to restart the faulty sub-environment. This
            is done without disturbing the other (still intact)
            sub-environments and without the EnvRunner crashing. You can
            raise `ray.rllib.env.env_runner.StepFailedRecreateEnvError` from
            your environment's `step` method to not log the error.
        num_consecutive_env_runner_failures_tolerance: The number of
            consecutive times an EnvRunner failure (also for evaluation) is
            tolerated before finally crashing the Algorithm. Only useful if
            either `ignore_env_runner_failures` or
            `restart_failed_env_runners` is True. Note that for
            `restart_failed_sub_environments` and sub-environment failures,
            the EnvRunner itself is NOT affected and won't throw any errors
            as the flawed sub-environment is silently restarted under the
            hood.
        env_runner_health_probe_timeout_s: Max amount of time in seconds, we
            should spend waiting for EnvRunner health probe calls
            (`EnvRunner.ping.remote()`) to respond. Health pings are very
            cheap, however, we perform the health check via a blocking
            `ray.get()`, so the default value should not be too large.
        env_runner_restore_timeout_s: Max amount of time we should wait to
            restore states on recovered EnvRunner actors. Default is 30 mins.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Every deprecated arg hard-errors (error=True) when still passed.
    # Checked in order: (old arg name, passed value, replacement name).
    deprecated_args = (
        (
            "recreate_failed_env_runners",
            recreate_failed_env_runners,
            "restart_failed_env_runners",
        ),
        (
            "ignore_worker_failures",
            ignore_worker_failures,
            "ignore_env_runner_failures",
        ),
        (
            "recreate_failed_workers",
            recreate_failed_workers,
            "restart_failed_env_runners",
        ),
        (
            "max_num_worker_restarts",
            max_num_worker_restarts,
            "max_num_env_runner_restarts",
        ),
        (
            "delay_between_worker_restarts_s",
            delay_between_worker_restarts_s,
            "delay_between_env_runner_restarts_s",
        ),
        (
            "num_consecutive_worker_failures_tolerance",
            num_consecutive_worker_failures_tolerance,
            "num_consecutive_env_runner_failures_tolerance",
        ),
        (
            "worker_health_probe_timeout_s",
            worker_health_probe_timeout_s,
            "env_runner_health_probe_timeout_s",
        ),
        (
            "worker_restore_timeout_s",
            worker_restore_timeout_s,
            "env_runner_restore_timeout_s",
        ),
    )
    for old_name, passed_value, new_name in deprecated_args:
        if passed_value != DEPRECATED_VALUE:
            deprecation_warning(
                old=f"AlgorithmConfig.fault_tolerance({old_name})",
                new=f"AlgorithmConfig.fault_tolerance({new_name})",
                error=True,
            )
    # Assign only the explicitly provided, non-deprecated settings.
    for attr_name, value in (
        ("ignore_env_runner_failures", ignore_env_runner_failures),
        ("restart_failed_env_runners", restart_failed_env_runners),
        ("max_num_env_runner_restarts", max_num_env_runner_restarts),
        (
            "delay_between_env_runner_restarts_s",
            delay_between_env_runner_restarts_s,
        ),
        ("restart_failed_sub_environments", restart_failed_sub_environments),
        (
            "num_consecutive_env_runner_failures_tolerance",
            num_consecutive_env_runner_failures_tolerance,
        ),
        (
            "env_runner_health_probe_timeout_s",
            env_runner_health_probe_timeout_s,
        ),
        ("env_runner_restore_timeout_s", env_runner_restore_timeout_s),
    ):
        if value is not NotProvided:
            setattr(self, attr_name, value)
    return self
def rl_module(
    self,
    *,
    model_config: Optional[Union[Dict[str, Any], DefaultModelConfig]] = NotProvided,
    rl_module_spec: Optional[RLModuleSpecType] = NotProvided,
    algorithm_config_overrides_per_module: Optional[
        Dict[ModuleID, PartialAlgorithmConfigDict]
    ] = NotProvided,
    # Deprecated arg.
    model_config_dict=DEPRECATED_VALUE,
    _enable_rl_module_api=DEPRECATED_VALUE,
) -> Self:
    """Sets the config's RLModule settings.

    Args:
        model_config: The DefaultModelConfig object (or a plain config dict)
            passed as `model_config` arg into each RLModule's constructor.
            This is used for all RLModules, if not otherwise specified
            through `rl_module_spec`.
        rl_module_spec: The RLModule spec to use for this config. Either a
            RLModuleSpec or a MultiRLModuleSpec. If the observation_space,
            action_space, catalog_class, or the model config is not
            specified, it is inferred from the env and other parts of the
            algorithm config object.
        algorithm_config_overrides_per_module: Only used if
            `enable_rl_module_and_learner=True`. A mapping from ModuleIDs to
            per-module AlgorithmConfig override dicts, which apply certain
            settings, e.g. the learning rate, from the main AlgorithmConfig
            only to this particular module (within a MultiRLModule). You can
            create override dicts by using the `AlgorithmConfig.overrides`
            utility. For example, to override your learning rate and (PPO)
            lambda setting just for a single RLModule with your
            MultiRLModule, do:
            config.multi_agent(algorithm_config_overrides_per_module={
                "module_1": PPOConfig.overrides(lr=0.0002, lambda_=0.75),
            })

    Returns:
        This updated AlgorithmConfig object.
    """
    # Hard-deprecated: passing `_enable_rl_module_api` raises immediately.
    if _enable_rl_module_api != DEPRECATED_VALUE:
        deprecation_warning(
            old="AlgorithmConfig.rl_module(_enable_rl_module_api=..)",
            new="AlgorithmConfig.api_stack(enable_rl_module_and_learner=..)",
            error=True,
        )
    # Soft-deprecated: warn, then treat the old arg as `model_config`.
    if model_config_dict != DEPRECATED_VALUE:
        deprecation_warning(
            old="AlgorithmConfig.rl_module(model_config_dict=..)",
            new="AlgorithmConfig.rl_module(model_config=..)",
            error=False,
        )
        model_config = model_config_dict

    if model_config is not NotProvided:
        self._model_config = model_config
    if rl_module_spec is not NotProvided:
        self._rl_module_spec = rl_module_spec

    if algorithm_config_overrides_per_module is not NotProvided:
        # Validate before merging: must be a dict keyed by ModuleID.
        if not isinstance(algorithm_config_overrides_per_module, dict):
            raise ValueError(
                "`algorithm_config_overrides_per_module` must be a dict mapping "
                "module IDs to config override dicts! You provided "
                f"{algorithm_config_overrides_per_module}."
            )
        # Merge (don't replace) into any previously configured overrides.
        self.algorithm_config_overrides_per_module.update(
            algorithm_config_overrides_per_module
        )
    return self
def experimental(
    self,
    *,
    # NOTE(fix): This previously defaulted to `True` while the body checks
    # `is not NotProvided`. Since `True is not NotProvided` always holds, ANY
    # call to `experimental()` (even one setting only an unrelated flag)
    # silently reset `self._validate_config` to True, clobbering a prior
    # `experimental(_validate_config=False)`. Use the `NotProvided` sentinel
    # like all sibling args so the attribute is only touched when the caller
    # explicitly passes it.
    _validate_config: Optional[bool] = NotProvided,
    _use_msgpack_checkpoints: Optional[bool] = NotProvided,
    _torch_grad_scaler_class: Optional[Type] = NotProvided,
    _torch_lr_scheduler_classes: Optional[
        Union[List[Type], Dict[ModuleID, List[Type]]]
    ] = NotProvided,
    _tf_policy_handles_more_than_one_loss: Optional[bool] = NotProvided,
    _disable_preprocessor_api: Optional[bool] = NotProvided,
    _disable_action_flattening: Optional[bool] = NotProvided,
    _disable_initialize_loss_from_dummy_batch: Optional[bool] = NotProvided,
) -> Self:
    """Sets the config's experimental settings.

    Args:
        _validate_config: Whether to run `validate()` on this config. True by
            default. If False, ignores any calls to `self.validate()`.
        _use_msgpack_checkpoints: Create state files in all checkpoints
            through msgpack rather than pickle.
        _torch_grad_scaler_class: Class to use for torch loss scaling (and
            gradient unscaling). The class must implement the following
            methods to be compatible with a `TorchLearner`. These
            methods/APIs match exactly those of torch's own
            `torch.amp.GradScaler` (see here for more details
            https://pytorch.org/docs/stable/amp.html#gradient-scaling):
            `scale([loss])` to scale the loss by some factor.
            `get_scale()` to get the current scale factor value.
            `step([optimizer])` to unscale the grads (divide by the scale
            factor) and step the given optimizer.
            `update()` to update the scaler after an optimizer step (for
            example to adjust the scale factor).
        _torch_lr_scheduler_classes: A list of `torch.lr_scheduler.LRScheduler`
            (see here for more details
            https://pytorch.org/docs/stable/optim.html#how-to-adjust-learning-rate)
            classes or a dictionary mapping module IDs to such a list of
            respective scheduler classes. Multiple scheduler classes can be
            applied in sequence and are stepped in the same sequence as
            defined here. Note, most learning rate schedulers need arguments
            to be configured, that is, you might have to partially initialize
            the schedulers in the list(s) using `functools.partial`.
        _tf_policy_handles_more_than_one_loss: Experimental flag. If True,
            TFPolicy handles more than one loss or optimizer. Set this to
            True, if you would like to return more than one loss term from
            your `loss_fn` and an equal number of optimizers from your
            `optimizer_fn`.
        _disable_preprocessor_api: Experimental flag. If True, no
            (observation) preprocessor is created and observations arrive in
            model as they are returned by the env.
        _disable_action_flattening: Experimental flag. If True, RLlib doesn't
            flatten the policy-computed actions into a single tensor (for
            storage in SampleCollectors/output files/etc..), but leave
            (possibly nested) actions as-is. Disabling flattening affects:
            - SampleCollectors: Have to store possibly nested action structs.
            - Models that have the previous action(s) as part of their input.
            - Algorithms reading from offline files (incl. action
              information).
        _disable_initialize_loss_from_dummy_batch: Experimental flag. If
            True, skip the dummy-batch based loss initialization.

    Returns:
        This updated AlgorithmConfig object.
    """
    # Each setting is only written if explicitly provided by the caller.
    if _validate_config is not NotProvided:
        self._validate_config = _validate_config
    if _use_msgpack_checkpoints is not NotProvided:
        self._use_msgpack_checkpoints = _use_msgpack_checkpoints
    if _tf_policy_handles_more_than_one_loss is not NotProvided:
        self._tf_policy_handles_more_than_one_loss = (
            _tf_policy_handles_more_than_one_loss
        )
    if _disable_preprocessor_api is not NotProvided:
        self._disable_preprocessor_api = _disable_preprocessor_api
    if _disable_action_flattening is not NotProvided:
        self._disable_action_flattening = _disable_action_flattening
    if _disable_initialize_loss_from_dummy_batch is not NotProvided:
        self._disable_initialize_loss_from_dummy_batch = (
            _disable_initialize_loss_from_dummy_batch
        )
    if _torch_grad_scaler_class is not NotProvided:
        self._torch_grad_scaler_class = _torch_grad_scaler_class
    if _torch_lr_scheduler_classes is not NotProvided:
        self._torch_lr_scheduler_classes = _torch_lr_scheduler_classes
    return self
@property
def is_atari(self) -> bool:
    """True if the specified env is an Atari env (lazily detected, cached)."""
    # Not yet determined, try to figure this out.
    if self._is_atari is None:
        # Atari envs are usually specified via a string like "PongNoFrameskip-v4"
        # or "ale_py:ALE/Breakout-v5".
        # We do NOT attempt to auto-detect Atari env for other specified types like
        # a callable, to avoid running heavy logics in validate().
        # For these cases, users can explicitly set `environment(atari=True)`.
        # `type(...) is not str` (not isinstance) -> str subclasses also skip
        # auto-detection.
        if type(self.env) is not str:
            return False
        try:
            env = gym.make(self.env)
        # Any gymnasium error -> Cannot be an Atari env.
        # NOTE: Nothing is cached on this path (`self._is_atari` stays None),
        # so the check is re-attempted on the next property access.
        except gym.error.Error:
            return False
        self._is_atari = is_atari(env)
        # Clean up env's resources, if any.
        env.close()
    return self._is_atari
@property
def is_multi_agent(self) -> bool:
    """Returns whether this config specifies a multi-agent setup.

    Returns:
        True, if a) >1 policies defined OR b) 1 policy defined, but its ID is
        NOT DEFAULT_POLICY_ID.
    """
    policy_map = self.policies
    if len(policy_map) > 1:
        return True
    # Exactly one (or zero) policies: multi-agent iff the single policy is
    # not the default one.
    return DEFAULT_POLICY_ID not in policy_map
@property
def learner_class(self) -> Type["Learner"]:
    """Returns the Learner sub-class to use by this Algorithm.

    Either
    a) the user has set a specific learner class via
       `.training(learner_class=...)`, in which case that class is returned,
    or
    b) the learner class was left unset, in which case this AlgorithmConfig
       determines the actual class via its own
       `.get_default_learner_class()` method.
    """
    configured = self._learner_class
    # Truthiness check (not `is None`) to match the fallback semantics of
    # `a or b`.
    return configured if configured else self.get_default_learner_class()
@property
def model_config(self):
    """Defines the model configuration used.

    Combines the algorithm-defined auto configuration
    (`self._model_config_auto_includes`) with the user-defined configuration
    in `self._model_config`. User values take precedence over auto-included
    ones. The resulting dictionary configures the `RLModule` on the new
    stack and the `ModelV2` on the old stack.

    Returns:
        A dictionary with the model configuration.
    """
    user_config = self._model_config
    # A DefaultModelConfig dataclass instance is converted to a plain dict
    # first, so it can be merged.
    if not isinstance(user_config, dict):
        user_config = dataclasses.asdict(user_config)
    return self._model_config_auto_includes | user_config
@property
def rl_module_spec(self):
    """Returns the RLModule spec to use: user-provided merged into default.

    The algorithm's default spec always serves as the base. If the user has
    set a spec via `self.rl_module(rl_module_spec=...)`, it is validated and
    merged with the default (filling in items missing from one side, such as
    spaces or the module class). Mixing a user MultiRLModuleSpec with a
    default single-agent RLModuleSpec is allowed; the reverse is not.
    """
    default_rl_module_spec = self.get_default_rl_module_spec()
    _check_rl_module_spec(default_rl_module_spec)
    # `self._rl_module_spec` has been user defined (via call to `self.rl_module()`).
    if self._rl_module_spec is not None:
        # Merge provided RL Module spec class with defaults.
        _check_rl_module_spec(self._rl_module_spec)
        # Merge given spec with default one (in case items are missing, such as
        # spaces, module class, etc.)
        if isinstance(self._rl_module_spec, RLModuleSpec):
            if isinstance(default_rl_module_spec, RLModuleSpec):
                # NOTE: `update` mutates the default spec in place; the
                # (mutated) default is what gets returned.
                default_rl_module_spec.update(self._rl_module_spec)
                return default_rl_module_spec
            elif isinstance(default_rl_module_spec, MultiRLModuleSpec):
                raise ValueError(
                    "Cannot merge MultiRLModuleSpec with RLModuleSpec!"
                )
        else:
            # User spec is a MultiRLModuleSpec: deep-copy it (so the stored
            # spec isn't mutated), then fill gaps from the default spec.
            multi_rl_module_spec = copy.deepcopy(self._rl_module_spec)
            multi_rl_module_spec.update(default_rl_module_spec)
            return multi_rl_module_spec
    # `self._rl_module_spec` has not been user defined -> return default one.
    else:
        return default_rl_module_spec
@property
def train_batch_size_per_learner(self) -> int:
    """Per-Learner train batch size (inferred if not explicitly set).

    Falls back to splitting the global `train_batch_size` evenly across the
    configured Learners (treating 0 Learners as 1, i.e. the local Learner).
    """
    explicit = self._train_batch_size_per_learner
    if explicit is not None:
        return explicit
    # Not set explicitly -> infer from the global batch size.
    return self.train_batch_size // (self.num_learners or 1)
@train_batch_size_per_learner.setter
def train_batch_size_per_learner(self, value: int) -> None:
    # Store the explicit per-Learner batch size; the property getter only
    # falls back to inferring from `train_batch_size` while this is None.
    self._train_batch_size_per_learner = value
@property
def total_train_batch_size(self) -> int:
    """Returns the effective total train batch size.

    New API stack: `train_batch_size_per_learner` * [effective num Learners].
    @OldAPIStack: User never touches `train_batch_size_per_learner` or
    `num_learners`) -> `train_batch_size`.
    """
    # Zero Learners (local mode) counts as one.
    effective_num_learners = self.num_learners or 1
    return effective_num_learners * self.train_batch_size_per_learner
# TODO: Make rollout_fragment_length as read-only property and replace the current
# self.rollout_fragment_length a private variable.
def get_rollout_fragment_length(self, worker_index: int = 0) -> int:
    """Automatically infers a proper rollout_fragment_length setting if "auto".

    Uses the simple formula:
    `rollout_fragment_length` = `total_train_batch_size` /
    (`num_envs_per_env_runner` * `num_env_runners`)

    If the division leaves a remainder and `worker_index` is provided, the
    first few workers each collect one additional timestep, such that the
    overall batch size (across the workers) adds up to exactly the
    `total_train_batch_size`.

    Args:
        worker_index: Index of the worker asking for its fragment length
            (presumably 1-based for remote workers, given the
            `(worker_index - 1)` math below — confirm against callers).

    Returns:
        The user-provided `rollout_fragment_length` or a computed one (if the
        user provided value is "auto"), making sure `total_train_batch_size`
        is reached exactly in each iteration.
    """
    # Explicit user setting -> return as-is.
    if self.rollout_fragment_length != "auto":
        return self.rollout_fragment_length

    # Examples:
    # 2 workers, 2 envs per worker, 2000 train batch size:
    #   -> 2000 / 4 -> 500
    # 4 workers, 3 envs per worker, 2500 train batch size:
    #   -> 2500 / 12 -> 208.333 -> diff=4 (208 * 12 = 2496)
    #   -> workers 1, 2: 209; workers 3, 4: 208
    num_parallel_envs = self.num_envs_per_env_runner * (
        self.num_env_runners or 1
    )
    exact = self.total_train_batch_size / num_parallel_envs
    base = int(exact)
    if base == exact:
        # Divides evenly -> every worker uses the same fragment length.
        return base
    # Fractional result: low-index workers take one extra timestep until the
    # shortfall is covered.
    shortfall = self.total_train_batch_size - base * num_parallel_envs
    if (worker_index - 1) * self.num_envs_per_env_runner >= shortfall:
        return base
    return base + 1
# TODO: Make evaluation_config as read-only property and replace the current
# self.evaluation_config a private variable.
def get_evaluation_config_object(
    self,
) -> Optional["AlgorithmConfig"]:
    """Creates a full AlgorithmConfig object from `self.evaluation_config`.

    Returns:
        A fully valid AlgorithmConfig object that can be used for the evaluation
        EnvRunnerGroup. If `self` is already an evaluation config object, return
        None.
    """
    if self.in_evaluation:
        # An eval config never carries a nested eval config of its own.
        assert self.evaluation_config is None
        return None

    evaluation_config = self.evaluation_config

    # Already an AlgorithmConfig -> copy and use as-is.
    if isinstance(evaluation_config, AlgorithmConfig):
        eval_config_obj = evaluation_config.copy(copy_frozen=False)
    # Create unfrozen copy of self to be used as the to-be-returned eval
    # AlgorithmConfig.
    else:
        eval_config_obj = self.copy(copy_frozen=False)
        # Update with evaluation override settings:
        # (a plain dict or None; None -> no overrides).
        eval_config_obj.update_from_dict(evaluation_config or {})

    # Switch on the `in_evaluation` flag and remove `evaluation_config`
    # (set to None).
    eval_config_obj.in_evaluation = True
    eval_config_obj.evaluation_config = None

    # Force-set the `num_env_runners` setting to `self.evaluation_num_env_runners`.
    # Actually, the `self.evaluation_num_env_runners` is merely a convenience
    # attribute and might be set instead through:
    # `config.evaluation(evaluation_config={"num_env_runners": ...})`
    # NOTE: Applied AFTER the dict update above, so this always wins.
    eval_config_obj.num_env_runners = self.evaluation_num_env_runners

    # NOTE: The following if-block is only relevant for the old API stack.
    # For the new API stack (EnvRunners), the evaluation methods of Algorithm
    # explicitly tell each EnvRunner on each sample call, how many timesteps
    # of episodes to collect.
    # Evaluation duration unit: episodes.
    # Switch on `complete_episode` rollouts. Also, make sure
    # rollout fragments are short so we never have more than one
    # episode in one rollout.
    if self.evaluation_duration_unit == "episodes":
        eval_config_obj.batch_mode = "complete_episodes"
        eval_config_obj.rollout_fragment_length = 1
    # Evaluation duration unit: timesteps.
    # - Set `batch_mode=truncate_episodes` so we don't perform rollouts
    #   strictly along episode borders.
    # Set `rollout_fragment_length` such that desired steps are divided
    # equally amongst workers or - in "auto" duration mode - set it
    # to a reasonably small number (10), such that a single `sample()`
    # call doesn't take too much time and we can stop evaluation as soon
    # as possible after the train step is completed.
    else:
        eval_config_obj.batch_mode = "truncate_episodes"
        eval_config_obj.rollout_fragment_length = (
            # Set to a moderately small (but not too small) value in order
            # to a) not overshoot too much the parallelly running `training_step`
            # but also to b) avoid too many `sample()` remote calls.
            # 100 seems like a good middle ground.
            100
            if self.evaluation_duration == "auto"
            else int(
                math.ceil(
                    self.evaluation_duration
                    / (self.evaluation_num_env_runners or 1)
                )
            )
        )

    return eval_config_obj
def validate_train_batch_size_vs_rollout_fragment_length(self) -> None:
    """Detects mismatches for `train_batch_size` vs `rollout_fragment_length`.

    Only applicable for algorithms, whose train_batch_size should be directly
    dependent on rollout_fragment_length (synchronous sampling, on-policy PG
    algos).

    If rollout_fragment_length != "auto", makes sure that the product of
    `rollout_fragment_length` x `num_env_runners` x `num_envs_per_env_runner`
    roughly (10%) matches the provided `train_batch_size`. Otherwise, errors
    asking the user to set rollout_fragment_length to `auto` or to a matching
    value.

    Raises:
        ValueError: If there is a mismatch between user provided
            `rollout_fragment_length` and `total_train_batch_size`.
    """
    # Nothing to check with "auto" fragments or inside an eval config.
    if self.rollout_fragment_length == "auto" or self.in_evaluation:
        return
    # Timesteps gathered per synchronous sampling round.
    samples_per_round = (
        max(self.num_env_runners, 1)
        * self.num_envs_per_env_runner
        * self.rollout_fragment_length
    )
    # Smallest multiple of `samples_per_round` >= total_train_batch_size.
    achievable = samples_per_round
    while achievable < self.total_train_batch_size:
        achievable += samples_per_round
    tolerance = 0.1 * self.total_train_batch_size
    if (
        achievable - self.total_train_batch_size > tolerance
        or achievable - samples_per_round - self.total_train_batch_size
        > tolerance
    ):
        suggested_rollout_fragment_length = self.total_train_batch_size // (
            self.num_envs_per_env_runner * (self.num_env_runners or 1)
        )
        self._value_error(
            "Your desired `total_train_batch_size` "
            f"({self.total_train_batch_size}={self.num_learners} "
            f"learners x {self.train_batch_size_per_learner}) "
            "or a value 10% off of that cannot be achieved with your other "
            f"settings (num_env_runners={self.num_env_runners}; "
            f"num_envs_per_env_runner={self.num_envs_per_env_runner}; "
            f"rollout_fragment_length={self.rollout_fragment_length})! "
            "Try setting `rollout_fragment_length` to 'auto' OR to a value of "
            f"{suggested_rollout_fragment_length}."
        )
def get_torch_compile_worker_config(self):
    """Returns the TorchCompileConfig to use on workers."""
    # Imported locally to avoid pulling in torch-related modules at
    # module-import time.
    from ray.rllib.core.rl_module.torch.torch_compile_config import (
        TorchCompileConfig,
    )

    worker_compile_config = TorchCompileConfig(
        torch_dynamo_backend=self.torch_compile_worker_dynamo_backend,
        torch_dynamo_mode=self.torch_compile_worker_dynamo_mode,
    )
    return worker_compile_config
def get_default_rl_module_spec(self) -> RLModuleSpecType:
    """Returns the RLModule spec to use for this algorithm.

    Override this method in the subclass to return the RLModule spec, given
    the input framework.

    Returns:
        The RLModuleSpec (or MultiRLModuleSpec) to
        use for this algorithm's RLModule.

    Raises:
        NotImplementedError: Always, on this base class; algorithm-specific
            subclasses must override.
    """
    raise NotImplementedError
def get_default_learner_class(self) -> Union[Type["Learner"], str]:
    """Returns the Learner class to use for this algorithm.

    Override this method in the sub-class to return the Learner class type given
    the input framework.

    Returns:
        The Learner class to use for this algorithm either as a class type or as
        a string (e.g. "ray.rllib.algorithms.ppo.ppo_learner.PPOLearner").

    Raises:
        NotImplementedError: Always, on this base class; algorithm-specific
            subclasses must override.
    """
    raise NotImplementedError
    def get_rl_module_spec(
        self,
        env: Optional[EnvType] = None,
        spaces: Optional[Dict[str, Tuple[gym.Space, gym.Space]]] = None,
        inference_only: Optional[bool] = None,
    ) -> RLModuleSpec:
        """Returns the RLModuleSpec based on the given env/spaces and this config.

        Args:
            env: An optional environment instance, from which to infer the observation-
                and action spaces for the RLModule. If not provided, tries to infer
                from `spaces`, otherwise from `self.observation_space` and
                `self.action_space`. Raises an error, if no information on spaces can be
                inferred.
            spaces: Optional dict mapping ModuleIDs to 2-tuples of observation- and
                action space that should be used for the respective RLModule.
                These spaces are usually provided by an already instantiated remote
                EnvRunner (call `EnvRunner.get_spaces()` to receive this dict). If not
                provided, RLlib tries to infer this from `env`, if provided, otherwise
                from `self.observation_space` and `self.action_space`. Raises an error,
                if no information on spaces can be inferred.
            inference_only: If `True`, the returned module spec is used in an
                inference-only setting (sampling) and the RLModule can thus be built in
                its light version (if available). For example, the `inference_only`
                version of an RLModule might only contain the networks required for
                computing actions, but misses additional target- or critic networks.

        Returns:
            A new RLModuleSpec instance that can be used to build an RLModule.
        """
        # Work on a deep copy so mutations below never leak back into
        # `self.rl_module_spec`.
        rl_module_spec = copy.deepcopy(self.rl_module_spec)

        # If a MultiRLModuleSpec -> Reduce to single-agent (and assert that
        # all non DEFAULT_MODULE_IDs are `learner_only` (so they are not built on
        # EnvRunner).
        if isinstance(rl_module_spec, MultiRLModuleSpec):
            error = False
            # The default module must always be present.
            if DEFAULT_MODULE_ID not in rl_module_spec:
                error = True
            if inference_only:
                # Inference-only: extra (non-default) modules are tolerated only
                # if they are `learner_only` (they won't be built on EnvRunners).
                for mid, spec in rl_module_spec.rl_module_specs.items():
                    if mid != DEFAULT_MODULE_ID:
                        if not spec.learner_only:
                            error = True
            # Not inference-only: any module besides the default one is an error.
            elif len(rl_module_spec) > 1:
                error = True
            if error:
                raise ValueError(
                    "When calling `AlgorithmConfig.get_rl_module_spec()`, the "
                    "configuration must contain the `DEFAULT_MODULE_ID` key and all "
                    "other keys' specs must have the setting `learner_only=True`! If "
                    "you are using a more complex setup, call "
                    "`AlgorithmConfig.get_multi_rl_module_spec(...)` instead."
                )
            rl_module_spec = rl_module_spec[DEFAULT_MODULE_ID]

        # Fill in a missing observation space: `spaces` arg first, then `env`,
        # then this config's own `observation_space` setting.
        if rl_module_spec.observation_space is None:
            if spaces is not None:
                rl_module_spec.observation_space = spaces[DEFAULT_MODULE_ID][0]
            elif env is not None and isinstance(env, gym.Env):
                # Prefer the per-sub-env space of a vectorized env, if present.
                rl_module_spec.observation_space = getattr(
                    env, "single_observation_space", env.observation_space
                )
            else:
                rl_module_spec.observation_space = self.observation_space

        # Same fallback chain for a missing action space.
        if rl_module_spec.action_space is None:
            if spaces is not None:
                rl_module_spec.action_space = spaces[DEFAULT_MODULE_ID][1]
            elif env is not None and isinstance(env, gym.Env):
                rl_module_spec.action_space = getattr(
                    env, "single_action_space", env.action_space
                )
            else:
                rl_module_spec.action_space = self.action_space

        # If module_config_dict is not defined, set to our generic one.
        if rl_module_spec.model_config is None:
            rl_module_spec.model_config = self.model_config
        # Otherwise we combine the two dictionaries where settings from the
        # `RLModuleSpec` have higher priority.
        else:
            rl_module_spec.model_config = (
                self.model_config | rl_module_spec._get_model_config()
            )

        # Only override the spec's `inference_only` flag if explicitly requested.
        if inference_only is not None:
            rl_module_spec.inference_only = inference_only

        return rl_module_spec
    def get_multi_rl_module_spec(
        self,
        *,
        env: Optional[EnvType] = None,
        spaces: Optional[Dict[PolicyID, Tuple[gym.Space, gym.Space]]] = None,
        inference_only: bool = False,
        # @HybridAPIStack
        policy_dict: Optional[Dict[str, PolicySpec]] = None,
        single_agent_rl_module_spec: Optional[RLModuleSpec] = None,
    ) -> MultiRLModuleSpec:
        """Returns the MultiRLModuleSpec based on the given env/spaces.

        Args:
            env: An optional environment instance, from which to infer the different
                spaces for the individual RLModules. If not provided, tries to infer
                from `spaces`, otherwise from `self.observation_space` and
                `self.action_space`. Raises an error, if no information on spaces can be
                inferred.
            spaces: Optional dict mapping ModuleIDs to 2-tuples of observation- and
                action space that should be used for the respective RLModule.
                These spaces are usually provided by an already instantiated remote
                EnvRunner (call `EnvRunner.get_spaces()`). If not provided, tries
                to infer from `env`, otherwise from `self.observation_space` and
                `self.action_space`. Raises an error, if no information on spaces can be
                inferred.
            inference_only: If `True`, the returned module spec is used in an
                inference-only setting (sampling) and the RLModule can thus be built in
                its light version (if available). For example, the `inference_only`
                version of an RLModule might only contain the networks required for
                computing actions, but misses additional target- or critic networks.
                Also, if `True`, the returned spec does NOT contain those (sub)
                RLModuleSpecs that have their `learner_only` flag set to True.
            policy_dict: Optional (hybrid API stack) mapping of policy IDs to
                PolicySpecs; computed via `self.get_multi_agent_setup()` if None.
            single_agent_rl_module_spec: Optional single-agent spec used as the
                per-module template when the configured spec doesn't provide one.

        Returns:
            A new MultiRLModuleSpec instance that can be used to build a MultiRLModule.
        """
        # TODO (Kourosh,sven): When we replace policy entirely there is no need for
        # this function to map policy_dict to multi_rl_module_specs anymore. The module
        # spec is directly given by the user or inferred from env and spaces.
        if policy_dict is None:
            policy_dict, _ = self.get_multi_agent_setup(env=env, spaces=spaces)

        # TODO (Kourosh): Raise an error if the config is not frozen
        # If the module is single-agent convert it to multi-agent spec

        # The default RLModuleSpec (might be multi-agent or single-agent).
        default_rl_module_spec = self.get_default_rl_module_spec()
        # The currently configured RLModuleSpec (might be multi-agent or single-agent).
        # If None, use the default one.
        current_rl_module_spec = self._rl_module_spec or default_rl_module_spec

        # Algorithm is currently setup as a single-agent one.
        if isinstance(current_rl_module_spec, RLModuleSpec):
            # Use either the provided `single_agent_rl_module_spec` (a
            # RLModuleSpec), the currently configured one of this
            # AlgorithmConfig object, or the default one.
            single_agent_rl_module_spec = (
                single_agent_rl_module_spec or current_rl_module_spec
            )
            single_agent_rl_module_spec.inference_only = inference_only
            # Now construct the proper MultiRLModuleSpec.
            # Each policy ID gets its own deep copy of the single-agent spec.
            multi_rl_module_spec = MultiRLModuleSpec(
                rl_module_specs={
                    k: copy.deepcopy(single_agent_rl_module_spec)
                    for k in policy_dict.keys()
                },
            )
        # Algorithm is currently setup as a multi-agent one.
        else:
            # The user currently has a MultiAgentSpec setup (either via
            # self._rl_module_spec or the default spec of this AlgorithmConfig).
            assert isinstance(current_rl_module_spec, MultiRLModuleSpec)

            # Default is single-agent but the user has provided a multi-agent spec
            # so the use-case is multi-agent.
            if isinstance(default_rl_module_spec, RLModuleSpec):
                # The individual (single-agent) module specs are defined by the user
                # in the currently setup MultiRLModuleSpec -> Use that
                # RLModuleSpec.
                if isinstance(current_rl_module_spec.rl_module_specs, RLModuleSpec):
                    single_agent_spec = single_agent_rl_module_spec or (
                        current_rl_module_spec.rl_module_specs
                    )
                    single_agent_spec.inference_only = inference_only
                    module_specs = {
                        k: copy.deepcopy(single_agent_spec) for k in policy_dict.keys()
                    }
                # The individual (single-agent) module specs have not been configured
                # via this AlgorithmConfig object -> Use provided single-agent spec or
                # the default spec (which is also a RLModuleSpec in this
                # case).
                else:
                    single_agent_spec = (
                        single_agent_rl_module_spec or default_rl_module_spec
                    )
                    single_agent_spec.inference_only = inference_only
                    # Union of the policy IDs and the configured per-module specs;
                    # fall back to `single_agent_spec` for IDs without a spec.
                    module_specs = {
                        k: copy.deepcopy(
                            current_rl_module_spec.rl_module_specs.get(
                                k, single_agent_spec
                            )
                        )
                        for k in (
                            policy_dict | current_rl_module_spec.rl_module_specs
                        ).keys()
                    }
                # Now construct the proper MultiRLModuleSpec.
                # We need to infer the multi-agent class from `current_rl_module_spec`
                # and fill in the module_specs dict.
                multi_rl_module_spec = current_rl_module_spec.__class__(
                    multi_rl_module_class=current_rl_module_spec.multi_rl_module_class,
                    rl_module_specs=module_specs,
                    modules_to_load=current_rl_module_spec.modules_to_load,
                    load_state_path=current_rl_module_spec.load_state_path,
                    model_config=current_rl_module_spec.model_config,
                )
            # Default is multi-agent and user wants to override it -> Don't use the
            # default.
            else:
                # User provided an override RLModuleSpec -> Use this to
                # construct the individual RLModules within the MultiRLModuleSpec.
                if single_agent_rl_module_spec is not None:
                    pass
                # User has NOT provided an override RLModuleSpec.
                else:
                    # But the currently setup multi-agent spec has a SingleAgentRLModule
                    # spec defined -> Use that to construct the individual RLModules
                    # within the MultiRLModuleSpec.
                    if isinstance(current_rl_module_spec.rl_module_specs, RLModuleSpec):
                        # The individual module specs are not given, it is given as one
                        # RLModuleSpec to be re-used for all
                        single_agent_rl_module_spec = (
                            current_rl_module_spec.rl_module_specs
                        )
                    # The currently set up multi-agent spec has NO
                    # RLModuleSpec in it -> Error (there is no way we can
                    # infer this information from anywhere at this point).
                    else:
                        raise ValueError(
                            "We have a MultiRLModuleSpec "
                            f"({current_rl_module_spec}), but no "
                            "`RLModuleSpec`s to compile the individual "
                            "RLModules' specs! Use "
                            "`AlgorithmConfig.get_multi_rl_module_spec("
                            "policy_dict=.., rl_module_spec=..)`."
                        )

                single_agent_rl_module_spec.inference_only = inference_only

                # Now construct the proper MultiRLModuleSpec.
                multi_rl_module_spec = current_rl_module_spec.__class__(
                    multi_rl_module_class=current_rl_module_spec.multi_rl_module_class,
                    rl_module_specs={
                        k: copy.deepcopy(single_agent_rl_module_spec)
                        for k in policy_dict.keys()
                    },
                    modules_to_load=current_rl_module_spec.modules_to_load,
                    load_state_path=current_rl_module_spec.load_state_path,
                    model_config=current_rl_module_spec.model_config,
                )

        # Fill in the missing values from the specs that we already have. By combining
        # PolicySpecs and the default RLModuleSpec.
        for module_id in policy_dict | multi_rl_module_spec.rl_module_specs:
            # Remove/skip `learner_only=True` RLModules if `inference_only` is True.
            module_spec = multi_rl_module_spec.rl_module_specs[module_id]
            if inference_only and module_spec.learner_only:
                multi_rl_module_spec.remove_modules(module_id)
                continue

            # Infer a missing `module_class` from the default spec (single-agent
            # default, its shared single spec, or its per-module spec, in order).
            if module_spec.module_class is None:
                if isinstance(default_rl_module_spec, RLModuleSpec):
                    module_spec.module_class = default_rl_module_spec.module_class
                elif isinstance(default_rl_module_spec.rl_module_specs, RLModuleSpec):
                    module_class = default_rl_module_spec.rl_module_specs.module_class
                    # This should be already checked in validate() but we check it
                    # again here just in case
                    if module_class is None:
                        raise ValueError(
                            "The default rl_module spec cannot have an empty "
                            "module_class under its RLModuleSpec."
                        )
                    module_spec.module_class = module_class
                elif module_id in default_rl_module_spec.rl_module_specs:
                    module_spec.module_class = default_rl_module_spec.rl_module_specs[
                        module_id
                    ].module_class
                else:
                    raise ValueError(
                        f"Module class for module {module_id} cannot be inferred. "
                        f"It is neither provided in the rl_module_spec that "
                        "is passed in nor in the default module spec used in "
                        "the algorithm."
                    )
            # Same inference chain for a missing `catalog_class`.
            if module_spec.catalog_class is None:
                if isinstance(default_rl_module_spec, RLModuleSpec):
                    module_spec.catalog_class = default_rl_module_spec.catalog_class
                elif isinstance(default_rl_module_spec.rl_module_specs, RLModuleSpec):
                    catalog_class = default_rl_module_spec.rl_module_specs.catalog_class
                    module_spec.catalog_class = catalog_class
                elif module_id in default_rl_module_spec.rl_module_specs:
                    module_spec.catalog_class = default_rl_module_spec.rl_module_specs[
                        module_id
                    ].catalog_class
                else:
                    raise ValueError(
                        f"Catalog class for module {module_id} cannot be inferred. "
                        f"It is neither provided in the rl_module_spec that "
                        "is passed in nor in the default module spec used in "
                        "the algorithm."
                    )
            # TODO (sven): Find a good way to pack module specific parameters from
            # the algorithms into the `model_config_dict`.
            # Fill in missing spaces from the matching PolicySpec (falling back to
            # the DEFAULT_MODULE_ID's PolicySpec).
            if (
                module_spec.observation_space is None
                or module_spec.action_space is None
            ):
                policy_spec = policy_dict.get(
                    module_id, policy_dict.get(DEFAULT_MODULE_ID)
                )
                if policy_spec is not None:
                    if module_spec.observation_space is None:
                        module_spec.observation_space = policy_spec.observation_space
                    if module_spec.action_space is None:
                        module_spec.action_space = policy_spec.action_space
            # In case the `RLModuleSpec` does not have a model config dict, we use the
            # the one defined by the auto keys and the `model_config_dict` arguments in
            # `self.rl_module()`.
            if module_spec.model_config is None:
                module_spec.model_config = self.model_config
            # Otherwise we combine the two dictionaries where settings from the
            # `RLModuleSpec` have higher priority.
            else:
                module_spec.model_config = (
                    self.model_config | module_spec._get_model_config()
                )

        return multi_rl_module_spec
def __setattr__(self, key, value):
"""Gatekeeper in case we are in frozen state and need to error."""
# If we are frozen, do not allow to set any attributes anymore.
if hasattr(self, "_is_frozen") and self._is_frozen:
# TODO: Remove `simple_optimizer` entirely.
# Remove need to set `worker_index` in RolloutWorker's c'tor.
if key not in ["simple_optimizer", "worker_index", "_is_frozen"]:
raise AttributeError(
f"Cannot set attribute ({key}) of an already frozen "
"AlgorithmConfig!"
)
# Backward compatibility for checkpoints taken with wheels, in which
# `self.rl_module_spec` was still settable (now it's a property).
if key == "rl_module_spec":
key = "_rl_module_spec"
super().__setattr__(key, value)
def __getitem__(self, item):
"""Shim method to still support accessing properties by key lookup.
This way, an AlgorithmConfig object can still be used as if a dict, e.g.
by Ray Tune.
Examples:
.. testcode::
from ray.rllib.algorithms.algorithm_config import AlgorithmConfig
config = AlgorithmConfig()
print(config["lr"])
.. testoutput::
0.001
"""
# TODO: Uncomment this once all algorithms use AlgorithmConfigs under the
# hood (as well as Ray Tune).
# if log_once("algo_config_getitem"):
# logger.warning(
# "AlgorithmConfig objects should NOT be used as dict! "
# f"Try accessing `{item}` directly as a property."
# )
# In case user accesses "old" keys, e.g. "num_workers", which need to
# be translated to their correct property names.
item = self._translate_special_keys(item)
return getattr(self, item)
def __setitem__(self, key, value):
# TODO: Remove comments once all methods/functions only support
# AlgorithmConfigs and there is no more ambiguity anywhere in the code
# on whether an AlgorithmConfig is used or an old python config dict.
# raise AttributeError(
# "AlgorithmConfig objects should not have their values set like dicts"
# f"(`config['{key}'] = {value}`), "
# f"but via setting their properties directly (config.{prop} = {value})."
# )
if key == "multiagent":
raise AttributeError(
"Cannot set `multiagent` key in an AlgorithmConfig!\nTry setting "
"the multi-agent components of your AlgorithmConfig object via the "
"`multi_agent()` method and its arguments.\nE.g. `config.multi_agent("
"policies=.., policy_mapping_fn.., policies_to_train=..)`."
)
super().__setattr__(key, value)
def __contains__(self, item) -> bool:
"""Shim method to help pretend we are a dict."""
prop = self._translate_special_keys(item, warn_deprecated=False)
return hasattr(self, prop)
def get(self, key, default=None):
"""Shim method to help pretend we are a dict."""
prop = self._translate_special_keys(key, warn_deprecated=False)
return getattr(self, prop, default)
def pop(self, key, default=None):
"""Shim method to help pretend we are a dict."""
return self.get(key, default)
def keys(self):
"""Shim method to help pretend we are a dict."""
return self.to_dict().keys()
def values(self):
"""Shim method to help pretend we are a dict."""
return self.to_dict().values()
def items(self):
"""Shim method to help pretend we are a dict."""
return self.to_dict().items()
@property
def _model_config_auto_includes(self) -> Dict[str, Any]:
"""Defines which `AlgorithmConfig` settings/properties should be
auto-included into `self.model_config`.
The dictionary in this property contains the default configuration of an
algorithm. Together with the `self._model`, this method is used to
define the configuration sent to the `RLModule`.
Returns:
A dictionary with the automatically included properties/settings of this
`AlgorithmConfig` object into `self.model_config`.
"""
return {}
# -----------------------------------------------------------
# Various validation methods for different types of settings.
# -----------------------------------------------------------
def _value_error(self, errmsg) -> None:
msg = errmsg + (
"\nTo suppress all validation errors, set "
"`config.experimental(_validate_config=False)` at your own risk."
)
if self._validate_config:
raise ValueError(msg)
else:
logger.warning(errmsg)
def _validate_env_runner_settings(self) -> None:
allowed_vectorize_modes = set(
list(gym.VectorizeMode) + [mode.value for mode in gym.VectorizeMode]
)
if self.gym_env_vectorize_mode not in allowed_vectorize_modes:
self._value_error(
f"`gym_env_vectorize_mode` ({self.gym_env_vectorize_mode}) "
"must be a member of `gymnasium.VectorizeMode`! "
f"Allowed values are {allowed_vectorize_modes}."
)
def _validate_callbacks_settings(self) -> None:
"""Validates callbacks settings."""
# Old API stack:
# - self.callbacks_cls must be a subclass of RLlibCallback.
# - All self.callbacks_... attributes must be None.
if not self.enable_env_runner_and_connector_v2:
if (
self.callbacks_on_environment_created is not None
or self.callbacks_on_algorithm_init is not None
or self.callbacks_on_train_result is not None
or self.callbacks_on_evaluate_start is not None
or self.callbacks_on_evaluate_end is not None
or self.callbacks_on_sample_end is not None
or self.callbacks_on_environment_created is not None
or self.callbacks_on_episode_created is not None
or self.callbacks_on_episode_start is not None
or self.callbacks_on_episode_step is not None
or self.callbacks_on_episode_end is not None
or self.callbacks_on_checkpoint_loaded is not None
or self.callbacks_on_env_runners_recreated is not None
or self.callbacks_on_offline_eval_runners_recreated is not None
):
self._value_error(
"Config settings `config.callbacks(on_....=lambda ..)` aren't "
"supported on the old API stack! Switch to the new API stack "
"through `config.api_stack(enable_env_runner_and_connector_v2=True,"
" enable_rl_module_and_learner=True)`."
)
def _validate_framework_settings(self) -> None:
"""Validates framework settings and checks whether framework is installed."""
_tf1, _tf, _tfv = None, None, None
_torch = None
if self.framework_str not in {"tf", "tf2"} and self.framework_str != "torch":
return
elif self.framework_str in {"tf", "tf2"}:
_tf1, _tf, _tfv = try_import_tf()
else:
_torch, _ = try_import_torch()
# Can not use "tf" with learner API.
if self.framework_str == "tf" and self.enable_rl_module_and_learner:
self._value_error(
"Cannot use `framework=tf` with the new API stack! Either switch to tf2"
" via `config.framework('tf2')` OR disable the new API stack via "
"`config.api_stack(enable_rl_module_and_learner=False)`."
)
# Check if torch framework supports torch.compile.
if (
_torch is not None
and self.framework_str == "torch"
and version.parse(_torch.__version__) < TORCH_COMPILE_REQUIRED_VERSION
and (self.torch_compile_learner or self.torch_compile_worker)
):
self._value_error("torch.compile is only supported from torch 2.0.0")
# Make sure the Learner's torch-what-to-compile setting is supported.
if self.torch_compile_learner:
from ray.rllib.core.learner.torch.torch_learner import (
TorchCompileWhatToCompile,
)
if self.torch_compile_learner_what_to_compile not in [
TorchCompileWhatToCompile.FORWARD_TRAIN,
TorchCompileWhatToCompile.COMPLETE_UPDATE,
]:
self._value_error(
f"`config.torch_compile_learner_what_to_compile` must be one of ["
f"TorchCompileWhatToCompile.forward_train, "
f"TorchCompileWhatToCompile.complete_update] but is"
f" {self.torch_compile_learner_what_to_compile}"
)
self._check_if_correct_nn_framework_installed(_tf1, _tf, _torch)
self._resolve_tf_settings(_tf1, _tfv)
def _validate_resources_settings(self):
"""Checks, whether resources related settings make sense."""
pass
def _validate_multi_agent_settings(self):
"""Checks, whether multi-agent related settings make sense."""
# Check `policies_to_train` for invalid entries.
if isinstance(self.policies_to_train, (list, set, tuple)):
for pid in self.policies_to_train:
if pid not in self.policies:
self._value_error(
"`config.multi_agent(policies_to_train=..)` contains "
f"policy ID ({pid}) that was not defined in "
f"`config.multi_agent(policies=..)`!"
)
    def _validate_evaluation_settings(self):
        """Checks, whether evaluation related settings make sense.

        Deprecated or contradictory settings raise (via `self._value_error`);
        suspicious-but-valid combinations only log a warning.
        """
        # Async evaluation has been deprecated. Use "simple" parallel mode instead
        # (which is also async):
        # `config.evaluation(evaluation_parallel_to_training=True)`.
        if self.enable_async_evaluation is True:
            self._value_error(
                "`enable_async_evaluation` has been deprecated (you should set this to "
                "False)! Use `config.evaluation(evaluation_parallel_to_training=True)` "
                "instead."
            )

        # If `evaluation_num_env_runners` > 0, warn if `evaluation_interval` is 0 or
        # None.
        if self.evaluation_num_env_runners > 0 and not self.evaluation_interval:
            logger.warning(
                f"You have specified {self.evaluation_num_env_runners} "
                "evaluation workers, but your `evaluation_interval` is 0 or None! "
                "Therefore, evaluation doesn't occur automatically with each"
                " call to `Algorithm.train()`. Instead, you have to call "
                "`Algorithm.evaluate()` manually in order to trigger an "
                "evaluation run."
            )

        # If `evaluation_num_env_runners=0` and
        # `evaluation_parallel_to_training=True`, warn that you need
        # at least one remote eval worker for parallel training and
        # evaluation, and set `evaluation_parallel_to_training` to False.
        if (
            self.evaluation_num_env_runners == 0
            and self.num_offline_eval_runners == 0
            and self.evaluation_parallel_to_training
        ):
            self._value_error(
                "`evaluation_parallel_to_training` can only be done if "
                "`evaluation_num_env_runners` > 0! Try setting "
                "`config.evaluation_parallel_to_training` to False."
            )

        # If `evaluation_duration=auto`, error if
        # `evaluation_parallel_to_training=False`.
        if self.evaluation_duration == "auto":
            if not self.evaluation_parallel_to_training:
                self._value_error(
                    "`evaluation_duration=auto` not supported for "
                    "`evaluation_parallel_to_training=False`!"
                )
            # "auto" always samples by timesteps; an "episodes" unit setting is
            # silently ignored, so warn the user about it.
            elif self.evaluation_duration_unit == "episodes":
                logger.warning(
                    "When using `config.evaluation_duration='auto'`, the sampling unit "
                    "used is always 'timesteps'! You have set "
                    "`config.evaluation_duration_unit='episodes'`, which is ignored."
                )
        # Make sure, `evaluation_duration` is an int otherwise.
        elif (
            not isinstance(self.evaluation_duration, int)
            or self.evaluation_duration <= 0
        ):
            self._value_error(
                f"`evaluation_duration` ({self.evaluation_duration}) must be an "
                f"int and >0!"
            )
def _validate_input_settings(self):
"""Checks, whether input related settings make sense."""
if self.input_ == "sampler" and self.off_policy_estimation_methods:
self._value_error(
"Off-policy estimation methods can only be used if the input is a "
"dataset. We currently do not support applying off_policy_estimation_"
"method on a sampler input."
)
if self.input_ == "dataset":
# If you need to read a Ray dataset set the parallelism and
# num_cpus_per_read_task from rollout worker settings
self.input_config["num_cpus_per_read_task"] = self.num_cpus_per_env_runner
if self.in_evaluation:
# If using dataset for evaluation, the parallelism gets set to
# evaluation_num_env_runners for backward compatibility and num_cpus
# gets set to num_cpus_per_env_runner from rollout worker. User only
# needs to set evaluation_num_env_runners.
self.input_config["parallelism"] = self.evaluation_num_env_runners or 1
else:
# If using dataset for training, the parallelism and num_cpus gets set
# based on rollout worker parameters. This is for backwards
# compatibility for now. User only needs to set num_env_runners.
self.input_config["parallelism"] = self.num_env_runners or 1
    def _validate_new_api_stack_settings(self):
        """Checks, whether settings related to the new API stack make sense.

        With the new stack disabled, only warns/errors about new-stack-only
        settings; with it enabled, validates stack consistency, `model`,
        `lr`, `exploration_config`, and ModelV2-only options.
        """
        # Old API stack checks.
        if not self.enable_rl_module_and_learner:
            # Throw a warning if the user has used `self.rl_module(rl_module_spec=...)`
            # but has not enabled the new API stack at the same time.
            if self._rl_module_spec is not None:
                logger.warning(
                    "You have setup a RLModuleSpec (via calling "
                    "`config.rl_module(...)`), but have not enabled the new API stack. "
                    "To enable it, call `config.api_stack(enable_rl_module_and_learner="
                    "True)`."
                )
            # Throw a warning if the user has used `self.training(learner_class=...)`
            # but has not enabled the new API stack at the same time.
            if self._learner_class is not None:
                logger.warning(
                    "You specified a custom Learner class (via "
                    f"`AlgorithmConfig.training(learner_class={self._learner_class})`, "
                    f"but have the new API stack disabled. You need to enable it via "
                    "`AlgorithmConfig.api_stack(enable_rl_module_and_learner=True)`."
                )
            # User is using the new EnvRunners, but forgot to switch on
            # `enable_rl_module_and_learner`.
            if self.enable_env_runner_and_connector_v2:
                self._value_error(
                    "You are using the new API stack EnvRunners (SingleAgentEnvRunner "
                    "or MultiAgentEnvRunner), but have forgotten to switch on the new "
                    "API stack! Try setting "
                    "`config.api_stack(enable_rl_module_and_learner=True)`."
                )
            # Early out. The rest of this method is only for
            # `enable_rl_module_and_learner=True`.
            return

        # Warn about new API stack on by default (once per algo class).
        if log_once(f"{self.algo_class.__name__}_on_new_api_stack"):
            logger.warning(
                f"You are running {self.algo_class.__name__} on the new API stack! "
                "This is the new default behavior for this algorithm. If you don't "
                "want to use the new API stack, set `config.api_stack("
                "enable_rl_module_and_learner=False,"
                "enable_env_runner_and_connector_v2=False)`. For a detailed migration "
                "guide, see here: https://docs.ray.io/en/master/rllib/new-api-stack-migration-guide.html"  # noqa
            )

        # Disabled hybrid API stack. Now, both `enable_rl_module_and_learner` and
        # `enable_env_runner_and_connector_v2` must be True or both False.
        if not self.enable_env_runner_and_connector_v2:
            self._value_error(
                "Setting `enable_rl_module_and_learner` to True and "
                "`enable_env_runner_and_connector_v2` to False ('hybrid API stack'"
                ") is not longer supported! Set both to True (new API stack) or both "
                "to False (old API stack), instead."
            )

        # For those users that accidentally use the new API stack (because it's the
        # default now for many algos), we need to make sure they are warned.
        # Any deviation of `self.model` from MODEL_DEFAULTS (besides
        # `vf_share_layers`) indicates an (old-stack) custom model config.
        try:
            tree.assert_same_structure(self.model, MODEL_DEFAULTS)
            # Create copies excluding the specified key
            check(
                {k: v for k, v in self.model.items() if k != "vf_share_layers"},
                {k: v for k, v in MODEL_DEFAULTS.items() if k != "vf_share_layers"},
            )
        except Exception:
            logger.warning(
                "You configured a custom `model` config (probably through calling "
                "config.training(model=..), whereas your config uses the new API "
                "stack! In order to switch off the new API stack, set in your config: "
                "`config.api_stack(enable_rl_module_and_learner=False, "
                "enable_env_runner_and_connector_v2=False)`. If you DO want to use "
                "the new API stack, configure your model, instead, through: "
                "`config.rl_module(model_config={..})`."
            )

        # LR-schedule checking.
        Scheduler.validate(
            fixed_value_or_schedule=self.lr,
            setting_name="lr",
            description="learning rate",
        )

        # This is not compatible with RLModules, which all have a method
        # `forward_exploration` to specify custom exploration behavior.
        if self.exploration_config:
            self._value_error(
                "When the RLModule API is enabled, exploration_config can not be "
                "set. If you want to implement custom exploration behaviour, "
                "please modify the `forward_exploration` method of the "
                "RLModule at hand. On configs that have a default exploration "
                "config, this must be done via "
                "`config.exploration_config={}`."
            )

        # Template error message for ModelV2-only options that cannot be used
        # with the new API stack.
        not_compatible_w_rlm_msg = (
            "Cannot use `{}` option with the new API stack (RLModule and "
            "Learner APIs)! `{}` is part of the ModelV2 API and Policy API,"
            " which are not compatible with the new API stack. You can either "
            "deactivate the new stack via `config.api_stack( "
            "enable_rl_module_and_learner=False)`,"
            "or use the new stack (incl. RLModule API) and implement your "
            "custom model as an RLModule."
        )
        if self.model["custom_model"] is not None:
            self._value_error(
                not_compatible_w_rlm_msg.format("custom_model", "custom_model")
            )
        if self.model["custom_model_config"] != {}:
            self._value_error(
                not_compatible_w_rlm_msg.format(
                    "custom_model_config", "custom_model_config"
                )
            )
    # TODO (sven): Once everything is on the new API stack, we won't need this method
    #  anymore.
    def _validate_to_be_deprecated_settings(self):
        """Validates old-API-stack settings that are slated for deprecation.

        Covers `render_env`, `preprocessor_pref`, preprocessor/action-flattening
        flags (propagated into `self.model`), and the `simple_optimizer`
        multi-GPU auto-resolution logic.
        """
        # `render_env` is deprecated on new API stack.
        if self.enable_env_runner_and_connector_v2 and self.render_env is not False:
            deprecation_warning(
                old="AlgorithmConfig.render_env",
                help="The `render_env` setting is not supported on the new API stack! "
                "In order to log videos to WandB (or other loggers), take a look at "
                "this example here: "
                "https://github.com/ray-project/ray/blob/master/rllib/examples/envs/env_rendering_and_recording.py",  # noqa
            )

        if self.preprocessor_pref not in ["rllib", "deepmind", None]:
            self._value_error(
                "`config.preprocessor_pref` must be either 'rllib', 'deepmind' or None!"
            )

        # Check model config.
        # If no preprocessing, propagate into model's config as well
        # (so model knows whether inputs are preprocessed or not).
        if self._disable_preprocessor_api is True:
            self.model["_disable_preprocessor_api"] = True
        # If no action flattening, propagate into model's config as well
        # (so model knows whether action inputs are already flattened or not).
        if self._disable_action_flattening is True:
            self.model["_disable_action_flattening"] = True
        if self.model.get("custom_preprocessor"):
            deprecation_warning(
                old="AlgorithmConfig.training(model={'custom_preprocessor': ...})",
                help="Custom preprocessors are deprecated, "
                "since they sometimes conflict with the built-in "
                "preprocessors for handling complex observation spaces. "
                "Please use wrapper classes around your environment "
                "instead.",
                error=True,
            )

        # Multi-GPU settings.
        # `simple_optimizer` may be True, False, or the DEPRECATED_VALUE sentinel
        # (meaning: not set by the user -> auto-resolve below).
        if self.simple_optimizer is True:
            pass
        # Multi-GPU setting: Must use MultiGPUTrainOneStep.
        elif not self.enable_rl_module_and_learner and self.num_gpus > 1:
            # TODO: AlphaStar uses >1 GPUs differently (1 per policy actor), so this is
            #  ok for tf2 here.
            #  Remove this hacky check, once we have fully moved to the Learner API.
            if self.framework_str == "tf2" and type(self).__name__ != "AlphaStar":
                self._value_error(
                    "`num_gpus` > 1 not supported yet for "
                    f"framework={self.framework_str}!"
                )
            elif self.simple_optimizer is True:
                self._value_error(
                    "Cannot use `simple_optimizer` if `num_gpus` > 1! "
                    "Consider not setting `simple_optimizer` in your config."
                )
            self.simple_optimizer = False
        # Auto-setting: Use simple-optimizer for tf-eager or multiagent,
        # otherwise: MultiGPUTrainOneStep (if supported by the algo's execution
        # plan).
        elif self.simple_optimizer == DEPRECATED_VALUE:
            # tf-eager: Must use simple optimizer.
            if self.framework_str not in ["tf", "torch"]:
                self.simple_optimizer = True
            # Multi-agent case: Try using MultiGPU optimizer (only
            # if all policies used are DynamicTFPolicies or TorchPolicies).
            elif self.is_multi_agent:
                from ray.rllib.policy.dynamic_tf_policy import DynamicTFPolicy
                from ray.rllib.policy.torch_policy import TorchPolicy

                default_policy_cls = None
                if self.algo_class:
                    default_policy_cls = self.algo_class.get_default_policy_class(self)
                policies = self.policies
                # Normalize policies into PolicySpec objects (tuples/lists become
                # PolicySpecs; a non-dict collection means all-default specs).
                policy_specs = (
                    [
                        PolicySpec(*spec) if isinstance(spec, (tuple, list)) else spec
                        for spec in policies.values()
                    ]
                    if isinstance(policies, dict)
                    else [PolicySpec() for _ in policies]
                )
                # Any policy without a (DynamicTF/Torch) policy class forces the
                # simple optimizer.
                if any(
                    (spec.policy_class or default_policy_cls) is None
                    or not issubclass(
                        spec.policy_class or default_policy_cls,
                        (DynamicTFPolicy, TorchPolicy),
                    )
                    for spec in policy_specs
                ):
                    self.simple_optimizer = True
                else:
                    self.simple_optimizer = False
            else:
                self.simple_optimizer = False
        # User manually set simple-optimizer to False -> Error if tf-eager.
        elif self.simple_optimizer is False:
            if self.framework_str == "tf2":
                self._value_error(
                    "`simple_optimizer=False` not supported for "
                    f"config.framework({self.framework_str})!"
                )
def _validate_offline_settings(self):
# If a user does not have an environment and cannot run evaluation,
# or does not want to run evaluation, she needs to provide at least
# action and observation spaces. Note, we require here the spaces,
# i.e. a user cannot provide an environment instead because we do
# not want to create the environment to receive spaces.
if (
self.is_offline
and not self.is_online
and (
not (self.evaluation_num_env_runners > 0 or self.evaluation_interval)
and (self.action_space is None or self.observation_space is None)
)
):
self._value_error(
"If no evaluation should be run, `action_space` and "
"`observation_space` must be provided."
)
if self.ignore_final_observation and self.algo_class.__name__ != "BC":
logger.warning(
"`ignore_final_observation=True` (zeros-out truncation observations), "
"but the algorithm isn't `BC`. It is recommended to use this "
"setting only with `BC`, b/c other RL algorithms rely on truncation-"
"observations due to value function estimates."
)
from ray.rllib.offline.offline_data import OfflineData
from ray.rllib.offline.offline_prelearner import OfflinePreLearner
if self.offline_data_class and not issubclass(
self.offline_data_class, OfflineData
):
self._value_error(
"Unknown `offline_data_class`. OfflineData class needs to inherit "
"from `OfflineData` class."
)
if self.prelearner_class and not issubclass(
self.prelearner_class, OfflinePreLearner
):
self._value_error(
"Unknown `prelearner_class`. PreLearner class needs to inherit "
"from `OfflinePreLearner` class."
)
from ray.rllib.utils.replay_buffers.episode_replay_buffer import (
EpisodeReplayBuffer,
)
if self.prelearner_buffer_class and not issubclass(
self.prelearner_buffer_class, EpisodeReplayBuffer
):
self._value_error(
"Unknown `prelearner_buffer_class`. The buffer class for the "
"prelearner needs to inherit from `EpisodeReplayBuffer`. "
"Specifically it needs to store and sample lists of "
"`Single-/MultiAgentEpisode`s."
)
if self.input_read_batch_size and not (
self.input_read_episodes or self.input_read_sample_batches
):
self._value_error(
"Setting `input_read_batch_size` is only allowed in case of a "
"dataset that holds either `EpisodeType` or `BatchType` data (i.e. "
"rows that contains multiple timesteps), but neither "
"`input_read_episodes` nor `input_read_sample_batches` is set to "
"`True`."
)
if (
self.output
and self.output_write_episodes
and self.batch_mode != "complete_episodes"
):
self._value_error(
"When recording episodes only complete episodes should be "
"recorded (i.e. `batch_mode=='complete_episodes'`). Otherwise "
"recorded episodes cannot be read in for training."
)
# Offline evaluation.
from ray.rllib.offline.offline_policy_evaluation_runner import (
OfflinePolicyEvaluationTypes,
)
offline_eval_types = list(OfflinePolicyEvaluationTypes)
if (
self.offline_evaluation_type
and self.offline_evaluation_type != "eval_loss"
and self.offline_evaluation_type not in OfflinePolicyEvaluationTypes
):
self._value_error(
f"Unknown offline evaluation type: {self.offline_evaluation_type}."
"Available types of offline evaluation are either `'eval_loss' to evaluate "
f"the training loss on a validation dataset or {offline_eval_types}."
)
from ray.rllib.offline.offline_evaluation_runner import OfflineEvaluationRunner
if self.offline_eval_runner_class and not issubclass(
self.offline_eval_runner_class, OfflineEvaluationRunner
):
self._value_error(
"Unknown `offline_eval_runner_class`. OfflineEvaluationRunner class needs to inherit "
"from `OfflineEvaluationRunner` class."
)
@property
def is_online(self) -> bool:
"""Defines if this config is for online RL.
Note, a config can be for on- and offline training at the same time.
"""
return self._is_online
@property
def is_offline(self) -> bool:
"""Defines, if this config is for offline RL."""
return (
# Does the user provide any input path/class?
bool(self.input_)
# Is it a real string path or list of such paths.
and (
isinstance(self.input_, str)
or (isinstance(self.input_, list) and isinstance(self.input_[0], str))
)
# Could be old stack - which is considered very differently.
and self.input_ != "sampler"
and self.enable_rl_module_and_learner
)
@staticmethod
def _serialize_dict(config):
# Serialize classes to classpaths:
if "callbacks_class" in config:
config["callbacks"] = config.pop("callbacks_class")
if "class" in config:
config["class"] = serialize_type(config["class"])
config["callbacks"] = serialize_type(config["callbacks"])
config["sample_collector"] = serialize_type(config["sample_collector"])
if isinstance(config["env"], type):
config["env"] = serialize_type(config["env"])
if "replay_buffer_config" in config and (
isinstance(config["replay_buffer_config"].get("type"), type)
):
config["replay_buffer_config"]["type"] = serialize_type(
config["replay_buffer_config"]["type"]
)
if isinstance(config["exploration_config"].get("type"), type):
config["exploration_config"]["type"] = serialize_type(
config["exploration_config"]["type"]
)
if isinstance(config["model"].get("custom_model"), type):
config["model"]["custom_model"] = serialize_type(
config["model"]["custom_model"]
)
# List'ify `policies`, iff a set or tuple (these types are not JSON'able).
ma_config = config.get("multiagent")
if ma_config is not None:
if isinstance(ma_config.get("policies"), (set, tuple)):
ma_config["policies"] = list(ma_config["policies"])
# Do NOT serialize functions/lambdas.
if ma_config.get("policy_mapping_fn"):
ma_config["policy_mapping_fn"] = NOT_SERIALIZABLE
if ma_config.get("policies_to_train"):
ma_config["policies_to_train"] = NOT_SERIALIZABLE
# However, if these "multiagent" settings have been provided directly
# on the top-level (as they should), we override the settings under
# "multiagent". Note that the "multiagent" key should no longer be used anyways.
if isinstance(config.get("policies"), (set, tuple)):
config["policies"] = list(config["policies"])
# Do NOT serialize functions/lambdas.
if config.get("policy_mapping_fn"):
config["policy_mapping_fn"] = NOT_SERIALIZABLE
if config.get("policies_to_train"):
config["policies_to_train"] = NOT_SERIALIZABLE
return config
@staticmethod
def _translate_special_keys(key: str, warn_deprecated: bool = True) -> str:
# Handle special key (str) -> `AlgorithmConfig.[some_property]` cases.
if key == "callbacks":
key = "callbacks_class"
elif key == "create_env_on_driver":
key = "create_env_on_local_worker"
elif key == "custom_eval_function":
key = "custom_evaluation_function"
elif key == "framework":
key = "framework_str"
elif key == "input":
key = "input_"
elif key == "lambda":
key = "lambda_"
elif key == "num_cpus_for_driver":
key = "num_cpus_for_main_process"
elif key == "num_workers":
key = "num_env_runners"
# Deprecated keys.
if warn_deprecated:
if key == "collect_metrics_timeout":
deprecation_warning(
old="collect_metrics_timeout",
new="metrics_episode_collection_timeout_s",
error=True,
)
elif key == "metrics_smoothing_episodes":
deprecation_warning(
old="config.metrics_smoothing_episodes",
new="config.metrics_num_episodes_for_smoothing",
error=True,
)
elif key == "min_iter_time_s":
deprecation_warning(
old="config.min_iter_time_s",
new="config.min_time_s_per_iteration",
error=True,
)
elif key == "min_time_s_per_reporting":
deprecation_warning(
old="config.min_time_s_per_reporting",
new="config.min_time_s_per_iteration",
error=True,
)
elif key == "min_sample_timesteps_per_reporting":
deprecation_warning(
old="config.min_sample_timesteps_per_reporting",
new="config.min_sample_timesteps_per_iteration",
error=True,
)
elif key == "min_train_timesteps_per_reporting":
deprecation_warning(
old="config.min_train_timesteps_per_reporting",
new="config.min_train_timesteps_per_iteration",
error=True,
)
elif key == "timesteps_per_iteration":
deprecation_warning(
old="config.timesteps_per_iteration",
new="`config.min_sample_timesteps_per_iteration` OR "
"`config.min_train_timesteps_per_iteration`",
error=True,
)
elif key == "evaluation_num_episodes":
deprecation_warning(
old="config.evaluation_num_episodes",
new="`config.evaluation_duration` and "
"`config.evaluation_duration_unit=episodes`",
error=True,
)
return key
def _check_if_correct_nn_framework_installed(self, _tf1, _tf, _torch):
"""Check if tf/torch experiment is running and tf/torch installed."""
if self.framework_str in {"tf", "tf2"}:
if not (_tf1 or _tf):
raise ImportError(
(
"TensorFlow was specified as the framework to use (via `config."
"framework([tf|tf2])`)! However, no installation was "
"found. You can install TensorFlow via `pip install tensorflow`"
)
)
elif self.framework_str == "torch":
if not _torch:
raise ImportError(
(
"PyTorch was specified as the framework to use (via `config."
"framework('torch')`)! However, no installation was found. You "
"can install PyTorch via `pip install torch`."
)
)
def _resolve_tf_settings(self, _tf1, _tfv):
"""Check and resolve tf settings."""
if _tf1 and self.framework_str == "tf2":
if self.framework_str == "tf2" and _tfv < 2:
raise ValueError(
"You configured `framework`=tf2, but your installed "
"pip tf-version is < 2.0! Make sure your TensorFlow "
"version is >= 2.x."
)
if not _tf1.executing_eagerly():
_tf1.enable_eager_execution()
# Recommend setting tracing to True for speedups.
logger.info(
f"Executing eagerly (framework='{self.framework_str}'),"
f" with eager_tracing={self.eager_tracing}. For "
"production workloads, make sure to set eager_tracing=True"
" in order to match the speed of tf-static-graph "
"(framework='tf'). For debugging purposes, "
"`eager_tracing=False` is the best choice."
)
# Tf-static-graph (framework=tf): Recommend upgrading to tf2 and
# enabling eager tracing for similar speed.
elif _tf1 and self.framework_str == "tf":
logger.info(
"Your framework setting is 'tf', meaning you are using "
"static-graph mode. Set framework='tf2' to enable eager "
"execution with tf2.x. You may also then want to set "
"eager_tracing=True in order to reach similar execution "
"speed as with static-graph mode."
)
@OldAPIStack
def get_multi_agent_setup(
self,
*,
policies: Optional[MultiAgentPolicyConfigDict] = None,
env: Optional[EnvType] = None,
spaces: Optional[Dict[PolicyID, Tuple[gym.Space, gym.Space]]] = None,
default_policy_class: Optional[Type[Policy]] = None,
) -> Tuple[MultiAgentPolicyConfigDict, Callable[[PolicyID, SampleBatchType], bool]]:
r"""Compiles complete multi-agent config (dict) from the information in `self`.
Infers the observation- and action spaces, the policy classes, and the policy's
configs. The returned `MultiAgentPolicyConfigDict` is fully unified and strictly
maps PolicyIDs to complete PolicySpec objects (with all their fields not-None).
Examples:
.. testcode::
import gymnasium as gym
from ray.rllib.algorithms.ppo import PPOConfig
config = (
PPOConfig()
.environment("CartPole-v1")
.framework("torch")
.multi_agent(policies={"pol1", "pol2"}, policies_to_train=["pol1"])
)
policy_dict, is_policy_to_train = config.get_multi_agent_setup(
env=gym.make("CartPole-v1"))
is_policy_to_train("pol1")
is_policy_to_train("pol2")
Args:
policies: An optional multi-agent `policies` dict, mapping policy IDs
to PolicySpec objects. If not provided uses `self.policies`
instead. Note that the `policy_class`, `observation_space`, and
`action_space` properties in these PolicySpecs may be None and must
therefore be inferred here.
env: An optional env instance, from which to infer the different spaces for
the different policies. If not provided, tries to infer from
`spaces`. Otherwise from `self.observation_space` and
`self.action_space`. Raises an error, if no information on spaces can be
infered.
spaces: Optional dict mapping policy IDs to tuples of 1) observation space
and 2) action space that should be used for the respective policy.
These spaces were usually provided by an already instantiated remote
EnvRunner. Note that if the `env` argument is provided, tries to
infer spaces from `env` first.
default_policy_class: The Policy class to use should a PolicySpec have its
policy_class property set to None.
Returns:
A tuple consisting of 1) a MultiAgentPolicyConfigDict and 2) a
`is_policy_to_train(PolicyID, SampleBatchType) -> bool` callable.
Raises:
ValueError: In case, no spaces can be infered for the policy/ies.
ValueError: In case, two agents in the env map to the same PolicyID
(according to `self.policy_mapping_fn`), but have different action- or
observation spaces according to the infered space information.
"""
policies = copy.deepcopy(policies or self.policies)
# Policies given as set/list/tuple (of PolicyIDs) -> Setup each policy
# automatically via empty PolicySpec (makes RLlib infer observation- and
# action spaces as well as the Policy's class).
if isinstance(policies, (set, list, tuple)):
policies = {pid: PolicySpec() for pid in policies}
# Try extracting spaces from env or from given spaces dict.
env_obs_space = None
env_act_space = None
# Env is a ray.remote: Get spaces via its (automatically added)
# `_get_spaces()` method.
if isinstance(env, ray.actor.ActorHandle):
env_obs_space, env_act_space = ray.get(env._get_spaces.remote())
# Normal env (gym.Env or MultiAgentEnv): These should have the
# `observation_space` and `action_space` properties.
elif env is not None:
# `env` is a gymnasium.vector.Env.
if hasattr(env, "single_observation_space") and isinstance(
env.single_observation_space, gym.Space
):
env_obs_space = env.single_observation_space
# `env` is a gymnasium.Env.
elif hasattr(env, "observation_space") and isinstance(
env.observation_space, gym.Space
):
env_obs_space = env.observation_space
# `env` is a gymnasium.vector.Env.
if hasattr(env, "single_action_space") and isinstance(
env.single_action_space, gym.Space
):
env_act_space = env.single_action_space
# `env` is a gymnasium.Env.
elif hasattr(env, "action_space") and isinstance(
env.action_space, gym.Space
):
env_act_space = env.action_space
# Last resort: Try getting the env's spaces from the spaces
# dict's special __env__ key.
if spaces is not None:
if env_obs_space is None:
env_obs_space = spaces.get(INPUT_ENV_SPACES, [None])[0]
if env_act_space is None:
env_act_space = spaces.get(INPUT_ENV_SPACES, [None, None])[1]
# Check each defined policy ID and unify its spec.
for pid, policy_spec in policies.copy().items():
# Convert to PolicySpec if plain list/tuple.
if not isinstance(policy_spec, PolicySpec):
policies[pid] = policy_spec = PolicySpec(*policy_spec)
# Infer policy classes for policies dict, if not provided (None).
if policy_spec.policy_class is None and default_policy_class is not None:
policies[pid].policy_class = default_policy_class
# Infer observation space.
if policy_spec.observation_space is None:
env_unwrapped = env.unwrapped if hasattr(env, "unwrapped") else env
# Module's space is provided -> Use it as-is.
if spaces is not None and pid in spaces:
obs_space = spaces[pid][0]
# MultiAgentEnv -> Check, whether agents have different spaces.
elif isinstance(env_unwrapped, MultiAgentEnv):
obs_space = None
mapping_fn = self.policy_mapping_fn
aids = list(
env_unwrapped.possible_agents
if hasattr(env_unwrapped, "possible_agents")
and env_unwrapped.possible_agents
else env_unwrapped.get_agent_ids()
)
if len(aids) == 0:
one_obs_space = env_unwrapped.observation_space
else:
one_obs_space = env_unwrapped.get_observation_space(aids[0])
# If all obs spaces are the same, just use the first space.
if all(
env_unwrapped.get_observation_space(aid) == one_obs_space
for aid in aids
):
obs_space = one_obs_space
# Need to reverse-map spaces (for the different agents) to certain
# policy IDs. We have to compare the ModuleID with all possible
# AgentIDs and find the agent ID that matches.
elif mapping_fn:
for aid in aids:
# Match: Assign spaces for this agentID to the PolicyID.
if mapping_fn(aid, None, worker=None) == pid:
# Make sure, different agents that map to the same
# policy don't have different spaces.
if (
obs_space is not None
and env_unwrapped.get_observation_space(aid)
!= obs_space
):
raise ValueError(
"Two agents in your environment map to the "
"same policyID (as per your `policy_mapping"
"_fn`), however, these agents also have "
"different observation spaces!"
)
obs_space = env_unwrapped.get_observation_space(aid)
# Just use env's obs space as-is.
elif env_obs_space is not None:
obs_space = env_obs_space
# Space given directly in config.
elif self.observation_space:
obs_space = self.observation_space
else:
raise ValueError(
"`observation_space` not provided in PolicySpec for "
f"{pid} and env does not have an observation space OR "
"no spaces received from other workers' env(s) OR no "
"`observation_space` specified in config!"
)
policies[pid].observation_space = obs_space
# Infer action space.
if policy_spec.action_space is None:
env_unwrapped = env.unwrapped if hasattr(env, "unwrapped") else env
# Module's space is provided -> Use it as-is.
if spaces is not None and pid in spaces:
act_space = spaces[pid][1]
# MultiAgentEnv -> Check, whether agents have different spaces.
elif isinstance(env_unwrapped, MultiAgentEnv):
act_space = None
mapping_fn = self.policy_mapping_fn
aids = list(
env_unwrapped.possible_agents
if hasattr(env_unwrapped, "possible_agents")
and env_unwrapped.possible_agents
else env_unwrapped.get_agent_ids()
)
if len(aids) == 0:
one_act_space = env_unwrapped.action_space
else:
one_act_space = env_unwrapped.get_action_space(aids[0])
# If all obs spaces are the same, just use the first space.
if all(
env_unwrapped.get_action_space(aid) == one_act_space
for aid in aids
):
act_space = one_act_space
# Need to reverse-map spaces (for the different agents) to certain
# policy IDs. We have to compare the ModuleID with all possible
# AgentIDs and find the agent ID that matches.
elif mapping_fn:
for aid in aids:
# Match: Assign spaces for this AgentID to the PolicyID.
if mapping_fn(aid, None, worker=None) == pid:
# Make sure, different agents that map to the same
# policy don't have different spaces.
if (
act_space is not None
and env_unwrapped.get_action_space(aid) != act_space
):
raise ValueError(
"Two agents in your environment map to the "
"same policyID (as per your `policy_mapping"
"_fn`), however, these agents also have "
"different action spaces!"
)
act_space = env_unwrapped.get_action_space(aid)
# Just use env's action space as-is.
elif env_act_space is not None:
act_space = env_act_space
elif self.action_space:
act_space = self.action_space
else:
raise ValueError(
"`action_space` not provided in PolicySpec for "
f"{pid} and env does not have an action space OR "
"no spaces received from other workers' env(s) OR no "
"`action_space` specified in config!"
)
policies[pid].action_space = act_space
# Create entire AlgorithmConfig object from the provided override.
# If None, use {} as override.
if not isinstance(policies[pid].config, AlgorithmConfig):
assert policies[pid].config is None or isinstance(
policies[pid].config, dict
)
policies[pid].config = self.copy(copy_frozen=False).update_from_dict(
policies[pid].config or {}
)
# If collection given, construct a simple default callable returning True
# if the PolicyID is found in the list/set of IDs.
if self.policies_to_train is not None and not callable(self.policies_to_train):
pols = set(self.policies_to_train)
def is_policy_to_train(pid, batch=None):
return pid in pols
else:
is_policy_to_train = self.policies_to_train
return policies, is_policy_to_train
@Deprecated(new="AlgorithmConfig.build_algo", error=False)
def build(self, *args, **kwargs):
return self.build_algo(*args, **kwargs)
@Deprecated(new="AlgorithmConfig.get_multi_rl_module_spec()", error=True)
def get_marl_module_spec(self, *args, **kwargs):
pass
@Deprecated(new="AlgorithmConfig.env_runners(..)", error=True)
def rollouts(self, *args, **kwargs):
pass
@Deprecated(new="AlgorithmConfig.env_runners(..)", error=True)
def exploration(self, *args, **kwargs):
pass
@property
@Deprecated(
new="AlgorithmConfig.fault_tolerance(restart_failed_env_runners=..)",
error=True,
)
def recreate_failed_env_runners(self):
pass
@recreate_failed_env_runners.setter
def recreate_failed_env_runners(self, value):
deprecation_warning(
old="AlgorithmConfig.recreate_failed_env_runners",
new="AlgorithmConfig.restart_failed_env_runners",
error=True,
)
@property
@Deprecated(new="AlgorithmConfig._enable_new_api_stack", error=True)
def _enable_new_api_stack(self):
pass
@_enable_new_api_stack.setter
def _enable_new_api_stack(self, value):
deprecation_warning(
old="AlgorithmConfig._enable_new_api_stack",
new="AlgorithmConfig.enable_rl_module_and_learner",
error=True,
)
@property
@Deprecated(new="AlgorithmConfig.enable_env_runner_and_connector_v2", error=True)
def uses_new_env_runners(self):
pass
@property
@Deprecated(new="AlgorithmConfig.num_env_runners", error=True)
def num_rollout_workers(self):
pass
@num_rollout_workers.setter
def num_rollout_workers(self, value):
deprecation_warning(
old="AlgorithmConfig.num_rollout_workers",
new="AlgorithmConfig.num_env_runners",
error=True,
)
@property
@Deprecated(new="AlgorithmConfig.evaluation_num_workers", error=True)
def evaluation_num_workers(self):
pass
@evaluation_num_workers.setter
def evaluation_num_workers(self, value):
deprecation_warning(
old="AlgorithmConfig.evaluation_num_workers",
new="AlgorithmConfig.evaluation_num_env_runners",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.num_envs_per_env_runner", error=True)
def num_envs_per_worker(self):
pass
@num_envs_per_worker.setter
def num_envs_per_worker(self, value):
deprecation_warning(
old="AlgorithmConfig.num_envs_per_worker",
new="AlgorithmConfig.num_envs_per_env_runner",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.ignore_env_runner_failures", error=True)
def ignore_worker_failures(self):
pass
@ignore_worker_failures.setter
def ignore_worker_failures(self, value):
deprecation_warning(
old="AlgorithmConfig.ignore_worker_failures",
new="AlgorithmConfig.ignore_env_runner_failures",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.restart_failed_env_runners", error=True)
def recreate_failed_workers(self):
pass
@recreate_failed_workers.setter
def recreate_failed_workers(self, value):
deprecation_warning(
old="AlgorithmConfig.recreate_failed_workers",
new="AlgorithmConfig.restart_failed_env_runners",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.max_num_env_runner_restarts", error=True)
def max_num_worker_restarts(self):
pass
@max_num_worker_restarts.setter
def max_num_worker_restarts(self, value):
deprecation_warning(
old="AlgorithmConfig.max_num_worker_restarts",
new="AlgorithmConfig.max_num_env_runner_restarts",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.delay_between_env_runner_restarts_s", error=True)
def delay_between_worker_restarts_s(self):
pass
@delay_between_worker_restarts_s.setter
def delay_between_worker_restarts_s(self, value):
deprecation_warning(
old="AlgorithmConfig.delay_between_worker_restarts_s",
new="AlgorithmConfig.delay_between_env_runner_restarts_s",
error=True,
)
pass
@property
@Deprecated(
new="AlgorithmConfig.num_consecutive_env_runner_failures_tolerance", error=True
)
def num_consecutive_worker_failures_tolerance(self):
pass
@num_consecutive_worker_failures_tolerance.setter
def num_consecutive_worker_failures_tolerance(self, value):
deprecation_warning(
old="AlgorithmConfig.num_consecutive_worker_failures_tolerance",
new="AlgorithmConfig.num_consecutive_env_runner_failures_tolerance",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.env_runner_health_probe_timeout_s", error=True)
def worker_health_probe_timeout_s(self):
pass
@worker_health_probe_timeout_s.setter
def worker_health_probe_timeout_s(self, value):
deprecation_warning(
old="AlgorithmConfig.worker_health_probe_timeout_s",
new="AlgorithmConfig.env_runner_health_probe_timeout_s",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.env_runner_restore_timeout_s", error=True)
def worker_restore_timeout_s(self):
pass
@worker_restore_timeout_s.setter
def worker_restore_timeout_s(self, value):
deprecation_warning(
old="AlgorithmConfig.worker_restore_timeout_s",
new="AlgorithmConfig.env_runner_restore_timeout_s",
error=True,
)
pass
@property
@Deprecated(
new="AlgorithmConfig.validate_env_runners_after_construction",
error=True,
)
def validate_workers_after_construction(self):
pass
@validate_workers_after_construction.setter
def validate_workers_after_construction(self, value):
deprecation_warning(
old="AlgorithmConfig.validate_workers_after_construction",
new="AlgorithmConfig.validate_env_runners_after_construction",
error=True,
)
pass
# Cleanups from `resources()`.
@property
@Deprecated(new="AlgorithmConfig.num_cpus_per_env_runner", error=True)
def num_cpus_per_worker(self):
pass
@num_cpus_per_worker.setter
def num_cpus_per_worker(self, value):
deprecation_warning(
old="AlgorithmConfig.num_cpus_per_worker",
new="AlgorithmConfig.num_cpus_per_env_runner",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.num_gpus_per_env_runner", error=True)
def num_gpus_per_worker(self):
pass
@num_gpus_per_worker.setter
def num_gpus_per_worker(self, value):
deprecation_warning(
old="AlgorithmConfig.num_gpus_per_worker",
new="AlgorithmConfig.num_gpus_per_env_runner",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.custom_resources_per_env_runner", error=True)
def custom_resources_per_worker(self):
pass
@custom_resources_per_worker.setter
def custom_resources_per_worker(self, value):
deprecation_warning(
old="AlgorithmConfig.custom_resources_per_worker",
new="AlgorithmConfig.custom_resources_per_env_runner",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.num_learners", error=True)
def num_learner_workers(self):
pass
@num_learner_workers.setter
def num_learner_workers(self, value):
deprecation_warning(
old="AlgorithmConfig.num_learner_workers",
new="AlgorithmConfig.num_learners",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.num_cpus_per_learner", error=True)
def num_cpus_per_learner_worker(self):
pass
@num_cpus_per_learner_worker.setter
def num_cpus_per_learner_worker(self, value):
deprecation_warning(
old="AlgorithmConfig.num_cpus_per_learner_worker",
new="AlgorithmConfig.num_cpus_per_learner",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.num_gpus_per_learner", error=True)
def num_gpus_per_learner_worker(self):
pass
@num_gpus_per_learner_worker.setter
def num_gpus_per_learner_worker(self, value):
deprecation_warning(
old="AlgorithmConfig.num_gpus_per_learner_worker",
new="AlgorithmConfig.num_gpus_per_learner",
error=True,
)
pass
@property
@Deprecated(new="AlgorithmConfig.num_cpus_for_local_worker", error=True)
def num_cpus_for_local_worker(self):
pass
@num_cpus_for_local_worker.setter
def num_cpus_for_local_worker(self, value):
deprecation_warning(
old="AlgorithmConfig.num_cpus_for_local_worker",
new="AlgorithmConfig.num_cpus_for_main_process",
error=True,
)
pass
| AlgorithmConfig |
python | PrefectHQ__prefect | src/integrations/prefect-github/prefect_github/schemas/graphql_schema.py | {
"start": 530358,
"end": 530975
} | class ____(sgqlc.types.relay.Connection):
"""
See source code for more info.
"""
__schema__ = graphql_schema
__field_names__ = ("edges", "nodes", "page_info", "total_count")
edges = sgqlc.types.Field(
sgqlc.types.list_of("ProjectViewEdge"), graphql_name="edges"
)
nodes = sgqlc.types.Field(sgqlc.types.list_of("ProjectView"), graphql_name="nodes")
page_info = sgqlc.types.Field(
sgqlc.types.non_null(PageInfo), graphql_name="pageInfo"
)
total_count = sgqlc.types.Field(
sgqlc.types.non_null(Int), graphql_name="totalCount"
)
| ProjectViewConnection |
python | run-llama__llama_index | llama-index-integrations/vector_stores/llama-index-vector-stores-couchbase/tests/test_couchbase_query_vector_store.py | {
"start": 8024,
"end": 25639
} | class ____:
@classmethod
def setup_class(cls) -> None:
"""Set up test class with vector index creation."""
cls.cluster = get_cluster()
# Create scope and collection if they don't exist
create_scope_and_collection(
cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME
)
# Create vector index for testing
create_vector_index(
cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME, INDEX_NAME
)
@classmethod
def teardown_class(cls) -> None:
"""Clean up after all tests."""
try:
# Drop the vector index
drop_vector_index(
cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME, INDEX_NAME
)
delete_documents(cls.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME)
except Exception:
pass
def setup_method(self) -> None:
"""Set up each test method."""
# Delete all the documents in the collection
delete_documents(self.cluster, BUCKET_NAME, SCOPE_NAME, COLLECTION_NAME)
self.vector_store = CouchbaseQueryVectorStore(
cluster=self.cluster,
bucket_name=BUCKET_NAME,
scope_name=SCOPE_NAME,
collection_name=COLLECTION_NAME,
search_type=QueryVectorSearchType.ANN,
similarity=QueryVectorSearchSimilarity.DOT,
nprobes=50,
)
def test_initialization_default_params(self) -> None:
"""Test initialization with default parameters."""
vector_store = CouchbaseQueryVectorStore(
cluster=self.cluster,
bucket_name=BUCKET_NAME,
scope_name=SCOPE_NAME,
collection_name=COLLECTION_NAME,
search_type=QueryVectorSearchType.ANN,
similarity=QueryVectorSearchSimilarity.COSINE,
nprobes=50,
)
assert vector_store._search_type == QueryVectorSearchType.ANN
assert vector_store._similarity == QueryVectorSearchSimilarity.COSINE
assert vector_store._nprobes == 50
assert vector_store._text_key == "text"
assert vector_store._embedding_key == "embedding"
assert vector_store._metadata_key == "metadata"
def test_initialization_custom_params(self) -> None:
"""Test initialization with custom parameters."""
custom_timeout = timedelta(seconds=120)
vector_store = CouchbaseQueryVectorStore(
cluster=self.cluster,
bucket_name=BUCKET_NAME,
scope_name=SCOPE_NAME,
collection_name=COLLECTION_NAME,
search_type=QueryVectorSearchType.KNN,
similarity="euclidean",
text_key="content",
embedding_key="vector",
metadata_key="meta",
query_options=QueryOptions(timeout=custom_timeout),
)
assert vector_store._search_type == QueryVectorSearchType.KNN
assert vector_store._similarity == QueryVectorSearchSimilarity.EUCLIDEAN
assert vector_store._text_key == "content"
assert vector_store._embedding_key == "vector"
assert vector_store._metadata_key == "meta"
assert vector_store._query_options["timeout"] == custom_timeout
def test_initialization_with_string_search_type(self) -> None:
"""Test initialization with string search type."""
vector_store = CouchbaseQueryVectorStore(
cluster=self.cluster,
bucket_name=BUCKET_NAME,
scope_name=SCOPE_NAME,
collection_name=COLLECTION_NAME,
search_type="KNN",
similarity="EUCLIDEAN",
)
assert vector_store._search_type == QueryVectorSearchType.KNN
assert vector_store._similarity == QueryVectorSearchSimilarity.EUCLIDEAN
assert vector_store._nprobes is None
def test_add_documents(self, node_embeddings: List[TextNode]) -> None:
"""Test adding documents to Couchbase query vector store."""
input_doc_ids = [node_embedding.id_ for node_embedding in node_embeddings]
# Add nodes to the couchbase vector store
doc_ids = self.vector_store.add(node_embeddings)
# Ensure that all nodes are returned & they are the same as input
assert len(doc_ids) == len(node_embeddings)
for doc_id in doc_ids:
assert doc_id in input_doc_ids
def test_ann_search(self, node_embeddings: List[TextNode]) -> None:
"""Test ANN vector search functionality."""
# Add nodes to the couchbase vector store
self.vector_store.add(node_embeddings)
# Wait for the documents to be indexed
time.sleep(SLEEP_DURATION)
# ANN similarity search
q = VectorStoreQuery(
query_embedding=text_to_embedding("foo"), similarity_top_k=1
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
assert (
result.nodes[0].get_content(metadata_mode=MetadataMode.NONE)
== node_embeddings[0].text
)
assert result.similarities is not None
def test_knn_search(self, node_embeddings: List[TextNode]) -> None:
"""Test KNN vector search functionality."""
# Create a KNN vector store
knn_vector_store = CouchbaseQueryVectorStore(
cluster=self.cluster,
bucket_name=BUCKET_NAME,
scope_name=SCOPE_NAME,
collection_name=COLLECTION_NAME,
search_type=QueryVectorSearchType.KNN,
similarity=QueryVectorSearchSimilarity.L2,
nprobes=50,
)
# Add nodes to the couchbase vector store
knn_vector_store.add(node_embeddings)
# Wait for the documents to be indexed
time.sleep(SLEEP_DURATION)
# KNN similarity search
q = VectorStoreQuery(
query_embedding=text_to_embedding("foo"), similarity_top_k=1
)
result = knn_vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
assert (
result.nodes[0].get_content(metadata_mode=MetadataMode.NONE)
== node_embeddings[0].text
)
assert result.similarities is not None
def test_search_with_filters(self, node_embeddings: List[TextNode]) -> None:
"""Test vector search with metadata filters."""
# Add nodes to the couchbase vector store
self.vector_store.add(node_embeddings)
# Wait for the documents to be indexed
time.sleep(SLEEP_DURATION)
# Test equality filter
q = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=3,
filters=MetadataFilters(
filters=[
MetadataFilter(
key="genre", value="Thriller", operator=FilterOperator.EQ
),
]
),
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
assert result.nodes[0].metadata.get("genre") == "Thriller"
def test_search_with_numeric_filters(self, node_embeddings: List[TextNode]) -> None:
"""Test vector search with numeric metadata filters."""
# Add nodes to the couchbase vector store
self.vector_store.add(node_embeddings)
# Wait for the documents to be indexed
time.sleep(SLEEP_DURATION)
# Test greater than filter
q = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=3,
filters=MetadataFilters(
filters=[
MetadataFilter(key="pages", value=10, operator=FilterOperator.GT),
]
),
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
assert result.nodes[0].metadata.get("pages") == 20
# Test less than or equal filter
q = VectorStoreQuery(
query_embedding=text_to_embedding("bar"),
similarity_top_k=3,
filters=MetadataFilters(
filters=[
MetadataFilter(key="pages", value=10, operator=FilterOperator.LTE),
]
),
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 2
for node in result.nodes:
assert node.metadata.get("pages") <= 10
def test_search_with_combined_filters(
self, node_embeddings: List[TextNode]
) -> None:
"""Test vector search with multiple combined filters."""
# Add nodes to the couchbase vector store
self.vector_store.add(node_embeddings)
# Wait for the documents to be indexed
time.sleep(SLEEP_DURATION)
# Test combined filters with AND condition
q = VectorStoreQuery(
query_embedding=text_to_embedding("baz"),
similarity_top_k=3,
filters=MetadataFilters(
filters=[
MetadataFilter(
key="genre", value="Thriller", operator=FilterOperator.EQ
),
MetadataFilter(key="rating", value=4.0, operator=FilterOperator.GT),
],
condition="and",
),
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
assert result.nodes[0].metadata.get("genre") == "Thriller"
assert result.nodes[0].metadata.get("rating") > 4.0
def test_delete_document(self) -> None:
"""Test delete document from Couchbase query vector store."""
storage_context = StorageContext.from_defaults(vector_store=self.vector_store)
# Add a document to the vector store
VectorStoreIndex.from_documents(
[
Document(
text="hello world",
metadata={"name": "John Doe", "age": 30, "city": "New York"},
),
],
storage_context=storage_context,
)
# Wait for the documents to be indexed
time.sleep(SLEEP_DURATION)
# Search for the document
search_embedding = OpenAIEmbedding().get_text_embedding("hello world")
q = VectorStoreQuery(
query_embedding=search_embedding,
similarity_top_k=1,
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
# Get the document ID to delete
ref_doc_id_to_delete = result.nodes[0].ref_doc_id
# Delete the document
self.vector_store.delete(ref_doc_id=ref_doc_id_to_delete)
# Wait for the deletion to be processed
time.sleep(SLEEP_DURATION)
# Ensure that no results are returned
result = self.vector_store.query(q)
assert len(result.nodes) == 0
def test_empty_query_embedding_error(self) -> None:
"""Test that empty query embedding raises ValueError."""
q = VectorStoreQuery(
query_embedding=None,
similarity_top_k=1,
)
with pytest.raises(ValueError, match="Query embedding must not be empty"):
self.vector_store.query(q)
def test_different_similarity_metrics(
self, node_embeddings: List[TextNode]
) -> None:
"""Test different similarity metrics."""
similarity_metrics = [
QueryVectorSearchSimilarity.COSINE,
QueryVectorSearchSimilarity.EUCLIDEAN,
QueryVectorSearchSimilarity.DOT,
]
for metric in similarity_metrics:
# Create vector store with specific similarity metric
vector_store = CouchbaseQueryVectorStore(
cluster=self.cluster,
bucket_name=BUCKET_NAME,
scope_name=SCOPE_NAME,
collection_name=COLLECTION_NAME,
similarity=metric,
search_type=QueryVectorSearchType.ANN,
nprobes=50,
)
# Add nodes to the vector store
vector_store.add(node_embeddings)
# Wait for indexing
time.sleep(SLEEP_DURATION)
# Test search
q = VectorStoreQuery(
query_embedding=text_to_embedding("foo"),
similarity_top_k=1,
)
result = vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
assert result.similarities is not None
def test_custom_field_names(self) -> None:
"""Test vector store with custom field names."""
custom_vector_store = CouchbaseQueryVectorStore(
cluster=self.cluster,
bucket_name=BUCKET_NAME,
scope_name=SCOPE_NAME,
collection_name=COLLECTION_NAME,
search_type=QueryVectorSearchType.ANN,
similarity=QueryVectorSearchSimilarity.COSINE,
nprobes=50,
text_key="content",
embedding_key="vector",
metadata_key="meta",
)
# Create a test node with custom field mapping
test_node = TextNode(
text="custom field test",
id_="custom-test-id",
metadata={"category": "test"},
embedding=text_to_embedding("custom field test"),
)
# Add the node
doc_ids = custom_vector_store.add([test_node])
assert len(doc_ids) == 1
# Wait for indexing
time.sleep(SLEEP_DURATION)
# Search for the document
q = VectorStoreQuery(
query_embedding=text_to_embedding("custom field test"),
similarity_top_k=1,
)
result = custom_vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 1
assert (
result.nodes[0].get_content(metadata_mode=MetadataMode.NONE)
== "custom field test"
)
def test_batch_insert(self, node_embeddings: List[TextNode]) -> None:
"""Test batch insert with custom batch size."""
# Test with small batch size
doc_ids = self.vector_store.add(node_embeddings, batch_size=2)
assert len(doc_ids) == len(node_embeddings)
# Wait for indexing
time.sleep(SLEEP_DURATION)
# Verify all documents are searchable
q = VectorStoreQuery(
query_embedding=text_to_embedding("foo"),
similarity_top_k=3,
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 3
def test_vector_index_utilization(self, node_embeddings: List[TextNode]) -> None:
"""Test that vector search actually utilizes the GSI vector index."""
# Add nodes to the vector store
self.vector_store.add(node_embeddings)
# Wait for GSI indexing
time.sleep(SLEEP_DURATION)
# Test that we can perform vector search (this implicitly tests index usage)
q = VectorStoreQuery(
query_embedding=text_to_embedding("foo"),
similarity_top_k=2,
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 2
assert result.similarities is not None
assert len(result.similarities) == 2
def test_vector_search_relevance(self, node_embeddings: List[TextNode]) -> None:
"""Test that vector search returns relevant results."""
# Add nodes to the vector store
self.vector_store.add(node_embeddings)
# Wait for GSI indexing
time.sleep(SLEEP_DURATION)
# Search for "foo" - should return "foo" document with best score
q = VectorStoreQuery(
query_embedding=text_to_embedding("foo"),
similarity_top_k=3,
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 3
# The first result should be the most similar (lowest distance for dot product)
assert result.nodes[0].get_content(metadata_mode=MetadataMode.NONE) == "foo"
# Verify scores are ordered (ascending for distance-based similarity)
scores = result.similarities
print(f"scores: {scores}")
assert scores[0] <= scores[1]
assert scores[1] <= scores[2]
def test_large_batch_processing(self) -> None:
"""Test handling of larger document batches."""
# Create a larger batch of documents
large_batch = []
for i in range(2000):
node = TextNode(
text=f"document_{i}",
id_=f"large_batch_{i}",
metadata={"batch_id": "large", "doc_num": i},
embedding=text_to_embedding(f"document_{i}"),
)
large_batch.append(node)
# Add the large batch
doc_ids = self.vector_store.add(large_batch, batch_size=10)
assert len(doc_ids) == len(large_batch)
# Wait for indexing
time.sleep(SLEEP_DURATION * 2) # Extra time for larger batch
# Test search works with larger dataset
q = VectorStoreQuery(
query_embedding=text_to_embedding("document_25"),
similarity_top_k=5,
)
result = self.vector_store.query(q)
assert result.nodes is not None and len(result.nodes) == 5
| TestCouchbaseQueryVectorStore |
python | huggingface__transformers | tests/models/parakeet/test_modeling_parakeet.py | {
"start": 1329,
"end": 5977
} | class ____:
def __init__(
self,
parent,
batch_size=13,
seq_length=1024,
is_training=True,
hidden_size=64,
num_hidden_layers=2,
num_attention_heads=4,
intermediate_size=256,
hidden_act="silu",
dropout=0, # so gradient checkpointing doesn't fail
conv_kernel_size=9,
subsampling_factor=8,
subsampling_conv_channels=32,
use_bias=True,
num_mel_bins=80,
scale_input=True,
):
# testing suite parameters
self.parent = parent
self.batch_size = batch_size
self.seq_length = seq_length
self.num_mel_bins = num_mel_bins
self.is_training = is_training
# config parameters
self.hidden_size = hidden_size
self.num_hidden_layers = num_hidden_layers
self.num_attention_heads = num_attention_heads
self.intermediate_size = intermediate_size
self.hidden_act = hidden_act
self.dropout = dropout
self.conv_kernel_size = conv_kernel_size
self.subsampling_factor = subsampling_factor
self.subsampling_conv_channels = subsampling_conv_channels
self.use_bias = use_bias
self.num_mel_bins = num_mel_bins
self.scale_input = scale_input
# Calculate output sequence length after subsampling
self.output_seq_length = seq_length // subsampling_factor
self.encoder_seq_length = self.output_seq_length
self.key_length = self.output_seq_length
def prepare_config_and_inputs(self):
input_features = floats_tensor([self.batch_size, self.seq_length, self.num_mel_bins])
attention_mask = random_attention_mask([self.batch_size, self.seq_length])
config = self.get_config()
return config, input_features, attention_mask
def get_config(self):
return ParakeetEncoderConfig(
hidden_size=self.hidden_size,
num_hidden_layers=self.num_hidden_layers,
num_attention_heads=self.num_attention_heads,
intermediate_size=self.intermediate_size,
hidden_act=self.hidden_act,
dropout=self.dropout,
dropout_positions=self.dropout,
layerdrop=self.dropout,
activation_dropout=self.dropout,
attention_dropout=self.dropout,
conv_kernel_size=self.conv_kernel_size,
subsampling_factor=self.subsampling_factor,
subsampling_conv_channels=self.subsampling_conv_channels,
use_bias=self.use_bias,
num_mel_bins=self.num_mel_bins,
scale_input=self.scale_input,
)
def create_and_check_model(self, config, input_features, attention_mask):
model = ParakeetEncoder(config=config)
model.to(torch_device)
model.eval()
with torch.no_grad():
result = model(input_features, attention_mask=attention_mask)
self.parent.assertEqual(
result.last_hidden_state.shape, (self.batch_size, self.output_seq_length, config.hidden_size)
)
def prepare_config_and_inputs_for_common(self):
config, input_features, attention_mask = self.prepare_config_and_inputs()
inputs_dict = {
"input_features": input_features,
"attention_mask": attention_mask,
}
return config, inputs_dict
def check_ctc_loss(self, config, input_values, *args):
model = ParakeetForCTC(config=config)
model.to(torch_device)
# make sure that dropout is disabled
model.eval()
input_values = input_values[:3]
attention_mask = torch.ones(input_values.shape, device=torch_device, dtype=torch.long)
input_lengths = [input_values.shape[-1] // i for i in [4, 2, 1]]
max_length_labels = model._get_feat_extract_output_lengths(torch.tensor(input_lengths))
labels = ids_tensor((input_values.shape[0], min(max_length_labels) - 1), model.config.vocab_size)
# pad input
for i in range(len(input_lengths)):
input_values[i, input_lengths[i] :] = 0.0
attention_mask[i, input_lengths[i] :] = 0
model.config.ctc_loss_reduction = "sum"
sum_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
model.config.ctc_loss_reduction = "mean"
mean_loss = model(input_values, attention_mask=attention_mask, labels=labels).loss.item()
self.parent.assertTrue(isinstance(sum_loss, float))
self.parent.assertTrue(isinstance(mean_loss, float))
@require_torch
| ParakeetEncoderModelTester |
python | tensorflow__tensorflow | tensorflow/python/training/monitored_session.py | {
"start": 52440,
"end": 54903
} | class ____(_WrappedSession):
"""A wrapped session that works with a `tf.Coordinator`.
Calls to `run()` are delegated to the wrapped session. If a call
raises an exception, the exception is reported to the coordinator.
In addition, after each call to `run()` this session ask the coordinator if
the session should stop. In that case it will join all the threads
registered with the coordinator before returning.
If the coordinator was requested to stop with an exception, that exception
will be re-raised from the call to `run()`.
"""
def __init__(self, sess, coord, stop_grace_period_secs=120):
"""Create a new `_CoordinatedSession`.
Args:
sess: A `tf.compat.v1.Session` object. The wrapped session.
coord: A `tf.train.Coordinator` object.
stop_grace_period_secs: Number of seconds given to threads to stop after
`close()` has been called.
"""
_WrappedSession.__init__(self, sess)
self._coord = coord
self._stop_grace_period_secs = stop_grace_period_secs
def _check_stop(self):
# If the coordinator was asked to stop due to an exception, then it needs
# to be propagated to this stack.
self._coord.raise_requested_exception()
# At this point, no exceptions are recorded in the coordinator.
return self._coord.should_stop()
def close(self):
self._coord.request_stop()
try:
self._coord.join(
stop_grace_period_secs=self._stop_grace_period_secs,
ignore_live_threads=True)
finally:
try:
_WrappedSession.close(self)
except Exception: # pylint: disable=broad-except
# We intentionally suppress exceptions from the close() here since
# useful exceptions are already reported by join().
pass
def run(self, *args, **kwargs):
try:
return self._sess.run(*args, **kwargs)
except _PREEMPTION_ERRORS:
raise
except Exception as original_exception: # pylint: disable=broad-except
# A non-preemption error could have been caused by a preemption error
# in the coordinator. If this is the case, raise that exception instead,
# since it's the root cause. Otherwise, stick to the `original_exception`.
try:
self._coord.raise_requested_exception()
except _PREEMPTION_ERRORS:
raise
except Exception: # pylint: disable=broad-except
raise original_exception from None
else:
raise
| _CoordinatedSession |
python | Lightning-AI__lightning | tests/tests_pytorch/loops/test_all.py | {
"start": 1163,
"end": 2279
} | class ____(Callback):
def on_train_batch_start(self, trainer, pl_module, batch, *_):
_device_check_helper(batch.device, pl_module.device)
def on_train_batch_end(self, trainer, pl_module, outputs, batch, *_):
_device_check_helper(batch.device, pl_module.device)
def on_validation_batch_start(self, trainer, pl_module, batch, *_):
_device_check_helper(batch.device, pl_module.device)
def on_validation_batch_end(self, trainer, pl_module, outputs, batch, *_):
_device_check_helper(batch.device, pl_module.device)
def on_test_batch_start(self, trainer, pl_module, batch, *_):
_device_check_helper(batch.device, pl_module.device)
def on_test_batch_end(self, trainer, pl_module, outputs, batch, *_):
_device_check_helper(batch.device, pl_module.device)
def on_predict_batch_start(self, trainer, pl_module, batch, *_):
_device_check_helper(batch.device, pl_module.device)
def on_predict_batch_end(self, trainer, pl_module, outputs, batch, *_):
_device_check_helper(batch.device, pl_module.device)
| BatchHookObserverCallback |
python | falconry__falcon | tests/test_headers.py | {
"start": 4676,
"end": 5447
} | class ____:
def __init__(self):
self._links = []
def add_link(self, *args, **kwargs):
self._links.append(('add_link', args, kwargs))
def append_link(self, *args, **kwargs):
self._links.append(('append_link', args, kwargs))
def on_get(self, req, resp):
resp.text = '{}'
for method_name, args, kwargs in self._links:
append_method = getattr(resp, method_name)
if method_name == 'append_link':
append_method(*args, **kwargs)
else:
with pytest.warns(
DeprecatedWarning,
match='Call to deprecated function add_link(...)',
):
append_method(*args, **kwargs)
| LinkHeaderResource |
python | pdm-project__pdm | src/pdm/models/backends.py | {
"start": 1024,
"end": 1249
} | class ____(BuildBackend):
@classmethod
def build_system(cls) -> BuildSystem:
return {
"requires": ["setuptools>=61"],
"build-backend": "setuptools.build_meta",
}
| SetuptoolsBackend |
python | jazzband__django-polymorphic | example/pexp/management/commands/polymorphic_create_test_data.py | {
"start": 157,
"end": 561
} | class ____(BaseCommand):
help = ""
def handle_noargs(self, **options):
Project.objects.all().delete()
o = Project.objects.create(topic="John's gathering")
o = ArtProject.objects.create(topic="Sculpting with Tim", artist="T. Turner")
o = ResearchProject.objects.create(topic="Swallow Aerodynamics", supervisor="Dr. Winter")
print(Project.objects.all())
| Command |
python | scipy__scipy | scipy/odr/_models.py | {
"start": 1738,
"end": 4599
} | class ____(Model):
r"""
Arbitrary-dimensional linear model
This model is defined by :math:`y=\beta_0 + \sum_{i=1}^m \beta_i x_i`
Examples
--------
We can calculate orthogonal distance regression with an arbitrary
dimensional linear model:
>>> from scipy import odr
>>> import numpy as np
>>> x = np.linspace(0.0, 5.0)
>>> y = 10.0 + 5.0 * x
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, odr.multilinear)
>>> output = odr_obj.run()
>>> print(output.beta)
[10. 5.]
"""
def __init__(self):
super().__init__(
_lin_fcn, fjacb=_lin_fjb, fjacd=_lin_fjd, estimate=_lin_est,
meta={'name': 'Arbitrary-dimensional Linear',
'equ': 'y = B_0 + Sum[i=1..m, B_i * x_i]',
'TeXequ': r'$y=\beta_0 + \sum_{i=1}^m \beta_i x_i$'})
multilinear = _MultilinearModel()
def polynomial(order):
"""
Factory function for a general polynomial model.
Parameters
----------
order : int or sequence
If an integer, it becomes the order of the polynomial to fit. If
a sequence of numbers, then these are the explicit powers in the
polynomial.
A constant term (power 0) is always included, so don't include 0.
Thus, polynomial(n) is equivalent to polynomial(range(1, n+1)).
Returns
-------
polynomial : Model instance
Model instance.
Examples
--------
We can fit an input data using orthogonal distance regression (ODR) with
a polynomial model:
>>> import numpy as np
>>> import matplotlib.pyplot as plt
>>> from scipy import odr
>>> x = np.linspace(0.0, 5.0)
>>> y = np.sin(x)
>>> poly_model = odr.polynomial(3) # using third order polynomial model
>>> data = odr.Data(x, y)
>>> odr_obj = odr.ODR(data, poly_model)
>>> output = odr_obj.run() # running ODR fitting
>>> poly = np.poly1d(output.beta[::-1])
>>> poly_y = poly(x)
>>> plt.plot(x, y, label="input data")
>>> plt.plot(x, poly_y, label="polynomial ODR")
>>> plt.legend()
>>> plt.show()
"""
powers = np.asarray(order)
if powers.shape == ():
# Scalar.
powers = np.arange(1, powers + 1)
powers = powers.reshape((len(powers), 1))
len_beta = len(powers) + 1
def _poly_est(data, len_beta=len_beta):
# Eh. Ignore data and return all ones.
return np.ones((len_beta,), float)
return Model(_poly_fcn, fjacd=_poly_fjacd, fjacb=_poly_fjacb,
estimate=_poly_est, extra_args=(powers,),
meta={'name': 'Sorta-general Polynomial',
'equ': 'y = B_0 + Sum[i=1..%s, B_i * (x**i)]' % (len_beta-1),
'TeXequ': r'$y=\beta_0 + \sum_{i=1}^{%s} \beta_i x^i$' %
(len_beta-1)})
| _MultilinearModel |
python | matplotlib__matplotlib | lib/matplotlib/tri/_triinterpolate.py | {
"start": 50284,
"end": 62445
} | class ____:
def __init__(self, vals, rows, cols, shape):
"""
Create a sparse matrix in COO format.
*vals*: arrays of values of non-null entries of the matrix
*rows*: int arrays of rows of non-null entries of the matrix
*cols*: int arrays of cols of non-null entries of the matrix
*shape*: 2-tuple (n, m) of matrix shape
"""
self.n, self.m = shape
self.vals = np.asarray(vals, dtype=np.float64)
self.rows = np.asarray(rows, dtype=np.int32)
self.cols = np.asarray(cols, dtype=np.int32)
def dot(self, V):
"""
Dot product of self by a vector *V* in sparse-dense to dense format
*V* dense vector of shape (self.m,).
"""
assert V.shape == (self.m,)
return np.bincount(self.rows,
weights=self.vals*V[self.cols],
minlength=self.m)
def compress_csc(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csc format.
"""
_, unique, indices = np.unique(
self.rows + self.n*self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def compress_csr(self):
"""
Compress rows, cols, vals / summing duplicates. Sort for csr format.
"""
_, unique, indices = np.unique(
self.m*self.rows + self.cols,
return_index=True, return_inverse=True)
self.rows = self.rows[unique]
self.cols = self.cols[unique]
self.vals = np.bincount(indices, weights=self.vals)
def to_dense(self):
"""
Return a dense matrix representing self, mainly for debugging purposes.
"""
ret = np.zeros([self.n, self.m], dtype=np.float64)
nvals = self.vals.size
for i in range(nvals):
ret[self.rows[i], self.cols[i]] += self.vals[i]
return ret
def __str__(self):
return self.to_dense().__str__()
@property
def diag(self):
"""Return the (dense) vector of the diagonal elements."""
in_diag = (self.rows == self.cols)
diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.
diag[self.rows[in_diag]] = self.vals[in_diag]
return diag
def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
"""
Use Preconditioned Conjugate Gradient iteration to solve A x = b
A simple Jacobi (diagonal) preconditioner is used.
Parameters
----------
A : _Sparse_Matrix_coo
*A* must have been compressed before by compress_csc or
compress_csr method.
b : array
Right hand side of the linear system.
x0 : array, optional
Starting guess for the solution. Defaults to the zero vector.
tol : float, optional
Tolerance to achieve. The algorithm terminates when the relative
residual is below tol. Default is 1e-10.
maxiter : int, optional
Maximum number of iterations. Iteration will stop after *maxiter*
steps even if the specified tolerance has not been achieved. Defaults
to 1000.
Returns
-------
x : array
The converged solution.
err : float
The absolute error np.linalg.norm(A.dot(x) - b)
"""
n = b.size
assert A.n == n
assert A.m == n
b_norm = np.linalg.norm(b)
# Jacobi pre-conditioner
kvec = A.diag
# For diag elem < 1e-6 we keep 1e-6.
kvec = np.maximum(kvec, 1e-6)
# Initial guess
if x0 is None:
x = np.zeros(n)
else:
x = x0
r = b - A.dot(x)
w = r/kvec
p = np.zeros(n)
beta = 0.0
rho = np.dot(r, w)
k = 0
# Following C. T. Kelley
while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
p = w + beta*p
z = A.dot(p)
alpha = rho/np.dot(p, z)
r = r - alpha*z
w = r/kvec
rhoold = rho
rho = np.dot(r, w)
x = x + alpha*p
beta = rho/rhoold
# err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
k += 1
err = np.linalg.norm(A.dot(x) - b)
return x, err
# The following private functions:
# :func:`_safe_inv22_vectorized`
# :func:`_pseudo_inv22sym_vectorized`
# :func:`_scalar_vectorized`
# :func:`_transpose_vectorized`
# :func:`_roll_vectorized`
# :func:`_to_matrix_vectorized`
# :func:`_extract_submatrices`
# provide fast numpy implementation of some standard operations on arrays of
# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
# Development note: Dealing with pathologic 'flat' triangles in the
# CubicTriInterpolator code and impact on (2, 2)-matrix inversion functions
# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
#
# Goals:
# 1) The CubicTriInterpolator should be able to handle flat or almost flat
# triangles without raising an error,
# 2) These degenerated triangles should have no impact on the automatic dof
# calculation (associated with null weight for the _DOF_estimator_geom and
# with null energy for the _DOF_estimator_min_E),
# 3) Linear patch test should be passed exactly on degenerated meshes,
# 4) Interpolation (with :meth:`_interpolate_single_key` or
# :meth:`_interpolate_multi_key`) shall be correctly handled even *inside*
# the pathologic triangles, to interact correctly with a TriRefiner class.
#
# Difficulties:
# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
# *metric* (the metric tensor = J x J.T). Computation of the local
# tangent plane is also problematic.
#
# Implementation:
# Most of the time, when computing the inverse of a rank-deficient matrix it
# is safe to simply return the null matrix (which is the implementation in
# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
# enforced by:
# - null area hence null energy in :class:`_DOF_estimator_min_E`
# - angles close or equal to 0 or np.pi hence null weight in
# :class:`_DOF_estimator_geom`.
# Note that the function angle -> weight is continuous and maximum for an
# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
# The exception is the computation of barycentric coordinates, which is done
# by inversion of the *metric* matrix. In this case, we need to compute a set
# of valid coordinates (1 among numerous possibilities), to ensure point 4).
# We benefit here from the symmetry of metric = J x J.T, which makes it easier
# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
def _safe_inv22_vectorized(M):
"""
Inversion of arrays of (2, 2) matrices, returns 0 for rank-deficient
matrices.
*M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
"""
_api.check_shape((None, 2, 2), M=M)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
# We set delta_inv to 0. in case of a rank deficient matrix; a
# rank-deficient input matrix *M* will lead to a null matrix in output
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
delta_inv = 1./delta
else:
# 'Pathologic' flow.
delta_inv = np.zeros(M.shape[0])
delta_inv[rank2] = 1./delta[rank2]
M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
return M_inv
def _pseudo_inv22sym_vectorized(M):
"""
Inversion of arrays of (2, 2) SYMMETRIC matrices; returns the
(Moore-Penrose) pseudo-inverse for rank-deficient matrices.
In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
In case M is of rank 0, we return the null matrix.
*M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
"""
_api.check_shape((None, 2, 2), M=M)
M_inv = np.empty_like(M)
prod1 = M[:, 0, 0]*M[:, 1, 1]
delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
if np.all(rank2):
# Normal 'optimized' flow.
M_inv[:, 0, 0] = M[:, 1, 1] / delta
M_inv[:, 0, 1] = -M[:, 0, 1] / delta
M_inv[:, 1, 0] = -M[:, 1, 0] / delta
M_inv[:, 1, 1] = M[:, 0, 0] / delta
else:
# 'Pathologic' flow.
# Here we have to deal with 2 sub-cases
# 1) First sub-case: matrices of rank 2:
delta = delta[rank2]
M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
# 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
rank01 = ~rank2
tr = M[rank01, 0, 0] + M[rank01, 1, 1]
tr_zeros = (np.abs(tr) < 1.e-8)
sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
# sq_tr_inv = 1. / tr**2
M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
return M_inv
def _scalar_vectorized(scalar, M):
"""
Scalar product between scalars and matrices.
"""
return scalar[:, np.newaxis, np.newaxis]*M
def _transpose_vectorized(M):
"""
Transposition of an array of matrices *M*.
"""
return np.transpose(M, [0, 2, 1])
def _roll_vectorized(M, roll_indices, axis):
"""
Roll an array of matrices along *axis* (0: rows, 1: columns) according to
an array of indices *roll_indices*.
"""
assert axis in [0, 1]
ndim = M.ndim
assert ndim == 3
ndim_roll = roll_indices.ndim
assert ndim_roll == 1
sh = M.shape
r, c = sh[-2:]
assert sh[0] == roll_indices.shape[0]
vec_indices = np.arange(sh[0], dtype=np.int32)
# Builds the rolled matrix
M_roll = np.empty_like(M)
if axis == 0:
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
else: # 1
for ir in range(r):
for ic in range(c):
M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
return M_roll
def _to_matrix_vectorized(M):
"""
Build an array of matrices from individuals np.arrays of identical shapes.
Parameters
----------
M
ncols-list of nrows-lists of shape sh.
Returns
-------
M_res : np.array of shape (sh, nrow, ncols)
*M_res* satisfies ``M_res[..., i, j] = M[i][j]``.
"""
assert isinstance(M, (tuple, list))
assert all(isinstance(item, (tuple, list)) for item in M)
c_vec = np.asarray([len(item) for item in M])
assert np.all(c_vec-c_vec[0] == 0)
r = len(M)
c = c_vec[0]
M00 = np.asarray(M[0][0])
dt = M00.dtype
sh = [M00.shape[0], r, c]
M_ret = np.empty(sh, dtype=dt)
for irow in range(r):
for icol in range(c):
M_ret[:, irow, icol] = np.asarray(M[irow][icol])
return M_ret
def _extract_submatrices(M, block_indices, block_size, axis):
"""
Extract selected blocks of a matrices *M* depending on parameters
*block_indices* and *block_size*.
Returns the array of extracted matrices *Mres* so that ::
M_res[..., ir, :] = M[(block_indices*block_size+ir), :]
"""
assert block_indices.ndim == 1
assert axis in [0, 1]
r, c = M.shape
if axis == 0:
sh = [block_indices.shape[0], block_size, c]
else: # 1
sh = [block_indices.shape[0], r, block_size]
dt = M.dtype
M_res = np.empty(sh, dtype=dt)
if axis == 0:
for ir in range(block_size):
M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
else: # 1
for ic in range(block_size):
M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
return M_res
| _Sparse_Matrix_coo |
python | pytorch__pytorch | test/test_cuda.py | {
"start": 3217,
"end": 159240
} | class ____(TestCase):
_do_cuda_memory_leak_check = True
_do_cuda_non_default_stream = True
FIFTY_MIL_CYCLES = 50000000
def setUp(self):
super().setUp()
def tearDown(self):
super().tearDown()
@property
def expandable_segments(self):
return EXPANDABLE_SEGMENTS
def test_pinned_memory_with_cudaregister(self):
try:
torch.cuda.memory._set_allocator_settings(
"pinned_use_cuda_host_register:True,pinned_num_register_threads:8"
)
t = torch.ones(20)
self.assertFalse(t.is_pinned())
try:
pinned_t = torch.ones(1 << 21).pin_memory()
self.assertTrue(pinned_t.is_pinned())
pinned_t = torch.ones(1 << 24).pin_memory()
self.assertTrue(pinned_t.is_pinned())
except RuntimeError as e:
# Some GPUs don't support same address space on host and device side
pass
finally:
torch.cuda.memory._set_allocator_settings(
"pinned_use_cuda_host_register:False"
)
def test_pinned_memory_with_cudaregister_multithread(self):
    """Run the cudaHostRegister pinning test concurrently from 4 threads."""
    num_threads = 4
    threads = [
        threading.Thread(target=self.test_pinned_memory_with_cudaregister)
        for _ in range(num_threads)  # fix: loop variable was unused, use _
    ]
    for thread in threads:
        thread.start()
    for thread in threads:
        thread.join()
@serialTest()
def test_host_memory_stats(self):
    """Host (pinned) allocator statistics must track allocations exactly."""

    # Helper functions
    def empty_stats():
        # All-zero baseline for every host-memory stat key checked below.
        return {
            "allocated_bytes.allocated": 0,
            "allocated_bytes.current": 0,
            "allocated_bytes.freed": 0,
            "allocated_bytes.peak": 0,
            "allocations.allocated": 0,
            "allocations.current": 0,
            "allocations.freed": 0,
            "allocations.peak": 0,
            "host_alloc_time.count": 0,
            "host_free_time.count": 0,
            "num_host_alloc": 0,
            "num_host_free": 0,
            "active_bytes.allocated": 0,
            "active_bytes.current": 0,
            "active_bytes.freed": 0,
            "active_bytes.peak": 0,
            "active_requests.allocated": 0,
            "active_requests.current": 0,
            "active_requests.freed": 0,
            "active_requests.peak": 0,
        }

    def check_stats(expected):
        # Compare each expected key against the live host stats; print the
        # mismatch before asserting so failures are easy to diagnose.
        stats = torch.cuda.host_memory_stats()
        for k, v in expected.items():
            if v != stats[k]:
                print(f"key: {k}, expected: {v}, stats: {stats[k]}")
            self.assertEqual(v, stats[k])

    # Setup the test cleanly
    alloc1 = 10
    alloc1_aligned = 16  # allocator rounds sizes up, hence the aligned values
    alloc2 = 20
    alloc2_aligned = 32
    expected = empty_stats()

    # Reset any lingering state
    gc.collect()
    torch._C._host_emptyCache()

    # Check that stats are empty
    check_stats(expected)

    # Make first allocation and check stats
    t1 = torch.ones(alloc1 * 1024, pin_memory=True)
    self.assertTrue(t1.is_pinned())
    for prefix in ["active_requests", "allocations"]:
        for suffix in ["allocated", "current", "peak"]:
            expected[prefix + "." + suffix] += 1
    allocation_size1 = alloc1_aligned * 1024 * 4  # float32 => 4 bytes/elem
    for prefix in ["allocated_bytes", "active_bytes"]:
        for suffix in ["allocated", "current", "peak"]:
            expected[prefix + "." + suffix] += allocation_size1
    expected["num_host_alloc"] += 1
    expected["host_alloc_time.count"] += 1
    check_stats(expected)

    # Make second allocation and check stats
    t2 = torch.ones(alloc2 * 1024, pin_memory=True)
    self.assertTrue(t2.is_pinned())
    for prefix in ["active_requests", "allocations"]:
        for suffix in ["allocated", "current", "peak"]:
            expected[prefix + "." + suffix] += 1
    allocation_size2 = alloc2_aligned * 1024 * 4
    for prefix in ["allocated_bytes", "active_bytes"]:
        for suffix in ["allocated", "current", "peak"]:
            expected[prefix + "." + suffix] += allocation_size2
    expected["num_host_alloc"] += 1
    expected["host_alloc_time.count"] += 1
    check_stats(expected)

    # Empty cache and check stats (t1/t2 are still live, so nothing is freed)
    torch._C._host_emptyCache()
    check_stats(expected)

    # Finally, check the reset of peak and accumulated stats
    torch.cuda.reset_peak_host_memory_stats()
    torch.cuda.reset_accumulated_host_memory_stats()
    expected = empty_stats()
def test_pinned_memory_empty_cache(self):
    """Pinned allocations can be released via _host_emptyCache under both
    allocator modes (cudaHostRegister on and off); settings are restored."""
    try:
        for alloc_settings in (True, False):
            torch.cuda.memory._set_allocator_settings(
                f"pinned_use_cuda_host_register:{alloc_settings}"
            )
            try:
                t = torch.ones(1024 * 1024, pin_memory=True)
                self.assertTrue(t.is_pinned())
                del t
                torch._C._host_emptyCache()
            except RuntimeError:  # fix: exception was bound to an unused name
                # Some GPUs don't support same address space on host and device side
                pass
    finally:
        torch.cuda.memory._set_allocator_settings(
            "pinned_use_cuda_host_register:False"
        )
def test_pinned_memory_use_background_threads(self):
    """Background-thread pinning must work in a fresh process."""
    # Spawn a fresh interpreter so the allocator setting is applied before
    # any pinned allocation has happened in that process.
    script = """
import torch
torch.cuda.memory._set_allocator_settings(
    f"pinned_use_background_threads:True"
)
t = torch.ones(1024 * 1024, pin_memory=True)
print(t.is_pinned())
"""
    proc = subprocess.run([sys.executable, "-c", script], capture_output=True)
    # Only the exit status is checked; the printed pin status is informational.
    self.assertEqual(proc.returncode, 0)
def test_cudart_register(self):
    """cudaHostRegister/cudaHostUnregister round-trip toggles pinned status."""
    tensor = torch.ones(20)
    self.assertFalse(tensor.is_pinned())
    cudart = torch.cuda.cudart()
    nbytes = tensor.numel() * tensor.element_size()
    status = cudart.cudaHostRegister(tensor.data_ptr(), nbytes, 0)
    self.assertEqual(status, 0)
    self.assertTrue(tensor.is_pinned())
    status = cudart.cudaHostUnregister(tensor.data_ptr())
    self.assertEqual(status, 0)
    self.assertFalse(tensor.is_pinned())
def test_memory_allocation(self):
    """caching_allocator_alloc/delete must raise and then restore memory_allocated."""
    gc.collect()
    torch.cuda.empty_cache()
    mem = None
    size = 1
    prev = 0
    try:
        prev = torch.cuda.memory_allocated()
        mem = torch.cuda.caching_allocator_alloc(size)
        self.assertGreater(torch.cuda.memory_allocated(), prev)
    finally:
        # Free only if the alloc succeeded; the counter must return to its
        # pre-allocation value.
        if mem is not None:
            torch.cuda.caching_allocator_delete(mem)
            self.assertEqual(torch.cuda.memory_allocated(), prev)
def test_memory_stats(self):
    """torch.accelerator memory stats track a 1kB allocation and its free."""
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
    torch.cuda.reset_accumulated_memory_stats()
    prev_allocated = torch.accelerator.memory_allocated()
    prev_reserved = torch.accelerator.memory_reserved()
    prev_max_allocated = torch.accelerator.max_memory_allocated()
    prev_max_reserved = torch.accelerator.max_memory_reserved()
    # After a peak reset, current and max counters must agree.
    self.assertEqual(prev_allocated, prev_max_allocated)
    self.assertEqual(prev_reserved, prev_max_reserved)
    # Activate 1kB memory (256 float32 elements)
    prev_active_current = torch.accelerator.memory_stats()[
        "active_bytes.all.current"
    ]
    tmp = torch.randn(256, device="cuda")
    # Detect if the current active memory is 1kB
    self.assertEqual(
        torch.accelerator.memory_stats()["active_bytes.all.current"],
        1024 + prev_active_current,
    )
    self.assertEqual(torch.accelerator.memory_stats()["active_bytes.all.freed"], 0)
    del tmp
    gc.collect()
    torch.accelerator.empty_cache()
    # Freeing must return active bytes to the baseline and count 1kB freed.
    self.assertEqual(
        torch.accelerator.memory_stats()["active_bytes.all.current"],
        prev_active_current,
    )
    self.assertEqual(
        torch.accelerator.memory_stats()["active_bytes.all.freed"], 1024
    )
    torch.accelerator.reset_peak_memory_stats()
    self.assertEqual(torch.accelerator.max_memory_allocated(), prev_max_allocated)
    self.assertEqual(torch.accelerator.max_memory_reserved(), prev_max_reserved)
def test_check_error(self):
    """check_error is a no-op for cudaSuccess and raises CudaError otherwise."""
    # Assert this call doesn't raise.
    torch.cuda.check_error(0)
    # Error code 2 is an out-of-memory error on both CUDA and ROCm.
    oom_pattern = "out of memory|hipErrorOutOfMemory"
    with self.assertRaisesRegex(torch.cuda.CudaError, oom_pattern):
        torch.cuda.check_error(2)
def test_cuda_get_device_name(self):
    """get_device_name(None) and get_device_name() resolve to the current device."""
    current = torch.cuda.current_device()
    expected = torch.cuda.get_device_name(current)
    # Passing None must behave like passing the current device index.
    self.assertEqual(expected, torch.cuda.get_device_name(None))
    # Omitting the argument must do the same.
    self.assertEqual(expected, torch.cuda.get_device_name())
def test_cuda_get_device_capability(self):
    """get_device_capability(None) and get_device_capability() match the current device."""
    current = torch.cuda.current_device()
    expected = torch.cuda.get_device_capability(current)
    # Passing None must behave like passing the current device index.
    self.assertEqual(expected, torch.cuda.get_device_capability(None))
    # Omitting the argument must do the same.
    self.assertEqual(expected, torch.cuda.get_device_capability())
def test_cuda_get_device_properties(self):
    """get_device_properties(None) and get_device_properties() match the current device."""
    current = torch.cuda.current_device()
    expected = torch.cuda.get_device_properties(current)
    # Passing None must behave like passing the current device index.
    self.assertEqual(expected, torch.cuda.get_device_properties(None))
    # Omitting the argument must do the same.
    self.assertEqual(expected, torch.cuda.get_device_properties())
@unittest.skipIf(
    IS_JETSON, "oom reporting has issues on jetson igx due to partial nvml support"
)
def test_out_of_memory(self):
    """Huge allocations raise informative OOM errors and leave the device usable."""
    tensor = torch.zeros(1024, device="cuda")
    # The async allocator reports OOM with different wording.
    oom_regex = (
        "would exceed allowed memory"
        if TEST_CUDAMALLOCASYNC
        else f"Tried to allocate 800000000.00 GiB. GPU {tensor.device.index} has a total capacity of"
    )
    with self.assertRaisesRegex(RuntimeError, oom_regex):
        torch.empty(1024 * 1024 * 1024 * 800000000, dtype=torch.int8, device="cuda")
    # Requests above 1EB are rejected with a dedicated message.
    with self.assertRaisesRegex(
        RuntimeError, "Tried to allocate more than 1EB memory"
    ):
        torch.empty(
            1024 * 1024 * 1024 * 8000000000, dtype=torch.int8, device="cuda"
        )
    # ensure out of memory error doesn't disturb subsequent kernel
    tensor.fill_(1)
    self.assertTrue((tensor == 1).all())
@unittest.skipIf(
    TEST_CUDAMALLOCASYNC or IS_JETSON, "Segmentation fault (core dumped)"
)
@serialTest()
def test_out_of_memory_retry(self):
    """After an OOM, freeing memory must let the same-sized allocation succeed."""
    torch.cuda.empty_cache()
    total_memory = torch.cuda.get_device_properties(0).total_memory
    oom_regex = (
        "would exceed allowed memory"
        if TEST_CUDAMALLOCASYNC
        else "Tried to allocate"
    )
    # Two half-device allocations cannot coexist.
    size = int(total_memory * 0.5)
    a = torch.empty(size, dtype=torch.int8, device="cuda")
    with self.assertRaisesRegex(RuntimeError, oom_regex):
        b = torch.empty(size, dtype=torch.int8, device="cuda")
    del a
    # Once `a` is freed the retry must succeed.
    b = torch.empty(size, dtype=torch.int8, device="cuda")
    del b
    # We used a lot of memory here, clean up so we don't affect other tests too much
    torch.cuda.empty_cache()
    torch.cuda.reset_peak_memory_stats()
@serialTest()
@unittest.skipIf(
    IS_JETSON, "oom reporting has issues on jetson igx due to partial nvml support"
)
def test_set_per_process_memory_fraction(self):
    """set_per_process_memory_fraction validates input and caps allocations."""
    orig = torch.cuda.get_per_process_memory_fraction(0)
    torch.cuda.reset_peak_memory_stats(0)
    try:
        # test invalid fraction value.
        with self.assertRaisesRegex(TypeError, "Invalid type"):
            torch.cuda.set_per_process_memory_fraction(1)
        with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
            torch.cuda.set_per_process_memory_fraction(-0.1)
        with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
            torch.cuda.set_per_process_memory_fraction(2.0)

        tensor = torch.zeros(1024, device="cuda")
        torch.cuda.empty_cache()
        total_memory = torch.cuda.get_device_properties(0).total_memory
        torch.cuda.set_per_process_memory_fraction(0.5, 0)

        # test 0.499 allocation is ok.
        application = int(total_memory * 0.499) - torch.cuda.max_memory_reserved()
        tmp_tensor = torch.empty(application, dtype=torch.int8, device="cuda")
        del tmp_tensor
        torch.cuda.empty_cache()

        application = int(total_memory * 0.5)
        # it will get OOM when try to allocate more than half memory.
        oom_regex = (
            "would exceed allowed memory"
            if TEST_CUDAMALLOCASYNC
            else "out of memory"
        )
        with self.assertRaisesRegex(RuntimeError, oom_regex):
            torch.empty(application, dtype=torch.int8, device="cuda")

        # ensure out of memory error doesn't disturb subsequent kernel
        tensor.fill_(1)
        self.assertTrue((tensor == 1).all())
    finally:
        # Always restore the original fraction so later tests are unaffected.
        torch.cuda.set_per_process_memory_fraction(orig, 0)
@unittest.skipIf(
    IS_JETSON, "oom reporting has issues on jetson igx due to partial nvml support"
)
@serialTest()
def test_get_per_process_memory_fraction(self):
    """get_per_process_memory_fraction round-trips values set via the setter."""
    # get the initial memory fraction
    init_fraction = torch.cuda.get_per_process_memory_fraction()

    # set and get the limiting cases
    torch.cuda.set_per_process_memory_fraction(1.0)
    self.assertEqual(torch.cuda.get_per_process_memory_fraction(), 1.0)
    torch.cuda.set_per_process_memory_fraction(0.0)
    self.assertEqual(torch.cuda.get_per_process_memory_fraction(), 0.0)

    # test a few random cases
    for val in torch.rand(3):
        torch.cuda.set_per_process_memory_fraction(float(val))
        self.assertEqual(torch.cuda.get_per_process_memory_fraction(), float(val))

    # restore the initial memory fraction
    torch.cuda.set_per_process_memory_fraction(init_fraction)
def test_uuid(self):
    """Device UUID exposes canonical textual (36 chars) and binary (16 bytes) forms."""
    device_uuid = torch.cuda.get_device_properties(0).uuid
    # xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
    self.assertEqual(36, len(str(device_uuid)))
    self.assertEqual(16, len(device_uuid.bytes))
def test_copy_non_blocking(self):
    """Non-blocking copies between pinned host and device memory are correct."""

    def _test_copy_non_blocking(a, b):
        # Use an event to make the async copy observable before comparing.
        event = torch.cuda.Event()
        a.copy_(b, non_blocking=True)
        event.record()
        event.synchronize()
        self.assertEqual(a, b)

    # 10MB copies
    x = torch.ones(10000000, dtype=torch.uint8).cuda()
    y = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
    _test_copy_non_blocking(x, y)

    x = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
    y = torch.ones(10000000, dtype=torch.uint8).cuda()
    _test_copy_non_blocking(x, y)

    # Test the case where the pinned data_ptr is not equal to the storage data_ptr.
    x_base = torch.zeros(10000000, dtype=torch.uint8).pin_memory()
    x = x_base[1:]
    self.assertTrue(x.is_pinned())
    self.assertTrue(x_base.is_pinned())
    self.assertNotEqual(x_base.data_ptr(), x.data_ptr())
    self.assertEqual(x_base.storage().data_ptr(), x.storage().data_ptr())
    y = torch.ones(10000000 - 1, dtype=torch.uint8).cuda()
    _test_copy_non_blocking(x, y)
def test_copy_non_blocking_type_conversion(self):
    """Chained non-blocking copies with a dtype change still produce the right value."""
    a = torch.ones(1, device="cuda")
    b = torch.zeros(1, device="cpu", pin_memory=True)
    c = torch.empty(1, device="cuda", dtype=torch.long)
    # Delay the stream so the copies really are in flight when queued.
    torch.cuda._sleep(int(100 * get_cycles_per_ms()))
    b.copy_(a, non_blocking=True)
    c.copy_(b, non_blocking=True)
    self.assertEqual(a, c, exact_dtype=False)
@serialTest()
def test_to_non_blocking(self):
    """.to(..., non_blocking=True) must not synchronize; blocking .to must."""
    stream = torch.cuda.current_stream()

    def _test_to_non_blocking(a, non_blocking, dst):
        torch.cuda.synchronize()
        # Pushes an 0.1 second spin to stream so if the copy is non blocking,
        # stream will almost surely be active when we query().
        torch.cuda._sleep(int(100 * get_cycles_per_ms()))
        b = a.to(device=dst, non_blocking=non_blocking)
        self.assertEqual(stream.query(), not non_blocking)
        stream.synchronize()
        self.assertEqual(a, b)
        # A non-blocking device->cpu copy lands in pinned memory.
        self.assertTrue(b.is_pinned() == (non_blocking and dst == "cpu"))

    for dst, try_non_blocking in product(("cuda", "cpu"), (True, False)):
        # Creates source on the opposite device from destination.
        src = torch.randn(
            1000000,
            device="cuda" if dst == "cpu" else "cpu",
            pin_memory=dst == "cuda",
        )
        _test_to_non_blocking(src, try_non_blocking, dst)
def test_to_cpu_blocking_by_default(self):
    """.to("cpu") without non_blocking must synchronize and not pin the result."""
    src = torch.randn(1000000, device="cuda")
    torch.cuda.synchronize()
    # Keep the stream busy so a non-blocking copy would leave it active.
    torch.cuda._sleep(int(100 * get_cycles_per_ms()))
    dst = src.to(device="cpu")
    self.assertEqual(torch.cuda.current_stream().query(), True)
    self.assertEqual(src, dst)
    self.assertFalse(dst.is_pinned())
def test_serialization_array_with_storage(self):
    """Saving/loading CUDA tensors plus a raw storage preserves aliasing and types."""
    x = torch.randn(5, 5).cuda()
    y = torch.IntTensor(2, 5).fill_(0).cuda()
    # x appears twice so aliasing must survive the round-trip.
    q = [x, y, x, y.storage()]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(q, f)
        f.seek(0)
        q_copy = torch.load(f)
    self.assertEqual(q_copy, q, atol=0, rtol=0)
    q_copy[0].fill_(5)
    # Filling one alias must be visible through the other.
    self.assertEqual(q_copy[0], q_copy[2], atol=0, rtol=0)
    self.assertTrue(isinstance(q_copy[0], torch.cuda.FloatTensor))
    self.assertTrue(isinstance(q_copy[1], torch.cuda.IntTensor))
    self.assertTrue(isinstance(q_copy[2], torch.cuda.FloatTensor))
    self.assertTrue(isinstance(q_copy[3], torch.storage.TypedStorage))
    self.assertTrue(isinstance(q_copy[3]._untyped_storage, torch.UntypedStorage))
    q_copy[1].fill_(10)
    # The storage aliases the int tensor, so it sees the fill too.
    self.assertEqual(q_copy[3], torch.cuda.IntStorage(10).fill_(10))
@unittest.skipIf(IS_FBCODE or IS_SANDCASTLE, "Does not work in fbcode yet")
@setBlasBackendsToDefaultFinally
def test_preferred_blas_library_settings(self):
    """preferred_blas_library handles defaults, valid names, bad input and env overrides."""

    def _check_default():
        default = torch.backends.cuda.preferred_blas_library()
        if torch.version.cuda:
            # CUDA logic is easy, it's always cublas
            self.assertTrue(default == torch._C._BlasBackend.Cublas)
        else:
            # ROCm logic is less so, it's cublaslt for some Instinct, cublas for all else
            gcn_arch = str(
                torch.cuda.get_device_properties(0).gcnArchName.split(":", 1)[0]
            )
            if gcn_arch in ["gfx90a", "gfx942", "gfx950"]:
                self.assertTrue(default == torch._C._BlasBackend.Cublaslt)
            else:
                self.assertTrue(default == torch._C._BlasBackend.Cublas)

    _check_default()
    # "Default" can be set but is immediately reset internally to the actual default value.
    self.assertTrue(
        torch.backends.cuda.preferred_blas_library("default")
        != torch._C._BlasBackend.Default
    )
    _check_default()
    self.assertTrue(
        torch.backends.cuda.preferred_blas_library("cublas")
        == torch._C._BlasBackend.Cublas
    )
    # "hipblas" is an alias that maps onto the cublas backend.
    self.assertTrue(
        torch.backends.cuda.preferred_blas_library("hipblas")
        == torch._C._BlasBackend.Cublas
    )
    # check bad strings
    with self.assertRaisesRegex(
        RuntimeError,
        "Unknown input value. Choose from: default, cublas, hipblas, cublaslt, hipblaslt, ck.",
    ):
        torch.backends.cuda.preferred_blas_library("unknown")
    # check bad input type
    with self.assertRaisesRegex(RuntimeError, "Unknown input value type."):
        torch.backends.cuda.preferred_blas_library(1.0)
    # check env var override
    custom_envs = [
        {"TORCH_BLAS_PREFER_CUBLASLT": "1"},
        {"TORCH_BLAS_PREFER_HIPBLASLT": "1"},
    ]
    test_script = "import torch;print(torch.backends.cuda.preferred_blas_library())"
    for env_config in custom_envs:
        env = os.environ.copy()
        for key, value in env_config.items():
            env[key] = value
        r = (
            subprocess.check_output([sys.executable, "-c", test_script], env=env)
            .decode("ascii")
            .strip()
        )
        self.assertEqual("_BlasBackend.Cublaslt", r)
@unittest.skipIf(TEST_CUDAMALLOCASYNC, "temporarily disabled for async")
@setBlasBackendsToDefaultFinally
def test_cublas_workspace_explicit_allocation(self):
    """CUBLAS_WORKSPACE_CONFIG controls the size of the cuBLAS workspace allocation."""
    torch.backends.cuda.preferred_blas_library("cublas")
    a = torch.randn(7, 7, device="cuda", requires_grad=False)
    if torch.version.hip:
        default_workspace_size = 1024 * 32 * 1024  # :1024:32 32MiB
        # different size (128 MiB) expected on MI300 GPU
        gcn_arch = str(
            torch.cuda.get_device_properties(0).gcnArchName.split(":", 1)[0]
        )
        if "gfx94" in gcn_arch or "gfx95" in gcn_arch:
            default_workspace_size = 1024 * 128 * 1024  # :1024:128
    else:
        default_workspace_size = (
            4096 * 2 * 1024 + 16 * 8 * 1024
        )  # :4096:2:16:8 8MiB
        # different size (32 MiB) expected on Hopper GPU
        if torch.cuda.get_device_capability() == (9, 0):
            default_workspace_size = 4096 * 8 * 1024

    def check_workspace_size(inp):
        # Measure the allocator delta caused by one matmul's workspace.
        torch._C._cuda_clearCublasWorkspaces()
        start = torch.cuda.memory_stats()["active_bytes.all.allocated"]
        with torch.no_grad():
            torch.matmul(inp, inp)
        finish = torch.cuda.memory_stats()["active_bytes.all.allocated"]
        return finish - start

    # check default
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ""
    self.assertTrue(abs(check_workspace_size(a) - default_workspace_size) < 524288)
    # check default with bad user config
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = "-1"
    self.assertTrue(abs(check_workspace_size(a) - default_workspace_size) < 524288)
    # check valid config
    os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":128:8:64:16:32:32"
    self.assertTrue(abs(check_workspace_size(a) - (3072 * 1024)) < 524288)
    torch._C._cuda_clearCublasWorkspaces()
def test_cublas_allow_tf32_get_set(self):
    """Python-level matmul.allow_tf32 flag stays in sync with the C++ getter."""
    env_override = (
        "TORCH_ALLOW_TF32_CUBLAS_OVERRIDE" in os.environ
        and int(os.environ["TORCH_ALLOW_TF32_CUBLAS_OVERRIDE"])
    )
    if env_override:
        # The override pins TF32 on; the flag cannot be toggled meaningfully.
        self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
        return
    saved = torch.backends.cuda.matmul.allow_tf32
    self.assertEqual(torch._C._get_cublas_allow_tf32(), saved)
    torch.backends.cuda.matmul.allow_tf32 = not saved
    self.assertEqual(torch._C._get_cublas_allow_tf32(), not saved)
    torch.backends.cuda.matmul.allow_tf32 = saved
def test_float32_matmul_precision_get_set(self):
    """float32 matmul precision levels map onto the allow_tf32 flag consistently."""
    orig = torch.get_float32_matmul_precision()
    skip_tf32_cublas = "TORCH_ALLOW_TF32_CUBLAS_OVERRIDE" in os.environ and int(
        os.environ["TORCH_ALLOW_TF32_CUBLAS_OVERRIDE"]
    )
    # this is really just checking that the environment variable is respected during testing
    # and not overwritten by another function that doesn't revert it to the initial value
    if not skip_tf32_cublas:
        self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
        self.assertEqual(torch.get_float32_matmul_precision(), "highest")
    else:
        self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
    # "medium" and "high" both enable TF32.
    for p in ("medium", "high"):
        torch.set_float32_matmul_precision(p)
        self.assertEqual(torch.get_float32_matmul_precision(), p)
        self.assertTrue(torch.backends.cuda.matmul.allow_tf32)
    torch.set_float32_matmul_precision("highest")
    self.assertEqual(torch.get_float32_matmul_precision(), "highest")
    self.assertFalse(torch.backends.cuda.matmul.allow_tf32)
    torch.set_float32_matmul_precision(orig)
def test_cublas_allow_fp16_reduced_precision_reduction_get_set(self):
    """fp16 reduced-precision-reduction flag (and its split-k pair) round-trips."""
    orig = torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction
    orig_splitk = (
        torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction_split_k
    )
    self.assertEqual(
        torch._C._get_cublas_allow_fp16_reduced_precision_reduction(),
        (orig, orig_splitk),
    )
    # Assigning a single bool resets split-k to True.
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = not orig
    self.assertEqual(
        torch._C._get_cublas_allow_fp16_reduced_precision_reduction(),
        (not orig, True),
    )
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = (
        False,
        False,
    )
    self.assertEqual(
        torch._C._get_cublas_allow_fp16_reduced_precision_reduction(),
        (False, False),
    )
    # allow_splitk=False is only valid together with allow=False.
    with self.assertRaisesRegex(RuntimeError, "allow_splitk=False"):
        torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = (
            True,
            False,
        )
    torch.backends.cuda.matmul.allow_fp16_reduced_precision_reduction = (
        orig,
        orig_splitk,
    )
def test_cublas_allow_bf16_reduced_precision_reduction_get_set(self):
    """bf16 reduced-precision-reduction flag (and its split-k pair) round-trips."""
    orig = torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction
    orig_splitk = (
        torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction_split_k
    )
    self.assertEqual(
        torch._C._get_cublas_allow_bf16_reduced_precision_reduction(),
        (orig, orig_splitk),
    )
    # Assigning a single bool resets split-k to True.
    torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = not orig
    self.assertEqual(
        torch._C._get_cublas_allow_bf16_reduced_precision_reduction(),
        (not orig, True),
    )
    torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = (
        False,
        False,
    )
    self.assertEqual(
        torch._C._get_cublas_allow_bf16_reduced_precision_reduction(),
        (False, False),
    )
    # allow_splitk=False is only valid together with allow=False.
    with self.assertRaisesRegex(RuntimeError, "allow_splitk=False"):
        torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = (
            True,
            False,
        )
    torch.backends.cuda.matmul.allow_bf16_reduced_precision_reduction = (
        orig,
        orig_splitk,
    )
def test_cublas_allow_fp16_accumulation_get_set(self):
    """allow_fp16_accumulation round-trips through the C++ getter."""
    saved = torch.backends.cuda.matmul.allow_fp16_accumulation
    self.assertEqual(torch._C._get_cublas_allow_fp16_accumulation(), saved)
    torch.backends.cuda.matmul.allow_fp16_accumulation = not saved
    self.assertEqual(torch._C._get_cublas_allow_fp16_accumulation(), not saved)
    torch.backends.cuda.matmul.allow_fp16_accumulation = saved
def test_cudnn_allow_tf32_get_set(self):
    """The cudnn.flags context manager controls the allow_tf32 flag."""
    unchanged = dict(enabled=None, benchmark=None, deterministic=None)
    with torch.backends.cudnn.flags(allow_tf32=False, **unchanged):
        self.assertFalse(torch.backends.cudnn.allow_tf32)
    with torch.backends.cudnn.flags(allow_tf32=True, **unchanged):
        self.assertTrue(torch.backends.cudnn.allow_tf32)
@recover_orig_fp32_precision
def test_fp32_precision_with_tf32(self):
    """Legacy allow_tf32 flag maps onto the new per-op fp32_precision settings."""
    with torch.backends.cudnn.flags(
        enabled=None,
        benchmark=None,
        benchmark_limit=None,
        deterministic=None,
        allow_tf32=True,
        fp32_precision="none",
    ):
        self.assertEqual(torch.backends.cudnn.conv.fp32_precision, "tf32")
        self.assertEqual(torch.backends.cudnn.rnn.fp32_precision, "tf32")

    with torch.backends.cudnn.flags(
        enabled=None,
        benchmark=None,
        benchmark_limit=None,
        deterministic=None,
        allow_tf32=False,
        fp32_precision="none",
    ):
        self.assertEqual(torch.backends.cudnn.conv.fp32_precision, "none")
        self.assertEqual(torch.backends.cudnn.rnn.fp32_precision, "none")
@recover_orig_fp32_precision
def test_fp32_precision_with_float32_matmul_precision(self):
    """set_float32_matmul_precision maps onto matmul.fp32_precision."""
    # Insertion order matters: exercise highest -> high -> medium.
    expected = {"highest": "ieee", "high": "tf32", "medium": "tf32"}
    for level, precision in expected.items():
        torch.set_float32_matmul_precision(level)
        self.assertEqual(torch.backends.cuda.matmul.fp32_precision, precision)
@recover_orig_fp32_precision
def test_invalid_status_for_legacy_api(self):
    """Reading legacy flags after mixed use of old and new precision APIs must raise."""
    # Put conv/rnn into inconsistent new-API states, then read the legacy flag.
    torch.backends.cudnn.conv.fp32_precision = "none"
    torch.backends.cudnn.rnn.fp32_precision = "tf32"
    with self.assertRaisesRegex(RuntimeError, "mix of the legacy and new APIs"):
        print(torch.backends.cudnn.allow_tf32)

    # Same for matmul: legacy setter followed by new-API setter.
    torch.set_float32_matmul_precision("highest")
    torch.backends.cuda.matmul.fp32_precision = "tf32"
    with self.assertRaisesRegex(RuntimeError, "mix of the legacy and new APIs"):
        print(torch.get_float32_matmul_precision())

    if not TEST_WITH_ROCM:
        with self.assertRaisesRegex(RuntimeError, "mix of the legacy and new APIs"):
            print(torch.backends.cuda.matmul.allow_tf32)
def test_type_conversions(self):
    """dtype/device conversions produce the matching legacy tensor/storage classes."""
    x = torch.randn(5, 5)
    self.assertIsInstance(x.float(), torch.FloatTensor)
    self.assertIsInstance(x.cuda().double(), torch.cuda.DoubleTensor)
    self.assertIsInstance(x.cuda().float(), torch.cuda.FloatTensor)
    self.assertIsInstance(x.cuda().float().cpu(), torch.FloatTensor)
    self.assertIsInstance(x.cuda().float().cpu().int(), torch.IntTensor)

    # The same conversions must hold for the tensor's storage.
    y = x.storage()
    self.assertIsInstance(y.float(), torch.FloatStorage)
    self.assertIsInstance(y.cuda().double(), torch.cuda.DoubleStorage)
    self.assertIsInstance(y.cuda().float(), torch.cuda.FloatStorage)
    self.assertIsInstance(y.cuda().float().cpu(), torch.FloatStorage)
    self.assertIsInstance(y.cuda().float().cpu().int(), torch.IntStorage)
@unittest.skip("was disabled due to not enough memory, but actually it always fail")
def test_arithmetic_large_tensor(self):
    """In-place arithmetic on a 2**30-element CUDA tensor (currently skipped)."""
    x = torch.empty(2**30, device="cuda")

    x.fill_(1)
    self.assertEqual(x.sum(), 2**30)

    x += 1
    self.assertEqual(x.sum(), 2**31)

    x.fill_(1)
    x -= 0.5
    self.assertEqual(x.sum(), 2**29)

    x.fill_(1)
    x *= 2
    self.assertEqual(x.sum(), 2**31)

    x.fill_(1)
    x /= 2
    self.assertEqual(x.sum(), 2**29)
def test_gather_bool(self):
    """torch.gather works on boolean CUDA tensors."""
    src = torch.tensor([[False, True], [True, True]], device="cuda")
    index = torch.tensor([[0, 0], [1, 0]], device="cuda")
    expected = torch.tensor([[False, False], [True, True]], device="cuda")
    self.assertEqual(torch.gather(src, 1, index), expected)
def test_torch_manual_seed_seeds_cuda_devices(self):
    """torch.manual_seed must also seed the CUDA RNG reproducibly."""
    with freeze_rng_state():
        x = torch.zeros(4, 4).float().cuda()
        torch.manual_seed(2)
        self.assertEqual(torch.cuda.initial_seed(), 2)
        x.uniform_()
        # Re-seeding must reproduce the identical random draw.
        torch.manual_seed(2)
        y = x.clone().uniform_()
        self.assertEqual(x, y)
        self.assertEqual(torch.cuda.initial_seed(), 2)
def test_manual_seed(self):
    """torch.cuda.manual_seed makes uniform_ and bernoulli reproducible."""
    with freeze_rng_state():
        x = torch.zeros(4, 4).float().cuda()
        torch.cuda.manual_seed(2)
        self.assertEqual(torch.cuda.initial_seed(), 2)
        x.uniform_()
        a = torch.bernoulli(torch.full_like(x, 0.5))
        # Re-seeding must reproduce both draws in the same order.
        torch.cuda.manual_seed(2)
        y = x.clone().uniform_()
        b = torch.bernoulli(torch.full_like(x, 0.5))
        self.assertEqual(x, y)
        self.assertEqual(a, b)
        self.assertEqual(torch.cuda.initial_seed(), 2)
def test_specify_improper_device_name(self):
    """Loading with a malformed device string like "cuda0" must raise."""
    with tempfile.TemporaryDirectory() as tmpdir:
        fname = os.path.join(tmpdir, "tempfile.pt")
        # The save succeeds; torch.load(..., "cuda0") triggers the error
        # (correct spelling would be "cuda:0").
        with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
            torch.save(
                [torch.nn.Parameter(torch.randn(10, 10))],
                fname,
                _use_new_zipfile_serialization=True,
            )
            torch.load(fname, "cuda0")
def test_get_device_index(self):
    """_get_device_index rejects malformed device strings and CPU devices."""
    from torch.cuda._utils import _get_device_index

    with self.assertRaisesRegex(RuntimeError, "Invalid device string"):
        _get_device_index("cuda0", optional=True)

    with self.assertRaisesRegex(ValueError, "Expected a cuda device"):
        _get_device_index(torch.device("cpu"), optional=True)
def test_serialization_array_with_empty(self):
    """Saving/loading a list containing an empty CUDA tensor preserves type and device."""
    x = [torch.randn(4, 4).cuda(), torch.cuda.FloatTensor()]
    with tempfile.NamedTemporaryFile() as f:
        torch.save(x, f)
        f.seek(0)
        x_copy = torch.load(f)
    for original, copy in zip(x, x_copy):
        self.assertEqual(copy, original)
        self.assertIs(type(copy), type(original))
        self.assertEqual(copy.get_device(), original.get_device())
@skipCUDANonDefaultStreamIf(True)
def test_streams(self):
    """Default vs user stream identity, handles, and query/synchronize behavior."""
    default_stream = torch.cuda.current_stream()
    user_stream = torch.cuda.Stream()
    self.assertEqual(torch.cuda.current_stream(), default_stream)
    self.assertNotEqual(default_stream, user_stream)
    # The default stream has the null (0) raw handle; user streams do not.
    self.assertEqual(default_stream.cuda_stream, 0)
    self.assertNotEqual(user_stream.cuda_stream, 0)
    with torch.cuda.stream(user_stream):
        self.assertEqual(torch.cuda.current_stream(), user_stream)
    self.assertTrue(user_stream.query())
    tensor1 = torch.ByteTensor(5).pin_memory()
    default_stream.synchronize()
    self.assertTrue(default_stream.query())
def test_stream_event_repr(self):
    """repr() of streams and events names their classes, before and after recording."""
    stream = torch.cuda.current_stream()
    self.assertIn("torch.cuda.Stream", repr(stream))
    event = torch.cuda.Event()
    self.assertIn("torch.cuda.Event", repr(event))
    stream.record_event(event)
    # Recording must not break the event's repr.
    self.assertIn("torch.cuda.Event", repr(event))
def test_cuda_stream_protocol(self):
    """Streams implement the __cuda_stream__ interchange protocol (version, handle)."""
    stream = torch.cuda.Stream()
    self.assertTrue(hasattr(stream, "__cuda_stream__"))
    result = stream.__cuda_stream__()
    self.assertIsInstance(result, tuple)
    self.assertEqual(len(result), 2)
    self.assertEqual(result[0], 0)  # Protocol version
    self.assertEqual(result[1], stream.cuda_stream)  # Stream handle

    # An ExternalStream wrapping the same handle reports the same tuple.
    external_stream = torch.cuda.ExternalStream(stream.cuda_stream)
    external_result = external_stream.__cuda_stream__()
    self.assertEqual(external_result[0], 0)
    self.assertEqual(external_result[1], external_stream.cuda_stream)
def test_events(self):
    """Event record/query/synchronize/elapsed_time, plus lazy event creation."""
    stream = torch.cuda.current_stream()
    event = torch.cuda.Event(enable_timing=True)
    self.assertTrue(event.query())
    start_event = torch.cuda.Event(enable_timing=True)
    stream.record_event(start_event)
    # Keep the stream busy so the second event is still pending when queried.
    torch.cuda._sleep(int(50 * get_cycles_per_ms()))
    stream.record_event(event)
    self.assertFalse(event.query())
    event.synchronize()
    self.assertTrue(event.query())
    self.assertGreater(start_event.elapsed_time(event), 0)

    # The underlying cudaEvent is created lazily on first record().
    event = torch.cuda.Event(enable_timing=True)
    self.assertEqual(event.cuda_event, 0)
    self.assertEqual(event.event_id, 0)
    event.record()
    self.assertNotEqual(event.cuda_event, 0)
    self.assertNotEqual(event.event_id, 0)
    self.assertEqual(event.cuda_event, event.event_id)
def test_events_elapsedtime(self):
    """elapsed_time raises unless both events have timing enabled and were recorded."""
    event1 = torch.cuda.Event(enable_timing=False)
    event2 = torch.cuda.Event(enable_timing=False)
    with self.assertRaisesRegex(
        ValueError,
        "Both events must be created with argument 'enable_timing=True'",
    ):
        event1.elapsed_time(event2)

    # Timing enabled but never recorded is also an error.
    event1 = torch.cuda.Event(enable_timing=True)
    event2 = torch.cuda.Event(enable_timing=True)
    with self.assertRaisesRegex(
        ValueError, "Both events must be recorded before calculating elapsed time"
    ):
        event1.elapsed_time(event2)

    # check default value of enable_timing: False
    event1 = torch.cuda.Event()
    event2 = torch.cuda.Event()
    with self.assertRaisesRegex(
        ValueError,
        "Both events must be created with argument 'enable_timing=True'",
    ):
        event1.elapsed_time(event2)
def test_generic_stream_event(self):
    """Generic torch.Stream/torch.Event interoperate with their torch.cuda subclasses."""
    stream = torch.Stream("cuda")
    self.assertEqual(stream.device_index, torch.cuda.current_device())
    # Rebuild the same stream through the CUDA-specific class.
    cuda_stream = torch.cuda.Stream(
        stream_id=stream.stream_id,
        device_index=stream.device_index,
        device_type=stream.device_type,
    )
    self.assertIsInstance(cuda_stream, torch.Stream)
    self.assertTrue(issubclass(type(cuda_stream), torch.Stream))
    self.assertTrue(torch.Stream in type(cuda_stream).mro())
    self.assertEqual(stream.stream_id, cuda_stream.stream_id)
    self.assertNotEqual(stream.stream_id, torch.cuda.current_stream().stream_id)

    event1 = torch.Event("cuda", enable_timing=True)
    event2 = torch.Event("cuda", enable_timing=True)
    self.assertEqual(event1.event_id, 0)  # event not yet recorded

    a = torch.randn(1000)
    b = torch.randn(1000)
    with torch.cuda.stream(cuda_stream):
        a_cuda = a.to("cuda", non_blocking=True)
        b_cuda = b.to("cuda", non_blocking=True)
        self.assertEqual(stream.stream_id, torch.cuda.current_stream().stream_id)
    event1.record(stream)
    event1.synchronize()
    self.assertTrue(event1.query())
    c_cuda = a_cuda + b_cuda
    event2.record()
    event2.synchronize()
    self.assertTrue(event2.query())
    self.assertNotEqual(event1.event_id, event2.event_id)
    self.assertEqual(c_cuda.cpu(), a + b)
    self.assertTrue(event1.elapsed_time(event2) > 0)

    # A plain torch.cuda.Event is also a generic torch.Event.
    cuda_event = torch.cuda.Event()
    self.assertIsInstance(cuda_event, torch.Event)
    self.assertTrue(issubclass(type(cuda_event), torch.Event))
    self.assertTrue(torch.Event in type(cuda_event).mro())
def test_stream_compatibility(self):
    """torch.accelerator stream APIs accept torch.cuda.Stream objects."""
    s1 = torch.cuda.Stream()
    s2 = torch.cuda.Stream()
    torch.accelerator.set_stream(s1)
    self.assertEqual(torch.accelerator.current_stream().stream_id, s1.stream_id)
    torch.accelerator.set_stream(s2)
    self.assertEqual(torch.accelerator.current_stream().stream_id, s2.stream_id)
    # Out-of-range device indices must be rejected.
    with self.assertRaisesRegex(
        RuntimeError, "Device index value .* is out of index range"
    ):
        torch.accelerator.current_stream(torch.accelerator.device_count())
def test_record_stream(self):
    """record_stream must delay block reuse until the recorded stream is done."""
    cycles_per_ms = get_cycles_per_ms()

    t = torch.FloatTensor([1, 2, 3, 4]).pin_memory()
    result = torch.cuda.FloatTensor(t.size())
    stream = torch.cuda.Stream()
    ptr = [None]  # captures the device pointer of the side-stream allocation

    # Performs the CPU->GPU copy in a background stream
    def perform_copy():
        with torch.cuda.stream(stream):
            tmp = t.cuda(non_blocking=True)
            ptr[0] = tmp.data_ptr()
        torch.cuda.current_stream().wait_stream(stream)
        tmp.record_stream(torch.cuda.current_stream())
        torch.cuda._sleep(int(50 * cycles_per_ms))  # delay the copy
        result.copy_(tmp)

    perform_copy()
    with torch.cuda.stream(stream):
        tmp2 = torch.cuda.FloatTensor(t.size())
        tmp2.zero_()
        self.assertNotEqual(
            tmp2.data_ptr(), ptr[0], msg="allocation reused to soon"
        )

    self.assertEqual(result.tolist(), [1, 2, 3, 4])

    if not TEST_CUDAMALLOCASYNC:
        # In the native allocator, we expect "tmp"'s side-stream-tagged block will be reused
        # in that side stream after result.copy_(tmp) in the main stream finishes.
        torch.cuda.current_stream().synchronize()
        with torch.cuda.stream(stream):
            tmp3 = torch.cuda.FloatTensor(t.size())
            self.assertEqual(tmp3.data_ptr(), ptr[0], msg="allocation not reused")
def test_record_stream_on_shifted_view(self):
    """record_stream on a non-zero-offset view must protect the whole
    underlying block from premature reuse."""
    # See issue #27366

    # This test detects unexpected block reallocation. For reliable test,
    # the stream to allocate tensors is isolated. The allocator will not
    # reuse free blocks which were allocated from another stream.
    stream_alloc = torch.cuda.Stream()
    with torch.cuda.stream(stream_alloc):
        base = torch.cuda.FloatTensor([10, 10])

    # Record another stream on a shifted view tensor.
    view = base[5:]
    self.assertTrue(view.storage_offset() > 0)

    stream_record = torch.cuda.Stream()
    with torch.cuda.stream(stream_record):
        # Keep stream_record busy so the recorded work is still pending
        # when the block is freed below.
        torch.cuda._busy_wait_for_flag()

    view.record_stream(stream_record)

    # Delete those tensors to make the block free soon.
    data_ptr = base.data_ptr()
    del base, view

    # A new tensor should not be allocated to the block above.
    stream_alloc.synchronize()

    with torch.cuda.stream(stream_alloc):
        try_realloc = torch.cuda.FloatTensor([10, 10])

    # Release the busy-wait so stream_record can drain.
    torch.cuda._clear_flag()

    self.assertNotEqual(try_realloc.data_ptr(), data_ptr)
def test_device_context_manager(self):
    """torch.accelerator.device_index restores the previous device on exit;
    a None index is a no-op."""
    before = torch.cuda.current_device()
    # None leaves the current device untouched.
    with torch.accelerator.device_index(None):
        self.assertEqual(torch.cuda.current_device(), before)
    self.assertEqual(torch.cuda.current_device(), before)
    # An explicit index switches inside the block and restores afterwards.
    with torch.accelerator.device_index(0):
        self.assertEqual(torch.cuda.current_device(), 0)
    self.assertEqual(torch.cuda.current_device(), before)
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_multi_device_context_manager(self):
    """Entering device_index(1) from device 0 switches, then restores."""
    origin, target = 0, 1
    torch.cuda.set_device(origin)
    with torch.accelerator.device_index(target):
        self.assertEqual(torch.cuda.current_device(), 1)
    self.assertEqual(torch.cuda.current_device(), origin)
def test_stream_context_manager(self):
    """A torch.cuda.Stream used as a context manager becomes current inside
    the block; the previous stream is restored on exit."""
    before = torch.cuda.current_stream()
    with torch.cuda.Stream() as entered:
        self.assertEqual(entered, torch.cuda.current_stream())
    self.assertEqual(before, torch.cuda.current_stream())
@unittest.skipIf(not TEST_MULTIGPU, "only one GPU detected")
def test_multi_device_stream_context_manager(self):
    """Entering a Stream bound to another device switches both the current
    device and current stream; both are restored on exit."""
    src, dst = 0, 1
    torch.cuda.set_device(src)
    prev_streams = {
        src: torch.cuda.current_stream(src),
        dst: torch.cuda.current_stream(dst),
    }
    with torch.cuda.Stream(dst) as entered:
        # Inside: device 1 is current and the new stream is current there,
        # while device 0 keeps its previous stream.
        self.assertEqual(dst, torch.cuda.current_device())
        self.assertEqual(entered, torch.cuda.current_stream())
        self.assertEqual(prev_streams[src], torch.cuda.current_stream(src))
    # Outside: device 0 is current again and both devices' previous
    # streams are restored.
    self.assertEqual(src, torch.cuda.current_device())
    self.assertEqual(prev_streams[src], torch.cuda.current_stream())
    self.assertEqual(prev_streams[dst], torch.cuda.current_stream(dst))
def test_noncontiguous_pinned_memory(self):
    """Pinning a non-contiguous (transposed) tensor preserves its values."""
    # See issue #3266
    transposed = torch.arange(0, 10).view((2, 5)).t()
    self.assertEqual(transposed, transposed.pin_memory())
def test_caching_pinned_memory(self):
    """Pinned-host allocations are cached: freed blocks are reused, but not
    while an in-flight async copy may still be reading them."""
    cycles_per_ms = get_cycles_per_ms()

    # check that allocations are reused after deletion
    t = torch.FloatTensor([1]).pin_memory()
    ptr = t.data_ptr()
    del t
    t = torch.FloatTensor([1]).pin_memory()
    self.assertEqual(t.data_ptr(), ptr, msg="allocation not reused")

    # check that the allocation is not reused if it's in-use by a copy
    gpu_tensor = torch.cuda.FloatTensor([0])
    torch.cuda._sleep(int(1000 * cycles_per_ms))  # delay the copy by 1s
    gpu_tensor.copy_(t, non_blocking=True)
    del t
    t = torch.FloatTensor([1]).pin_memory()
    self.assertNotEqual(t.data_ptr(), ptr, msg="allocation reused too soon")
    # The delayed copy must still observe the original host value.
    self.assertEqual(list(gpu_tensor), [1])
def test_caching_allocator_record_stream_oom(self):
    """allocations delayed by a record_stream call should still be freed on
    an out-of-memory in cuda_malloc_retry. see issue #19219"""
    stream = torch.cuda.Stream()

    with torch.cuda.stream(stream):
        y = torch.zeros(40 * 1024 * 1024, device="cuda")

    # Repeatedly allocate large blocks; without the OOM-time free of
    # record_stream-delayed blocks this loop would exhaust device memory.
    for _ in range(100):
        x = torch.empty(40 * 1024 * 1024, device="cuda")
        with torch.cuda.stream(stream):
            y += x
        # delays reuse of `x` until after all operations in `stream`
        x.record_stream(stream)
        del x

    # we've made a mess by allocating up to the device capacity. free any
    # cached blocks in case it affects future tests.
    torch.cuda.empty_cache()
# Tests for historic illegal memory access, see #17040.
def test_reduction_gpu_memory_accessing(self):
    """Smoke test: a dim-0 sum must not trigger the historic illegal
    memory access."""
    ones = torch.ones(512, 8, dtype=torch.float32, device="cuda")
    torch.sum(ones, 0)
def test_sum_fp16(self):
    """fp16 sums: exact at the fp16 limit, via fp32 accumulation beyond it,
    and matching a CPU reference for sparse random inputs."""
    self.assertEqual(
        torch.zeros(10, device="cuda", dtype=torch.float16).sum(), 0
    )

    # 65504 is the largest finite fp16 value, so this sum is representable.
    at_limit = torch.ones(65504, device="cuda", dtype=torch.float16)
    self.assertEqual(at_limit.sum(), 65504)
    self.assertEqual(at_limit.sum(dtype=torch.float32), 65504)

    # 65536 exceeds fp16 range; request fp32 accumulation explicitly.
    beyond = torch.ones(65536, device="cuda", dtype=torch.float16)
    self.assertEqual(beyond.sum(dtype=torch.float32), 65536)

    # Sparse random inputs: GPU fp16 sums must match the CPU reference.
    reference = torch.zeros(1203611).bernoulli_(0.0005)
    on_gpu = reference.to(device="cuda", dtype=torch.float16)
    self.assertEqual(on_gpu.sum().item(), reference.sum().item())

    reference = torch.zeros(100, 121, 80).bernoulli_(0.0005)
    on_gpu = reference.to(device="cuda", dtype=torch.float16)
    self.assertEqual(on_gpu.sum((0, 2)).float().cpu(), reference.sum((0, 2)))
def test_mean_fp16(self):
    """Mean of 65536 fp16 ones is exactly 1, with and without fp32
    accumulation."""
    ones = torch.ones(65536, device="cuda", dtype=torch.float16)
    self.assertEqual(ones.mean(), 1)
    self.assertEqual(ones.mean(dtype=torch.float32), 1)
def test_prod_large(self):
    """Large products with a non-zero identity element, real and complex."""
    # tests global reduction (should_global_reduce = true) in case of non-zero identity element
    self.assertEqual(
        torch.ones(240000, device="cuda", dtype=torch.float32).prod(), 1
    )

    # test for complex types. Note 240k is divisible by 4
    for ctype in (torch.cfloat, torch.cdouble):
        rotated = torch.ones(240000, device="cuda", dtype=ctype) * (0 + 1j)
        self.assertEqual(rotated.prod(), 1)
def test_multinomial_ext(self):
    """Regression tests for CUDA multinomial sampling corner cases."""
    # Test two corner cases from older PyTorch (Issue #4858)
    freqs = torch.cuda.FloatTensor(
        [
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.03178183361887932,
            0.027680952101945877,
            0.033176131546497345,
            0.046052902936935425,
            0.07742464542388916,
            0.11543981730937958,
            0.14148041605949402,
            0.15784293413162231,
            0.13180233538150787,
            0.08271478116512299,
            0.049702685326337814,
            0.027557924389839172,
            0.018125897273421288,
            0.011851548217236996,
            0.010252203792333603,
            0.007422595750540495,
            0.005372154992073774,
            0.0045109698548913,
            0.0036087757907807827,
            0.0035267581697553396,
            0.0018864056328311563,
            0.0024605290964245796,
            0.0022964938543736935,
            0.0018453967059031129,
            0.0010662291897460818,
            0.0009842115687206388,
            0.00045109697384759784,
            0.0007791675161570311,
            0.00020504408166743815,
            0.00020504408166743815,
            0.00020504408166743815,
            0.00012302644609007984,
            0.0,
            0.00012302644609007984,
            4.100881778867915e-05,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
            0.0,
        ]
    )

    torch.cuda.manual_seed(11042)
    sample = torch.multinomial(freqs, 1000, True)
    # Every sampled category must have nonzero probability.
    self.assertNotEqual(freqs[sample].min(), 0)

    p = torch.zeros(3421, 2, device="cuda", dtype=torch.float)
    p[:, 1] = 1
    torch.cuda.manual_seed(5214)
    r = torch.multinomial(p, 1)
    # Column 0 has probability 0 in every row, so index 0 must never be drawn.
    self.assertNotEqual(r.min().item(), 0)

    # test corner case from Issue #13867
    torch.cuda.manual_seed(33)
    probs = torch.randn(1000000, device="cuda").clamp(min=0) * 3e-5
    samples = probs.multinomial(1000000, replacement=True)
    self.assertGreater(probs[samples].min().item(), 0)
def _spawn_test_multinomial_invalid_probs_cuda(self, probs):
    """Run multinomial on an invalid `probs` vector in a subprocess (a
    device-side assert corrupts the CUDA context, so it cannot run in this
    process) and check the child reports a device-side assertion failure."""
    import subprocess

    try:
        p = subprocess.Popen(
            [
                sys.executable,
                "-c",
                f"""\
import sys
import torch
from torch import inf, nan
try:
    with torch.random.fork_rng(devices=[0]):
        torch.multinomial(torch.tensor({probs}).to('cuda'), 2, replacement=True)
        torch.cuda.synchronize()
    sys.exit(-1) # Should not be reached
except RuntimeError as e:
    sys.exit(-2)
""",
            ],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            universal_newlines=True,
        )
        out, err = p.communicate(timeout=10)
        p.wait(timeout=10)
    except subprocess.TimeoutExpired:
        p.kill()
        out, err = p.communicate()
    # Accept the CUDA and ROCm spellings of a device-side assert.
    expected_messages = [
        "device-side assert triggered",  # CUDA
        "Assertion",  # CUDA
        "HSA_STATUS_ERROR_EXCEPTION",  # ROCm
        "Device-side assertion",  # ROCm
    ]
    self.assertTrue(any(msg in out or msg in err for msg in expected_messages))
@slowTest
@unittest.skipIf(TEST_WITH_ROCM, "ROCm doesn't support device side asserts")
def test_multinomial_invalid_probs_cuda(self):
    """Each invalid probability vector must trip the device-side assert."""
    for bad_probs in (
        [1.0, -1.0, 1.0],
        [1.0, inf, 1.0],
        [1.0, -inf, 1.0],
        [1.0, 1.0, nan],
    ):
        self._spawn_test_multinomial_invalid_probs_cuda(bad_probs)
@staticmethod
def _mute_init():
    # Pool-worker initializer: redirect the child's stderr to /dev/null so
    # expected CUDA assert spew doesn't pollute the test output.
    os.dup2(os.open(os.devnull, os.O_WRONLY), sys.stderr.fileno())
def _spawn_method(self, method, arg):
    """Run ``method(arg)`` in a spawned subprocess and fail unless the
    returned error is a CUDA device-side assert (cudaErrorAssert == 710)."""
    ctx = torch.multiprocessing.get_context("spawn")
    with ctx.Pool(1, initializer=self._mute_init) as pool:
        errors = pool.map(method, [arg])
        for e in errors:
            if "device-side assert triggered" not in str(e):
                self.fail(e)
            if e.error_code != 710:  # cudaErrorAssert == 710
                self.fail(e)
@staticmethod
def _test_index_bounds_cuda(idx):
    """Index a CUDA arange with ``idx``; return a formatted result string on
    success, or the RuntimeError itself on failure (picklable for pool
    workers)."""
    x = torch.arange(10, device="cuda")
    try:
        y = x[torch.tensor([idx])]
        # NOTE(review): the bracket in this format string looks unbalanced
        # ("[...)"); test_index_out_of_bounds_exception_cuda asserts this
        # exact text, so any fix must change both places together.
        return f"x[torch.tensor([{idx})]={y}"
    except RuntimeError as err:
        return err
@slowTest
@skipIfRocm
def test_index_out_of_bounds_exception_cuda(self):
    """In-bounds CUDA indexing succeeds; out-of-bounds indexing must die
    with a device-side assert (checked in a spawned subprocess)."""
    test_method = TestCuda._test_index_bounds_cuda
    # Test in-bound access works fine
    self.assertEqual(
        test_method(1), "x[torch.tensor([1)]=tensor([1], device='cuda:0')"
    )
    # Test that indexing out of bounds causes assert
    self._spawn_method(test_method, 11)
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@serialTest()
def test_huge_index(self):
    """Advanced indexing on a 15M-row CUDA tensor matches the CPU result."""
    source = torch.empty(15000000, 45, device="cuda", dtype=torch.long).random_(
        0, 2**22
    )
    perm = torch.randperm(source.shape[0], device="cuda")
    gathered = source[perm]
    # Gather on CPU with the same permutation as the reference.
    self.assertEqual(gathered.cpu(), source.cpu()[perm.cpu()])
def test_randint_randomness_for_large_range(self) -> None:
    # For large ranges, randint generation is slightly different. This lead to a subtle bug where some Philox
    # offsets were not calculated correctly, resulting in reused random states.
    # See https://github.com/pytorch/pytorch/issues/125224
    size = 1_000_000
    high = 6_000_000_000  # Keep this above 2**32

    def unique_count(dev: torch.device) -> int:
        # Count unique values across two consecutive seeded randint calls.
        # Reused random states would shrink this number.
        gen = torch.Generator(device=dev)
        gen.manual_seed(0)
        draws = [
            torch.randint(
                0, high, [size], device=dev, generator=gen, dtype=torch.int64
            )
            for _ in range(2)
        ]
        return torch.stack(draws).unique().shape[0]

    # Use CPU as reference. The results should not deviate too much.
    self.assertTrue(
        abs(unique_count(torch.device("cuda")) - unique_count(torch.device("cpu")))
        < 10_000
    )
@largeTensorTest("20GB", "cuda")
@serialTest()
def test_randint_generation_for_large_numel(self) -> None:
    """randint must still produce nonzero values when numel > 2**31."""
    numel = 2**31 + 1
    total = torch.randint(2, (numel,), device="cuda", dtype=torch.int8).sum()
    self.assertTrue(
        total > 0, "expected randint in [0, 1] to generate nonzero values"
    )
@parametrize("dtype", [torch.float32, torch.double])
def test_random_no_reused_random_states(self, dtype: torch.dtype) -> None:
    # Test if random states do not overlap between consecutive rand/randn calls.
    # See https://github.com/pytorch/pytorch/issues/125224
    def unique_count(func, dev: torch.device, dtype: torch.dtype) -> int:
        # Count unique values over two consecutive seeded calls;
        # overlapping random states would reduce this count.
        size = 1000000
        gen = torch.Generator(device=dev)
        gen.manual_seed(0)
        draws = [
            func((size,), device=dev, generator=gen, dtype=dtype)
            for _ in range(2)
        ]
        return torch.stack(draws).unique().shape[0]

    # Use CPU as reference. The results should not deviate too much.
    for func in [torch.rand, torch.randn]:
        deviation = abs(
            unique_count(func, torch.device("cuda"), dtype)
            - unique_count(func, torch.device("cpu"), dtype)
        )
        self.assertTrue(deviation < 50_000, deviation)
def test_min_max_inits(self):
    # Testing if THC_reduceAll received the correct index initialization.
    # This affects the result of THC_reduceAll operations at extreme values
    lo = torch.cuda.ByteTensor([0])
    hi = torch.cuda.ByteTensor([255])
    expected = torch.cuda.LongTensor([0])[0]

    # With a single element, both argmax and argmin must be index 0.
    _, argmax = lo.max(dim=0)
    self.assertEqual(argmax, expected)

    _, argmin = hi.min(dim=0)
    self.assertEqual(argmin, expected)
def test_nvtx(self):
    """Smoke-test the NVTX annotation API surface."""
    # Just making sure we can see the symbols
    torch.cuda.nvtx.range_push("foo")
    torch.cuda.nvtx.mark("bar")
    torch.cuda.nvtx.range_pop()
    # range_start returns a handle that must be passed back to range_end.
    range_handle = torch.cuda.nvtx.range_start("range_start")
    torch.cuda.nvtx.range_end(range_handle)
def test_bincount_ext(self):
    """Exercise both bincount kernels (shared- and global-memory) against a
    CPU reference, plus a 32-bit bin-offset overflow regression."""
    # ensure CUDA code coverage
    input_size = (100000,)
    w = torch.randn(input_size, dtype=torch.double, device="cuda")
    w_cpu = w.cpu()

    # test shared memory impl
    t = torch.randint(50, input_size, dtype=torch.int8, device="cuda")
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))

    # test global memory impl
    # see `CUDAHistogramMemoryType` in SummaryOps.cu
    # 50000 * sizeof(int64_t) == 390 KiB, which should exceed smem of any known GPU
    t = torch.randint(50000, input_size, dtype=torch.int64, device="cuda")
    self.assertEqual(t.cpu().bincount(), t.bincount())
    self.assertEqual(t.cpu().bincount(w_cpu), t.bincount(w))

    t = torch.zeros([10], dtype=torch.int32, device="cuda")
    # 35488 * 65536 as int32 would cause overflow to negative value
    # giving negative bin offset
    t[0] = 35488
    counted = t.bincount(minlength=65536)
    self.assertEqual(torch.sum(counted), 10)
def test_tiny_half_norm_(self):
    """Norm of tiny fp16 values must not underflow to zero."""
    tiny = (torch.arange(25).cuda().float() / 100000000).half()
    self.assertGreater(tiny.norm().item(), 0)
def test_norm_type_conversion(self):
    """p=0 "norm" (nonzero count) of 65536 fp16 ones, accumulated in fp32,
    must be exactly 65536 (unrepresentable in fp16)."""
    halves = torch.ones(65536).cuda().half()
    self.assertEqual(halves.norm(p=0, dtype=torch.float32), 65536)
def test_cuda_memory_leak_detection_propagates_errors(self):
    """An exception raised inside assertLeaksNoCudaTensors must propagate
    out rather than being masked by the leak-check wrapper."""
    with self.assertRaisesRegex(
        RuntimeError, r"The size of tensor a \(3\) must match"
    ):
        with self.assertLeaksNoCudaTensors():
            x = torch.randn(3, 1, device="cuda")
            y = torch.randn(2, 1, device="cuda")
            # Broadcast-incompatible shapes: raises the RuntimeError above.
            x + y
@unittest.skipIf(not TEST_MEDIUM_TENSOR, "not enough memory")
@serialTest()
def test_cuda_kernel_loop_overflow(self):
    # Issue #24309: In extreme cases, the loop variable could overflow and continue
    # the kernel loop with a negative index, causing a RuntimeError (invalid write):
    last = 2**30
    inp = torch.randn(1, 1, 1, last + 1, dtype=torch.float16, device="cuda")
    expected = inp[0, 0, 0, last]
    out = torch.nn.functional.avg_pool2d(inp, kernel_size=1)
    torch.cuda.synchronize()
    # The final element must survive the kernel loop intact.
    self.assertEqual(out[0, 0, 0, last], expected)
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@gcIfJetson
@serialTest()
def test_cuda_kernel_loop_overflow_large(self):
    """Kernel-loop indexing around INT_MAX: numel > INT_MAX must raise, and
    numel == INT_MAX must compute the last element correctly."""
    # Make sure input.numel() > INT_MAX is handled:
    x = torch.randn(1, 1, 1, 2**31, dtype=torch.float16, device="cuda")
    with self.assertRaisesRegex(RuntimeError, "integer out of range"):
        y = torch.nn.functional.avg_pool2d(x, kernel_size=1)

    # Issue #24309: In extreme cases, the loop variable could overflow and continue
    # the kernel loop with a negative index, causing a RuntimeError (invalid write):
    x = torch.randn(1, 1, 1, 2**31 - 1, dtype=torch.float16, device="cuda")
    expected = x[0, 0, 0, 2**31 - 2]
    y = torch.nn.functional.avg_pool2d(x, kernel_size=1)
    torch.cuda.synchronize()
    self.assertEqual(y[0, 0, 0, 2**31 - 2], expected)
# this might create a reference cycle on self...
def _make_multiply_in_stream(self):
    """Build an autograd Function whose backward asserts it runs on the
    same stream its forward ran on, then delays that stream."""

    class MultiplyInStream(torch.autograd.Function):
        @staticmethod
        def forward(ctx, x, val):
            # Stash the multiplier and the forward stream for backward.
            ctx.val = val
            ctx.stream = torch.cuda.current_stream()
            return x * val

        @staticmethod
        def backward(ctx, grad):
            # Autograd must run this op's backward on its forward stream.
            self.assertEqual(torch.cuda.current_stream(), ctx.stream)
            # delays the operation in the background stream
            torch.cuda._sleep(1000 * 5000)
            return grad * ctx.val, None

    return MultiplyInStream
@skipCUDANonDefaultStreamIf(True)
def test_streaming_backwards_sync(self):
    """backward() run inside a stream context must sync so grads are safely
    usable, per the documented stream semantics of backward passes."""
    default_stream = torch.cuda.current_stream()
    stream = torch.cuda.Stream()

    MultiplyInStream = self._make_multiply_in_stream()

    # Tests using grads outside the backward() stream context
    # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
    x = torch.randn(5, 5, device="cuda", requires_grad=True)
    with torch.cuda.stream(stream):
        stream.wait_stream(default_stream)
        output = MultiplyInStream.apply(x, 2)
        output.sum().backward()
    # sync needed
    default_stream.wait_stream(stream)
    self.assertEqual(x.grad, torch.ones_like(x) * 2)
    self.assertEqual(torch.cuda.current_stream(), default_stream)

    # Tests that using grads in the same stream context as backward()
    # is safe regardless what streams bwd ops ran on
    bwd_ambient_stream = torch.cuda.Stream()
    x = torch.randn(5, 5, device="cuda", requires_grad=True)
    with torch.cuda.stream(stream):
        stream.wait_stream(default_stream)
        output = MultiplyInStream.apply(x, 3)
    with torch.cuda.stream(bwd_ambient_stream):
        bwd_ambient_stream.wait_stream(stream)
        output.sum().backward()
        # x was first used on "stream" so its AccumulateGrad leaf should run on "stream".
        # The end of backward() should have synced "bwd_ambient_stream" with "stream"
        # so it should be safe to use x.grad here without any syncs.
        self.assertEqual(x.grad, torch.ones_like(x) * 3)
        self.assertEqual(torch.cuda.current_stream(), bwd_ambient_stream)
def test_streaming_backwards_multiple_streams(self):
    """Backward through a model whose forward spans two side streams; the
    hook checks grads arrive on the stream where x was first used."""
    MultiplyInStream = self._make_multiply_in_stream()

    class StreamModel(torch.nn.Module):
        def __init__(self) -> None:
            super().__init__()
            self.event = torch.cuda.Event()
            self.stream0 = torch.cuda.Stream()
            self.stream1 = torch.cuda.Stream()

        def forward(self, x, x_first_use_on_ambient):
            # Clone x on the ambient stream or on stream0, controlling
            # which stream autograd treats as x's first-use stream.
            if x_first_use_on_ambient:
                x0 = x.clone()
            self.stream0.wait_stream(torch.cuda.current_stream())
            self.stream1.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(self.stream0):
                if not x_first_use_on_ambient:
                    x0 = x.clone()
                y0 = MultiplyInStream.apply(x0, 2)
                self.event.record(stream=torch.cuda.current_stream())

            with torch.cuda.stream(self.stream1):
                y1 = MultiplyInStream.apply(x, 3)
                self.stream1.wait_event(self.event)
                return y0 + y1

    stream = torch.cuda.Stream()

    for x_first_use_on_ambient in (True, False):
        # the out_of_place=False, iters=1 case stresses if proper syncs are inserted
        # when grads are initially None and stolen by backward ops.
        for out_of_place, iters in ((True, 1), (False, 1), (False, 5)):
            with torch.cuda.stream(stream):
                x = torch.randn(5, 5, device="cuda", requires_grad=True)
                model = StreamModel().cuda()
                # Grad for x should arrive on the stream of x's first use.
                x.register_hook(
                    lambda grad: self.assertEqual(
                        torch.cuda.current_stream(),
                        stream if x_first_use_on_ambient else model.stream0,
                    )
                )
                for p in model.parameters():
                    self.assertTrue(p.grad is None)
                for _ in range(iters):
                    loss = model(x, x_first_use_on_ambient).sum()
                    if out_of_place:
                        x_grad = torch.autograd.grad((loss,), (x,))[0]
                    else:
                        loss.backward()
            # See "Stream semantics of backward passes" on https://pytorch.org/docs/stable/notes/cuda.html
            torch.cuda.current_stream().wait_stream(stream)

            # y0 + y1 = 2x + 3x, so d(loss)/dx accumulates 5 per iteration.
            if out_of_place:
                self.assertEqual(x_grad, torch.ones_like(x) * 5 * iters)
            else:
                self.assertEqual(x.grad, torch.ones_like(x) * 5 * iters)
def test_streaming_backwards_sync_graph_root(self):
    """Backward ops on a side stream must sync with the GraphRoot before
    consuming grad tensors produced on another stream."""
    # This function tests if bwd ops running on a side stream properly sync with the GraphRoot.
    # The potential bug it targets is a race condition. The test uses multiple trials and
    # torch.cuda._sleep such that if the race condition exists, the test will almost certainly fail,
    # but there's a chance it may spuriously pass. Passing does not guarantee the backend is bug-free,
    # but failure does guarantee there is a bug.
    fwd_bwd_op_stream = torch.cuda.Stream()
    bwd_ambient_stream = torch.cuda.Stream()
    # We need these streams to be different otherwise the test is meaningless.
    self.assertTrue(fwd_bwd_op_stream != bwd_ambient_stream)

    size = int(1e3)

    a = torch.full((size,), 2.0, device="cuda", requires_grad=True)
    b = torch.full((size,), 3.0, device="cuda", requires_grad=True)

    # I don't think we need any manual record_streams below.
    # a and b remain in scope for the entire test.
    # c and grad remain in scope for each iteration, and there's a full sync between iterations.
    for trial in range(5):
        torch.cuda.synchronize()
        a.grad = b.grad = None
        with torch.cuda.stream(fwd_bwd_op_stream):
            c = a * b

        with torch.cuda.stream(bwd_ambient_stream):
            torch.cuda.synchronize()
            # Long-running dummy kernel on bwd_ambient_stream delays filling of grad
            torch.cuda._sleep(int(50 * get_cycles_per_ms()))
            # Fills grad on bwd_ambient_stream
            grad = torch.full((size,), float(trial + 1), device="cuda")

            # Bwd ops still run on fwd_bwd_ops_stream, so the following will likely fail if
            # bwd ops don't sync with bwd_ambient_stream before consuming grad.
            torch.autograd.backward(tensors=c, grad_tensors=grad)

            # See https://github.com/pytorch/pytorch/issues/47028
            # assertEquals below run on bwd_ambient_stream, so this test may also fail
            # if backward() fails to sync with bwd_ambient_stream at the end.
            # Synchronizing here works around the issue until a proper fix can be made.
            torch.cuda.synchronize()
            with torch.no_grad():
                self.assertEqual(a.grad, grad * b)
                self.assertEqual(b.grad, grad * a)
def test_streaming_backwards_callback(self):
    """Engine callbacks queued during backward must run only after the
    surrounding stream has synced with every leaf stream."""
    # Tests if autograd callbacks sync properly with respect to leaf streams and
    # the user-facing stream surrounding backward(). If it fails, first suspect is
    # sync logic where "final_callbacks_" are called in torch/csrc/autograd/engine.cpp
    MultiplyInStream = self._make_multiply_in_stream()

    size = int(1e3)
    a = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)
    b = torch.full((size,), 1, device="cuda", dtype=torch.float, requires_grad=True)

    s0 = torch.cuda.Stream()
    s1 = torch.cuda.Stream()
    s2 = torch.cuda.Stream()

    stash = []

    # sets up a nontrivial structure of leaf streams
    s0.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s0):
        c = MultiplyInStream.apply(a, 2)

    s1.wait_stream(torch.cuda.current_stream())
    with torch.cuda.stream(s1):
        d = MultiplyInStream.apply(b, 3)
        s1.wait_stream(s0)
        e = c * d

        def clone_leaf_grads():
            stash.append(a.grad.clone())
            stash.append(b.grad.clone())

        # Use a hook on e to install the callback
        e.register_hook(
            lambda grad: torch.autograd.Variable._execution_engine.queue_callback(
                clone_leaf_grads
            )
        )

    s2.wait_stream(s1)
    with torch.cuda.stream(s2):
        e.sum().backward()
        # The autograd engine should sync s2 with all leaf streams then run the callback clone_leaf_grads on s2.
        # If those things happened properly, checking the values of the cloned grads on s2 should be safe:
        # e = (2a)*(3b) with a = b = 1, so both leaf grads are 6.
        self.assertEqual(stash[0], torch.full_like(a, 6))
        self.assertEqual(stash[1], torch.full_like(a, 6))
@unittest.skipIf(
    TEST_WITH_ROCM,
    "In ROCm, kernel asserts are disabled due to performance overhead",
)
def test_fixed_cuda_assert_async(self):
    """_assert_async: ambiguous tensors raise eagerly on the host; truthy
    scalars pass; falsy scalars trip a device-side assert (each checked in
    its own subprocess since the assert corrupts the CUDA context)."""
    # Empty / multi-element tensors are rejected on the host side.
    with self.assertRaisesRegex(
        RuntimeError, "Boolean value of Tensor with no values is ambiguous"
    ):
        torch._assert_async(torch.tensor([], device="cuda"))
    with self.assertRaisesRegex(
        RuntimeError,
        "Boolean value of Tensor with more than one value is ambiguous",
    ):
        torch._assert_async(torch.tensor([0, 0], device="cuda"))

    # Truthy scalars of several dtypes must not assert.
    torch._assert_async(torch.tensor(1, device="cuda"))
    torch._assert_async(torch.tensor(0.1, device="cuda"))
    torch._assert_async(torch.tensor(-0.1, device="cuda"))
    torch._assert_async(torch.tensor(True, device="cuda"))
    torch._assert_async(torch.tensor(0 + 0.1j, device="cuda"))

    fail_stmts = [
        "torch._assert_async(torch.tensor(0, device='cuda'))",
        "torch._assert_async(torch.tensor(0.0, device='cuda'))",
        "torch._assert_async(torch.tensor(False, device='cuda'))",
        "torch._assert_async(torch.tensor(0 + 0j, device='cuda'))",
    ]

    import subprocess

    for stmt in fail_stmts:
        with self.subTest(stmt=stmt):
            # Child must die with a nonzero exit code when the assert fires.
            r = subprocess.call(
                [
                    sys.executable,
                    "-c",
                    f"""\
import torch
{stmt}
torch.cuda.synchronize()
""",
                ]
            )
            self.assertTrue(r != 0)
@unittest.skipIf(TEST_CUDAMALLOCASYNC, "FAIL")
def test_cublas_multiple_threads_same_device(self):
    """Concurrent GEMMs from multiple threads on one device must not race
    through shared cuBLAS handle state."""
    # Note, these parameters should be very carefully tuned
    # Too small number makes it hard for the racing condition
    # to happen, while too large number sometimes cause hang
    size = 1024
    num_threads = 2
    trials = 3
    test_iters = 100

    weight = torch.ones((size, size), device="cuda")
    results = {}
    barrier = threading.Barrier(num_threads)

    def _worker(t):
        my_stream = torch.cuda.Stream()
        # Hard sync so we don't need to worry about creating and using tensors
        # across streams or the fact that default streams are thread-local.
        # Those issues are not the target of this test.
        torch.cuda.synchronize()
        # Line up threads to increase likelihood of race conditions.
        barrier.wait()
        with torch.cuda.stream(my_stream):
            for _ in range(test_iters):
                # If all threads are sharing the same cublas handle,
                # the following sequence may occur:
                # thread 0 calls cublasSetStream()
                # thread 1 calls cublasSetStream()
                # thread 0 launches its raw gemm, which it thinks is in
                # its own stream, but is actually in thread 1's stream.
                # thread 0 enqueues its div_, which IS in its own stream,
                # but actually now races with its gemm.
                results[t] = torch.mm(results[t], weight)
                results[t].div_(float(size))
        torch.cuda.synchronize()

    for _ in range(trials):
        for t in range(num_threads):
            # Matmul with all-ones then /size keeps every entry at 1, so a
            # race shows up as a wrong final sum.
            results[t] = torch.ones((size, size), device="cuda")

        threads = [
            threading.Thread(target=_worker, args=(t,)) for t in range(num_threads)
        ]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        for t in range(num_threads):
            self.assertEqual(results[t].sum().item(), size * size)
# Test is flaky on Windows (https://github.com/pytorch/pytorch/issues/57401)
@unittest.skipIf(IS_WINDOWS, "Test is flaky on Windows (see issue 57401)")
@unittest.skipIf(not TEST_CUDNN, "CUDNN not available")
@skipIfRocm
def test_cudnn_multiple_threads_same_device(self):
    """Concurrent convolutions from multiple threads must each use their
    own per-thread cuDNN handle."""
    # This function is intended to test the lazy creation and reuse of per-thread
    # cudnn handles on each device in aten/src/ATen/cudnn/Handles.cpp.
    # Failure here likely indicates something wrong with that logic.
    weight = torch.ones((1, 1, 2, 2), device="cuda")

    results = {}

    num_threads = 2
    trials = 3
    test_iters = 1000
    barrier = threading.Barrier(num_threads)

    with torch.backends.cudnn.flags(enabled=True):

        def _worker(t):
            my_stream = torch.cuda.Stream()
            # Hard sync so we don't need to worry about creating and using tensors
            # across streams or the fact that default streams are thread-local.
            # Those issues are not the target of this test.
            torch.cuda.synchronize()
            # Line up threads to increase likelihood of race conditions.
            barrier.wait()
            with torch.cuda.stream(my_stream):
                for _ in range(test_iters):
                    # If all threads are sharing the same cudnn handle,
                    # the following sequence may occur:
                    # thread 0 calls setCuDNNStreamToCurrent()
                    # thread 1 calls setCuDNNStreamToCurrent()
                    # thread 0 launches its raw convolution, which it thinks is in
                    # its own stream, but is actually in thread 1's stream.
                    # thread 0 enqueues its div_, which IS in its own stream,
                    # but now races with its convolution.
                    results[t] = torch.nn.functional.conv2d(
                        results[t], weight, padding=0
                    )
                    results[t].div_(4.0)
            torch.cuda.synchronize()

        for _ in range(trials):
            for t in range(num_threads):
                # Unpadded 2x2 ones-kernel conv followed by /4 keeps values
                # at 1 but shrinks each spatial dim by 1 per iteration.
                results[t] = torch.ones((1, 1, 2048, 2048), device="cuda")

            threads = [
                threading.Thread(target=_worker, args=(t,))
                for t in range(num_threads)
            ]

            for thread in threads:
                thread.start()
            for thread in threads:
                thread.join()

            for t in range(num_threads):
                self.assertEqual(
                    results[t].sum().item(),
                    (2048 - test_iters) * (2048 - test_iters),
                )
def test_cusparse_multiple_threads_same_device(self):
    """Concurrent sparse @ dense matmuls from multiple threads must not
    race through shared cuSPARSE handle state."""
    size = 1024
    num_threads = 2
    trials = 3
    test_iters = 500

    def ones_sparse(size):
        # All-ones COO matrix of shape (size, size).
        a = torch.arange(size, device="cuda")
        indices = torch.cartesian_prod(a, a).t()
        values = torch.ones(size * size, device="cuda")
        return torch.sparse_coo_tensor(indices, values)

    weight = ones_sparse(size)
    results = {}
    barrier = threading.Barrier(num_threads)

    def _worker(t):
        my_stream = torch.cuda.Stream()
        # Hard sync so we don't need to worry about creating and using tensors
        # across streams or the fact that default streams are thread-local.
        # Those issues are not the target of this test.
        torch.cuda.synchronize()
        # Line up threads to increase likelihood of race conditions.
        barrier.wait()
        with torch.cuda.stream(my_stream):
            for _ in range(test_iters):
                # If all threads are sharing the same cublas handle,
                # the following sequence may occur:
                # thread 0 calls cublasSetStream()
                # thread 1 calls cublasSetStream()
                # thread 0 launches its raw gemm, which it thinks is in
                # its own stream, but is actually in thread 1's stream.
                # thread 0 enqueues its div_, which IS in its own stream,
                # but actually now races with its gemm.
                results[t] = weight.mm(results[t])
                results[t].div_(float(size))
        torch.cuda.synchronize()

    for _ in range(trials):
        for t in range(num_threads):
            # Sparse ones-matmul then /size keeps every entry at 1, so a
            # race shows up as a wrong final sum.
            results[t] = torch.ones((size, size), device="cuda")

        threads = [
            threading.Thread(target=_worker, args=(t,)) for t in range(num_threads)
        ]

        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()

        for t in range(num_threads):
            self.assertEqual(results[t].sum().item(), size * size)
@slowTest
@unittest.skipIf(not TEST_LARGE_TENSOR, "not enough memory")
@serialTest()
def test_max_large_axis(self):
    """max over an axis longer than 2**32 finds the value and its index."""
    huge = torch.zeros(2**32, device="cuda", dtype=torch.int8)
    huge[-1] = 1
    val, idx = huge.max(0)
    self.assertEqual(val, 1)
    self.assertEqual(idx, huge.shape[0] - 1)
@unittest.skipIf(not TEST_NUMPY, "Numpy not found")
def test_to_numpy(self):
    """Calling .numpy() on a CUDA tensor must raise TypeError."""
    with self.assertRaises(TypeError):
        torch.empty(1, device="cuda").numpy()
def test_graph_is_current_stream_capturing(self):
    """is_current_stream_capturing is True only between capture_begin()
    and capture_end() on the capturing stream."""
    self.assertFalse(torch.cuda.is_current_stream_capturing())

    if TEST_CUDA and (not TEST_WITH_ROCM):
        s = torch.cuda.Stream()

        with torch.cuda.stream(s):
            g = torch.cuda.CUDAGraph()
            self.assertFalse(torch.cuda.is_current_stream_capturing())
            g.capture_begin()
            self.assertTrue(torch.cuda.is_current_stream_capturing())
            g.capture_end()
@unittest.skipIf(
    not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_graph_capture_simple(self):
    """Capture ten +1 ops into a CUDA graph; replay reruns them all."""
    s = torch.cuda.Stream()

    with torch.cuda.stream(s):
        a = torch.full((1000,), 1, device="cuda")
        g = torch.cuda.CUDAGraph()
        torch.cuda.empty_cache()
        g.capture_begin()
        b = a
        for _ in range(10):
            b = b + 1
        g.capture_end()
    torch.cuda.current_stream().wait_stream(s)

    g.replay()

    # After replay, each of b's 1000 elements is 1 + 10 -> sum 11000.
    self.assertEqual(b.sum().item(), 11000.0)
@unittest.skipIf(
    not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_graphsafe_set_get_rng_state(self):
    """Generator states registered with a CUDA graph must advance during
    replay exactly as an unregistered generator does outside the graph."""

    # Define a function to create generator states, with optional graph registration
    def create_states(generator):
        """Initializes generator states and registers them with a CUDA graph if provided."""
        # Ensure the CUDA generator is initialized
        torch.rand(1, device="cuda")
        generator.manual_seed(0)

        # Save the current state of the generator
        old_state = generator.graphsafe_get_state()
        # Create and save a cloned state of the generator
        new_state = generator.clone_state()
        # Return the original generator and its two states
        return generator, old_state, new_state

    def register_states_to_graph(generator_state, graph):
        _, old_state, new_state = generator_state
        graph.register_generator_state(old_state)
        graph.register_generator_state(new_state)

    # Define a function to perform specific RNG actions using the generator's states
    def perform_random_generation_steps(generator_state):
        generator, old_state, new_state = generator_state
        random_values = []

        # Generate random numbers with the new generator state
        generator.graphsafe_set_state(new_state)
        random_values.append(torch.rand(5, device="cuda", generator=generator))

        # Generate random numbers twice with the old generator state
        generator.graphsafe_set_state(old_state)
        random_values.extend(
            [torch.rand(5, device="cuda", generator=generator) for _ in range(2)]
        )

        return random_values

    # Define a function to retrieve the final offsets of the original and new generator states
    def get_final_offsets_of_states(generator_state):
        _, old_state, new_state = generator_state
        old_state_offset = old_state.get_offset()
        new_state_offset = new_state.get_offset()
        return old_state_offset, new_state_offset

    # Set up and test a new CUDA generator
    generator = torch.Generator(device="cuda")
    generator_state = create_states(generator)

    # Set up and test the default CUDA generator with a CUDA Graph
    g = torch.cuda.CUDAGraph()
    s = torch.cuda.Stream()
    default_generator = torch.cuda.default_generators[0]
    default_generator_state = create_states(default_generator)
    register_states_to_graph(default_generator_state, g)

    # Perform random number generation within a CUDA graph
    with torch.cuda.stream(s):
        g.capture_begin()
        graphed_random_values = perform_random_generation_steps(
            default_generator_state
        )
        g.capture_end()

    # Synchronize the streams and replay the graph
    torch.cuda.current_stream().wait_stream(s)
    for _ in range(3):
        random_values = perform_random_generation_steps(generator_state)
        g.replay()
        offset = get_final_offsets_of_states(generator_state)
        graph_offset = get_final_offsets_of_states(default_generator_state)

        # Compare the final offsets of states for both generators to ensure consistency
        self.assertEqual(offset, graph_offset)
        # Compare the states generated outside and inside the graph
        self.assertEqual(random_values, graphed_random_values)
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_memory_stats_of_multiple_generators_and_graphs(self):
        """Registering N generator states costs exactly 2*N active small-pool
        blocks (512 bytes each: seed + offset holders), regardless of how many
        graphs they are registered with, and the cost is fully released after
        the graphs are destroyed."""
        # Function to clear CUDA cache and collect garbage
        def clear_cuda_cache():
            gc.collect()
            torch.cuda.empty_cache()

        # Executes a simple graph task which includes capturing and executing a random number generation within a CUDA graph.
        def simple_graph_task(graph):
            s = torch.cuda.Stream()
            with torch.cuda.stream(s):
                graph.capture_begin()
                torch.rand(1, device="cuda")
                graph.capture_end()
            torch.cuda.current_stream().wait_stream(s)
            graph.replay()  # Replays the captured operations

        def get_memory_stats():
            stats = torch.cuda.memory_stats()
            num_blocks = stats["active.all.current"]
            total_size = stats["active_bytes.all.current"]
            return num_blocks, total_size

        def test(num_graphs, num_generators):
            baseline = get_memory_stats()
            baseline_num_blocks, baseline_total_size = baseline

            # Allocate CUDA graphs
            graphs = [torch.cuda.CUDAGraph() for _ in range(num_graphs)]

            # Allocate and manage generator states
            default_generator = torch.cuda.default_generators[0]
            generators = [default_generator.graphsafe_get_state()]

            # Starts from 1 as one state is already added
            for _ in range(1, num_generators):
                generators.append(default_generator.clone_state())

            for graph in graphs:
                for generator_state in generators:
                    graph.register_generator_state(generator_state)
                simple_graph_task(graph)

            # Assert conditions after graph tasks
            num_blocks, total_size = get_memory_stats()

            # The allocated blocks should only be proportional to the number of generators
            expected_blocks_diff = 2 * num_generators
            expected_size_diff = 2 * 512 * num_generators  # Each block's size is 512

            self.assertEqual(
                (num_blocks - baseline_num_blocks),
                expected_blocks_diff,
                "Unexpected number of active blocks.",
            )
            self.assertEqual(
                (total_size - baseline_total_size),
                expected_size_diff,
                "Unexpected total memory size.",
            )

            # Cleanup graphs and clear CUDA cache
            while graphs:
                graph = graphs.pop()
                del graph
            clear_cuda_cache()

            # Assert that memory stats return to baseline after cleanup
            self.assertEqual(
                get_memory_stats(),
                baseline,
                "Memory stats do not match baseline after cleanup.",
            )

        # Running the test function with different parameters
        test(1, 1)
        test(3, 2)
        test(10, 20)
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_capture_reset_recapture(self):
        """After reset(), the same CUDAGraph object can be re-captured with
        different ops and replayed correctly."""
        s = torch.cuda.Stream()
        with torch.cuda.stream(s):
            a = torch.full((1000,), 1, device="cuda")
            g = torch.cuda.CUDAGraph()
            torch.cuda.empty_cache()
            g.capture_begin()
            b = a
            for _ in range(10):
                b = b + 1
            g.capture_end()
        torch.cuda.current_stream().wait_stream(s)
        g.replay()
        self.assertEqual(b.sum().item(), 11000.0)
        g.reset()
        # Second capture: b starts from 2.0 and gets +2 ten times -> 22 per elem.
        with torch.cuda.stream(s):
            g.capture_begin()
            b.fill_(2.0)
            for _ in range(10):
                b = b + 2
            g.capture_end()
        torch.cuda.current_stream().wait_stream(s)
        g.replay()
        self.assertEqual(b.sum().item(), 22000.0)
        g.reset()
        del g
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_debugdump(self):
        """Smoke-test debug_dump(): capture a two-stream graph with debug mode
        enabled and dump it to a .dot file in a temp directory."""
        torch.cuda.empty_cache()
        x = torch.randn(10240000, device="cuda")
        y = torch.rand_like(x)
        g = torch.cuda.CUDAGraph()
        g.enable_debug_mode()
        s0 = torch.cuda.Stream()
        s1 = torch.cuda.Stream()
        s0.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s0):
            g.capture_begin()
            z = x + y
            # Fork work onto a second stream during capture so the dumped
            # graph has multi-stream structure.
            with torch.cuda.stream(s1):
                s1.wait_stream(s0)
                z + y
            s0.wait_stream(s1)
            g.capture_end()
        s0.synchronize()
        torch.cuda.synchronize()
        with tempfile.TemporaryDirectory() as tempdir:
            g.debug_dump(os.path.join(tempdir, "out_multi_stream.dot"))
    @unittest.skipIf(
        not TEST_CUDA_GRAPH or TEST_WITH_ROCM,
        "CUDA >= 11.0 required for external events in cuda graphs. rocm does not support external events",
    )
    def test_graph_timing(self):
        """External CUDA events recorded inside a captured graph must report a
        positive elapsed time after the graph is replayed."""
        torch.cuda.empty_cache()
        x = torch.randn(10240000, device="cuda")
        y = torch.rand_like(x)
        g = torch.cuda.CUDAGraph()
        # external=True lets the events be recorded during capture and
        # queried after replay.
        start_event = torch.cuda.Event(enable_timing=True, external=True)
        end_event = torch.cuda.Event(enable_timing=True, external=True)
        with torch.cuda.graph(g):
            start_event.record()
            z = x + y
            end_event.record()
        torch.cuda.synchronize()
        g.replay()
        torch.cuda.synchronize()
        self.assertTrue(start_event.elapsed_time(end_event) > 0)
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_error(self):
        """capture_begin() on the default stream must raise the expected
        RuntimeError; checked in a subprocess (exit 0 = expected error,
        1 = wrong error, 2 = no error)."""
        # We need to run this test in a separate thread as the error we trigger
        # puts the cuda context in a bad state
        script = """
import torch
g = torch.cuda.CUDAGraph()
try:
    g.capture_begin()
except RuntimeError as e:
    if "CUDA graphs must be captured on a non-default stream." in str(e):
        exit(0)
    else:
        exit(1)
exit(2)
"""
        try:
            subprocess.check_output(
                [sys.executable, "-c", script],
                stderr=subprocess.STDOUT,
                # On Windows, opening the subprocess with the default CWD makes `import torch`
                # fail, so just set CWD to this script's directory
                cwd=os.path.dirname(os.path.realpath(__file__)),
            )
        except subprocess.CalledProcessError as e:
            if e.returncode == 1:
                self.assertTrue(
                    False,
                    "Error raise by starting capture without a stream is not the expected one",
                )
            elif e.returncode == 2:
                self.assertTrue(
                    False,
                    "Error raised by starting capture without a stream was not caught",
                )
    @unittest.skipIf(
        (not TEST_CUDA) or TEST_WITH_ROCM,
        "CUDA >= 11.0 required for graphs",
    )
    def test_graph_warn_if_has_zero_nodes(self):
        """Capturing a graph with no ops between begin/end must emit the
        'CUDA Graph is empty' warning."""
        with warnings.catch_warnings(record=True) as caught:
            g = torch.cuda.CUDAGraph()
            s = torch.cuda.Stream()
            with torch.cuda.stream(s):
                g.capture_begin()
                g.capture_end()
        self.assertTrue(
            any("The CUDA Graph is empty" in str(w.message) for w in caught)
        )
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
@unittest.skipIf(
IS_JETSON, "oom reporting has issues on jetson igx due to partial nvml support"
)
def test_graph_capture_oom(self):
oom_regex = (
"would exceed allowed memory" if TEST_CUDAMALLOCASYNC else "out of memory"
)
with self.assertRaisesRegex(RuntimeError, oom_regex):
with torch.cuda.graph(torch.cuda.CUDAGraph()):
torch.zeros(2**40, device="cuda")
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    @serialTest()
    @setBlasBackendsToDefaultFinally
    def test_repeat_graph_capture_cublas_workspace_memory(self):
        """100 repeated graph captures of a matmul must not leak cuBLAS
        workspace memory (device usage may drift by at most 0.1 GB)."""
        torch.backends.cuda.preferred_blas_library("cublas")
        (x, y, z) = 1024, 512, 64
        a = torch.rand((x, y), device="cuda")
        b = torch.rand((y, z), device="cuda")
        # warmup
        torch.mm(a, b)
        free_bytes_before, total_bytes = torch.cuda.mem_get_info()
        used_gb_before = (total_bytes - free_bytes_before) / 1e9
        for _ in range(100):
            torch_graph = torch.cuda.CUDAGraph()
            with torch.cuda.graph(torch_graph):
                torch.mm(a, b)
            torch_graph.replay()
        free_bytes_after, _ = torch.cuda.mem_get_info()
        used_gb_after = (total_bytes - free_bytes_after) / 1e9
        self.assertFalse(used_gb_before + 0.1 < used_gb_after)
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_rng_functional(self):
        """Captured functional RNG ops (dropout, rrelu) must advance the CUDA
        RNG state on replay exactly as eager execution does, including after
        reseeding."""
        ops_with_kwargs = (
            (torch.nn.functional.dropout, {"p": 0.1}),
            (torch.nn.functional.rrelu, {"training": True}),
        )
        size = 10000

        def run(op, kwargs):
            a = torch.randn((size,), device="cuda", dtype=torch.float)

            # Control
            torch.cuda.manual_seed(5)
            eager_out = a
            for _ in range(6):
                eager_out = op(eager_out, **kwargs)

            graph_in = a.clone()
            stream = torch.cuda.Stream()
            stream.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(stream):
                torch.cuda.manual_seed(5)

                g = torch.cuda.CUDAGraph()
                torch.cuda.empty_cache()
                g.capture_begin()
                graph_out = graph_in
                for _ in range(2):
                    graph_out = op(graph_out, **kwargs)
                g.capture_end()
            torch.cuda.current_stream().wait_stream(stream)

            # Runs a graphed->eager->graphed sequence of RNG ops.
            # replay() plays 2 invocations of the op, so the sequence has 6
            # invocations total, matching Control.
            # replay() reads from graph_in and writes to graph_out.
            g.replay()
            out = op(graph_out, **kwargs)
            out = op(out, **kwargs)
            graph_in.copy_(out)
            g.replay()

            # If replay() updated RNG state correctly, graph_out
            # should now hold data equal to eager_out.
            try:
                self.assertEqual(eager_out, graph_out)
            except Exception as e:
                raise RuntimeError("Failed on ", op) from e

            # Do the same operations varying seeds
            seeds = [6, 128, 9999]

            for seed in seeds:
                torch.cuda.manual_seed(seed)
                graph_in.copy_(a)
                for _ in range(3):
                    g.replay()

                # If the random seed was not updated then the graph would
                # generate the same output as in previous check.
                try:
                    self.assertNotEqual(eager_out, graph_out)
                except Exception as e:
                    raise RuntimeError("Failed on ", op) from e

                # Now repeat the same operations in non-graphed mode.
                torch.cuda.manual_seed(seed)
                for _ in range(3):
                    eager_out.copy_(a)
                    eager_out = op(eager_out, **kwargs)
                    eager_out = op(eager_out, **kwargs)

                # In the end, graph_out and eager_out must be equal
                # as they went under the same set of operations.
                try:
                    self.assertEqual(eager_out, graph_out)
                except Exception as e:
                    raise RuntimeError("Failed on ", op) from e

            # We hold references to all tensors used across streams up til this sync,
            # so no need to call record_stream on those tensors.
            torch.cuda.synchronize()

        for op, kwargs in ops_with_kwargs:
            run(op, kwargs)
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_rng_distributions(self):
        """Captured distribution ops (torch.* factories and in-place Tensor
        RNG methods) must reproduce eager results on replay under matching
        seeds and dummy-op preludes."""
        size = 10000
        input = torch.rand((size,), device="cuda", dtype=torch.float)
        alloc = torch.empty((size,), device="cuda", dtype=torch.float)

        # Torch ops to test with sample args (tuple) and kwargs (dict)
        torch_with_args = (
            ("bernoulli", (input.clone(),), {}),
            # multinomial uses some uncapturable CUDA calls.
            # TODO: reenable multinomial tests if/when the implementation is capturable.
            # ("multinomial", (input.clone(), size, True), {}),
            # ("multinomial", (input.clone(), size // 2, False), {}),
            # TODO: reenable normal test, where std is a device
            # tensor, when graph test failures are fixed
            # ("normal", (input.clone() + 1, input.clone()), {}),
            ("normal", (input.clone() + 1, 1.0), {}),
            ("poisson", (input.clone(),), {}),
            ("rand", (size,), {"device": "cuda", "dtype": torch.float}),
            ("randint", (0, 3, (size,)), {"device": "cuda", "dtype": torch.float}),
            ("randn", (size,), {"device": "cuda", "dtype": torch.float}),
        )

        # Tensor methods to test with sample args (tuple)
        tensor_with_args = (
            ("bernoulli_", (input.clone(),)),
            ("cauchy_", ()),
            ("exponential_", ()),
            ("geometric_", (0.3,)),
            ("log_normal_", ()),
            ("normal_", ()),
            ("random_", ()),
            ("uniform_", ()),
        )

        def run(module, op, args, kwargs):
            torch.cuda.manual_seed(5)

            # Each path runs a dummy op to increment the state a bit before creating controls.
            if module == "torch":
                dummy = getattr(torch, op)(*args, **kwargs)
                control1 = getattr(torch, op)(*args, **kwargs)
                control2 = getattr(torch, op)(*args, **kwargs)
            else:
                dummy = alloc.clone()
                control1 = alloc.clone()
                control2 = alloc.clone()
                getattr(dummy, op)(*args)
                getattr(control1, op)(*args)
                getattr(control2, op)(*args)

            stream = torch.cuda.Stream()
            stream.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(stream):
                torch.cuda.manual_seed(5)

                g = torch.cuda.CUDAGraph()
                torch.cuda.empty_cache()
                if module == "torch":
                    g.capture_begin()
                    t1 = getattr(torch, op)(*args, **kwargs)
                    t2 = getattr(torch, op)(*args, **kwargs)
                    g.capture_end()
                else:
                    t1 = alloc.clone()
                    t2 = alloc.clone()
                    g.capture_begin()
                    getattr(t1, op)(*args)
                    getattr(t2, op)(*args)
                    g.capture_end()
            torch.cuda.current_stream().wait_stream(stream)

            if not TEST_CUDAMALLOCASYNC:
                # Makes sure values haven't been populated yet
                # (in other words, makes sure capture didn't actually run ops).
                # We can only try this with the native allocator, for which captured
                # addresses are already backed by cudaMalloced memory.
                # If we try it with cudaMallocAsync, CUDA won't event consider
                # the captured addresses allocated until replay(), and if we
                # access them before replay() we get IMAs.
                try:
                    self.assertNotEqual(control1, t1)
                    self.assertNotEqual(control2, t2)
                except Exception as e:
                    raise RuntimeError("Failed on " + module + "." + op) from e

            # Set a new seed to check if graph would use it
            for seed in [6, 314, 271]:
                torch.cuda.manual_seed(seed)
                # Runs a dummy op prelude, as for controls, to make sure replay()
                # picks up the dummy op's state increment.
                if module == "torch":
                    dummy = getattr(torch, op)(*args, **kwargs)
                    control1 = getattr(torch, op)(*args, **kwargs)
                    control2 = getattr(torch, op)(*args, **kwargs)
                else:
                    getattr(dummy, op)(*args)
                    getattr(control1, op)(*args)
                    getattr(control2, op)(*args)

                torch.cuda.manual_seed(seed)
                if module == "torch":
                    dummy = getattr(torch, op)(*args, **kwargs)
                else:
                    getattr(dummy, op)(*args)

                # see above comment on TEST_CUDAMALLOCASYNC
                if not TEST_CUDAMALLOCASYNC:
                    t1.copy_(alloc)
                    t2.copy_(alloc)

                # Runs RNG ops that fill t1 and t2.
                g.replay()

                try:
                    self.assertEqual(control1, t1)
                    self.assertEqual(control2, t2)
                except Exception as e:
                    raise RuntimeError("Failed on " + module + "." + op) from e

            # We hold references to all tensors used across streams up til this sync,
            # so no need to call record_stream on those tensors.
            torch.cuda.synchronize()

        for op_with_args in torch_with_args:
            run("torch", *op_with_args)

        for meth_with_args in tensor_with_args:
            # Adds an empty dict for kwargs, which none of the Tensor methods use
            run("Tensor", *(meth_with_args + ({},)))
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_two_successive(self):
        """Two graphs captured back-to-back must compute correctly whether or
        not they share a memory pool; sharing should save exactly one small
        buffer of reserved memory (native allocator only)."""
        torch.cuda.empty_cache()

        size = 1000
        kSmallBuffer = 2097152

        def func_with_temps(t, val):
            x = t.clone() + val
            y = t.clone() + val
            return x + y

        s = torch.cuda.Stream()

        for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
            g0 = torch.cuda.CUDAGraph()
            g1 = torch.cuda.CUDAGraph()

            a = torch.ones((size,), device="cuda")

            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                g0_args = (
                    (torch.cuda.graph_pool_handle(),)
                    if share_mem == "via graph_pool_handle()"
                    else ()
                )
                g0.capture_begin(*g0_args)
                b = a.clone()
                for _ in range(5):
                    b = func_with_temps(b, 1)
                g0.capture_end()

                g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
                g1.capture_begin(*g1_args)
                for _ in range(5):
                    b = func_with_temps(b, 1)
                g1.capture_end()
            torch.cuda.current_stream().wait_stream(s)

            # mixes unrelated eager ops with replays
            c = a.clone()
            for _ in range(2):
                c = func_with_temps(c, 3)
            g0.replay()
            for _ in range(2):
                c = func_with_temps(c, 3)
            g1.replay()
            for _ in range(2):
                c = func_with_temps(c, 3)

            self.assertEqual(b.sum().item(), size * 3070)
            self.assertEqual(c.sum().item(), size * 442)

            if not TEST_CUDAMALLOCASYNC:
                # These stat checks are specific to the native allocator.
                # NOTE: reserved_no_sharing is assigned on the first ("Don't
                # share") iteration and read on the later ones — hence the
                # noqa: F821.
                if share_mem != "Don't share":
                    self.assertEqual(
                        reserved_no_sharing  # noqa: F821
                        - torch.cuda.memory_stats()["reserved_bytes.all.current"],
                        kSmallBuffer,
                    )
                else:
                    reserved_no_sharing = torch.cuda.memory_stats()[
                        "reserved_bytes.all.current"
                    ]

            del a, b, c, g0, g1
            # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skipIf(
        (not TEST_CUDA_GRAPH)
        or IS_WINDOWS
        or (  # appears to still be broken on Windows as of 11.4+
            torch.version.cuda
            and int(torch.version.cuda.split(".")[0]) == 11
            and int(torch.version.cuda.split(".")[1]) < 4
        ),
        "Graph bindings disallow concurrent replay for CUDA < 11.4, see "
        + "https://github.com/pytorch/pytorch/pull/57556",
    )
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_concurrent_replay(self):
        """Concurrently replaying two graphs that share a native-allocator
        mempool is expected to corrupt results; without sharing (or with
        cudaMallocAsync) the results must stay correct."""
        torch.cuda.empty_cache()

        size = 1000000  # largeish to help expose race conditions

        def func_with_temps(t, val):
            x = t.clone() + val
            y = t.clone() + val
            return x + y

        s = torch.cuda.Stream()

        for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
            g0 = torch.cuda.CUDAGraph()
            g1 = torch.cuda.CUDAGraph()

            s0 = torch.cuda.Stream()
            s1 = torch.cuda.Stream()
            a = torch.ones((size,), device="cuda")

            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                g0_args = (
                    (torch.cuda.graph_pool_handle(),)
                    if share_mem == "via graph_pool_handle()"
                    else ()
                )
                g0.capture_begin(*g0_args)
                b = a.clone()
                for _ in range(5):
                    b = func_with_temps(b, 1)
                g0.capture_end()

                g1_args = (g0.pool(),) if share_mem == "via pool()" else g0_args
                g1.capture_begin(*g1_args)
                c = a.clone()
                for _ in range(5):
                    c = func_with_temps(c, 2)
                g1.capture_end()

            # To reproduce data corruption, I need g0 and g1's kernels to run concurrently.
            # But replay() (especially cudaGraphLaunch) can incur significant CPU overhead.
            # The following pattern helps align device-side execution of g0 and g1's kernels.
            torch.cuda.synchronize()
            with torch.cuda.stream(s0):
                torch.cuda._sleep(1000000)
                s1.wait_stream(s0)
                g0.replay()
            with torch.cuda.stream(s1):
                g1.replay()
            torch.cuda.current_stream().wait_stream(s0)
            torch.cuda.current_stream().wait_stream(s1)

            if (not TEST_CUDAMALLOCASYNC) and (share_mem != "Don't share"):
                # If we used the native allocator and shared mempools,
                # we expect the concurrent replays corrupted each other.
                self.assertNotEqual(b.sum().item(), size * 94)
                self.assertNotEqual(c.sum().item(), size * 156)
            else:
                # If we EITHER
                # - used the native allocator without sharing mempools, OR
                # - used cudaMallocAsync, which ignores graph pool-sharing hints and should always be safe
                # we don't expect memory corruption.
                self.assertEqual(b.sum().item(), size * 94)
                self.assertEqual(c.sum().item(), size * 156)

            del a, b, c, g0, g1
            # Tensors used across streams (a, b, c) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_three_successive(self):
        """Three graphs captured in sequence replay correctly in capture
        order; replaying out of order (g0, g2, g1) corrupts shared-pool
        results in a predictable way (native allocator only)."""
        torch.cuda.empty_cache()

        size = 1000

        s = torch.cuda.Stream()

        for share_mem in ("Don't share", "via pool()", "via graph_pool_handle()"):
            a = torch.ones((size,), device="cuda")

            g0 = torch.cuda.CUDAGraph()
            g1 = torch.cuda.CUDAGraph()
            g2 = torch.cuda.CUDAGraph()

            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                g0_args = (
                    (torch.cuda.graph_pool_handle(),)
                    if share_mem == "via graph_pool_handle()"
                    else ()
                )
                g0.capture_begin(*g0_args)
                b = a.clone()
                c = b + 1
                d = b + 2
                g0.capture_end()

                args = (g0.pool(),) if share_mem == "via pool()" else g0_args

                g1.capture_begin(*args)
                e = c + 3
                del c
                g1.capture_end()

                g2.capture_begin(*args)
                f = d + 4
                g2.capture_end()
            torch.cuda.current_stream().wait_stream(s)

            # Tests that replaying in capture order is valid
            g0.replay()
            g1.replay()
            g2.replay()

            self.assertEqual(e.sum().item(), size * 5)
            self.assertEqual(f.sum().item(), size * 7)

            # Tests that replaying as g0, g2, g1 is only valid if they don't share a pool
            g0.replay()
            g2.replay()
            g1.replay()

            expect_corruption = (not TEST_CUDAMALLOCASYNC) and (
                share_mem != "Don't share"
            )
            # If we used the native allocator and shared mempools, g2's capture should have reused c's memory for f.
            # We replayed g2 then g1, so we expect g1's captured "e = c + 3" mistakenly filled e with "f's vals + 3".
            self.assertEqual(
                e.sum().item(), size * (7 + 3) if expect_corruption else size * 5
            )
            self.assertEqual(f.sum().item(), size * 7)

            del a, b, d, e, f, g0, g1, g2
            # Tensors used across streams (a, e, f) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skipIf(
        (not TEST_CUDA_GRAPH) or TEST_CUDAMALLOCASYNC,
        "CUDA >= 11.0 or ROCM >= 5.3 required for graphs",
    )
    def test_graph_memory_stats_and_use_result_after_destroy_graph(self):
        """Checks allocator stat deltas across graph capture, replay, and
        graph destruction, and that a graph output tensor remains usable
        after its graph is deleted (native allocator only)."""
        # Allocator pool constants mirroring c10/cuda/CUDACachingAllocator.cpp.
        kSmallSize = 1048576
        kSmallBuffer = 2097152
        kLargeBuffer = 20971520
        kMinLargeAlloc = 10485760
        kRoundLarge = 2097152

        elem = 4

        # this was annoying to write but stresses the expectations pretty rigorously
        cases = (
            (512 // elem, 1, kSmallBuffer, kSmallBuffer, "small_pool"),
            (kSmallSize // elem, 2, 2 * kSmallBuffer, kSmallBuffer, "small_pool"),
            ((kSmallSize + 512) // elem, 1, kLargeBuffer, kLargeBuffer, "large_pool"),
            (
                (kMinLargeAlloc - 512) // elem,
                2,
                2 * kLargeBuffer,
                kLargeBuffer,
                "large_pool",
            ),
            (
                (kMinLargeAlloc + 512) // elem,
                3,
                3
                * (
                    kRoundLarge
                    * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge)
                ),
                kRoundLarge * ((kMinLargeAlloc + 512 + kRoundLarge - 1) // kRoundLarge),
                "large_pool",
            ),
        )

        stats_to_check = ("segment.", "reserved_bytes.", "active.", "active_bytes.")

        gc.collect()
        torch.cuda.empty_cache()

        s = torch.cuda.Stream()

        for (
            numel,
            delta_cudaMallocs,
            delta_cudaMalloc_bytes,
            delta_cudaMalloc_bytes_post_del_g,
            pool_string,
        ) in cases:
            if pool_string == "small_pool":
                delta_active_blocks = 3  # one from "b" plus a sneaky two from CUDAGraph's one-element rng seed and offset holders
                delta_active_bytes = (
                    numel * elem + 1024
                )  # + 1024 for CUDAGraph's rng seed and offset holders each
            else:
                delta_active_blocks = 1  # We only check the large pool, which isn't affected by rng offset holder
                delta_active_bytes = numel * elem

            g = torch.cuda.CUDAGraph()
            s.wait_stream(torch.cuda.current_stream())
            with torch.cuda.stream(s):
                # Allocation stat estimates assume input is created on the same stream as capture_begin()
                # (in other words, the same stream silo as the rng offset holder, which is not allocated from the
                # capture's private pool).
                a = torch.ones((numel,), device="cuda")

                precapture_stats = torch.cuda.memory_stats()

                g.capture_begin()
                b = a.clone()
                for _ in range(5):
                    b = b.clone() + 1
                g.capture_end()
            torch.cuda.current_stream().wait_stream(s)

            gc.collect()

            postcapture_stats = torch.cuda.memory_stats()

            expecteds = (
                delta_cudaMallocs,
                delta_cudaMalloc_bytes,
                delta_active_blocks,
                delta_active_bytes,
            )
            # Double checks replay and stats before and after a call to empty_cache
            for i in range(2):
                for stat, expected in zip(stats_to_check, expecteds):
                    stat = stat + pool_string + ".current"
                    current = postcapture_stats[stat] - precapture_stats[stat]

                    # There will only ever be one expandable segment in each of the small and large pools. The way the
                    # bookkeeping is done in the allocator means that we never increment the number of segments.
                    if self.expandable_segments and "segment" in stat:
                        expected = 0
                    # These two cases hit an edge case where the PyTorch allocator won't immediately unmap part of an
                    # expandable segment (and as a result reduce the number of reserved bytes) if the block to unmap is
                    # smaller than the page size
                    if (
                        self.expandable_segments
                        and "reserved" in stat
                        and (numel == cases[3][0] or numel == cases[4][0])
                    ):
                        expected = 2 * kLargeBuffer

                    self.assertEqual(
                        current,
                        expected,
                        "Pre to post capture delta of "
                        + stat
                        + f" = {current}, expected = {expected}, numel = {numel}",
                    )

                g.replay()
                self.assertEqual(b.sum().item(), 6 * numel)
                if i == 0:
                    torch.cuda.empty_cache()

            del g
            gc.collect()
            torch.cuda.empty_cache()
            postdel_stats = torch.cuda.memory_stats()

            # Uses graph result b after graph has been deleted
            self.assertEqual(b.sum().item(), 6 * numel)

            # b should be the only live reference remaining from the graph's private pool
            expecteds = (1, delta_cudaMalloc_bytes_post_del_g, 1, numel * elem)
            for stat, expected in zip(stats_to_check, expecteds):
                stat = stat + pool_string + ".current"
                current = postdel_stats[stat] - precapture_stats[stat]

                # There will only ever be one expandable segment in each of the small and large pools. The way the
                # bookkeeping is done in the allocator means that we never increment the number of segments.
                if self.expandable_segments and "segment" in stat:
                    expected = 0
                # These two cases hit an edge case where the PyTorch allocator won't immediately unmap part of an
                # expandable segment (and as a result reduce the number of reserved bytes) if the block to unmap is
                # smaller than the page size
                if (
                    self.expandable_segments
                    and "reserved" in stat
                    and numel == cases[3][0]
                ):
                    expected = 2 * kLargeBuffer
                if (
                    self.expandable_segments
                    and "reserved" in stat
                    and numel == cases[4][0]
                ):
                    expected = kLargeBuffer

                self.assertEqual(
                    current,
                    expected,
                    "Pre capture to post graph delete delta of "
                    + stat
                    + f" = {current}, expected = {expected}, numel = {numel}",
                )

            # del a, b before the next case is essential, otherwise overwriting a and b in the next case
            # can throw off its allocation/deallocation counts.
            del a, b
            # Tensors used across streams (a and b) were held until just now, so no need to call record_stream on them.
            torch.cuda.synchronize()
            torch.cuda.empty_cache()
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    def test_graph_record_stream(self):
        # Makes sure graph capture defers attempting to reclaim allocations used across streams. See
        # "Q. Why skip process_events if a capture might be underway?" in c10/cuda/CUDACachingAllocator.cpp
        torch.cuda.empty_cache()
        potential_problem = torch.zeros((3,), device="cuda")
        a = torch.zeros((3,), device="cuda")
        s0 = torch.cuda.Stream()
        s1 = torch.cuda.Stream()
        s2 = torch.cuda.Stream()
        g = torch.cuda.CUDAGraph()

        torch.cuda.synchronize()
        with torch.cuda.stream(s0):
            # Leave an outstanding cross-stream end-of-life event pending
            # while the capture below is underway.
            potential_problem.record_stream(s0)
            torch.cuda._sleep(TestCuda.FIFTY_MIL_CYCLES)
            potential_problem.fill_(1.0)
        del potential_problem

        with torch.cuda.stream(s1):
            g.capture_begin()
            # potential_problem's allocation should still be outstanding. if DeviceCachingAllocator::malloc
            # mistakenly calls process_events, it will trigger cudaEventQueries on potential_problem's end-of-life
            # event, which will cause the capture to error.
            b = a.clone()

            # Let's also see what happens if we record_stream on a tensor during capture.
            s2.wait_stream(s1)
            with torch.cuda.stream(s2):
                b.fill_(1.0)
                b.record_stream(s2)  # dummy record_stream
            del b
            s1.wait_stream(s2)
            g.capture_end()
        torch.cuda.synchronize()

        # dummy allocation triggers process_events, Hopefully successfully processes b's end-of-life event.
        torch.zeros((3,), device="cuda")
    @skipIfRocm
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    # If this test is the first in the process to try cudnn rnns with dropout, it'll initialize
    # DropoutState's long-lived internal buffer. Calling code perceives this (correct) behavior
    # as a memory leak unless we skip the leak check.
    @skipCUDAMemoryLeakCheckIf(True)
    @serialTest()
    def test_graph_cudnn_dropout(self):
        # Tests the interaction of cuda graph capture with DropoutState's syncs in ATen/native/cudnn/RNN.cpp.
        # In particular, if user runs a sequence of captured and noncaptured cudnn rnns, DropoutState should
        # avoid syncing noncapturing streams with captured events or vice versa.
        torch.cuda.empty_cache()

        model = torch.nn.LSTM(512, 512, 2, dropout=0.5).cuda()
        x = torch.ones(100, 192, 512, device="cuda")

        # Eager warmup run before capture.
        model(x)

        g = torch.cuda.CUDAGraph()
        s = torch.cuda.Stream()
        s.wait_stream(torch.cuda.current_stream())
        with torch.cuda.stream(s):
            g.capture_begin()
            model(x)
            g.capture_end()
        torch.cuda.current_stream().wait_stream(s)

        g.replay()

        # Eager run after replay must also succeed.
        model(x)
    @skipIfRocm
    @unittest.skipIf(
        not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
    )
    @serialTest()
    def test_graph_checkpoint_preserve_rng_state(self):
        """torch.utils.checkpoint with preserve_rng_state=True must produce
        bitwise-identical gradients when run inside a captured graph as when
        run eagerly under the same seed."""
        torch.cuda.manual_seed(42)

        def fn(x):
            return x * torch.sigmoid(torch.randn(1, device="cuda"))

        # Warmup call (initializes CUDA RNG / checkpoint machinery).
        fn(torch.ones(1, device="cuda"))

        torch.cuda.manual_seed(42)
        eager_in = torch.ones(1, device="cuda", requires_grad=True)
        eager_out = torch.utils.checkpoint.checkpoint(
            fn, eager_in, use_reentrant=False, preserve_rng_state=True
        )
        (eager_in_grad,) = torch.autograd.grad(eager_out, eager_in)

        g = torch.cuda.CUDAGraph()
        with torch.cuda.graph(g):
            graph_in = torch.ones(1, device="cuda", requires_grad=True)
            graph_out = torch.utils.checkpoint.checkpoint(
                fn, graph_in, use_reentrant=False, preserve_rng_state=True
            )
            (graph_in_grad,) = torch.autograd.grad(graph_out, graph_in)

        torch.cuda.manual_seed(42)
        g.replay()
        # rtol/atol of 0 requires exact bitwise equality.
        self.assertEqual(eager_in_grad, graph_in_grad, rtol=0.0, atol=0.0)
@skipIfRocm
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
@serialTest()
def test_graph_manual_seed_mismatch_raises(self):
torch.cuda.manual_seed(0)
g = torch.cuda.CUDAGraph()
with self.assertRaisesRegex(
RuntimeError,
"CUDAGeneratorImpl::set_current_seed can be called during stream capture only if new seed is the same as the original seed.", # noqa: B950
):
with torch.cuda.graph(g):
torch.cuda.manual_seed(1)
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
@parametrize(
"with_amp,cache_enabled,allow_unused_input",
[
subtest((True, True, True), decorators=[unittest.expectedFailure]),
subtest((False, False, False), decorators=[unittest.expectedFailure]),
],
name_fn=lambda x, y, z: "{}{}{}".format(
{True: "with_amp", False: "without_amp"}[x],
{True: "_cache_enabled", False: "_cache_disabled"}[y] if x else "",
{True: "_allow_unused_input", False: "_not_allow_unused_input"}[z],
),
)
@serialTest()
def test_graph_make_graphed_callables(
self, with_amp, cache_enabled, allow_unused_input
):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, D_out = 640, 4096, 2048, 1024
class MLP1(torch.nn.Module):
def __init__(self, D_in: int, H: int, D_out: int):
super().__init__()
self.net_1 = torch.nn.Sequential(
torch.nn.Linear(D_in, H), torch.nn.Dropout(p=0.1)
).cuda()
self.net_2 = torch.nn.Sequential(
torch.nn.Linear(H, D_out), torch.nn.Dropout(p=0.2)
).cuda()
def forward(self, input_dict: dict):
x = input_dict["x"]
return self.net_2(self.net_1(x))
class MLP2(torch.nn.Module):
def __init__(self, D_in: int, H: int, D_out: int):
super().__init__()
self.net_1 = torch.nn.Sequential(
torch.nn.Linear(D_in, H), torch.nn.Dropout(p=0.1)
).cuda()
self.net_2 = torch.nn.Sequential(
torch.nn.Linear(H, D_out), torch.nn.Dropout(p=0.2)
).cuda()
def forward(self, x):
return self.net_2(self.net_1(x))
class ParameterlessModule(torch.nn.Module):
def forward(self, x):
idx = (
torch.arange(x.size(0), device=x.device)
.view(-1, 1)
.repeat(1, x.size(1))
)
return {"output": torch.gather(x, 0, idx)}
models = []
for _ in range(2):
model_section1 = MLP1(D_in, H, H).cuda()
model_section2 = MLP2(H, H, D_out).cuda()
model_section3 = ParameterlessModule().cuda()
models.append(
torch.nn.Sequential(model_section1, model_section2, model_section3)
)
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
opt_graphed = torch.optim.SGD(model_graphed.parameters(), lr=0.1)
opt_control = torch.optim.SGD(model_control.parameters(), lr=0.1)
x = torch.randn(N, D_in, device="cuda")
h = torch.randn(N, H, device="cuda", requires_grad=True)
h2 = torch.randn(N, D_out, device="cuda", requires_grad=True)
unused_input = torch.randn(N, H, device="cuda", requires_grad=True)
y_pred = torch.randn(N, D_out, device="cuda", requires_grad=True)
y = torch.randn(N, D_out, device="cuda")
loss_fn_control = torch.nn.functional.mse_loss
relu_control = torch.nn.functional.relu
# This is a good stress test. It graphs four callables: two Modules and two python functions.
with torch.amp.autocast(
device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
):
(
model_graphed[0],
model_graphed[1],
model_graphed[2],
relu_graphed,
loss_fn_graphed,
) = torch.cuda.make_graphed_callables(
(
model_graphed[0],
model_graphed[1],
model_graphed[2],
relu_control,
loss_fn_control,
),
(
({"x": x, "unused_input": unused_input},),
(h,),
(h2,),
(y_pred,),
(y_pred, y),
),
allow_unused_input=allow_unused_input,
)
real_inputs = [torch.rand_like(x) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m, opt, relu, loss_fn in zip(
(model_graphed, model_control),
(opt_graphed, opt_control),
(relu_graphed, relu_control),
(loss_fn_graphed, loss_fn_control),
):
# Resets RNC states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, target in zip(real_inputs, real_targets):
opt.zero_grad(set_to_none=True)
with torch.amp.autocast(
device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
):
y_pred = m({"x": data, "unused_input": unused_input})["output"]
y_pred = relu(y_pred)
loss = loss_fn(y_pred, target)
loss.backward()
opt.step()
for p, pc in zip(model_graphed.parameters(), model_control.parameters()):
self.assertEqual(p, pc)
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(
model_graphed({"x": real_inputs[0]}), model_control({"x": real_inputs[0]})
)
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
@parametrize(
"with_amp,cache_enabled,allow_unused_input",
[
subtest((False, False, True)),
subtest((True, False, True)),
subtest((True, True, True), decorators=[unittest.expectedFailure]),
subtest((False, False, False)),
],
name_fn=lambda x, y, z: "{}{}{}".format(
{True: "with_amp", False: "without_amp"}[x],
{True: "_cache_enabled", False: "_cache_disabled"}[y] if x else "",
{True: "_allow_unused_input", False: "_not_allow_unused_input"}[z],
),
)
@serialTest()
def test_graph_make_graphed_callables_parameterless_nograd_module(
self, with_amp, cache_enabled, allow_unused_input
):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
N, D_in, H, _ = 640, 4096, 2048, 1024
class ParameterlessModule(torch.nn.Module):
def forward(self, input_dict: dict):
x = input_dict["x"]
idx = (
torch.arange(x.size(0), device=x.device)
.view(-1, 1)
.repeat(1, x.size(1))
)
return {"output": torch.gather(x, 0, idx)}
models = []
for _ in range(2):
model_section1 = ParameterlessModule().cuda()
models.append(torch.nn.Sequential(model_section1))
model_graphed = models[0]
model_control = models[1]
model_graphed.load_state_dict(model_control.state_dict())
x = torch.randn(N, D_in, device="cuda", requires_grad=False)
unused_input = torch.randn(N, H, device="cuda", requires_grad=False)
y = torch.randn(N, D_in, device="cuda")
# This is a good stress test. It graphs four callables: two Modules and two python functions.
with torch.amp.autocast(
device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
):
model_graphed[0] = torch.cuda.make_graphed_callables(
model_graphed[0],
({"x": x, "unused_input": unused_input},),
allow_unused_input=allow_unused_input,
)
real_inputs = [torch.rand_like(x, requires_grad=True) for _ in range(10)]
real_targets = [torch.rand_like(y) for _ in range(10)]
for m in (model_graphed, model_control):
# Resets RNC states before iterations for graphed and ungraphed models,
# so dropout math should be bitwise identical for both.
torch.manual_seed(5)
torch.cuda.manual_seed(5)
for data, _ in zip(real_inputs, real_targets):
with torch.amp.autocast(
device_type="cuda", enabled=with_amp, cache_enabled=cache_enabled
):
m({"x": data, "unused_input": unused_input})["output"]
# We graphed the models in training mode. Eval should still run ungraphed.
model_graphed.eval()
model_control.eval()
self.assertEqual(
model_graphed({"x": real_inputs[0]}), model_control({"x": real_inputs[0]})
)
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_graph_make_graphed_callables_same_pool(self):
torch.manual_seed(5)
torch.cuda.manual_seed(5)
models = []
num_models = 3
for _ in range(num_models):
models.append(
torch.nn.Sequential(
torch.nn.Linear(32, 128),
torch.nn.ReLU(),
torch.nn.Linear(128, 128),
).cuda()
)
# we will reuse the same pool for all graph captures
mempool = torch.cuda.graph_pool_handle()
graphed_models = []
for model in models:
x = torch.randn([64, 32], device="cuda")
graphed_model = deepcopy(model)
graphed_model = torch.cuda.make_graphed_callables(
graphed_model, (x,), pool=mempool
)
graphed_models.append(graphed_model)
for model, graphed_model in zip(models, graphed_models):
x = torch.randn([64, 32], device="cuda")
y = model(x)
yg = graphed_model(x)
l = y.norm()
lg = yg.norm()
l.backward()
lg.backward()
self.assertEqual(y, yg)
self.assertEqual(l, lg)
for p, pg in zip(model.parameters(), graphed_model.parameters()):
self.assertEqual(p, pg)
self.assertEqual(p.grad, pg.grad)
self.assertNotEqual(p.data_ptr(), pg.data_ptr())
self.assertNotEqual(p.grad.data_ptr(), pg.grad.data_ptr())
def _test_graphed_optimizer(
self, steps_warmup, steps_train, optimizer_ctor, kwargs
):
for actually_do_graphs in (True, False):
params = [torch.randn((i + 5, i + 5), device="cuda") for i in range(2)] + [
torch.randn((), device="cuda")
]
params_control = [p.clone().requires_grad_() for p in params]
params_graphed = [p.clone().requires_grad_() for p in params]
grads = [
[torch.randn_like(p) for p in params]
for _ in range(steps_warmup + steps_train)
]
# Control (capturable=False)
opt = optimizer_ctor(params_control, capturable=False, **kwargs)
for i in range(steps_warmup + steps_train):
for j, p in enumerate(params_control):
p.grad = grads[i][j]
opt.step()
# capturable=True
opt = optimizer_ctor(params_graphed, capturable=True, **kwargs)
for i in range(steps_warmup):
for j, p in enumerate(params_graphed):
p.grad = grads[i][j]
opt.step()
if actually_do_graphs:
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
opt.step()
for i in range(steps_train):
if actually_do_graphs:
for j, p in enumerate(params_graphed):
p.grad.copy_(grads[i + steps_warmup][j])
g.replay()
else:
# Passing capturable=True to the constructor and running without graphs should still be
# numerically correct, even if it's not ideal for performance.
for j, p in enumerate(params_graphed):
p.grad = grads[i + steps_warmup][j]
opt.step()
for p_control, p_graphed in zip(params_control, params_graphed):
self.assertEqual(p_control, p_graphed)
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_graph_optims_with_explicitly_capturable_param_groups(self):
# mimicking `_test_graphed_optimizer` maladroitly to pass two param_groups to optimizer.__init__
n_warmup, n_replay = 3, 2
for optimizer, second_param_group_capturable in product(
(
torch.optim.Adam,
torch.optim.AdamW,
torch.optim.ASGD,
torch.optim.Adamax,
torch.optim.NAdam,
torch.optim.RAdam,
torch.optim.Adadelta,
torch.optim.RMSprop,
torch.optim.Rprop,
),
(True, False),
):
ref_p1, param1 = (
torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2)
)
ref_p2, param2 = (
torch.nn.Parameter(torch.ones(1, device="cuda")) for _ in range(2)
)
grads1, grads2 = (
[torch.randn_like(param1) for _ in range(n_warmup + n_replay)]
for _ in range(2)
)
ref_grads1, ref_grads2 = (
[t.clone() for t in tensors] for tensors in (grads1, grads2)
)
params = [
{"params": [param1], "capturable": True},
{"params": [param2], "capturable": second_param_group_capturable},
]
opt = optimizer(params)
opt_ = optimizer(
[
{"params": [ref_p1], "capturable": False},
{"params": [ref_p2], "capturable": False},
]
)
for i in range(n_warmup + n_replay):
ref_p1.grad = ref_grads1[i]
ref_p2.grad = ref_grads2[i]
opt_.step()
for i in range(n_warmup):
param1.grad = grads1[i]
param2.grad = grads2[i]
opt.step()
g = torch.cuda.CUDAGraph()
if not second_param_group_capturable:
with self.assertRaisesRegex(RuntimeError, "Attempting CUDA graph"):
with torch.cuda.graph(g):
opt.step()
else:
with torch.cuda.graph(g):
opt.step()
for i in range(n_replay):
param1.grad.copy_(grads1[n_warmup + i])
param2.grad.copy_(grads2[n_warmup + i])
g.replay()
self.assertEqual(ref_p1, param1)
self.assertEqual(ref_p2, param2)
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_cuda_graph_error_options(self):
def fn():
x = torch.zeros([2000], device="cuda")
y = x + x + x
return y
mem = None
def raw_malloc():
global mem
mem = None
stream = torch.cuda.Stream()
try:
with torch.cuda.stream(stream):
mem = torch.cuda.caching_allocator_alloc(1024)
except BaseException: # noqa: B036
if mem is None:
return
try:
torch.cuda.caching_allocator_delete(mem)
mem = None
return None
except BaseException: # noqa: B036
pass
def throws_on_cuda_event(capture_error_mode):
graph = torch.cuda.CUDAGraph()
torch.cuda.synchronize()
stream = torch.cuda.Stream()
stream.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(stream):
fn()
stream.synchronize()
torch.cuda.current_stream().wait_stream(stream)
torch.cuda.synchronize()
try:
with torch.cuda.graph(
graph, stream=stream, capture_error_mode=capture_error_mode
):
out = fn()
thread = threading.Thread(target=raw_malloc)
thread.start()
thread.join()
except Exception:
if mem is not None:
torch.cuda.caching_allocator_delete(mem)
return True
return False
self.assertFalse(throws_on_cuda_event("thread_local"))
self.assertFalse(throws_on_cuda_event("relaxed"))
# Exception would Corrupt Process and make other tests fail
# self.assertTrue(throws_on_cuda_event("global"))
@unittest.skipIf(
not TEST_CUDA_GRAPH,
"CUDA >= 11.0 or ROCM >= 5.3 required for graphs, cuda-python must be installed",
)
def test_cuda_graph_raw_graph_keep_graph_false(self):
graph = torch.cuda.CUDAGraph(keep_graph=False)
x = torch.zeros([2000], device="cuda")
y = torch.ones([2000], device="cuda")
with torch.cuda.graph(graph, capture_error_mode="relaxed"):
z = x + y
with self.assertRaisesRegex(
RuntimeError,
r"instantiate\(\) is intended to be called by the user only when keep_graph=true",
):
raw_pointer = graph.instantiate()
with self.assertRaisesRegex(
RuntimeError,
r"You cannot access the raw (cuda|hip)Graph_t instance unless CUDAGraph was initialized with keep_graph=true",
):
raw_pointer = graph.raw_cuda_graph()
@unittest.skipIf(
not TEST_CUDA_GRAPH or not TEST_CUDA_PYTHON_BINDINGS,
"CUDA >= 11.0 or ROCM >= 5.3 required for graphs, cuda-bindings must be installed",
)
def test_cuda_graph_raw_graph(self):
import cuda.bindings.runtime as cudart
graph = torch.cuda.CUDAGraph(keep_graph=True)
x = torch.zeros([2000], device="cuda")
y = torch.ones([2000], device="cuda")
with torch.cuda.graph(graph, capture_error_mode="relaxed"):
z = x + y
raw_pointer = graph.raw_cuda_graph()
cudart_cuda_graph = cudart.cudaGraph_t(init_value=raw_pointer)
_, num_nodes = cuda_python_error_check(
cudart.cudaGraphGetNodes(cudart_cuda_graph)
)
nodes, _ = cuda_python_error_check(
cudart.cudaGraphGetNodes(cudart_cuda_graph, num_nodes)
)
for node in nodes:
cuda_python_error_check(cudart.cudaGraphNodeGetType(node))
graph.replay()
@unittest.skipIf(
not TEST_CUDA_GRAPH or not TEST_CUDA_PYTHON_BINDINGS,
"CUDA >= 11.0 or ROCM >= 5.3 required for graphs, cuda-bindings must be installed",
)
@parametrize("keep_graph", [True, False])
def test_cuda_graph_raw_graph_exec(self, keep_graph):
import cuda.bindings.runtime as cudart
graph = torch.cuda.CUDAGraph(keep_graph=keep_graph)
x = torch.zeros([2000], device="cuda")
y = torch.ones([2000], device="cuda")
with torch.cuda.graph(graph, capture_error_mode="relaxed"):
z = x + y
if keep_graph:
with self.assertRaisesRegex(
RuntimeError,
r"You cannot access the raw (cuda|hip)GraphExec_t instance until instantiate\(\) has been called",
):
graph.raw_cuda_graph_exec()
graph.instantiate()
raw_pointer = graph.raw_cuda_graph_exec()
cudart_cuda_graph_exec = cudart.cudaGraphExec_t(init_value=raw_pointer)
cuda_python_error_check(cudart.cudaGraphExecGetFlags(cudart_cuda_graph_exec))
graph.replay()
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_cuda_graph_raw_graph_reset_and_recapture(self):
graph = torch.cuda.CUDAGraph(keep_graph=True)
x = torch.zeros([2000], device="cuda")
with torch.cuda.graph(graph, capture_error_mode="relaxed"):
x += 1.0
graph.instantiate()
graph.replay()
self.assertTrue(torch.all(x == 1.0))
# Exercise the code path where you reinstantiate the cuda graph twice.
graph.instantiate()
graph.replay()
self.assertTrue(torch.all(x == 2.0))
graph.replay()
self.assertTrue(torch.all(x == 3.0))
# Check that graph capture can succeed after resetting.
graph.reset()
# Don't do x[:] = 0.0 because we want to capture a new address
# in the next cuda graph, to make sure we are running a new
# cuda graph.
x = torch.zeros([2000], device="cuda")
with torch.cuda.graph(graph, capture_error_mode="relaxed"):
x += 2.0
graph.instantiate()
graph.replay()
self.assertTrue(torch.all(x == 2.0))
# Exercise the code path where you reinstantiate the cuda graph twice.
graph.instantiate()
graph.replay()
self.assertTrue(torch.all(x == 4.0))
graph.replay()
self.assertTrue(torch.all(x == 6.0))
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_cuda_graph_allocator_propagates_stream(self):
segments = torch.cuda.memory_snapshot()
existing_pools = {s["segment_pool_id"] for s in segments}
x = torch.randn(10240000, device="cuda")
y = torch.rand_like(x)
g = torch.cuda.CUDAGraph()
s0 = torch.cuda.Stream()
s1 = torch.cuda.Stream()
s0.wait_stream(torch.cuda.current_stream())
with torch.cuda.stream(s0):
g.capture_begin()
z = x + y
with torch.cuda.stream(s1):
s1.wait_stream(s0)
z + y
s0.wait_stream(s1)
with torch.cuda.stream(s0):
g.capture_end()
segments = torch.cuda.memory_snapshot()
x = [
s["segment_pool_id"]
for s in segments
if s["segment_pool_id"] not in existing_pools
]
self.assertEqual(len(x), 2)
self.assertEqual(x[0], x[1])
@unittest.skipIf(
not TEST_CUDA_GRAPH, "CUDA >= 11.0 or ROCM >= 5.3 required for graphs"
)
def test_cuda_graph_tensor_item_not_allowed(self):
test_script = """\
import torch
import sys
# Tensor.item() calls a synchronize which is not allowed in a cudagraph
# Valid for CUDA and ROCm
def my_func(a: torch.Tensor, b: torch.Tensor, perm: torch.Tensor):
idx = perm[0]
a[0] *= b[idx] # should raise an error during capture
return a
a = torch.rand(500, 500, device="cuda")
b = torch.rand(500, 500, device="cuda")
perm = torch.randint(0, 500, (500,), device="cuda")
g = torch.cuda.CUDAGraph()
with torch.cuda.graph(g):
output = my_func(a, b, perm)
"""
with self.assertRaisesRegex(
subprocess.CalledProcessError,
"calls a synchronize which is not allowed in a cudagraph",
):
r = (
subprocess.check_output([sys.executable, "-c", test_script])
.decode("ascii")
.strip()
)
def test_batch_norm_gather_stats(self):
input = torch.randn(1, 3, 3, 3, device="cuda")
mean, invstd = torch.batch_norm_gather_stats(
input,
mean=torch.ones(2, 3, device="cuda"),
invstd=torch.ones(2, 3, device="cuda"),
running_mean=None,
running_var=None,
momentum=0.1,
eps=1e-5,
count=2,
)
self.assertEqual(mean, torch.ones(3, device="cuda"))
self.assertEqual(invstd, torch.ones(3, device="cuda"))
def test_matmul_memory_use(self):
def get_max_used():
torch.cuda.synchronize()
val = torch.cuda.max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
return val
a = torch.rand(1, 32, 32, device="cuda")
b = torch.rand(24, 32, 1, device="cuda")
get_max_used()
torch.matmul(a, b)
matmul_mem = get_max_used()
a = a.expand(24, 32, 32)
torch.matmul(a, b)
matmul_expand_mem = get_max_used()
torch.bmm(a, b)
bmm_mem = get_max_used()
self.assertEqual(matmul_expand_mem, matmul_mem)
self.assertEqual(bmm_mem, matmul_mem)
@unittest.skipIf(not TEST_WITH_ROCM, "ROCm-only test")
def test_rocm_backward_pass_guard(self):
# The test exercises a ROCm-specific feature.
class MyFunction(torch.autograd.Function):
@staticmethod
def forward(ctx, tensor, constant):
self.assertFalse(torch._C._rocm_is_backward_pass())
ctx.constant = constant
return tensor * constant
@staticmethod
def backward(ctx, grad_output):
self.assertTrue(torch._C._rocm_is_backward_pass())
return grad_output * ctx.constant, None
class MyModule(torch.nn.Module):
def __init__(self) -> None:
super().__init__()
self.a = torch.nn.Parameter(torch.randn(()))
def forward(self, x):
return MyFunction.apply(x, self.a)
model = MyModule()
criterion = torch.nn.MSELoss(reduction="sum")
optimizer = torch.optim.SGD(model.parameters(), lr=1e-6)
x = torch.randn(5, 5)
result = model(x)
loss = criterion(result, x)
optimizer.zero_grad()
loss.backward()
optimizer.step()
def test_matmul_device_mismatch(self):
cpu = torch.rand((10, 10))
cuda = cpu.cuda()
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
cpu @ cuda
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
cuda @ cpu
for s, m1, m2 in product((cpu, cuda), repeat=3):
if s.device == m1.device == m2.device:
torch.addmm(s, m1, m2)
else:
with self.assertRaisesRegex(
RuntimeError, "Expected all tensors to be on the same device"
):
torch.addmm(s, m1, m2)
@unittest.skipIf(TEST_MULTIGPU, "Testing on one GPU is sufficient")
def test_lazy_init(self):
"""Validate that no CUDA calls are made during `import torch` call"""
def check_output(script: str) -> str:
return (
subprocess.check_output([sys.executable, "-c", script])
.decode("ascii")
.strip()
)
VISIBLE_DEVICES = (
"HIP_VISIBLE_DEVICES" if TEST_WITH_ROCM else "CUDA_VISIBLE_DEVICES"
)
test_script = f"import os; import torch;os.environ['{VISIBLE_DEVICES}']='32';print(torch.cuda.device_count())"
rc = check_output(test_script)
self.assertEqual(rc, "0")
if not TEST_WITH_ROCM:
# Check that `cuInit` was not called during the import
# By using ctypes and calling cuDeviceCountGet() and expect CUDA_ERROR_NOT_INITIALIZED == 3
# See https://github.com/pytorch/pytorch/issues/116276 for more details
libcuda_name = "libcuda.so.1" if not IS_WINDOWS else "nvcuda.dll"
cuda_driver_api_call = (
f"ctypes.CDLL('{libcuda_name}').cuDeviceGetCount(ctypes.byref(x))"
)
rc = check_output(
f"import torch; import ctypes;x=ctypes.c_int(-1);print({cuda_driver_api_call})"
)
self.assertEqual(rc, "3")
@unittest.skipIf(not TEST_WITH_ROCM, "not relevant for CUDA testing")
def test_hip_device_count(self):
"""Validate device_count works with both CUDA/HIP visible devices"""
test_script = """\
import torch
import os
print(f"{torch.cuda.device_count()}")
"""
custom_envs = [
{"CUDA_VISIBLE_DEVICES": "0", "HIP_VISIBLE_DEVICES": None},
{"CUDA_VISIBLE_DEVICES": None, "HIP_VISIBLE_DEVICES": "0"},
{"CUDA_VISIBLE_DEVICES": "0,1,2,3", "HIP_VISIBLE_DEVICES": "0"},
{"ROCR_VISIBLE_DEVICES": "0", "HIP_VISIBLE_DEVICES": None},
]
if torch.cuda.device_count() >= 2:
custom_envs.extend(
[
{"ROCR_VISIBLE_DEVICES": "1,2,3", "HIP_VISIBLE_DEVICES": "0"},
]
)
for env_config in custom_envs:
env = os.environ.copy()
for key, value in env_config.items():
if value is None:
env.pop(key, None)
else:
env[key] = value
r = (
subprocess.check_output([sys.executable, "-c", test_script], env=env)
.decode("ascii")
.strip()
)
self.assertEqual("1", r)
@unittest.skipIf(not TEST_MULTIGPU, "requires multiple devices")
def test_device_count_not_cached_pre_init(self):
visible_devices = (
"HIP_VISIBLE_DEVICES" if torch.version.hip else "CUDA_VISIBLE_DEVICES"
)
test_script = f"""\
import torch
import os
r1 = torch.cuda.device_count()
os.environ['{visible_devices}'] = '0'
r2 = torch.cuda.device_count()
torch.empty(10, device='cuda')
print(f"{{r1}}, {{r2}}")
"""
r = (
subprocess.check_output([sys.executable, "-c", test_script])
.decode("ascii")
.strip()
)
x = torch.cuda.device_count()
self.assertEqual(f"{x}, 1", r)
def test_gds_fails_in_ci(self):
if IS_WINDOWS or TEST_WITH_ROCM:
error_msg = "is not supported on this platform"
else:
error_msg = "cuFileHandleRegister failed"
with TemporaryFileName() as f:
with self.assertRaisesRegex(RuntimeError, error_msg):
torch.cuda.gds.GdsFile(f, os.O_CREAT | os.O_RDWR)
def test_is_pinned_no_context(self):
test_script = """\
import torch
import multiprocessing
def fork_and_check_is_pinned():
# Create a pipe to communicate between parent and child processes
parent_conn, child_conn = multiprocessing.Pipe()
def worker(conn):
try:
x = torch.randn(10)
x.is_pinned()
dev = torch.accelerator.current_accelerator()
x = torch.ones(10, device=dev)[0].item()
conn.send(x)
except Exception as e:
conn.send(str(e))
finally:
conn.close()
# Fork a new process
p = multiprocessing.Process(target=worker, args=(child_conn,))
p.start()
# Receive the result from the child process
result = parent_conn.recv()
parent_conn.close()
# Wait for the child process to finish
p.join()
if isinstance(result, str) and result.startswith("Error"):
raise RuntimeError(result)
return result
x = torch.randn(10)
# check that is_pinned won't poison future fork
x.is_pinned()
ret = fork_and_check_is_pinned()
print(ret)
"""
r = (
subprocess.check_output([sys.executable, "-c", test_script])
.decode("ascii")
.strip()
)
self.assertEqual(r, "1.0")
@unittest.skipIf(not TEST_CUDA, "CUDA not available, skipping tests")
@torch.testing._internal.common_utils.markDynamoStrictTest
| TestCuda |
python | pandas-dev__pandas | pandas/tests/series/indexing/test_setitem.py | {
"start": 40677,
"end": 41123
} | class ____(SetitemCastingEquivalents):
@pytest.fixture
def obj(self):
return Series([1, 2, 3], dtype=np.int8)
@pytest.fixture
def key(self):
return 1
@pytest.fixture
def expected(self):
return Series([1, 512, 3], dtype=np.int16)
@pytest.fixture
def raises(self):
return True
@pytest.mark.parametrize("val", [2**33 + 1.0, 2**33 + 1.1, 2**62])
| TestSetitemIntoIntegerSeriesNeedsUpcast |
python | eventlet__eventlet | tests/pools_test.py | {
"start": 6532,
"end": 6614
} | class ____(pools.Pool):
def create(self):
raise RuntimeError()
| RaisePool |
python | sqlalchemy__sqlalchemy | test/sql/test_compare.py | {
"start": 4798,
"end": 5064
} | class ____(HasCacheKey):
def __init__(self, name, element):
self.name = name
self.element = element
_cache_key_traversal = [
("name", InternalTraversal.dp_string),
("element", InternalTraversal.dp_clauseelement),
]
| MyEntity |
python | jazzband__django-polymorphic | src/polymorphic/tests/models.py | {
"start": 9133,
"end": 9215
} | class ____(ProxyModelBase):
field1 = models.CharField(max_length=30)
| ProxyModelA |
python | microsoft__pyright | packages/pyright-internal/src/tests/samples/abstractClass6.py | {
"start": 244,
"end": 641
} | class ____(ABC):
@abstractmethod
def method1(self, x: int) -> int:
pass
def func1(base_cls: Type[Base]):
base_cls()
def func2():
# This should generate an error.
Base()
def func3(base_cls: type[Base]):
base_cls()
T = TypeVar("T")
def create_instance(cls: Type[T]) -> T:
return cls()
def func4():
base = create_instance(Base)
base.method1(1)
| Base |
python | apache__airflow | airflow-core/src/airflow/api_fastapi/common/parameters.py | {
"start": 3466,
"end": 3832
} | class ____(BaseParam[NonNegativeInt]):
"""Filter on offset."""
def to_orm(self, select: Select) -> Select:
if self.value is None and self.skip_none:
return select
return select.offset(self.value)
@classmethod
def depends(cls, offset: NonNegativeInt = 0) -> OffsetFilter:
return cls().set_value(offset)
| OffsetFilter |
python | wandb__wandb | wandb/vendor/pygments/lexers/automation.py | {
"start": 10167,
"end": 19648
} | class ____(RegexLexer):
"""
For `AutoIt <http://www.autoitscript.com/site/autoit/>`_ files.
AutoIt is a freeware BASIC-like scripting language
designed for automating the Windows GUI and general scripting
.. versionadded:: 1.6
"""
name = 'AutoIt'
aliases = ['autoit']
filenames = ['*.au3']
mimetypes = ['text/x-autoit']
# Keywords, functions, macros from au3.keywords.properties
# which can be found in AutoIt installed directory, e.g.
# c:\Program Files (x86)\AutoIt3\SciTE\au3.keywords.properties
keywords = """\
#include-once #include #endregion #forcedef #forceref #region
and byref case continueloop dim do else elseif endfunc endif
endselect exit exitloop for func global
if local next not or return select step
then to until wend while exit""".split()
functions = """\
abs acos adlibregister adlibunregister asc ascw asin assign atan
autoitsetoption autoitwingettitle autoitwinsettitle beep binary binarylen
binarymid binarytostring bitand bitnot bitor bitrotate bitshift bitxor
blockinput break call cdtray ceiling chr chrw clipget clipput consoleread
consolewrite consolewriteerror controlclick controlcommand controldisable
controlenable controlfocus controlgetfocus controlgethandle controlgetpos
controlgettext controlhide controllistview controlmove controlsend
controlsettext controlshow controltreeview cos dec dircopy dircreate
dirgetsize dirmove dirremove dllcall dllcalladdress dllcallbackfree
dllcallbackgetptr dllcallbackregister dllclose dllopen dllstructcreate
dllstructgetdata dllstructgetptr dllstructgetsize dllstructsetdata
drivegetdrive drivegetfilesystem drivegetlabel drivegetserial drivegettype
drivemapadd drivemapdel drivemapget drivesetlabel drivespacefree
drivespacetotal drivestatus envget envset envupdate eval execute exp
filechangedir fileclose filecopy filecreatentfslink filecreateshortcut
filedelete fileexists filefindfirstfile filefindnextfile fileflush
filegetattrib filegetencoding filegetlongname filegetpos filegetshortcut
filegetshortname filegetsize filegettime filegetversion fileinstall filemove
fileopen fileopendialog fileread filereadline filerecycle filerecycleempty
filesavedialog fileselectfolder filesetattrib filesetpos filesettime
filewrite filewriteline floor ftpsetproxy guicreate guictrlcreateavi
guictrlcreatebutton guictrlcreatecheckbox guictrlcreatecombo
guictrlcreatecontextmenu guictrlcreatedate guictrlcreatedummy
guictrlcreateedit guictrlcreategraphic guictrlcreategroup guictrlcreateicon
guictrlcreateinput guictrlcreatelabel guictrlcreatelist
guictrlcreatelistview guictrlcreatelistviewitem guictrlcreatemenu
guictrlcreatemenuitem guictrlcreatemonthcal guictrlcreateobj
guictrlcreatepic guictrlcreateprogress guictrlcreateradio
guictrlcreateslider guictrlcreatetab guictrlcreatetabitem
guictrlcreatetreeview guictrlcreatetreeviewitem guictrlcreateupdown
guictrldelete guictrlgethandle guictrlgetstate guictrlread guictrlrecvmsg
guictrlregisterlistviewsort guictrlsendmsg guictrlsendtodummy
guictrlsetbkcolor guictrlsetcolor guictrlsetcursor guictrlsetdata
guictrlsetdefbkcolor guictrlsetdefcolor guictrlsetfont guictrlsetgraphic
guictrlsetimage guictrlsetlimit guictrlsetonevent guictrlsetpos
guictrlsetresizing guictrlsetstate guictrlsetstyle guictrlsettip guidelete
guigetcursorinfo guigetmsg guigetstyle guiregistermsg guisetaccelerators
guisetbkcolor guisetcoord guisetcursor guisetfont guisethelp guiseticon
guisetonevent guisetstate guisetstyle guistartgroup guiswitch hex hotkeyset
httpsetproxy httpsetuseragent hwnd inetclose inetget inetgetinfo inetgetsize
inetread inidelete iniread inireadsection inireadsectionnames
inirenamesection iniwrite iniwritesection inputbox int isadmin isarray
isbinary isbool isdeclared isdllstruct isfloat ishwnd isint iskeyword
isnumber isobj isptr isstring log memgetstats mod mouseclick mouseclickdrag
mousedown mousegetcursor mousegetpos mousemove mouseup mousewheel msgbox
number objcreate objcreateinterface objevent objevent objget objname
onautoitexitregister onautoitexitunregister opt ping pixelchecksum
pixelgetcolor pixelsearch pluginclose pluginopen processclose processexists
processgetstats processlist processsetpriority processwait processwaitclose
progressoff progresson progressset ptr random regdelete regenumkey
regenumval regread regwrite round run runas runaswait runwait send
sendkeepactive seterror setextended shellexecute shellexecutewait shutdown
sin sleep soundplay soundsetwavevolume splashimageon splashoff splashtexton
sqrt srandom statusbargettext stderrread stdinwrite stdioclose stdoutread
string stringaddcr stringcompare stringformat stringfromasciiarray
stringinstr stringisalnum stringisalpha stringisascii stringisdigit
stringisfloat stringisint stringislower stringisspace stringisupper
stringisxdigit stringleft stringlen stringlower stringmid stringregexp
stringregexpreplace stringreplace stringright stringsplit stringstripcr
stringstripws stringtoasciiarray stringtobinary stringtrimleft
stringtrimright stringupper tan tcpaccept tcpclosesocket tcpconnect
tcplisten tcpnametoip tcprecv tcpsend tcpshutdown tcpstartup timerdiff
timerinit tooltip traycreateitem traycreatemenu traygetmsg trayitemdelete
trayitemgethandle trayitemgetstate trayitemgettext trayitemsetonevent
trayitemsetstate trayitemsettext traysetclick trayseticon traysetonevent
traysetpauseicon traysetstate traysettooltip traytip ubound udpbind
udpclosesocket udpopen udprecv udpsend udpshutdown udpstartup vargettype
winactivate winactive winclose winexists winflash wingetcaretpos
wingetclasslist wingetclientsize wingethandle wingetpos wingetprocess
wingetstate wingettext wingettitle winkill winlist winmenuselectitem
winminimizeall winminimizeallundo winmove winsetontop winsetstate
winsettitle winsettrans winwait winwaitactive winwaitclose
winwaitnotactive""".split()
macros = """\
@appdatacommondir @appdatadir @autoitexe @autoitpid @autoitversion
@autoitx64 @com_eventobj @commonfilesdir @compiled @computername @comspec
@cpuarch @cr @crlf @desktopcommondir @desktopdepth @desktopdir
@desktopheight @desktoprefresh @desktopwidth @documentscommondir @error
@exitcode @exitmethod @extended @favoritescommondir @favoritesdir
@gui_ctrlhandle @gui_ctrlid @gui_dragfile @gui_dragid @gui_dropid
@gui_winhandle @homedrive @homepath @homeshare @hotkeypressed @hour
@ipaddress1 @ipaddress2 @ipaddress3 @ipaddress4 @kblayout @lf
@logondnsdomain @logondomain @logonserver @mday @min @mon @msec @muilang
@mydocumentsdir @numparams @osarch @osbuild @oslang @osservicepack @ostype
@osversion @programfilesdir @programscommondir @programsdir @scriptdir
@scriptfullpath @scriptlinenumber @scriptname @sec @startmenucommondir
@startmenudir @startupcommondir @startupdir @sw_disable @sw_enable @sw_hide
@sw_lock @sw_maximize @sw_minimize @sw_restore @sw_show @sw_showdefault
@sw_showmaximized @sw_showminimized @sw_showminnoactive @sw_showna
@sw_shownoactivate @sw_shownormal @sw_unlock @systemdir @tab @tempdir
@tray_id @trayiconflashing @trayiconvisible @username @userprofiledir @wday
@windowsdir @workingdir @yday @year""".split()
tokens = {
'root': [
(r';.*\n', Comment.Single),
(r'(#comments-start|#cs)(.|\n)*?(#comments-end|#ce)',
Comment.Multiline),
(r'[\[\]{}(),;]', Punctuation),
(r'(and|or|not)\b', Operator.Word),
(r'[$|@][a-zA-Z_]\w*', Name.Variable),
(r'!=|==|:=|\.=|<<|>>|[-~+/*%=<>&^|?:!.]', Operator),
include('commands'),
include('labels'),
include('builtInFunctions'),
include('builtInMarcros'),
(r'"', String, combined('stringescape', 'dqs')),
include('numbers'),
(r'[a-zA-Z_#@$][\w#@$]*', Name),
(r'\\|\'', Text),
(r'\`([,%`abfnrtv\-+;])', String.Escape),
(r'_\n', Text), # Line continuation
include('garbage'),
],
'commands': [
(r'(?i)(\s*)(%s)\b' % '|'.join(keywords),
bygroups(Text, Name.Builtin)),
],
'builtInFunctions': [
(r'(?i)(%s)\b' % '|'.join(functions),
Name.Function),
],
'builtInMarcros': [
(r'(?i)(%s)\b' % '|'.join(macros),
Name.Variable.Global),
],
'labels': [
# sendkeys
(r'(^\s*)(\{\S+?\})', bygroups(Text, Name.Label)),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'stringescape': [
(r'\"\"|\`([,%`abfnrtv])', String.Escape),
],
'strings': [
(r'[^"\n]+', String),
],
'dqs': [
(r'"', String, '#pop'),
include('strings')
],
'garbage': [
(r'[^\S\n]', Text),
],
}
| AutoItLexer |
python | doocs__leetcode | solution/1000-1099/1085.Sum of Digits in the Minimum Number/Solution.py | {
"start": 0,
"end": 190
} | class ____:
def sumOfDigits(self, nums: List[int]) -> int:
x = min(nums)
s = 0
while x:
s += x % 10
x //= 10
return s & 1 ^ 1
| Solution |
python | plotly__plotly.py | plotly/graph_objs/contour/_contours.py | {
"start": 233,
"end": 14525
} | class ____(_BaseTraceHierarchyType):
_parent_path_str = "contour"
_path_str = "contour.contours"
_valid_props = {
"coloring",
"end",
"labelfont",
"labelformat",
"operation",
"showlabels",
"showlines",
"size",
"start",
"type",
"value",
}
@property
def coloring(self):
"""
Determines the coloring method showing the contour values. If
"fill", coloring is done evenly between each contour level If
"heatmap", a heatmap gradient coloring is applied between each
contour level. If "lines", coloring is done on the contour
lines. If "none", no coloring is applied on this trace.
The 'coloring' property is an enumeration that may be specified as:
- One of the following enumeration values:
['fill', 'heatmap', 'lines', 'none']
Returns
-------
Any
"""
return self["coloring"]
@coloring.setter
def coloring(self, val):
self["coloring"] = val
@property
def end(self):
"""
Sets the end contour level value. Must be more than
`contours.start`
The 'end' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["end"]
@end.setter
def end(self, val):
self["end"] = val
@property
def labelfont(self):
"""
Sets the font used for labeling the contour levels. The default
color comes from the lines, if shown. The default family and
size come from `layout.font`.
The 'labelfont' property is an instance of Labelfont
that may be specified as:
- An instance of :class:`plotly.graph_objs.contour.contours.Labelfont`
- A dict of string/value properties that will be passed
to the Labelfont constructor
Returns
-------
plotly.graph_objs.contour.contours.Labelfont
"""
return self["labelfont"]
@labelfont.setter
def labelfont(self, val):
self["labelfont"] = val
@property
def labelformat(self):
"""
Sets the contour label formatting rule using d3 formatting
mini-languages which are very similar to those in Python. For
numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
The 'labelformat' property is a string and must be specified as:
- A string
- A number that will be converted to a string
Returns
-------
str
"""
return self["labelformat"]
@labelformat.setter
def labelformat(self, val):
self["labelformat"] = val
@property
def operation(self):
"""
Sets the constraint operation. "=" keeps regions equal to
`value` "<" and "<=" keep regions less than `value` ">" and
">=" keep regions greater than `value` "[]", "()", "[)", and
"(]" keep regions inside `value[0]` to `value[1]` "][", ")(",
"](", ")[" keep regions outside `value[0]` to value[1]` Open
vs. closed intervals make no difference to constraint display,
but all versions are allowed for consistency with filter
transforms.
The 'operation' property is an enumeration that may be specified as:
- One of the following enumeration values:
['=', '<', '>=', '>', '<=', '[]', '()', '[)', '(]', '][',
')(', '](', ')[']
Returns
-------
Any
"""
return self["operation"]
@operation.setter
def operation(self, val):
self["operation"] = val
@property
def showlabels(self):
"""
Determines whether to label the contour lines with their
values.
The 'showlabels' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlabels"]
@showlabels.setter
def showlabels(self, val):
self["showlabels"] = val
@property
def showlines(self):
"""
Determines whether or not the contour lines are drawn. Has an
effect only if `contours.coloring` is set to "fill".
The 'showlines' property must be specified as a bool
(either True, or False)
Returns
-------
bool
"""
return self["showlines"]
@showlines.setter
def showlines(self, val):
self["showlines"] = val
@property
def size(self):
"""
Sets the step between each contour level. Must be positive.
The 'size' property is a number and may be specified as:
- An int or float in the interval [0, inf]
Returns
-------
int|float
"""
return self["size"]
@size.setter
def size(self, val):
self["size"] = val
@property
def start(self):
"""
Sets the starting contour level value. Must be less than
`contours.end`
The 'start' property is a number and may be specified as:
- An int or float
Returns
-------
int|float
"""
return self["start"]
@start.setter
def start(self, val):
self["start"] = val
@property
def type(self):
"""
If `levels`, the data is represented as a contour plot with
multiple levels displayed. If `constraint`, the data is
represented as constraints with the invalid region shaded as
specified by the `operation` and `value` parameters.
The 'type' property is an enumeration that may be specified as:
- One of the following enumeration values:
['levels', 'constraint']
Returns
-------
Any
"""
return self["type"]
@type.setter
def type(self, val):
self["type"] = val
@property
def value(self):
"""
Sets the value or values of the constraint boundary. When
`operation` is set to one of the comparison values
(`=,<,>=,>,<=`) "value" is expected to be a number. When
`operation` is set to one of the interval values
(`[],(),[),(],][,)(,](,)[`) "value" is expected to be an array
of two numbers where the first is the lower bound and the
second is the upper bound.
The 'value' property accepts values of any type
Returns
-------
Any
"""
return self["value"]
@value.setter
def value(self, val):
self["value"] = val
@property
def _prop_descriptions(self):
return """\
coloring
Determines the coloring method showing the contour
values. If "fill", coloring is done evenly between each
contour level If "heatmap", a heatmap gradient coloring
is applied between each contour level. If "lines",
coloring is done on the contour lines. If "none", no
coloring is applied on this trace.
end
Sets the end contour level value. Must be more than
`contours.start`
labelfont
Sets the font used for labeling the contour levels. The
default color comes from the lines, if shown. The
default family and size come from `layout.font`.
labelformat
Sets the contour label formatting rule using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
operation
Sets the constraint operation. "=" keeps regions equal
to `value` "<" and "<=" keep regions less than `value`
">" and ">=" keep regions greater than `value` "[]",
"()", "[)", and "(]" keep regions inside `value[0]` to
`value[1]` "][", ")(", "](", ")[" keep regions outside
`value[0]` to value[1]` Open vs. closed intervals make
no difference to constraint display, but all versions
are allowed for consistency with filter transforms.
showlabels
Determines whether to label the contour lines with
their values.
showlines
Determines whether or not the contour lines are drawn.
Has an effect only if `contours.coloring` is set to
"fill".
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
type
If `levels`, the data is represented as a contour plot
with multiple levels displayed. If `constraint`, the
data is represented as constraints with the invalid
region shaded as specified by the `operation` and
`value` parameters.
value
Sets the value or values of the constraint boundary.
When `operation` is set to one of the comparison values
(`=,<,>=,>,<=`) "value" is expected to be a number.
When `operation` is set to one of the interval values
(`[],(),[),(],][,)(,](,)[`) "value" is expected to be
an array of two numbers where the first is the lower
bound and the second is the upper bound.
"""
def __init__(
self,
arg=None,
coloring=None,
end=None,
labelfont=None,
labelformat=None,
operation=None,
showlabels=None,
showlines=None,
size=None,
start=None,
type=None,
value=None,
**kwargs,
):
"""
Construct a new Contours object
Parameters
----------
arg
dict of properties compatible with this constructor or
an instance of
:class:`plotly.graph_objs.contour.Contours`
coloring
Determines the coloring method showing the contour
values. If "fill", coloring is done evenly between each
contour level If "heatmap", a heatmap gradient coloring
is applied between each contour level. If "lines",
coloring is done on the contour lines. If "none", no
coloring is applied on this trace.
end
Sets the end contour level value. Must be more than
`contours.start`
labelfont
Sets the font used for labeling the contour levels. The
default color comes from the lines, if shown. The
default family and size come from `layout.font`.
labelformat
Sets the contour label formatting rule using d3
formatting mini-languages which are very similar to
those in Python. For numbers, see:
https://github.com/d3/d3-format/tree/v1.4.5#d3-format.
operation
Sets the constraint operation. "=" keeps regions equal
to `value` "<" and "<=" keep regions less than `value`
">" and ">=" keep regions greater than `value` "[]",
"()", "[)", and "(]" keep regions inside `value[0]` to
`value[1]` "][", ")(", "](", ")[" keep regions outside
`value[0]` to value[1]` Open vs. closed intervals make
no difference to constraint display, but all versions
are allowed for consistency with filter transforms.
showlabels
Determines whether to label the contour lines with
their values.
showlines
Determines whether or not the contour lines are drawn.
Has an effect only if `contours.coloring` is set to
"fill".
size
Sets the step between each contour level. Must be
positive.
start
Sets the starting contour level value. Must be less
than `contours.end`
type
If `levels`, the data is represented as a contour plot
with multiple levels displayed. If `constraint`, the
data is represented as constraints with the invalid
region shaded as specified by the `operation` and
`value` parameters.
value
Sets the value or values of the constraint boundary.
When `operation` is set to one of the comparison values
(`=,<,>=,>,<=`) "value" is expected to be a number.
When `operation` is set to one of the interval values
(`[],(),[),(],][,)(,](,)[`) "value" is expected to be
an array of two numbers where the first is the lower
bound and the second is the upper bound.
Returns
-------
Contours
"""
super().__init__("contours")
if "_parent" in kwargs:
self._parent = kwargs["_parent"]
return
if arg is None:
arg = {}
elif isinstance(arg, self.__class__):
arg = arg.to_plotly_json()
elif isinstance(arg, dict):
arg = _copy.copy(arg)
else:
raise ValueError("""\
The first argument to the plotly.graph_objs.contour.Contours
constructor must be a dict or
an instance of :class:`plotly.graph_objs.contour.Contours`""")
self._skip_invalid = kwargs.pop("skip_invalid", False)
self._validate = kwargs.pop("_validate", True)
self._set_property("coloring", arg, coloring)
self._set_property("end", arg, end)
self._set_property("labelfont", arg, labelfont)
self._set_property("labelformat", arg, labelformat)
self._set_property("operation", arg, operation)
self._set_property("showlabels", arg, showlabels)
self._set_property("showlines", arg, showlines)
self._set_property("size", arg, size)
self._set_property("start", arg, start)
self._set_property("type", arg, type)
self._set_property("value", arg, value)
self._process_kwargs(**dict(arg, **kwargs))
self._skip_invalid = False
| Contours |
python | sqlalchemy__sqlalchemy | test/ext/test_mutable.py | {
"start": 1935,
"end": 1988
} | class ____(BasicEntity):
__hash__ = None
| FooWNoHash |
python | jazzband__django-oauth-toolkit | oauth2_provider/exceptions.py | {
"start": 1012,
"end": 1169
} | class ____(InvalidRequestFatalError):
description = "Mismatch between the Client ID of the ID Token and the Client ID that was provided."
| ClientIdMissmatch |
python | apache__airflow | providers/apache/beam/tests/unit/apache/beam/hooks/test_beam.py | {
"start": 16621,
"end": 17553
} | class ____:
@pytest.mark.parametrize(
("options", "expected_args"),
[
({"key": "val"}, ["--key=val"]),
({"key": None}, []),
({"key": True}, ["--key"]),
({"key": False}, []),
({"key": ["a", "b", "c"]}, ["--key=a", "--key=b", "--key=c"]),
({"key": {"a_key": "a_val", "b_key": "b_val"}}, ['--key={"a_key": "a_val", "b_key": "b_val"}']),
# Sets False value cases
({"use_public_ips": False}, ["--no_use_public_ips"]),
({"usePublicIps": False}, ["--usePublicIps=false"]),
],
)
def test_beam_options_to_args(self, options, expected_args):
args = beam_options_to_args(options)
assert args == expected_args
@pytest.fixture
def mocked_beam_version_async():
with mock.patch.object(BeamAsyncHook, "_beam_version", return_value="2.39.0") as m:
yield m
| TestBeamOptionsToArgs |
python | django__django | tests/cache/tests.py | {
"start": 64060,
"end": 65361
} | class ____(BaseMemcachedTests, TestCase):
base_params = PyMemcacheCache_params
@property
def incr_decr_type_error(self):
return cache._lib.exceptions.MemcacheClientError
def test_pymemcache_highest_pickle_version(self):
self.assertEqual(
cache._cache.default_kwargs["serde"]._serialize_func.keywords[
"pickle_version"
],
pickle.HIGHEST_PROTOCOL,
)
for cache_key in settings.CACHES:
for client_key, client in caches[cache_key]._cache.clients.items():
with self.subTest(cache_key=cache_key, server=client_key):
self.assertEqual(
client.serde._serialize_func.keywords["pickle_version"],
pickle.HIGHEST_PROTOCOL,
)
@override_settings(
CACHES=caches_setting_for_tests(
base=PyMemcacheCache_params,
exclude=memcached_excluded_caches,
OPTIONS={"no_delay": True},
)
)
def test_pymemcache_options(self):
self.assertIs(cache._cache.default_kwargs["no_delay"], True)
@override_settings(
CACHES=caches_setting_for_tests(
BACKEND="django.core.cache.backends.filebased.FileBasedCache",
)
)
| PyMemcacheCacheTests |
python | jazzband__django-polymorphic | src/polymorphic/formsets/models.py | {
"start": 12826,
"end": 15213
} | class ____(BaseInlineFormSet, BasePolymorphicModelFormSet):
"""
Polymorphic formset variation for inline formsets
"""
def _construct_form(self, i, **kwargs):
return super()._construct_form(i, **kwargs)
def polymorphic_inlineformset_factory(
parent_model,
model,
formset_children,
formset=BasePolymorphicInlineFormSet,
fk_name=None,
# Base field
# TODO: should these fields be removed in favor of creating
# the base form as a formset child too?
form=ModelForm,
fields=None,
exclude=None,
extra=1,
can_order=False,
can_delete=True,
max_num=None,
formfield_callback=None,
widgets=None,
validate_max=False,
localized_fields=None,
labels=None,
help_texts=None,
error_messages=None,
min_num=None,
validate_min=False,
field_classes=None,
child_form_kwargs=None,
):
"""
Construct the class for an inline polymorphic formset.
All arguments are identical to :func:'~django.forms.models.inlineformset_factory',
with the exception of the ''formset_children'' argument.
:param formset_children: A list of all child :class:'PolymorphicFormSetChild' objects
that tell the inline how to render the child model types.
:type formset_children: Iterable[PolymorphicFormSetChild]
:rtype: type
"""
kwargs = {
"parent_model": parent_model,
"model": model,
"form": form,
"formfield_callback": formfield_callback,
"formset": formset,
"fk_name": fk_name,
"extra": extra,
"can_delete": can_delete,
"can_order": can_order,
"fields": fields,
"exclude": exclude,
"min_num": min_num,
"max_num": max_num,
"widgets": widgets,
"validate_min": validate_min,
"validate_max": validate_max,
"localized_fields": localized_fields,
"labels": labels,
"help_texts": help_texts,
"error_messages": error_messages,
"field_classes": field_classes,
}
FormSet = inlineformset_factory(**kwargs)
child_kwargs = {
# 'exclude': exclude,
}
if child_form_kwargs:
child_kwargs.update(child_form_kwargs)
FormSet.child_forms = polymorphic_child_forms_factory(formset_children, **child_kwargs)
return FormSet
| BasePolymorphicInlineFormSet |
python | scikit-learn__scikit-learn | sklearn/utils/_set_output.py | {
"start": 4528,
"end": 5629
} | class ____:
container_lib = "polars"
def create_container(self, X_output, X_original, columns, inplace=True):
pl = check_library_installed("polars")
columns = get_columns(columns)
columns = columns.tolist() if isinstance(columns, np.ndarray) else columns
if not inplace or not isinstance(X_output, pl.DataFrame):
# In all these cases, we need to create a new DataFrame
return pl.DataFrame(X_output, schema=columns, orient="row")
if columns is not None:
return self.rename_columns(X_output, columns)
return X_output
def is_supported_container(self, X):
pl = check_library_installed("polars")
return isinstance(X, pl.DataFrame)
def rename_columns(self, X, columns):
# we cannot use `rename` since it takes a dictionary and at this stage we have
# potentially duplicate column names in `X`
X.columns = columns
return X
def hstack(self, Xs):
pl = check_library_installed("polars")
return pl.concat(Xs, how="horizontal")
| PolarsAdapter |
python | numba__numba | numba/cuda/cudamath.py | {
"start": 2463,
"end": 2677
} | class ____(ConcreteTemplate):
cases = [
signature(types.float32, types.float32, types.float32),
signature(types.float64, types.float64, types.float64),
]
@infer_global(math.pow)
| Math_remainder |
python | PyCQA__pylint | tests/functional/s/slots_checks.py | {
"start": 1014,
"end": 1069
} | class ____: # [invalid-slots]
__slots__ = 1
| SecondBad |
python | graphql-python__graphene | graphene/tests/issues/test_720.py | {
"start": 258,
"end": 694
} | class ____(graphene.InputObjectType):
@classmethod
def __init_subclass_with_meta__(
cls, container=None, _meta=None, fields=None, **options
):
if _meta is None:
_meta = graphene.types.inputobjecttype.InputObjectTypeOptions(cls)
_meta.fields = fields
super(MyInputClass, cls).__init_subclass_with_meta__(
container=container, _meta=_meta, **options
)
| MyInputClass |
python | kamyu104__LeetCode-Solutions | Python/find-the-k-sum-of-an-array.py | {
"start": 72,
"end": 726
} | class ____(object):
def kSum(self, nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: int
"""
total = sum(x for x in nums if x > 0)
sorted_vals = sorted(abs(x) for x in nums)
max_heap = [(-total, 0)]
for _ in xrange(k):
result, i = heapq.heappop(max_heap)
result = -result
if i == len(sorted_vals):
continue
heapq.heappush(max_heap, (-(result-sorted_vals[i]), i+1))
if i-1 >= 0:
heapq.heappush(max_heap, (-(result+sorted_vals[i-1]-sorted_vals[i]), i+1))
return result
| Solution |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/response_builder/helpers.py | {
"start": 305,
"end": 961
} | class ____(HttpResponseBuilder):
def __init__(
self,
template: List[Any],
records_path: Optional[Union[FieldPath, NestedPath]] = None,
pagination_strategy: Optional[PaginationStrategy] = None,
):
self._response = template
self._records: List[RecordBuilder] = []
self._records_path = records_path
self._pagination_strategy = pagination_strategy
self._status_code = 200
def build(self) -> HttpResponse:
self._response.extend([record.build() for record in self._records])
return HttpResponse(json.dumps(self._response), self._status_code)
| RootHttpResponseBuilder |
python | ray-project__ray | python/ray/tests/test_runtime_env_standalone.py | {
"start": 4385,
"end": 5936
} | class ____(RuntimeEnvPlugin):
name = RT_ENV_AGENT_SLOW_STARTUP_PLUGIN_NAME
def __init__(self):
# This happens in Runtime Env Agent start up process. Make it slow.
time.sleep(5)
print("starting...")
@pytest.mark.parametrize(
"set_runtime_env_plugins",
[
'[{"class":"' + RT_ENV_AGENT_SLOW_STARTUP_PLUGIN_CLASS_PATH + '"}]',
],
indirect=True,
)
def test_slow_runtime_env_agent_startup_on_task_pressure(
shutdown_only, set_runtime_env_plugins
):
"""
Starts nodes with runtime env agent and a slow plugin. Then when the runtime env
agent is still starting up, we submit a lot of tasks to the cluster. The tasks
should wait for the runtime env agent to start up and then run.
https://github.com/ray-project/ray/issues/45353
"""
@ray.remote(num_cpus=0.1)
def get_foo():
return os.environ.get("foo")
print("Submitting 20 tasks...")
# Each task has a different runtime env to ensure the agent is invoked for each.
vals = ray.get(
[
get_foo.options(runtime_env={"env_vars": {"foo": f"bar{i}"}}).remote()
for i in range(20)
]
)
print("20 tasks done.")
assert vals == [f"bar{i}" for i in range(20)]
MY_PLUGIN_CLASS_PATH = "ray.tests.test_runtime_env_standalone.MyPlugin"
MY_PLUGIN_NAME = "MyPlugin"
success_retry_number = 3
runtime_env_retry_times = 0
# This plugin can make runtime env creation failed before the retry times
# increased to `success_retry_number`.
| RtEnvAgentSlowStartupPlugin |
python | django__django | tests/auth_tests/test_auth_backends.py | {
"start": 30522,
"end": 32731
} | class ____(SimpleTestCase):
"""
Tests for AnonymousUser delegating to backend.
"""
def setUp(self):
self.user1 = AnonymousUser()
def test_has_perm(self):
self.assertIs(self.user1.has_perm("perm", TestObj()), False)
self.assertIs(self.user1.has_perm("anon", TestObj()), True)
async def test_ahas_perm(self):
self.assertIs(await self.user1.ahas_perm("perm", TestObj()), False)
self.assertIs(await self.user1.ahas_perm("anon", TestObj()), True)
def test_has_perms(self):
self.assertIs(self.user1.has_perms(["anon"], TestObj()), True)
self.assertIs(self.user1.has_perms(["anon", "perm"], TestObj()), False)
async def test_ahas_perms(self):
self.assertIs(await self.user1.ahas_perms(["anon"], TestObj()), True)
self.assertIs(await self.user1.ahas_perms(["anon", "perm"], TestObj()), False)
def test_has_perms_perm_list_invalid(self):
msg = "perm_list must be an iterable of permissions."
with self.assertRaisesMessage(ValueError, msg):
self.user1.has_perms("perm")
with self.assertRaisesMessage(ValueError, msg):
self.user1.has_perms(object())
async def test_ahas_perms_perm_list_invalid(self):
msg = "perm_list must be an iterable of permissions."
with self.assertRaisesMessage(ValueError, msg):
await self.user1.ahas_perms("perm")
with self.assertRaisesMessage(ValueError, msg):
await self.user1.ahas_perms(object())
def test_has_module_perms(self):
self.assertIs(self.user1.has_module_perms("app1"), True)
self.assertIs(self.user1.has_module_perms("app2"), False)
async def test_ahas_module_perms(self):
self.assertIs(await self.user1.ahas_module_perms("app1"), True)
self.assertIs(await self.user1.ahas_module_perms("app2"), False)
def test_get_all_permissions(self):
self.assertEqual(self.user1.get_all_permissions(TestObj()), {"anon"})
async def test_aget_all_permissions(self):
self.assertEqual(await self.user1.aget_all_permissions(TestObj()), {"anon"})
@override_settings(AUTHENTICATION_BACKENDS=[])
| AnonymousUserBackendTest |
python | numba__numba | numba/tests/support.py | {
"start": 32259,
"end": 32469
} | class ____(object):
"""Mixin to enable the NRT statistics counters."""
def setUp(self):
_nrt.memsys_enable_stats()
def tearDown(self):
_nrt.memsys_disable_stats()
| EnableNRTStatsMixin |
python | huggingface__transformers | src/transformers/models/apertus/modular_apertus.py | {
"start": 11500,
"end": 13182
} | class ____(LlamaDecoderLayer):
def __init__(self, config: ApertusConfig, layer_idx: int):
super().__init__(config, layer_idx)
self.attention_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
self.feedforward_layernorm = ApertusRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
del self.input_layernorm
del self.post_attention_layernorm
def forward(
self,
hidden_states: torch.Tensor,
attention_mask: Optional[torch.Tensor] = None,
position_ids: Optional[torch.LongTensor] = None,
past_key_values: Optional[Cache] = None,
use_cache: Optional[bool] = False,
cache_position: Optional[torch.LongTensor] = None,
position_embeddings: Optional[tuple[torch.Tensor, torch.Tensor]] = None,
**kwargs: Unpack[TransformersKwargs],
) -> tuple[torch.Tensor]:
residual = hidden_states
hidden_states = self.attention_layernorm(hidden_states)
hidden_states, _ = self.self_attn(
hidden_states=hidden_states,
attention_mask=attention_mask,
position_ids=position_ids,
past_key_values=past_key_values,
use_cache=use_cache,
cache_position=cache_position,
position_embeddings=position_embeddings,
**kwargs,
)
hidden_states = residual + hidden_states
# Fully Connected
residual = hidden_states
hidden_states = self.feedforward_layernorm(hidden_states)
hidden_states = self.mlp(hidden_states)
hidden_states = residual + hidden_states
return hidden_states
| ApertusDecoderLayer |
python | huggingface__transformers | src/transformers/models/llava_next_video/video_processing_llava_next_video.py | {
"start": 817,
"end": 1348
} | class ____(BaseVideoProcessor):
resample = PILImageResampling.BICUBIC
image_mean = OPENAI_CLIP_MEAN
image_std = OPENAI_CLIP_STD
size = {"shortest_edge": 224}
default_to_square = False
crop_size = {"height": 224, "width": 224}
do_resize = True
do_center_crop = True
do_rescale = True
do_normalize = True
do_convert_rgb = True
do_sample_frames = False # Set to False for BC, recommended to set `True` in new models
__all__ = ["LlavaNextVideoVideoProcessor"]
| LlavaNextVideoVideoProcessor |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-github/source_github/streams.py | {
"start": 70099,
"end": 71585
} | class ____(GithubStream):
"""
API docs: https://docs.github.com/en/rest/teams/members?apiVersion=2022-11-28#list-team-members
"""
use_cache = True
primary_key = ["id", "team_slug"]
def __init__(self, parent: Teams, **kwargs):
super().__init__(**kwargs)
self.parent = parent
def path(self, stream_slice: Mapping[str, Any] = None, **kwargs) -> str:
return f"orgs/{stream_slice['organization']}/teams/{stream_slice['team_slug']}/members"
def stream_slices(
self, sync_mode: SyncMode, cursor_field: List[str] = None, stream_state: Mapping[str, Any] = None
) -> Iterable[Optional[Mapping[str, Any]]]:
parent_stream_slices = self.parent.stream_slices(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_state=stream_state
)
for stream_slice in parent_stream_slices:
parent_records = self.parent.read_records(
sync_mode=SyncMode.full_refresh, cursor_field=cursor_field, stream_slice=stream_slice, stream_state=stream_state
)
for record in parent_records:
yield {"organization": record["organization"], "team_slug": record["slug"]}
def transform(self, record: MutableMapping[str, Any], stream_slice: Mapping[str, Any]) -> MutableMapping[str, Any]:
record["organization"] = stream_slice["organization"]
record["team_slug"] = stream_slice["team_slug"]
return record
| TeamMembers |
python | tensorflow__tensorflow | tensorflow/python/kernel_tests/control_flow/scan_ops_test.py | {
"start": 2117,
"end": 7077
} | class ____(test.TestCase):
valid_dtypes = [
np.int32,
np.int64,
np.float16,
np.float32,
np.float64,
np.complex64,
np.complex128,
dtypes.bfloat16.as_numpy_dtype,
]
def _compare(self, x, axis, exclusive, reverse):
np_out = handle_options(np.cumsum, x, axis, exclusive, reverse)
with self.cached_session():
tf_out = math_ops.cumsum(x, axis, exclusive, reverse).eval()
self.assertAllClose(np_out, tf_out)
def _compareAll(self, x, axis):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compare(x, axis, exclusive, reverse)
@test_util.run_deprecated_v1
def testEmpty(self):
for dtype in self.valid_dtypes:
x = np.zeros([0]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def testAxisType(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis_dtype in [dtypes.int64, dtypes.int32]:
with self.cached_session():
axis = constant_op.constant(0, axis_dtype)
tf_out = math_ops.cumsum(x, axis).eval()
@test_util.run_deprecated_v1
def testNaN(self):
for dtype in (
np.float16,
np.float32,
np.float64,
dtypes.bfloat16.as_numpy_dtype,
):
for nan_idx in range(0, 5):
x = np.arange(1, 6).reshape([5]).astype(dtype)
x[nan_idx] = np.nan
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test1D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 6).reshape([5]).astype(dtype)
for axis in (-1, 0):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test2D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 10).reshape([2, 5]).astype(dtype)
for axis in (-2, -1, 0, 1):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test3D(self):
for dtype in self.valid_dtypes:
x = np.arange(0, 20).reshape([2, 2, 5]).astype(dtype)
for axis in (-3, -2, -1, 0, 1, 2):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
def test6D(self):
for dtype in self.valid_dtypes:
x = np.arange(1, 145).reshape([2, 2, 3, 3, 2, 2]).astype(dtype)
for axis in range(-6, 6, 3):
self._compareAll(x, axis)
@test_util.run_deprecated_v1
@test_util.disable_xla("b/123860949") # The computation is constant folded
def testLarge(self):
for dtype in self.valid_dtypes:
if np.__version__ >= np.lib.NumpyVersion("2.0.0") and dtype == np.float16:
continue
if dtype == dtypes.bfloat16.as_numpy_dtype:
# https://github.com/numpy/numpy/issues/27709, which might be fixed
# in some numpy version after 2.1.3.
continue
x = np.ones([1000000], dtype=dtype) / 1024
self._compareAll(x, 0)
def testInvalidAxis(self):
x = np.arange(0, 10).reshape([2, 5]).astype(np.float32)
input_tensor = ops.convert_to_tensor(x)
with self.session():
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, -3).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "Expected scan axis in the range [-2, 2)" in str(e)):
math_ops.cumsum(input_tensor, 2).eval()
with self.assertRaisesWithPredicateMatch(
errors_impl.InvalidArgumentError,
lambda e: "axis must be a scalar" in str(e)):
math_ops.cumsum(input_tensor, [0]).eval()
def _compareGradient(self, shape, axis, exclusive, reverse):
x = np.arange(0, 50).reshape(shape).astype(np.float64)
with self.cached_session():
t = ops.convert_to_tensor(x)
result = math_ops.cumsum(t, axis, exclusive, reverse)
jacob_t, jacob_n = gradient_checker.compute_gradient(
t, shape, result, shape, x_init_value=x, delta=1)
self.assertAllClose(jacob_t, jacob_n, rtol=1e-8, atol=1e-8)
@test_util.run_deprecated_v1
def testGradient(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, False)
@test_util.run_deprecated_v1
def testGradientReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, False, True)
@test_util.run_deprecated_v1
def testGradientExclusive(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, False)
@test_util.run_deprecated_v1
def testGradientExclusiveReverse(self):
for axis in (-1, 0):
self._compareGradient([50], axis, True, True)
@test_util.run_deprecated_v1
def testGradient2D(self):
for axis in (-1, 0, 1):
for exclusive in [True, False]:
for reverse in [True, False]:
self._compareGradient([5, 10], axis, exclusive, reverse)
| CumsumTest |
python | numba__numba | numba/tests/test_array_exprs.py | {
"start": 2081,
"end": 3063
} | class ____(Compiler):
@classmethod
def mk_pipeline(cls, args, return_type=None, flags=None, locals=None,
library=None, typing_context=None, target_context=None):
if locals is None:
locals = {}
if not flags:
flags = Flags()
flags.nrt = True
if typing_context is None:
typing_context = cpu_target.typing_context
if target_context is None:
target_context = cpu_target.target_context
return cls(typing_context, target_context, library, args, return_type,
flags, locals)
@classmethod
def mk_no_rw_pipeline(cls, args, return_type=None, flags=None, locals=None,
library=None, **kws):
if locals is None:
locals = {}
if not flags:
flags = Flags()
flags.no_rewrites = True
return cls.mk_pipeline(args, return_type, flags, locals, library, **kws)
| RewritesTester |
python | run-llama__llama_index | llama-index-integrations/readers/llama-index-readers-singlestore/llama_index/readers/singlestore/base.py | {
"start": 208,
"end": 2284
} | class ____(BaseReader):
"""
SingleStore reader.
Args:
scheme (str): Database Scheme.
host (str): Database Host.
port (str): Database Port.
user (str): Database User.
password (str): Database Password.
dbname (str): Database Name.
table_name (str): Table Name.
content_field (str): Content Field.
vector_field (str): Vector Field.
"""
def __init__(
self,
scheme: str,
host: str,
port: str,
user: str,
password: str,
dbname: str,
table_name: str,
content_field: str = "text",
vector_field: str = "embedding",
):
"""Initialize with parameters."""
self.scheme = scheme
self.host = host
self.port = port
self.user = user
self.password = password
self.dbname = dbname
self.table_name = table_name
self.content_field = content_field
self.vector_field = vector_field
try:
import pymysql
pymysql.install_as_MySQLdb()
except ImportError:
pass
self.DatabaseReader = DatabaseReader
self.reader = self.DatabaseReader(
scheme=self.scheme,
host=self.host,
port=self.port,
user=self.user,
password=self.password,
dbname=self.dbname,
)
def load_data(self, search_embedding: str, top_k: int = 5) -> List[Document]:
"""
Load data from SingleStore.
Args:
search_embedding (str): The embedding to search.
top_k (int): Number of results to return.
Returns:
List[Document]: A list of documents.
"""
query = f"""
SELECT {self.content_field}, DOT_PRODUCT_F64({self.vector_field}, JSON_ARRAY_PACK_F64(\'{search_embedding}\')) AS score
FROM {self.table_name}
ORDER BY score
DESC LIMIT {top_k}
"""
return self.reader.load_data(query=query)
| SingleStoreReader |
python | altair-viz__altair | sphinxext/code_ref.py | {
"start": 9136,
"end": 9407
} | class ____(SphinxDirective):
"""Placeholder for non-theme related directive."""
has_content: ClassVar[bool] = False
option_spec = {"packages": directives.unchanged}
def run(self) -> Sequence[nodes.Node]:
raise NotImplementedError
| PyScriptDirective |
python | marshmallow-code__apispec | tests/test_ext_marshmallow.py | {
"start": 53286,
"end": 53821
} | class ____:
def test_timedelta_x_unit(self, spec):
class SchemaWithTimeDelta(Schema):
sec = TimeDelta("seconds")
day = TimeDelta("days")
spec.components.schema("SchemaWithTimeDelta", schema=SchemaWithTimeDelta)
assert (
get_schemas(spec)["SchemaWithTimeDelta"]["properties"]["sec"]["x-unit"]
== "seconds"
)
assert (
get_schemas(spec)["SchemaWithTimeDelta"]["properties"]["day"]["x-unit"]
== "days"
)
| TestTimeDelta |
python | langchain-ai__langchain | libs/langchain/langchain_classic/agents/openai_functions_agent/agent_token_buffer_memory.py | {
"start": 450,
"end": 3650
} | class ____(BaseChatMemory):
"""Memory used to save agent output AND intermediate steps.
Args:
human_prefix: Prefix for human messages.
ai_prefix: Prefix for AI messages.
llm: Language model.
memory_key: Key to save memory under.
max_token_limit: Maximum number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest
messages will be pruned.
return_messages: Whether to return messages.
output_key: Key to save output under.
intermediate_steps_key: Key to save intermediate steps under.
format_as_tools: Whether to format as tools.
"""
human_prefix: str = "Human"
ai_prefix: str = "AI"
llm: BaseLanguageModel
memory_key: str = "history"
max_token_limit: int = 12000
"""The max number of tokens to keep in the buffer.
Once the buffer exceeds this many tokens, the oldest messages will be pruned."""
return_messages: bool = True
output_key: str = "output"
intermediate_steps_key: str = "intermediate_steps"
format_as_tools: bool = False
@property
def buffer(self) -> list[BaseMessage]:
"""String buffer of memory."""
return self.chat_memory.messages
@property
def memory_variables(self) -> list[str]:
"""Always return list of memory variables."""
return [self.memory_key]
@override
def load_memory_variables(self, inputs: dict[str, Any]) -> dict[str, Any]:
"""Return history buffer.
Args:
inputs: Inputs to the agent.
Returns:
A dictionary with the history buffer.
"""
if self.return_messages:
final_buffer: Any = self.buffer
else:
final_buffer = get_buffer_string(
self.buffer,
human_prefix=self.human_prefix,
ai_prefix=self.ai_prefix,
)
return {self.memory_key: final_buffer}
def save_context(self, inputs: dict[str, Any], outputs: dict[str, Any]) -> None:
"""Save context from this conversation to buffer. Pruned.
Args:
inputs: Inputs to the agent.
outputs: Outputs from the agent.
"""
input_str, output_str = self._get_input_output(inputs, outputs)
self.chat_memory.add_messages(input_str) # type: ignore[arg-type]
format_to_messages = (
format_to_tool_messages
if self.format_as_tools
else format_to_openai_function_messages
)
steps = format_to_messages(outputs[self.intermediate_steps_key])
for msg in steps:
self.chat_memory.add_message(msg)
self.chat_memory.add_messages(output_str) # type: ignore[arg-type]
# Prune buffer if it exceeds max token limit
buffer = self.chat_memory.messages
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
if curr_buffer_length > self.max_token_limit:
while curr_buffer_length > self.max_token_limit:
buffer.pop(0)
curr_buffer_length = self.llm.get_num_tokens_from_messages(buffer)
| AgentTokenBufferMemory |
python | tornadoweb__tornado | tornado/test/web_test.py | {
"start": 19103,
"end": 20860
} | class ____(WebTestCase):
def get_handlers(self):
return [("/group/(.*)", EchoHandler), ("/slashes/([^/]*)/([^/]*)", EchoHandler)]
def fetch_json(self, path):
return json_decode(self.fetch(path).body)
def test_group_question_mark(self):
# Ensure that url-encoded question marks are handled properly
self.assertEqual(
self.fetch_json("/group/%3F"),
dict(path="/group/%3F", path_args=["?"], args={}),
)
self.assertEqual(
self.fetch_json("/group/%3F?%3F=%3F"),
dict(path="/group/%3F", path_args=["?"], args={"?": ["?"]}),
)
def test_group_encoding(self):
# Path components and query arguments should be decoded the same way
self.assertEqual(
self.fetch_json("/group/%C3%A9?arg=%C3%A9"),
{
"path": "/group/%C3%A9",
"path_args": ["\u00e9"],
"args": {"arg": ["\u00e9"]},
},
)
def test_slashes(self):
# Slashes may be escaped to appear as a single "directory" in the path,
# but they are then unescaped when passed to the get() method.
self.assertEqual(
self.fetch_json("/slashes/foo/bar"),
dict(path="/slashes/foo/bar", path_args=["foo", "bar"], args={}),
)
self.assertEqual(
self.fetch_json("/slashes/a%2Fb/c%2Fd"),
dict(path="/slashes/a%2Fb/c%2Fd", path_args=["a/b", "c/d"], args={}),
)
def test_error(self):
# Percent signs (encoded as %25) should not mess up printf-style
# messages in logs
with ExpectLog(gen_log, ".*Invalid unicode"):
self.fetch("/group/?arg=%25%e9")
| RequestEncodingTest |
python | apache__airflow | providers/exasol/tests/unit/exasol/hooks/test_sql.py | {
"start": 1204,
"end": 9901
} | class ____(ExasolHook):
conn_name_attr = "exasol_conn_id"
get_conn = MagicMock(name="conn")
@pytest.fixture(autouse=True)
def create_connection(create_connection_without_db):
create_connection_without_db(
Connection(
conn_id=DEFAULT_CONN_ID,
conn_type="exasol",
host=HOST,
login=None,
password=PASSWORD,
extra=None,
)
)
@pytest.fixture
def exasol_hook():
return ExasolHook()
def get_columns(fields: list[str]) -> dict[str, dict[str, Any]]:
return {
field: {"type": "VARCHAR", "nullable": True, "precision": None, "scale": None, "length": None}
for field in fields
}
index = 0
@pytest.mark.parametrize(
(
"return_last",
"split_statements",
"sql",
"cursor_calls",
"cursor_descriptions",
"cursor_results",
"hook_descriptions",
"hook_results",
),
[
pytest.param(
True,
False,
"select * from test.test",
["select * from test.test"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
]
],
[[1, 2], [11, 12]],
id="The return_last set and no split statements set on single query in string",
),
pytest.param(
False,
False,
"select * from test.test;",
["select * from test.test;"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
]
],
[[1, 2], [11, 12]],
id="The return_last not set and no split statements set on single query in string",
),
pytest.param(
True,
True,
"select * from test.test;",
["select * from test.test;"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
]
],
[[1, 2], [11, 12]],
id="The return_last set and split statements set on single query in string",
),
pytest.param(
False,
True,
"select * from test.test;",
["select * from test.test;"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
]
],
[[[1, 2], [11, 12]]],
id="The return_last not set and split statements set on single query in string",
),
pytest.param(
True,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test;", "select * from test.test2;"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[
[
("id2", "VARCHAR", None, None, None, None, True),
("value2", "VARCHAR", None, None, None, None, True),
]
],
[[3, 4], [13, 14]],
id="The return_last set and split statements set on multiple queries in string",
), # Failing
pytest.param(
False,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test;", "select * from test.test2;"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
],
[
("id2", "VARCHAR", None, None, None, None, True),
("value2", "VARCHAR", None, None, None, None, True),
],
],
[[[1, 2], [11, 12]], [[3, 4], [13, 14]]],
id="The return_last not set and split statements set on multiple queries in string",
),
pytest.param(
True,
True,
["select * from test.test;"],
["select * from test.test"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
]
],
[[[1, 2], [11, 12]]],
id="The return_last set on single query in list",
),
pytest.param(
False,
True,
["select * from test.test;"],
["select * from test.test"],
[["id", "value"]],
([[1, 2], [11, 12]],),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
]
],
[[[1, 2], [11, 12]]],
id="The return_last not set on single query in list",
),
pytest.param(
True,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test", "select * from test.test2"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[
[
("id2", "VARCHAR", None, None, None, None, True),
("value2", "VARCHAR", None, None, None, None, True),
]
],
[[3, 4], [13, 14]],
id="The return_last set on multiple queries in list",
),
pytest.param(
False,
True,
"select * from test.test;select * from test.test2;",
["select * from test.test", "select * from test.test2"],
[["id", "value"], ["id2", "value2"]],
([[1, 2], [11, 12]], [[3, 4], [13, 14]]),
[
[
("id", "VARCHAR", None, None, None, None, True),
("value", "VARCHAR", None, None, None, None, True),
],
[
("id2", "VARCHAR", None, None, None, None, True),
("value2", "VARCHAR", None, None, None, None, True),
],
],
[[[1, 2], [11, 12]], [[3, 4], [13, 14]]],
id="The return_last not set on multiple queries not set",
),
],
)
def test_query(
exasol_hook,
return_last,
split_statements,
sql,
cursor_calls,
cursor_descriptions,
cursor_results,
hook_descriptions,
hook_results,
):
with patch("airflow.providers.exasol.hooks.exasol.ExasolHook.get_conn") as mock_conn:
cursors = []
for index in range(len(cursor_descriptions)):
cur = mock.MagicMock(
rowcount=lambda: len(cursor_results[index]),
)
cur.columns.return_value = get_columns(cursor_descriptions[index])
cur.fetchall.return_value = cursor_results[index]
cursors.append(cur)
mock_conn.execute.side_effect = cursors
mock_conn.return_value = mock_conn
results = exasol_hook.run(
sql=sql, handler=fetch_all_handler, return_last=return_last, split_statements=split_statements
)
assert exasol_hook.descriptions == hook_descriptions
assert exasol_hook.last_description == hook_descriptions[-1]
assert results == hook_results
cur.close.assert_called()
@pytest.mark.parametrize(
"empty_statement",
[
pytest.param([], id="Empty list"),
pytest.param("", id="Empty string"),
pytest.param("\n", id="Only EOL"),
],
)
def test_no_query(empty_statement):
dbapi_hook = ExasolHookForTests()
dbapi_hook.get_conn.return_value.cursor.rowcount = lambda: 0
with pytest.raises(ValueError, match="List of SQL statements is empty"):
dbapi_hook.run(sql=empty_statement)
| ExasolHookForTests |
python | tensorflow__tensorflow | tensorflow/python/distribute/input_lib.py | {
"start": 56861,
"end": 59428
} | class ____(type_spec.TypeSpec):
"""Type specification for `_SingleWorkerOwnedDatasetIterator`."""
__slots__ = [
"_worker", "_devices", "_element_spec", "_options",
"_canonicalize_devices"
]
def __init__(self, worker, devices, element_spec, options,
canonicalize_devices=True):
self._worker = worker
if canonicalize_devices:
self._devices = tuple(device_util.canonicalize(d) for d in devices)
else:
self._devices = tuple(
device_util.canonicalize_without_job_and_task(d) for d in devices)
self._element_spec = element_spec
# `self._options` intentionally made not `None` for proper serialization.
self._options = (options if options is not None else
distribute_lib.InputOptions())
self._canonicalize_devices = canonicalize_devices
@property
def value_type(self):
return _SingleWorkerOwnedDatasetIterator
def _serialize(self):
return (self._worker, self._devices, self._element_spec, self._options,
self._canonicalize_devices)
def _get_multi_device_iterator_spec(self, specs):
device_scope = device_util.canonicalize(self._worker, device_util.current())
host_device = device_util.get_host_for_device(device_scope)
# source_device while creating iterator governs the worker device in
# iterator spec.
worker = host_device
specs.append(
multi_device_iterator_ops.MultiDeviceIteratorSpec(
self._devices, worker, element_spec=self._element_spec))
@property
def _component_specs(self):
specs = []
if _should_use_multi_device_iterator(self._options):
self._get_multi_device_iterator_spec(specs)
else:
specs.append(iterator_ops.IteratorSpec(element_spec=self._element_spec))
return specs
def _to_components(self, value):
return [value._iterator] # pylint: disable=protected-access
def _from_components(self, components):
return _SingleWorkerOwnedDatasetIterator(
dataset=None,
worker=self._worker,
devices=self._devices,
components=components,
element_spec=self._element_spec,
options=self._options,
canonicalize_devices=self._canonicalize_devices)
@staticmethod
def from_value(value):
# pylint: disable=protected-access
return _SingleWorkerDatasetIteratorSpec(value._worker, value._devices,
value._element_spec, value._options,
value._canonicalize_devices)
| _SingleWorkerDatasetIteratorSpec |
python | python__mypy | mypy/server/update.py | {
"start": 22602,
"end": 53491
} | class ____(NamedTuple):
module: str
path: str
remaining: list[tuple[str, str]]
messages: list[str]
UpdateResult: _TypeAlias = NormalUpdate | BlockedUpdate
def update_module_isolated(
module: str,
path: str,
manager: BuildManager,
previous_modules: dict[str, str],
graph: Graph,
force_removed: bool,
followed: bool,
) -> UpdateResult:
"""Build a new version of one changed module only.
Don't propagate changes to elsewhere in the program. Raise CompileError on
encountering a blocking error.
Args:
module: Changed module (modified, created or deleted)
path: Path of the changed module
manager: Build manager
graph: Build graph
force_removed: If True, consider the module removed from the build even it the
file exists
Returns a named tuple describing the result (see above for details).
"""
if module not in graph:
manager.log_fine_grained(f"new module {module!r}")
if not manager.fscache.isfile(path) or force_removed:
delete_module(module, path, graph, manager)
return NormalUpdate(module, path, [], None)
sources = get_sources(manager.fscache, previous_modules, [(module, path)], followed)
if module in manager.missing_modules:
manager.missing_modules.remove(module)
orig_module = module
orig_state = graph.get(module)
orig_tree = manager.modules.get(module)
def restore(ids: list[str]) -> None:
# For each of the modules in ids, restore that id's old
# manager.modules and graphs entries. (Except for the original
# module, this means deleting them.)
for id in ids:
if id == orig_module and orig_tree:
manager.modules[id] = orig_tree
elif id in manager.modules:
del manager.modules[id]
if id == orig_module and orig_state:
graph[id] = orig_state
elif id in graph:
del graph[id]
new_modules: list[State] = []
try:
if module in graph:
del graph[module]
load_graph(sources, manager, graph, new_modules)
except CompileError as err:
# Parse error somewhere in the program -- a blocker
assert err.module_with_blocker
restore([module] + [st.id for st in new_modules])
return BlockedUpdate(err.module_with_blocker, path, [], err.messages)
# Reparsing the file may have brought in dependencies that we
# didn't have before. Make sure that they are loaded to restore
# the invariant that a module having a loaded tree implies that
# its dependencies do as well.
ensure_trees_loaded(manager, graph, graph[module].dependencies)
# Find any other modules brought in by imports.
changed_modules = [(st.id, st.xpath) for st in new_modules]
for m in new_modules:
manager.import_map[m.id] = set(m.dependencies + m.suppressed)
# If there are multiple modules to process, only process one of them and return
# the remaining ones to the caller.
if len(changed_modules) > 1:
# As an optimization, look for a module that imports no other changed modules.
module, path = find_relative_leaf_module(changed_modules, graph)
changed_modules.remove((module, path))
remaining_modules = changed_modules
# The remaining modules haven't been processed yet so drop them.
restore([id for id, _ in remaining_modules])
manager.log_fine_grained(f"--> {module!r} (newly imported)")
else:
remaining_modules = []
state = graph[module]
# Process the changed file.
state.parse_file()
assert state.tree is not None, "file must be at least parsed"
t0 = time.time()
try:
semantic_analysis_for_scc(graph, [state.id], manager.errors)
except CompileError as err:
# There was a blocking error, so module AST is incomplete. Restore old modules.
restore([module])
return BlockedUpdate(module, path, remaining_modules, err.messages)
# Merge old and new ASTs.
new_modules_dict: dict[str, MypyFile | None] = {module: state.tree}
replace_modules_with_new_variants(manager, graph, {orig_module: orig_tree}, new_modules_dict)
t1 = time.time()
# Perform type checking.
state.type_checker().reset()
state.type_check_first_pass()
state.type_check_second_pass()
state.detect_possibly_undefined_vars()
state.generate_unused_ignore_notes()
state.generate_ignore_without_code_notes()
t2 = time.time()
state.finish_passes()
t3 = time.time()
manager.add_stats(semanal_time=t1 - t0, typecheck_time=t2 - t1, finish_passes_time=t3 - t2)
graph[module] = state
return NormalUpdate(module, path, remaining_modules, state.tree)
def find_relative_leaf_module(modules: list[tuple[str, str]], graph: Graph) -> tuple[str, str]:
"""Find a module in a list that directly imports no other module in the list.
If no such module exists, return the lexicographically first module from the list.
Always return one of the items in the modules list.
NOTE: If both 'abc' and 'typing' have changed, an effect of the above rule is that
we prefer 'abc', even if both are in the same SCC. This works around a false
positive in 'typing', at least in tests.
Args:
modules: List of (module, path) tuples (non-empty)
graph: Program import graph that contains all modules in the module list
"""
assert modules
# Sort for repeatable results.
modules = sorted(modules)
module_set = {module for module, _ in modules}
for module, path in modules:
state = graph[module]
if len(set(state.dependencies) & module_set) == 0:
# Found it!
return module, path
# Could not find any. Just return the first module (by lexicographic order).
return modules[0]
def delete_module(module_id: str, path: str, graph: Graph, manager: BuildManager) -> None:
manager.log_fine_grained(f"delete module {module_id!r}")
# TODO: Remove deps for the module (this only affects memory use, not correctness)
if module_id in graph:
del graph[module_id]
if module_id in manager.modules:
del manager.modules[module_id]
components = module_id.split(".")
if len(components) > 1:
# Delete reference to module in parent module.
parent_id = ".".join(components[:-1])
# If parent module is ignored, it won't be included in the modules dictionary.
if parent_id in manager.modules:
parent = manager.modules[parent_id]
if components[-1] in parent.names:
del parent.names[components[-1]]
# If the module is removed from the build but still exists, then
# we mark it as missing so that it will get picked up by import from still.
if manager.fscache.isfile(path):
manager.missing_modules.add(module_id)
def dedupe_modules(modules: list[tuple[str, str]]) -> list[tuple[str, str]]:
seen: set[str] = set()
result = []
for id, path in modules:
if id not in seen:
seen.add(id)
result.append((id, path))
return result
def get_module_to_path_map(graph: Graph) -> dict[str, str]:
return {module: node.xpath for module, node in graph.items()}
def get_sources(
fscache: FileSystemCache,
modules: dict[str, str],
changed_modules: list[tuple[str, str]],
followed: bool,
) -> list[BuildSource]:
sources = []
for id, path in changed_modules:
if fscache.isfile(path):
sources.append(BuildSource(path, id, None, followed=followed))
return sources
def calculate_active_triggers(
manager: BuildManager,
old_snapshots: dict[str, dict[str, SymbolSnapshot]],
new_modules: dict[str, MypyFile | None],
) -> set[str]:
"""Determine activated triggers by comparing old and new symbol tables.
For example, if only the signature of function m.f is different in the new
symbol table, return {'<m.f>'}.
"""
names: set[str] = set()
for id in new_modules:
snapshot1 = old_snapshots.get(id)
if snapshot1 is None:
names.add(id)
snapshot1 = {}
new = new_modules[id]
if new is None:
snapshot2 = snapshot_symbol_table(id, SymbolTable())
names.add(id)
else:
snapshot2 = snapshot_symbol_table(id, new.names)
diff = compare_symbol_table_snapshots(id, snapshot1, snapshot2)
package_nesting_level = id.count(".")
for item in diff.copy():
if item.count(".") <= package_nesting_level + 1 and item.split(".")[-1] not in (
"__builtins__",
"__file__",
"__name__",
"__package__",
"__doc__",
):
# Activate catch-all wildcard trigger for top-level module changes (used for
# "from m import *"). This also gets triggered by changes to module-private
# entries, but as these unneeded dependencies only result in extra processing,
# it's a minor problem.
#
# TODO: Some __* names cause mistriggers. Fix the underlying issue instead of
# special casing them here.
diff.add(id + WILDCARD_TAG)
if item.count(".") > package_nesting_level + 1:
# These are for changes within classes, used by protocols.
diff.add(item.rsplit(".", 1)[0] + WILDCARD_TAG)
names |= diff
return {make_trigger(name) for name in names}
def replace_modules_with_new_variants(
manager: BuildManager,
graph: dict[str, State],
old_modules: dict[str, MypyFile | None],
new_modules: dict[str, MypyFile | None],
) -> None:
"""Replace modules with newly builds versions.
Retain the identities of externally visible AST nodes in the
old ASTs so that references to the affected modules from other
modules will still be valid (unless something was deleted or
replaced with an incompatible definition, in which case there
will be dangling references that will be handled by
propagate_changes_using_dependencies).
"""
for id in new_modules:
preserved_module = old_modules.get(id)
new_module = new_modules[id]
if preserved_module and new_module is not None:
merge_asts(preserved_module, preserved_module.names, new_module, new_module.names)
manager.modules[id] = preserved_module
graph[id].tree = preserved_module
def propagate_changes_using_dependencies(
manager: BuildManager,
graph: dict[str, State],
deps: dict[str, set[str]],
triggered: set[str],
up_to_date_modules: set[str],
targets_with_errors: set[str],
processed_targets: list[str],
) -> list[tuple[str, str]]:
"""Transitively rechecks targets based on triggers and the dependency map.
Returns a list (module id, path) tuples representing modules that contain
a target that needs to be reprocessed but that has not been parsed yet.
Processed targets should be appended to processed_targets (used in tests only,
to test the order of processing targets).
"""
num_iter = 0
remaining_modules: list[tuple[str, str]] = []
# Propagate changes until nothing visible has changed during the last
# iteration.
while triggered or targets_with_errors:
num_iter += 1
if num_iter > MAX_ITER:
raise RuntimeError("Max number of iterations (%d) reached (endless loop?)" % MAX_ITER)
todo, unloaded, stale_protos = find_targets_recursive(
manager, graph, triggered, deps, up_to_date_modules
)
# TODO: we sort to make it deterministic, but this is *incredibly* ad hoc
remaining_modules.extend((id, graph[id].xpath) for id in sorted(unloaded))
# Also process targets that used to have errors, as otherwise some
# errors might be lost.
for target in targets_with_errors:
id = module_prefix(graph, target)
if id is not None and id not in up_to_date_modules:
if id not in todo:
todo[id] = set()
manager.log_fine_grained(f"process target with error: {target}")
more_nodes, _ = lookup_target(manager, target)
todo[id].update(more_nodes)
triggered = set()
# First invalidate subtype caches in all stale protocols.
# We need to do this to avoid false negatives if the protocol itself is
# unchanged, but was marked stale because its sub- (or super-) type changed.
for info in stale_protos:
type_state.reset_subtype_caches_for(info)
# Then fully reprocess all targets.
# TODO: Preserve order (set is not optimal)
for id, nodes in sorted(todo.items(), key=lambda x: x[0]):
assert id not in up_to_date_modules
triggered |= reprocess_nodes(manager, graph, id, nodes, deps, processed_targets)
# Changes elsewhere may require us to reprocess modules that were
# previously considered up to date. For example, there may be a
# dependency loop that loops back to an originally processed module.
up_to_date_modules = set()
targets_with_errors = set()
if is_verbose(manager):
manager.log_fine_grained(f"triggered: {list(triggered)!r}")
return remaining_modules
def find_targets_recursive(
manager: BuildManager,
graph: Graph,
triggers: set[str],
deps: dict[str, set[str]],
up_to_date_modules: set[str],
) -> tuple[dict[str, set[FineGrainedDeferredNode]], set[str], set[TypeInfo]]:
"""Find names of all targets that need to reprocessed, given some triggers.
Returns: A tuple containing a:
* Dictionary from module id to a set of stale targets.
* A set of module ids for unparsed modules with stale targets.
"""
result: dict[str, set[FineGrainedDeferredNode]] = {}
worklist = triggers
processed: set[str] = set()
stale_protos: set[TypeInfo] = set()
unloaded_files: set[str] = set()
# Find AST nodes corresponding to each target.
#
# TODO: Don't rely on a set, since the items are in an unpredictable order.
while worklist:
processed |= worklist
current = worklist
worklist = set()
for target in current:
if target.startswith("<"):
module_id = module_prefix(graph, trigger_to_target(target))
if module_id:
ensure_deps_loaded(module_id, deps, graph)
worklist |= deps.get(target, set()) - processed
else:
module_id = module_prefix(graph, target)
if module_id is None:
# Deleted module.
continue
if module_id in up_to_date_modules:
# Already processed.
continue
if (
module_id not in manager.modules
or manager.modules[module_id].is_cache_skeleton
):
# We haven't actually parsed and checked the module, so we don't have
# access to the actual nodes.
# Add it to the queue of files that need to be processed fully.
unloaded_files.add(module_id)
continue
if module_id not in result:
result[module_id] = set()
manager.log_fine_grained(f"process: {target}")
deferred, stale_proto = lookup_target(manager, target)
if stale_proto:
stale_protos.add(stale_proto)
result[module_id].update(deferred)
return result, unloaded_files, stale_protos
def reprocess_nodes(
manager: BuildManager,
graph: dict[str, State],
module_id: str,
nodeset: set[FineGrainedDeferredNode],
deps: dict[str, set[str]],
processed_targets: list[str],
) -> set[str]:
"""Reprocess a set of nodes within a single module.
Return fired triggers.
"""
if module_id not in graph:
manager.log_fine_grained("%s not in graph (blocking errors or deleted?)" % module_id)
return set()
file_node = manager.modules[module_id]
old_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
old_symbols = {name: names.copy() for name, names in old_symbols.items()}
old_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)
def key(node: FineGrainedDeferredNode) -> int:
# Unlike modules which are sorted by name within SCC,
# nodes within the same module are sorted by line number, because
# this is how they are processed in normal mode.
return node.node.line
nodes = sorted(nodeset, key=key)
state = graph[module_id]
options = state.options
manager.errors.set_file_ignored_lines(
file_node.path, file_node.ignored_lines, options.ignore_errors or state.ignore_all
)
manager.errors.set_skipped_lines(file_node.path, file_node.skipped_lines)
targets = set()
for node in nodes:
target = target_from_node(module_id, node.node)
if target is not None:
targets.add(target)
manager.errors.clear_errors_in_targets(file_node.path, targets)
# If one of the nodes is the module itself, emit any errors that
# happened before semantic analysis.
for target in targets:
if target == module_id:
for info in graph[module_id].early_errors:
manager.errors.add_error_info(info)
# Strip semantic analysis information.
saved_attrs: SavedAttributes = {}
for deferred in nodes:
processed_targets.append(deferred.node.fullname)
strip_target(deferred.node, saved_attrs)
semantic_analysis_for_targets(graph[module_id], nodes, graph, saved_attrs)
# Merge symbol tables to preserve identities of AST nodes. The file node will remain
# the same, but other nodes may have been recreated with different identities, such as
# NamedTuples defined using assignment statements.
new_symbols = find_symbol_tables_recursive(file_node.fullname, file_node.names)
for name in old_symbols:
if name in new_symbols:
merge_asts(file_node, old_symbols[name], file_node, new_symbols[name])
# Type check.
checker = graph[module_id].type_checker()
checker.reset()
# We seem to need additional passes in fine-grained incremental mode.
checker.pass_num = 0
checker.last_pass = 3
# It is tricky to reliably invalidate constructor cache in fine-grained increments.
# See PR 19514 description for details.
more = checker.check_second_pass(nodes, allow_constructor_cache=False)
while more:
more = False
if graph[module_id].type_checker().check_second_pass(allow_constructor_cache=False):
more = True
if manager.options.export_types:
manager.all_types.update(graph[module_id].type_map())
new_symbols_snapshot = snapshot_symbol_table(file_node.fullname, file_node.names)
# Check if any attribute types were changed and need to be propagated further.
changed = compare_symbol_table_snapshots(
file_node.fullname, old_symbols_snapshot, new_symbols_snapshot
)
new_triggered = {make_trigger(name) for name in changed}
# Dependencies may have changed.
update_deps(module_id, nodes, graph, deps, options)
# Report missing imports.
graph[module_id].verify_dependencies()
graph[module_id].free_state()
return new_triggered
def find_symbol_tables_recursive(prefix: str, symbols: SymbolTable) -> dict[str, SymbolTable]:
    """Find all nested symbol tables.
    Args:
        prefix: Full name prefix (used for return value keys and to filter result so that
            cross references to other modules aren't included)
        symbols: Root symbol table
    Returns a dictionary from full name to corresponding symbol table.
    """
    tables: dict[str, SymbolTable] = {prefix: symbols}
    for short_name, sym in symbols.items():
        child = sym.node
        # Only recurse into classes actually defined under this prefix;
        # cross-references to other modules are skipped.
        if not isinstance(child, TypeInfo):
            continue
        if not child.fullname.startswith(prefix + "."):
            continue
        tables.update(find_symbol_tables_recursive(prefix + "." + short_name, child.names))
    return tables
def update_deps(
    module_id: str,
    nodes: list[FineGrainedDeferredNode],
    graph: dict[str, State],
    deps: dict[str, set[str]],
    options: Options,
) -> None:
    """Recompute fine-grained dependencies for the given targets and merge into *deps*.

    Mutates *deps* in place; also merges any newly added protocol dependencies.
    """
    for item in nodes:
        state = graph[module_id]
        tree = state.tree
        assert tree is not None, "Tree must be processed at this stage"
        fresh_deps = get_dependencies_of_target(
            module_id, tree, item.node, state.type_map(), options.python_version
        )
        for trigger, targets in fresh_deps.items():
            deps.setdefault(trigger, set()).update(targets)
    # Merge also the newly added protocol deps (if any).
    type_state.update_protocol_deps(deps)
def lookup_target(
    manager: BuildManager, target: str
) -> tuple[list[FineGrainedDeferredNode], TypeInfo | None]:
    """Look up a target by fully-qualified name.
    The first item in the return tuple is a list of deferred nodes that
    needs to be reprocessed. If the target represents a TypeInfo corresponding
    to a protocol, return it as a second item in the return tuple, otherwise None.
    """
    def not_found() -> None:
        # Log-only helper; callers return an empty result right after calling it.
        manager.log_fine_grained(f"Can't find matching target for {target} (stale dependency?)")
    modules = manager.modules
    items = split_target(modules, target)
    if items is None:
        not_found()  # Stale dependency
        return [], None
    module, rest = items
    if rest:
        components = rest.split(".")
    else:
        components = []
    # Walk the dotted components down from the module node, remembering the
    # enclosing file and the innermost class seen along the way.
    node: SymbolNode | None = modules[module]
    file: MypyFile | None = None
    active_class = None
    for c in components:
        if isinstance(node, TypeInfo):
            active_class = node
        if isinstance(node, MypyFile):
            file = node
        if not isinstance(node, (MypyFile, TypeInfo)) or c not in node.names:
            not_found()  # Stale dependency
            return [], None
        # Don't reprocess plugin generated targets. They should get
        # stripped and regenerated when the containing target is
        # reprocessed.
        if node.names[c].plugin_generated:
            return [], None
        node = node.names[c].node
    if isinstance(node, TypeInfo):
        # A ClassDef target covers the body of the class and everything defined
        # within it.  To get the body we include the entire surrounding target,
        # typically a module top-level, since we don't support processing class
        # bodies as separate entities for simplicity.
        assert file is not None
        if node.fullname != target:
            # This is a reference to a different TypeInfo, likely due to a stale dependency.
            # Processing them would spell trouble -- for example, we could be refreshing
            # a deserialized TypeInfo with missing attributes.
            not_found()
            return [], None
        result = [FineGrainedDeferredNode(file, None)]
        stale_info: TypeInfo | None = None
        if node.is_protocol:
            stale_info = node
        # Also reprocess each plain function defined in the class body
        # (recursively resolved so plugin-generated members are skipped).
        for name, symnode in node.names.items():
            node = symnode.node
            if isinstance(node, FuncDef):
                method, _ = lookup_target(manager, target + "." + name)
                result.extend(method)
        return result, stale_info
    if isinstance(node, Decorator):
        # Decorator targets actually refer to the function definition only.
        node = node.func
    if not isinstance(node, (FuncDef, MypyFile, OverloadedFuncDef)):
        # The target can't be refreshed. It's possible that the target was
        # changed to another type and we have a stale dependency pointing to it.
        not_found()
        return [], None
    if node.fullname != target:
        # Stale reference points to something unexpected. We shouldn't process since the
        # context will be wrong and it could be a partially initialized deserialized node.
        not_found()
        return [], None
    return [FineGrainedDeferredNode(node, active_class)], None
def is_verbose(manager: BuildManager) -> bool:
    """Return True if fine-grained update logging should be emitted."""
    verbose_enough = manager.options.verbosity >= 1
    return verbose_enough or DEBUG_FINE_GRAINED
def target_from_node(module: str, node: FuncDef | MypyFile | OverloadedFuncDef) -> str | None:
    """Return the target name corresponding to a deferred node.
    Args:
        module: Must be module id of the module that defines 'node'
    Returns the target name, or None if the node is not a valid target in the given
    module (for example, if it's actually defined in another module).
    """
    if isinstance(node, MypyFile):
        # A module target is only valid if it refers to this very module;
        # otherwise it is likely a stale dependency.
        return module if module == node.fullname else None
    # OverloadedFuncDef or FuncDef
    owner = node.info
    if owner:
        return f"{owner.fullname}.{node.name}"
    return f"{module}.{node.name}"
# Path suffixes that identify a package's __init__ file. On Windows a path may
# use either separator, so both variants are included there.
if sys.platform != "win32":
    INIT_SUFFIXES: Final = ("/__init__.py", "/__init__.pyi")
else:
    INIT_SUFFIXES: Final = (
        os.sep + "__init__.py",
        os.sep + "__init__.pyi",
        os.altsep + "__init__.py",
        os.altsep + "__init__.pyi",
    )
def refresh_suppressed_submodules(
    module: str,
    path: str | None,
    deps: dict[str, set[str]],
    graph: Graph,
    fscache: FileSystemCache,
    refresh_file: Callable[[str, str], list[str]],
) -> list[str] | None:
    """Look for submodules that are now suppressed in target package.
    If a submodule a.b gets added, we need to mark it as suppressed
    in modules that contain "from a import b". Previously we assumed
    that 'a.b' is not a module but a regular name.
    This is only relevant when following imports normally.
    Args:
        module: target package in which to look for submodules
        path: path of the module
        refresh_file: function that reads the AST of a module (returns error messages)
    Return a list of errors from refresh_file() if it was called. If the
    return value is None, we didn't call refresh_file().
    """
    messages = None
    if path is None or not path.endswith(INIT_SUFFIXES):
        # Only packages have submodules.
        return None
    # Find any submodules present in the directory.
    pkgdir = os.path.dirname(path)
    try:
        entries = fscache.listdir(pkgdir)
    except FileNotFoundError:
        entries = []
    for fnam in entries:
        # Skip anything that isn't a plain submodule file: non-Python files,
        # the package's own __init__, and names containing extra dots.
        if (
            not fnam.endswith((".py", ".pyi"))
            or fnam.startswith("__init__.")
            or fnam.count(".") != 1
        ):
            continue
        shortname = fnam.split(".")[0]
        submodule = module + "." + shortname
        trigger = make_trigger(submodule)
        # We may be missing the required fine-grained deps.
        ensure_deps_loaded(module, deps, graph)
        if trigger in deps:
            for dep in deps[trigger]:
                # We can ignore <...> deps since a submodule can't trigger any.
                state = graph.get(dep)
                if not state:
                    # Maybe it's a non-top-level target. We only care about the module.
                    dep_module = module_prefix(graph, dep)
                    if dep_module is not None:
                        state = graph.get(dep_module)
                if state:
                    # The file may be missing an AST if it was loaded from cache.
                    if state.tree is None:
                        # Create AST for the file. This may produce some new errors
                        # that we need to propagate.
                        assert state.path is not None
                        messages = refresh_file(state.id, state.path)
                    tree = state.tree
                    assert tree  # Will be fine, due to refresh_file() above
                    for imp in tree.imports:
                        if isinstance(imp, ImportFrom):
                            # Mark the new submodule as suppressed in every module
                            # that does "from <module> import <shortname>".
                            if (
                                imp.id == module
                                and any(name == shortname for name, _ in imp.names)
                                and submodule not in state.suppressed_set
                            ):
                                state.suppressed.append(submodule)
                                state.suppressed_set.add(submodule)
    return messages
def extract_fnam_from_message(message: str) -> str | None:
    """Return the file name from a mypy "error"/"note" message line.

    Returns None if the line doesn't look like a diagnostic message.
    """
    match = re.match(r"([^:]+):[0-9]+: (error|note): ", message)
    return match.group(1) if match else None
def extract_possible_fnam_from_message(message: str) -> str:
    """Return the text before the first colon (may be a non-path if the line
    happens to contain a stray colon)."""
    prefix, _, _ = message.partition(":")
    return prefix
def sort_messages_preserving_file_order(
    messages: list[str], prev_messages: list[str]
) -> list[str]:
    """Sort messages so that the order of files is preserved.
    An update generates messages so that the files can be in a fairly
    arbitrary order.  Preserve the order of files to avoid messages
    getting reshuffled continuously.  If there are messages in
    additional files, sort them towards the end.
    """
    # Assign each file seen in the previous run an increasing rank.
    rank: dict[str, int] = {}
    for old_msg in prev_messages:
        fnam = extract_fnam_from_message(old_msg)
        if fnam and fnam not in rank:
            rank[fnam] = len(rank)
    # Files not seen before sort after all known files.
    unknown_rank = len(rank)
    # Related messages must stay together as a group of successive lines.
    keyed_groups: list[tuple[int, list[str]]] = []
    idx = 0
    total = len(messages)
    while idx < total:
        current_group = [messages[idx]]
        candidate = extract_possible_fnam_from_message(messages[idx])
        if candidate in rank:
            # This looks like a file name. Pull in all continuation lines
            # that belong to the same message.
            while (
                idx + 1 < total
                and extract_possible_fnam_from_message(messages[idx + 1]) not in rank
                and extract_fnam_from_message(messages[idx + 1]) is None
                and not messages[idx + 1].startswith("mypy: ")
            ):
                idx += 1
                current_group.append(messages[idx])
        keyed_groups.append((rank.get(candidate, unknown_rank), current_group))
        idx += 1
    # sorted() is stable, so groups with the same rank keep their order.
    result: list[str] = []
    for _, group in sorted(keyed_groups, key=lambda kg: kg[0]):
        result.extend(group)
    return result
| BlockedUpdate |
python | airbytehq__airbyte | airbyte-integrations/connectors/source-hubspot/unit_tests/integrations/request_builders/__init__.py | {
"start": 71,
"end": 159
} | class ____:
@abc.abstractmethod
def build(self):
pass
| AbstractRequestBuilder |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 22347,
"end": 22424
} | class ____(MarkdownBlock):
"""A table head Markdown block."""
| MarkdownTHead |
python | Pylons__pyramid | tests/test_events.py | {
"start": 3473,
"end": 4223
} | class ____(unittest.TestCase):
def _getTargetClass(self):
from pyramid.events import ContextFound
return ContextFound
def _makeOne(self, request=None):
if request is None:
request = DummyRequest()
return self._getTargetClass()(request)
def test_class_conforms_to_IContextFound(self):
from zope.interface.verify import verifyClass
from pyramid.interfaces import IContextFound
verifyClass(IContextFound, self._getTargetClass())
def test_instance_conforms_to_IContextFound(self):
from zope.interface.verify import verifyObject
from pyramid.interfaces import IContextFound
verifyObject(IContextFound, self._makeOne())
| ContextFoundEventTests |
python | astropy__astropy | astropy/coordinates/builtin_frames/galactic.py | {
"start": 1865,
"end": 4082
} | class ____(BaseCoordinateFrame):
"""
A coordinate or frame in the Galactic coordinate system.
This frame is used in a variety of Galactic contexts because it has as its
x-y plane the plane of the Milky Way. The positive x direction (i.e., the
l=0, b=0 direction) points to the center of the Milky Way and the z-axis
points toward the North Galactic Pole (following the IAU's 1958 definition
[1]_). However, unlike the `~astropy.coordinates.Galactocentric` frame, the
*origin* of this frame in 3D space is the solar system barycenter, not
the center of the Milky Way.
"""
frame_specific_representation_info = {
r.SphericalRepresentation: [
RepresentationMapping("lon", "l"),
RepresentationMapping("lat", "b"),
],
r.CartesianRepresentation: [
RepresentationMapping("x", "u"),
RepresentationMapping("y", "v"),
RepresentationMapping("z", "w"),
],
r.CartesianDifferential: [
RepresentationMapping("d_x", "U", u.km / u.s),
RepresentationMapping("d_y", "V", u.km / u.s),
RepresentationMapping("d_z", "W", u.km / u.s),
],
}
default_representation = r.SphericalRepresentation
default_differential = r.SphericalCosLatDifferential
# North galactic pole and zeropoint of l in FK4/FK5 coordinates. Needed for
# transformations to/from FK4/5
# These are from the IAU's definition of galactic coordinates
_ngp_B1950 = FK4NoETerms(ra=192.25 * u.degree, dec=27.4 * u.degree)
_lon0_B1950 = Angle(123, u.degree)
# These are *not* from Reid & Brunthaler 2004 - instead, they were
# derived by doing:
#
# >>> FK4NoETerms(ra=192.25*u.degree, dec=27.4*u.degree).transform_to(FK5())
#
# This gives better consistency with other codes than using the values
# from Reid & Brunthaler 2004 and the best self-consistency between FK5
# -> Galactic and FK5 -> FK4 -> Galactic. The lon0 angle was found by
# optimizing the self-consistency.
_ngp_J2000 = FK5(ra=192.8594812065348 * u.degree, dec=27.12825118085622 * u.degree)
_lon0_J2000 = Angle(122.9319185680026, u.degree)
| Galactic |
python | matplotlib__matplotlib | lib/matplotlib/_enums.py | {
"start": 4000,
"end": 6175
} | class ____(str, Enum):
r"""
Define how the two endpoints (caps) of an unclosed line are drawn.
How to draw the start and end points of lines that represent a closed curve
(i.e. that end in a `~.path.Path.CLOSEPOLY`) is controlled by the line's
`JoinStyle`. For all other lines, how the start and end points are drawn is
controlled by the *CapStyle*.
For a visual impression of each *CapStyle*, `view these docs online
<CapStyle>` or run `CapStyle.demo`.
By default, `~.backend_bases.GraphicsContextBase` draws a stroked line as
squared off at its endpoints.
**Supported values:**
.. rst-class:: value-list
'butt'
the line is squared off at its endpoint.
'projecting'
the line is squared off as in *butt*, but the filled in area
extends beyond the endpoint a distance of ``linewidth/2``.
'round'
like *butt*, but a semicircular cap is added to the end of the
line, of radius ``linewidth/2``.
.. plot::
:alt: Demo of possible CapStyle's
from matplotlib._enums import CapStyle
CapStyle.demo()
"""
butt = "butt"
projecting = "projecting"
round = "round"
@staticmethod
def demo():
"""Demonstrate how each CapStyle looks for a thick line segment."""
import matplotlib.pyplot as plt
fig = plt.figure(figsize=(4, 1.2))
ax = fig.add_axes((0, 0, 1, 0.8))
ax.set_title('Cap style')
for x, style in enumerate(['butt', 'round', 'projecting']):
ax.text(x+0.25, 0.85, style, ha='center')
xx = [x, x+0.5]
yy = [0, 0]
ax.plot(xx, yy, lw=12, color='tab:blue', solid_capstyle=style)
ax.plot(xx, yy, lw=1, color='black')
ax.plot(xx, yy, 'o', color='tab:red', markersize=3)
ax.set_ylim(-.5, 1.5)
ax.set_axis_off()
fig.show()
CapStyle.input_description = "{" \
+ ", ".join([f"'{cs.name}'" for cs in CapStyle]) \
+ "}"
_docstring.interpd.register(
JoinStyle=JoinStyle.input_description,
CapStyle=CapStyle.input_description,
)
| CapStyle |
python | pandas-dev__pandas | pandas/tests/indexes/period/test_constructors.py | {
"start": 3503,
"end": 23528
} | class ____:
def test_from_ordinals(self):
Period(ordinal=-1000, freq="Y")
Period(ordinal=0, freq="Y")
idx1 = PeriodIndex.from_ordinals(ordinals=[-1, 0, 1], freq="Y")
idx2 = PeriodIndex.from_ordinals(ordinals=np.array([-1, 0, 1]), freq="Y")
tm.assert_index_equal(idx1, idx2)
def test_construction_base_constructor(self):
# GH 13664
arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="M")]
tm.assert_index_equal(Index(arr), PeriodIndex(arr))
tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr)))
arr = [np.nan, NaT, Period("2011-03", freq="M")]
tm.assert_index_equal(Index(arr), PeriodIndex(arr))
tm.assert_index_equal(Index(np.array(arr)), PeriodIndex(np.array(arr)))
arr = [Period("2011-01", freq="M"), NaT, Period("2011-03", freq="D")]
tm.assert_index_equal(Index(arr), Index(arr, dtype=object))
tm.assert_index_equal(Index(np.array(arr)), Index(np.array(arr), dtype=object))
def test_base_constructor_with_period_dtype(self):
dtype = PeriodDtype("D")
values = ["2011-01-01", "2012-03-04", "2014-05-01"]
result = Index(values, dtype=dtype)
expected = PeriodIndex(values, dtype=dtype)
tm.assert_index_equal(result, expected)
@pytest.mark.parametrize(
"values_constructor", [list, np.array, PeriodIndex, PeriodArray._from_sequence]
)
def test_index_object_dtype(self, values_constructor):
# Index(periods, dtype=object) is an Index (not a PeriodIndex)
periods = [
Period("2011-01", freq="M"),
NaT,
Period("2011-03", freq="M"),
]
values = values_constructor(periods)
result = Index(values, dtype=object)
assert type(result) is Index
tm.assert_numpy_array_equal(result.values, np.array(values))
def test_constructor_use_start_freq(self):
# GH #1118
msg1 = "Period with BDay freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg1):
p = Period("4/2/2012", freq="B")
msg2 = r"PeriodDtype\[B\] is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg2):
expected = period_range(start="4/2/2012", periods=10, freq="B")
with tm.assert_produces_warning(FutureWarning, match=msg2):
index = period_range(start=p, periods=10)
tm.assert_index_equal(index, expected)
def test_constructor_field_arrays(self):
# GH #1264
years = np.arange(1990, 2010).repeat(4)[2:-2]
quarters = np.tile(np.arange(1, 5), 20)[2:-2]
index = PeriodIndex.from_fields(year=years, quarter=quarters, freq="Q-DEC")
expected = period_range("1990Q3", "2009Q2", freq="Q-DEC")
tm.assert_index_equal(index, expected)
index2 = PeriodIndex.from_fields(year=years, quarter=quarters, freq="2Q-DEC")
tm.assert_numpy_array_equal(index.asi8, index2.asi8)
index = PeriodIndex.from_fields(year=years, quarter=quarters)
tm.assert_index_equal(index, expected)
years = [2007, 2007, 2007]
months = [1, 2]
msg = "Mismatched Period array lengths"
with pytest.raises(ValueError, match=msg):
PeriodIndex.from_fields(year=years, month=months, freq="M")
with pytest.raises(ValueError, match=msg):
PeriodIndex.from_fields(year=years, month=months, freq="2M")
years = [2007, 2007, 2007]
months = [1, 2, 3]
idx = PeriodIndex.from_fields(year=years, month=months, freq="M")
exp = period_range("2007-01", periods=3, freq="M")
tm.assert_index_equal(idx, exp)
def test_constructor_nano(self):
idx = period_range(
start=Period(ordinal=1, freq="ns"),
end=Period(ordinal=4, freq="ns"),
freq="ns",
)
exp = PeriodIndex(
[
Period(ordinal=1, freq="ns"),
Period(ordinal=2, freq="ns"),
Period(ordinal=3, freq="ns"),
Period(ordinal=4, freq="ns"),
],
freq="ns",
)
tm.assert_index_equal(idx, exp)
def test_constructor_arrays_negative_year(self):
years = np.arange(1960, 2000, dtype=np.int64).repeat(4)
quarters = np.tile(np.array([1, 2, 3, 4], dtype=np.int64), 40)
pindex = PeriodIndex.from_fields(year=years, quarter=quarters)
tm.assert_index_equal(pindex.year, Index(years))
tm.assert_index_equal(pindex.quarter, Index(quarters))
def test_constructor_invalid_quarters(self):
msg = "Quarter must be 1 <= q <= 4"
with pytest.raises(ValueError, match=msg):
PeriodIndex.from_fields(
year=range(2000, 2004), quarter=list(range(4)), freq="Q-DEC"
)
def test_period_range_fractional_period(self):
msg = "periods must be an integer, got 10.5"
with pytest.raises(TypeError, match=msg):
period_range("2007-01", periods=10.5, freq="M")
def test_constructor_with_without_freq(self):
# GH53687
start = Period("2002-01-01 00:00", freq="30min")
exp = period_range(start=start, periods=5, freq=start.freq)
result = period_range(start=start, periods=5)
tm.assert_index_equal(exp, result)
def test_constructor_fromarraylike(self):
idx = period_range("2007-01", periods=20, freq="M")
# values is an array of Period, thus can retrieve freq
tm.assert_index_equal(PeriodIndex(idx.values), idx)
tm.assert_index_equal(PeriodIndex(list(idx.values)), idx)
msg = "freq not specified and cannot be inferred"
with pytest.raises(ValueError, match=msg):
PeriodIndex(idx.asi8)
with pytest.raises(ValueError, match=msg):
PeriodIndex(list(idx.asi8))
msg = "'Period' object is not iterable"
with pytest.raises(TypeError, match=msg):
PeriodIndex(data=Period("2007", freq="Y"))
result = PeriodIndex(iter(idx))
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx)
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq="M")
tm.assert_index_equal(result, idx)
result = PeriodIndex(idx, freq=offsets.MonthEnd())
tm.assert_index_equal(result, idx)
assert result.freq == "ME"
result = PeriodIndex(idx, freq="2M")
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2ME"
result = PeriodIndex(idx, freq=offsets.MonthEnd(2))
tm.assert_index_equal(result, idx.asfreq("2M"))
assert result.freq == "2ME"
result = PeriodIndex(idx, freq="D")
exp = idx.asfreq("D", "e")
tm.assert_index_equal(result, exp)
def test_constructor_datetime64arr(self):
vals = np.arange(100000, 100000 + 10000, 100, dtype=np.int64)
vals = vals.view(np.dtype("M8[us]"))
pi = PeriodIndex(vals, freq="D")
expected = PeriodIndex(vals.astype("M8[ns]"), freq="D")
tm.assert_index_equal(pi, expected)
@pytest.mark.parametrize("box", [None, "series", "index"])
def test_constructor_datetime64arr_ok(self, box):
# https://github.com/pandas-dev/pandas/issues/23438
data = date_range("2017", periods=4, freq="ME")
if box is None:
data = data._values
elif box == "series":
data = Series(data)
result = PeriodIndex(data, freq="D")
expected = PeriodIndex(
["2017-01-31", "2017-02-28", "2017-03-31", "2017-04-30"], freq="D"
)
tm.assert_index_equal(result, expected)
def test_constructor_dtype(self):
# passing a dtype with a tz should localize
idx = PeriodIndex(["2013-01", "2013-03"], dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-03"], freq="M")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[M]"
idx = PeriodIndex(["2013-01-05", "2013-03-05"], dtype="period[3D]")
exp = PeriodIndex(["2013-01-05", "2013-03-05"], freq="3D")
tm.assert_index_equal(idx, exp)
assert idx.dtype == "period[3D]"
# if we already have a freq and its not the same, then asfreq
# (not changed)
idx = PeriodIndex(["2013-01-01", "2013-01-02"], freq="D")
res = PeriodIndex(idx, dtype="period[M]")
exp = PeriodIndex(["2013-01", "2013-01"], freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
res = PeriodIndex(idx, freq="M")
tm.assert_index_equal(res, exp)
assert res.dtype == "period[M]"
msg = "specified freq and dtype are different"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(["2011-01"], freq="M", dtype="period[D]")
def test_constructor_empty(self):
idx = PeriodIndex([], freq="M")
assert isinstance(idx, PeriodIndex)
assert len(idx) == 0
assert idx.freq == "ME"
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([])
def test_constructor_pi_nat(self):
idx = PeriodIndex(
[Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")]
)
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="M")])
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
[NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
exp = PeriodIndex(["NaT", "NaT", "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(
np.array(
[NaT, NaT, Period("2011-01", freq="M"), Period("2011-01", freq="M")]
)
)
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([NaT, NaT, "2011-01", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex([NaT, NaT])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array([NaT, NaT]))
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(["NaT", "NaT"])
with pytest.raises(ValueError, match="freq not specified"):
PeriodIndex(np.array(["NaT", "NaT"]))
def test_constructor_incompat_freq(self):
msg = "Input has different freq=D from PeriodIndex\\(freq=M\\)"
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex([Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")])
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[Period("2011-01", freq="M"), NaT, Period("2011-01", freq="D")]
)
)
# first element is NaT
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex([NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")])
with pytest.raises(IncompatibleFrequency, match=msg):
PeriodIndex(
np.array(
[NaT, Period("2011-01", freq="M"), Period("2011-01", freq="D")]
)
)
def test_constructor_mixed(self):
idx = PeriodIndex(["2011-01", NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["2011-01", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex(["NaT", NaT, Period("2011-01", freq="M")])
exp = PeriodIndex(["NaT", "NaT", "2011-01"], freq="M")
tm.assert_index_equal(idx, exp)
idx = PeriodIndex([Period("2011-01-01", freq="D"), NaT, "2012-01-01"])
exp = PeriodIndex(["2011-01-01", "NaT", "2012-01-01"], freq="D")
tm.assert_index_equal(idx, exp)
@pytest.mark.parametrize("floats", [[1.1, 2.1], np.array([1.1, 2.1])])
def test_constructor_floats(self, floats):
msg = "PeriodIndex does not allow floating point in construction"
with pytest.raises(TypeError, match=msg):
PeriodIndex(floats)
def test_constructor_year_and_quarter(self):
year = Series([2001, 2002, 2003])
quarter = year - 2000
idx = PeriodIndex.from_fields(year=year, quarter=quarter)
strs = [f"{t[0]:d}Q{t[1]:d}" for t in zip(quarter, year)]
lops = list(map(Period, strs))
p = PeriodIndex(lops)
tm.assert_index_equal(p, idx)
def test_constructor_freq_mult(self):
# GH #7811
pidx = period_range(start="2014-01", freq="2M", periods=4)
expected = PeriodIndex(["2014-01", "2014-03", "2014-05", "2014-07"], freq="2M")
tm.assert_index_equal(pidx, expected)
pidx = period_range(start="2014-01-02", end="2014-01-15", freq="3D")
expected = PeriodIndex(
["2014-01-02", "2014-01-05", "2014-01-08", "2014-01-11", "2014-01-14"],
freq="3D",
)
tm.assert_index_equal(pidx, expected)
pidx = period_range(end="2014-01-01 17:00", freq="4h", periods=3)
expected = PeriodIndex(
["2014-01-01 09:00", "2014-01-01 13:00", "2014-01-01 17:00"], freq="4h"
)
tm.assert_index_equal(pidx, expected)
msg = "Frequency must be positive, because it represents span: -1M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="-1M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
PeriodIndex(["2011-01"], freq="0M")
msg = "Frequency must be positive, because it represents span: 0M"
with pytest.raises(ValueError, match=msg):
period_range("2011-01", periods=3, freq="0M")
@pytest.mark.parametrize(
"freq_offset, freq_period",
[
("YE", "Y"),
("ME", "M"),
("D", "D"),
("min", "min"),
("s", "s"),
],
)
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat(self, mult, freq_offset, freq_period):
freqstr_offset = str(mult) + freq_offset
freqstr_period = str(mult) + freq_period
pidx = period_range(start="2014-04-01", freq=freqstr_period, periods=10)
expected = date_range(
start="2014-04-01", freq=freqstr_offset, periods=10
).to_period(freqstr_period)
tm.assert_index_equal(pidx, expected)
@pytest.mark.parametrize("mult", [1, 2, 3, 4, 5])
def test_constructor_freq_mult_dti_compat_month(self, mult):
pidx = period_range(start="2014-04-01", freq=f"{mult}M", periods=10)
expected = date_range(
start="2014-04-01", freq=f"{mult}ME", periods=10
).to_period(f"{mult}M")
tm.assert_index_equal(pidx, expected)
def test_constructor_freq_combined(self):
for freq in ["1D1h", "1h1D"]:
pidx = PeriodIndex(["2016-01-01", "2016-01-02"], freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 00:00"], freq="25h")
for freq in ["1D1h", "1h1D"]:
pidx = period_range(start="2016-01-01", periods=2, freq=freq)
expected = PeriodIndex(["2016-01-01 00:00", "2016-01-02 01:00"], freq="25h")
tm.assert_index_equal(pidx, expected)
def test_period_range_length(self):
pi = period_range(freq="Y", start="1/1/2001", end="12/1/2009")
assert len(pi) == 9
pi = period_range(freq="Q", start="1/1/2001", end="12/1/2009")
assert len(pi) == 4 * 9
pi = period_range(freq="M", start="1/1/2001", end="12/1/2009")
assert len(pi) == 12 * 9
pi = period_range(freq="D", start="1/1/2001", end="12/31/2009")
assert len(pi) == 365 * 9 + 2
msg = "Period with BDay freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
pi = period_range(freq="B", start="1/1/2001", end="12/31/2009")
assert len(pi) == 261 * 9
pi = period_range(freq="h", start="1/1/2001", end="12/31/2001 23:00")
assert len(pi) == 365 * 24
pi = period_range(freq="Min", start="1/1/2001", end="1/1/2001 23:59")
assert len(pi) == 24 * 60
pi = period_range(freq="s", start="1/1/2001", end="1/1/2001 23:59:59")
assert len(pi) == 24 * 60 * 60
with tm.assert_produces_warning(FutureWarning, match=msg):
start = Period("02-Apr-2005", "B")
i1 = period_range(start=start, periods=20)
assert len(i1) == 20
assert i1.freq == start.freq
assert i1[0] == start
end_intv = Period("2006-12-31", "W")
i1 = period_range(end=end_intv, periods=10)
assert len(i1) == 10
assert i1.freq == end_intv.freq
assert i1[-1] == end_intv
msg = "'w' is deprecated and will be removed in a future version."
with tm.assert_produces_warning(Pandas4Warning, match=msg):
end_intv = Period("2006-12-31", "1w")
i2 = period_range(end=end_intv, periods=10)
assert len(i1) == len(i2)
assert (i1 == i2).all()
assert i1.freq == i2.freq
def test_infer_freq_from_first_element(self):
msg = "Period with BDay freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
start = Period("02-Apr-2005", "B")
end_intv = Period("2005-05-01", "B")
period_range(start=start, end=end_intv)
# infer freq from first element
i2 = PeriodIndex([end_intv, Period("2005-05-05", "B")])
assert len(i2) == 2
assert i2[0] == end_intv
with tm.assert_produces_warning(FutureWarning, match=msg):
i2 = PeriodIndex(np.array([end_intv, Period("2005-05-05", "B")]))
assert len(i2) == 2
assert i2[0] == end_intv
def test_mixed_freq_raises(self):
# Mixed freq should fail
msg = "Period with BDay freq is deprecated"
with tm.assert_produces_warning(FutureWarning, match=msg):
end_intv = Period("2005-05-01", "B")
vals = [end_intv, Period("2006-12-31", "W")]
msg = r"Input has different freq=W-SUN from PeriodIndex\(freq=B\)"
depr_msg = r"PeriodDtype\[B\] is deprecated"
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(FutureWarning, match=depr_msg):
PeriodIndex(vals)
vals = np.array(vals)
with pytest.raises(IncompatibleFrequency, match=msg):
with tm.assert_produces_warning(FutureWarning, match=depr_msg):
PeriodIndex(vals)
@pytest.mark.parametrize(
"freq", ["M", "Q", "Y", "D", "B", "min", "s", "ms", "us", "ns", "h"]
)
@pytest.mark.filterwarnings(
r"ignore:Period with BDay freq is deprecated:FutureWarning"
)
@pytest.mark.filterwarnings(r"ignore:PeriodDtype\[B\] is deprecated:FutureWarning")
def test_recreate_from_data(self, freq):
org = period_range(start="2001/04/01", freq=freq, periods=1)
idx = PeriodIndex(org.values, freq=freq)
tm.assert_index_equal(idx, org)
def test_map_with_string_constructor(self):
raw = [2005, 2007, 2009]
index = PeriodIndex(raw, freq="Y")
expected = Index([str(num) for num in raw])
res = index.map(str)
# should return an Index
assert isinstance(res, Index)
# preserve element types
assert all(isinstance(resi, str) for resi in res)
# lastly, values should compare equal
tm.assert_index_equal(res, expected)
| TestPeriodIndex |
python | lxml__lxml | src/lxml/doctestcompare.py | {
"start": 15073,
"end": 17731
} | class ____:
def __init__(self, dt_self, old_checker, new_checker, check_func, clone_func,
del_module):
self.dt_self = dt_self
self.checker = old_checker
self.checker._temp_call_super_check_output = self.call_super
self.checker._temp_override_self = new_checker
self.check_func = check_func
self.clone_func = clone_func
self.del_module = del_module
self.install_clone()
self.install_dt_self()
def install_clone(self):
self.func_code = self.check_func.__code__
self.func_globals = self.check_func.__globals__
self.check_func.__code__ = self.clone_func.__code__
def uninstall_clone(self):
self.check_func.__code__ = self.func_code
def install_dt_self(self):
self.prev_func = self.dt_self._DocTestRunner__record_outcome
self.dt_self._DocTestRunner__record_outcome = self
def uninstall_dt_self(self):
self.dt_self._DocTestRunner__record_outcome = self.prev_func
def uninstall_module(self):
if self.del_module:
import sys
del sys.modules[self.del_module]
if '.' in self.del_module:
package, module = self.del_module.rsplit('.', 1)
package_mod = sys.modules[package]
delattr(package_mod, module)
def __call__(self, *args, **kw):
self.uninstall_clone()
self.uninstall_dt_self()
del self.checker._temp_override_self
del self.checker._temp_call_super_check_output
result = self.prev_func(*args, **kw)
self.uninstall_module()
return result
def call_super(self, *args, **kw):
self.uninstall_clone()
try:
return self.check_func(*args, **kw)
finally:
self.install_clone()
def _find_doctest_frame():
import sys
frame = sys._getframe(1)
while frame:
l = frame.f_locals
if 'BOOM' in l:
# Sign of doctest
return frame
frame = frame.f_back
raise LookupError(
"Could not find doctest (only use this function *inside* a doctest)")
__test__ = {
'basic': '''
>>> temp_install()
>>> print """<xml a="1" b="2">stuff</xml>"""
<xml b="2" a="1">...</xml>
>>> print """<xml xmlns="http://example.com"><tag attr="bar" /></xml>"""
<xml xmlns="...">
<tag attr="..." />
</xml>
>>> print """<xml>blahblahblah<foo /></xml>""" # doctest: +NOPARSE_MARKUP, +ELLIPSIS
<xml>...foo /></xml>
'''}
if __name__ == '__main__':
import doctest
doctest.testmod()
| _RestoreChecker |
python | dagster-io__dagster | python_modules/dagster/dagster/components/lib/sql_component/sql_component.py | {
"start": 900,
"end": 2548
} | class ____(ExecutableComponent, ABC):
"""Base component which executes templated SQL. Subclasses
implement instructions on where to load the SQL content from.
"""
# Necessary to allow connection to be a SQLClient, which is an ABC
model_config = ConfigDict(arbitrary_types_allowed=True)
connection: Annotated[
Annotated[
SQLClient,
Resolver(lambda ctx, value: value, model_field_type=str),
],
Field(description="The SQL connection to use for executing the SQL content."),
]
execution: Annotated[Optional[OpSpec], Field(default=None)] = None
@abstractmethod
def get_sql_content(
self, context: AssetExecutionContext, component_load_context: ComponentLoadContext
) -> str:
"""The SQL content to execute."""
...
def execute(
self, context: AssetExecutionContext, component_load_context: ComponentLoadContext
) -> None:
"""Execute the SQL content using the Snowflake resource."""
self.connection.connect_and_execute(self.get_sql_content(context, component_load_context))
@property
def op_spec(self) -> OpSpec:
return self.execution or OpSpec()
def invoke_execute_fn(
self,
context: Union[AssetExecutionContext, AssetCheckExecutionContext],
component_load_context: ComponentLoadContext,
) -> Iterable[MaterializeResult]:
self.execute(
check.inst(context, AssetExecutionContext),
component_load_context,
)
for asset in self.assets or []:
yield MaterializeResult(asset_key=asset.key)
| SqlComponent |
python | doocs__leetcode | solution/1200-1299/1281.Subtract the Product and Sum of Digits of an Integer/Solution.py | {
"start": 0,
"end": 197
} | class ____:
def subtractProductAndSum(self, n: int) -> int:
x, y = 1, 0
while n:
n, v = divmod(n, 10)
x *= v
y += v
return x - y
| Solution |
python | great-expectations__great_expectations | great_expectations/render/renderer/profiling_results_overview_section_renderer.py | {
"start": 454,
"end": 13669
} | class ____(Renderer):
@classmethod
def render(cls, evrs, section_name=None):
content_blocks = []
# NOTE: I don't love the way this builds content_blocks as a side effect.
# The top-level API is clean and scannable, but the function internals are counterintutitive and hard to test. # noqa: E501 # FIXME CoP
# I wonder if we can enable something like jquery chaining for this. That would be concise AND testable. # noqa: E501 # FIXME CoP
# Pressing on for now...
cls._render_header(evrs, content_blocks)
cls._render_dataset_info(evrs, content_blocks)
cls._render_variable_types(evrs, content_blocks)
cls._render_warnings(evrs, content_blocks)
cls._render_expectation_types(evrs, content_blocks)
return RenderedSectionContent(
**{"section_name": section_name, "content_blocks": content_blocks}
)
@classmethod
def _render_header(cls, evrs, content_blocks) -> None:
content_blocks.append(
RenderedHeaderContent(
**{
"content_block_type": "header",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Overview",
"tag": "h5",
"styling": {"classes": ["m-0"]},
},
}
),
"styling": {
"classes": ["col-12", "p-0"],
"header": {"classes": ["alert", "alert-secondary"]},
},
}
)
)
@classmethod
def _render_dataset_info(cls, evrs, content_blocks) -> None:
expect_table_row_count_to_be_between_evr = cls._find_evr_by_type(
evrs["results"], "expect_table_row_count_to_be_between"
)
table_rows = []
table_rows.append(
[
"Number of variables",
len(cls._get_column_list_from_evrs(evrs)),
]
)
table_rows.append(
[
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Number of observations",
"tooltip": {"content": "expect_table_row_count_to_be_between"},
"params": {"tooltip_text": "Number of observations"},
},
}
),
"--"
if not expect_table_row_count_to_be_between_evr
else expect_table_row_count_to_be_between_evr.result["observed_value"],
]
)
table_rows += [
[
"Missing cells",
cls._get_percentage_missing_cells_str(evrs),
],
# ["Duplicate rows", "0 (0.0%)", ], #TODO: bring back when we have an expectation for this # noqa: E501 # FIXME CoP
]
content_blocks.append(
RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Dataset info",
"tag": "h6",
},
}
),
"table": table_rows,
"styling": {
"classes": ["col-6", "mt-1", "p-1"],
"body": {"classes": ["table", "table-sm"]},
},
}
)
)
@classmethod
def _render_variable_types(cls, evrs, content_blocks) -> None:
column_types = cls._get_column_types(evrs)
# TODO: check if we have the information to make this statement. Do all columns have type expectations? # noqa: E501 # FIXME CoP
column_type_counter = Counter(column_types.values())
table_rows = [
[type, str(column_type_counter[type])]
for type in ["int", "float", "string", "datetime", "bool", "unknown"]
]
content_blocks.append(
RenderedTableContent(
**{
"content_block_type": "table",
"header": RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "Variable types",
"tag": "h6",
},
}
),
"table": table_rows,
"styling": {
"classes": ["col-6", "table-responsive", "mt-1", "p-1"],
"body": {"classes": ["table", "table-sm"]},
},
}
)
)
@classmethod
def _render_expectation_types(cls, evrs, content_blocks) -> None:
type_counts = defaultdict(int)
for evr in evrs.results:
type_counts[evr.expectation_config.type] += 1
bullet_list_items = sorted(type_counts.items(), key=lambda kv: -1 * kv[1])
bullet_list_items = [
RenderedStringTemplateContent(
**{
"content_block_type": "string_template",
"string_template": {
"template": "$expectation_type $expectation_count",
"params": {
"expectation_type": tr[0],
"expectation_count": tr[1],
},
"styling": {
"classes": [
"list-group-item",
"d-flex",
"justify-content-between",
"align-items-center",
],
"params": {
"expectation_count": {
"classes": [
"badge",
"badge-secondary",
"badge-pill",
],
}
},
},
},
"styling": {"parent": {"styles": {"list-style-type": "none"}}},
}
)
for tr in bullet_list_items
]
bullet_list = RenderedBulletListContent(
**{
"content_block_type": "bullet_list",
"bullet_list": bullet_list_items,
"styling": {
"classes": ["col-12", "mt-1"],
"body": {
"classes": ["list-group"],
},
},
}
)
bullet_list_collapse = CollapseContent(
**{
"collapse_toggle_link": "Show Expectation Types...",
"collapse": [bullet_list],
"styling": {"classes": ["col-12", "p-1"]},
}
)
content_blocks.append(bullet_list_collapse)
@classmethod
def _render_warnings(cls, evrs, content_blocks):
return
# def render_warning_row(template, column, n, p, badge_label):
# return [{
# "template": template,
# "params": {
# "column": column,
# "n": n,
# "p": p,
# },
# "styling": {
# "params": {
# "column": {
# "classes": ["badge", "badge-primary", ]
# }
# }
# }
# }, {
# "template": "$badge_label",
# "params": {
# "badge_label": badge_label,
# },
# "styling": {
# "params": {
# "badge_label": {
# "classes": ["badge", "badge-warning", ]
# }
# }
# }
# }]
# table_rows = [
# render_warning_row(
# "$column has $n ($p%) missing values", "Age", 177, 19.9, "Missing"),
# render_warning_row(
# "$column has a high cardinality: $n distinct values", "Cabin", 148, None, "Warning"), # noqa: E501 # FIXME CoP
# render_warning_row(
# "$column has $n ($p%) missing values", "Cabin", 687, 77.1, "Missing"),
# render_warning_row(
# "$column has $n (< $p%) zeros", "Fare", 15, "0.1", "Zeros"),
# render_warning_row(
# "$column has $n (< $p%) zeros", "Parch", 678, "76.1", "Zeros"),
# render_warning_row(
# "$column has $n (< $p%) zeros", "SibSp", 608, "68.2", "Zeros"),
# ]
# content_blocks.append({
# "content_block_type": "table",
# "header": "Warnings",
# "table": table_rows,
# "styling": {
# "classes": ["col-12"],
# "styles": {
# "margin-top": "20px"
# },
# "body": {
# "classes": ["table", "table-sm"]
# }
# },
# })
@classmethod
def _get_percentage_missing_cells_str(cls, evrs):
columns = cls._get_column_list_from_evrs(evrs)
if not columns or len(columns) == 0:
warnings.warn("Cannot get % of missing cells - column list is empty")
return "?"
expect_column_values_to_not_be_null_evrs = cls._find_all_evrs_by_type(
evrs.results, "expect_column_values_to_not_be_null"
)
if len(columns) > len(expect_column_values_to_not_be_null_evrs):
warnings.warn(
"Cannot get % of missing cells - not all columns have expect_column_values_to_not_be_null expectations" # noqa: E501 # FIXME CoP
)
return "?"
# assume 100.0 missing for columns where ["result"]["unexpected_percent"] is not available
return "{:.2f}%".format(
sum(
evr.result["unexpected_percent"]
if "unexpected_percent" in evr.result
and evr.result["unexpected_percent"] is not None
else 100.0
for evr in expect_column_values_to_not_be_null_evrs
)
/ len(columns)
)
@classmethod
def _get_column_types(cls, evrs): # noqa: C901 # FIXME CoP
columns = cls._get_column_list_from_evrs(evrs)
type_evrs = cls._find_all_evrs_by_type(
evrs.results, "expect_column_values_to_be_in_type_list"
) + cls._find_all_evrs_by_type(evrs.results, "expect_column_values_to_be_of_type")
column_types = {}
for column in columns:
column_types[column] = "unknown"
for evr in type_evrs:
column = evr.expectation_config.kwargs["column"]
if evr.expectation_config.type == "expect_column_values_to_be_in_type_list":
if evr.expectation_config.kwargs["type_list"] is None:
column_types[column] = "unknown"
continue
else:
expected_types = set(evr.expectation_config.kwargs["type_list"])
else: # assuming expect_column_values_to_be_of_type
expected_types = {evr.expectation_config.kwargs["type_"]}
if expected_types.issubset(ProfilerTypeMapping.INT_TYPE_NAMES):
column_types[column] = "int"
elif expected_types.issubset(ProfilerTypeMapping.FLOAT_TYPE_NAMES):
column_types[column] = "float"
elif expected_types.issubset(ProfilerTypeMapping.STRING_TYPE_NAMES):
column_types[column] = "string"
elif expected_types.issubset(ProfilerTypeMapping.DATETIME_TYPE_NAMES):
column_types[column] = "datetime"
elif expected_types.issubset(ProfilerTypeMapping.BOOLEAN_TYPE_NAMES):
column_types[column] = "bool"
else:
warnings.warn(
"The expected type list is not a subset of any of the profiler type sets"
f": {expected_types}"
)
column_types[column] = "unknown"
return column_types
| ProfilingResultsOverviewSectionRenderer |
python | great-expectations__great_expectations | great_expectations/render/renderer/site_builder.py | {
"start": 20655,
"end": 41063
} | class ____:
def __init__( # noqa: PLR0913 # FIXME CoP
self,
name,
site_name,
data_context: AbstractDataContext,
target_store,
site_section_builders_config,
custom_styles_directory=None,
custom_views_directory=None,
show_how_to_buttons=True,
validation_results_limit=None,
renderer=None,
view=None,
data_context_id=None,
source_stores=None,
**kwargs,
) -> None:
# NOTE: This method is almost identical to DefaultSiteSectionBuilder
self.name = name
self.site_name = site_name
self.data_context = data_context
self.target_store = target_store
self.validation_results_limit = validation_results_limit
self.data_context_id = data_context_id
self.show_how_to_buttons = show_how_to_buttons
self.source_stores = source_stores or {}
self.site_section_builders_config = site_section_builders_config or {}
if renderer is None:
renderer = {
"module_name": "great_expectations.render.renderer",
"class_name": "SiteIndexPageRenderer",
}
module_name = renderer.get("module_name") or "great_expectations.render.renderer"
self.renderer_class = instantiate_class_from_config(
config=renderer,
runtime_environment={"data_context": data_context},
config_defaults={"module_name": module_name},
)
if not self.renderer_class:
raise exceptions.ClassInstantiationError(
module_name=module_name,
package_name=None,
class_name=renderer["class_name"],
)
module_name = "great_expectations.render.view"
if view is None:
view = {
"module_name": module_name,
"class_name": "DefaultJinjaIndexPageView",
}
module_name = view.get("module_name") or module_name
self.view_class = instantiate_class_from_config(
config=view,
runtime_environment={
"custom_styles_directory": custom_styles_directory,
"custom_views_directory": custom_views_directory,
},
config_defaults={"module_name": module_name},
)
if not self.view_class:
raise exceptions.ClassInstantiationError(
module_name=view["module_name"],
package_name=None,
class_name=view["class_name"],
)
def add_resource_info_to_index_links_dict( # noqa: PLR0913 # FIXME CoP
self,
index_links_dict,
expectation_suite_name,
section_name,
batch_identifier=None,
run_id=None,
validation_success=None,
run_time=None,
run_name=None,
asset_name=None,
batch_kwargs=None,
batch_spec=None,
):
import os
if f"{section_name}_links" not in index_links_dict:
index_links_dict[f"{section_name}_links"] = []
if run_id:
filepath = (
pathlib.Path(
*[
"validations",
*expectation_suite_name.split("."),
*run_id.to_tuple(),
batch_identifier,
]
).as_posix()
+ ".html"
)
else:
filepath = (
pathlib.Path(*["expectations", *expectation_suite_name.split(".")]).as_posix()
+ ".html"
)
url_encoded_filepath = urllib.parse.quote(filepath)
expectation_suite_filepath = os.path.join( # noqa: PTH118 # FIXME CoP
"expectations", *expectation_suite_name.split(".")
)
expectation_suite_filepath += ".html"
index_links_dict[f"{section_name}_links"].append(
{
"expectation_suite_name": expectation_suite_name,
"filepath": url_encoded_filepath,
"run_id": run_id,
"batch_identifier": batch_identifier,
"validation_success": validation_success,
"run_time": run_time,
"run_name": run_name,
"asset_name": asset_name,
"batch_kwargs": batch_kwargs,
"batch_spec": batch_spec,
"expectation_suite_filepath": expectation_suite_filepath if run_id else None,
}
)
return index_links_dict
def get_calls_to_action(self):
usage_statistics = None
# db_driver = None
# datasource_classes_by_name = self.data_context.list_datasources()
#
# if datasource_classes_by_name:
# last_datasource_class_by_name = datasource_classes_by_name[-1]
# last_datasource_class_name = last_datasource_class_by_name["
# class_name"]
# last_datasource_name = last_datasource_class_by_name["name"]
# last_datasource = self.data_context.get_datasource
# (last_datasource_name)
#
# if last_datasource_class_name == "SqlAlchemyDatasource":
# try:
# # NOTE: JPC - 20200327 - I do not believe datasource
# will *ever* have a drivername property
# (it's in credentials). Suspect this isn't working.
# db_driver = last_datasource.drivername
# except AttributeError:
# pass
#
# datasource_type = DATASOURCE_TYPE_BY_DATASOURCE_CLASS[
# last_datasource_class_name].value
# usage_statistics = "?utm_source={}&utm_medium={}
# &utm_campaign={}".format(
# "ge-init-datadocs-v2",
# datasource_type,
# db_driver,
# )
return {
"header": "To continue exploring Great Expectations check out one of these tutorials...", # noqa: E501 # FIXME CoP
"buttons": self._get_call_to_action_buttons(usage_statistics),
}
def _get_call_to_action_buttons(self, usage_statistics):
"""
Build project and user specific calls to action buttons.
This can become progressively smarter about project and user specific
calls to action.
"""
create_expectations = CallToActionButton(
"How to Create Expectations",
"https://docs.greatexpectations.io/docs/guides/expectations/how_to_create_and_edit_expectations_with_instant_feedback_from_a_sample_batch_of_data",
)
_ = CallToActionButton(
"See More Kinds of Expectations",
"https://greatexpectations.io/expectations",
)
validation_playground = CallToActionButton(
"How to Validate Data",
"https://docs.greatexpectations.io/docs/guides/validation/checkpoints/how_to_create_a_new_checkpoint",
)
_ = CallToActionButton(
"How to Customize Data Docs",
"https://docs.greatexpectations.io/docs/reference/data_docs#customizing-html-documentation",
)
team_site = CallToActionButton(
"How to Set Up a Team Site",
"https://docs.greatexpectations.io/docs/guides/setup/configuring_data_docs/how_to_host_and_share_data_docs_on_a_filesystem",
)
# TODO gallery does not yet exist
# gallery = CallToActionButton(
# "Great Expectations Gallery",
# "https://greatexpectations.io/gallery"
# )
results = []
results.append(create_expectations)
# Show these no matter what
results.append(validation_playground)
results.append(team_site)
if usage_statistics:
for button in results:
button.link = button.link + usage_statistics
return results
# TODO: deprecate dual batch api support
def build(
self, skip_and_clean_missing=True, build_index: bool = True
) -> Tuple[Any, Optional[OrderedDict]]:
"""
:param skip_and_clean_missing: if True, target html store keys without corresponding source store keys will
be skipped and removed from the target store
:param build_index: a flag if False, skips building the index page
:return: tuple(index_page_url, index_links_dict)
""" # noqa: E501 # FIXME CoP
# Loop over sections in the HtmlStore
logger.debug("DefaultSiteIndexBuilder.build")
if not build_index:
logger.debug("Skipping index rendering")
return None, None
index_links_dict = OrderedDict()
index_links_dict["site_name"] = self.site_name
if self.show_how_to_buttons:
index_links_dict["cta_object"] = self.get_calls_to_action()
self._add_expectations_to_index_links(index_links_dict, skip_and_clean_missing)
validation_and_profiling_result_site_keys = (
self._build_validation_and_profiling_result_site_keys(skip_and_clean_missing)
)
self._add_profiling_to_index_links(
index_links_dict, validation_and_profiling_result_site_keys
)
self._add_validations_to_index_links(
index_links_dict, validation_and_profiling_result_site_keys
)
viewable_content = ""
try:
rendered_content = self.renderer_class.render(index_links_dict)
viewable_content = self.view_class.render(
rendered_content,
data_context_id=self.data_context_id,
show_how_to_buttons=self.show_how_to_buttons,
)
except Exception as e:
exception_message = """\
An unexpected Exception occurred during data docs rendering. Because of this error, certain parts of data docs will \
not be rendered properly and/or may not appear altogether. Please use the trace, included in this message, to \
diagnose and repair the underlying issue. Detailed information follows:
""" # noqa: E501 # FIXME CoP
exception_traceback = traceback.format_exc()
exception_message += (
f'{type(e).__name__}: "{e!s}". Traceback: "{exception_traceback}".'
)
logger.error(exception_message) # noqa: TRY400 # FIXME CoP
return self.target_store.write_index_page(viewable_content), index_links_dict
def _add_expectations_to_index_links(
self, index_links_dict: OrderedDict, skip_and_clean_missing: bool
) -> None:
expectations = self.site_section_builders_config.get("expectations", "None")
if expectations and expectations not in FALSEY_YAML_STRINGS:
expectation_suite_source_keys = self.data_context.stores[
self.site_section_builders_config["expectations"].get("source_store_name")
].list_keys()
expectation_suite_site_keys = [
ExpectationSuiteIdentifier.from_tuple(expectation_suite_tuple)
for expectation_suite_tuple in self.target_store.store_backends[
ExpectationSuiteIdentifier
].list_keys()
]
if skip_and_clean_missing:
cleaned_keys = []
for expectation_suite_site_key in expectation_suite_site_keys:
if expectation_suite_site_key not in expectation_suite_source_keys:
self.target_store.store_backends[ExpectationSuiteIdentifier].remove_key(
expectation_suite_site_key
)
else:
cleaned_keys.append(expectation_suite_site_key)
expectation_suite_site_keys = cleaned_keys
for expectation_suite_key in expectation_suite_site_keys:
self.add_resource_info_to_index_links_dict(
index_links_dict=index_links_dict,
expectation_suite_name=expectation_suite_key.name,
section_name="expectations",
)
def _build_validation_and_profiling_result_site_keys(
self, skip_and_clean_missing: bool
) -> List[ValidationResultIdentifier]:
validation_and_profiling_result_site_keys = []
validations = self.site_section_builders_config.get("validations", "None")
profiling = self.site_section_builders_config.get("profiling", "None")
if (validations and validations not in FALSEY_YAML_STRINGS) or (
profiling and profiling not in FALSEY_YAML_STRINGS
):
source_store = (
"validations"
if (validations and validations not in FALSEY_YAML_STRINGS)
else "profiling"
)
validation_and_profiling_result_source_keys = set(
self.data_context.stores[
self.site_section_builders_config[source_store].get("source_store_name")
].list_keys()
)
validation_and_profiling_result_site_keys = [
ValidationResultIdentifier.from_tuple(validation_result_tuple)
for validation_result_tuple in self.target_store.store_backends[
ValidationResultIdentifier
].list_keys()
]
if skip_and_clean_missing:
cleaned_keys = []
for validation_result_site_key in validation_and_profiling_result_site_keys:
if (
validation_result_site_key
not in validation_and_profiling_result_source_keys
):
self.target_store.store_backends[ValidationResultIdentifier].remove_key(
validation_result_site_key
)
else:
cleaned_keys.append(validation_result_site_key)
validation_and_profiling_result_site_keys = cleaned_keys
return validation_and_profiling_result_site_keys
def _add_profiling_to_index_links(
self,
index_links_dict: OrderedDict,
validation_and_profiling_result_site_keys: List[ValidationResultIdentifier],
) -> None:
profiling = self.site_section_builders_config.get("profiling", "None")
if profiling and profiling not in FALSEY_YAML_STRINGS:
profiling_run_name_filter = self.site_section_builders_config["profiling"][
"run_name_filter"
]
profiling_result_site_keys = [
validation_result_key
for validation_result_key in validation_and_profiling_result_site_keys
if resource_key_passes_run_name_filter(
validation_result_key, profiling_run_name_filter
)
]
for profiling_result_key in profiling_result_site_keys:
try:
validation = self.data_context.get_validation_result(
batch_identifier=profiling_result_key.batch_identifier,
expectation_suite_name=profiling_result_key.expectation_suite_identifier.name,
run_id=profiling_result_key.run_id,
validation_results_store_name=self.source_stores.get("profiling"),
)
batch_kwargs = validation.meta.get("batch_kwargs", {})
batch_spec = validation.meta.get("batch_spec", {})
self.add_resource_info_to_index_links_dict(
index_links_dict=index_links_dict,
expectation_suite_name=profiling_result_key.expectation_suite_identifier.name,
section_name="profiling",
batch_identifier=profiling_result_key.batch_identifier,
run_id=profiling_result_key.run_id,
run_time=profiling_result_key.run_id.run_time,
run_name=profiling_result_key.run_id.run_name,
asset_name=_resolve_asset_name(validation),
batch_kwargs=batch_kwargs,
batch_spec=batch_spec,
)
except Exception:
error_msg = f"Profiling result not found: {profiling_result_key.to_tuple()!s:s} - skipping" # noqa: E501 # FIXME CoP
logger.warning(error_msg)
def _add_validations_to_index_links(
self,
index_links_dict: OrderedDict,
validation_and_profiling_result_site_keys: List[ValidationResultIdentifier],
) -> None:
validations = self.site_section_builders_config.get("validations", "None")
if validations and validations not in FALSEY_YAML_STRINGS:
validations_run_name_filter = self.site_section_builders_config["validations"][
"run_name_filter"
]
validation_result_site_keys = [
validation_result_key
for validation_result_key in validation_and_profiling_result_site_keys
if resource_key_passes_run_name_filter(
validation_result_key, validations_run_name_filter
)
]
validation_result_site_keys = sorted(
validation_result_site_keys,
key=lambda x: x.run_id.run_time,
reverse=True,
)
if self.validation_results_limit:
validation_result_site_keys = validation_result_site_keys[
: self.validation_results_limit
]
for validation_result_key in validation_result_site_keys:
try:
validation = self.data_context.get_validation_result(
batch_identifier=validation_result_key.batch_identifier,
expectation_suite_name=validation_result_key.expectation_suite_identifier.name,
run_id=validation_result_key.run_id,
validation_results_store_name=self.source_stores.get("validations"),
)
validation_success = validation.success
batch_kwargs = validation.meta.get("batch_kwargs", {})
batch_spec = validation.meta.get("batch_spec", {})
self.add_resource_info_to_index_links_dict(
index_links_dict=index_links_dict,
expectation_suite_name=validation_result_key.expectation_suite_identifier.name,
section_name="validations",
batch_identifier=validation_result_key.batch_identifier,
run_id=validation_result_key.run_id,
validation_success=validation_success,
run_time=validation_result_key.run_id.run_time,
run_name=validation_result_key.run_id.run_name,
asset_name=_resolve_asset_name(validation),
batch_kwargs=batch_kwargs,
batch_spec=batch_spec,
)
except Exception:
error_msg = f"Validation result not found: {validation_result_key.to_tuple()!s:s} - skipping" # noqa: E501 # FIXME CoP
logger.warning(error_msg)
def _resolve_asset_name(validation_results: ExpectationValidationResult) -> str | None:
"""
Resolve the asset name from the validation results meta data.
FDS does not store data_asset_name in batch_kwargs or batch_spec and it must be
pulled from the active batch definition.
"""
batch_kwargs = validation_results.meta.get("batch_kwargs", {})
batch_spec = validation_results.meta.get("batch_spec", {})
asset_name = batch_kwargs.get("data_asset_name") or batch_spec.get("data_asset_name")
if asset_name:
return asset_name
# FDS does not store data_asset_name in batch_kwargs or batch_spec
active_batch = validation_results.meta.get("active_batch_definition", {})
return active_batch.get("data_asset_name")
| DefaultSiteIndexBuilder |
python | celery__celery | t/unit/utils/test_text.py | {
"start": 665,
"end": 1935
} | class ____:
def test_textindent(self):
assert indent(RANDTEXT, 4) == RANDTEXT_RES
def test_format_queues(self, app):
app.amqp.queues = app.amqp.Queues(QUEUES)
assert (sorted(app.amqp.queues.format().split('\n')) ==
sorted([QUEUE_FORMAT1, QUEUE_FORMAT2]))
def test_ensure_newlines(self):
assert len(ensure_newlines('foo\nbar\nbaz\n').splitlines()) == 3
assert len(ensure_newlines('foo\nbar').splitlines()) == 2
@pytest.mark.parametrize('s,maxsize,expected', [
('ABCDEFGHI', 3, 'ABC...'),
('ABCDEFGHI', 10, 'ABCDEFGHI'),
])
def test_truncate_text(s, maxsize, expected):
assert truncate(s, maxsize) == expected
@pytest.mark.parametrize('args,expected', [
((None, 3), '???'),
(('ABCDEFGHI', 6), 'ABC...'),
(('ABCDEFGHI', 20), 'ABCDEFGHI'),
(('ABCDEFGHI', 6, None), 'ABCDEF'),
])
def test_abbr(args, expected):
assert abbr(*args) == expected
@pytest.mark.parametrize('s,maxsize,expected', [
(None, 3, '???'),
('feeds.tasks.refresh', 10, '[.]refresh'),
('feeds.tasks.refresh', 30, 'feeds.tasks.refresh'),
])
def test_abbrtask(s, maxsize, expected):
assert abbrtask(s, maxsize) == expected
def test_pretty():
assert pretty(('a', 'b', 'c'))
| test_Info |
python | django__django | tests/utils_tests/test_connection.py | {
"start": 99,
"end": 565
} | class ____(SimpleTestCase):
def test_create_connection(self):
handler = BaseConnectionHandler()
msg = "Subclasses must implement create_connection()."
with self.assertRaisesMessage(NotImplementedError, msg):
handler.create_connection(None)
def test_all_initialized_only(self):
handler = BaseConnectionHandler({"default": {}})
self.assertEqual(handler.all(initialized_only=True), [])
| BaseConnectionHandlerTests |
python | matplotlib__matplotlib | lib/matplotlib/backends/backend_wx.py | {
"start": 47911,
"end": 48118
} | class ____(backend_tools.ConfigureSubplotsBase):
def trigger(self, *args):
NavigationToolbar2Wx.configure_subplots(self)
@backend_tools._register_tool_class(_FigureCanvasWxBase)
| ConfigureSubplotsWx |
python | dagster-io__dagster | python_modules/dagster/dagster/_daemon/daemon.py | {
"start": 2145,
"end": 8282
} | class ____(AbstractContextManager, ABC, Generic[TContext]):
_logger: logging.Logger
_last_heartbeat_time: Optional[datetime.datetime]
def __init__(self):
self._logger = get_default_daemon_logger(type(self).__name__)
self._last_heartbeat_time = None
self._last_log_time = None
self._errors = deque(
maxlen=DAEMON_HEARTBEAT_ERROR_LIMIT
) # (SerializableErrorInfo, timestamp) tuples
self._first_error_logged = False
@classmethod
@abstractmethod
def daemon_type(cls) -> str:
"""returns: str."""
def __exit__(self, _exception_type, _exception_value, _traceback):
pass
def run_daemon_loop(
self,
workspace_process_context: TContext,
daemon_uuid: str,
daemon_shutdown_event: Event,
heartbeat_interval_seconds: float,
error_interval_seconds: int,
):
from dagster._core.telemetry_upload import uploading_logging_thread
with uploading_logging_thread():
daemon_generator = self.core_loop(workspace_process_context, daemon_shutdown_event)
try:
while not daemon_shutdown_event.is_set():
# Check to see if it's time to add a heartbeat initially and after each time
# the daemon yields
try:
self._check_add_heartbeat(
workspace_process_context.instance,
daemon_uuid,
heartbeat_interval_seconds,
error_interval_seconds,
)
except Exception:
self._logger.error(
"Failed to add heartbeat: \n%s",
serializable_error_info_from_exc_info(sys.exc_info()),
)
try:
result = next(daemon_generator)
if isinstance(result, SerializableErrorInfo):
self._errors.appendleft((result, get_current_datetime()))
except StopIteration:
self._logger.error(
"Daemon loop finished without raising an error - daemon loops should"
" run forever until they are interrupted."
)
break
except Exception:
error_info = DaemonErrorCapture.process_exception(
exc_info=sys.exc_info(),
logger=self._logger,
log_message="Caught error, daemon loop will restart",
)
self._errors.appendleft((error_info, get_current_datetime()))
daemon_generator.close()
# Wait a bit to ensure that errors don't happen in a tight loop
daemon_shutdown_event.wait(_get_error_sleep_interval())
daemon_generator = self.core_loop(
workspace_process_context, daemon_shutdown_event
)
finally:
# cleanup the generator if it was stopped part-way through
daemon_generator.close()
def _check_add_heartbeat(
self,
instance: DagsterInstance,
daemon_uuid: str,
heartbeat_interval_seconds: float,
error_interval_seconds: int,
) -> None:
error_max_time = get_current_datetime() - datetime.timedelta(seconds=error_interval_seconds)
while len(self._errors):
_earliest_error, earliest_timestamp = self._errors[-1]
if earliest_timestamp >= error_max_time:
break
self._errors.pop()
if instance.daemon_skip_heartbeats_without_errors and not self._errors:
# no errors to report, so we don't write a heartbeat
return
curr_time = get_current_datetime()
if (
self._last_heartbeat_time
and (curr_time - self._last_heartbeat_time).total_seconds() < heartbeat_interval_seconds
):
return
daemon_type = self.daemon_type()
last_stored_heartbeat = instance.get_daemon_heartbeats().get(daemon_type)
if (
self._last_heartbeat_time
and last_stored_heartbeat
and last_stored_heartbeat.daemon_id != daemon_uuid
):
self._logger.error(
"Another %s daemon is still sending heartbeats. You likely have multiple "
"daemon processes running at once, which is not supported. "
"Last heartbeat daemon id: %s, "
"Current daemon_id: %s",
daemon_type,
last_stored_heartbeat.daemon_id,
daemon_uuid,
)
self._last_heartbeat_time = curr_time
instance.add_daemon_heartbeat(
DaemonHeartbeat(
curr_time.timestamp(),
daemon_type,
daemon_uuid,
errors=[error for (error, timestamp) in self._errors],
)
)
if (
not self._last_log_time
or (curr_time - self._last_log_time).total_seconds() >= TELEMETRY_LOGGING_INTERVAL
):
log_action(
instance,
DAEMON_ALIVE,
metadata={"DAEMON_SESSION_ID": get_telemetry_daemon_session_id()},
)
self._last_log_time = curr_time
@abstractmethod
def core_loop(
self,
workspace_process_context: TContext,
shutdown_event: Event,
) -> DaemonIterator:
"""Execute the daemon loop, which should be a generator function that never finishes.
Should periodically yield so that the controller can check for heartbeats. Yields can be
either NoneType or a SerializableErrorInfo.
returns: generator (SerializableErrorInfo).
"""
| DagsterDaemon |
python | numpy__numpy | numpy/lib/tests/test_arraypad.py | {
"start": 49668,
"end": 51062
} | class ____:
def test_simple(self):
arr = np.arange(24).reshape(4, 6)
result = np.pad(arr, [(2, 3), (3, 1)], mode="empty")
assert result.shape == (9, 10)
assert_equal(arr, result[2:-3, 3:-1])
def test_pad_empty_dimension(self):
arr = np.zeros((3, 0, 2))
result = np.pad(arr, [(0,), (2,), (1,)], mode="empty")
assert result.shape == (3, 4, 4)
def test_legacy_vector_functionality():
def _padwithtens(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = 10
vector[-pad_width[1]:] = 10
a = np.arange(6).reshape(2, 3)
a = np.pad(a, 2, _padwithtens)
b = np.array(
[[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 0, 1, 2, 10, 10],
[10, 10, 3, 4, 5, 10, 10],
[10, 10, 10, 10, 10, 10, 10],
[10, 10, 10, 10, 10, 10, 10]]
)
assert_array_equal(a, b)
def test_unicode_mode():
a = np.pad([1], 2, mode='constant')
b = np.array([0, 0, 1, 0, 0])
assert_array_equal(a, b)
@pytest.mark.parametrize("mode", ["edge", "symmetric", "reflect", "wrap"])
def test_object_input(mode):
# Regression test for issue gh-11395.
a = np.full((4, 3), fill_value=None)
pad_amt = ((2, 3), (3, 2))
b = np.full((9, 8), fill_value=None)
assert_array_equal(np.pad(a, pad_amt, mode=mode), b)
| TestEmpty |
python | huggingface__transformers | src/transformers/models/nystromformer/modeling_nystromformer.py | {
"start": 11361,
"end": 12027
} | class ____(nn.Module):
def __init__(self, config):
super().__init__()
self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = ACT2FN[config.hidden_act]
else:
self.intermediate_act_fn = config.hidden_act
def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_bert.BertOutput with Bert->Nystromformer
| NystromformerIntermediate |
python | python-attrs__attrs | tests/test_functional.py | {
"start": 477,
"end": 595
} | class ____:
x = attr.ib(validator=attr.validators.instance_of(int))
y = attr.ib()
foo = None
@attr.s()
| C1Slots |
python | pyqtgraph__pyqtgraph | pyqtgraph/examples/GraphicsScene.py | {
"start": 145,
"end": 1161
} | class ____(QtWidgets.QGraphicsObject):
def __init__(self):
QtWidgets.QGraphicsObject.__init__(self)
def paint(self, p, *args):
p.setPen(pg.mkPen(200,200,200))
p.drawRect(self.boundingRect())
def boundingRect(self):
return QtCore.QRectF(0, 0, 20, 20)
def mouseClickEvent(self, ev):
if ev.double():
print("double click")
else:
print("click")
ev.accept()
#def mouseDragEvent(self, ev):
#print "drag"
#ev.accept()
#self.setPos(self.pos() + ev.pos()-ev.lastPos())
vb = pg.ViewBox()
win.setCentralItem(vb)
obj = Obj()
vb.addItem(obj)
obj2 = Obj()
win.addItem(obj2)
def clicked():
print("button click")
btn = QtWidgets.QPushButton("BTN")
btn.clicked.connect(clicked)
prox = QtWidgets.QGraphicsProxyWidget()
prox.setWidget(btn)
prox.setPos(100,0)
vb.addItem(prox)
g = pg.GridItem()
vb.addItem(g)
if __name__ == '__main__':
pg.exec()
| Obj |
python | pypa__pip | src/pip/_vendor/pygments/lexer.py | {
"start": 12247,
"end": 12446
} | class ____:
"""
Indicates the a state should inherit from its superclass.
"""
def __repr__(self):
return 'inherit'
inherit = _inherit() # pylint: disable=invalid-name
| _inherit |
python | getsentry__sentry | tests/sentry/integrations/slack/tasks/test_send_notifications_on_activity.py | {
"start": 2238,
"end": 3505
} | class ____(TestCase):
def setUp(self) -> None:
mock_slack_service = mock.MagicMock()
mock_default_method = mock.MagicMock(return_value=mock_slack_service)
mock_notify_all_threads_for_activity = mock.MagicMock()
mock_slack_service.default = mock_default_method
mock_slack_service.notify_all_threads_for_activity = mock_notify_all_threads_for_activity
self.mock_slack_service = mock_slack_service
def test_returns_early_when_no_activity_found(self) -> None:
with mock.patch(
"sentry.integrations.slack.tasks.send_notifications_on_activity.SlackService",
self.mock_slack_service,
):
send_activity_notifications_to_slack_threads(activity_id=123)
self.mock_slack_service.notify_all_threads_for_activity.assert_not_called()
def test_calls_notify_all_threads_for_activity(self) -> None:
with mock.patch(
"sentry.integrations.slack.tasks.send_notifications_on_activity.SlackService",
self.mock_slack_service,
):
send_activity_notifications_to_slack_threads(activity_id=self.activity.id)
self.mock_slack_service.notify_all_threads_for_activity.assert_called()
| TestSendActivityNotifications |
python | Textualize__textual | src/textual/widgets/_markdown.py | {
"start": 16684,
"end": 19874
} | class ____(Widget):
"""Renders a Markdown table."""
DEFAULT_CSS = """
MarkdownTableContent {
width: 1fr;
height: auto;
layout: grid;
grid-columns: auto;
grid-rows: auto;
grid-gutter: 1 1;
& > .cell {
margin: 0 0;
height: auto;
padding: 0 1;
text-overflow: ellipsis;
}
& > .header {
height: auto;
margin: 0 0;
padding: 0 1;
color: $primary;
text-overflow: ellipsis;
content-align: left bottom;
}
keyline: thin $foreground 20%;
}
MarkdownTableContent > .markdown-table--header {
text-style: bold;
}
"""
COMPONENT_CLASSES = {"markdown-table--header", "markdown-table--lines"}
def __init__(self, headers: list[Content], rows: list[list[Content]]):
self.headers = headers.copy()
"""List of header text."""
self.rows = rows.copy()
"""The row contents."""
super().__init__()
self.shrink = True
self.last_row = 0
def pre_layout(self, layout: Layout) -> None:
assert isinstance(layout, GridLayout)
layout.auto_minimum = True
layout.expand = not self.query_ancestor(MarkdownTable).styles.is_auto_width
layout.shrink = True
layout.stretch_height = True
def compose(self) -> ComposeResult:
for header in self.headers:
yield MarkdownTableCellContents(header, classes="header").with_tooltip(
header
)
for row_index, row in enumerate(self.rows, 1):
for cell in row:
yield MarkdownTableCellContents(
cell, classes=f"row{row_index} cell"
).with_tooltip(cell.plain)
self.last_row = row_index
def _update_content(self, headers: list[Content], rows: list[list[Content]]):
"""Update cell contents."""
self.headers = headers
self.rows = rows
cells: list[Content] = [
*self.headers,
*[cell for row in self.rows for cell in row],
]
for child, updated_cell in zip(self.query(MarkdownTableCellContents), cells):
child.update(updated_cell, layout=False)
async def _update_rows(self, updated_rows: list[list[Content]]) -> None:
self.styles.grid_size_columns = len(self.headers)
await self.query_children(f".cell.row{self.last_row}").remove()
new_cells: list[Static] = []
for row_index, row in enumerate(updated_rows, self.last_row):
for cell in row:
new_cells.append(
Static(cell, classes=f"row{row_index} cell").with_tooltip(cell)
)
self.last_row = row_index
await self.mount_all(new_cells)
def on_mount(self) -> None:
self.styles.grid_size_columns = len(self.headers)
async def action_link(self, href: str) -> None:
"""Pass a link action on to the MarkdownTable parent."""
if isinstance(self.parent, MarkdownTable):
await self.parent.action_link(href)
| MarkdownTableContent |
python | dagster-io__dagster | python_modules/dagster-graphql/dagster_graphql/schema/errors.py | {
"start": 14903,
"end": 15406
} | class ____(graphene.ObjectType):
class Meta:
interfaces = (GrapheneError,)
name = "PartitionKeysNotFoundError"
partition_keys = non_null_list(graphene.String)
def __init__(self, partition_keys: set[str]):
super().__init__()
self.partition_keys = check.list_param(
sorted(partition_keys), "partition_keys", of_type=str
)
self.message = f"Partition keys `{self.partition_keys}` could not be found."
| GraphenePartitionKeysNotFoundError |
python | django__django | tests/delete/models.py | {
"start": 2095,
"end": 3951
} | class ____(models.Model):
name = models.CharField(max_length=30)
auto = models.ForeignKey(R, models.CASCADE, related_name="auto_set")
auto_nullable = models.ForeignKey(
R, models.CASCADE, null=True, related_name="auto_nullable_set"
)
setvalue = models.ForeignKey(R, models.SET(get_default_r), related_name="setvalue")
setnull = models.ForeignKey(
R, models.SET_NULL, null=True, related_name="setnull_set"
)
setdefault = models.ForeignKey(
R, models.SET_DEFAULT, default=get_default_r, related_name="setdefault_set"
)
setdefault_none = models.ForeignKey(
R,
models.SET_DEFAULT,
default=None,
null=True,
related_name="setnull_nullable_set",
)
cascade = models.ForeignKey(R, models.CASCADE, related_name="cascade_set")
cascade_nullable = models.ForeignKey(
R, models.CASCADE, null=True, related_name="cascade_nullable_set"
)
protect = models.ForeignKey(
R, models.PROTECT, null=True, related_name="protect_set"
)
restrict = models.ForeignKey(
R, models.RESTRICT, null=True, related_name="restrict_set"
)
donothing = models.ForeignKey(
R, models.DO_NOTHING, null=True, related_name="donothing_set"
)
child = models.ForeignKey(RChild, models.CASCADE, related_name="child")
child_setnull = models.ForeignKey(
RChild, models.SET_NULL, null=True, related_name="child_setnull"
)
cascade_p = models.ForeignKey(
P, models.CASCADE, related_name="cascade_p_set", null=True
)
# A OneToOneField is just a ForeignKey unique=True, so we don't duplicate
# all the tests; just one smoke test to ensure on_delete works for it as
# well.
o2o_setnull = models.ForeignKey(
R, models.SET_NULL, null=True, related_name="o2o_nullable_set"
)
| A |
python | huggingface__transformers | tests/models/owlv2/test_image_processing_owlv2.py | {
"start": 1106,
"end": 2895
} | class ____:
def __init__(
self,
parent,
batch_size=7,
num_channels=3,
image_size=18,
min_resolution=30,
max_resolution=400,
do_resize=True,
size=None,
do_normalize=True,
image_mean=[0.48145466, 0.4578275, 0.40821073],
image_std=[0.26862954, 0.26130258, 0.27577711],
do_convert_rgb=True,
):
self.parent = parent
self.batch_size = batch_size
self.num_channels = num_channels
self.image_size = image_size
self.min_resolution = min_resolution
self.max_resolution = max_resolution
self.do_resize = do_resize
self.size = size if size is not None else {"height": 18, "width": 18}
self.do_normalize = do_normalize
self.image_mean = image_mean
self.image_std = image_std
self.do_convert_rgb = do_convert_rgb
def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
}
def expected_output_image_shape(self, images):
return self.num_channels, self.size["height"], self.size["width"]
def prepare_image_inputs(self, equal_resolution=False, numpify=False, torchify=False):
return prepare_image_inputs(
batch_size=self.batch_size,
num_channels=self.num_channels,
min_resolution=self.min_resolution,
max_resolution=self.max_resolution,
equal_resolution=equal_resolution,
numpify=numpify,
torchify=torchify,
)
@require_torch
@require_vision
| Owlv2ImageProcessingTester |
python | ansible__ansible | test/units/modules/test_unarchive.py | {
"start": 3293,
"end": 4037
} | class ____:
def test_no_tar_binary(self, mocker, fake_ansible_module):
mocker.patch("ansible.modules.unarchive.get_bin_path", side_effect=ValueError)
fake_ansible_module.params = {
"extra_opts": "",
"exclude": "",
"include": "",
"io_buffer_size": 65536,
}
fake_ansible_module.check_mode = False
t = TgzArchive(
src="",
b_dest="",
file_args="",
module=fake_ansible_module,
)
can_handle, reason = t.can_handle_archive()
assert can_handle is False
assert 'Unable to find required' in reason
assert t.cmd_path is None
assert t.tar_type is None
| TestCaseTgzArchive |
python | readthedocs__readthedocs.org | readthedocs/oauth/services/__init__.py | {
"start": 579,
"end": 727
} | class ____(SettingsOverrideObject):
_default_class = bitbucket.BitbucketService
_override_setting = "OAUTH_BITBUCKET_SERVICE"
| BitbucketService |
python | dagster-io__dagster | python_modules/libraries/dagster-airbyte/dagster_airbyte/managed/types.py | {
"start": 4060,
"end": 4982
} | class ____:
"""Represents a user-defined Airbyte source.
Args:
name (str): The display name of the source.
source_type (str): The type of the source, from Airbyte's list
of sources https://docs.airbyte.com/integrations/sources/.
source_configuration (Mapping[str, Any]): The configuration for the
source, as defined by Airbyte's API.
"""
@public
def __init__(self, name: str, source_type: str, source_configuration: Mapping[str, Any]):
self.name = check.str_param(name, "name")
self.source_type = check.str_param(source_type, "source_type")
self.source_configuration = check.mapping_param(
source_configuration, "source_configuration", key_type=str
)
def must_be_recreated(self, other: "AirbyteSource") -> bool:
return self.name != other.name or self.source_type != other.source_type
| AirbyteSource |
python | django-import-export__django-import-export | tests/core/tests/test_base_formats.py | {
"start": 2741,
"end": 5625
} | class ____(TestCase):
def setUp(self):
self.format = base_formats.XLSX()
self.filename = os.path.join(
os.path.dirname(__file__), os.path.pardir, "exports", "books.xlsx"
)
def test_binary_format(self):
self.assertTrue(self.format.is_binary())
@ignore_utcnow_deprecation_warning
def test_import(self):
with open(self.filename, self.format.get_read_mode()) as in_stream:
dataset = self.format.create_dataset(in_stream.read())
result = dataset.dict
self.assertEqual(1, len(result))
row = result.pop()
self.assertEqual(1, row["id"])
self.assertEqual("Some book", row["name"])
self.assertEqual("test@example.com", row["author_email"])
self.assertEqual(4, row["price"])
@mock.patch("openpyxl.load_workbook")
def test_that_load_workbook_called_with_required_args(self, mock_load_workbook):
self.format.create_dataset(b"abc")
mock_load_workbook.assert_called_with(
unittest.mock.ANY, read_only=True, data_only=True
)
@override_settings(IMPORT_EXPORT_IMPORT_IGNORE_BLANK_LINES=False)
def test_xlsx_create_dataset__empty_rows(self):
"""Default situation without the flag: do not ignore the empty rows for
backwards compatibility.
"""
rows_before = 3
empty_rows = 5
rows_after = 2
wb = openpyxl.Workbook()
ws = wb.active
ws.append(["Header1", "Header2", "Header3"])
for _ in range(rows_before):
ws.append(["Data1", "Data2", "Data3"])
for _ in range(empty_rows):
ws.append([None, None, None])
for _ in range(rows_after):
ws.append(["Data1", "Data2", "Data3"])
xlsx_data = BytesIO()
wb.save(xlsx_data)
xlsx_data.seek(0)
dataset = self.format.create_dataset(xlsx_data.getvalue())
assert len(dataset) == rows_before + empty_rows + rows_after # With empty rows
@override_settings(IMPORT_EXPORT_IMPORT_IGNORE_BLANK_LINES=True)
def test_xlsx_create_dataset__ignore_empty_rows(self):
"""Ensure that empty rows are not added to the dataset."""
rows_before = 3
empty_rows = 5
rows_after = 2
wb = openpyxl.Workbook()
ws = wb.active
ws.append(["Header1", "Header2", "Header3"])
for _ in range(rows_before):
ws.append(["Data1", "Data2", "Data3"])
for _ in range(empty_rows):
ws.append([None, None, None])
for _ in range(rows_after):
ws.append(["Data1", "Data2", "Data3"])
xlsx_data = BytesIO()
wb.save(xlsx_data)
xlsx_data.seek(0)
dataset = self.format.create_dataset(xlsx_data.getvalue())
assert len(dataset) == rows_before + rows_after # Without empty rows
| XLSXTest |
python | apache__airflow | task-sdk/tests/task_sdk/execution_time/test_context.py | {
"start": 11504,
"end": 12968
} | class ____:
def test_current_context_roundtrip(self):
example_context = {"Hello": "World"}
with set_current_context(example_context):
assert get_current_context() == example_context
def test_context_removed_after_exit(self):
example_context = {"Hello": "World"}
with set_current_context(example_context):
pass
with pytest.raises(RuntimeError):
get_current_context()
def test_nested_context(self):
"""
Nested execution context should be supported in case the user uses multiple context managers.
Each time the execute method of an operator is called, we set a new 'current' context.
This test verifies that no matter how many contexts are entered - order is preserved
"""
max_stack_depth = 15
ctx_list = []
for i in range(max_stack_depth):
# Create all contexts in ascending order
new_context = {"ContextId": i}
# Like 15 nested with statements
ctx_obj = set_current_context(new_context)
ctx_obj.__enter__()
ctx_list.append(ctx_obj)
for i in reversed(range(max_stack_depth)):
# Iterate over contexts in reverse order - stack is LIFO
ctx = get_current_context()
assert ctx["ContextId"] == i
# End of with statement
ctx_list[i].__exit__(None, None, None)
| TestCurrentContext |
python | ansible__ansible | test/units/module_utils/test_api.py | {
"start": 459,
"end": 674
} | class ____:
def test_ratelimit(self):
@rate_limit(rate=1, rate_limit=1)
def login_database():
return "success"
r = login_database()
assert r == 'success'
| TestRateLimit |
python | tensorflow__tensorflow | tensorflow/python/distribute/strategy_test_lib.py | {
"start": 6025,
"end": 20077
} | class ____(test.TestCase):
"""Some tests that should work with any DistributionStrategy."""
def _test_minimize_loss_eager(self, d):
with d.scope():
kernel = create_variable_like_keras_layer(
name="kernel", shape=(1, 1), dtype=dtypes.float32)
def loss(x):
y = array_ops.reshape(
math_ops.mat_mul(x, kernel), []) - array_ops.identity(1.)
return y * y
# TODO(isaprykin): Extract implicit_grad+get_filtered_grad_fn into a
# common `implicit_grad` function and put it in DistributionStrategy.
grad_fn = backprop.implicit_grad(loss)
grad_fn = optimizer.get_filtered_grad_fn(grad_fn)
def update(v, g):
return v.assign_sub(0.2 * g)
one = array_ops.identity([[1.]])
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.extended.call_for_each_replica(grad_fn, args=(one,))
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.extended.read_var(v)
before_list.append(fetched)
# control_dependencies irrelevant but harmless in eager execution
with ops.control_dependencies([fetched]):
g = d.extended.reduce_to(
reduce_util.ReduceOp.SUM, g, destinations=v)
with ops.control_dependencies(
d.extended.update(v, update, args=(g,), group=False)):
after_list.append(d.extended.read_var(v))
return before_list, after_list
for i in range(10):
b, a = step()
if i == 0:
before, = b # pylint: disable=unbalanced-tuple-unpacking
after, = a # pylint: disable=unbalanced-tuple-unpacking
error_before = abs(before.numpy() - 1)
error_after = abs(after.numpy() - 1)
# Error should go down
self.assertLess(error_after, error_before)
def _test_minimize_loss_graph(self,
d,
soft_placement=False,
learning_rate=0.2):
config = config_pb2.ConfigProto()
config.allow_soft_placement = soft_placement
config.gpu_options.per_process_gpu_memory_fraction = 0.3
with context.graph_mode(), \
ops.Graph().as_default(), \
self.cached_session(config=config) as sess, \
d.scope():
kernel = create_variable_like_keras_layer(
name="kernel", shape=(1, 1), dtype=dtypes.float32)
def loss(x):
y = array_ops.reshape(
math_ops.mat_mul(x, kernel), []) - array_ops.identity(1.)
return y * y
grad_fn = backprop.implicit_grad(loss)
def update(v, g):
return v.assign_sub(learning_rate * g)
one = array_ops.identity([[1.]])
def step():
"""Perform one optimization step."""
# Run forward & backward to get gradients, variables list.
g_v = d.extended.call_for_each_replica(grad_fn, args=(one,))
# Update the variables using the gradients and the update() function.
before_list = []
after_list = []
for g, v in g_v:
fetched = d.extended.read_var(v)
before_list.append(fetched)
with ops.control_dependencies([fetched]):
g = d.extended.reduce_to(
reduce_util.ReduceOp.SUM, g, destinations=v)
with ops.control_dependencies(
d.extended.update(v, update, args=(g,), group=False)):
after_list.append(d.extended.read_var(v))
return before_list, after_list
before_out, after_out = step()
variables.global_variables_initializer().run()
for i in range(10):
b, a = sess.run((before_out, after_out))
if i == 0:
before, = b
after, = a
error_before = abs(before - 1)
error_after = abs(after - 1)
# Error should go down
self.assertLess(error_after, error_before)
def _test_summary_for_replica_zero_only(self, d):
logdir = tempfile.mkdtemp()
def run_fn():
"""Function executed for each replica."""
with summary_writer.as_default():
replica_id = distribute_lib.get_replica_context().replica_id_in_sync_group
return summary_ops.write("a", replica_id)
with self.cached_session() as sess, d.scope(), \
summary_ops.always_record_summaries():
# We need global_step because summary writing op *always* has global_step
# as input, even when we always record summary or never record summary.
global_step = training_util.get_or_create_global_step()
if not context.executing_eagerly():
# When executing eagerly, variables are initialized immediately after
# creation, and its initializer will be None.
global_step.initializer.run()
summary_ops.set_step(0)
summary_writer = summary_ops.create_file_writer(logdir)
output = d.extended.call_for_each_replica(run_fn)
unwrapped = d.unwrap(output)
if not context.executing_eagerly():
sess.run(summary_writer.init())
sess.run(unwrapped)
sess.run(summary_writer.close())
events = _events_from_logdir(self, logdir)
# There will be 2 entries: 1 summary file header entry, and 1 entry
# written by replica 0.
self.assertLen(events, 2)
self.assertEqual(events[1].summary.value[0].tag, "a")
self.assertEqual(events[1].summary.value[0].simple_value, 0.0)
def _test_replica_id(self, d):
with d.scope():
expected_devices = [False] * len(d.extended.worker_devices)
def mark_devices_fn():
replica_id = self.evaluate(
distribute_lib.get_replica_context().replica_id_in_sync_group)
self.assertLess(replica_id, len(d.extended.worker_devices))
self.assertFalse(expected_devices[replica_id])
expected_devices[replica_id] = True
d.extended.call_for_each_replica(mark_devices_fn)
self.assertAllEqual(expected_devices,
[True] * len(d.extended.worker_devices))
def _test_call_and_merge_exceptions(self, dist):
with dist.scope():
with self.assertRaises(_TestException):
dist.extended.call_for_each_replica(_raise_exception_fn)
with self.assertRaises(_TestException):
dist.extended.call_for_each_replica(_merge_raises_fn)
with self.assertRaises(_TestException):
dist.extended.call_for_each_replica(_merge_call_raises_fn)
with self.assertRaises(_TestException):
dist.extended.call_for_each_replica(_merge_call_merge_raises_fn)
def _input_fn_to_test_input_context(self, dataset_or_callable_fn,
expected_num_replicas_in_sync,
expected_num_input_pipelines,
expected_input_pipeline_id):
# Use a list of one element as counter so that it can be captured by the
# `_input_fn`. This counter is incremented by 1 each time an input_fn is
# called. We use this counter to check whether the `input_pipeline_id`
# matches the counter in the in-graph replication.
worker_id_counter = [0]
def _input_fn(input_context):
"""Input fn for testing."""
self.assertIsNotNone(input_context)
self.assertEqual(expected_num_replicas_in_sync,
input_context.num_replicas_in_sync)
self.assertEqual(expected_num_input_pipelines,
input_context.num_input_pipelines)
if expected_input_pipeline_id is not None:
self.assertEqual(expected_input_pipeline_id,
input_context.input_pipeline_id)
else:
self.assertEqual(worker_id_counter[0], input_context.input_pipeline_id)
worker_id_counter[0] += 1
return dataset_or_callable_fn()
return _input_fn
def _test_input_fn_iterable(
self, strategy, input_fn, expected_values, ignore_order=False):
assert_same = self.assertCountEqual if ignore_order else self.assertEqual
iterable = strategy.distribute_datasets_from_function(input_fn)
if context.executing_eagerly():
iterator = iter(iterable)
for expected_value in expected_values:
computed_value = self.evaluate(
list(strategy.experimental_local_results(next(iterator))))
assert_same(expected_value, computed_value)
with self.assertRaises(StopIteration):
self.evaluate(strategy.experimental_local_results(next(iterator)))
# After re-initializing the iterator, should be able to iterate again.
iterator = iter(iterable)
for expected_value in expected_values:
computed_value = self.evaluate(
list(strategy.experimental_local_results(next(iterator))))
assert_same(expected_value, computed_value)
else:
iterator = dataset_ops.make_initializable_iterator(iterable)
self._test_input_fn_iterator(iterator, strategy.extended.worker_devices,
expected_values, test_reinitialize=True,
ignore_order=ignore_order)
def _test_input_fn_iterator(self,
iterator,
devices,
expected_values,
sess=None,
test_reinitialize=True,
ignore_order=False):
evaluate = lambda x: sess.run(x) if sess else self.evaluate(x)
evaluate(iterator.initializer)
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = evaluate(
[distribute_utils.select_replica(r, next_element) for r in
range(len(devices))])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
with self.assertRaises(errors.OutOfRangeError):
next_element = iterator.get_next()
evaluate(
[distribute_utils.select_replica(r, next_element) for r in
range(len(devices))])
# After re-initializing the iterator, should be able to iterate again.
if test_reinitialize:
evaluate(iterator.initializer)
for expected_value in expected_values:
next_element = iterator.get_next()
computed_value = evaluate([
distribute_utils.select_replica(r, next_element) for r in
range(len(devices))
])
if ignore_order:
self.assertCountEqual(expected_value, computed_value)
else:
self.assertEqual(expected_value, computed_value)
def _test_global_step_update(self, strategy):
with strategy.scope():
global_step = variable_scope.get_variable(
"global_step",
shape=[],
dtype=dtypes.int64,
initializer=init_ops.zeros_initializer(),
trainable=False,
aggregation=variables.VariableAggregation.ONLY_FIRST_REPLICA)
self.evaluate(variables.global_variables_initializer())
def model_fn():
train_op = global_step.assign_add(1)
value = global_step.read_value()
return train_op, value
train_ops, value = strategy.extended.call_for_each_replica(model_fn)
self.evaluate(strategy.group(train_ops))
global_step_tensors = strategy.experimental_local_results(value)
global_step_values = self.evaluate(global_step_tensors)
self.assertEqual((1,) * len(global_step_tensors), global_step_values)
def _test_numpy_dataset(self, strategy, session=None, run_in_function=False):
if not isinstance(strategy, distribute_lib.StrategyV1):
self.skipTest("n/a: V1 only")
cached_session = session or self.cached_session()
with strategy.scope(), cached_session as sess:
x = np.asarray([[1, 2], [6, 12], [2, 4], [5, 10], [3, 6], [4, 8]])
y = np.asarray([5, 4, 3, 2, 1, 0])
batch_size = 6
if not strategy.extended._global_batch_size: # pylint: disable=protected-access
batch_size = batch_size // strategy.num_replicas_in_sync
ds = strategy.extended.experimental_make_numpy_dataset(
(x, y), session=sess or self.cached_session())
ds = ds.repeat(2) # 2 epochs
# We need to use the drop_remainder argument to get a known static
# input shape which is required for TPUs.
drop_remainder = strategy.extended.experimental_require_static_shapes
ds = ds.batch(batch_size, drop_remainder=drop_remainder)
i = strategy.make_dataset_iterator(ds)
self.evaluate(i.initializer)
def run_and_concatenate(strategy, i):
x, y = strategy.experimental_run(
_maybe_run_in_function(lambda z: z, run_in_function), i)
x, y = self.evaluate((strategy.experimental_local_results(x),
strategy.experimental_local_results(y)))
return np.concatenate(x), np.concatenate(y)
x_1, y_1 = run_and_concatenate(strategy, i)
self.assertAllEqual(x, x_1)
self.assertAllEqual(y, y_1)
x_2, y_2 = run_and_concatenate(strategy, i)
self.assertAllEqual(x, x_2)
self.assertAllEqual(y, y_2)
with self.assertRaises(errors.OutOfRangeError):
run_and_concatenate(strategy, i)
def _test_trainable_variable(self, strategy):
for cls in [variable_v1.VariableV1, variables.Variable]:
with strategy.scope():
v1 = cls(1.0)
self.assertEqual(True, v1.trainable)
v2 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ)
self.assertEqual(False, v2.trainable)
v3 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=True)
self.assertEqual(True, v3.trainable)
v4 = cls(1.0, synchronization=variables.VariableSynchronization.ON_READ,
trainable=False)
self.assertEqual(False, v4.trainable)
| DistributionTestBase |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.