file_path
stringlengths 3
280
| file_language
stringclasses 66
values | content
stringlengths 1
1.04M
| repo_name
stringlengths 5
92
| repo_stars
int64 0
154k
| repo_description
stringlengths 0
402
| repo_primary_language
stringclasses 108
values | developer_username
stringlengths 1
25
| developer_name
stringlengths 0
30
| developer_company
stringlengths 0
82
|
|---|---|---|---|---|---|---|---|---|---|
python/ray/tests/test_projects.py
|
Python
|
import jsonschema
import os
import pytest
import subprocess
import yaml
from click.testing import CliRunner
import sys
from unittest.mock import patch, DEFAULT
from contextlib import contextmanager
from ray.projects.scripts import (session_start, session_commands,
session_execute)
import ray
TEST_DIR = os.path.join(
os.path.dirname(os.path.abspath(__file__)), "project_files")
def load_project_description(project_file):
path = os.path.join(TEST_DIR, project_file)
with open(path) as f:
return yaml.safe_load(f)
def test_validation():
project_dirs = ["docker_project", "requirements_project", "shell_project"]
for project_dir in project_dirs:
project_dir = os.path.join(TEST_DIR, project_dir)
ray.projects.ProjectDefinition(project_dir)
bad_schema_dirs = ["no_project1"]
for project_dir in bad_schema_dirs:
project_dir = os.path.join(TEST_DIR, project_dir)
with pytest.raises(jsonschema.exceptions.ValidationError):
ray.projects.ProjectDefinition(project_dir)
bad_project_dirs = ["no_project2", "noproject3"]
for project_dir in bad_project_dirs:
project_dir = os.path.join(TEST_DIR, project_dir)
with pytest.raises(ValueError):
ray.projects.ProjectDefinition(project_dir)
def test_project_root():
path = os.path.join(TEST_DIR, "project1")
project_definition = ray.projects.ProjectDefinition(path)
assert os.path.normpath(project_definition.root) == os.path.normpath(path)
path2 = os.path.join(TEST_DIR, "project1", "subdir")
project_definition = ray.projects.ProjectDefinition(path2)
assert os.path.normpath(project_definition.root) == os.path.normpath(path)
path3 = "/tmp/"
with pytest.raises(ValueError):
project_definition = ray.projects.ProjectDefinition(path3)
def test_project_validation():
path = os.path.join(TEST_DIR, "project1")
subprocess.check_call(["ray", "project", "validate"], cwd=path)
def test_project_no_validation():
with pytest.raises(subprocess.CalledProcessError):
subprocess.check_call(["ray", "project", "validate"], cwd=TEST_DIR)
@contextmanager
def _chdir_and_back(d):
old_dir = os.getcwd()
try:
os.chdir(d)
yield
finally:
os.chdir(old_dir)
def run_test_project(project_dir, command, args):
# Run the CLI commands with patching
test_dir = os.path.join(TEST_DIR, project_dir)
with _chdir_and_back(test_dir):
runner = CliRunner()
with patch.multiple(
"ray.projects.scripts",
create_or_update_cluster=DEFAULT,
rsync=DEFAULT,
exec_cluster=DEFAULT,
) as mock_calls:
result = runner.invoke(command, args)
return result, mock_calls, test_dir
def test_session_start_default_project():
result, mock_calls, test_dir = run_test_project(
"session-tests/project-pass", session_start, ["default"])
loaded_project = ray.projects.ProjectDefinition(test_dir)
assert result.exit_code == 0
# Part 1/3: Cluster Launching Call
create_or_update_cluster_call = mock_calls["create_or_update_cluster"]
assert create_or_update_cluster_call.call_count == 1
_, kwargs = create_or_update_cluster_call.call_args
assert kwargs["config_file"] == loaded_project.cluster_yaml()
# Part 2/3: Rsync Calls
rsync_call = mock_calls["rsync"]
# 1 for rsyncing the project directory, 1 for rsyncing the
# requirements.txt.
assert rsync_call.call_count == 2
_, kwargs = rsync_call.call_args
assert kwargs["source"] == loaded_project.config["environment"][
"requirements"]
# Part 3/3: Exec Calls
exec_cluster_call = mock_calls["exec_cluster"]
commands_executed = []
for _, kwargs in exec_cluster_call.call_args_list:
commands_executed.append(kwargs["cmd"].replace(
"cd {}; ".format(loaded_project.working_directory()), ""))
expected_commands = loaded_project.config["environment"]["shell"]
expected_commands += [
command["command"] for command in loaded_project.config["commands"]
]
if "requirements" in loaded_project.config["environment"]:
assert any("pip install -r" for cmd in commands_executed)
# pop the `pip install` off commands executed
commands_executed = [
cmd for cmd in commands_executed if "pip install -r" not in cmd
]
assert expected_commands == commands_executed
def test_session_execute_default_project():
result, mock_calls, test_dir = run_test_project(
"session-tests/project-pass", session_execute, ["default"])
loaded_project = ray.projects.ProjectDefinition(test_dir)
assert result.exit_code == 0
assert mock_calls["rsync"].call_count == 0
assert mock_calls["create_or_update_cluster"].call_count == 0
exec_cluster_call = mock_calls["exec_cluster"]
commands_executed = []
for _, kwargs in exec_cluster_call.call_args_list:
commands_executed.append(kwargs["cmd"].replace(
"cd {}; ".format(loaded_project.working_directory()), ""))
expected_commands = [
command["command"] for command in loaded_project.config["commands"]
]
assert expected_commands == commands_executed
result, mock_calls, test_dir = run_test_project(
"session-tests/project-pass", session_execute, ["--shell", "uptime"])
assert result.exit_code == 0
def test_session_start_docker_fail():
result, _, _ = run_test_project("session-tests/with-docker-fail",
session_start, [])
assert result.exit_code == 1
assert ("Docker support in session is currently "
"not implemented") in result.output
def test_session_invalid_config_errored():
result, _, _ = run_test_project("session-tests/invalid-config-fail",
session_start, [])
assert result.exit_code == 1
assert "validation failed" in result.output
# check that we are displaying actional error message
assert "ray project validate" in result.output
def test_session_create_command():
result, mock_calls, test_dir = run_test_project(
"session-tests/commands-test", session_start,
["first", "--a", "1", "--b", "2"])
# Verify the project can be loaded.
ray.projects.ProjectDefinition(test_dir)
assert result.exit_code == 0
exec_cluster_call = mock_calls["exec_cluster"]
found_command = False
for _, kwargs in exec_cluster_call.call_args_list:
if "Starting ray job with 1 and 2" in kwargs["cmd"]:
found_command = True
assert found_command
def test_session_create_multiple():
for args in [{"a": "*", "b": "2"}, {"a": "1", "b": "*"}]:
result, mock_calls, test_dir = run_test_project(
"session-tests/commands-test", session_start,
["first", "--a", args["a"], "--b", args["b"]])
loaded_project = ray.projects.ProjectDefinition(test_dir)
assert result.exit_code == 0
exec_cluster_call = mock_calls["exec_cluster"]
commands_executed = []
for _, kwargs in exec_cluster_call.call_args_list:
commands_executed.append(kwargs["cmd"].replace(
"cd {}; ".format(loaded_project.working_directory()), ""))
assert commands_executed.count("echo \"Setting up\"") == 2
if args["a"] == "*":
assert commands_executed.count(
"echo \"Starting ray job with 1 and 2\"") == 1
assert commands_executed.count(
"echo \"Starting ray job with 2 and 2\"") == 1
if args["b"] == "*":
assert commands_executed.count(
"echo \"Starting ray job with 1 and 1\"") == 1
assert commands_executed.count(
"echo \"Starting ray job with 1 and 2\"") == 1
# Using multiple wildcards shouldn't work
result, mock_calls, test_dir = run_test_project(
"session-tests/commands-test", session_start,
["first", "--a", "*", "--b", "*"])
assert result.exit_code == 1
def test_session_commands():
result, mock_calls, test_dir = run_test_project(
"session-tests/commands-test", session_commands, [])
assert "This is the first parameter" in result.output
assert "This is the second parameter" in result.output
assert 'Command "first"' in result.output
assert 'Command "second"' in result.output
if __name__ == "__main__":
# Make subprocess happy in bazel.
os.environ["LC_ALL"] = "en_US.UTF-8"
os.environ["LANG"] = "en_US.UTF-8"
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_queue.py
|
Python
|
import pytest
import time
import ray
from ray.experimental.queue import Queue, Empty, Full
def test_queue(ray_start_regular):
@ray.remote
def get_async(queue, block, timeout, sleep):
time.sleep(sleep)
return queue.get(block, timeout)
@ray.remote
def put_async(queue, item, block, timeout, sleep):
time.sleep(sleep)
queue.put(item, block, timeout)
# Test simple usage.
q = Queue()
items = list(range(10))
for item in items:
q.put(item)
for item in items:
assert item == q.get()
# Test asynchronous usage.
q = Queue()
items = set(range(10))
producers = [ # noqa
put_async.remote(q, item, True, None, 0.5) for item in items
]
consumers = [get_async.remote(q, True, None, 0) for _ in items]
result = set(ray.get(consumers))
assert items == result
# Test put.
q = Queue(1)
item = 0
q.put(item, block=False)
assert q.get() == item
item = 1
q.put(item, timeout=0.2)
assert q.get() == item
with pytest.raises(ValueError):
q.put(0, timeout=-1)
q.put(0)
with pytest.raises(Full):
q.put_nowait(1)
with pytest.raises(Full):
q.put(1, timeout=0.2)
q.get()
q.put(1)
get_id = get_async.remote(q, False, None, 0.2)
q.put(2)
assert ray.get(get_id) == 1
# Test get.
q = Queue()
item = 0
q.put(item)
assert q.get(block=False) == item
item = 1
q.put(item)
assert q.get(timeout=0.2) == item
with pytest.raises(ValueError):
q.get(timeout=-1)
with pytest.raises(Empty):
q.get_nowait()
with pytest.raises(Empty):
q.get(timeout=0.2)
item = 0
put_async.remote(q, item, True, None, 0.2)
assert q.get() == item
# Test qsize.
q = Queue()
items = list(range(10))
size = 0
assert q.qsize() == size
for item in items:
q.put(item)
size += 1
assert q.qsize() == size
for item in items:
assert q.get() == item
size -= 1
assert q.qsize() == size
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_ray_init.py
|
Python
|
import os
import pytest
import redis
import ray
from ray.cluster_utils import Cluster
@pytest.fixture
def password():
random_bytes = os.urandom(128)
if hasattr(random_bytes, "hex"):
return random_bytes.hex() # Python 3
return random_bytes.encode("hex") # Python 2
class TestRedisPassword:
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't support Redis authentication yet.")
def test_redis_password(self, password, shutdown_only):
@ray.remote
def f():
return 1
info = ray.init(redis_password=password)
address = info["redis_address"]
redis_ip, redis_port = address.split(":")
# Check that we can run a task
object_id = f.remote()
ray.get(object_id)
# Check that Redis connections require a password
redis_client = redis.StrictRedis(
host=redis_ip, port=redis_port, password=None)
with pytest.raises(redis.exceptions.AuthenticationError):
redis_client.ping()
# Check that we can connect to Redis using the provided password
redis_client = redis.StrictRedis(
host=redis_ip, port=redis_port, password=password)
assert redis_client.ping()
@pytest.mark.skipif(
os.environ.get("RAY_USE_NEW_GCS") == "on",
reason="New GCS API doesn't support Redis authentication yet.")
def test_redis_password_cluster(self, password, shutdown_only):
@ray.remote
def f():
return 1
node_args = {"redis_password": password}
cluster = Cluster(
initialize_head=True, connect=True, head_node_args=node_args)
cluster.add_node(**node_args)
object_id = f.remote()
ray.get(object_id)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_reference_counting.py
|
Python
|
# coding: utf-8
import os
import json
import copy
import tempfile
import numpy as np
import time
import pytest
import logging
import uuid
import ray
import ray.cluster_utils
import ray.test_utils
logger = logging.getLogger(__name__)
def _check_refcounts(expected):
actual = ray.worker.global_worker.core_worker.get_all_reference_counts()
assert len(expected) == len(actual)
for object_id, (local, submitted) in expected.items():
assert object_id in actual
assert local == actual[object_id]["local"]
assert submitted == actual[object_id]["submitted"]
def check_refcounts(expected, timeout=10):
start = time.time()
while True:
try:
_check_refcounts(expected)
break
except AssertionError as e:
if time.time() - start > timeout:
raise e
else:
time.sleep(0.1)
def test_local_refcounts(ray_start_regular):
oid1 = ray.put(None)
check_refcounts({oid1: (1, 0)})
oid1_copy = copy.copy(oid1)
check_refcounts({oid1: (2, 0)})
del oid1
check_refcounts({oid1_copy: (1, 0)})
del oid1_copy
check_refcounts({})
def test_dependency_refcounts(ray_start_regular):
# Return a large object that will be spilled to plasma.
def large_object():
return np.zeros(10 * 1024 * 1024, dtype=np.uint8)
# TODO: Clean up tmpfiles?
def random_path():
return os.path.join(tempfile.gettempdir(), uuid.uuid4().hex)
def touch(path):
with open(path, "w"):
pass
def wait_for_file(path):
while True:
if os.path.exists(path):
break
time.sleep(0.1)
@ray.remote
def one_dep(dep, path=None, fail=False):
if path is not None:
wait_for_file(path)
if fail:
raise Exception("failed on purpose")
@ray.remote
def one_dep_large(dep, path=None):
if path is not None:
wait_for_file(path)
# This should be spilled to plasma.
return large_object()
# Test that regular plasma dependency refcounts are decremented once the
# task finishes.
f = random_path()
large_dep = ray.put(large_object())
result = one_dep.remote(large_dep, path=f)
check_refcounts({large_dep: (1, 1), result: (1, 0)})
touch(f)
# Reference count should be removed once the task finishes.
check_refcounts({large_dep: (1, 0), result: (1, 0)})
del large_dep, result
check_refcounts({})
# Test that inlined dependency refcounts are decremented once they are
# inlined.
f = random_path()
dep = one_dep.remote(None, path=f)
check_refcounts({dep: (1, 0)})
result = one_dep.remote(dep)
check_refcounts({dep: (1, 1), result: (1, 0)})
touch(f)
# Reference count should be removed as soon as the dependency is inlined.
check_refcounts({dep: (1, 0), result: (1, 0)}, timeout=1)
del dep, result
check_refcounts({})
# Test that spilled plasma dependency refcounts are decremented once
# the task finishes.
f1, f2 = random_path(), random_path()
dep = one_dep_large.remote(None, path=f1)
check_refcounts({dep: (1, 0)})
result = one_dep.remote(dep, path=f2)
check_refcounts({dep: (1, 1), result: (1, 0)})
touch(f1)
ray.get(dep, timeout=5.0)
# Reference count should remain because the dependency is in plasma.
check_refcounts({dep: (1, 1), result: (1, 0)})
touch(f2)
# Reference count should be removed because the task finished.
check_refcounts({dep: (1, 0), result: (1, 0)})
del dep, result
check_refcounts({})
# Test that regular plasma dependency refcounts are decremented if a task
# fails.
f = random_path()
large_dep = ray.put(large_object())
result = one_dep.remote(large_dep, path=f, fail=True)
check_refcounts({large_dep: (1, 1), result: (1, 0)})
touch(f)
# Reference count should be removed once the task finishes.
check_refcounts({large_dep: (1, 0), result: (1, 0)})
del large_dep, result
check_refcounts({})
# Test that spilled plasma dependency refcounts are decremented if a task
# fails.
f1, f2 = random_path(), random_path()
dep = one_dep_large.remote(None, path=f1)
check_refcounts({dep: (1, 0)})
result = one_dep.remote(dep, path=f2, fail=True)
check_refcounts({dep: (1, 1), result: (1, 0)})
touch(f1)
ray.get(dep, timeout=5.0)
# Reference count should remain because the dependency is in plasma.
check_refcounts({dep: (1, 1), result: (1, 0)})
touch(f2)
# Reference count should be removed because the task finished.
check_refcounts({dep: (1, 0), result: (1, 0)})
del dep, result
check_refcounts({})
def test_basic_pinning(shutdown_only):
ray.init(object_store_memory=100 * 1024 * 1024)
@ray.remote
def f(array):
return np.sum(array)
@ray.remote
class Actor(object):
def __init__(self):
# Hold a long-lived reference to a ray.put object's ID. The object
# should not be garbage collected while the actor is alive because
# the object is pinned by the raylet.
self.large_object = ray.put(
np.zeros(25 * 1024 * 1024, dtype=np.uint8))
def get_large_object(self):
return ray.get(self.large_object)
actor = Actor.remote()
# Fill up the object store with short-lived objects. These should be
# evicted before the long-lived object whose reference is held by
# the actor.
for batch in range(10):
intermediate_result = f.remote(
np.zeros(10 * 1024 * 1024, dtype=np.uint8))
ray.get(intermediate_result)
# The ray.get below would fail with only LRU eviction, as the object
# that was ray.put by the actor would have been evicted.
ray.get(actor.get_large_object.remote())
def test_pending_task_dependency_pinning(shutdown_only):
ray.init(object_store_memory=100 * 1024 * 1024, use_pickle=True)
@ray.remote
def pending(input1, input2):
return
@ray.remote
def slow(dep):
pass
# The object that is ray.put here will go out of scope immediately, so if
# pending task dependencies aren't considered, it will be evicted before
# the ray.get below due to the subsequent ray.puts that fill up the object
# store.
np_array = np.zeros(40 * 1024 * 1024, dtype=np.uint8)
random_id = ray.ObjectID.from_random()
oid = pending.remote(np_array, slow.remote(random_id))
for _ in range(2):
ray.put(np_array)
ray.worker.global_worker.put_object(None, object_id=random_id)
ray.get(oid)
def test_feature_flag(shutdown_only):
ray.init(
object_store_memory=100 * 1024 * 1024,
_internal_config=json.dumps({
"object_pinning_enabled": 0
}))
@ray.remote
def f(array):
return np.sum(array)
@ray.remote
class Actor(object):
def __init__(self):
self.large_object = ray.put(
np.zeros(25 * 1024 * 1024, dtype=np.uint8))
def wait_for_actor_to_start(self):
pass
def get_large_object(self):
return ray.get(self.large_object)
actor = Actor.remote()
ray.get(actor.wait_for_actor_to_start.remote())
for batch in range(10):
intermediate_result = f.remote(
np.zeros(10 * 1024 * 1024, dtype=np.uint8))
ray.get(intermediate_result)
# The ray.get below fails with only LRU eviction, as the object
# that was ray.put by the actor should have been evicted.
with pytest.raises(ray.exceptions.RayTimeoutError):
ray.get(actor.get_large_object.remote(), timeout=1)
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_signal.py
|
Python
|
import pytest
import time
import ray
import ray.experimental.signal as signal
class UserSignal(signal.Signal):
def __init__(self, value):
self.value = value
def receive_all_signals(sources, timeout):
# Get all signals from sources, until there is no signal for a time
# period of timeout.
results = []
while True:
r = signal.receive(sources, timeout=timeout)
if len(r) == 0:
return results
else:
results.extend(r)
def test_task_to_driver(ray_start_regular):
# Send a signal from a task to the driver.
@ray.remote
def task_send_signal(value):
signal.send(UserSignal(value))
return
signal_value = "simple signal"
object_id = task_send_signal.remote(signal_value)
result_list = signal.receive([object_id], timeout=10)
print(result_list[0][1])
assert len(result_list) == 1
def test_send_signal_from_actor_to_driver(ray_start_regular):
# Send several signals from an actor, and receive them in the driver.
@ray.remote
class ActorSendSignal:
def __init__(self):
pass
def send_signal(self, value):
signal.send(UserSignal(value))
a = ActorSendSignal.remote()
signal_value = "simple signal"
count = 6
for i in range(count):
ray.get(a.send_signal.remote(signal_value + str(i)))
result_list = receive_all_signals([a], timeout=5)
assert len(result_list) == count
for i in range(count):
assert signal_value + str(i) == result_list[i][1].value
def test_send_signals_from_actor_to_driver(ray_start_regular):
# Send "count" signal at intervals from an actor and get
# these signals in the driver.
@ray.remote
class ActorSendSignals:
def __init__(self):
pass
def send_signals(self, value, count):
for i in range(count):
signal.send(UserSignal(value + str(i)))
a = ActorSendSignals.remote()
signal_value = "simple signal"
count = 20
a.send_signals.remote(signal_value, count)
received_count = 0
while True:
result_list = signal.receive([a], timeout=5)
received_count += len(result_list)
if (received_count == count):
break
assert True
def test_task_crash(ray_start_regular):
# Get an error when ray.get() is called on the return of a failed task.
@ray.remote
def crashing_function():
raise Exception("exception message")
object_id = crashing_function.remote()
try:
ray.get(object_id)
except Exception as e:
assert type(e) == ray.exceptions.RayTaskError
finally:
result_list = signal.receive([object_id], timeout=5)
assert len(result_list) == 1
assert type(result_list[0][1]) == signal.ErrorSignal
def test_task_crash_without_get(ray_start_regular):
# Get an error when task failed.
@ray.remote
def crashing_function():
raise Exception("exception message")
object_id = crashing_function.remote()
result_list = signal.receive([object_id], timeout=5)
assert len(result_list) == 1
assert type(result_list[0][1]) == signal.ErrorSignal
def test_actor_crash(ray_start_regular):
# Get an error when ray.get() is called on a return parameter
# of a method that failed.
@ray.remote
class Actor:
def __init__(self):
pass
def crash(self):
raise Exception("exception message")
a = Actor.remote()
try:
ray.get(a.crash.remote())
except Exception as e:
assert type(e) == ray.exceptions.RayTaskError
finally:
result_list = signal.receive([a], timeout=5)
assert len(result_list) == 1
assert type(result_list[0][1]) == signal.ErrorSignal
def test_actor_crash_init(ray_start_regular):
# Get an error when an actor's __init__ failed.
@ray.remote
class ActorCrashInit:
def __init__(self):
raise Exception("exception message")
def m(self):
return 1
# Do not catch the exception in the __init__.
a = ActorCrashInit.remote()
result_list = signal.receive([a], timeout=5)
assert len(result_list) == 1
assert type(result_list[0][1]) == signal.ErrorSignal
def test_actor_crash_init2(ray_start_regular):
# Get errors when (1) __init__ fails, and (2) subsequently when
# ray.get() is called on the return parameter of another method
# of the actor.
@ray.remote
class ActorCrashInit:
def __init__(self):
raise Exception("exception message")
def method(self):
return 1
a = ActorCrashInit.remote()
try:
ray.get(a.method.remote())
except Exception as e:
assert type(e) == ray.exceptions.RayTaskError
finally:
result_list = receive_all_signals([a], timeout=5)
assert len(result_list) == 2
assert type(result_list[0][1]) == signal.ErrorSignal
def test_actor_crash_init3(ray_start_regular):
# Get errors when (1) __init__ fails, and (2) subsequently when
# another method of the actor is invoked.
@ray.remote
class ActorCrashInit:
def __init__(self):
raise Exception("exception message")
def method(self):
return 1
a = ActorCrashInit.remote()
a.method.remote()
# Wait for a.method.remote() to finish and generate an error.
time.sleep(10)
result_list = signal.receive([a], timeout=5)
assert len(result_list) == 2
assert type(result_list[0][1]) == signal.ErrorSignal
def test_send_signals_from_actor_to_actor(ray_start_regular):
# Send "count" signal at intervals of 100ms from two actors and get
# these signals in another actor.
@ray.remote
class ActorSendSignals:
def __init__(self):
pass
def send_signals(self, value, count):
for i in range(count):
signal.send(UserSignal(value + str(i)))
@ray.remote
class ActorGetSignalsAll:
def __init__(self):
self.received_signals = []
def register_handle(self, handle):
self.this_actor = handle
def get_signals(self, source_ids, count):
new_signals = receive_all_signals(source_ids, timeout=5)
for s in new_signals:
self.received_signals.append(s)
if len(self.received_signals) < count:
self.this_actor.get_signals.remote(source_ids, count)
else:
return
def get_count(self):
return len(self.received_signals)
a1 = ActorSendSignals.remote()
a2 = ActorSendSignals.remote()
signal_value = "simple signal"
count = 20
ray.get(a1.send_signals.remote(signal_value, count))
ray.get(a2.send_signals.remote(signal_value, count))
b = ActorGetSignalsAll.remote()
ray.get(b.register_handle.remote(b))
b.get_signals.remote([a1, a2], count)
received_count = ray.get(b.get_count.remote())
assert received_count == 2 * count
def test_forget(ray_start_regular):
# Send "count" signals on behalf of an actor, then ignore all these
# signals, and then send anther "count" signals on behalf of the same
# actor. Then show that the driver only gets the last "count" signals.
@ray.remote
class ActorSendSignals:
def __init__(self):
pass
def send_signals(self, value, count):
for i in range(count):
signal.send(UserSignal(value + str(i)))
a = ActorSendSignals.remote()
signal_value = "simple signal"
count = 5
ray.get(a.send_signals.remote(signal_value, count))
signal.forget([a])
ray.get(a.send_signals.remote(signal_value, count))
result_list = receive_all_signals([a], timeout=5)
assert len(result_list) == count
def test_signal_on_node_failure(two_node_cluster):
"""Test actor checkpointing on a remote node."""
class ActorSignal:
def __init__(self):
pass
def node_id(self):
return ray.worker.global_worker.node.unique_id
# Place the actor on the remote node.
cluster, remote_node = two_node_cluster
actor_cls = ray.remote(max_reconstructions=0)(ActorSignal)
actor = actor_cls.remote()
# Try until we put an actor on a different node.
while (ray.get(actor.node_id.remote()) != remote_node.unique_id):
actor = actor_cls.remote()
# Kill actor process.
cluster.remove_node(remote_node)
# Wait on signal from the actor on the failed node.
result_list = signal.receive([actor], timeout=10)
assert len(result_list) == 1
assert type(result_list[0][1]) == signal.ActorDiedSignal
def test_send_signal_from_two_tasks_to_driver(ray_start_regular):
# Define a remote function that sends a user-defined signal.
@ray.remote
def send_signal(value):
signal.send(UserSignal(value))
a = send_signal.remote(0)
b = send_signal.remote(0)
ray.get([a, b])
result_list = ray.experimental.signal.receive([a])
assert len(result_list) == 1
# Call again receive on "a" with no new signal.
result_list = ray.experimental.signal.receive([a, b])
assert len(result_list) == 1
def test_receiving_on_two_returns(ray_start_regular):
@ray.remote(num_return_vals=2)
def send_signal(value):
signal.send(UserSignal(value))
return 1, 2
x, y = send_signal.remote(0)
ray.get([x, y])
results = ray.experimental.signal.receive([x, y])
assert ((x == results[0][0] and y == results[1][0])
or (x == results[1][0] and y == results[0][0]))
def test_serial_tasks_reading_same_signal(shutdown_only):
ray.init(num_cpus=2)
@ray.remote
def send_signal(value):
signal.send(UserSignal(value))
a = send_signal.remote(0)
@ray.remote
def f(sources):
return ray.experimental.signal.receive(sources, timeout=1)
result_list = ray.get(f.remote([a]))
assert len(result_list) == 1
result_list = ray.get(f.remote([a]))
assert len(result_list) == 1
result_list = ray.get(f.remote([a]))
assert len(result_list) == 1
def test_non_integral_receive_timeout(ray_start_regular):
@ray.remote
def send_signal(value):
signal.send(UserSignal(value))
a = send_signal.remote(0)
# make sure send_signal had a chance to execute
ray.get(a)
result_list = ray.experimental.signal.receive([a], timeout=0.1)
assert len(result_list) == 1
def test_small_receive_timeout(ray_start_regular):
""" Test that receive handles timeout smaller than the 1ms min
"""
# 0.1 ms
small_timeout = 1e-4
@ray.remote
def send_signal(value):
signal.send(UserSignal(value))
a = send_signal.remote(0)
# make sure send_signal had a chance to execute
ray.get(a)
result_list = ray.experimental.signal.receive([a], timeout=small_timeout)
assert len(result_list) == 1
if __name__ == "__main__":
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_stress.py
|
Python
|
import numpy as np
import pytest
import time
import ray
from ray.cluster_utils import Cluster
@pytest.fixture(params=[(1, 4), (4, 4)])
def ray_start_combination(request):
num_nodes = request.param[0]
num_workers_per_scheduler = request.param[1]
# Start the Ray processes.
cluster = Cluster(
initialize_head=True,
head_node_args={
"num_cpus": 10,
"redis_max_memory": 10**7
})
for i in range(num_nodes - 1):
cluster.add_node(num_cpus=10)
ray.init(address=cluster.address)
yield num_nodes, num_workers_per_scheduler, cluster
# The code after the yield will run as teardown code.
ray.shutdown()
cluster.shutdown()
def test_submitting_tasks(ray_start_combination):
_, _, cluster = ray_start_combination
@ray.remote
def f(x):
return x
for _ in range(1):
ray.get([f.remote(1) for _ in range(1000)])
for _ in range(10):
ray.get([f.remote(1) for _ in range(100)])
for _ in range(100):
ray.get([f.remote(1) for _ in range(10)])
for _ in range(1000):
ray.get([f.remote(1) for _ in range(1)])
assert cluster.remaining_processes_alive()
def test_dependencies(ray_start_combination):
_, _, cluster = ray_start_combination
@ray.remote
def f(x):
return x
x = 1
for _ in range(1000):
x = f.remote(x)
ray.get(x)
@ray.remote
def g(*xs):
return 1
xs = [g.remote(1)]
for _ in range(100):
xs.append(g.remote(*xs))
xs.append(g.remote(1))
ray.get(xs)
assert cluster.remaining_processes_alive()
def test_wait(ray_start_combination):
num_nodes, num_workers_per_scheduler, cluster = ray_start_combination
num_workers = num_nodes * num_workers_per_scheduler
@ray.remote
def f(x):
return x
x_ids = [f.remote(i) for i in range(100)]
for i in range(len(x_ids)):
ray.wait([x_ids[i]])
for i in range(len(x_ids) - 1):
ray.wait(x_ids[i:])
@ray.remote
def g(x):
time.sleep(x)
for i in range(1, 5):
x_ids = [
g.remote(np.random.uniform(0, i)) for _ in range(2 * num_workers)
]
ray.wait(x_ids, num_returns=len(x_ids))
assert cluster.remaining_processes_alive()
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_stress_failure.py
|
Python
|
import json
import numpy as np
import os
import pytest
import sys
import time
import ray
from ray.cluster_utils import Cluster
from ray.test_utils import flat_errors
import ray.ray_constants as ray_constants
@pytest.fixture(params=[1, 4])
def ray_start_reconstruction(request):
num_nodes = request.param
plasma_store_memory = int(0.5 * 10**9)
cluster = Cluster(
initialize_head=True,
head_node_args={
"num_cpus": 1,
"object_store_memory": plasma_store_memory // num_nodes,
"redis_max_memory": 10**7,
"_internal_config": json.dumps({
"initial_reconstruction_timeout_milliseconds": 200
})
})
for i in range(num_nodes - 1):
cluster.add_node(
num_cpus=1,
object_store_memory=plasma_store_memory // num_nodes,
_internal_config=json.dumps({
"initial_reconstruction_timeout_milliseconds": 200
}))
ray.init(address=cluster.address)
yield plasma_store_memory, num_nodes, cluster
# Clean up the Ray cluster.
ray.shutdown()
cluster.shutdown()
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_simple(ray_start_reconstruction):
    """Evict dependency-free task outputs, then re-fetch them.

    Re-fetching evicted objects exercises reconstruction; the test also
    checks no Ray process died along the way.
    """
    plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is 1.5 times the plasma stores' combined
    # allotted memory, so older objects must be evicted.
    num_objects = 100
    size = int(plasma_store_memory * 1.5 / (num_objects * 8))

    # Define a remote task with no dependencies, which returns a numpy
    # array of the given size.
    @ray.remote
    def foo(i, size):
        array = np.zeros(size)
        array[0] = i
        return array

    # Launch num_objects instances of the remote task.
    args = []
    for i in range(num_objects):
        args.append(foo.remote(i, size))

    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get values sequentially, in chunks.
    num_chunks = 4 * num_nodes
    chunk = num_objects // num_chunks
    for i in range(num_chunks):
        values = ray.get(args[i * chunk:(i + 1) * chunk])
        del values

    # No raylet/store process should have crashed during the churn.
    assert cluster.remaining_processes_alive()
def sorted_random_indexes(total, output_num):
    """Draw `output_num` random indexes in [0, total) and return them sorted."""
    return sorted(np.random.randint(total) for _ in range(output_num))
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_recursive(ray_start_reconstruction):
    """Stress-test reconstruction of a linear chain of dependent tasks."""
    plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is 1.5 times the plasma stores' combined
    # allotted memory, forcing evictions.
    num_objects = 100
    size = int(plasma_store_memory * 1.5 / (num_objects * 8))

    # Define a root task with no dependencies, which returns a numpy array
    # of the given size.
    @ray.remote
    def no_dependency_task(size):
        array = np.zeros(size)
        return array

    # Define a task with a single dependency, which returns its one
    # argument.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    # Launch num_objects instances of the remote task, each dependent on
    # the one before it.
    arg = no_dependency_task.remote(size)
    args = []
    for i in range(num_objects):
        arg = single_dependency.remote(i, arg)
        args.append(arg)

    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get 10 values randomly.
    random_indexes = sorted_random_indexes(num_objects, 10)
    for i in random_indexes:
        value = ray.get(args[i])
        assert value[0] == i
    # Get values sequentially, in chunks.
    num_chunks = 4 * num_nodes
    chunk = num_objects // num_chunks
    for i in range(num_chunks):
        values = ray.get(args[i * chunk:(i + 1) * chunk])
        del values

    # The cluster should have survived all of the evictions above.
    assert cluster.remaining_processes_alive()
@pytest.mark.skip(reason="This test often hangs or fails in CI.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_multiple_recursive(ray_start_reconstruction):
    """Stress-test reconstruction when each task has several dependencies."""
    plasma_store_memory, _, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 100
    size = plasma_store_memory * 2 // (num_objects * 8)

    # Define a root task with no dependencies, which returns a numpy array
    # of the given size.
    @ray.remote
    def no_dependency_task(size):
        array = np.zeros(size)
        return array

    # Define a task with multiple dependencies, which returns its first
    # argument.
    @ray.remote
    def multiple_dependency(i, arg1, arg2, arg3):
        arg1 = np.copy(arg1)
        arg1[0] = i
        return arg1

    # Launch num_args instances of the root task. Then launch num_objects
    # instances of the multi-dependency remote task, each dependent on the
    # num_args tasks before it.
    num_args = 3
    args = []
    for i in range(num_args):
        arg = no_dependency_task.remote(size)
        args.append(arg)
    for i in range(num_objects):
        args.append(multiple_dependency.remote(i, *args[i:i + num_args]))

    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    args = args[num_args:]  # Drop the root objects so args[i] matches i.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get 10 values randomly.
    random_indexes = sorted_random_indexes(num_objects, 10)
    for i in random_indexes:
        value = ray.get(args[i])
        assert value[0] == i

    assert cluster.remaining_processes_alive()
def wait_for_errors(error_check):
    """Poll driver errors (1s interval, ~100s cap) until `error_check` passes.

    Returns the error list; asserts if the condition never became true.
    """
    errors = []
    for _ in range(100):
        errors = flat_errors()
        if error_check(errors):
            break
        time.sleep(1)
    # Make sure that enough errors came through.
    assert error_check(errors)
    return errors
@pytest.mark.skip("This test does not work yet.")
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
def test_nondeterministic_task(ray_start_reconstruction):
    """Re-executed nondeterministic tasks should push hash-mismatch errors."""
    plasma_store_memory, num_nodes, cluster = ray_start_reconstruction
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 1000
    size = plasma_store_memory * 2 // (num_objects * 8)

    # Define a nondeterministic remote task with no dependencies, which
    # returns a random numpy array of the given size. This task should
    # produce an error on the driver if it is ever reexecuted.
    @ray.remote
    def foo(i, size):
        array = np.random.rand(size)
        array[0] = i
        return array

    # Define a deterministic remote task with no dependencies, which
    # returns a numpy array of zeros of the given size.
    @ray.remote
    def bar(i, size):
        array = np.zeros(size)
        array[0] = i
        return array

    # Launch num_objects instances, half deterministic and half
    # nondeterministic.
    args = []
    for i in range(num_objects):
        if i % 2 == 0:
            args.append(foo.remote(i, size))
        else:
            args.append(bar.remote(i, size))

    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i
    # Get each value again to force reconstruction.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i

    def error_check(errors):
        if num_nodes == 1:
            # In a single-node setting, each object is evicted and
            # reconstructed exactly once, so exactly half the objects will
            # produce an error during reconstruction.
            min_errors = num_objects // 2
        else:
            # In a multinode setting, each object is evicted zero or one
            # times, so some of the nondeterministic tasks may not be
            # reexecuted.
            min_errors = 1
        return len(errors) >= min_errors

    errors = wait_for_errors(error_check)
    # Make sure all the errors have the correct type.
    assert all(error["type"] == ray_constants.HASH_MISMATCH_PUSH_ERROR
               for error in errors)

    assert cluster.remaining_processes_alive()
@pytest.mark.skipif(
    os.environ.get("RAY_USE_NEW_GCS") == "on",
    reason="Failing with new GCS API on Linux.")
@pytest.mark.parametrize(
    "ray_start_object_store_memory", [10**9], indirect=True)
def test_driver_put_errors(ray_start_object_store_memory):
    """Fetching an evicted, unreconstructable `ray.put` object should push
    an error to the driver instead of hanging forever."""
    plasma_store_memory = ray_start_object_store_memory
    # Define the size of one task's return argument so that the combined
    # sum of all objects' sizes is at least twice the plasma stores'
    # combined allotted memory.
    num_objects = 100
    size = plasma_store_memory * 2 // (num_objects * 8)

    # Define a task with a single dependency, a numpy array, that returns
    # another array.
    @ray.remote
    def single_dependency(i, arg):
        arg = np.copy(arg)
        arg[0] = i
        return arg

    # Launch num_objects instances of the remote task, each dependent on
    # the one before it. The first instance of the task takes a numpy array
    # as an argument, which is put into the object store.
    args = []
    arg = single_dependency.remote(0, np.zeros(size))
    for i in range(num_objects):
        arg = single_dependency.remote(i, arg)
        args.append(arg)
    # Get each value to force each task to finish. After some number of
    # gets, old values should be evicted.
    for i in range(num_objects):
        value = ray.get(args[i])
        assert value[0] == i

    # Get each value starting from the beginning to force reconstruction.
    # Currently, since we're not able to reconstruct `ray.put` objects that
    # were evicted and whose originating tasks are still running, this
    # for-loop should hang on its first iteration and push an error to the
    # driver.
    ray.worker.global_worker.raylet_client.fetch_or_reconstruct([args[0]],
                                                                False)

    def error_check(errors):
        return len(errors) > 1

    errors = wait_for_errors(error_check)
    assert all(error["type"] == ray_constants.PUT_RECONSTRUCTION_PUSH_ERROR
               or "ray.exceptions.UnreconstructableError" in error["message"]
               for error in errors)
# NOTE(swang): This test tries to launch 1000 workers and breaks.
# TODO(rkn): This test needs to be updated to use pytest.
# class WorkerPoolTests(unittest.TestCase):
#
# def tearDown(self):
# ray.shutdown()
#
# def testBlockingTasks(self):
# @ray.remote
# def f(i, j):
# return (i, j)
#
# @ray.remote
# def g(i):
# # Each instance of g submits and blocks on the result of another remote
# # task.
# object_ids = [f.remote(i, j) for j in range(10)]
# return ray.get(object_ids)
#
# ray.init(num_workers=1)
# ray.get([g.remote(i) for i in range(1000)])
# ray.shutdown()
if __name__ == "__main__":
    import pytest

    # Equivalent to sys.exit(...): SystemExit carries pytest's exit status.
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_stress_sharded.py
|
Python
|
import numpy as np
import os
import pytest
import ray
@pytest.fixture(params=[1, 4])
def ray_start_sharded(request):
    """Start Ray with a parametrized number of redis shards."""
    num_redis_shards = request.param
    # For now, RAY_USE_NEW_GCS supports 1 shard, and credis supports a
    # 1-node chain for that shard only.
    if os.environ.get("RAY_USE_NEW_GCS") == "on":
        num_redis_shards = 1

    # Start the Ray processes.
    ray.init(
        object_store_memory=int(0.5 * 10**9),
        num_cpus=10,
        num_redis_shards=num_redis_shards,
        redis_max_memory=10**7)

    yield None

    # Teardown: stop all Ray processes.
    ray.shutdown()
def test_submitting_many_tasks(ray_start_sharded):
    """Submit many long dependency chains of trivial tasks."""
    @ray.remote
    def f(x):
        return 1

    def chain(length):
        # Build a dependency chain of `length` remote calls.
        obj = 1
        for _ in range(length):
            obj = f.remote(obj)
        return obj

    ray.get([chain(100) for _ in range(100)])
    assert ray.services.remaining_processes_alive()
def test_submitting_many_actors_to_one(ray_start_sharded):
    """Many worker actors all call into a single shared actor."""
    @ray.remote
    class Actor:
        def __init__(self):
            pass

        def ping(self):
            return

    @ray.remote
    class Worker:
        def __init__(self, actor):
            self.actor = actor

        def ping(self):
            # Each worker ping fans into the one shared actor.
            return ray.get(self.actor.ping.remote())

    shared = Actor.remote()
    workers = [Worker.remote(shared) for _ in range(10)]
    for _ in range(10):
        results = ray.get([w.ping.remote() for w in workers])
        assert results == [None] * len(workers)
def test_getting_and_putting(ray_start_sharded):
    """Repeatedly put and get arrays across a range of sizes (10**0..10**7)."""
    for exponent in range(8):
        array = np.zeros(10**exponent)
        for _ in range(100):
            ray.put(array)

        array_id = ray.put(array)
        for _ in range(1000):
            ray.get(array_id)

    assert ray.services.remaining_processes_alive()
def test_getting_many_objects(ray_start_sharded):
    """A single ray.get over a large batch of task outputs."""
    @ray.remote
    def f():
        return 1

    n = 10**4  # TODO(pcm): replace by 10 ** 5 once this is faster.
    results = ray.get([f.remote() for _ in range(n)])
    assert results == [1] * n

    assert ray.services.remaining_processes_alive()
if __name__ == "__main__":
    import pytest

    # Equivalent to sys.exit(...): SystemExit carries pytest's exit status.
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_tempfile.py
|
Python
|
import os
import shutil
import time
import pytest
import ray
from ray.cluster_utils import Cluster
def test_conn_cluster():
    """Connecting to an existing cluster must reject node-level options."""

    def check_rejected(option_name):
        # Passing a node-level option together with `address` should raise
        # with a precise message naming the offending option.
        with pytest.raises(Exception) as exc_info:
            ray.init(
                address="127.0.0.1:6379",
                **{option_name: "/tmp/this_should_fail"})
        assert exc_info.value.args[0] == (
            "When connecting to an existing cluster, "
            "{} must not be provided.".format(option_name))

    check_rejected("plasma_store_socket_name")
    check_rejected("raylet_socket_name")
    check_rejected("temp_dir")
def test_tempdir(shutdown_only):
    """`temp_dir` should override the default /tmp/ray location."""
    default_dir = "/tmp/ray"
    custom_dir = "/tmp/i_am_a_temp_dir"
    shutil.rmtree(default_dir, ignore_errors=True)
    ray.init(temp_dir=custom_dir)
    assert os.path.exists(custom_dir), "Specified temp dir not found."
    assert not os.path.exists(default_dir), \
        "Default temp dir should not exist."
    shutil.rmtree(custom_dir, ignore_errors=True)
def test_tempdir_commandline():
    """`ray start --temp-dir` should override the default /tmp/ray location.

    Fix: the exit status of `ray start` was silently ignored, so a failed
    launch produced a confusing assertion about the directory instead of a
    clear command failure.
    """
    shutil.rmtree("/tmp/ray", ignore_errors=True)
    # os.system returns a nonzero status when the command fails; assert on
    # it so a broken `ray start` fails loudly here.
    assert os.system(
        "ray start --head --temp-dir=/tmp/i_am_a_temp_dir2") == 0
    assert os.path.exists(
        "/tmp/i_am_a_temp_dir2"), "Specified temp dir not found."
    assert not os.path.exists("/tmp/ray"), "Default temp dir should not exist."
    os.system("ray stop")
    shutil.rmtree("/tmp/i_am_a_temp_dir2", ignore_errors=True)
def test_raylet_socket_name(shutdown_only):
    """`raylet_socket_name` should be honored by ray.init and Cluster."""
    socket_path = "/tmp/i_am_a_temp_socket"
    ray.init(raylet_socket_name=socket_path)
    assert os.path.exists(socket_path), "Specified socket path not found."
    ray.shutdown()
    try:
        os.remove(socket_path)
    except OSError:
        pass  # It could have been removed by Ray.

    cluster_socket_path = "/tmp/i_am_a_temp_socket_2"
    cluster = Cluster(True)
    cluster.add_node(raylet_socket_name=cluster_socket_path)
    assert os.path.exists(
        cluster_socket_path), "Specified socket path not found."
    cluster.shutdown()
    try:
        os.remove(cluster_socket_path)
    except OSError:
        pass  # It could have been removed by Ray.
def test_temp_plasma_store_socket(shutdown_only):
    """`plasma_store_socket_name` should be honored by ray.init and Cluster."""
    socket_path = "/tmp/i_am_a_temp_socket"
    ray.init(plasma_store_socket_name=socket_path)
    assert os.path.exists(socket_path), "Specified socket path not found."
    ray.shutdown()
    try:
        os.remove(socket_path)
    except OSError:
        pass  # It could have been removed by Ray.

    cluster_socket_path = "/tmp/i_am_a_temp_socket_2"
    cluster = Cluster(True)
    cluster.add_node(plasma_store_socket_name=cluster_socket_path)
    assert os.path.exists(
        cluster_socket_path), "Specified socket path not found."
    cluster.shutdown()
    try:
        os.remove(cluster_socket_path)
    except OSError:
        pass  # It could have been removed by Ray.
def test_raylet_tempfiles(shutdown_only):
    """Ray should create the expected log and socket files per session."""
    expected_logs = {
        "log_monitor.out", "log_monitor.err", "plasma_store.out",
        "plasma_store.err", "monitor.out", "monitor.err", "raylet_monitor.out",
        "raylet_monitor.err", "redis-shard_0.out", "redis-shard_0.err",
        "redis.out", "redis.err", "raylet.out", "raylet.err"
    }
    expected_sockets = {"plasma_store", "raylet"}

    # No workers: only the core process logs should appear.
    ray.init(num_cpus=0)
    node = ray.worker._global_node
    assert set(os.listdir(node.get_session_dir_path())).issuperset(
        {"sockets", "logs"})
    assert set(os.listdir(node.get_logs_dir_path())).issuperset(expected_logs)
    assert set(os.listdir(node.get_sockets_dir_path())) == expected_sockets
    ray.shutdown()

    # With workers: the same logs plus per-worker log files.
    ray.init(num_cpus=2)
    node = ray.worker._global_node
    assert set(os.listdir(node.get_session_dir_path())).issuperset(
        {"sockets", "logs"})
    time.sleep(3)  # Wait for the workers to start and open their logs.
    log_files = set(os.listdir(node.get_logs_dir_path()))
    assert log_files.issuperset(expected_logs)
    # Check numbers of worker log file (.out and .err per worker).
    assert sum(
        1 for filename in log_files if filename.startswith("worker")) == 4
    assert set(os.listdir(node.get_sockets_dir_path())) == expected_sockets
def test_tempdir_privilege(shutdown_only):
    """ray.init should still produce a session dir when /tmp/ray is 0o000.

    Fix: the test chmod'ed /tmp/ray to 0o000 and never restored the
    permissions, leaving the default temp dir unusable for any test that
    runs afterwards in the same environment.
    """
    os.chmod("/tmp/ray", 0o000)
    try:
        ray.init(num_cpus=1)
        session_dir = ray.worker._global_node.get_session_dir_path()
        assert os.path.exists(session_dir), "Specified socket path not found."
    finally:
        # Restore permissions so later tests can use the default temp dir.
        os.chmod("/tmp/ray", 0o755)
def test_session_dir_uniqueness():
    """Each ray.init should create a distinct session directory.

    Fix: the original added the *bound method* `get_session_dir_path`
    (missing call parentheses) to the set, so three distinct method objects
    were collected and the uniqueness assertion passed vacuously even if
    the paths had been identical.
    """
    session_dirs = set()
    for _ in range(3):
        ray.init(num_cpus=1)
        # Call the accessor -- we want the path string, not the method.
        session_dirs.add(ray.worker._global_node.get_session_dir_path())
        ray.shutdown()
    assert len(session_dirs) == 3
if __name__ == "__main__":
    # Make subprocess happy in bazel.
    os.environ.update(LC_ALL="en_US.UTF-8", LANG="en_US.UTF-8")
    # Equivalent to sys.exit(...): SystemExit carries pytest's exit status.
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_tensorflow.py
|
Python
|
from numpy.testing import assert_almost_equal
import tensorflow.compat.v1 as tf
import ray
import ray.experimental.tf_utils
def make_linear_network(w_name=None, b_name=None):
    """Build a y = w * x + b model in the current default graph.

    Args:
        w_name: Optional name for the weight variable.
        b_name: Optional name for the bias variable.

    Returns:
        Tuple of (mean-squared-error loss op, global variable initializer,
        x placeholder, y placeholder).
    """
    # Define the inputs.
    x_data = tf.placeholder(tf.float32, shape=[100])
    y_data = tf.placeholder(tf.float32, shape=[100])
    # Define the weights and computation.
    w = tf.Variable(tf.random_uniform([1], -1.0, 1.0), name=w_name)
    b = tf.Variable(tf.zeros([1]), name=b_name)
    y = w * x_data + b
    # Return the loss and weight initializer.
    return (tf.reduce_mean(tf.square(y - y_data)),
            tf.global_variables_initializer(), x_data, y_data)
class LossActor:
    """Holds a linear network plus one extra explicit input variable.

    Exercises TensorFlowVariables' `input_variables` argument, both with
    and without a loss op.
    """

    def __init__(self, use_loss=True):
        # Uses a separate graph for each network.
        with tf.Graph().as_default():
            # Create the network (and one standalone variable).
            var = [tf.Variable(1)]
            loss, init, _, _ = make_linear_network()
            sess = tf.Session()
            # Additional code for setting and getting the weights.
            weights = ray.experimental.tf_utils.TensorFlowVariables(
                loss if use_loss else None, sess, input_variables=var)
            # Return all of the data needed to use the network.
            self.values = [weights, init, sess]
            sess.run(init)

    def set_and_get_weights(self, weights):
        # Round-trip the weights through TensorFlowVariables.
        self.values[0].set_weights(weights)
        return self.values[0].get_weights()

    def get_weights(self):
        return self.values[0].get_weights()
class NetActor:
    """Holds one linear network in its own graph and session."""

    def __init__(self):
        # Uses a separate graph for each network.
        with tf.Graph().as_default():
            # Create the network.
            loss, init, _, _ = make_linear_network()
            sess = tf.Session()
            # Additional code for setting and getting the weights.
            variables = ray.experimental.tf_utils.TensorFlowVariables(
                loss, sess)
            # Return all of the data needed to use the network.
            self.values = [variables, init, sess]
            sess.run(init)

    def set_and_get_weights(self, weights):
        # Round-trip the weights through TensorFlowVariables.
        self.values[0].set_weights(weights)
        return self.values[0].get_weights()

    def get_weights(self):
        return self.values[0].get_weights()
class TrainActor:
    """Linear network plus a gradient-descent training step."""

    def __init__(self):
        # Almost the same as above, but now returns the placeholders and
        # gradient.
        with tf.Graph().as_default():
            loss, init, x_data, y_data = make_linear_network()
            sess = tf.Session()
            variables = ray.experimental.tf_utils.TensorFlowVariables(
                loss, sess)
            optimizer = tf.train.GradientDescentOptimizer(0.9)
            grads = optimizer.compute_gradients(loss)
            train = optimizer.apply_gradients(grads)
            self.values = [
                loss, variables, init, sess, grads, train, [x_data, y_data]
            ]
            sess.run(init)

    def training_step(self, weights):
        """Set `weights`, then return gradients on a fixed synthetic batch."""
        _, variables, _, sess, grads, _, placeholders = self.values
        variables.set_weights(weights)
        return sess.run(
            [grad[0] for grad in grads],
            feed_dict=dict(zip(placeholders, [[1] * 100, [2] * 100])))

    def get_weights(self):
        return self.values[1].get_weights()
def test_tensorflow_variables(ray_start_2_cpus):
    """get/set/flat weight round trips, with two networks in one graph."""
    sess = tf.Session()
    loss, init, _, _ = make_linear_network()
    sess.run(init)

    variables = ray.experimental.tf_utils.TensorFlowVariables(loss, sess)
    weights = variables.get_weights()

    # Perturb every weight and verify the round trip.
    for (name, val) in weights.items():
        weights[name] += 1.0

    variables.set_weights(weights)
    assert weights == variables.get_weights()

    # Second network in the same graph, with explicit variable names.
    loss2, init2, _, _ = make_linear_network("w", "b")
    sess.run(init2)

    variables2 = ray.experimental.tf_utils.TensorFlowVariables(loss2, sess)
    weights2 = variables2.get_weights()

    for (name, val) in weights2.items():
        weights2[name] += 2.0

    variables2.set_weights(weights2)
    assert weights2 == variables2.get_weights()

    # Flat (concatenated vector) get/set interface.
    flat_weights = variables2.get_flat() + 2.0
    variables2.set_flat(flat_weights)
    assert_almost_equal(flat_weights, variables2.get_flat())

    # A session can also be attached after construction.
    variables3 = ray.experimental.tf_utils.TensorFlowVariables([loss2])
    assert variables3.sess is None
    sess = tf.Session()
    variables3.set_session(sess)
    assert variables3.sess == sess
# Test that the variable names for the two different nets are not
# modified by TensorFlow to be unique (i.e., they should already
# be unique because of the variable prefix).
def test_variable_name_collision(ray_start_2_cpus):
    net_a = NetActor()
    net_b = NetActor()
    # Setting one net's weights from the other's dict only works when the
    # variable names (the dict keys) line up between the two graphs.
    net_a.values[0].set_weights(net_b.values[0].get_weights())
# Test that TensorFlowVariables can take in addition variables through
# input_variables arg and with no loss.
def test_additional_variables_no_loss(ray_start_2_cpus):
    actor = LossActor(use_loss=False)
    # Only the explicitly passed input variable should be tracked.
    assert len(actor.values[0].variables.items()) == 1
    assert len(actor.values[0].placeholders.items()) == 1

    actor.values[0].set_weights(actor.values[0].get_weights())
# Test that TensorFlowVariables can take in addition variables through
# input_variables arg and with a loss.
def test_additional_variables_with_loss(ray_start_2_cpus):
    actor = LossActor()
    # The network's variables plus the explicit input variable.
    assert len(actor.values[0].variables.items()) == 3
    assert len(actor.values[0].placeholders.items()) == 3

    actor.values[0].set_weights(actor.values[0].get_weights())
# Test that different networks on the same worker are independent and
# we can get/set their weights without any interaction.
def test_networks_independent(ray_start_2_cpus):
    # Note we use only one worker to ensure that all of the remote
    # functions run on the same worker.
    net_a = NetActor()
    net_b = NetActor()
    # Make sure the two networks have different weights. TODO(rkn): Note
    # that equality comparisons of numpy arrays normally does not work.
    # This only works because at the moment they have size 1.
    weights_a = net_a.get_weights()
    weights_b = net_b.get_weights()
    assert weights_a != weights_b

    # Setting a net's own weights must leave them unchanged.
    assert weights_a == net_a.set_and_get_weights(weights_a)
    assert weights_b == net_b.set_and_get_weights(weights_b)

    # Swapping the weights between the two nets must also round-trip.
    assert weights_a == net_b.set_and_get_weights(weights_a)
    assert weights_b == net_a.set_and_get_weights(weights_b)
# This test creates an additional network on the driver so that the
# tensorflow variables on the driver and the worker differ.
def test_network_driver_worker_independent(ray_start_2_cpus):
    # Create a network on the driver locally.
    driver_sess = tf.Session()
    driver_loss, driver_init, _, _ = make_linear_network()
    ray.experimental.tf_utils.TensorFlowVariables(driver_loss, driver_sess)
    driver_sess.run(driver_init)

    # The remote actor's get/set round trip must be unaffected by the
    # driver-side graph.
    remote_net = ray.remote(NetActor).remote()
    weights = ray.get(remote_net.get_weights.remote())
    roundtripped = ray.get(
        remote_net.set_and_get_weights.remote(
            remote_net.get_weights.remote()))
    assert weights == roundtripped
def test_variables_control_dependencies(ray_start_2_cpus):
    """TensorFlowVariables should find variables reachable through an
    optimizer op, including the optimizer's slot variables."""
    # Creates a network and appends a momentum optimizer.
    sess = tf.Session()
    loss, init, _, _ = make_linear_network()
    minimizer = tf.train.MomentumOptimizer(0.9, 0.9).minimize(loss)
    net_vars = ray.experimental.tf_utils.TensorFlowVariables(minimizer, sess)
    sess.run(init)

    # Tests if all variables are properly retrieved, 2 variables and 2
    # momentum variables.
    assert len(net_vars.variables.items()) == 4
def test_remote_training_step(ray_start_2_cpus):
    """A remote TrainActor should complete one training step end to end."""
    actor = ray.remote(TrainActor).remote()
    ray.get(actor.training_step.remote(actor.get_weights.remote()))
def test_remote_training_loss(ray_start_2_cpus):
    """Average gradients from remote steps and apply them locally."""
    net = ray.remote(TrainActor).remote()
    net_values = TrainActor().values
    loss, variables, _, sess, grads, train, placeholders = net_values

    before_acc = sess.run(
        loss, feed_dict=dict(zip(placeholders, [[2] * 100, [4] * 100])))

    for _ in range(3):
        # Fetch gradients from two remote steps and average them element-wise.
        gradients_list = ray.get([
            net.training_step.remote(variables.get_weights()) for _ in range(2)
        ])
        mean_grads = [
            sum(gradients[i]
                for gradients in gradients_list) / len(gradients_list)
            for i in range(len(gradients_list[0]))
        ]
        feed_dict = {
            grad[0]: mean_grad
            for (grad, mean_grad) in zip(grads, mean_grads)
        }
        sess.run(train, feed_dict=feed_dict)
    after_acc = sess.run(
        loss, feed_dict=dict(zip(placeholders, [[2] * 100, [4] * 100])))
    # NOTE(review): despite the `_acc` names, these are runs of `loss`, and
    # the assertion expects the value to *increase* after training -- confirm
    # the intended direction of this check.
    assert before_acc < after_acc
if __name__ == "__main__":
    import pytest

    # Equivalent to sys.exit(...): SystemExit carries pytest's exit status.
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_unreconstructable_errors.py
|
Python
|
import numpy as np
import unittest
import ray
from ray import ray_constants
class TestUnreconstructableErrors(unittest.TestCase):
    """Evicted objects whose lineage cannot be replayed should raise
    UnreconstructableError when fetched."""

    def setUp(self):
        # Small object store and a tiny redis cap so evictions (including
        # lineage eviction) happen quickly.
        ray.init(
            num_cpus=1,
            object_store_memory=150 * 1024 * 1024,
            redis_max_memory=10000000)

    def tearDown(self):
        ray.shutdown()

    def testDriverPutEvictedCannotReconstruct(self):
        # A weakref'd driver-side ray.put object may be evicted; driver
        # puts have no task lineage, so a later get must fail.
        x_id = ray.put(np.zeros(1 * 1024 * 1024), weakref=True)
        ray.get(x_id)
        # Fill the store to push x_id out.
        for _ in range(20):
            ray.put(np.zeros(10 * 1024 * 1024))
        self.assertRaises(ray.exceptions.UnreconstructableError,
                          lambda: ray.get(x_id))

    def testLineageEvictedReconstructionFails(self):
        if ray_constants.direct_call_enabled():
            return  # not relevant
        @ray.remote
        def f(data):
            return 0
        x_id = f.remote(None)
        ray.get(x_id)
        # Submit many tasks so that, with the tiny redis_max_memory above,
        # x_id's lineage gets evicted and can no longer be replayed.
        # Hold references to the ray.put objects so they aren't LRU'd.
        oids = []
        for _ in range(400):
            new_oids = [f.remote(np.zeros(10000)) for _ in range(50)]
            oids.extend(new_oids)
            ray.get(new_oids)
        self.assertRaises(ray.exceptions.UnreconstructableError,
                          lambda: ray.get(x_id))
if __name__ == "__main__":
    import pytest

    # Equivalent to sys.exit(...): SystemExit carries pytest's exit status.
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tests/test_webui.py
|
Python
|
import re
import sys
import time
import pytest
import requests
import ray
@pytest.mark.skipif(
    sys.version_info < (3, 5, 3), reason="requires python3.5.3 or higher")
def test_get_webui(shutdown_only):
    """The dashboard URL should be reported and serve /api/node_info.

    Fix: the retry loop previously busy-spun on ConnectionError with no
    back-off, hammering the not-yet-started dashboard in a tight loop for
    up to 30 seconds.
    """
    addresses = ray.init(include_webui=True, num_cpus=1)
    webui_url = addresses["webui_url"]
    assert ray.get_webui_url() == webui_url
    assert re.match(r"^(localhost|\d+\.\d+\.\d+\.\d+):\d+$", webui_url)

    start_time = time.time()
    while True:
        try:
            node_info = requests.get("http://" + webui_url +
                                     "/api/node_info").json()
            break
        except requests.exceptions.ConnectionError:
            if time.time() > start_time + 30:
                raise Exception(
                    "Timed out while waiting for dashboard to start.")
            # Back off briefly instead of retrying in a tight loop.
            time.sleep(1)
    assert node_info["error"] is None
    assert node_info["result"] is not None
    assert isinstance(node_info["timestamp"], float)
if __name__ == "__main__":
    import pytest

    # Equivalent to sys.exit(...): SystemExit carries pytest's exit status.
    raise SystemExit(pytest.main(["-v", __file__]))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/__init__.py
|
Python
|
# Public entry points of the `ray.tune` package.
from ray.tune.error import TuneError
from ray.tune.tune import run_experiments, run
from ray.tune.experiment import Experiment
from ray.tune.analysis import ExperimentAnalysis, Analysis
from ray.tune.registry import register_env, register_trainable
from ray.tune.trainable import Trainable
from ray.tune.durable_trainable import DurableTrainable
from ray.tune.suggest import grid_search
from ray.tune.sample import (function, sample_from, uniform, choice, randint,
                             randn, loguniform)

__all__ = [
    "Trainable",
    "DurableTrainable",
    "TuneError",
    "grid_search",
    "register_env",
    "register_trainable",
    "run",
    "run_experiments",
    "Experiment",
    "function",
    "sample_from",
    # NOTE(review): "track" is not imported above; `from ray.tune import *`
    # would resolve it as the `ray.tune.track` submodule -- confirm that
    # submodule exists.
    "track",
    "uniform",
    "choice",
    "randint",
    "randn",
    "loguniform",
    "ExperimentAnalysis",
    "Analysis",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/analysis/__init__.py
|
Python
|
# Re-export the analysis classes at the package level.
from ray.tune.analysis.experiment_analysis import ExperimentAnalysis, Analysis

__all__ = ["ExperimentAnalysis", "Analysis"]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/analysis/experiment_analysis.py
|
Python
|
import json
import logging
import os
try:
import pandas as pd
except ImportError:
pd = None
from ray.tune.checkpoint_manager import Checkpoint
from ray.tune.error import TuneError
from ray.tune.result import EXPR_PROGRESS_FILE, EXPR_PARAM_FILE,\
CONFIG_PREFIX, TRAINING_ITERATION
from ray.tune.trial import Trial
from ray.tune.trainable import TrainableUtil
logger = logging.getLogger(__name__)
class Analysis:
"""Analyze all results from a directory of experiments."""
def __init__(self, experiment_dir):
experiment_dir = os.path.expanduser(experiment_dir)
if not os.path.isdir(experiment_dir):
raise ValueError(
"{} is not a valid directory.".format(experiment_dir))
self._experiment_dir = experiment_dir
self._configs = {}
self._trial_dataframes = {}
if not pd:
logger.warning(
"pandas not installed. Run `pip install pandas` for "
"Analysis utilities.")
else:
self.fetch_trial_dataframes()
def dataframe(self, metric=None, mode=None):
"""Returns a pandas.DataFrame object constructed from the trials.
Args:
metric (str): Key for trial info to order on.
If None, uses last result.
mode (str): One of [min, max].
"""
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs(prefix=True)
for path, config in all_configs.items():
if path in rows:
rows[path].update(config)
rows[path].update(logdir=path)
return pd.DataFrame(list(rows.values()))
def get_best_config(self, metric, mode="max"):
"""Retrieve the best config corresponding to the trial.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
"""
rows = self._retrieve_rows(metric=metric, mode=mode)
all_configs = self.get_all_configs()
compare_op = max if mode == "max" else min
best_path = compare_op(rows, key=lambda k: rows[k][metric])
return all_configs[best_path]
def get_best_logdir(self, metric, mode="max"):
"""Retrieve the logdir corresponding to the best trial.
Args:
metric (str): Key for trial info to order on.
mode (str): One of [min, max].
"""
df = self.dataframe(metric=metric, mode=mode)
if mode == "max":
return df.iloc[df[metric].idxmax()].logdir
elif mode == "min":
return df.iloc[df[metric].idxmin()].logdir
def fetch_trial_dataframes(self):
fail_count = 0
for path in self._get_trial_paths():
try:
self.trial_dataframes[path] = pd.read_csv(
os.path.join(path, EXPR_PROGRESS_FILE))
except Exception:
fail_count += 1
if fail_count:
logger.debug(
"Couldn't read results from {} paths".format(fail_count))
return self.trial_dataframes
def get_all_configs(self, prefix=False):
"""Returns a list of all configurations.
Parameters:
prefix (bool): If True, flattens the config dict
and prepends `config/`.
"""
fail_count = 0
for path in self._get_trial_paths():
try:
with open(os.path.join(path, EXPR_PARAM_FILE)) as f:
config = json.load(f)
if prefix:
for k in list(config):
config[CONFIG_PREFIX + k] = config.pop(k)
self._configs[path] = config
except Exception:
fail_count += 1
if fail_count:
logger.warning(
"Couldn't read config from {} paths".format(fail_count))
return self._configs
def get_trial_checkpoints_paths(self, trial, metric=TRAINING_ITERATION):
"""Returns a list of [path, metric] lists for all disk checkpoints of
a trial.
Arguments:
trial(Trial): The log directory of a trial, or a trial instance.
metric (str): key for trial info to return, e.g. "mean_accuracy".
"training_iteration" is used by default.
"""
if isinstance(trial, str):
trial_dir = os.path.expanduser(trial)
# get checkpoints from logdir
chkpt_df = TrainableUtil.get_checkpoints_paths(trial_dir)
# join with trial dataframe to get metrics
trial_df = self.trial_dataframes[trial_dir]
path_metric_df = chkpt_df.merge(
trial_df, on="training_iteration", how="inner")
return path_metric_df[["chkpt_path", metric]].values.tolist()
elif isinstance(trial, Trial):
checkpoints = trial.checkpoint_manager.best_checkpoints()
# TODO(ujvl): Remove condition once the checkpoint manager is
# modified to only track PERSISTENT checkpoints.
return [[c.value, c.result[metric]] for c in checkpoints
if c.storage == Checkpoint.PERSISTENT]
else:
raise ValueError("trial should be a string or a Trial instance.")
def _retrieve_rows(self, metric=None, mode=None):
assert mode is None or mode in ["max", "min"]
rows = {}
for path, df in self.trial_dataframes.items():
if mode == "max":
idx = df[metric].idxmax()
elif mode == "min":
idx = df[metric].idxmin()
else:
idx = -1
rows[path] = df.iloc[idx].to_dict()
return rows
def _get_trial_paths(self):
_trial_paths = []
for trial_path, _, files in os.walk(self._experiment_dir):
if EXPR_PROGRESS_FILE in files:
_trial_paths += [trial_path]
if not _trial_paths:
raise TuneError("No trials found in {}.".format(
self._experiment_dir))
return _trial_paths
    @property
    def trial_dataframes(self):
        """Dict mapping each trial's logdir to its progress dataframe.

        Populated by ``fetch_trial_dataframes``, which keys entries by the
        trial's path (the original docstring called this a "list").
        """
        return self._trial_dataframes
class ExperimentAnalysis(Analysis):
    """Analyze results from a Tune experiment.

    Parameters:
        experiment_checkpoint_path (str): Path to a json file
            representing an experiment state. Corresponds to
            Experiment.local_dir/Experiment.name/experiment_state.json

    Example:
        >>> tune.run(my_trainable, name="my_exp", local_dir="~/tune_results")
        >>> analysis = ExperimentAnalysis(
        >>>     experiment_checkpoint_path="~/tune_results/my_exp/state.json")
    """

    def __init__(self, experiment_checkpoint_path, trials=None):
        """Initializer.

        Args:
            experiment_checkpoint_path (str): Path to the experiment
                state json file.
            trials (list|None): List of trials that can be accessed via
                `analysis.trials`.
        """
        with open(experiment_checkpoint_path) as f:
            experiment_state = json.load(f)

        self._experiment_state = experiment_state
        if "checkpoints" not in experiment_state:
            raise TuneError("Experiment state invalid; no checkpoints found.")
        self._checkpoints = experiment_state["checkpoints"]
        self.trials = trials
        # The Analysis base operates on the experiment directory, which is
        # the parent of the checkpoint file.
        super(ExperimentAnalysis, self).__init__(
            os.path.dirname(experiment_checkpoint_path))

    def get_best_trial(self, metric, mode="max", scope="all"):
        """Retrieve the best trial object.

        Compares all trials' scores on `metric`.

        Args:
            metric (str): Key for trial info to order on.
            mode (str): One of [min, max].
            scope (str): One of [all, last]. If `scope=last`, only look at
                each trial's final step for `metric`, and compare across
                trials based on `mode=[min,max]`. If `scope=all`, find each
                trial's min/max score for `metric` based on `mode`, and
                compare trials based on `mode=[min,max]`.
        """
        if mode not in ["max", "min"]:
            raise ValueError(
                "ExperimentAnalysis: attempting to get best trial for "
                "metric {} for mode {} not in [\"max\", \"min\"]".format(
                    metric, mode))
        if scope not in ["all", "last"]:
            raise ValueError(
                "ExperimentAnalysis: attempting to get best trial for "
                "metric {} for scope {} not in [\"all\", \"last\"]".format(
                    metric, scope))

        best_trial = None
        best_score = None
        for candidate in self.trials:
            analysis = candidate.metric_analysis
            if metric not in analysis:
                # Trial never reported this metric; it cannot win.
                continue
            score = analysis[metric]["last" if scope == "last" else mode]
            # Update only on strict improvement (ties keep the earlier trial).
            improved = (best_score is None
                        or (mode == "max" and score > best_score)
                        or (mode == "min" and score < best_score))
            if improved:
                best_score = score
                best_trial = candidate
        return best_trial

    def get_best_config(self, metric, mode="max", scope="all"):
        """Retrieve the best config corresponding to the trial.

        Compares all trials' scores on `metric`.

        Args:
            metric (str): Key for trial info to order on.
            mode (str): One of [min, max].
            scope (str): One of [all, last]. If `scope=last`, only look at
                each trial's final step for `metric`, and compare across
                trials based on `mode=[min,max]`. If `scope=all`, find each
                trial's min/max score for `metric` based on `mode`, and
                compare trials based on `mode=[min,max]`.
        """
        winner = self.get_best_trial(metric, mode, scope)
        return winner.config if winner else None

    def get_best_logdir(self, metric, mode="max", scope="all"):
        """Retrieve the logdir corresponding to the best trial.

        Compares all trials' scores on `metric`.

        Args:
            metric (str): Key for trial info to order on.
            mode (str): One of [min, max].
            scope (str): One of [all, last]. If `scope=last`, only look at
                each trial's final step for `metric`, and compare across
                trials based on `mode=[min,max]`. If `scope=all`, find each
                trial's min/max score for `metric` based on `mode`, and
                compare trials based on `mode=[min,max]`.
        """
        winner = self.get_best_trial(metric, mode, scope)
        return winner.logdir if winner else None

    def stats(self):
        """Returns a dictionary of the statistics of the experiment."""
        return self._experiment_state.get("stats")

    def runner_data(self):
        """Returns a dictionary of the TrialRunner data."""
        return self._experiment_state.get("runner_data")

    def _get_trial_paths(self):
        """Overwrites Analysis to only have trials of one experiment."""
        if self.trials:
            return [t.logdir for t in self.trials]
        logger.warning("No `self.trials`. Drawing logdirs from checkpoint "
                       "file. This may result in some information that is "
                       "out of sync, as checkpointing is periodic.")
        paths = [checkpoint["logdir"] for checkpoint in self._checkpoints]
        if not paths:
            raise TuneError("No trials found.")
        return paths
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automl/__init__.py
|
Python
|
from ray.tune.automl.genetic_searcher import GeneticSearch
from ray.tune.automl.search_policy import GridSearch, RandomSearch
from ray.tune.automl.search_space import SearchSpace, \
ContinuousSpace, DiscreteSpace
# Public AutoML API re-exported at the package level.
__all__ = [
    "ContinuousSpace",
    "DiscreteSpace",
    "SearchSpace",
    "GridSearch",
    "RandomSearch",
    "GeneticSearch",
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automl/genetic_searcher.py
|
Python
|
import logging
import numpy as np
from ray.tune.automl.search_policy import AutoMLSearcher
# Module-level logger plus the prefix prepended to every genetic-search
# log message so its output is easy to grep.
logger = logging.getLogger(__name__)
LOGGING_PREFIX = "[GENETIC SEARCH] "
class GeneticSearch(AutoMLSearcher):
    """Implement the genetic search.

    Keep a collection of top-K parameter permutations as base genes,
    then apply selection, crossover, and mutation to them to generate
    new genes (a.k.a new generation). Hopefully, the performance of
    the top population would increase generation by generation.
    """

    def __init__(self,
                 search_space,
                 reward_attr,
                 max_generation=2,
                 population_size=10,
                 population_decay=0.95,
                 keep_top_ratio=0.2,
                 selection_bound=0.4,
                 crossover_bound=0.4):
        """Initialize GeneticSearcher.

        Args:
            search_space (SearchSpace): The space to search.
            reward_attr: The attribute name of the reward in the result.
            max_generation: Max iteration number of genetic search.
            population_size: Number of trials of the initial generation.
            population_decay: Decay ratio of population size for the
                next generation.
            keep_top_ratio: Ratio of the top performance population.
            selection_bound: Threshold for performing selection.
            crossover_bound: Threshold for performing crossover.
        """
        super(GeneticSearch, self).__init__(search_space, reward_attr)
        self._cur_generation = 1
        self._max_generation = max_generation
        self._population_size = population_size
        self._population_decay = population_decay
        self._keep_top_ratio = keep_top_ratio
        self._selection_bound = selection_bound
        self._crossover_bound = crossover_bound
        # Seed the first generation with random one-hot encodings and the
        # concrete configs they decode to.
        self._cur_config_list = []
        self._cur_encoding_list = []
        for _ in range(population_size):
            one_hot = self.search_space.generate_random_one_hot_encoding()
            self._cur_encoding_list.append(one_hot)
            self._cur_config_list.append(
                self.search_space.apply_one_hot_encoding(one_hot))

    def _select(self):
        """Hand the current generation's configs/genes to the framework."""
        population_size = len(self._cur_config_list)
        logger.info(
            LOGGING_PREFIX + "Generate the %sth generation, population=%s",
            self._cur_generation, population_size)
        return self._cur_config_list, self._cur_encoding_list

    def _feedback(self, trials):
        """Breed the next generation from the finished trials, or stop."""
        self._cur_generation += 1
        if self._cur_generation > self._max_generation:
            return AutoMLSearcher.TERMINATE

        # Best-performing trials first; their genes seed the next round.
        sorted_trials = sorted(
            trials,
            key=lambda t: t.best_result[self.reward_attr],
            reverse=True)
        self._cur_encoding_list = self._next_generation(sorted_trials)
        self._cur_config_list = []
        for one_hot in self._cur_encoding_list:
            self._cur_config_list.append(
                self.search_space.apply_one_hot_encoding(one_hot))
        return AutoMLSearcher.CONTINUE

    def _next_generation(self, sorted_trials):
        """Generate genes (encodings) for the next generation.

        Use the top K (_keep_top_ratio) trials of the last generation
        as candidates to generate the next generation. The action could
        be selection, crossover and mutation according corresponding
        ratio (_selection_bound, _crossover_bound).

        Args:
            sorted_trials: List of finished trials with top
                performance ones first.

        Returns:
            A list of new genes (encodings)
        """
        candidate = []
        next_generation = []
        num_population = self._next_population_size(len(sorted_trials))
        # Always keep at least 2 elites so crossover/selection have two
        # distinct parents to draw from.
        top_num = int(max(num_population * self._keep_top_ratio, 2))
        for i in range(top_num):
            candidate.append(sorted_trials[i].extra_arg)
            next_generation.append(sorted_trials[i].extra_arg)

        # Fill the rest of the population by a random genetic operator:
        # [0, selection_bound) -> selection,
        # [selection_bound, selection_bound + crossover_bound) -> crossover,
        # otherwise -> mutation.
        for i in range(top_num, num_population):
            flip_coin = np.random.uniform()
            if flip_coin < self._selection_bound:
                next_generation.append(GeneticSearch._selection(candidate))
            else:
                if flip_coin < self._selection_bound + self._crossover_bound:
                    next_generation.append(GeneticSearch._crossover(candidate))
                else:
                    next_generation.append(GeneticSearch._mutation(candidate))
        return next_generation

    def _next_population_size(self, last_population_size):
        """Calculate the population size of the next generation.

        Intuitively, the population should decay after each iteration since
        it should converge. It can also decrease the total resource required.

        Args:
            last_population_size: The last population size.

        Returns:
            The new population size.
        """
        # TODO: implement an generic resource allocate algorithm.
        return int(max(last_population_size * self._population_decay, 3))

    @staticmethod
    def _selection(candidate):
        """Perform selection action to candidates.

        For example, new gene = sample_1 + the 5th bit of sample2.

        Args:
            candidate: List of candidate genes (encodings).

        Examples:
            >>> # Genes that represent 3 parameters
            >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
            >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
            >>> new_gene = _selection([gene1, gene2])
            >>> # new_gene could be gene1 overwritten with the
            >>> # 2nd parameter of gene2
            >>> # in which case:
            >>> # new_gene[0] = gene1[0]
            >>> # new_gene[1] = gene2[1]
            >>> # new_gene[2] = gene1[2]

        Returns:
            New gene (encoding)
        """
        sample_index1 = np.random.choice(len(candidate))
        sample_index2 = np.random.choice(len(candidate))
        sample_1 = candidate[sample_index1]
        sample_2 = candidate[sample_index2]
        select_index = np.random.choice(len(sample_1))
        logger.info(
            LOGGING_PREFIX + "Perform selection from %sth to %sth at index=%s",
            sample_index2, sample_index1, select_index)

        next_gen = []
        for i in range(len(sample_1)):
            # BUGFIX: ``select_index`` is a numpy integer, so the original
            # identity check (``i is select_index``) never matched and the
            # selection was silently a no-op. Equality is required here.
            if i == select_index:
                next_gen.append(sample_2[i])
            else:
                next_gen.append(sample_1[i])
        return next_gen

    @staticmethod
    def _crossover(candidate):
        """Perform crossover action to candidates.

        For example, new gene = 60% sample_1 + 40% sample_2.

        Args:
            candidate: List of candidate genes (encodings).

        Examples:
            >>> # Genes that represent 3 parameters
            >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
            >>> gene2 = np.array([[0, 1, 0], [1, 0], [0, 1]])
            >>> new_gene = _crossover([gene1, gene2])
            >>> # new_gene could be the first [n=1] parameters of
            >>> # gene1 + the rest of gene2
            >>> # in which case:
            >>> # new_gene[0] = gene1[0]
            >>> # new_gene[1] = gene2[1]
            >>> # new_gene[2] = gene2[2]

        Returns:
            New gene (encoding)
        """
        sample_index1 = np.random.choice(len(candidate))
        sample_index2 = np.random.choice(len(candidate))
        sample_1 = candidate[sample_index1]
        sample_2 = candidate[sample_index2]
        # Cut somewhere in the middle 40% of the gene so both parents
        # contribute a meaningful share.
        cross_index = int(len(sample_1) * np.random.uniform(low=0.3, high=0.7))
        logger.info(
            LOGGING_PREFIX +
            "Perform crossover between %sth and %sth at index=%s",
            sample_index1, sample_index2, cross_index)

        next_gen = []
        for i in range(len(sample_1)):
            if i > cross_index:
                next_gen.append(sample_2[i])
            else:
                next_gen.append(sample_1[i])
        return next_gen

    @staticmethod
    def _mutation(candidate, rate=0.1):
        """Perform mutation action to candidates.

        For example, randomly change 10% of original sample

        Args:
            candidate: List of candidate genes (encodings).
            rate: Percentage of mutation bits

        Examples:
            >>> # Genes that represent 3 parameters
            >>> gene1 = np.array([[0, 0, 1], [0, 1], [1, 0]])
            >>> new_gene = _mutation([gene1])
            >>> # new_gene could be the gene1 with the 3rd parameter changed
            >>> # new_gene[0] = gene1[0]
            >>> # new_gene[1] = gene1[1]
            >>> # new_gene[2] = [0, 1] != gene1[2]

        Returns:
            New gene (encoding)
        """
        sample_index = np.random.choice(len(candidate))
        sample = candidate[sample_index]
        idx_list = []
        # NOTE(review): the one-hot fields are flipped in place, so this
        # also mutates the gene stored in ``candidate`` — presumably
        # intentional, but worth confirming.
        for i in range(int(max(len(sample) * rate, 1))):
            idx = np.random.choice(len(sample))
            idx_list.append(idx)
            field = sample[idx]  # one-hot encoding
            field[np.argmax(field)] = 0
            bit = np.random.choice(field.shape[0])
            field[bit] = 1
        logger.info(LOGGING_PREFIX + "Perform mutation on %sth at index=%s",
                    sample_index, str(idx_list))
        return sample
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automl/search_policy.py
|
Python
|
import time
import copy
import logging
from ray.tune.trial import Trial
from ray.tune.suggest import SearchAlgorithm
from ray.tune.experiment import convert_to_experiment_list
from ray.tune.suggest.variant_generator import generate_variants
from ray.tune.config_parser import make_parser, create_trial_from_spec
# Module-level logger shared by all search policies in this file.
logger = logging.getLogger(__name__)
def deep_insert(path_list, value, config):
    """Inserts value into config by path, generating intermediate dictionaries.

    Example:
        >>> deep_insert(path.split("."), value, {})
    """
    node = config
    # Walk (and create) every intermediate level; the final path element
    # is the key the value is stored under.
    for key in path_list[:-1]:
        node = node.setdefault(key, {})
    node[path_list[-1]] = value
class AutoMLSearcher(SearchAlgorithm):
    """Base class for AutoML search algorithm.

    It works in a round-by-round way. For each experiment round,
    it generates a bunch of parameter config permutations, submits
    and keeps track of them. Once all of them finish, results will
    be fed back to the algorithm as a whole.
    """

    # Actions a subclass's _feedback() may return to drive the round loop.
    CONTINUE = "CONTINUE"
    TERMINATE = "TERMINATE"

    def __init__(self, search_space, reward_attr):
        """Initialize AutoMLSearcher.

        Arguments:
            search_space (SearchSpace): The space to search.
            reward_attr: The attribute name of the reward in the result.
        """
        # Pass experiment later to allow construction without this parameter
        super(AutoMLSearcher, self).__init__()
        self.search_space = search_space
        self.reward_attr = reward_attr
        self.experiment_list = []
        self.best_trial = None
        self._is_finished = False
        self._parser = make_parser()
        # Number of trials in the current round that have not completed;
        # next_trials() refuses to start a new round until this hits 0.
        self._unfinished_count = 0
        # trial_id -> Trial for the round currently in flight.
        self._running_trials = {}
        # trial_id -> Trial accumulated over all finished rounds.
        self._completed_trials = {}
        self._iteration = 0
        self._total_trial_num = 0
        self._start_ts = 0

    def add_configurations(self, experiments):
        self.experiment_list = convert_to_experiment_list(experiments)

    def get_best_trial(self):
        """Returns the Trial object with the best reward_attr"""
        return self.best_trial

    def next_trials(self):
        # Returns [] while the previous round is still running; a new round
        # only starts once every trial of the last one has completed.
        if self._unfinished_count > 0:
            # Last round not finished
            return []
        trials = []
        # Subclasses supply the parameter permutations (and optionally one
        # extra argument per permutation, e.g. the gene encoding).
        raw_param_list, extra_arg_list = self._select()
        if not extra_arg_list:
            extra_arg_list = [None] * len(raw_param_list)
        for exp in self.experiment_list:
            for param_config, extra_arg in zip(raw_param_list, extra_arg_list):
                tag = ""
                new_spec = copy.deepcopy(exp.spec)
                for path, value in param_config.items():
                    # Build a readable experiment tag from the leaf key and
                    # splice the value into the nested config dict.
                    tag += "%s=%s-" % (path.split(".")[-1], value)
                    deep_insert(path.split("."), value, new_spec["config"])
                trial = create_trial_from_spec(
                    new_spec, exp.name, self._parser, experiment_tag=tag)
                # AutoML specific fields set in Trial
                trial.results = []
                trial.best_result = None
                trial.param_config = param_config
                trial.extra_arg = extra_arg
                trials.append(trial)
                self._running_trials[trial.trial_id] = trial
        ntrial = len(trials)
        self._iteration += 1
        self._unfinished_count = ntrial
        self._total_trial_num += ntrial
        self._start_ts = time.time()
        logger.info(
            "=========== BEGIN Experiment-Round: %(round)s "
            "[%(new)s NEW | %(total)s TOTAL] ===========", {
                "round": self._iteration,
                "new": ntrial,
                "total": self._total_trial_num
            })
        return trials

    def on_trial_result(self, trial_id, result):
        if not result:
            return
        trial = self._running_trials[trial_id]
        # Update trial's best result
        trial.results.append(result)
        if trial.best_result is None \
                or result[self.reward_attr] \
                > trial.best_result[self.reward_attr]:
            trial.best_result = result
            # Update job's best trial
            if self.best_trial is None \
                    or (result[self.reward_attr]
                        > self.best_trial.best_result[self.reward_attr]):
                self.best_trial = self._running_trials[trial_id]

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        # Fold the final result in (no-op when result is None), then close
        # out the round bookkeeping once the last trial reports in.
        self.on_trial_result(trial_id, result)
        self._unfinished_count -= 1
        if self._unfinished_count == 0:
            total = len(self._running_trials)
            succ = sum(t.status == Trial.TERMINATED
                       for t in self._running_trials.values())
            # handle the last trial
            # (its status may not have flipped to TERMINATED yet)
            this_trial = self._running_trials[trial_id]
            if this_trial.status == Trial.RUNNING and not error:
                succ += 1

            elapsed = time.time() - self._start_ts
            logger.info(
                "=========== END Experiment-Round: %(round)s "
                "[%(succ)s SUCC | %(fail)s FAIL] this round, "
                "elapsed=%(elapsed).2f, "
                "BEST %(reward_attr)s=%(reward)f ===========", {
                    "round": self._iteration,
                    "succ": succ,
                    "fail": total - succ,
                    "elapsed": elapsed,
                    "reward_attr": self.reward_attr,
                    "reward": self.best_trial.best_result[self.reward_attr]
                    if self.best_trial else None
                })

            # Let the subclass decide whether another round should run.
            action = self._feedback(self._running_trials.values())
            if action == AutoMLSearcher.TERMINATE:
                self._is_finished = True
            self._completed_trials.update(self._running_trials)
            self._running_trials = {}

    def is_finished(self):
        return self._is_finished

    def _select(self):
        """Select a bunch of parameter permutations to run.

        The permutations should be a list of dict, which contains the
        <path, value> pair. The ``path`` could be a dot separated string,
        which will be expanded to merge into the experiment's config by the
        framework. For example:
        pair : {"path.to.key": 1}
        config in experiment : {"path": {"to": {"key": 1}, ...}, ...}

        The framework generates 1 config for 1 Trial. User could also return
        an extra list to add an additional argument to the trial

        Returns:
            A list of config + a list of extra argument (can be None)
        """
        raise NotImplementedError

    def _feedback(self, trials):
        """Feedback the completed trials corresponding to the last selected
        parameter permutations

        Arguments:
            trials (list): A list of Trial object, where user can fetch the
                result attribute, etc.

        Returns:
            Next action, i.e.: CONTINUE, TERMINATE
        """
        raise NotImplementedError
class GridSearch(AutoMLSearcher):
    """Implement the grid search"""

    def _select(self):
        # Expand the whole space once; a single round covers every
        # combination, so feedback always terminates.
        grid = self.search_space.to_grid_search()
        configs = [config for _, config in generate_variants(grid)]
        return configs, None

    def _feedback(self, trials):
        return AutoMLSearcher.TERMINATE
class RandomSearch(AutoMLSearcher):
    """Implement the random search"""

    def __init__(self, search_space, reward_attr, repeat):
        super(RandomSearch, self).__init__(search_space, reward_attr)
        self.repeat = repeat

    def _select(self):
        # Draw ``repeat`` independent samples from the space in one round.
        choices = self.search_space.to_random_choice()
        configs = [
            config for _ in range(self.repeat)
            for _, config in generate_variants(choices)
        ]
        return configs, None

    def _feedback(self, trials):
        return AutoMLSearcher.TERMINATE
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automl/search_space.py
|
Python
|
import random
import logging
import numpy as np
from ray.tune import grid_search
# Module-level logger for search-space construction messages.
logger = logging.getLogger(__name__)
class ParameterSpace:
    """Base class of a single parameter's search space."""

    def __init__(self, name):
        """Initialize ParameterSpace.

        Arguments:
            name (str): Name of the parameter. Name can be dot separated,
                which will be interpreted as path of a nested config
        """
        self.name = name
class DiscreteSpace(ParameterSpace):
    """Search space with discrete choices."""

    def __init__(self, name, choices):
        """Initialize DiscreteSpace.

        Arguments:
            name (str): Name of the parameter.
            choices (list): List of all possible choices.
        """
        super(DiscreteSpace, self).__init__(name)
        self.choices = choices

    def to_grid_search(self):
        """Returns a ray.tune.grid_search structure.

        Contains all the choices inside and can be expanded by ray.tune.
        """
        return grid_search(self.choices)

    def to_random_choice(self):
        """Returns a lambda function that choose a value randomly.

        Can be expanded by ray.tune.
        """
        return lambda _: random.choice(self.choices)

    def choices_count(self):
        """Number of discrete choices."""
        return len(self.choices)

    def __str__(self):
        return "DiscreteSpace {}: {}".format(self.name, self.choices)
class ContinuousSpace(ParameterSpace):
    """Search space of continuous type.

    NOTE that it can be converted to ``DiscreteSpace`` by sampling under
    certain distribution such as linear.
    """

    LINEAR = "linear"
    # TODO: logspace

    def __init__(self, name, start, end, num, distribution=LINEAR):
        """Initialize ContinuousSpace.

        Arguments:
            name (str): Name of the parameter.
            start: Start of the continuous space included.
            end: End of the continuous space included.
            num: Sampling count if possible.
            distribution: Sampling distribution, should be in [LINEAR]
        """
        super(ContinuousSpace, self).__init__(name)
        self.start = float(start)
        self.end = float(end)
        self.num = num
        if distribution != ContinuousSpace.LINEAR:
            raise NotImplementedError(
                "Distribution %s not supported" % distribution)
        # Pre-sample the interval so the space can also act discretely.
        self.choices = np.linspace(start, end, num)
        self.distribution = distribution

    def to_grid_search(self):
        """Returns a ray.tune.grid_search structure.

        Apply sampling to get discrete choices.
        """
        return grid_search(self.choices)

    def to_random_choice(self):
        """Returns a lambda function that choose a value randomly.

        Can be expanded by ray.tune.
        """
        return lambda _: random.uniform(self.start, self.end)

    def choices_count(self):
        """Number of pre-sampled choices."""
        return len(self.choices)

    def __str__(self):
        return "ContinuousSpace {}: [{}, {}]".format(self.name, self.start,
                                                     self.end)
class SearchSpace:
    """Collection of ``ParameterSpace``, a.k.a <name, space> pair.

    It's supposed to be used with a fixed experiment config, which
    could be a very complicated (nested) dict. Each ``ParameterSpace``
    points to a unique place in the experiment config using its name
    as the path.
    """

    def __init__(self, param_list):
        """Initialize SearchSpace.

        Arguments:
            param_list: List of ``ParameterSpace`` (or its subclass).
        """
        self.param_list = param_list
        for ps in param_list:
            # ps MUST be ParameterSpace
            # Use lazy %-args so the logging framework only formats the
            # message when the level is enabled.
            logger.info("Add %s into SearchSpace", ps)

    def to_grid_search(self):
        """Returns a dict of {parameter name: grid_search}.

        Apply ``to_grid_search`` to all ``ParameterSpace``.
        """
        return {ps.name: ps.to_grid_search() for ps in self.param_list}

    def to_random_choice(self):
        """Returns a dict of {parameter name: lambda function}.

        Apply ``to_random_choice`` to all ``ParameterSpace``.
        """
        return {ps.name: ps.to_random_choice() for ps in self.param_list}

    def generate_random_one_hot_encoding(self):
        """Returns a list of one-hot encodings for all parameters.

        1 one-hot np.array for 1 parameter,
        and the 1's place is randomly chosen.
        """
        encoding = []
        for ps in self.param_list:
            one_hot = np.zeros(ps.choices_count())
            choice = random.randrange(ps.choices_count())
            one_hot[choice] = 1
            encoding.append(one_hot)
        return encoding

    def apply_one_hot_encoding(self, one_hot_encoding):
        """Apply one hot encoding to generate a specific config.

        Arguments:
            one_hot_encoding (list): A list of one hot encodings,
                1 for each parameter. The shape of each encoding
                should match that ``ParameterSpace``

        Returns:
            A dict config with specific <name, value> pair
        """
        config = {}
        for ps, one_hot in zip(self.param_list, one_hot_encoding):
            # argmax of the one-hot picks the chosen index.
            index = np.argmax(one_hot)
            config[ps.name] = ps.choices[index]
        return config
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/backend/collector.py
|
Python
|
import logging
import os
import time
from threading import Thread
from ray.tune.automlboard.common.exception import CollectorError
from ray.tune.automlboard.common.utils import parse_json, \
parse_multiple_json, timestamp2date
from ray.tune.automlboard.models.models import JobRecord, \
TrialRecord, ResultRecord
from ray.tune.result import DEFAULT_RESULTS_DIR, JOB_META_FILE, \
EXPR_PARAM_FILE, EXPR_RESULT_FILE, EXPR_META_FILE
class CollectorService:
    """Server implementation to monitor the log directory.

    The service persists the information of jobs and trials into the
    db backend.
    """

    def __init__(self,
                 log_dir=DEFAULT_RESULTS_DIR,
                 reload_interval=30,
                 standalone=True,
                 log_level="INFO"):
        """Initialize the collector service.

        Args:
            log_dir (str): Directory of the logs about trials' information.
            reload_interval (int): Sleep time period after each polling round.
            standalone (boolean): If True, ``run()`` blocks until the
                collector thread exits.
            log_level (str): Level of logging.
        """
        self.logger = self.init_logger(log_level)
        self.standalone = standalone
        self.collector = Collector(
            reload_interval=reload_interval,
            logdir=log_dir,
            logger=self.logger)

    def run(self):
        """Start the collector worker thread.

        If running in standalone mode, the current thread will wait
        until the collector thread ends.
        """
        self.collector.start()
        if self.standalone:
            self.collector.join()

    def stop(self):
        """Stop the collector worker thread."""
        self.collector.stop()

    @classmethod
    def init_logger(cls, log_level):
        """Initialize logger settings."""
        board_logger = logging.getLogger("AutoMLBoard")
        stream_handler = logging.StreamHandler()
        stream_handler.setFormatter(
            logging.Formatter("[%(levelname)s %(asctime)s] "
                              "%(filename)s: %(lineno)d "
                              "%(message)s"))
        board_logger.setLevel(log_level)
        board_logger.addHandler(stream_handler)
        return board_logger
class Collector(Thread):
"""Worker thread for collector service."""
def __init__(self, reload_interval, logdir, logger):
"""Initialize collector worker thread.
Args
reload_interval (int): Time period to sleep after each round
of polling.
logdir (str): Directory path to save the status information of
jobs and trials.
logger (Logger): Logger for collector thread.
"""
super(Collector, self).__init__()
self._is_finished = False
self._reload_interval = reload_interval
self._logdir = logdir
self._monitored_jobs = set()
self._monitored_trials = set()
self._result_offsets = {}
self.logger = logger
self.daemon = True
def run(self):
"""Run the main event loop for collector thread.
In each round the collector traverse the results log directory
and reload trial information from the status files.
"""
self._initialize()
self._do_collect()
while not self._is_finished:
time.sleep(self._reload_interval)
self._do_collect()
self.logger.info("Collector stopped.")
def stop(self):
"""Stop the main polling loop."""
self._is_finished = True
def _initialize(self):
"""Initialize collector worker thread, Log path will be checked first.
Records in DB backend will be cleared.
"""
if not os.path.exists(self._logdir):
raise CollectorError("Log directory %s not exists" % self._logdir)
self.logger.info("Collector started, taking %s as parent directory"
"for all job logs." % self._logdir)
# clear old records
JobRecord.objects.filter().delete()
TrialRecord.objects.filter().delete()
ResultRecord.objects.filter().delete()
def _do_collect(self):
sub_dirs = os.listdir(self._logdir)
job_names = filter(
lambda d: os.path.isdir(os.path.join(self._logdir, d)), sub_dirs)
for job_name in job_names:
self.sync_job_info(job_name)
def sync_job_info(self, job_name):
"""Load information of the job with the given job name.
1. Traverse each experiment sub-directory and sync information
for each trial.
2. Create or update the job information, together with the job
meta file.
Args:
job_name (str) name of the Tune experiment
"""
job_path = os.path.join(self._logdir, job_name)
if job_name not in self._monitored_jobs:
self._create_job_info(job_path)
self._monitored_jobs.add(job_name)
else:
self._update_job_info(job_path)
expr_dirs = filter(lambda d: os.path.isdir(os.path.join(job_path, d)),
os.listdir(job_path))
for expr_dir_name in expr_dirs:
self.sync_trial_info(job_path, expr_dir_name)
self._update_job_info(job_path)
def sync_trial_info(self, job_path, expr_dir_name):
"""Load information of the trial from the given experiment directory.
Create or update the trial information, together with the trial
meta file.
Args:
job_path(str)
expr_dir_name(str)
"""
expr_name = expr_dir_name[-8:]
expr_path = os.path.join(job_path, expr_dir_name)
if expr_name not in self._monitored_trials:
self._create_trial_info(expr_path)
self._monitored_trials.add(expr_name)
else:
self._update_trial_info(expr_path)
def _create_job_info(self, job_dir):
"""Create information for given job.
Meta file will be loaded if exists, and the job information will
be saved in db backend.
Args:
job_dir (str): Directory path of the job.
"""
meta = self._build_job_meta(job_dir)
self.logger.debug("Create job: %s" % meta)
job_record = JobRecord.from_json(meta)
job_record.save()
@classmethod
def _update_job_info(cls, job_dir):
"""Update information for given job.
Meta file will be loaded if exists, and the job information in
in db backend will be updated.
Args:
job_dir (str): Directory path of the job.
Return:
Updated dict of job meta info
"""
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if meta:
logging.debug("Update job info for %s" % meta["job_id"])
JobRecord.objects \
.filter(job_id=meta["job_id"]) \
.update(end_time=timestamp2date(meta["end_time"]))
def _create_trial_info(self, expr_dir):
"""Create information for given trial.
Meta file will be loaded if exists, and the trial information
will be saved in db backend.
Args:
expr_dir (str): Directory path of the experiment.
"""
meta = self._build_trial_meta(expr_dir)
self.logger.debug("Create trial for %s" % meta)
trial_record = TrialRecord.from_json(meta)
trial_record.save()
def _update_trial_info(self, expr_dir):
"""Update information for given trial.
Meta file will be loaded if exists, and the trial information
in db backend will be updated.
Args:
expr_dir(str)
"""
trial_id = expr_dir[-8:]
meta_file = os.path.join(expr_dir, EXPR_META_FILE)
meta = parse_json(meta_file)
result_file = os.path.join(expr_dir, EXPR_RESULT_FILE)
offset = self._result_offsets.get(trial_id, 0)
results, new_offset = parse_multiple_json(result_file, offset)
self._add_results(results, trial_id)
self._result_offsets[trial_id] = new_offset
if meta:
TrialRecord.objects \
.filter(trial_id=trial_id) \
.update(trial_status=meta["status"],
end_time=timestamp2date(meta.get("end_time", None)))
elif len(results) > 0:
metrics = {
"episode_reward": results[-1].get("episode_reward_mean", None),
"accuracy": results[-1].get("mean_accuracy", None),
"loss": results[-1].get("loss", None)
}
if results[-1].get("done"):
TrialRecord.objects \
.filter(trial_id=trial_id) \
.update(trial_status="TERMINATED",
end_time=results[-1].get("date", None),
metrics=str(metrics))
else:
TrialRecord.objects \
.filter(trial_id=trial_id) \
.update(metrics=str(metrics))
@classmethod
def _build_job_meta(cls, job_dir):
"""Build meta file for job.
Args:
job_dir (str): Directory path of the job.
Return:
A dict of job meta info.
"""
meta_file = os.path.join(job_dir, JOB_META_FILE)
meta = parse_json(meta_file)
if not meta:
job_name = job_dir.split("/")[-1]
user = os.environ.get("USER", None)
meta = {
"job_id": job_name,
"job_name": job_name,
"user": user,
"type": "ray",
"start_time": os.path.getctime(job_dir),
"end_time": None,
"best_trial_id": None,
}
if meta.get("start_time", None):
meta["start_time"] = timestamp2date(meta["start_time"])
return meta
@classmethod
def _build_trial_meta(cls, expr_dir):
    """Build meta file for trial.

    Args:
        expr_dir (str): Directory path of the experiment.

    Return:
        A dict of trial meta info.
    """
    meta_file = os.path.join(expr_dir, EXPR_META_FILE)
    meta = parse_json(meta_file)
    if not meta:
        # Directory layout: <job_dir>/<trial_dir>; the trial id is the
        # last 8 characters of the trial directory name.
        job_id = expr_dir.split("/")[-2]
        trial_id = expr_dir[-8:]
        meta = {
            "trial_id": trial_id,
            "job_id": job_id,
            "status": "RUNNING",
            "type": "TUNE",
            "start_time": os.path.getctime(expr_dir),
            "end_time": None,
            "progress_offset": 0,
            "result_offset": 0,
        }
    if not meta.get("start_time", None):
        meta["start_time"] = os.path.getctime(expr_dir)
    if isinstance(meta["start_time"], float):
        meta["start_time"] = timestamp2date(meta["start_time"])
    if meta.get("end_time", None):
        meta["end_time"] = timestamp2date(meta["end_time"])
    # Params are always refreshed from disk.  Parse the file exactly
    # once; the original parsed it twice (once while building the
    # synthesized meta and again here), doubling the disk I/O.
    meta["params"] = parse_json(os.path.join(expr_dir, EXPR_PARAM_FILE))
    return meta
def _add_results(self, results, trial_id):
    """Add a list of results into db.

    Args:
        results (list): A list of json results.
        trial_id (str): Id of the trial.
    """
    for json_result in results:
        self.logger.debug("Appending result: %s" % json_result)
        # Tag each result with its trial before persisting it.
        json_result["trial_id"] = trial_id
        ResultRecord.from_json(json_result).save()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/common/exception.py
|
Python
|
class CollectorError(Exception):
    """Exception type raised by the collector service."""
class DatabaseError(Exception):
    """Exception type raised by the database manager."""
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/common/utils.py
|
Python
|
import logging
import json
import os
import time
def dump_json(json_info, json_file, overwrite=True):
    """Dump a whole json record into the given file.

    Overwrite the file if the overwrite flag is set, otherwise append
    to it.

    Args:
        json_info (dict): Information dict to be dumped.
        json_file (str): File path to be dumped to.
        overwrite (boolean): Whether to replace any existing content.
    """
    # "w" truncates, "a" appends.  The original used "w+" for the
    # non-overwrite case, which also truncates -- so overwrite=False
    # silently behaved like overwrite=True.
    mode = "w" if overwrite else "a"
    try:
        with open(json_file, mode) as f:
            f.write(json.dumps(json_info))
    except BaseException:
        # Exceptions have no `.message` attribute on Python 3; log the
        # full traceback instead of crashing inside the handler.
        logging.exception("Failed to dump json to %s" % json_file)
def parse_json(json_file):
    """Parse a whole json record from the given file.

    Return None if the json file does not exist or an exception occurs.

    Args:
        json_file (str): File path to be parsed.

    Returns:
        A dict of json info.
    """
    if not os.path.exists(json_file):
        return None
    try:
        with open(json_file, "r") as f:
            json_info = json.loads(f.read())
        return unicode2str(json_info)
    except BaseException:
        # Exceptions have no `.message` attribute on Python 3; the
        # original `logging.error(e.message)` raised AttributeError
        # inside the handler.  Log the traceback instead.
        logging.exception("Failed to parse json from %s" % json_file)
        return None
def parse_multiple_json(json_file, offset=None):
    """Parse multiple json records from the given file.

    Seek to the offset as the start point before parsing if offset is
    set.  Return an empty list if the json file does not exist or an
    exception occurs.

    Args:
        json_file (str): File path to be parsed.
        offset (int): Initial seek position of the file.

    Returns:
        A list of parsed json records.
        New offset after parsing.
    """
    json_info_list = []
    # Normalize None so the offset arithmetic below cannot raise
    # TypeError (the original crashed on `offset += len(line)` when
    # called without an offset).
    offset = offset or 0
    if not os.path.exists(json_file):
        # Always return the (list, offset) pair; the original returned
        # only the list here, breaking callers that unpack
        # `results, new_offset = parse_multiple_json(...)`.
        return json_info_list, offset
    try:
        with open(json_file, "r") as f:
            if offset:
                f.seek(offset)
            for line in f:
                if line[-1] != "\n":
                    # Incomplete line; it will be parsed on the next scan.
                    break
                json_info_list.append(json.loads(line))
                offset += len(line)
    except BaseException:
        # `.message` does not exist on Python 3 exceptions.
        logging.exception("Failed to parse json from %s" % json_file)
    return json_info_list, offset
def timestamp2date(timestamp):
    """Convert a timestamp to a human-readable local date string."""
    local_time = time.localtime(timestamp)
    return time.strftime("%Y-%m-%d %H:%M:%S", local_time)
def unicode2str(content):
    """Convert unicode elements of the content to str recursively.

    This helper dates from Python 2, where it encoded ``unicode``
    objects to utf-8 ``str``.  On Python 3 every string is already
    ``str``; encoding would turn it into ``bytes`` and corrupt parsed
    JSON (bytes dict keys, bytes values).  Strings and other scalars
    are therefore returned unchanged, and ``bytes`` are decoded.
    """
    if isinstance(content, dict):
        return {
            unicode2str(key): unicode2str(value)
            for key, value in content.items()
        }
    elif isinstance(content, list):
        return [unicode2str(element) for element in content]
    elif isinstance(content, bytes):
        return content.decode("utf-8")
    else:
        # str, int, float, bool, None: already fine.  The original's
        # blanket `.encode("utf-8")` also crashed on None.
        return content
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/frontend/query.py
|
Python
|
from django.shortcuts import HttpResponse
from ray.tune.automlboard.models.models import JobRecord, TrialRecord
from ray.tune.trial import Trial
import json
def query_job(request):
    """Rest API to query the job info, with the given job_id.

    The url pattern should be like this:

        curl http://<server>:<port>/query_job?job_id=<job_id>

    The response may be:

    {
        "running_trials": 0,
        "start_time": "2018-07-19 20:49:40",
        "current_round": 1,
        "failed_trials": 0,
        "best_trial_id": "2067R2ZD",
        "name": "asynchyperband_test",
        "job_id": "asynchyperband_test",
        "user": "Grady",
        "type": "RAY TUNE",
        "total_trials": 4,
        "end_time": "2018-07-19 20:50:10",
        "progress": 100,
        "success_trials": 4
    }
    """
    job_id = request.GET.get("job_id")
    jobs = JobRecord.objects.filter(job_id=job_id)
    trials = TrialRecord.objects.filter(job_id=job_id)
    total_num = len(trials)
    running_num = sum(t.trial_status == Trial.RUNNING for t in trials)
    success_num = sum(t.trial_status == Trial.TERMINATED for t in trials)
    failed_num = sum(t.trial_status == Trial.ERROR for t in trials)
    # Progress is the percentage of successfully finished trials.
    if total_num == 0:
        progress = 0
    else:
        progress = int(float(success_num) / total_num * 100)
    if len(jobs) == 0:
        # Fixed typo in the user-facing message ("Unkonwn").
        resp = "Unknown job id %s.\n" % job_id
    else:
        job = jobs[0]
        result = {
            "job_id": job.job_id,
            "name": job.name,
            "user": job.user,
            "type": job.type,
            "start_time": job.start_time,
            "end_time": job.end_time,
            "success_trials": success_num,
            "failed_trials": failed_num,
            "running_trials": running_num,
            "total_trials": total_num,
            "best_trial_id": job.best_trial_id,
            "progress": progress
        }
        resp = json.dumps(result)
    return HttpResponse(resp, content_type="application/json;charset=utf-8")
def query_trial(request):
    """Rest API to query the trial info, with the given trial_id.

    The url pattern should be like this:

        curl http://<server>:<port>/query_trial?trial_id=<trial_id>

    The response may be:

    {
        "app_url": "None",
        "trial_status": "TERMINATED",
        "params": {'a': 1, 'b': 2},
        "job_id": "asynchyperband_test",
        "end_time": "2018-07-19 20:49:44",
        "start_time": "2018-07-19 20:49:40",
        "trial_id": "2067R2ZD",
    }
    """
    trial_id = request.GET.get("trial_id")
    trials = TrialRecord.objects \
        .filter(trial_id=trial_id) \
        .order_by("-start_time")
    if len(trials) == 0:
        # Fixed typo ("Unkonwn") and the format argument: the original
        # interpolated the queryset (`% trials`) instead of the id the
        # user asked about.
        resp = "Unknown trial id %s.\n" % trial_id
    else:
        trial = trials[0]
        result = {
            "trial_id": trial.trial_id,
            "job_id": trial.job_id,
            "trial_status": trial.trial_status,
            "start_time": trial.start_time,
            "end_time": trial.end_time,
            "params": trial.params
        }
        resp = json.dumps(result)
    return HttpResponse(resp, content_type="application/json;charset=utf-8")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/frontend/urls.py
|
Python
|
"""
Monitor URL Configuration.
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
import ray.tune.automlboard.frontend.view as view
import ray.tune.automlboard.frontend.query as query
# Route table: Django admin, three HTML views, and two JSON query
# endpoints served by the query module.
urlpatterns = [
    url(r"^admin/", admin.site.urls),
    url(r"^$", view.index),
    url(r"^job$", view.job),
    url(r"^trial$", view.trial),
    url(r"^query_job", query.query_job),
    url(r"^query_trial", query.query_trial)
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/frontend/view.py
|
Python
|
from django.shortcuts import render
from ray.tune.automlboard.settings import AUTOMLBOARD_RELOAD_INTERVAL, \
AUTOMLBOARD_LOG_DIR
from ray.tune.automlboard.models.models import JobRecord, \
TrialRecord, ResultRecord
from ray.tune.trial import Trial
import datetime
def index(request):
    """View for the home page."""
    recent_jobs = JobRecord.objects.order_by("-start_time")[0:100]
    recent_trials = TrialRecord.objects.order_by("-start_time")[0:500]
    # Materialize statuses once, then count each state.
    statuses = [t.trial_status for t in recent_trials]
    job_records = [get_job_info(j) for j in recent_jobs]
    context = {
        "log_dir": AUTOMLBOARD_LOG_DIR,
        "reload_interval": AUTOMLBOARD_RELOAD_INTERVAL,
        "recent_jobs": job_records,
        "job_num": len(job_records),
        "trial_num": len(statuses),
        "running_num": statuses.count(Trial.RUNNING),
        "success_num": statuses.count(Trial.TERMINATED),
        "failed_num": statuses.count(Trial.ERROR)
    }
    return render(request, "index.html", context)
def job(request):
    """View for a single job."""
    job_id = request.GET.get("job_id")
    # Sidebar list of recently started jobs.
    recent_jobs = JobRecord.objects.order_by("-start_time")[0:100]
    recent_trials = TrialRecord.objects \
        .filter(job_id=job_id) \
        .order_by("-start_time")
    trial_records = []
    for recent_trial in recent_trials:
        trial_records.append(get_trial_info(recent_trial))
    current_job = JobRecord.objects \
        .filter(job_id=job_id) \
        .order_by("-start_time")[0]
    if len(trial_records) > 0:
        # Assumes every trial of a job shares the same param keys --
        # TODO(review): confirm against the collector.
        param_keys = trial_records[0]["params"].keys()
    else:
        param_keys = []
    # TODO: support custom metrics here
    metric_keys = ["episode_reward", "accuracy", "loss"]
    context = {
        "current_job": get_job_info(current_job),
        "recent_jobs": recent_jobs,
        "recent_trials": trial_records,
        "param_keys": param_keys,
        "param_num": len(param_keys),
        "metric_keys": metric_keys,
        "metric_num": len(metric_keys)
    }
    return render(request, "job.html", context)
def trial(request):
    """View for a single trial."""
    job_id = request.GET.get("job_id")
    trial_id = request.GET.get("trial_id")
    recent_trials = TrialRecord.objects \
        .filter(job_id=job_id) \
        .order_by("-start_time")
    # Cap the result history rendered on the page at 2000 entries.
    recent_results = ResultRecord.objects \
        .filter(trial_id=trial_id) \
        .order_by("-date")[0:2000]
    current_trial = TrialRecord.objects \
        .filter(trial_id=trial_id) \
        .order_by("-start_time")[0]
    context = {
        "job_id": job_id,
        "trial_id": trial_id,
        "current_trial": current_trial,
        "recent_results": recent_results,
        "recent_trials": recent_trials
    }
    return render(request, "trial.html", context)
def get_job_info(current_job):
    """Get job information for current job."""
    trials = TrialRecord.objects.filter(job_id=current_job.job_id)
    statuses = [t.trial_status for t in trials]
    total_num = len(statuses)
    success_num = statuses.count(Trial.TERMINATED)
    # Progress = percentage of trials that finished successfully.
    progress = int(float(success_num) / total_num * 100) if total_num else 0
    return {
        "job_id": current_job.job_id,
        "job_name": current_job.name,
        "user": current_job.user,
        "type": current_job.type,
        "start_time": current_job.start_time,
        "end_time": current_job.end_time,
        "total_num": total_num,
        "running_num": statuses.count(Trial.RUNNING),
        "success_num": success_num,
        "failed_num": statuses.count(Trial.ERROR),
        "best_trial_id": current_job.best_trial_id,
        "progress": progress,
        "winner": get_winner(trials)
    }
def get_trial_info(current_trial):
    """Get job information for current trial."""
    import ast

    if current_trial.end_time and ("_" in current_trial.end_time):
        # end time is parsed from result.json and the format
        # is like: yyyy-mm-dd_hh-MM-ss, which will be converted
        # to yyyy-mm-dd hh:MM:ss here
        time_obj = datetime.datetime.strptime(current_trial.end_time,
                                              "%Y-%m-%d_%H-%M-%S")
        end_time = time_obj.strftime("%Y-%m-%d %H:%M:%S")
    else:
        end_time = current_trial.end_time
    # Stored metrics/params are repr() of plain Python literals, so
    # ast.literal_eval parses them without eval()'s arbitrary-code
    # execution risk on DB-stored strings.
    if current_trial.metrics:
        metrics = ast.literal_eval(current_trial.metrics)
    else:
        metrics = None
    trial_info = {
        "trial_id": current_trial.trial_id,
        "job_id": current_trial.job_id,
        "trial_status": current_trial.trial_status,
        "start_time": current_trial.start_time,
        "end_time": end_time,
        "params": ast.literal_eval(current_trial.params),
        "metrics": metrics
    }
    return trial_info
def get_winner(trials):
    """Get winner trial of a job."""
    winner = {}
    # TODO: sort_key should be customized here
    sort_key = "accuracy"
    if trials and len(trials) > 0:
        # Fall back to episode_reward when the first trial reports no
        # accuracy (RL jobs vs. supervised jobs).
        first_metrics = get_trial_info(trials[0])["metrics"]
        if first_metrics and not first_metrics.get("accuracy", None):
            sort_key = "episode_reward"
        max_metric = float("-Inf")
        for t in trials:
            metrics = get_trial_info(t).get("metrics", None)
            if metrics and metrics.get(sort_key, None):
                # Track the trial with the largest value of sort_key.
                current_metric = float(metrics[sort_key])
                if current_metric > max_metric:
                    winner["trial_id"] = t.trial_id
                    winner["metric"] = sort_key + ": " + str(current_metric)
                    max_metric = current_metric
    return winner
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/frontend/wsgi.py
|
Python
|
"""
WSGI config for monitor project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/
"""
from django.core.wsgi import get_wsgi_application
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"ray.tune.automlboard.settings")
application = get_wsgi_application()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/manage.py
|
Python
|
#!/usr/bin/env python
from django.core.management import execute_from_command_line
import os
import sys
if __name__ == "__main__":
os.environ.setdefault("DJANGO_SETTINGS_MODULE",
"ray.tune.automlboard.settings")
execute_from_command_line(sys.argv)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/models/__init__.py
|
Python
|
default_app_config = "ray.tune.automlboard.models.apps.ModelConfig"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/models/apps.py
|
Python
|
from django.apps import AppConfig
class ModelConfig(AppConfig):
    """App config for the automlboard models application."""

    name = "ray.tune.automlboard.models"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/models/models.py
|
Python
|
from django.db import models
class JobRecord(models.Model):
    """Information of an AutoML Job."""

    job_id = models.CharField(max_length=50)
    name = models.CharField(max_length=20)
    user = models.CharField(max_length=20)
    type = models.CharField(max_length=20)
    start_time = models.CharField(max_length=50)
    end_time = models.CharField(max_length=50)
    best_trial_id = models.CharField(max_length=50)

    @classmethod
    def from_json(cls, json_info):
        """Build a Job instance from a json string.

        NOTE(review): end_time and best_trial_id are not populated
        here; presumably they are filled in later by the collector --
        confirm against the update path.
        """
        if json_info is None:
            return None
        return JobRecord(
            job_id=json_info["job_id"],
            name=json_info["job_name"],
            user=json_info["user"],
            type=json_info["type"],
            start_time=json_info["start_time"])

    def is_finished(self):
        """Judge whether this is a record for a finished job."""
        return self.end_time is not None
class TrialRecord(models.Model):
    """Information of a single AutoML trial of the job."""

    trial_id = models.CharField(max_length=50)
    job_id = models.CharField(max_length=50)
    trial_status = models.CharField(max_length=20)
    start_time = models.CharField(max_length=50)
    end_time = models.CharField(max_length=50)
    params = models.CharField(max_length=50, blank=True, null=True)
    metrics = models.CharField(max_length=256, null=True, blank=True)

    @classmethod
    def from_json(cls, json_info):
        """Build a Trial instance from a json string.

        NOTE(review): end_time and metrics are not set here; they are
        presumably written later via queryset .update() calls.
        """
        if json_info is None:
            return None
        return TrialRecord(
            trial_id=json_info["trial_id"],
            job_id=json_info["job_id"],
            trial_status=json_info["status"],
            start_time=json_info["start_time"],
            params=json_info["params"])
class ResultRecord(models.Model):
    """Information of a single result of a trial."""

    trial_id = models.CharField(max_length=50)
    timesteps_total = models.BigIntegerField(blank=True, null=True)
    done = models.CharField(max_length=30, blank=True, null=True)
    episode_reward_mean = models.CharField(
        max_length=30, blank=True, null=True)
    mean_accuracy = models.FloatField(blank=True, null=True)
    mean_loss = models.FloatField(blank=True, null=True)
    # NOTE: field name keeps the historical "trainning" misspelling --
    # renaming it would require a database migration.
    trainning_iteration = models.BigIntegerField(blank=True, null=True)
    timesteps_this_iter = models.BigIntegerField(blank=True, null=True)
    time_this_iter_s = models.BigIntegerField(blank=True, null=True)
    time_total_s = models.BigIntegerField(blank=True, null=True)
    date = models.CharField(max_length=30, blank=True, null=True)
    hostname = models.CharField(max_length=50, blank=True, null=True)
    node_ip = models.CharField(max_length=50, blank=True, null=True)
    config = models.CharField(max_length=256, blank=True, null=True)

    @classmethod
    def from_json(cls, json_info):
        """Build a Result instance from a json string.

        All fields are optional: every lookup uses .get() so a result
        record missing a key does not raise.  (The original indexed
        json_info["timesteps_total"] directly, raising KeyError for
        results that omit it while every other field was optional.)
        """
        if json_info is None:
            return None
        return ResultRecord(
            trial_id=json_info["trial_id"],
            timesteps_total=json_info.get("timesteps_total", None),
            done=json_info.get("done", None),
            episode_reward_mean=json_info.get("episode_reward_mean", None),
            mean_accuracy=json_info.get("mean_accuracy", None),
            mean_loss=json_info.get("mean_loss", None),
            trainning_iteration=json_info.get("training_iteration", None),
            timesteps_this_iter=json_info.get("timesteps_this_iter", None),
            time_this_iter_s=json_info.get("time_this_iter_s", None),
            time_total_s=json_info.get("time_total_s", None),
            date=json_info.get("date", None),
            hostname=json_info.get("hostname", None),
            node_ip=json_info.get("node_ip", None),
            config=json_info.get("config", None))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/run.py
|
Python
|
import logging
import os
import re
import django
import argparse
from django.core.management import execute_from_command_line
from common.exception import DatabaseError
root_path = os.path.dirname(os.path.abspath(__file__))
logger = logging.getLogger(__name__)
def run_board(args):
    """
    Run main entry for AutoMLBoard.

    Args:
        args: args parsed from command line
    """
    init_config(args)
    # backend service, should import after django settings initialized
    from backend.collector import CollectorService
    service = CollectorService(
        args.logdir,
        args.reload_interval,
        standalone=False,
        log_level=args.log_level)
    service.run()
    # frontend service
    logger.info("Try to start automlboard on port %s\n" % args.port)
    # Hand off to Django's runserver; --noreload because the collector
    # thread is already running in this process.
    command = [
        os.path.join(root_path, "manage.py"), "runserver",
        "0.0.0.0:%s" % args.port, "--noreload"
    ]
    execute_from_command_line(command)
def init_config(args):
    """
    Initialize configs of the service.

    Do the following things:
    1. automl board settings
    2. database settings
    3. django settings

    Args:
        args: args parsed from the command line.
    """
    os.environ["AUTOMLBOARD_LOGDIR"] = args.logdir
    os.environ["AUTOMLBOARD_LOGLEVEL"] = args.log_level
    os.environ["AUTOMLBOARD_RELOAD_INTERVAL"] = str(args.reload_interval)
    if args.db:
        try:
            # URI format: schema://user:password@host:port/database
            db_address_reg = re.compile(r"(.*)://(.*):(.*)@(.*):(.*)/(.*)")
            # The argparse option is `--db`, so the URI is stored in
            # `args.db`; the original read a nonexistent
            # `args.db_address` and raised AttributeError whenever a
            # database URI was supplied.
            match = re.match(db_address_reg, args.db)
            os.environ["AUTOMLBOARD_DB_ENGINE"] = match.group(1)
            os.environ["AUTOMLBOARD_DB_USER"] = match.group(2)
            os.environ["AUTOMLBOARD_DB_PASSWORD"] = match.group(3)
            os.environ["AUTOMLBOARD_DB_HOST"] = match.group(4)
            os.environ["AUTOMLBOARD_DB_PORT"] = match.group(5)
            os.environ["AUTOMLBOARD_DB_NAME"] = match.group(6)
            logger.info("Using %s as the database backend." % match.group(1))
        except BaseException as e:
            raise DatabaseError(e)
    else:
        logger.info("Using sqlite3 as the database backend, "
                    "information will be stored in automlboard.db")
    os.environ.setdefault("DJANGO_SETTINGS_MODULE",
                          "ray.tune.automlboard.settings")
    django.setup()
    # Create/upgrade the schema before anything touches the DB.
    command = [os.path.join(root_path, "manage.py"), "migrate", "--run-syncdb"]
    execute_from_command_line(command)
def main():
    """Parse command line arguments and launch AutoMLBoard."""
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--logdir",
        type=str,
        required=True,
        help="Directory where AutoML Board will "
        "look to find tuning logs it can display")
    parser.add_argument(
        "--port",
        type=int,
        default=8008,
        help="What port to serve AutoMLBoard on, "
        "(default: %(default)s)")
    # The original statement ended with a stray trailing comma, which
    # turned it into a harmless-but-confusing tuple expression.
    parser.add_argument(
        "--db",
        type=str,
        default=None,
        help="Set SQL database URI in "
        "schema://user:password@host:port/database, "
        "(default: sqlite3)")
    parser.add_argument(
        "--reload_interval",
        type=int,
        default=5,
        help="How often the backend should load more data, "
        "(default: %(default)s)")
    parser.add_argument(
        "--log_level",
        type=str,
        default="INFO",
        help="Set the logging level, "
        "(default: %(default)s)")
    cmd_args = parser.parse_args()
    run_board(cmd_args)
if __name__ == "__main__":
main()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/settings.py
|
Python
|
"""
Django settings for monitor project.
Generated by 'django-admin startproject' using Django 1.11.14.
For more information on this file, see
https://docs.djangoproject.com/en/1.11/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.11/ref/settings/
"""
import os
# Build paths inside the project like this:
# os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# You can specify your own secret key, here we just pick one randomly.
SECRET_KEY = "tktks103=$7a#5axn)52&b87!#w_qm(%*72^@hsq!nur%dtk4b"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = ["*"]
# Application definition
INSTALLED_APPS = [
"django.contrib.admin",
"django.contrib.auth",
"django.contrib.contenttypes",
"django.contrib.sessions",
"django.contrib.messages",
"django.contrib.staticfiles",
"ray.tune.automlboard.models",
]
MIDDLEWARE = [
"django.middleware.security.SecurityMiddleware",
"django.contrib.sessions.middleware.SessionMiddleware",
"django.middleware.common.CommonMiddleware",
"django.middleware.csrf.CsrfViewMiddleware",
"django.contrib.auth.middleware.AuthenticationMiddleware",
"django.contrib.messages.middleware.MessageMiddleware",
"django.middleware.clickjacking.XFrameOptionsMiddleware",
]
ROOT_URLCONF = "ray.tune.automlboard.frontend.urls"
TEMPLATES = [
{
"BACKEND": "django.template.backends.django.DjangoTemplates",
"DIRS": [BASE_DIR + "/templates"],
"APP_DIRS": True,
"OPTIONS": {
"context_processors": [
"django.template.context_processors.debug",
"django.template.context_processors.request",
"django.contrib.auth.context_processors.auth",
"django.contrib.messages.context_processors.messages",
],
},
},
]
WSGI_APPLICATION = "ray.tune.automlboard.frontend.wsgi.application"
# Map of user-facing engine names to Django backend module paths.
# The historical misspelling "sqllite" is kept for backward
# compatibility, and the correct spelling "sqlite" is accepted too.
DB_ENGINE_NAME_MAP = {
    "mysql": "django.db.backends.mysql",
    "sqlite": "django.db.backends.sqlite3",
    "sqllite": "django.db.backends.sqlite3"
}


def lookup_db_engine(name):
    """Lookup db engine class name for engine name.

    Unknown names fall back to the sqlite3 backend.
    """
    return DB_ENGINE_NAME_MAP.get(name, DB_ENGINE_NAME_MAP["sqllite"])
# Database
# https://docs.djangoproject.com/en/1.11/ref/settings/#databases
if not os.environ.get("AUTOMLBOARD_DB_ENGINE", None):
DATABASES = {
"default": {
"ENGINE": "django.db.backends.sqlite3",
"NAME": "automlboard.db",
}
}
else:
DATABASES = {
"default": {
"ENGINE": lookup_db_engine(os.environ["AUTOMLBOARD_DB_ENGINE"]),
"NAME": os.environ["AUTOMLBOARD_DB_NAME"],
"USER": os.environ["AUTOMLBOARD_DB_USER"],
"PASSWORD": os.environ["AUTOMLBOARD_DB_PASSWORD"],
"HOST": os.environ["AUTOMLBOARD_DB_HOST"],
"PORT": os.environ["AUTOMLBOARD_DB_PORT"]
}
}
# Password validation
# https://docs.djangoproject.com/en/1.11/ref/settings/#auth-password-validators
VALIDATION_PREFIX = "django.contrib.auth.password_validation."
AUTH_PASSWORD_VALIDATORS = [
{
"NAME": VALIDATION_PREFIX + "UserAttributeSimilarityValidator",
},
{
"NAME": VALIDATION_PREFIX + "MinimumLengthValidator",
},
{
"NAME": VALIDATION_PREFIX + "CommonPasswordValidator",
},
{
"NAME": VALIDATION_PREFIX + "NumericPasswordValidator",
},
]
# Internationalization
# https://docs.djangoproject.com/en/1.11/topics/i18n/
LANGUAGE_CODE = "en-us"
TIME_ZONE = "Asia/Shanghai"
USE_I18N = True
USE_L10N = True
USE_TZ = False
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.11/howto/static-files/
STATIC_URL = "/static/"
STATICFILES_DIRS = (os.path.join(BASE_DIR, "static").replace("\\", "/"), )
# automlboard settings
AUTOMLBOARD_LOG_DIR = os.environ.get("AUTOMLBOARD_LOGDIR", None)
AUTOMLBOARD_RELOAD_INTERVAL = os.environ.get("AUTOMLBOARD_RELOAD_INTERVAL",
None)
AUTOMLBOARD_LOG_LEVEL = os.environ.get("AUTOMLBOARD_LOGLEVEL", None)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/static/css/App.css
|
CSS
|
body {
font-size: 14px;
}
input[type=text], textarea {
font-size: 14px;
padding: 5px 10px;
border-radius: 4px;
border: 1px solid #ccc;
-webkit-box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
box-shadow: inset 0 1px 1px rgba(0,0,0,.075);
}
::-webkit-input-placeholder {
opacity: 0.6;
}
:-ms-input-placeholder {
opacity: 0.6;
}
::-ms-input-placeholder {
opacity: 0.6;
}
::placeholder {
opacity: 0.6;
}
button, .btn {
font-size: 14px;
background-color: #f5f5f5;
border-color: #cccccc;
}
button:hover, .btn:hover {
border-color: #c0c0c0;
}
a {
color: #017fcb;
}
a:hover, a:focus {
color: #015693;
}
.btn-primary {
background-color: #0193e1;
border-color: #0193e1;
}
.btn-primary:hover {
background-color: #017fcb;
border-color: #017fcb;
}
.btn-primary[disabled], .btn-primary[disabled]:hover {
background-color: #0193e1;
border-color: #0193e1;
opacity: 0.4;
}
.App-header {
background-color: #082142;
height: 60px;
color: white;
display: block;
}
.App-header-text {
position: relative;
top: 40%;
}
.App-experiments {
width: 200px;
}
.App-content {
width: 80%;
margin-right: auto;
margin-left: auto;
}
div.mlflow-logo {
display: inline-block;
}
img.mlflow-logo {
height: 40px;
margin-left: 64px;
margin-top: 10px;
margin-bottom: 10px;
}
div.github {
display: inline-block;
padding-right: 24px;
color: #ffffff;
}
div.docs {
display: inline-block;
color: #ffffff;
}
div.header-links {
display: inline-block;
float: right;
padding-top: 21px;
padding-right: 10%;
font-size: 16px;
}
h1 {
margin-top: 32px;
font-size: 24px;
font-weight: bold;
color: #333;
}
h2 {
font-size: 18px;
font-weight: normal;
color: #333;
}
label {
font-size: 14px;
font-weight: normal;
color: #333;
margin: 0;
}
div.metadata {
margin-top: 32px;
}
span.metadata {
font-size: 16px;
font-weight: normal;
white-space: nowrap;
margin-right: 100px;
}
span.metadata-header {
font-size: 16px;
font-weight: normal;
color: #888;
margin-right: 4px;
}
table th {
background-color: #fafafa;
color: #888888;
font-weight: 500;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/static/css/ExperimentList.css
|
CSS
|
.experiment-list-outer-container {
padding-left: 64px;
}
.experiment-list-container {
overflow-y: scroll;
overflow-x: hidden;
width: 236px;
min-height: 100%;
}
.active-experiment-list-item {
background: rgba(67, 199, 234, 0.1);
font-weight: bold;
}
.experiment-list-item {
overflow:hidden;
-o-text-overflow: ellipsis;
text-overflow: ellipsis;
white-space: nowrap;
font-size: 16px;
height: 40px;
width: 220px;
line-height: 40px;
padding-left: 12px;
}
.experiments-header {
font-weight: normal;
display: inline-block;
padding-bottom: 6px;
}
.collapser-container {
display: inline-block;
position: relative;
top: -2px;
}
.collapser {
display: inline-block;
background-color: #082142d6;
color: #FFFFFF;
font-size: 16px;
line-height: 24px;
width: 24px;
height: 24px;
text-align: center;
margin-left: 68px;
}
.login-icon {
background-color: #E95420;
}
.login-icon:hover {
background-color: #AEA79F;
}
.fa, .fas {
font-weight: 900;
padding-top: 3px;
}
.collapsed {
display: none; /* hide it for small displays */
}
@media (min-width: 992px) {
.collapsed {
display: block;
margin-left: -18%; /* same width as sidebar */
}
}
#row-main {
overflow-x: hidden; /* necessary to hide collapsed sidebar */
}
#sidebar {
-webkit-transition: margin 0.3s ease;
-moz-transition: margin 0.3s ease;
-o-transition: margin 0.3s ease;
transition: margin 0.3s ease;
}
#content {
-webkit-transition: width 0.3s ease;
-moz-transition: width 0.3s ease;
-o-transition: width 0.3s ease;
transition: width 0.3s ease;
}
.experiment-list-container .nav .nav-item .nav-link:hover {
padding-left: 25px;
margin-right: 25px;
color: #0193e1;
background-color: #e9ecef;
}
.nav-item .nav-link {
color: #888888;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/static/css/ExperimentView.css
|
CSS
|
.ExperimentView input[type=checkbox] {
width: auto;
}
.ExperimentView th {
background-color: #fafafa;
color: #888888;
font-weight: 500;
}
.ExperimentView td, .ExperimentView th {
border-top: 1px solid #e2e2e2;
border-bottom: 1px solid #e2e2e2;
}
.ExperimentView th.top-row {
text-align: center;
border-bottom: none;
border-top: none;
}
.ExperimentView th.bottom-row {
text-align: left;
border-top: none;
}
.ExperimentView .left-border {
border-left: 1px solid #e2e2e2;
}
.ExperimentView-run-buttons .btn {
margin-left: 16px;
}
.ExperimentView-run-buttons .run-count {
font-size: 14px;
color: #888888;
}
.ExperimentView-evenRow {
background-color: #bbbbbb;
}
.ExperimentView-evenRow:hover {
background-color: #acacac;
}
.ExperimentView-oddRow:hover {
background-color: #e1e1e1;
}
.ExperimentView-downloadCsv {
float: right;
}
.ExperimentView-search-controls {
margin-top: 30px;
}
.ExperimentView-run-buttons{
margin-top: 30px;
margin-bottom: 30px;
}
.ExperimentView-paramKeyFilter, .ExperimentView-metricKeyFilter {
display: inline-block;
width: 100%;
min-width: 210px;
margin-top: 16px;
}
.ExperimentView-paramKeyFilter, .ExperimentView-metricKeyFilter, .ExperimentView-search {
padding-right: 16px;
}
.ExperimentView-search-buttons {
float: right;
width: 100px;
}
.ExperimentView-search-buttons .btn {
display: block;
width: 100%;
margin-bottom: 12px;
}
.ExperimentView-search-inputs {
margin-right: 100px;
}
.ExperimentView-search-controls .filter-label {
width: 110px;
float: left;
margin-top: 6px;
}
.ExperimentView-search-controls .filter-wrapper {
margin-left: 110px;
}
.ExperimentView-search-controls input {
width: 100%;
}
.search-button {
margin-right: 30px;
}
div.error-message {
margin-left: 100px;
/*width: auto;*/
}
span.error-message {
color: red;
}
.metric-filler-bg {
position: relative;
}
.metric-filler-fg {
background-color: #def1ff;
position: absolute;
left: -3px;
top: -1px;
height: 22px;
}
.metric-text {
position: relative;
}
.fixed-table-container {
border: none;
}
.fixed-table-toolbar .btn-group .keep-open .btn-secondary {
color: #868e96;
}
.fixed-table-toolbar .btn-group .show .btn-secondary {
color: #fff;
}
.fixed-table-toolbar .btn-group .keep-open .btn-secondary:hover {
background-color: #9c948a;
color: #fff;
}
.fixed-table-container .fixed-table-body {
overflow-x: auto;
overflow-y: auto;
height: 50%;
}
.page-list .btn-group .btn-secondary {
color: #868e96;
}
.page-list .btn-group .btn-secondary:hover {
background-color: #9c948a;
color: #fff;
}
hr.divider {
-moz-border-bottom-colors: none;
-moz-border-image: none;
-moz-border-left-colors: none;
-moz-border-right-colors: none;
-moz-border-top-colors: none;
border-color: #EEEEEE -moz-use-text-color #FFFFFF;
border-style: solid none;
border-width: 1px 0;
margin: 18px 0;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/static/css/HomePage.css
|
CSS
|
.outer-container {
display: -ms-flexbox;
display: flex;
}
.HomePage-experiment-list-container {
width: 10%;
min-width: 333px;
}
.experiment-view-container {
width: 80%;
}
.experiment-view-right {
width: 10%;
}
/* BEGIN css for when experiment list collapsed */
.experiment-page-container {
width: 80%;
margin: 0 auto;
}
.collapsed-expander-container {
float: left;
}
.expander {
display: inline-block;
background-color: #082142d6;
color: #FFFFFF;
font-size: 16px;
line-height: 24px;
width: 24px;
height: 24px;
text-align: center;
vertical-align: bottom;
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/static/css/bootstrap.min.css
|
CSS
|
/*!
* Bootswatch v4.1.3
* Homepage: https://bootswatch.com
* Copyright 2012-2018 Thomas Park
* Licensed under MIT
* Based on Bootstrap
*//*!
* Bootstrap v4.1.3 (https://getbootstrap.com/)
* Copyright 2011-2018 The Bootstrap Authors
* Copyright 2011-2018 Twitter, Inc.
* Licensed under MIT (https://github.com/twbs/bootstrap/blob/master/LICENSE)
*/@import url("https://fonts.googleapis.com/css?family=Ubuntu:400,700");:root{--blue:#007bff;--indigo:#6610f2;--purple:#772953;--pink:#e83e8c;--red:#DF382C;--orange:#E95420;--yellow:#EFB73E;--green:#38B44A;--teal:#20c997;--cyan:#17a2b8;--white:#fff;--gray:#868e96;--gray-dark:#333;--primary:#E95420;--secondary:#AEA79F;--success:#38B44A;--info:#17a2b8;--warning:#EFB73E;--danger:#DF382C;--light:#e9ecef;--dark:#772953;--breakpoint-xs:0;--breakpoint-sm:576px;--breakpoint-md:768px;--breakpoint-lg:992px;--breakpoint-xl:1200px;--font-family-sans-serif:"Ubuntu", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";--font-family-monospace:SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace}*,*::before,*::after{-webkit-box-sizing:border-box;box-sizing:border-box}html{font-family:sans-serif;line-height:1.15;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%;-ms-overflow-style:scrollbar;-webkit-tap-highlight-color:transparent}@-ms-viewport{width:device-width}article,aside,figcaption,figure,footer,header,hgroup,main,nav,section{display:block}body{margin:0;font-family:"Ubuntu", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";font-size:1rem;font-weight:400;line-height:1.5;color:#333;text-align:left;background-color:#fff}[tabindex="-1"]:focus{outline:0 !important}hr{-webkit-box-sizing:content-box;box-sizing:content-box;height:0;overflow:visible}h1,h2,h3,h4,h5,h6{margin-top:0;margin-bottom:0.5rem}p{margin-top:0;margin-bottom:1rem}abbr[title],abbr[data-original-title]{text-decoration:underline;-webkit-text-decoration:underline dotted;text-decoration:underline dotted;cursor:help;border-bottom:0}address{margin-bottom:1rem;font-style:normal;line-height:inherit}ol,ul,dl{margin-top:0;margin-bottom:1rem}ol ol,ul ul,ol ul,ul 
ol{margin-bottom:0}dt{font-weight:700}dd{margin-bottom:.5rem;margin-left:0}blockquote{margin:0 0 1rem}dfn{font-style:italic}b,strong{font-weight:bolder}small{font-size:80%}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sub{bottom:-.25em}sup{top:-.5em}a{color:#E95420;text-decoration:none;background-color:transparent;-webkit-text-decoration-skip:objects}a:hover{color:#ac3911;text-decoration:underline}a:not([href]):not([tabindex]){color:inherit;text-decoration:none}a:not([href]):not([tabindex]):hover,a:not([href]):not([tabindex]):focus{color:inherit;text-decoration:none}a:not([href]):not([tabindex]):focus{outline:0}pre,code,kbd,samp{font-family:SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;font-size:1em}pre{margin-top:0;margin-bottom:1rem;overflow:auto;-ms-overflow-style:scrollbar}figure{margin:0 0 1rem}img{vertical-align:middle;border-style:none}svg{overflow:hidden;vertical-align:middle}table{border-collapse:collapse}caption{padding-top:0.75rem;padding-bottom:0.75rem;color:#868e96;text-align:left;caption-side:bottom}th{text-align:inherit}label{display:inline-block;margin-bottom:0.5rem}button{border-radius:0}button:focus{outline:1px dotted;outline:5px auto -webkit-focus-ring-color}input,button,select,optgroup,textarea{margin:0;font-family:inherit;font-size:inherit;line-height:inherit}button,input{overflow:visible}button,select{text-transform:none}button,html 
[type="button"],[type="reset"],[type="submit"]{-webkit-appearance:button}button::-moz-focus-inner,[type="button"]::-moz-focus-inner,[type="reset"]::-moz-focus-inner,[type="submit"]::-moz-focus-inner{padding:0;border-style:none}input[type="radio"],input[type="checkbox"]{-webkit-box-sizing:border-box;box-sizing:border-box;padding:0}input[type="date"],input[type="time"],input[type="datetime-local"],input[type="month"]{-webkit-appearance:listbox}textarea{overflow:auto;resize:vertical}fieldset{min-width:0;padding:0;margin:0;border:0}legend{display:block;width:100%;max-width:100%;padding:0;margin-bottom:.5rem;font-size:1.5rem;line-height:inherit;color:inherit;white-space:normal}progress{vertical-align:baseline}[type="number"]::-webkit-inner-spin-button,[type="number"]::-webkit-outer-spin-button{height:auto}[type="search"]{outline-offset:-2px;-webkit-appearance:none}[type="search"]::-webkit-search-cancel-button,[type="search"]::-webkit-search-decoration{-webkit-appearance:none}::-webkit-file-upload-button{font:inherit;-webkit-appearance:button}output{display:inline-block}summary{display:list-item;cursor:pointer}template{display:none}[hidden]{display:none !important}h1,h2,h3,h4,h5,h6,.h1,.h2,.h3,.h4,.h5,.h6{margin-bottom:0.5rem;font-family:inherit;font-weight:500;line-height:1.2;color:inherit}h1,.h1{font-size:2.5rem}h2,.h2{font-size:2rem}h3,.h3{font-size:1.75rem}h4,.h4{font-size:1.5rem}h5,.h5{font-size:1.25rem}h6,.h6{font-size:1rem}.lead{font-size:1.25rem;font-weight:300}.display-1{font-size:6rem;font-weight:300;line-height:1.2}.display-2{font-size:5.5rem;font-weight:300;line-height:1.2}.display-3{font-size:4.5rem;font-weight:300;line-height:1.2}.display-4{font-size:3.5rem;font-weight:300;line-height:1.2}hr{margin-top:1rem;margin-bottom:1rem;border:0;border-top:1px solid 
rgba(0,0,0,0.1)}small,.small{font-size:80%;font-weight:400}mark,.mark{padding:0.2em;background-color:#fcf8e3}.list-unstyled{padding-left:0;list-style:none}.list-inline{padding-left:0;list-style:none}.list-inline-item{display:inline-block}.list-inline-item:not(:last-child){margin-right:0.5rem}.initialism{font-size:90%;text-transform:uppercase}.blockquote{margin-bottom:1rem;font-size:1.25rem}.blockquote-footer{display:block;font-size:80%;color:#868e96}.blockquote-footer::before{content:"\2014 \00A0"}.img-fluid{max-width:100%;height:auto}.img-thumbnail{padding:0.25rem;background-color:#fff;border:1px solid #dee2e6;border-radius:0.25rem;max-width:100%;height:auto}.figure{display:inline-block}.figure-img{margin-bottom:0.5rem;line-height:1}.figure-caption{font-size:90%;color:#868e96}code{font-size:87.5%;color:#e83e8c;word-break:break-word}a>code{color:inherit}kbd{padding:0.2rem 0.4rem;font-size:87.5%;color:#fff;background-color:#212529;border-radius:0.2rem}kbd kbd{padding:0;font-size:100%;font-weight:700}pre{display:block;font-size:87.5%;color:#212529}pre code{font-size:inherit;color:inherit;word-break:normal}.pre-scrollable{max-height:340px;overflow-y:scroll}.container{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}@media (min-width: 576px){.container{max-width:540px}}@media (min-width: 768px){.container{max-width:720px}}@media (min-width: 992px){.container{max-width:960px}}@media (min-width: 
1200px){.container{max-width:1140px}}.container-fluid{width:100%;padding-right:15px;padding-left:15px;margin-right:auto;margin-left:auto}.row{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-15px;margin-left:-15px}.no-gutters{margin-right:0;margin-left:0}.no-gutters>.col,.no-gutters>[class*="col-"]{padding-right:0;padding-left:0}.col-1,.col-2,.col-3,.col-4,.col-5,.col-6,.col-7,.col-8,.col-9,.col-10,.col-11,.col-12,.col,.col-auto,.col-sm-1,.col-sm-2,.col-sm-3,.col-sm-4,.col-sm-5,.col-sm-6,.col-sm-7,.col-sm-8,.col-sm-9,.col-sm-10,.col-sm-11,.col-sm-12,.col-sm,.col-sm-auto,.col-md-1,.col-md-2,.col-md-3,.col-md-4,.col-md-5,.col-md-6,.col-md-7,.col-md-8,.col-md-9,.col-md-10,.col-md-11,.col-md-12,.col-md,.col-md-auto,.col-lg-1,.col-lg-2,.col-lg-3,.col-lg-4,.col-lg-5,.col-lg-6,.col-lg-7,.col-lg-8,.col-lg-9,.col-lg-10,.col-lg-11,.col-lg-12,.col-lg,.col-lg-auto,.col-xl-1,.col-xl-2,.col-xl-3,.col-xl-4,.col-xl-5,.col-xl-6,.col-xl-7,.col-xl-8,.col-xl-9,.col-xl-10,.col-xl-11,.col-xl-12,.col-xl,.col-xl-auto{position:relative;width:100%;min-height:1px;padding-right:15px;padding-left:15px}.col{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 
58.3333333333%;max-width:58.3333333333%}.col-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-1{margin-left:8.3333333333%}.offset-2{margin-left:16.6666666667%}.offset-3{margin-left:25%}.offset-4{margin-left:33.3333333333%}.offset-5{margin-left:41.6666666667%}.offset-6{margin-left:50%}.offset-7{margin-left:58.3333333333%}.offset-8{margin-left:66.6666666667%}.offset-9{margin-left:75%}.offset-10{margin-left:83.3333333333%}.offset-11{margin-left:91.6666666667%}@media (min-width: 
576px){.col-sm{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-sm-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-sm-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-sm-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-sm-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-sm-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-sm-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-sm-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-sm-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-sm-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-sm-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-sm-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-sm-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-sm-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-sm-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-sm-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-sm-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-sm-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-sm-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-sm-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-sm-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-sm-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-sm-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-sm-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-sm-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-sm-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-sm-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-sm-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-sm-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-sm-0{margin-left:0}.offset-sm-1{margin-left:8.3333333333%}.offset-sm-2{margin-left:16.6666666667%}.offset-sm-3{margin-left:25%}.offset-sm-4{margin-left:33.3333333333%}.offset-sm-5{margin-left:41.6666666667%}.offset-sm-6{margin-left:50%}.offset-sm-7{margin-left:58.3333333333%}.offset-sm-8{margin-left:66.6666666667%}.offset-sm-9{margin-left:75%}.offset-sm-10{margin-left:83.3333333333%}.offset-sm-11{margin-left:91.6666666667%}}@media (min-width: 768px){.col-md{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-md-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-md-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-md-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-md-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 
25%;max-width:25%}.col-md-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-md-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-md-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-md-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-md-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-md-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-md-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-md-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-md-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-md-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-md-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-md-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-md-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-md-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-md-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-md-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-md-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-md-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-md-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-md-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-md-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-md-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-md-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-md-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-md-0{margin-left:0}.offset-md-1{margin-left:8.3333333333%}.offset-md-2{margin-left:16.6666666667%}.offset-md-3{ma
rgin-left:25%}.offset-md-4{margin-left:33.3333333333%}.offset-md-5{margin-left:41.6666666667%}.offset-md-6{margin-left:50%}.offset-md-7{margin-left:58.3333333333%}.offset-md-8{margin-left:66.6666666667%}.offset-md-9{margin-left:75%}.offset-md-10{margin-left:83.3333333333%}.offset-md-11{margin-left:91.6666666667%}}@media (min-width: 992px){.col-lg{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-lg-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-lg-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-lg-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-lg-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 25%;max-width:25%}.col-lg-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-lg-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-lg-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-lg-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-lg-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-lg-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-lg-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-lg-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-lg-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 
100%;max-width:100%}.order-lg-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-lg-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-lg-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-lg-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-lg-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-lg-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-lg-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-lg-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-lg-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-lg-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-lg-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-lg-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-lg-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-lg-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-lg-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-lg-0{margin-left:0}.offset-lg-1{margin-left:8.3333333333%}.offset-lg-2{margin-left:16.6666666667%}.offset-lg-3{margin-left:25%}.offset-lg-4{margin-left:33.3333333333%}.offset-lg-5{margin-left:41.6666666667%}.offset-lg-6{margin-left:50%}.offset-lg-7{margin-left:58.3333333333%}.offset-lg-8{margin-left:66.6666666667%}.offset-lg-9{margin-left:75%}.offset-lg-10{margin-left:83.3333333333%}.offset-lg-11{margin-left:91.6666666667%}}@media (min-width: 1200px){.col-xl{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;max-width:100%}.col-xl-auto{-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 auto;width:auto;max-width:none}.col-xl-1{-webkit-box-flex:0;-ms-flex:0 0 8.3333333333%;flex:0 0 8.3333333333%;max-width:8.3333333333%}.col-xl-2{-webkit-box-flex:0;-ms-flex:0 0 16.6666666667%;flex:0 0 16.6666666667%;max-width:16.6666666667%}.col-xl-3{-webkit-box-flex:0;-ms-flex:0 0 25%;flex:0 0 
25%;max-width:25%}.col-xl-4{-webkit-box-flex:0;-ms-flex:0 0 33.3333333333%;flex:0 0 33.3333333333%;max-width:33.3333333333%}.col-xl-5{-webkit-box-flex:0;-ms-flex:0 0 41.6666666667%;flex:0 0 41.6666666667%;max-width:41.6666666667%}.col-xl-6{-webkit-box-flex:0;-ms-flex:0 0 50%;flex:0 0 50%;max-width:50%}.col-xl-7{-webkit-box-flex:0;-ms-flex:0 0 58.3333333333%;flex:0 0 58.3333333333%;max-width:58.3333333333%}.col-xl-8{-webkit-box-flex:0;-ms-flex:0 0 66.6666666667%;flex:0 0 66.6666666667%;max-width:66.6666666667%}.col-xl-9{-webkit-box-flex:0;-ms-flex:0 0 75%;flex:0 0 75%;max-width:75%}.col-xl-10{-webkit-box-flex:0;-ms-flex:0 0 83.3333333333%;flex:0 0 83.3333333333%;max-width:83.3333333333%}.col-xl-11{-webkit-box-flex:0;-ms-flex:0 0 91.6666666667%;flex:0 0 91.6666666667%;max-width:91.6666666667%}.col-xl-12{-webkit-box-flex:0;-ms-flex:0 0 100%;flex:0 0 100%;max-width:100%}.order-xl-first{-webkit-box-ordinal-group:0;-ms-flex-order:-1;order:-1}.order-xl-last{-webkit-box-ordinal-group:14;-ms-flex-order:13;order:13}.order-xl-0{-webkit-box-ordinal-group:1;-ms-flex-order:0;order:0}.order-xl-1{-webkit-box-ordinal-group:2;-ms-flex-order:1;order:1}.order-xl-2{-webkit-box-ordinal-group:3;-ms-flex-order:2;order:2}.order-xl-3{-webkit-box-ordinal-group:4;-ms-flex-order:3;order:3}.order-xl-4{-webkit-box-ordinal-group:5;-ms-flex-order:4;order:4}.order-xl-5{-webkit-box-ordinal-group:6;-ms-flex-order:5;order:5}.order-xl-6{-webkit-box-ordinal-group:7;-ms-flex-order:6;order:6}.order-xl-7{-webkit-box-ordinal-group:8;-ms-flex-order:7;order:7}.order-xl-8{-webkit-box-ordinal-group:9;-ms-flex-order:8;order:8}.order-xl-9{-webkit-box-ordinal-group:10;-ms-flex-order:9;order:9}.order-xl-10{-webkit-box-ordinal-group:11;-ms-flex-order:10;order:10}.order-xl-11{-webkit-box-ordinal-group:12;-ms-flex-order:11;order:11}.order-xl-12{-webkit-box-ordinal-group:13;-ms-flex-order:12;order:12}.offset-xl-0{margin-left:0}.offset-xl-1{margin-left:8.3333333333%}.offset-xl-2{margin-left:16.6666666667%}.offset-xl-3{ma
rgin-left:25%}.offset-xl-4{margin-left:33.3333333333%}.offset-xl-5{margin-left:41.6666666667%}.offset-xl-6{margin-left:50%}.offset-xl-7{margin-left:58.3333333333%}.offset-xl-8{margin-left:66.6666666667%}.offset-xl-9{margin-left:75%}.offset-xl-10{margin-left:83.3333333333%}.offset-xl-11{margin-left:91.6666666667%}}.table{width:100%;margin-bottom:1rem;background-color:transparent}.table th,.table td{padding:0.75rem;vertical-align:top;border-top:1px solid #dee2e6}.table thead th{vertical-align:bottom;border-bottom:2px solid #dee2e6}.table tbody+tbody{border-top:2px solid #dee2e6}.table .table{background-color:#fff}.table-sm th,.table-sm td{padding:0.3rem}.table-bordered{border:1px solid #dee2e6}.table-bordered th,.table-bordered td{border:1px solid #dee2e6}.table-bordered thead th,.table-bordered thead td{border-bottom-width:2px}.table-borderless th,.table-borderless td,.table-borderless thead th,.table-borderless tbody+tbody{border:0}.table-striped tbody tr:nth-of-type(odd){background-color:rgba(0,0,0,0.05)}.table-hover tbody tr:hover{background-color:rgba(0,0,0,0.075)}.table-primary,.table-primary>th,.table-primary>td{background-color:#f9cfc1}.table-hover .table-primary:hover{background-color:#f7bdaa}.table-hover .table-primary:hover>td,.table-hover .table-primary:hover>th{background-color:#f7bdaa}.table-secondary,.table-secondary>th,.table-secondary>td{background-color:#e8e6e4}.table-hover .table-secondary:hover{background-color:#dcd9d6}.table-hover .table-secondary:hover>td,.table-hover .table-secondary:hover>th{background-color:#dcd9d6}.table-success,.table-success>th,.table-success>td{background-color:#c7eacc}.table-hover .table-success:hover{background-color:#b4e3bb}.table-hover .table-success:hover>td,.table-hover .table-success:hover>th{background-color:#b4e3bb}.table-info,.table-info>th,.table-info>td{background-color:#bee5eb}.table-hover .table-info:hover{background-color:#abdde5}.table-hover .table-info:hover>td,.table-hover 
.table-info:hover>th{background-color:#abdde5}.table-warning,.table-warning>th,.table-warning>td{background-color:#fbebc9}.table-hover .table-warning:hover{background-color:#f9e2b1}.table-hover .table-warning:hover>td,.table-hover .table-warning:hover>th{background-color:#f9e2b1}.table-danger,.table-danger>th,.table-danger>td{background-color:#f6c7c4}.table-hover .table-danger:hover{background-color:#f3b2ae}.table-hover .table-danger:hover>td,.table-hover .table-danger:hover>th{background-color:#f3b2ae}.table-light,.table-light>th,.table-light>td{background-color:#f9fafb}.table-hover .table-light:hover{background-color:#eaedf1}.table-hover .table-light:hover>td,.table-hover .table-light:hover>th{background-color:#eaedf1}.table-dark,.table-dark>th,.table-dark>td{background-color:#d9c3cf}.table-hover .table-dark:hover{background-color:#cfb3c3}.table-hover .table-dark:hover>td,.table-hover .table-dark:hover>th{background-color:#cfb3c3}.table-active,.table-active>th,.table-active>td{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover{background-color:rgba(0,0,0,0.075)}.table-hover .table-active:hover>td,.table-hover .table-active:hover>th{background-color:rgba(0,0,0,0.075)}.table .thead-dark th{color:#fff;background-color:#772953;border-color:#642246}.table .thead-light th{color:#495057;background-color:#e9ecef;border-color:#dee2e6}.table-dark{color:#fff;background-color:#772953}.table-dark th,.table-dark td,.table-dark thead th{border-color:#642246}.table-dark.table-bordered{border:0}.table-dark.table-striped tbody tr:nth-of-type(odd){background-color:rgba(255,255,255,0.05)}.table-dark.table-hover tbody tr:hover{background-color:rgba(255,255,255,0.075)}@media (max-width: 575.98px){.table-responsive-sm{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-sm>.table-bordered{border:0}}@media (max-width: 
767.98px){.table-responsive-md{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-md>.table-bordered{border:0}}@media (max-width: 991.98px){.table-responsive-lg{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-lg>.table-bordered{border:0}}@media (max-width: 1199.98px){.table-responsive-xl{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive-xl>.table-bordered{border:0}}.table-responsive{display:block;width:100%;overflow-x:auto;-webkit-overflow-scrolling:touch;-ms-overflow-style:-ms-autohiding-scrollbar}.table-responsive>.table-bordered{border:0}.form-control{display:block;width:100%;height:calc(2.25rem + 2px);padding:0.375rem 0.75rem;font-size:1rem;line-height:1.5;color:#495057;background-color:#fff;background-clip:padding-box;border:1px solid #ced4da;border-radius:0.25rem;-webkit-transition:border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out}@media screen and (prefers-reduced-motion: reduce){.form-control{-webkit-transition:none;transition:none}}.form-control::-ms-expand{background-color:transparent;border:0}.form-control:focus{color:#495057;background-color:#fff;border-color:#f4ad94;outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.25);box-shadow:0 0 0 0.2rem 
rgba(233,84,32,0.25)}.form-control::-webkit-input-placeholder{color:#868e96;opacity:1}.form-control:-ms-input-placeholder{color:#868e96;opacity:1}.form-control::-ms-input-placeholder{color:#868e96;opacity:1}.form-control::placeholder{color:#868e96;opacity:1}.form-control:disabled,.form-control[readonly]{background-color:#e9ecef;opacity:1}select.form-control:focus::-ms-value{color:#495057;background-color:#fff}.form-control-file,.form-control-range{display:block;width:100%}.col-form-label{padding-top:calc(0.375rem + 1px);padding-bottom:calc(0.375rem + 1px);margin-bottom:0;font-size:inherit;line-height:1.5}.col-form-label-lg{padding-top:calc(0.5rem + 1px);padding-bottom:calc(0.5rem + 1px);font-size:1.25rem;line-height:1.5}.col-form-label-sm{padding-top:calc(0.25rem + 1px);padding-bottom:calc(0.25rem + 1px);font-size:0.875rem;line-height:1.5}.form-control-plaintext{display:block;width:100%;padding-top:0.375rem;padding-bottom:0.375rem;margin-bottom:0;line-height:1.5;color:#333;background-color:transparent;border:solid transparent;border-width:1px 0}.form-control-plaintext.form-control-sm,.form-control-plaintext.form-control-lg{padding-right:0;padding-left:0}.form-control-sm{height:calc(1.8125rem + 2px);padding:0.25rem 0.5rem;font-size:0.875rem;line-height:1.5;border-radius:0.2rem}.form-control-lg{height:calc(2.875rem + 2px);padding:0.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:0.3rem}select.form-control[size],select.form-control[multiple]{height:auto}textarea.form-control{height:auto}.form-group{margin-bottom:1rem}.form-text{display:block;margin-top:0.25rem}.form-row{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;margin-right:-5px;margin-left:-5px}.form-row>.col,.form-row>[class*="col-"]{padding-right:5px;padding-left:5px}.form-check{position:relative;display:block;padding-left:1.25rem}.form-check-input{position:absolute;margin-top:0.3rem;margin-left:-1.25rem}.form-check-input:disabled ~ 
.form-check-label{color:#868e96}.form-check-label{margin-bottom:0}.form-check-inline{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;padding-left:0;margin-right:0.75rem}.form-check-inline .form-check-input{position:static;margin-top:0;margin-right:0.3125rem;margin-left:0}.valid-feedback{display:none;width:100%;margin-top:0.25rem;font-size:80%;color:#38B44A}.valid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:0.25rem 0.5rem;margin-top:.1rem;font-size:0.875rem;line-height:1.5;color:#fff;background-color:rgba(56,180,74,0.9);border-radius:0.25rem}.was-validated .form-control:valid,.form-control.is-valid,.was-validated .custom-select:valid,.custom-select.is-valid{border-color:#38B44A}.was-validated .form-control:valid:focus,.form-control.is-valid:focus,.was-validated .custom-select:valid:focus,.custom-select.is-valid:focus{border-color:#38B44A;-webkit-box-shadow:0 0 0 0.2rem rgba(56,180,74,0.25);box-shadow:0 0 0 0.2rem rgba(56,180,74,0.25)}.was-validated .form-control:valid ~ .valid-feedback,.was-validated .form-control:valid ~ .valid-tooltip,.form-control.is-valid ~ .valid-feedback,.form-control.is-valid ~ .valid-tooltip,.was-validated .custom-select:valid ~ .valid-feedback,.was-validated .custom-select:valid ~ .valid-tooltip,.custom-select.is-valid ~ .valid-feedback,.custom-select.is-valid ~ .valid-tooltip{display:block}.was-validated .form-control-file:valid ~ .valid-feedback,.was-validated .form-control-file:valid ~ .valid-tooltip,.form-control-file.is-valid ~ .valid-feedback,.form-control-file.is-valid ~ .valid-tooltip{display:block}.was-validated .form-check-input:valid ~ .form-check-label,.form-check-input.is-valid ~ .form-check-label{color:#38B44A}.was-validated .form-check-input:valid ~ .valid-feedback,.was-validated .form-check-input:valid ~ .valid-tooltip,.form-check-input.is-valid ~ .valid-feedback,.form-check-input.is-valid ~ 
.valid-tooltip{display:block}.was-validated .custom-control-input:valid ~ .custom-control-label,.custom-control-input.is-valid ~ .custom-control-label{color:#38B44A}.was-validated .custom-control-input:valid ~ .custom-control-label::before,.custom-control-input.is-valid ~ .custom-control-label::before{background-color:#8fdc9a}.was-validated .custom-control-input:valid ~ .valid-feedback,.was-validated .custom-control-input:valid ~ .valid-tooltip,.custom-control-input.is-valid ~ .valid-feedback,.custom-control-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-control-input:valid:checked ~ .custom-control-label::before,.custom-control-input.is-valid:checked ~ .custom-control-label::before{background-color:#55ca66}.was-validated .custom-control-input:valid:focus ~ .custom-control-label::before,.custom-control-input.is-valid:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(56,180,74,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(56,180,74,0.25)}.was-validated .custom-file-input:valid ~ .custom-file-label,.custom-file-input.is-valid ~ .custom-file-label{border-color:#38B44A}.was-validated .custom-file-input:valid ~ .custom-file-label::after,.custom-file-input.is-valid ~ .custom-file-label::after{border-color:inherit}.was-validated .custom-file-input:valid ~ .valid-feedback,.was-validated .custom-file-input:valid ~ .valid-tooltip,.custom-file-input.is-valid ~ .valid-feedback,.custom-file-input.is-valid ~ .valid-tooltip{display:block}.was-validated .custom-file-input:valid:focus ~ .custom-file-label,.custom-file-input.is-valid:focus ~ .custom-file-label{-webkit-box-shadow:0 0 0 0.2rem rgba(56,180,74,0.25);box-shadow:0 0 0 0.2rem rgba(56,180,74,0.25)}.invalid-feedback{display:none;width:100%;margin-top:0.25rem;font-size:80%;color:#DF382C}.invalid-tooltip{position:absolute;top:100%;z-index:5;display:none;max-width:100%;padding:0.25rem 
0.5rem;margin-top:.1rem;font-size:0.875rem;line-height:1.5;color:#fff;background-color:rgba(223,56,44,0.9);border-radius:0.25rem}.was-validated .form-control:invalid,.form-control.is-invalid,.was-validated .custom-select:invalid,.custom-select.is-invalid{border-color:#DF382C}.was-validated .form-control:invalid:focus,.form-control.is-invalid:focus,.was-validated .custom-select:invalid:focus,.custom-select.is-invalid:focus{border-color:#DF382C;-webkit-box-shadow:0 0 0 0.2rem rgba(223,56,44,0.25);box-shadow:0 0 0 0.2rem rgba(223,56,44,0.25)}.was-validated .form-control:invalid ~ .invalid-feedback,.was-validated .form-control:invalid ~ .invalid-tooltip,.form-control.is-invalid ~ .invalid-feedback,.form-control.is-invalid ~ .invalid-tooltip,.was-validated .custom-select:invalid ~ .invalid-feedback,.was-validated .custom-select:invalid ~ .invalid-tooltip,.custom-select.is-invalid ~ .invalid-feedback,.custom-select.is-invalid ~ .invalid-tooltip{display:block}.was-validated .form-control-file:invalid ~ .invalid-feedback,.was-validated .form-control-file:invalid ~ .invalid-tooltip,.form-control-file.is-invalid ~ .invalid-feedback,.form-control-file.is-invalid ~ .invalid-tooltip{display:block}.was-validated .form-check-input:invalid ~ .form-check-label,.form-check-input.is-invalid ~ .form-check-label{color:#DF382C}.was-validated .form-check-input:invalid ~ .invalid-feedback,.was-validated .form-check-input:invalid ~ .invalid-tooltip,.form-check-input.is-invalid ~ .invalid-feedback,.form-check-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-control-input:invalid ~ .custom-control-label,.custom-control-input.is-invalid ~ .custom-control-label{color:#DF382C}.was-validated .custom-control-input:invalid ~ .custom-control-label::before,.custom-control-input.is-invalid ~ .custom-control-label::before{background-color:#f0a09b}.was-validated .custom-control-input:invalid ~ .invalid-feedback,.was-validated .custom-control-input:invalid ~ 
.invalid-tooltip,.custom-control-input.is-invalid ~ .invalid-feedback,.custom-control-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-control-input:invalid:checked ~ .custom-control-label::before,.custom-control-input.is-invalid:checked ~ .custom-control-label::before{background-color:#e66258}.was-validated .custom-control-input:invalid:focus ~ .custom-control-label::before,.custom-control-input.is-invalid:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(223,56,44,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(223,56,44,0.25)}.was-validated .custom-file-input:invalid ~ .custom-file-label,.custom-file-input.is-invalid ~ .custom-file-label{border-color:#DF382C}.was-validated .custom-file-input:invalid ~ .custom-file-label::after,.custom-file-input.is-invalid ~ .custom-file-label::after{border-color:inherit}.was-validated .custom-file-input:invalid ~ .invalid-feedback,.was-validated .custom-file-input:invalid ~ .invalid-tooltip,.custom-file-input.is-invalid ~ .invalid-feedback,.custom-file-input.is-invalid ~ .invalid-tooltip{display:block}.was-validated .custom-file-input:invalid:focus ~ .custom-file-label,.custom-file-input.is-invalid:focus ~ .custom-file-label{-webkit-box-shadow:0 0 0 0.2rem rgba(223,56,44,0.25);box-shadow:0 0 0 0.2rem rgba(223,56,44,0.25)}.form-inline{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.form-inline .form-check{width:100%}@media (min-width: 576px){.form-inline label{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;margin-bottom:0}.form-inline .form-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-flex:0;-ms-flex:0 0 auto;flex:0 0 
auto;-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;margin-bottom:0}.form-inline .form-control{display:inline-block;width:auto;vertical-align:middle}.form-inline .form-control-plaintext{display:inline-block}.form-inline .input-group,.form-inline .custom-select{width:auto}.form-inline .form-check{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;width:auto;padding-left:0}.form-inline .form-check-input{position:relative;margin-top:0;margin-right:0.25rem;margin-left:0}.form-inline .custom-control{-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center}.form-inline .custom-control-label{margin-bottom:0}}.btn{display:inline-block;font-weight:400;text-align:center;white-space:nowrap;vertical-align:middle;-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;border:1px solid transparent;padding:0.375rem 0.75rem;font-size:1rem;line-height:1.5;border-radius:0.25rem;-webkit-transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:color 0.15s ease-in-out, background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out}@media screen and (prefers-reduced-motion: 
reduce){.btn{-webkit-transition:none;transition:none}}.btn:hover,.btn:focus{text-decoration:none}.btn:focus,.btn.focus{outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.25);box-shadow:0 0 0 0.2rem rgba(233,84,32,0.25)}.btn.disabled,.btn:disabled{opacity:0.65}.btn:not(:disabled):not(.disabled){cursor:pointer}a.btn.disabled,fieldset:disabled a.btn{pointer-events:none}.btn-primary{color:#fff;background-color:#E95420;border-color:#E95420}.btn-primary:hover{color:#fff;background-color:#ce4414;border-color:#c34113}.btn-primary:focus,.btn-primary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.5);box-shadow:0 0 0 0.2rem rgba(233,84,32,0.5)}.btn-primary.disabled,.btn-primary:disabled{color:#fff;background-color:#E95420;border-color:#E95420}.btn-primary:not(:disabled):not(.disabled):active,.btn-primary:not(:disabled):not(.disabled).active,.show>.btn-primary.dropdown-toggle{color:#fff;background-color:#c34113;border-color:#b73d12}.btn-primary:not(:disabled):not(.disabled):active:focus,.btn-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-primary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.5);box-shadow:0 0 0 0.2rem rgba(233,84,32,0.5)}.btn-secondary{color:#fff;background-color:#AEA79F;border-color:#AEA79F}.btn-secondary:hover{color:#fff;background-color:#9c948a;border-color:#978e83}.btn-secondary:focus,.btn-secondary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(174,167,159,0.5);box-shadow:0 0 0 0.2rem rgba(174,167,159,0.5)}.btn-secondary.disabled,.btn-secondary:disabled{color:#fff;background-color:#AEA79F;border-color:#AEA79F}.btn-secondary:not(:disabled):not(.disabled):active,.btn-secondary:not(:disabled):not(.disabled).active,.show>.btn-secondary.dropdown-toggle{color:#fff;background-color:#978e83;border-color:#91877c}.btn-secondary:not(:disabled):not(.disabled):active:focus,.btn-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-secondary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem 
rgba(174,167,159,0.5);box-shadow:0 0 0 0.2rem rgba(174,167,159,0.5)}.btn-success{color:#fff;background-color:#38B44A;border-color:#38B44A}.btn-success:hover{color:#fff;background-color:#2f973e;border-color:#2c8d3a}.btn-success:focus,.btn-success.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(56,180,74,0.5);box-shadow:0 0 0 0.2rem rgba(56,180,74,0.5)}.btn-success.disabled,.btn-success:disabled{color:#fff;background-color:#38B44A;border-color:#38B44A}.btn-success:not(:disabled):not(.disabled):active,.btn-success:not(:disabled):not(.disabled).active,.show>.btn-success.dropdown-toggle{color:#fff;background-color:#2c8d3a;border-color:#298336}.btn-success:not(:disabled):not(.disabled):active:focus,.btn-success:not(:disabled):not(.disabled).active:focus,.show>.btn-success.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(56,180,74,0.5);box-shadow:0 0 0 0.2rem rgba(56,180,74,0.5)}.btn-info{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:hover{color:#fff;background-color:#138496;border-color:#117a8b}.btn-info:focus,.btn-info.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5);box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5)}.btn-info.disabled,.btn-info:disabled{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-info:not(:disabled):not(.disabled):active,.btn-info:not(:disabled):not(.disabled).active,.show>.btn-info.dropdown-toggle{color:#fff;background-color:#117a8b;border-color:#10707f}.btn-info:not(:disabled):not(.disabled):active:focus,.btn-info:not(:disabled):not(.disabled).active:focus,.show>.btn-info.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5);box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5)}.btn-warning{color:#fff;background-color:#EFB73E;border-color:#EFB73E}.btn-warning:hover{color:#fff;background-color:#ecaa1b;border-color:#e7a413}.btn-warning:focus,.btn-warning.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(239,183,62,0.5);box-shadow:0 0 0 0.2rem 
rgba(239,183,62,0.5)}.btn-warning.disabled,.btn-warning:disabled{color:#fff;background-color:#EFB73E;border-color:#EFB73E}.btn-warning:not(:disabled):not(.disabled):active,.btn-warning:not(:disabled):not(.disabled).active,.show>.btn-warning.dropdown-toggle{color:#fff;background-color:#e7a413;border-color:#db9c12}.btn-warning:not(:disabled):not(.disabled):active:focus,.btn-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-warning.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(239,183,62,0.5);box-shadow:0 0 0 0.2rem rgba(239,183,62,0.5)}.btn-danger{color:#fff;background-color:#DF382C;border-color:#DF382C}.btn-danger:hover{color:#fff;background-color:#c7291e;border-color:#bc271c}.btn-danger:focus,.btn-danger.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5);box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5)}.btn-danger.disabled,.btn-danger:disabled{color:#fff;background-color:#DF382C;border-color:#DF382C}.btn-danger:not(:disabled):not(.disabled):active,.btn-danger:not(:disabled):not(.disabled).active,.show>.btn-danger.dropdown-toggle{color:#fff;background-color:#bc271c;border-color:#b0251b}.btn-danger:not(:disabled):not(.disabled):active:focus,.btn-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-danger.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5);box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5)}.btn-light{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-light:hover{color:#212529;background-color:#d3d9df;border-color:#cbd3da}.btn-light:focus,.btn-light.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem 
rgba(233,236,239,0.5)}.btn-light.disabled,.btn-light:disabled{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-light:not(:disabled):not(.disabled):active,.btn-light:not(:disabled):not(.disabled).active,.show>.btn-light.dropdown-toggle{color:#212529;background-color:#cbd3da;border-color:#c4ccd4}.btn-light:not(:disabled):not(.disabled):active:focus,.btn-light:not(:disabled):not(.disabled).active:focus,.show>.btn-light.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5)}.btn-dark{color:#fff;background-color:#772953;border-color:#772953}.btn-dark:hover{color:#fff;background-color:#5b1f3f;border-color:#511c39}.btn-dark:focus,.btn-dark.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5);box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5)}.btn-dark.disabled,.btn-dark:disabled{color:#fff;background-color:#772953;border-color:#772953}.btn-dark:not(:disabled):not(.disabled):active,.btn-dark:not(:disabled):not(.disabled).active,.show>.btn-dark.dropdown-toggle{color:#fff;background-color:#511c39;border-color:#481932}.btn-dark:not(:disabled):not(.disabled):active:focus,.btn-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-dark.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5);box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5)}.btn-outline-primary{color:#E95420;background-color:transparent;background-image:none;border-color:#E95420}.btn-outline-primary:hover{color:#fff;background-color:#E95420;border-color:#E95420}.btn-outline-primary:focus,.btn-outline-primary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.5);box-shadow:0 0 0 0.2rem 
rgba(233,84,32,0.5)}.btn-outline-primary.disabled,.btn-outline-primary:disabled{color:#E95420;background-color:transparent}.btn-outline-primary:not(:disabled):not(.disabled):active,.btn-outline-primary:not(:disabled):not(.disabled).active,.show>.btn-outline-primary.dropdown-toggle{color:#fff;background-color:#E95420;border-color:#E95420}.btn-outline-primary:not(:disabled):not(.disabled):active:focus,.btn-outline-primary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-primary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.5);box-shadow:0 0 0 0.2rem rgba(233,84,32,0.5)}.btn-outline-secondary{color:#AEA79F;background-color:transparent;background-image:none;border-color:#AEA79F}.btn-outline-secondary:hover{color:#fff;background-color:#AEA79F;border-color:#AEA79F}.btn-outline-secondary:focus,.btn-outline-secondary.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(174,167,159,0.5);box-shadow:0 0 0 0.2rem rgba(174,167,159,0.5)}.btn-outline-secondary.disabled,.btn-outline-secondary:disabled{color:#AEA79F;background-color:transparent}.btn-outline-secondary:not(:disabled):not(.disabled):active,.btn-outline-secondary:not(:disabled):not(.disabled).active,.show>.btn-outline-secondary.dropdown-toggle{color:#fff;background-color:#AEA79F;border-color:#AEA79F}.btn-outline-secondary:not(:disabled):not(.disabled):active:focus,.btn-outline-secondary:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-secondary.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(174,167,159,0.5);box-shadow:0 0 0 0.2rem rgba(174,167,159,0.5)}.btn-outline-success{color:#38B44A;background-color:transparent;background-image:none;border-color:#38B44A}.btn-outline-success:hover{color:#fff;background-color:#38B44A;border-color:#38B44A}.btn-outline-success:focus,.btn-outline-success.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(56,180,74,0.5);box-shadow:0 0 0 0.2rem 
rgba(56,180,74,0.5)}.btn-outline-success.disabled,.btn-outline-success:disabled{color:#38B44A;background-color:transparent}.btn-outline-success:not(:disabled):not(.disabled):active,.btn-outline-success:not(:disabled):not(.disabled).active,.show>.btn-outline-success.dropdown-toggle{color:#fff;background-color:#38B44A;border-color:#38B44A}.btn-outline-success:not(:disabled):not(.disabled):active:focus,.btn-outline-success:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-success.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(56,180,74,0.5);box-shadow:0 0 0 0.2rem rgba(56,180,74,0.5)}.btn-outline-info{color:#17a2b8;background-color:transparent;background-image:none;border-color:#17a2b8}.btn-outline-info:hover{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:focus,.btn-outline-info.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5);box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5)}.btn-outline-info.disabled,.btn-outline-info:disabled{color:#17a2b8;background-color:transparent}.btn-outline-info:not(:disabled):not(.disabled):active,.btn-outline-info:not(:disabled):not(.disabled).active,.show>.btn-outline-info.dropdown-toggle{color:#fff;background-color:#17a2b8;border-color:#17a2b8}.btn-outline-info:not(:disabled):not(.disabled):active:focus,.btn-outline-info:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-info.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5);box-shadow:0 0 0 0.2rem rgba(23,162,184,0.5)}.btn-outline-warning{color:#EFB73E;background-color:transparent;background-image:none;border-color:#EFB73E}.btn-outline-warning:hover{color:#fff;background-color:#EFB73E;border-color:#EFB73E}.btn-outline-warning:focus,.btn-outline-warning.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(239,183,62,0.5);box-shadow:0 0 0 0.2rem 
rgba(239,183,62,0.5)}.btn-outline-warning.disabled,.btn-outline-warning:disabled{color:#EFB73E;background-color:transparent}.btn-outline-warning:not(:disabled):not(.disabled):active,.btn-outline-warning:not(:disabled):not(.disabled).active,.show>.btn-outline-warning.dropdown-toggle{color:#fff;background-color:#EFB73E;border-color:#EFB73E}.btn-outline-warning:not(:disabled):not(.disabled):active:focus,.btn-outline-warning:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-warning.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(239,183,62,0.5);box-shadow:0 0 0 0.2rem rgba(239,183,62,0.5)}.btn-outline-danger{color:#DF382C;background-color:transparent;background-image:none;border-color:#DF382C}.btn-outline-danger:hover{color:#fff;background-color:#DF382C;border-color:#DF382C}.btn-outline-danger:focus,.btn-outline-danger.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5);box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5)}.btn-outline-danger.disabled,.btn-outline-danger:disabled{color:#DF382C;background-color:transparent}.btn-outline-danger:not(:disabled):not(.disabled):active,.btn-outline-danger:not(:disabled):not(.disabled).active,.show>.btn-outline-danger.dropdown-toggle{color:#fff;background-color:#DF382C;border-color:#DF382C}.btn-outline-danger:not(:disabled):not(.disabled):active:focus,.btn-outline-danger:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-danger.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5);box-shadow:0 0 0 0.2rem rgba(223,56,44,0.5)}.btn-outline-light{color:#e9ecef;background-color:transparent;background-image:none;border-color:#e9ecef}.btn-outline-light:hover{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-outline-light:focus,.btn-outline-light.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem 
rgba(233,236,239,0.5)}.btn-outline-light.disabled,.btn-outline-light:disabled{color:#e9ecef;background-color:transparent}.btn-outline-light:not(:disabled):not(.disabled):active,.btn-outline-light:not(:disabled):not(.disabled).active,.show>.btn-outline-light.dropdown-toggle{color:#212529;background-color:#e9ecef;border-color:#e9ecef}.btn-outline-light:not(:disabled):not(.disabled):active:focus,.btn-outline-light:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-light.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5);box-shadow:0 0 0 0.2rem rgba(233,236,239,0.5)}.btn-outline-dark{color:#772953;background-color:transparent;background-image:none;border-color:#772953}.btn-outline-dark:hover{color:#fff;background-color:#772953;border-color:#772953}.btn-outline-dark:focus,.btn-outline-dark.focus{-webkit-box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5);box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5)}.btn-outline-dark.disabled,.btn-outline-dark:disabled{color:#772953;background-color:transparent}.btn-outline-dark:not(:disabled):not(.disabled):active,.btn-outline-dark:not(:disabled):not(.disabled).active,.show>.btn-outline-dark.dropdown-toggle{color:#fff;background-color:#772953;border-color:#772953}.btn-outline-dark:not(:disabled):not(.disabled):active:focus,.btn-outline-dark:not(:disabled):not(.disabled).active:focus,.show>.btn-outline-dark.dropdown-toggle:focus{-webkit-box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5);box-shadow:0 0 0 0.2rem rgba(119,41,83,0.5)}.btn-link{font-weight:400;color:#E95420;background-color:transparent}.btn-link:hover{color:#ac3911;text-decoration:underline;background-color:transparent;border-color:transparent}.btn-link:focus,.btn-link.focus{text-decoration:underline;border-color:transparent;-webkit-box-shadow:none;box-shadow:none}.btn-link:disabled,.btn-link.disabled{color:#868e96;pointer-events:none}.btn-lg,.btn-group-lg>.btn{padding:0.5rem 
1rem;font-size:1.25rem;line-height:1.5;border-radius:0.3rem}.btn-sm,.btn-group-sm>.btn{padding:0.25rem 0.5rem;font-size:0.875rem;line-height:1.5;border-radius:0.2rem}.btn-block{display:block;width:100%}.btn-block+.btn-block{margin-top:0.5rem}input[type="submit"].btn-block,input[type="reset"].btn-block,input[type="button"].btn-block{width:100%}.fade{-webkit-transition:opacity 0.15s linear;transition:opacity 0.15s linear}@media screen and (prefers-reduced-motion: reduce){.fade{-webkit-transition:none;transition:none}}.fade:not(.show){opacity:0}.collapse:not(.show){display:none}.collapsing{position:relative;height:0;overflow:hidden;-webkit-transition:height 0.35s ease;transition:height 0.35s ease}@media screen and (prefers-reduced-motion: reduce){.collapsing{-webkit-transition:none;transition:none}}.dropup,.dropright,.dropdown,.dropleft{position:relative}.dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:"";border-top:0.3em solid;border-right:0.3em solid transparent;border-bottom:0;border-left:0.3em solid transparent}.dropdown-toggle:empty::after{margin-left:0}.dropdown-menu{position:absolute;top:100%;left:0;z-index:1000;display:none;float:left;min-width:10rem;padding:0.5rem 0;margin:0.125rem 0 0;font-size:1rem;color:#333;text-align:left;list-style:none;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.15);border-radius:0.25rem}.dropdown-menu-right{right:0;left:auto}.dropup .dropdown-menu{top:auto;bottom:100%;margin-top:0;margin-bottom:0.125rem}.dropup .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:"";border-top:0;border-right:0.3em solid transparent;border-bottom:0.3em solid;border-left:0.3em solid transparent}.dropup .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-menu{top:0;right:auto;left:100%;margin-top:0;margin-left:0.125rem}.dropright 
.dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:"";border-top:0.3em solid transparent;border-right:0;border-bottom:0.3em solid transparent;border-left:0.3em solid}.dropright .dropdown-toggle:empty::after{margin-left:0}.dropright .dropdown-toggle::after{vertical-align:0}.dropleft .dropdown-menu{top:0;right:100%;left:auto;margin-top:0;margin-right:0.125rem}.dropleft .dropdown-toggle::after{display:inline-block;width:0;height:0;margin-left:0.255em;vertical-align:0.255em;content:""}.dropleft .dropdown-toggle::after{display:none}.dropleft .dropdown-toggle::before{display:inline-block;width:0;height:0;margin-right:0.255em;vertical-align:0.255em;content:"";border-top:0.3em solid transparent;border-right:0.3em solid;border-bottom:0.3em solid transparent}.dropleft .dropdown-toggle:empty::after{margin-left:0}.dropleft .dropdown-toggle::before{vertical-align:0}.dropdown-menu[x-placement^="top"],.dropdown-menu[x-placement^="right"],.dropdown-menu[x-placement^="bottom"],.dropdown-menu[x-placement^="left"]{right:auto;bottom:auto}.dropdown-divider{height:0;margin:0.5rem 0;overflow:hidden;border-top:1px solid #e9ecef}.dropdown-item{display:block;width:100%;padding:0.25rem 1.5rem;clear:both;font-weight:400;color:#212529;text-align:inherit;white-space:nowrap;background-color:transparent;border:0}.dropdown-item:hover,.dropdown-item:focus{color:#16181b;text-decoration:none;background-color:#f8f9fa}.dropdown-item.active,.dropdown-item:active{color:#fff;text-decoration:none;background-color:#E95420}.dropdown-item.disabled,.dropdown-item:disabled{color:#868e96;background-color:transparent}.dropdown-menu.show{display:block}.dropdown-header{display:block;padding:0.5rem 1.5rem;margin-bottom:0;font-size:0.875rem;color:#868e96;white-space:nowrap}.dropdown-item-text{display:block;padding:0.25rem 
1.5rem;color:#212529}.btn-group,.btn-group-vertical{position:relative;display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;vertical-align:middle}.btn-group>.btn,.btn-group-vertical>.btn{position:relative;-webkit-box-flex:0;-ms-flex:0 1 auto;flex:0 1 auto}.btn-group>.btn:hover,.btn-group-vertical>.btn:hover{z-index:1}.btn-group>.btn:focus,.btn-group>.btn:active,.btn-group>.btn.active,.btn-group-vertical>.btn:focus,.btn-group-vertical>.btn:active,.btn-group-vertical>.btn.active{z-index:1}.btn-group .btn+.btn,.btn-group .btn+.btn-group,.btn-group .btn-group+.btn,.btn-group .btn-group+.btn-group,.btn-group-vertical .btn+.btn,.btn-group-vertical .btn+.btn-group,.btn-group-vertical .btn-group+.btn,.btn-group-vertical .btn-group+.btn-group{margin-left:-1px}.btn-toolbar{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.btn-toolbar .input-group{width:auto}.btn-group>.btn:first-child{margin-left:0}.btn-group>.btn:not(:last-child):not(.dropdown-toggle),.btn-group>.btn-group:not(:last-child)>.btn{border-top-right-radius:0;border-bottom-right-radius:0}.btn-group>.btn:not(:first-child),.btn-group>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-bottom-left-radius:0}.dropdown-toggle-split{padding-right:0.5625rem;padding-left:0.5625rem}.dropdown-toggle-split::after,.dropup .dropdown-toggle-split::after,.dropright .dropdown-toggle-split::after{margin-left:0}.dropleft 
.dropdown-toggle-split::before{margin-right:0}.btn-sm+.dropdown-toggle-split,.btn-group-sm>.btn+.dropdown-toggle-split{padding-right:0.375rem;padding-left:0.375rem}.btn-lg+.dropdown-toggle-split,.btn-group-lg>.btn+.dropdown-toggle-split{padding-right:0.75rem;padding-left:0.75rem}.btn-group-vertical{-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center}.btn-group-vertical .btn,.btn-group-vertical .btn-group{width:100%}.btn-group-vertical>.btn+.btn,.btn-group-vertical>.btn+.btn-group,.btn-group-vertical>.btn-group+.btn,.btn-group-vertical>.btn-group+.btn-group{margin-top:-1px;margin-left:0}.btn-group-vertical>.btn:not(:last-child):not(.dropdown-toggle),.btn-group-vertical>.btn-group:not(:last-child)>.btn{border-bottom-right-radius:0;border-bottom-left-radius:0}.btn-group-vertical>.btn:not(:first-child),.btn-group-vertical>.btn-group:not(:first-child)>.btn{border-top-left-radius:0;border-top-right-radius:0}.btn-group-toggle>.btn,.btn-group-toggle>.btn-group>.btn{margin-bottom:0}.btn-group-toggle>.btn input[type="radio"],.btn-group-toggle>.btn input[type="checkbox"],.btn-group-toggle>.btn-group>.btn input[type="radio"],.btn-group-toggle>.btn-group>.btn input[type="checkbox"]{position:absolute;clip:rect(0, 0, 0, 0);pointer-events:none}.input-group{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:stretch;-ms-flex-align:stretch;align-items:stretch;width:100%}.input-group>.form-control,.input-group>.custom-select,.input-group>.custom-file{position:relative;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 
auto;width:1%;margin-bottom:0}.input-group>.form-control+.form-control,.input-group>.form-control+.custom-select,.input-group>.form-control+.custom-file,.input-group>.custom-select+.form-control,.input-group>.custom-select+.custom-select,.input-group>.custom-select+.custom-file,.input-group>.custom-file+.form-control,.input-group>.custom-file+.custom-select,.input-group>.custom-file+.custom-file{margin-left:-1px}.input-group>.form-control:focus,.input-group>.custom-select:focus,.input-group>.custom-file .custom-file-input:focus ~ .custom-file-label{z-index:3}.input-group>.custom-file .custom-file-input:focus{z-index:4}.input-group>.form-control:not(:last-child),.input-group>.custom-select:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.form-control:not(:first-child),.input-group>.custom-select:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.input-group>.custom-file{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.input-group>.custom-file:not(:last-child) .custom-file-label,.input-group>.custom-file:not(:last-child) .custom-file-label::after{border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.custom-file:not(:first-child) .custom-file-label{border-top-left-radius:0;border-bottom-left-radius:0}.input-group-prepend,.input-group-append{display:-webkit-box;display:-ms-flexbox;display:flex}.input-group-prepend .btn,.input-group-append .btn{position:relative;z-index:2}.input-group-prepend .btn+.btn,.input-group-prepend .btn+.input-group-text,.input-group-prepend .input-group-text+.input-group-text,.input-group-prepend .input-group-text+.btn,.input-group-append .btn+.btn,.input-group-append .btn+.input-group-text,.input-group-append .input-group-text+.input-group-text,.input-group-append 
.input-group-text+.btn{margin-left:-1px}.input-group-prepend{margin-right:-1px}.input-group-append{margin-left:-1px}.input-group-text{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;padding:0.375rem 0.75rem;margin-bottom:0;font-size:1rem;font-weight:400;line-height:1.5;color:#495057;text-align:center;white-space:nowrap;background-color:#e9ecef;border:1px solid #ced4da;border-radius:0.25rem}.input-group-text input[type="radio"],.input-group-text input[type="checkbox"]{margin-top:0}.input-group-lg>.form-control,.input-group-lg>.input-group-prepend>.input-group-text,.input-group-lg>.input-group-append>.input-group-text,.input-group-lg>.input-group-prepend>.btn,.input-group-lg>.input-group-append>.btn{height:calc(2.875rem + 2px);padding:0.5rem 1rem;font-size:1.25rem;line-height:1.5;border-radius:0.3rem}.input-group-sm>.form-control,.input-group-sm>.input-group-prepend>.input-group-text,.input-group-sm>.input-group-append>.input-group-text,.input-group-sm>.input-group-prepend>.btn,.input-group-sm>.input-group-append>.btn{height:calc(1.8125rem + 2px);padding:0.25rem 
0.5rem;font-size:0.875rem;line-height:1.5;border-radius:0.2rem}.input-group>.input-group-prepend>.btn,.input-group>.input-group-prepend>.input-group-text,.input-group>.input-group-append:not(:last-child)>.btn,.input-group>.input-group-append:not(:last-child)>.input-group-text,.input-group>.input-group-append:last-child>.btn:not(:last-child):not(.dropdown-toggle),.input-group>.input-group-append:last-child>.input-group-text:not(:last-child){border-top-right-radius:0;border-bottom-right-radius:0}.input-group>.input-group-append>.btn,.input-group>.input-group-append>.input-group-text,.input-group>.input-group-prepend:not(:first-child)>.btn,.input-group>.input-group-prepend:not(:first-child)>.input-group-text,.input-group>.input-group-prepend:first-child>.btn:not(:first-child),.input-group>.input-group-prepend:first-child>.input-group-text:not(:first-child){border-top-left-radius:0;border-bottom-left-radius:0}.custom-control{position:relative;display:block;min-height:1.5rem;padding-left:1.5rem}.custom-control-inline{display:-webkit-inline-box;display:-ms-inline-flexbox;display:inline-flex;margin-right:1rem}.custom-control-input{position:absolute;z-index:-1;opacity:0}.custom-control-input:checked ~ .custom-control-label::before{color:#fff;background-color:#E95420}.custom-control-input:focus ~ .custom-control-label::before{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(233,84,32,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(233,84,32,0.25)}.custom-control-input:active ~ .custom-control-label::before{color:#fff;background-color:#f9d1c2}.custom-control-input:disabled ~ .custom-control-label{color:#868e96}.custom-control-input:disabled ~ 
.custom-control-label::before{background-color:#e9ecef}.custom-control-label{position:relative;margin-bottom:0}.custom-control-label::before{position:absolute;top:0.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;pointer-events:none;content:"";-webkit-user-select:none;-moz-user-select:none;-ms-user-select:none;user-select:none;background-color:#dee2e6}.custom-control-label::after{position:absolute;top:0.25rem;left:-1.5rem;display:block;width:1rem;height:1rem;content:"";background-repeat:no-repeat;background-position:center center;background-size:50% 50%}.custom-checkbox .custom-control-label::before{border-radius:0.25rem}.custom-checkbox .custom-control-input:checked ~ .custom-control-label::before{background-color:#E95420}.custom-checkbox .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 8 8'%3E%3Cpath fill='%23fff' d='M6.564.75l-3.59 3.612-1.538-1.55L0 4.26 2.974 7.25 8 2.193z'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::before{background-color:#E95420}.custom-checkbox .custom-control-input:indeterminate ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 4'%3E%3Cpath stroke='%23fff' d='M0 2h4'/%3E%3C/svg%3E")}.custom-checkbox .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(233,84,32,0.5)}.custom-checkbox .custom-control-input:disabled:indeterminate ~ .custom-control-label::before{background-color:rgba(233,84,32,0.5)}.custom-radio .custom-control-label::before{border-radius:50%}.custom-radio .custom-control-input:checked ~ .custom-control-label::before{background-color:#E95420}.custom-radio .custom-control-input:checked ~ .custom-control-label::after{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='-4 
-4 8 8'%3E%3Ccircle r='3' fill='%23fff'/%3E%3C/svg%3E")}.custom-radio .custom-control-input:disabled:checked ~ .custom-control-label::before{background-color:rgba(233,84,32,0.5)}.custom-select{display:inline-block;width:100%;height:calc(2.25rem + 2px);padding:0.375rem 1.75rem 0.375rem 0.75rem;line-height:1.5;color:#495057;vertical-align:middle;background:#fff url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' viewBox='0 0 4 5'%3E%3Cpath fill='%23333' d='M2 0L0 2h4zm0 5L0 3h4z'/%3E%3C/svg%3E") no-repeat right 0.75rem center;background-size:8px 10px;border:1px solid #ced4da;border-radius:0.25rem;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-select:focus{border-color:#f4ad94;outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(244,173,148,0.5);box-shadow:0 0 0 0.2rem rgba(244,173,148,0.5)}.custom-select:focus::-ms-value{color:#495057;background-color:#fff}.custom-select[multiple],.custom-select[size]:not([size="1"]){height:auto;padding-right:0.75rem;background-image:none}.custom-select:disabled{color:#868e96;background-color:#e9ecef}.custom-select::-ms-expand{opacity:0}.custom-select-sm{height:calc(1.8125rem + 2px);padding-top:0.375rem;padding-bottom:0.375rem;font-size:75%}.custom-select-lg{height:calc(2.875rem + 2px);padding-top:0.375rem;padding-bottom:0.375rem;font-size:125%}.custom-file{position:relative;display:inline-block;width:100%;height:calc(2.25rem + 2px);margin-bottom:0}.custom-file-input{position:relative;z-index:2;width:100%;height:calc(2.25rem + 2px);margin:0;opacity:0}.custom-file-input:focus ~ .custom-file-label{border-color:#f4ad94;-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.25);box-shadow:0 0 0 0.2rem rgba(233,84,32,0.25)}.custom-file-input:focus ~ .custom-file-label::after{border-color:#f4ad94}.custom-file-input:disabled ~ .custom-file-label{background-color:#e9ecef}.custom-file-input:lang(en) ~ 
.custom-file-label::after{content:"Browse"}.custom-file-label{position:absolute;top:0;right:0;left:0;z-index:1;height:calc(2.25rem + 2px);padding:0.375rem 0.75rem;line-height:1.5;color:#495057;background-color:#fff;border:1px solid #ced4da;border-radius:0.25rem}.custom-file-label::after{position:absolute;top:0;right:0;bottom:0;z-index:3;display:block;height:2.25rem;padding:0.375rem 0.75rem;line-height:1.5;color:#495057;content:"Browse";background-color:#e9ecef;border-left:1px solid #ced4da;border-radius:0 0.25rem 0.25rem 0}.custom-range{width:100%;padding-left:0;background-color:transparent;-webkit-appearance:none;-moz-appearance:none;appearance:none}.custom-range:focus{outline:none}.custom-range:focus::-webkit-slider-thumb{-webkit-box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(233,84,32,0.25);box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(233,84,32,0.25)}.custom-range:focus::-moz-range-thumb{box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(233,84,32,0.25)}.custom-range:focus::-ms-thumb{box-shadow:0 0 0 1px #fff,0 0 0 0.2rem rgba(233,84,32,0.25)}.custom-range::-moz-focus-outer{border:0}.custom-range::-webkit-slider-thumb{width:1rem;height:1rem;margin-top:-0.25rem;background-color:#E95420;border:0;border-radius:1rem;-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;-webkit-appearance:none;appearance:none}@media screen and (prefers-reduced-motion: 
reduce){.custom-range::-webkit-slider-thumb{-webkit-transition:none;transition:none}}.custom-range::-webkit-slider-thumb:active{background-color:#f9d1c2}.custom-range::-webkit-slider-runnable-track{width:100%;height:0.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-moz-range-thumb{width:1rem;height:1rem;background-color:#E95420;border:0;border-radius:1rem;-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;-moz-appearance:none;appearance:none}@media screen and (prefers-reduced-motion: reduce){.custom-range::-moz-range-thumb{-webkit-transition:none;transition:none}}.custom-range::-moz-range-thumb:active{background-color:#f9d1c2}.custom-range::-moz-range-track{width:100%;height:0.5rem;color:transparent;cursor:pointer;background-color:#dee2e6;border-color:transparent;border-radius:1rem}.custom-range::-ms-thumb{width:1rem;height:1rem;margin-top:0;margin-right:0.2rem;margin-left:0.2rem;background-color:#E95420;border:0;border-radius:1rem;-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;appearance:none}@media screen and 
(prefers-reduced-motion: reduce){.custom-range::-ms-thumb{-webkit-transition:none;transition:none}}.custom-range::-ms-thumb:active{background-color:#f9d1c2}.custom-range::-ms-track{width:100%;height:0.5rem;color:transparent;cursor:pointer;background-color:transparent;border-color:transparent;border-width:0.5rem}.custom-range::-ms-fill-lower{background-color:#dee2e6;border-radius:1rem}.custom-range::-ms-fill-upper{margin-right:15px;background-color:#dee2e6;border-radius:1rem}.custom-control-label::before,.custom-file-label,.custom-select{-webkit-transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out;transition:background-color 0.15s ease-in-out, border-color 0.15s ease-in-out, box-shadow 0.15s ease-in-out, -webkit-box-shadow 0.15s ease-in-out}@media screen and (prefers-reduced-motion: reduce){.custom-control-label::before,.custom-file-label,.custom-select{-webkit-transition:none;transition:none}}.nav{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding-left:0;margin-bottom:0;list-style:none}.nav-link{display:block;padding:0.5rem 1rem}.nav-link:hover,.nav-link:focus{text-decoration:none}.nav-link.disabled{color:#868e96}.nav-tabs{border-bottom:1px solid #dee2e6}.nav-tabs .nav-item{margin-bottom:-1px}.nav-tabs .nav-link{border:1px solid transparent;border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.nav-tabs .nav-link:hover,.nav-tabs .nav-link:focus{border-color:#e9ecef #e9ecef #dee2e6}.nav-tabs .nav-link.disabled{color:#868e96;background-color:transparent;border-color:transparent}.nav-tabs .nav-link.active,.nav-tabs .nav-item.show .nav-link{color:#495057;background-color:#fff;border-color:#dee2e6 #dee2e6 #fff}.nav-tabs 
.dropdown-menu{margin-top:-1px;border-top-left-radius:0;border-top-right-radius:0}.nav-pills .nav-link{border-radius:0.25rem}.nav-pills .nav-link.active,.nav-pills .show>.nav-link{color:#fff;background-color:#E95420}.nav-fill .nav-item{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;text-align:center}.nav-justified .nav-item{-ms-flex-preferred-size:0;flex-basis:0;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;text-align:center}.tab-content>.tab-pane{display:none}.tab-content>.active{display:block}.navbar{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between;padding:0.5rem 1rem}.navbar>.container,.navbar>.container-fluid{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between}.navbar-brand{display:inline-block;padding-top:0.3125rem;padding-bottom:0.3125rem;margin-right:1rem;font-size:1.25rem;line-height:inherit;white-space:nowrap}.navbar-brand:hover,.navbar-brand:focus{text-decoration:none}.navbar-nav{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0;list-style:none}.navbar-nav .nav-link{padding-right:0;padding-left:0}.navbar-nav .dropdown-menu{position:static;float:none}.navbar-text{display:inline-block;padding-top:0.5rem;padding-bottom:0.5rem}.navbar-collapse{-ms-flex-preferred-size:100%;flex-basis:100%;-webkit-box-flex:1;-ms-flex-positive:1;flex-grow:1;-webkit-box-align:center;-ms-flex-align:center;align-items:center}.navbar-toggler{padding:0.25rem 0.75rem;font-size:1.25rem;line-height:1;background-color:transparent;border:1px solid 
transparent;border-radius:0.25rem}.navbar-toggler:hover,.navbar-toggler:focus{text-decoration:none}.navbar-toggler:not(:disabled):not(.disabled){cursor:pointer}.navbar-toggler-icon{display:inline-block;width:1.5em;height:1.5em;vertical-align:middle;content:"";background:no-repeat center center;background-size:100% 100%}@media (max-width: 575.98px){.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 576px){.navbar-expand-sm{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-sm .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-sm .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-sm .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-sm>.container,.navbar-expand-sm>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-sm .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-sm .navbar-toggler{display:none}}@media (max-width: 767.98px){.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 768px){.navbar-expand-md{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-md .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-md .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-md .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-md>.container,.navbar-expand-md>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-md 
.navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-md .navbar-toggler{display:none}}@media (max-width: 991.98px){.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 992px){.navbar-expand-lg{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-lg .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-lg .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-lg .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-lg>.container,.navbar-expand-lg>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-lg .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-lg .navbar-toggler{display:none}}@media (max-width: 1199.98px){.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{padding-right:0;padding-left:0}}@media (min-width: 1200px){.navbar-expand-xl{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand-xl .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand-xl .navbar-nav .dropdown-menu{position:absolute}.navbar-expand-xl .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand-xl>.container,.navbar-expand-xl>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand-xl .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex 
!important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand-xl .navbar-toggler{display:none}}.navbar-expand{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row nowrap;flex-flow:row nowrap;-webkit-box-pack:start;-ms-flex-pack:start;justify-content:flex-start}.navbar-expand>.container,.navbar-expand>.container-fluid{padding-right:0;padding-left:0}.navbar-expand .navbar-nav{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-direction:row;flex-direction:row}.navbar-expand .navbar-nav .dropdown-menu{position:absolute}.navbar-expand .navbar-nav .nav-link{padding-right:0.5rem;padding-left:0.5rem}.navbar-expand>.container,.navbar-expand>.container-fluid{-ms-flex-wrap:nowrap;flex-wrap:nowrap}.navbar-expand .navbar-collapse{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important;-ms-flex-preferred-size:auto;flex-basis:auto}.navbar-expand .navbar-toggler{display:none}.navbar-light .navbar-brand{color:rgba(0,0,0,0.9)}.navbar-light .navbar-brand:hover,.navbar-light .navbar-brand:focus{color:rgba(0,0,0,0.9)}.navbar-light .navbar-nav .nav-link{color:rgba(0,0,0,0.5)}.navbar-light .navbar-nav .nav-link:hover,.navbar-light .navbar-nav .nav-link:focus{color:rgba(0,0,0,0.7)}.navbar-light .navbar-nav .nav-link.disabled{color:rgba(0,0,0,0.3)}.navbar-light .navbar-nav .show>.nav-link,.navbar-light .navbar-nav .active>.nav-link,.navbar-light .navbar-nav .nav-link.show,.navbar-light .navbar-nav .nav-link.active{color:rgba(0,0,0,0.9)}.navbar-light .navbar-toggler{color:rgba(0,0,0,0.5);border-color:rgba(0,0,0,0.1)}.navbar-light .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(0, 0, 0, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-light .navbar-text{color:rgba(0,0,0,0.5)}.navbar-light .navbar-text 
a{color:rgba(0,0,0,0.9)}.navbar-light .navbar-text a:hover,.navbar-light .navbar-text a:focus{color:rgba(0,0,0,0.9)}.navbar-dark .navbar-brand{color:#fff}.navbar-dark .navbar-brand:hover,.navbar-dark .navbar-brand:focus{color:#fff}.navbar-dark .navbar-nav .nav-link{color:rgba(255,255,255,0.5)}.navbar-dark .navbar-nav .nav-link:hover,.navbar-dark .navbar-nav .nav-link:focus{color:rgba(255,255,255,0.75)}.navbar-dark .navbar-nav .nav-link.disabled{color:rgba(255,255,255,0.25)}.navbar-dark .navbar-nav .show>.nav-link,.navbar-dark .navbar-nav .active>.nav-link,.navbar-dark .navbar-nav .nav-link.show,.navbar-dark .navbar-nav .nav-link.active{color:#fff}.navbar-dark .navbar-toggler{color:rgba(255,255,255,0.5);border-color:rgba(255,255,255,0.1)}.navbar-dark .navbar-toggler-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg viewBox='0 0 30 30' xmlns='http://www.w3.org/2000/svg'%3E%3Cpath stroke='rgba(255, 255, 255, 0.5)' stroke-width='2' stroke-linecap='round' stroke-miterlimit='10' d='M4 7h22M4 15h22M4 23h22'/%3E%3C/svg%3E")}.navbar-dark .navbar-text{color:rgba(255,255,255,0.5)}.navbar-dark .navbar-text a{color:#fff}.navbar-dark .navbar-text a:hover,.navbar-dark .navbar-text a:focus{color:#fff}.card{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;min-width:0;word-wrap:break-word;background-color:#fff;background-clip:border-box;border:1px solid rgba(0,0,0,0.125);border-radius:0.25rem}.card>hr{margin-right:0;margin-left:0}.card>.list-group:first-child .list-group-item:first-child{border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.card>.list-group:last-child .list-group-item:last-child{border-bottom-right-radius:0.25rem;border-bottom-left-radius:0.25rem}.card-body{-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 
auto;padding:1.25rem}.card-title{margin-bottom:0.75rem}.card-subtitle{margin-top:-0.375rem;margin-bottom:0}.card-text:last-child{margin-bottom:0}.card-link:hover{text-decoration:none}.card-link+.card-link{margin-left:1.25rem}.card-header{padding:0.75rem 1.25rem;margin-bottom:0;background-color:rgba(0,0,0,0.03);border-bottom:1px solid rgba(0,0,0,0.125)}.card-header:first-child{border-radius:calc(0.25rem - 1px) calc(0.25rem - 1px) 0 0}.card-header+.list-group .list-group-item:first-child{border-top:0}.card-footer{padding:0.75rem 1.25rem;background-color:rgba(0,0,0,0.03);border-top:1px solid rgba(0,0,0,0.125)}.card-footer:last-child{border-radius:0 0 calc(0.25rem - 1px) calc(0.25rem - 1px)}.card-header-tabs{margin-right:-0.625rem;margin-bottom:-0.75rem;margin-left:-0.625rem;border-bottom:0}.card-header-pills{margin-right:-0.625rem;margin-left:-0.625rem}.card-img-overlay{position:absolute;top:0;right:0;bottom:0;left:0;padding:1.25rem}.card-img{width:100%;border-radius:calc(0.25rem - 1px)}.card-img-top{width:100%;border-top-left-radius:calc(0.25rem - 1px);border-top-right-radius:calc(0.25rem - 1px)}.card-img-bottom{width:100%;border-bottom-right-radius:calc(0.25rem - 1px);border-bottom-left-radius:calc(0.25rem - 1px)}.card-deck{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.card-deck .card{margin-bottom:15px}@media (min-width: 576px){.card-deck{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap;margin-right:-15px;margin-left:-15px}.card-deck .card{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-flex:1;-ms-flex:1 0 0%;flex:1 0 
0%;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;margin-right:15px;margin-bottom:0;margin-left:15px}}.card-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column}.card-group>.card{margin-bottom:15px}@media (min-width: 576px){.card-group{-webkit-box-orient:horizontal;-webkit-box-direction:normal;-ms-flex-flow:row wrap;flex-flow:row wrap}.card-group>.card{-webkit-box-flex:1;-ms-flex:1 0 0%;flex:1 0 0%;margin-bottom:0}.card-group>.card+.card{margin-left:0;border-left:0}.card-group>.card:first-child{border-top-right-radius:0;border-bottom-right-radius:0}.card-group>.card:first-child .card-img-top,.card-group>.card:first-child .card-header{border-top-right-radius:0}.card-group>.card:first-child .card-img-bottom,.card-group>.card:first-child .card-footer{border-bottom-right-radius:0}.card-group>.card:last-child{border-top-left-radius:0;border-bottom-left-radius:0}.card-group>.card:last-child .card-img-top,.card-group>.card:last-child .card-header{border-top-left-radius:0}.card-group>.card:last-child .card-img-bottom,.card-group>.card:last-child .card-footer{border-bottom-left-radius:0}.card-group>.card:only-child{border-radius:0.25rem}.card-group>.card:only-child .card-img-top,.card-group>.card:only-child .card-header{border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.card-group>.card:only-child .card-img-bottom,.card-group>.card:only-child .card-footer{border-bottom-right-radius:0.25rem;border-bottom-left-radius:0.25rem}.card-group>.card:not(:first-child):not(:last-child):not(:only-child){border-radius:0}.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-top,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-img-bottom,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) 
.card-header,.card-group>.card:not(:first-child):not(:last-child):not(:only-child) .card-footer{border-radius:0}}.card-columns .card{margin-bottom:0.75rem}@media (min-width: 576px){.card-columns{-webkit-column-count:3;column-count:3;-webkit-column-gap:1.25rem;column-gap:1.25rem;orphans:1;widows:1}.card-columns .card{display:inline-block;width:100%}}.accordion .card:not(:first-of-type):not(:last-of-type){border-bottom:0;border-radius:0}.accordion .card:not(:first-of-type) .card-header:first-child{border-radius:0}.accordion .card:first-of-type{border-bottom:0;border-bottom-right-radius:0;border-bottom-left-radius:0}.accordion .card:last-of-type{border-top-left-radius:0;border-top-right-radius:0}.breadcrumb{display:-webkit-box;display:-ms-flexbox;display:flex;-ms-flex-wrap:wrap;flex-wrap:wrap;padding:0.75rem 1rem;margin-bottom:1rem;list-style:none;background-color:#e9ecef;border-radius:0.25rem}.breadcrumb-item+.breadcrumb-item{padding-left:0.5rem}.breadcrumb-item+.breadcrumb-item::before{display:inline-block;padding-right:0.5rem;color:#868e96;content:"/"}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:underline}.breadcrumb-item+.breadcrumb-item:hover::before{text-decoration:none}.breadcrumb-item.active{color:#868e96}.pagination{display:-webkit-box;display:-ms-flexbox;display:flex;padding-left:0;list-style:none;border-radius:0.25rem}.page-link{position:relative;display:block;padding:0.5rem 0.75rem;margin-left:-1px;line-height:1.25;color:#E95420;background-color:#fff;border:1px solid #dee2e6}.page-link:hover{z-index:2;color:#ac3911;text-decoration:none;background-color:#e9ecef;border-color:#dee2e6}.page-link:focus{z-index:2;outline:0;-webkit-box-shadow:0 0 0 0.2rem rgba(233,84,32,0.25);box-shadow:0 0 0 0.2rem rgba(233,84,32,0.25)}.page-link:not(:disabled):not(.disabled){cursor:pointer}.page-item:first-child .page-link{margin-left:0;border-top-left-radius:0.25rem;border-bottom-left-radius:0.25rem}.page-item:last-child 
.page-link{border-top-right-radius:0.25rem;border-bottom-right-radius:0.25rem}.page-item.active .page-link{z-index:1;color:#fff;background-color:#E95420;border-color:#E95420}.page-item.disabled .page-link{color:#868e96;pointer-events:none;cursor:auto;background-color:#fff;border-color:#dee2e6}.pagination-lg .page-link{padding:0.75rem 1.5rem;font-size:1.25rem;line-height:1.5}.pagination-lg .page-item:first-child .page-link{border-top-left-radius:0.3rem;border-bottom-left-radius:0.3rem}.pagination-lg .page-item:last-child .page-link{border-top-right-radius:0.3rem;border-bottom-right-radius:0.3rem}.pagination-sm .page-link{padding:0.25rem 0.5rem;font-size:0.875rem;line-height:1.5}.pagination-sm .page-item:first-child .page-link{border-top-left-radius:0.2rem;border-bottom-left-radius:0.2rem}.pagination-sm .page-item:last-child .page-link{border-top-right-radius:0.2rem;border-bottom-right-radius:0.2rem}.badge{display:inline-block;padding:0.25em 0.4em;font-size:75%;font-weight:700;line-height:1;text-align:center;white-space:nowrap;vertical-align:baseline;border-radius:0.25rem}.badge:empty{display:none}.btn 
.badge{position:relative;top:-1px}.badge-pill{padding-right:0.6em;padding-left:0.6em;border-radius:10rem}.badge-primary{color:#fff;background-color:#E95420}.badge-primary[href]:hover,.badge-primary[href]:focus{color:#fff;text-decoration:none;background-color:#c34113}.badge-secondary{color:#fff;background-color:#AEA79F}.badge-secondary[href]:hover,.badge-secondary[href]:focus{color:#fff;text-decoration:none;background-color:#978e83}.badge-success{color:#fff;background-color:#38B44A}.badge-success[href]:hover,.badge-success[href]:focus{color:#fff;text-decoration:none;background-color:#2c8d3a}.badge-info{color:#fff;background-color:#17a2b8}.badge-info[href]:hover,.badge-info[href]:focus{color:#fff;text-decoration:none;background-color:#117a8b}.badge-warning{color:#fff;background-color:#EFB73E}.badge-warning[href]:hover,.badge-warning[href]:focus{color:#fff;text-decoration:none;background-color:#e7a413}.badge-danger{color:#fff;background-color:#DF382C}.badge-danger[href]:hover,.badge-danger[href]:focus{color:#fff;text-decoration:none;background-color:#bc271c}.badge-light{color:#212529;background-color:#e9ecef}.badge-light[href]:hover,.badge-light[href]:focus{color:#212529;text-decoration:none;background-color:#cbd3da}.badge-dark{color:#fff;background-color:#772953}.badge-dark[href]:hover,.badge-dark[href]:focus{color:#fff;text-decoration:none;background-color:#511c39}.jumbotron{padding:2rem 1rem;margin-bottom:2rem;background-color:#e9ecef;border-radius:0.3rem}@media (min-width: 576px){.jumbotron{padding:4rem 2rem}}.jumbotron-fluid{padding-right:0;padding-left:0;border-radius:0}.alert{position:relative;padding:0.75rem 1.25rem;margin-bottom:1rem;border:1px solid transparent;border-radius:0.25rem}.alert-heading{color:inherit}.alert-link{font-weight:700}.alert-dismissible{padding-right:4rem}.alert-dismissible .close{position:absolute;top:0;right:0;padding:0.75rem 1.25rem;color:inherit}.alert-primary{color:#792c11;background-color:#fbddd2;border-color:#f9cfc1}.alert-primary 
hr{border-top-color:#f7bdaa}.alert-primary .alert-link{color:#4c1c0b}.alert-secondary{color:#5a5753;background-color:#efedec;border-color:#e8e6e4}.alert-secondary hr{border-top-color:#dcd9d6}.alert-secondary .alert-link{color:#3f3d3b}.alert-success{color:#1d5e26;background-color:#d7f0db;border-color:#c7eacc}.alert-success hr{border-top-color:#b4e3bb}.alert-success .alert-link{color:#113716}.alert-info{color:#0c5460;background-color:#d1ecf1;border-color:#bee5eb}.alert-info hr{border-top-color:#abdde5}.alert-info .alert-link{color:#062c33}.alert-warning{color:#7c5f20;background-color:#fcf1d8;border-color:#fbebc9}.alert-warning hr{border-top-color:#f9e2b1}.alert-warning .alert-link{color:#534016}.alert-danger{color:#741d17;background-color:#f9d7d5;border-color:#f6c7c4}.alert-danger hr{border-top-color:#f3b2ae}.alert-danger .alert-link{color:#49120f}.alert-light{color:#797b7c;background-color:#fbfbfc;border-color:#f9fafb}.alert-light hr{border-top-color:#eaedf1}.alert-light .alert-link{color:#606162}.alert-dark{color:#3e152b;background-color:#e4d4dd;border-color:#d9c3cf}.alert-dark hr{border-top-color:#cfb3c3}.alert-dark .alert-link{color:#180811}@-webkit-keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}@keyframes progress-bar-stripes{from{background-position:1rem 0}to{background-position:0 0}}.progress{display:-webkit-box;display:-ms-flexbox;display:flex;height:1rem;overflow:hidden;font-size:0.75rem;background-color:#e9ecef;border-radius:0.25rem}.progress-bar{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;color:#fff;text-align:center;white-space:nowrap;background-color:#E95420;-webkit-transition:width 0.6s ease;transition:width 0.6s ease}@media screen and (prefers-reduced-motion: 
reduce){.progress-bar{-webkit-transition:none;transition:none}}.progress-bar-striped{background-image:linear-gradient(45deg, rgba(255,255,255,0.15) 25%, transparent 25%, transparent 50%, rgba(255,255,255,0.15) 50%, rgba(255,255,255,0.15) 75%, transparent 75%, transparent);background-size:1rem 1rem}.progress-bar-animated{-webkit-animation:progress-bar-stripes 1s linear infinite;animation:progress-bar-stripes 1s linear infinite}.media{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start}.media-body{-webkit-box-flex:1;-ms-flex:1;flex:1}.list-group{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;padding-left:0;margin-bottom:0}.list-group-item-action{width:100%;color:#495057;text-align:inherit}.list-group-item-action:hover,.list-group-item-action:focus{color:#495057;text-decoration:none;background-color:#f8f9fa}.list-group-item-action:active{color:#333;background-color:#e9ecef}.list-group-item{position:relative;display:block;padding:0.75rem 1.25rem;margin-bottom:-1px;background-color:#fff;border:1px solid rgba(0,0,0,0.125)}.list-group-item:first-child{border-top-left-radius:0.25rem;border-top-right-radius:0.25rem}.list-group-item:last-child{margin-bottom:0;border-bottom-right-radius:0.25rem;border-bottom-left-radius:0.25rem}.list-group-item:hover,.list-group-item:focus{z-index:1;text-decoration:none}.list-group-item.disabled,.list-group-item:disabled{color:#868e96;background-color:#fff}.list-group-item.active{z-index:2;color:#fff;background-color:#E95420;border-color:#E95420}.list-group-flush .list-group-item{border-right:0;border-left:0;border-radius:0}.list-group-flush:first-child .list-group-item:first-child{border-top:0}.list-group-flush:last-child 
.list-group-item:last-child{border-bottom:0}.list-group-item-primary{color:#792c11;background-color:#f9cfc1}.list-group-item-primary.list-group-item-action:hover,.list-group-item-primary.list-group-item-action:focus{color:#792c11;background-color:#f7bdaa}.list-group-item-primary.list-group-item-action.active{color:#fff;background-color:#792c11;border-color:#792c11}.list-group-item-secondary{color:#5a5753;background-color:#e8e6e4}.list-group-item-secondary.list-group-item-action:hover,.list-group-item-secondary.list-group-item-action:focus{color:#5a5753;background-color:#dcd9d6}.list-group-item-secondary.list-group-item-action.active{color:#fff;background-color:#5a5753;border-color:#5a5753}.list-group-item-success{color:#1d5e26;background-color:#c7eacc}.list-group-item-success.list-group-item-action:hover,.list-group-item-success.list-group-item-action:focus{color:#1d5e26;background-color:#b4e3bb}.list-group-item-success.list-group-item-action.active{color:#fff;background-color:#1d5e26;border-color:#1d5e26}.list-group-item-info{color:#0c5460;background-color:#bee5eb}.list-group-item-info.list-group-item-action:hover,.list-group-item-info.list-group-item-action:focus{color:#0c5460;background-color:#abdde5}.list-group-item-info.list-group-item-action.active{color:#fff;background-color:#0c5460;border-color:#0c5460}.list-group-item-warning{color:#7c5f20;background-color:#fbebc9}.list-group-item-warning.list-group-item-action:hover,.list-group-item-warning.list-group-item-action:focus{color:#7c5f20;background-color:#f9e2b1}.list-group-item-warning.list-group-item-action.active{color:#fff;background-color:#7c5f20;border-color:#7c5f20}.list-group-item-danger{color:#741d17;background-color:#f6c7c4}.list-group-item-danger.list-group-item-action:hover,.list-group-item-danger.list-group-item-action:focus{color:#741d17;background-color:#f3b2ae}.list-group-item-danger.list-group-item-action.active{color:#fff;background-color:#741d17;border-color:#741d17}.list-group-item-light{col
or:#797b7c;background-color:#f9fafb}.list-group-item-light.list-group-item-action:hover,.list-group-item-light.list-group-item-action:focus{color:#797b7c;background-color:#eaedf1}.list-group-item-light.list-group-item-action.active{color:#fff;background-color:#797b7c;border-color:#797b7c}.list-group-item-dark{color:#3e152b;background-color:#d9c3cf}.list-group-item-dark.list-group-item-action:hover,.list-group-item-dark.list-group-item-action:focus{color:#3e152b;background-color:#cfb3c3}.list-group-item-dark.list-group-item-action.active{color:#fff;background-color:#3e152b;border-color:#3e152b}.close{float:right;font-size:1.5rem;font-weight:700;line-height:1;color:#000;text-shadow:0 1px 0 #fff;opacity:.5}.close:not(:disabled):not(.disabled){cursor:pointer}.close:not(:disabled):not(.disabled):hover,.close:not(:disabled):not(.disabled):focus{color:#000;text-decoration:none;opacity:.75}button.close{padding:0;background-color:transparent;border:0;-webkit-appearance:none}.modal-open{overflow:hidden}.modal-open .modal{overflow-x:hidden;overflow-y:auto}.modal{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1050;display:none;overflow:hidden;outline:0}.modal-dialog{position:relative;width:auto;margin:0.5rem;pointer-events:none}.modal.fade .modal-dialog{-webkit-transition:-webkit-transform 0.3s ease-out;transition:-webkit-transform 0.3s ease-out;transition:transform 0.3s ease-out;transition:transform 0.3s ease-out, -webkit-transform 0.3s ease-out;-webkit-transform:translate(0, -25%);transform:translate(0, -25%)}@media screen and (prefers-reduced-motion: reduce){.modal.fade .modal-dialog{-webkit-transition:none;transition:none}}.modal.show .modal-dialog{-webkit-transform:translate(0, 0);transform:translate(0, 0)}.modal-dialog-centered{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;min-height:calc(100% - (0.5rem * 2))}.modal-dialog-centered::before{display:block;height:calc(100vh - (0.5rem * 
2));content:""}.modal-content{position:relative;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-orient:vertical;-webkit-box-direction:normal;-ms-flex-direction:column;flex-direction:column;width:100%;pointer-events:auto;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:0.3rem;outline:0}.modal-backdrop{position:fixed;top:0;right:0;bottom:0;left:0;z-index:1040;background-color:#000}.modal-backdrop.fade{opacity:0}.modal-backdrop.show{opacity:0.5}.modal-header{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:start;-ms-flex-align:start;align-items:flex-start;-webkit-box-pack:justify;-ms-flex-pack:justify;justify-content:space-between;padding:1rem;border-bottom:1px solid #e9ecef;border-top-left-radius:0.3rem;border-top-right-radius:0.3rem}.modal-header .close{padding:1rem;margin:-1rem -1rem -1rem auto}.modal-title{margin-bottom:0;line-height:1.5}.modal-body{position:relative;-webkit-box-flex:1;-ms-flex:1 1 auto;flex:1 1 auto;padding:1rem}.modal-footer{display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:end;-ms-flex-pack:end;justify-content:flex-end;padding:1rem;border-top:1px solid #e9ecef}.modal-footer>:not(:first-child){margin-left:.25rem}.modal-footer>:not(:last-child){margin-right:.25rem}.modal-scrollbar-measure{position:absolute;top:-9999px;width:50px;height:50px;overflow:scroll}@media (min-width: 576px){.modal-dialog{max-width:500px;margin:1.75rem auto}.modal-dialog-centered{min-height:calc(100% - (1.75rem * 2))}.modal-dialog-centered::before{height:calc(100vh - (1.75rem * 2))}.modal-sm{max-width:300px}}@media (min-width: 992px){.modal-lg{max-width:800px}}.tooltip{position:absolute;z-index:1070;display:block;margin:0;font-family:"Ubuntu", -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI 
Symbol";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:0.875rem;word-wrap:break-word;opacity:0}.tooltip.show{opacity:0.9}.tooltip .arrow{position:absolute;display:block;width:0.8rem;height:0.4rem}.tooltip .arrow::before{position:absolute;content:"";border-color:transparent;border-style:solid}.bs-tooltip-top,.bs-tooltip-auto[x-placement^="top"]{padding:0.4rem 0}.bs-tooltip-top .arrow,.bs-tooltip-auto[x-placement^="top"] .arrow{bottom:0}.bs-tooltip-top .arrow::before,.bs-tooltip-auto[x-placement^="top"] .arrow::before{top:0;border-width:0.4rem 0.4rem 0;border-top-color:#000}.bs-tooltip-right,.bs-tooltip-auto[x-placement^="right"]{padding:0 0.4rem}.bs-tooltip-right .arrow,.bs-tooltip-auto[x-placement^="right"] .arrow{left:0;width:0.4rem;height:0.8rem}.bs-tooltip-right .arrow::before,.bs-tooltip-auto[x-placement^="right"] .arrow::before{right:0;border-width:0.4rem 0.4rem 0.4rem 0;border-right-color:#000}.bs-tooltip-bottom,.bs-tooltip-auto[x-placement^="bottom"]{padding:0.4rem 0}.bs-tooltip-bottom .arrow,.bs-tooltip-auto[x-placement^="bottom"] .arrow{top:0}.bs-tooltip-bottom .arrow::before,.bs-tooltip-auto[x-placement^="bottom"] .arrow::before{bottom:0;border-width:0 0.4rem 0.4rem;border-bottom-color:#000}.bs-tooltip-left,.bs-tooltip-auto[x-placement^="left"]{padding:0 0.4rem}.bs-tooltip-left .arrow,.bs-tooltip-auto[x-placement^="left"] .arrow{right:0;width:0.4rem;height:0.8rem}.bs-tooltip-left .arrow::before,.bs-tooltip-auto[x-placement^="left"] .arrow::before{left:0;border-width:0.4rem 0 0.4rem 0.4rem;border-left-color:#000}.tooltip-inner{max-width:200px;padding:0.25rem 0.5rem;color:#fff;text-align:center;background-color:#000;border-radius:0.25rem}.popover{position:absolute;top:0;left:0;z-index:1060;display:block;max-width:276px;font-family:"Ubuntu", -apple-system, 
BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol";font-style:normal;font-weight:400;line-height:1.5;text-align:left;text-align:start;text-decoration:none;text-shadow:none;text-transform:none;letter-spacing:normal;word-break:normal;word-spacing:normal;white-space:normal;line-break:auto;font-size:0.875rem;word-wrap:break-word;background-color:#fff;background-clip:padding-box;border:1px solid rgba(0,0,0,0.2);border-radius:0.3rem}.popover .arrow{position:absolute;display:block;width:1rem;height:0.5rem;margin:0 0.3rem}.popover .arrow::before,.popover .arrow::after{position:absolute;display:block;content:"";border-color:transparent;border-style:solid}.bs-popover-top,.bs-popover-auto[x-placement^="top"]{margin-bottom:0.5rem}.bs-popover-top .arrow,.bs-popover-auto[x-placement^="top"] .arrow{bottom:calc((0.5rem + 1px) * -1)}.bs-popover-top .arrow::before,.bs-popover-auto[x-placement^="top"] .arrow::before,.bs-popover-top .arrow::after,.bs-popover-auto[x-placement^="top"] .arrow::after{border-width:0.5rem 0.5rem 0}.bs-popover-top .arrow::before,.bs-popover-auto[x-placement^="top"] .arrow::before{bottom:0;border-top-color:rgba(0,0,0,0.25)}.bs-popover-top .arrow::after,.bs-popover-auto[x-placement^="top"] .arrow::after{bottom:1px;border-top-color:#fff}.bs-popover-right,.bs-popover-auto[x-placement^="right"]{margin-left:0.5rem}.bs-popover-right .arrow,.bs-popover-auto[x-placement^="right"] .arrow{left:calc((0.5rem + 1px) * -1);width:0.5rem;height:1rem;margin:0.3rem 0}.bs-popover-right .arrow::before,.bs-popover-auto[x-placement^="right"] .arrow::before,.bs-popover-right .arrow::after,.bs-popover-auto[x-placement^="right"] .arrow::after{border-width:0.5rem 0.5rem 0.5rem 0}.bs-popover-right .arrow::before,.bs-popover-auto[x-placement^="right"] .arrow::before{left:0;border-right-color:rgba(0,0,0,0.25)}.bs-popover-right .arrow::after,.bs-popover-auto[x-placement^="right"] 
.arrow::after{left:1px;border-right-color:#fff}.bs-popover-bottom,.bs-popover-auto[x-placement^="bottom"]{margin-top:0.5rem}.bs-popover-bottom .arrow,.bs-popover-auto[x-placement^="bottom"] .arrow{top:calc((0.5rem + 1px) * -1)}.bs-popover-bottom .arrow::before,.bs-popover-auto[x-placement^="bottom"] .arrow::before,.bs-popover-bottom .arrow::after,.bs-popover-auto[x-placement^="bottom"] .arrow::after{border-width:0 0.5rem 0.5rem 0.5rem}.bs-popover-bottom .arrow::before,.bs-popover-auto[x-placement^="bottom"] .arrow::before{top:0;border-bottom-color:rgba(0,0,0,0.25)}.bs-popover-bottom .arrow::after,.bs-popover-auto[x-placement^="bottom"] .arrow::after{top:1px;border-bottom-color:#fff}.bs-popover-bottom .popover-header::before,.bs-popover-auto[x-placement^="bottom"] .popover-header::before{position:absolute;top:0;left:50%;display:block;width:1rem;margin-left:-0.5rem;content:"";border-bottom:1px solid #f7f7f7}.bs-popover-left,.bs-popover-auto[x-placement^="left"]{margin-right:0.5rem}.bs-popover-left .arrow,.bs-popover-auto[x-placement^="left"] .arrow{right:calc((0.5rem + 1px) * -1);width:0.5rem;height:1rem;margin:0.3rem 0}.bs-popover-left .arrow::before,.bs-popover-auto[x-placement^="left"] .arrow::before,.bs-popover-left .arrow::after,.bs-popover-auto[x-placement^="left"] .arrow::after{border-width:0.5rem 0 0.5rem 0.5rem}.bs-popover-left .arrow::before,.bs-popover-auto[x-placement^="left"] .arrow::before{right:0;border-left-color:rgba(0,0,0,0.25)}.bs-popover-left .arrow::after,.bs-popover-auto[x-placement^="left"] .arrow::after{right:1px;border-left-color:#fff}.popover-header{padding:0.5rem 0.75rem;margin-bottom:0;font-size:1rem;color:inherit;background-color:#f7f7f7;border-bottom:1px solid #ebebeb;border-top-left-radius:calc(0.3rem - 1px);border-top-right-radius:calc(0.3rem - 1px)}.popover-header:empty{display:none}.popover-body{padding:0.5rem 
0.75rem;color:#333}.carousel{position:relative}.carousel-inner{position:relative;width:100%;overflow:hidden}.carousel-item{position:relative;display:none;-webkit-box-align:center;-ms-flex-align:center;align-items:center;width:100%;-webkit-backface-visibility:hidden;backface-visibility:hidden;-webkit-perspective:1000px;perspective:1000px}.carousel-item.active,.carousel-item-next,.carousel-item-prev{display:block;-webkit-transition:-webkit-transform 0.6s ease;transition:-webkit-transform 0.6s ease;transition:transform 0.6s ease;transition:transform 0.6s ease, -webkit-transform 0.6s ease}@media screen and (prefers-reduced-motion: reduce){.carousel-item.active,.carousel-item-next,.carousel-item-prev{-webkit-transition:none;transition:none}}.carousel-item-next,.carousel-item-prev{position:absolute;top:0}.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translateX(0);transform:translateX(0)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-item-next.carousel-item-left,.carousel-item-prev.carousel-item-right{-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0)}}.carousel-item-next,.active.carousel-item-right{-webkit-transform:translateX(100%);transform:translateX(100%)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-item-next,.active.carousel-item-right{-webkit-transform:translate3d(100%, 0, 0);transform:translate3d(100%, 0, 0)}}.carousel-item-prev,.active.carousel-item-left{-webkit-transform:translateX(-100%);transform:translateX(-100%)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-item-prev,.active.carousel-item-left{-webkit-transform:translate3d(-100%, 0, 0);transform:translate3d(-100%, 0, 0)}}.carousel-fade .carousel-item{opacity:0;-webkit-transition-duration:.6s;transition-duration:.6s;-webkit-transition-property:opacity;transition-property:opacity}.carousel-fade 
.carousel-item.active,.carousel-fade .carousel-item-next.carousel-item-left,.carousel-fade .carousel-item-prev.carousel-item-right{opacity:1}.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-right{opacity:0}.carousel-fade .carousel-item-next,.carousel-fade .carousel-item-prev,.carousel-fade .carousel-item.active,.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-prev{-webkit-transform:translateX(0);transform:translateX(0)}@supports (-webkit-transform-style: preserve-3d) or (transform-style: preserve-3d){.carousel-fade .carousel-item-next,.carousel-fade .carousel-item-prev,.carousel-fade .carousel-item.active,.carousel-fade .active.carousel-item-left,.carousel-fade .active.carousel-item-prev{-webkit-transform:translate3d(0, 0, 0);transform:translate3d(0, 0, 0)}}.carousel-control-prev,.carousel-control-next{position:absolute;top:0;bottom:0;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-align:center;-ms-flex-align:center;align-items:center;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;width:15%;color:#fff;text-align:center;opacity:0.5}.carousel-control-prev:hover,.carousel-control-prev:focus,.carousel-control-next:hover,.carousel-control-next:focus{color:#fff;text-decoration:none;outline:0;opacity:.9}.carousel-control-prev{left:0}.carousel-control-next{right:0}.carousel-control-prev-icon,.carousel-control-next-icon{display:inline-block;width:20px;height:20px;background:transparent no-repeat center center;background-size:100% 100%}.carousel-control-prev-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M5.25 0l-4 4 4 4 1.5-1.5-2.5-2.5 2.5-2.5-1.5-1.5z'/%3E%3C/svg%3E")}.carousel-control-next-icon{background-image:url("data:image/svg+xml;charset=utf8,%3Csvg xmlns='http://www.w3.org/2000/svg' fill='%23fff' viewBox='0 0 8 8'%3E%3Cpath d='M2.75 0l-1.5 1.5 2.5 2.5-2.5 2.5 1.5 1.5 
4-4-4-4z'/%3E%3C/svg%3E")}.carousel-indicators{position:absolute;right:0;bottom:10px;left:0;z-index:15;display:-webkit-box;display:-ms-flexbox;display:flex;-webkit-box-pack:center;-ms-flex-pack:center;justify-content:center;padding-left:0;margin-right:15%;margin-left:15%;list-style:none}.carousel-indicators li{position:relative;-webkit-box-flex:0;-ms-flex:0 1 auto;flex:0 1 auto;width:30px;height:3px;margin-right:3px;margin-left:3px;text-indent:-999px;cursor:pointer;background-color:rgba(255,255,255,0.5)}.carousel-indicators li::before{position:absolute;top:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators li::after{position:absolute;bottom:-10px;left:0;display:inline-block;width:100%;height:10px;content:""}.carousel-indicators .active{background-color:#fff}.carousel-caption{position:absolute;right:15%;bottom:20px;left:15%;z-index:10;padding-top:20px;padding-bottom:20px;color:#fff;text-align:center}.align-baseline{vertical-align:baseline !important}.align-top{vertical-align:top !important}.align-middle{vertical-align:middle !important}.align-bottom{vertical-align:bottom !important}.align-text-bottom{vertical-align:text-bottom !important}.align-text-top{vertical-align:text-top !important}.bg-primary{background-color:#E95420 !important}a.bg-primary:hover,a.bg-primary:focus,button.bg-primary:hover,button.bg-primary:focus{background-color:#c34113 !important}.bg-secondary{background-color:#AEA79F !important}a.bg-secondary:hover,a.bg-secondary:focus,button.bg-secondary:hover,button.bg-secondary:focus{background-color:#978e83 !important}.bg-success{background-color:#38B44A !important}a.bg-success:hover,a.bg-success:focus,button.bg-success:hover,button.bg-success:focus{background-color:#2c8d3a !important}.bg-info{background-color:#17a2b8 !important}a.bg-info:hover,a.bg-info:focus,button.bg-info:hover,button.bg-info:focus{background-color:#117a8b !important}.bg-warning{background-color:#EFB73E 
!important}a.bg-warning:hover,a.bg-warning:focus,button.bg-warning:hover,button.bg-warning:focus{background-color:#e7a413 !important}.bg-danger{background-color:#DF382C !important}a.bg-danger:hover,a.bg-danger:focus,button.bg-danger:hover,button.bg-danger:focus{background-color:#bc271c !important}.bg-light{background-color:#e9ecef !important}a.bg-light:hover,a.bg-light:focus,button.bg-light:hover,button.bg-light:focus{background-color:#cbd3da !important}.bg-dark{background-color:#772953 !important}a.bg-dark:hover,a.bg-dark:focus,button.bg-dark:hover,button.bg-dark:focus{background-color:#511c39 !important}.bg-white{background-color:#fff !important}.bg-transparent{background-color:transparent !important}.border{border:1px solid #dee2e6 !important}.border-top{border-top:1px solid #dee2e6 !important}.border-right{border-right:1px solid #dee2e6 !important}.border-bottom{border-bottom:1px solid #dee2e6 !important}.border-left{border-left:1px solid #dee2e6 !important}.border-0{border:0 !important}.border-top-0{border-top:0 !important}.border-right-0{border-right:0 !important}.border-bottom-0{border-bottom:0 !important}.border-left-0{border-left:0 !important}.border-primary{border-color:#E95420 !important}.border-secondary{border-color:#AEA79F !important}.border-success{border-color:#38B44A !important}.border-info{border-color:#17a2b8 !important}.border-warning{border-color:#EFB73E !important}.border-danger{border-color:#DF382C !important}.border-light{border-color:#e9ecef !important}.border-dark{border-color:#772953 !important}.border-white{border-color:#fff !important}.rounded{border-radius:0.25rem !important}.rounded-top{border-top-left-radius:0.25rem !important;border-top-right-radius:0.25rem !important}.rounded-right{border-top-right-radius:0.25rem !important;border-bottom-right-radius:0.25rem !important}.rounded-bottom{border-bottom-right-radius:0.25rem !important;border-bottom-left-radius:0.25rem !important}.rounded-left{border-top-left-radius:0.25rem 
!important;border-bottom-left-radius:0.25rem !important}.rounded-circle{border-radius:50% !important}.rounded-0{border-radius:0 !important}.clearfix::after{display:block;clear:both;content:""}.d-none{display:none !important}.d-inline{display:inline !important}.d-inline-block{display:inline-block !important}.d-block{display:block !important}.d-table{display:table !important}.d-table-row{display:table-row !important}.d-table-cell{display:table-cell !important}.d-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}@media (min-width: 576px){.d-sm-none{display:none !important}.d-sm-inline{display:inline !important}.d-sm-inline-block{display:inline-block !important}.d-sm-block{display:block !important}.d-sm-table{display:table !important}.d-sm-table-row{display:table-row !important}.d-sm-table-cell{display:table-cell !important}.d-sm-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-sm-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 768px){.d-md-none{display:none !important}.d-md-inline{display:inline !important}.d-md-inline-block{display:inline-block !important}.d-md-block{display:block !important}.d-md-table{display:table !important}.d-md-table-row{display:table-row !important}.d-md-table-cell{display:table-cell !important}.d-md-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-md-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 992px){.d-lg-none{display:none !important}.d-lg-inline{display:inline !important}.d-lg-inline-block{display:inline-block !important}.d-lg-block{display:block !important}.d-lg-table{display:table 
!important}.d-lg-table-row{display:table-row !important}.d-lg-table-cell{display:table-cell !important}.d-lg-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-lg-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media (min-width: 1200px){.d-xl-none{display:none !important}.d-xl-inline{display:inline !important}.d-xl-inline-block{display:inline-block !important}.d-xl-block{display:block !important}.d-xl-table{display:table !important}.d-xl-table-row{display:table-row !important}.d-xl-table-cell{display:table-cell !important}.d-xl-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-xl-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}@media print{.d-print-none{display:none !important}.d-print-inline{display:inline !important}.d-print-inline-block{display:inline-block !important}.d-print-block{display:block !important}.d-print-table{display:table !important}.d-print-table-row{display:table-row !important}.d-print-table-cell{display:table-cell !important}.d-print-flex{display:-webkit-box !important;display:-ms-flexbox !important;display:flex !important}.d-print-inline-flex{display:-webkit-inline-box !important;display:-ms-inline-flexbox !important;display:inline-flex !important}}.embed-responsive{position:relative;display:block;width:100%;padding:0;overflow:hidden}.embed-responsive::before{display:block;content:""}.embed-responsive .embed-responsive-item,.embed-responsive iframe,.embed-responsive embed,.embed-responsive object,.embed-responsive 
video{position:absolute;top:0;bottom:0;left:0;width:100%;height:100%;border:0}.embed-responsive-21by9::before{padding-top:42.8571428571%}.embed-responsive-16by9::before{padding-top:56.25%}.embed-responsive-4by3::before{padding-top:75%}.embed-responsive-1by1::before{padding-top:100%}.flex-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center 
!important}.justify-content-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}@media (min-width: 576px){.flex-sm-row{-webkit-box-orient:horizontal 
!important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-sm-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-sm-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-sm-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-sm-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-sm-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-sm-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-sm-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-sm-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-sm-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-sm-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-sm-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-sm-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-sm-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-sm-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-sm-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-sm-around{-ms-flex-pack:distribute !important;justify-content:space-around 
!important}.align-items-sm-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-sm-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-sm-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-sm-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-sm-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-sm-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-sm-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-sm-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-sm-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-sm-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-sm-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-sm-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-sm-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-sm-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-sm-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-sm-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-sm-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 768px){.flex-md-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-md-column{-webkit-box-orient:vertical 
!important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-md-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-md-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-md-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-md-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-md-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-md-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-md-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-md-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-md-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-md-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-md-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-md-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-md-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-md-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-md-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-md-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-md-end{-webkit-box-align:end !important;-ms-flex-align:end 
!important;align-items:flex-end !important}.align-items-md-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-md-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-md-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-md-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-md-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-md-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-md-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-md-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-md-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-md-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-md-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-md-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-md-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-md-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-md-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 992px){.flex-lg-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-lg-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-lg-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse 
!important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-lg-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse !important;flex-direction:column-reverse !important}.flex-lg-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-lg-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-lg-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-lg-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-lg-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-lg-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-lg-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-lg-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-lg-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-lg-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-lg-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-lg-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-lg-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-lg-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-lg-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-lg-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-lg-baseline{-webkit-box-align:baseline 
!important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-lg-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch !important}.align-content-lg-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-lg-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-lg-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-lg-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-lg-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-lg-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-lg-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-lg-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-lg-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-lg-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-lg-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-lg-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}@media (min-width: 1200px){.flex-xl-row{-webkit-box-orient:horizontal !important;-webkit-box-direction:normal !important;-ms-flex-direction:row !important;flex-direction:row !important}.flex-xl-column{-webkit-box-orient:vertical !important;-webkit-box-direction:normal !important;-ms-flex-direction:column !important;flex-direction:column !important}.flex-xl-row-reverse{-webkit-box-orient:horizontal !important;-webkit-box-direction:reverse !important;-ms-flex-direction:row-reverse !important;flex-direction:row-reverse !important}.flex-xl-column-reverse{-webkit-box-orient:vertical !important;-webkit-box-direction:reverse !important;-ms-flex-direction:column-reverse 
!important;flex-direction:column-reverse !important}.flex-xl-wrap{-ms-flex-wrap:wrap !important;flex-wrap:wrap !important}.flex-xl-nowrap{-ms-flex-wrap:nowrap !important;flex-wrap:nowrap !important}.flex-xl-wrap-reverse{-ms-flex-wrap:wrap-reverse !important;flex-wrap:wrap-reverse !important}.flex-xl-fill{-webkit-box-flex:1 !important;-ms-flex:1 1 auto !important;flex:1 1 auto !important}.flex-xl-grow-0{-webkit-box-flex:0 !important;-ms-flex-positive:0 !important;flex-grow:0 !important}.flex-xl-grow-1{-webkit-box-flex:1 !important;-ms-flex-positive:1 !important;flex-grow:1 !important}.flex-xl-shrink-0{-ms-flex-negative:0 !important;flex-shrink:0 !important}.flex-xl-shrink-1{-ms-flex-negative:1 !important;flex-shrink:1 !important}.justify-content-xl-start{-webkit-box-pack:start !important;-ms-flex-pack:start !important;justify-content:flex-start !important}.justify-content-xl-end{-webkit-box-pack:end !important;-ms-flex-pack:end !important;justify-content:flex-end !important}.justify-content-xl-center{-webkit-box-pack:center !important;-ms-flex-pack:center !important;justify-content:center !important}.justify-content-xl-between{-webkit-box-pack:justify !important;-ms-flex-pack:justify !important;justify-content:space-between !important}.justify-content-xl-around{-ms-flex-pack:distribute !important;justify-content:space-around !important}.align-items-xl-start{-webkit-box-align:start !important;-ms-flex-align:start !important;align-items:flex-start !important}.align-items-xl-end{-webkit-box-align:end !important;-ms-flex-align:end !important;align-items:flex-end !important}.align-items-xl-center{-webkit-box-align:center !important;-ms-flex-align:center !important;align-items:center !important}.align-items-xl-baseline{-webkit-box-align:baseline !important;-ms-flex-align:baseline !important;align-items:baseline !important}.align-items-xl-stretch{-webkit-box-align:stretch !important;-ms-flex-align:stretch !important;align-items:stretch 
!important}.align-content-xl-start{-ms-flex-line-pack:start !important;align-content:flex-start !important}.align-content-xl-end{-ms-flex-line-pack:end !important;align-content:flex-end !important}.align-content-xl-center{-ms-flex-line-pack:center !important;align-content:center !important}.align-content-xl-between{-ms-flex-line-pack:justify !important;align-content:space-between !important}.align-content-xl-around{-ms-flex-line-pack:distribute !important;align-content:space-around !important}.align-content-xl-stretch{-ms-flex-line-pack:stretch !important;align-content:stretch !important}.align-self-xl-auto{-ms-flex-item-align:auto !important;align-self:auto !important}.align-self-xl-start{-ms-flex-item-align:start !important;align-self:flex-start !important}.align-self-xl-end{-ms-flex-item-align:end !important;align-self:flex-end !important}.align-self-xl-center{-ms-flex-item-align:center !important;align-self:center !important}.align-self-xl-baseline{-ms-flex-item-align:baseline !important;align-self:baseline !important}.align-self-xl-stretch{-ms-flex-item-align:stretch !important;align-self:stretch !important}}.float-left{float:left !important}.float-right{float:right !important}.float-none{float:none !important}@media (min-width: 576px){.float-sm-left{float:left !important}.float-sm-right{float:right !important}.float-sm-none{float:none !important}}@media (min-width: 768px){.float-md-left{float:left !important}.float-md-right{float:right !important}.float-md-none{float:none !important}}@media (min-width: 992px){.float-lg-left{float:left !important}.float-lg-right{float:right !important}.float-lg-none{float:none !important}}@media (min-width: 1200px){.float-xl-left{float:left !important}.float-xl-right{float:right !important}.float-xl-none{float:none !important}}.position-static{position:static !important}.position-relative{position:relative !important}.position-absolute{position:absolute !important}.position-fixed{position:fixed 
!important}.position-sticky{position:-webkit-sticky !important;position:sticky !important}.fixed-top{position:fixed;top:0;right:0;left:0;z-index:1030}.fixed-bottom{position:fixed;right:0;bottom:0;left:0;z-index:1030}@supports (position: -webkit-sticky) or (position: sticky){.sticky-top{position:-webkit-sticky;position:sticky;top:0;z-index:1020}}.sr-only{position:absolute;width:1px;height:1px;padding:0;overflow:hidden;clip:rect(0, 0, 0, 0);white-space:nowrap;border:0}.sr-only-focusable:active,.sr-only-focusable:focus{position:static;width:auto;height:auto;overflow:visible;clip:auto;white-space:normal}.shadow-sm{-webkit-box-shadow:0 0.125rem 0.25rem rgba(0,0,0,0.075) !important;box-shadow:0 0.125rem 0.25rem rgba(0,0,0,0.075) !important}.shadow{-webkit-box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15) !important;box-shadow:0 0.5rem 1rem rgba(0,0,0,0.15) !important}.shadow-lg{-webkit-box-shadow:0 1rem 3rem rgba(0,0,0,0.175) !important;box-shadow:0 1rem 3rem rgba(0,0,0,0.175) !important}.shadow-none{-webkit-box-shadow:none !important;box-shadow:none !important}.w-25{width:25% !important}.w-50{width:50% !important}.w-75{width:75% !important}.w-100{width:100% !important}.w-auto{width:auto !important}.h-25{height:25% !important}.h-50{height:50% !important}.h-75{height:75% !important}.h-100{height:100% !important}.h-auto{height:auto !important}.mw-100{max-width:100% !important}.mh-100{max-height:100% !important}.m-0{margin:0 !important}.mt-0,.my-0{margin-top:0 !important}.mr-0,.mx-0{margin-right:0 !important}.mb-0,.my-0{margin-bottom:0 !important}.ml-0,.mx-0{margin-left:0 !important}.m-1{margin:0.25rem !important}.mt-1,.my-1{margin-top:0.25rem !important}.mr-1,.mx-1{margin-right:0.25rem !important}.mb-1,.my-1{margin-bottom:0.25rem !important}.ml-1,.mx-1{margin-left:0.25rem !important}.m-2{margin:0.5rem !important}.mt-2,.my-2{margin-top:0.5rem !important}.mr-2,.mx-2{margin-right:0.5rem !important}.mb-2,.my-2{margin-bottom:0.5rem !important}.ml-2,.mx-2{margin-left:0.5rem 
!important}.m-3{margin:1rem !important}.mt-3,.my-3{margin-top:1rem !important}.mr-3,.mx-3{margin-right:1rem !important}.mb-3,.my-3{margin-bottom:1rem !important}.ml-3,.mx-3{margin-left:1rem !important}.m-4{margin:1.5rem !important}.mt-4,.my-4{margin-top:1.5rem !important}.mr-4,.mx-4{margin-right:1.5rem !important}.mb-4,.my-4{margin-bottom:1.5rem !important}.ml-4,.mx-4{margin-left:1.5rem !important}.m-5{margin:3rem !important}.mt-5,.my-5{margin-top:3rem !important}.mr-5,.mx-5{margin-right:3rem !important}.mb-5,.my-5{margin-bottom:3rem !important}.ml-5,.mx-5{margin-left:3rem !important}.p-0{padding:0 !important}.pt-0,.py-0{padding-top:0 !important}.pr-0,.px-0{padding-right:0 !important}.pb-0,.py-0{padding-bottom:0 !important}.pl-0,.px-0{padding-left:0 !important}.p-1{padding:0.25rem !important}.pt-1,.py-1{padding-top:0.25rem !important}.pr-1,.px-1{padding-right:0.25rem !important}.pb-1,.py-1{padding-bottom:0.25rem !important}.pl-1,.px-1{padding-left:0.25rem !important}.p-2{padding:0.5rem !important}.pt-2,.py-2{padding-top:0.5rem !important}.pr-2,.px-2{padding-right:0.5rem !important}.pb-2,.py-2{padding-bottom:0.5rem !important}.pl-2,.px-2{padding-left:0.5rem !important}.p-3{padding:1rem !important}.pt-3,.py-3{padding-top:1rem !important}.pr-3,.px-3{padding-right:1rem !important}.pb-3,.py-3{padding-bottom:1rem !important}.pl-3,.px-3{padding-left:1rem !important}.p-4{padding:1.5rem !important}.pt-4,.py-4{padding-top:1.5rem !important}.pr-4,.px-4{padding-right:1.5rem !important}.pb-4,.py-4{padding-bottom:1.5rem !important}.pl-4,.px-4{padding-left:1.5rem !important}.p-5{padding:3rem !important}.pt-5,.py-5{padding-top:3rem !important}.pr-5,.px-5{padding-right:3rem !important}.pb-5,.py-5{padding-bottom:3rem !important}.pl-5,.px-5{padding-left:3rem !important}.m-auto{margin:auto !important}.mt-auto,.my-auto{margin-top:auto !important}.mr-auto,.mx-auto{margin-right:auto !important}.mb-auto,.my-auto{margin-bottom:auto !important}.ml-auto,.mx-auto{margin-left:auto 
!important}@media (min-width: 576px){.m-sm-0{margin:0 !important}.mt-sm-0,.my-sm-0{margin-top:0 !important}.mr-sm-0,.mx-sm-0{margin-right:0 !important}.mb-sm-0,.my-sm-0{margin-bottom:0 !important}.ml-sm-0,.mx-sm-0{margin-left:0 !important}.m-sm-1{margin:0.25rem !important}.mt-sm-1,.my-sm-1{margin-top:0.25rem !important}.mr-sm-1,.mx-sm-1{margin-right:0.25rem !important}.mb-sm-1,.my-sm-1{margin-bottom:0.25rem !important}.ml-sm-1,.mx-sm-1{margin-left:0.25rem !important}.m-sm-2{margin:0.5rem !important}.mt-sm-2,.my-sm-2{margin-top:0.5rem !important}.mr-sm-2,.mx-sm-2{margin-right:0.5rem !important}.mb-sm-2,.my-sm-2{margin-bottom:0.5rem !important}.ml-sm-2,.mx-sm-2{margin-left:0.5rem !important}.m-sm-3{margin:1rem !important}.mt-sm-3,.my-sm-3{margin-top:1rem !important}.mr-sm-3,.mx-sm-3{margin-right:1rem !important}.mb-sm-3,.my-sm-3{margin-bottom:1rem !important}.ml-sm-3,.mx-sm-3{margin-left:1rem !important}.m-sm-4{margin:1.5rem !important}.mt-sm-4,.my-sm-4{margin-top:1.5rem !important}.mr-sm-4,.mx-sm-4{margin-right:1.5rem !important}.mb-sm-4,.my-sm-4{margin-bottom:1.5rem !important}.ml-sm-4,.mx-sm-4{margin-left:1.5rem !important}.m-sm-5{margin:3rem !important}.mt-sm-5,.my-sm-5{margin-top:3rem !important}.mr-sm-5,.mx-sm-5{margin-right:3rem !important}.mb-sm-5,.my-sm-5{margin-bottom:3rem !important}.ml-sm-5,.mx-sm-5{margin-left:3rem !important}.p-sm-0{padding:0 !important}.pt-sm-0,.py-sm-0{padding-top:0 !important}.pr-sm-0,.px-sm-0{padding-right:0 !important}.pb-sm-0,.py-sm-0{padding-bottom:0 !important}.pl-sm-0,.px-sm-0{padding-left:0 !important}.p-sm-1{padding:0.25rem !important}.pt-sm-1,.py-sm-1{padding-top:0.25rem !important}.pr-sm-1,.px-sm-1{padding-right:0.25rem !important}.pb-sm-1,.py-sm-1{padding-bottom:0.25rem !important}.pl-sm-1,.px-sm-1{padding-left:0.25rem !important}.p-sm-2{padding:0.5rem !important}.pt-sm-2,.py-sm-2{padding-top:0.5rem !important}.pr-sm-2,.px-sm-2{padding-right:0.5rem !important}.pb-sm-2,.py-sm-2{padding-bottom:0.5rem 
!important}.pl-sm-2,.px-sm-2{padding-left:0.5rem !important}.p-sm-3{padding:1rem !important}.pt-sm-3,.py-sm-3{padding-top:1rem !important}.pr-sm-3,.px-sm-3{padding-right:1rem !important}.pb-sm-3,.py-sm-3{padding-bottom:1rem !important}.pl-sm-3,.px-sm-3{padding-left:1rem !important}.p-sm-4{padding:1.5rem !important}.pt-sm-4,.py-sm-4{padding-top:1.5rem !important}.pr-sm-4,.px-sm-4{padding-right:1.5rem !important}.pb-sm-4,.py-sm-4{padding-bottom:1.5rem !important}.pl-sm-4,.px-sm-4{padding-left:1.5rem !important}.p-sm-5{padding:3rem !important}.pt-sm-5,.py-sm-5{padding-top:3rem !important}.pr-sm-5,.px-sm-5{padding-right:3rem !important}.pb-sm-5,.py-sm-5{padding-bottom:3rem !important}.pl-sm-5,.px-sm-5{padding-left:3rem !important}.m-sm-auto{margin:auto !important}.mt-sm-auto,.my-sm-auto{margin-top:auto !important}.mr-sm-auto,.mx-sm-auto{margin-right:auto !important}.mb-sm-auto,.my-sm-auto{margin-bottom:auto !important}.ml-sm-auto,.mx-sm-auto{margin-left:auto !important}}@media (min-width: 768px){.m-md-0{margin:0 !important}.mt-md-0,.my-md-0{margin-top:0 !important}.mr-md-0,.mx-md-0{margin-right:0 !important}.mb-md-0,.my-md-0{margin-bottom:0 !important}.ml-md-0,.mx-md-0{margin-left:0 !important}.m-md-1{margin:0.25rem !important}.mt-md-1,.my-md-1{margin-top:0.25rem !important}.mr-md-1,.mx-md-1{margin-right:0.25rem !important}.mb-md-1,.my-md-1{margin-bottom:0.25rem !important}.ml-md-1,.mx-md-1{margin-left:0.25rem !important}.m-md-2{margin:0.5rem !important}.mt-md-2,.my-md-2{margin-top:0.5rem !important}.mr-md-2,.mx-md-2{margin-right:0.5rem !important}.mb-md-2,.my-md-2{margin-bottom:0.5rem !important}.ml-md-2,.mx-md-2{margin-left:0.5rem !important}.m-md-3{margin:1rem !important}.mt-md-3,.my-md-3{margin-top:1rem !important}.mr-md-3,.mx-md-3{margin-right:1rem !important}.mb-md-3,.my-md-3{margin-bottom:1rem !important}.ml-md-3,.mx-md-3{margin-left:1rem !important}.m-md-4{margin:1.5rem !important}.mt-md-4,.my-md-4{margin-top:1.5rem 
!important}.mr-md-4,.mx-md-4{margin-right:1.5rem !important}.mb-md-4,.my-md-4{margin-bottom:1.5rem !important}.ml-md-4,.mx-md-4{margin-left:1.5rem !important}.m-md-5{margin:3rem !important}.mt-md-5,.my-md-5{margin-top:3rem !important}.mr-md-5,.mx-md-5{margin-right:3rem !important}.mb-md-5,.my-md-5{margin-bottom:3rem !important}.ml-md-5,.mx-md-5{margin-left:3rem !important}.p-md-0{padding:0 !important}.pt-md-0,.py-md-0{padding-top:0 !important}.pr-md-0,.px-md-0{padding-right:0 !important}.pb-md-0,.py-md-0{padding-bottom:0 !important}.pl-md-0,.px-md-0{padding-left:0 !important}.p-md-1{padding:0.25rem !important}.pt-md-1,.py-md-1{padding-top:0.25rem !important}.pr-md-1,.px-md-1{padding-right:0.25rem !important}.pb-md-1,.py-md-1{padding-bottom:0.25rem !important}.pl-md-1,.px-md-1{padding-left:0.25rem !important}.p-md-2{padding:0.5rem !important}.pt-md-2,.py-md-2{padding-top:0.5rem !important}.pr-md-2,.px-md-2{padding-right:0.5rem !important}.pb-md-2,.py-md-2{padding-bottom:0.5rem !important}.pl-md-2,.px-md-2{padding-left:0.5rem !important}.p-md-3{padding:1rem !important}.pt-md-3,.py-md-3{padding-top:1rem !important}.pr-md-3,.px-md-3{padding-right:1rem !important}.pb-md-3,.py-md-3{padding-bottom:1rem !important}.pl-md-3,.px-md-3{padding-left:1rem !important}.p-md-4{padding:1.5rem !important}.pt-md-4,.py-md-4{padding-top:1.5rem !important}.pr-md-4,.px-md-4{padding-right:1.5rem !important}.pb-md-4,.py-md-4{padding-bottom:1.5rem !important}.pl-md-4,.px-md-4{padding-left:1.5rem !important}.p-md-5{padding:3rem !important}.pt-md-5,.py-md-5{padding-top:3rem !important}.pr-md-5,.px-md-5{padding-right:3rem !important}.pb-md-5,.py-md-5{padding-bottom:3rem !important}.pl-md-5,.px-md-5{padding-left:3rem !important}.m-md-auto{margin:auto !important}.mt-md-auto,.my-md-auto{margin-top:auto !important}.mr-md-auto,.mx-md-auto{margin-right:auto !important}.mb-md-auto,.my-md-auto{margin-bottom:auto !important}.ml-md-auto,.mx-md-auto{margin-left:auto !important}}@media (min-width: 
992px){.m-lg-0{margin:0 !important}.mt-lg-0,.my-lg-0{margin-top:0 !important}.mr-lg-0,.mx-lg-0{margin-right:0 !important}.mb-lg-0,.my-lg-0{margin-bottom:0 !important}.ml-lg-0,.mx-lg-0{margin-left:0 !important}.m-lg-1{margin:0.25rem !important}.mt-lg-1,.my-lg-1{margin-top:0.25rem !important}.mr-lg-1,.mx-lg-1{margin-right:0.25rem !important}.mb-lg-1,.my-lg-1{margin-bottom:0.25rem !important}.ml-lg-1,.mx-lg-1{margin-left:0.25rem !important}.m-lg-2{margin:0.5rem !important}.mt-lg-2,.my-lg-2{margin-top:0.5rem !important}.mr-lg-2,.mx-lg-2{margin-right:0.5rem !important}.mb-lg-2,.my-lg-2{margin-bottom:0.5rem !important}.ml-lg-2,.mx-lg-2{margin-left:0.5rem !important}.m-lg-3{margin:1rem !important}.mt-lg-3,.my-lg-3{margin-top:1rem !important}.mr-lg-3,.mx-lg-3{margin-right:1rem !important}.mb-lg-3,.my-lg-3{margin-bottom:1rem !important}.ml-lg-3,.mx-lg-3{margin-left:1rem !important}.m-lg-4{margin:1.5rem !important}.mt-lg-4,.my-lg-4{margin-top:1.5rem !important}.mr-lg-4,.mx-lg-4{margin-right:1.5rem !important}.mb-lg-4,.my-lg-4{margin-bottom:1.5rem !important}.ml-lg-4,.mx-lg-4{margin-left:1.5rem !important}.m-lg-5{margin:3rem !important}.mt-lg-5,.my-lg-5{margin-top:3rem !important}.mr-lg-5,.mx-lg-5{margin-right:3rem !important}.mb-lg-5,.my-lg-5{margin-bottom:3rem !important}.ml-lg-5,.mx-lg-5{margin-left:3rem !important}.p-lg-0{padding:0 !important}.pt-lg-0,.py-lg-0{padding-top:0 !important}.pr-lg-0,.px-lg-0{padding-right:0 !important}.pb-lg-0,.py-lg-0{padding-bottom:0 !important}.pl-lg-0,.px-lg-0{padding-left:0 !important}.p-lg-1{padding:0.25rem !important}.pt-lg-1,.py-lg-1{padding-top:0.25rem !important}.pr-lg-1,.px-lg-1{padding-right:0.25rem !important}.pb-lg-1,.py-lg-1{padding-bottom:0.25rem !important}.pl-lg-1,.px-lg-1{padding-left:0.25rem !important}.p-lg-2{padding:0.5rem !important}.pt-lg-2,.py-lg-2{padding-top:0.5rem !important}.pr-lg-2,.px-lg-2{padding-right:0.5rem !important}.pb-lg-2,.py-lg-2{padding-bottom:0.5rem !important}.pl-lg-2,.px-lg-2{padding-left:0.5rem 
!important}.p-lg-3{padding:1rem !important}.pt-lg-3,.py-lg-3{padding-top:1rem !important}.pr-lg-3,.px-lg-3{padding-right:1rem !important}.pb-lg-3,.py-lg-3{padding-bottom:1rem !important}.pl-lg-3,.px-lg-3{padding-left:1rem !important}.p-lg-4{padding:1.5rem !important}.pt-lg-4,.py-lg-4{padding-top:1.5rem !important}.pr-lg-4,.px-lg-4{padding-right:1.5rem !important}.pb-lg-4,.py-lg-4{padding-bottom:1.5rem !important}.pl-lg-4,.px-lg-4{padding-left:1.5rem !important}.p-lg-5{padding:3rem !important}.pt-lg-5,.py-lg-5{padding-top:3rem !important}.pr-lg-5,.px-lg-5{padding-right:3rem !important}.pb-lg-5,.py-lg-5{padding-bottom:3rem !important}.pl-lg-5,.px-lg-5{padding-left:3rem !important}.m-lg-auto{margin:auto !important}.mt-lg-auto,.my-lg-auto{margin-top:auto !important}.mr-lg-auto,.mx-lg-auto{margin-right:auto !important}.mb-lg-auto,.my-lg-auto{margin-bottom:auto !important}.ml-lg-auto,.mx-lg-auto{margin-left:auto !important}}@media (min-width: 1200px){.m-xl-0{margin:0 !important}.mt-xl-0,.my-xl-0{margin-top:0 !important}.mr-xl-0,.mx-xl-0{margin-right:0 !important}.mb-xl-0,.my-xl-0{margin-bottom:0 !important}.ml-xl-0,.mx-xl-0{margin-left:0 !important}.m-xl-1{margin:0.25rem !important}.mt-xl-1,.my-xl-1{margin-top:0.25rem !important}.mr-xl-1,.mx-xl-1{margin-right:0.25rem !important}.mb-xl-1,.my-xl-1{margin-bottom:0.25rem !important}.ml-xl-1,.mx-xl-1{margin-left:0.25rem !important}.m-xl-2{margin:0.5rem !important}.mt-xl-2,.my-xl-2{margin-top:0.5rem !important}.mr-xl-2,.mx-xl-2{margin-right:0.5rem !important}.mb-xl-2,.my-xl-2{margin-bottom:0.5rem !important}.ml-xl-2,.mx-xl-2{margin-left:0.5rem !important}.m-xl-3{margin:1rem !important}.mt-xl-3,.my-xl-3{margin-top:1rem !important}.mr-xl-3,.mx-xl-3{margin-right:1rem !important}.mb-xl-3,.my-xl-3{margin-bottom:1rem !important}.ml-xl-3,.mx-xl-3{margin-left:1rem !important}.m-xl-4{margin:1.5rem !important}.mt-xl-4,.my-xl-4{margin-top:1.5rem !important}.mr-xl-4,.mx-xl-4{margin-right:1.5rem 
!important}.mb-xl-4,.my-xl-4{margin-bottom:1.5rem !important}.ml-xl-4,.mx-xl-4{margin-left:1.5rem !important}.m-xl-5{margin:3rem !important}.mt-xl-5,.my-xl-5{margin-top:3rem !important}.mr-xl-5,.mx-xl-5{margin-right:3rem !important}.mb-xl-5,.my-xl-5{margin-bottom:3rem !important}.ml-xl-5,.mx-xl-5{margin-left:3rem !important}.p-xl-0{padding:0 !important}.pt-xl-0,.py-xl-0{padding-top:0 !important}.pr-xl-0,.px-xl-0{padding-right:0 !important}.pb-xl-0,.py-xl-0{padding-bottom:0 !important}.pl-xl-0,.px-xl-0{padding-left:0 !important}.p-xl-1{padding:0.25rem !important}.pt-xl-1,.py-xl-1{padding-top:0.25rem !important}.pr-xl-1,.px-xl-1{padding-right:0.25rem !important}.pb-xl-1,.py-xl-1{padding-bottom:0.25rem !important}.pl-xl-1,.px-xl-1{padding-left:0.25rem !important}.p-xl-2{padding:0.5rem !important}.pt-xl-2,.py-xl-2{padding-top:0.5rem !important}.pr-xl-2,.px-xl-2{padding-right:0.5rem !important}.pb-xl-2,.py-xl-2{padding-bottom:0.5rem !important}.pl-xl-2,.px-xl-2{padding-left:0.5rem !important}.p-xl-3{padding:1rem !important}.pt-xl-3,.py-xl-3{padding-top:1rem !important}.pr-xl-3,.px-xl-3{padding-right:1rem !important}.pb-xl-3,.py-xl-3{padding-bottom:1rem !important}.pl-xl-3,.px-xl-3{padding-left:1rem !important}.p-xl-4{padding:1.5rem !important}.pt-xl-4,.py-xl-4{padding-top:1.5rem !important}.pr-xl-4,.px-xl-4{padding-right:1.5rem !important}.pb-xl-4,.py-xl-4{padding-bottom:1.5rem !important}.pl-xl-4,.px-xl-4{padding-left:1.5rem !important}.p-xl-5{padding:3rem !important}.pt-xl-5,.py-xl-5{padding-top:3rem !important}.pr-xl-5,.px-xl-5{padding-right:3rem !important}.pb-xl-5,.py-xl-5{padding-bottom:3rem !important}.pl-xl-5,.px-xl-5{padding-left:3rem !important}.m-xl-auto{margin:auto !important}.mt-xl-auto,.my-xl-auto{margin-top:auto !important}.mr-xl-auto,.mx-xl-auto{margin-right:auto !important}.mb-xl-auto,.my-xl-auto{margin-bottom:auto !important}.ml-xl-auto,.mx-xl-auto{margin-left:auto !important}}.text-monospace{font-family:SFMono-Regular, Menlo, Monaco, Consolas, 
"Liberation Mono", "Courier New", monospace}.text-justify{text-align:justify !important}.text-nowrap{white-space:nowrap !important}.text-truncate{overflow:hidden;text-overflow:ellipsis;white-space:nowrap}.text-left{text-align:left !important}.text-right{text-align:right !important}.text-center{text-align:center !important}@media (min-width: 576px){.text-sm-left{text-align:left !important}.text-sm-right{text-align:right !important}.text-sm-center{text-align:center !important}}@media (min-width: 768px){.text-md-left{text-align:left !important}.text-md-right{text-align:right !important}.text-md-center{text-align:center !important}}@media (min-width: 992px){.text-lg-left{text-align:left !important}.text-lg-right{text-align:right !important}.text-lg-center{text-align:center !important}}@media (min-width: 1200px){.text-xl-left{text-align:left !important}.text-xl-right{text-align:right !important}.text-xl-center{text-align:center !important}}.text-lowercase{text-transform:lowercase !important}.text-uppercase{text-transform:uppercase !important}.text-capitalize{text-transform:capitalize !important}.font-weight-light{font-weight:300 !important}.font-weight-normal{font-weight:400 !important}.font-weight-bold{font-weight:700 !important}.font-italic{font-style:italic !important}.text-white{color:#fff !important}.text-primary{color:#E95420 !important}a.text-primary:hover,a.text-primary:focus{color:#c34113 !important}.text-secondary{color:#AEA79F !important}a.text-secondary:hover,a.text-secondary:focus{color:#978e83 !important}.text-success{color:#38B44A !important}a.text-success:hover,a.text-success:focus{color:#2c8d3a !important}.text-info{color:#17a2b8 !important}a.text-info:hover,a.text-info:focus{color:#117a8b !important}.text-warning{color:#EFB73E !important}a.text-warning:hover,a.text-warning:focus{color:#e7a413 !important}.text-danger{color:#DF382C !important}a.text-danger:hover,a.text-danger:focus{color:#bc271c !important}.text-light{color:#e9ecef 
!important}a.text-light:hover,a.text-light:focus{color:#cbd3da !important}.text-dark{color:#772953 !important}a.text-dark:hover,a.text-dark:focus{color:#511c39 !important}.text-body{color:#333 !important}.text-muted{color:#868e96 !important}.text-black-50{color:rgba(0,0,0,0.5) !important}.text-white-50{color:rgba(255,255,255,0.5) !important}.text-hide{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.visible{visibility:visible !important}.invisible{visibility:hidden !important}@media print{*,*::before,*::after{text-shadow:none !important;-webkit-box-shadow:none !important;box-shadow:none !important}a:not(.btn){text-decoration:underline}abbr[title]::after{content:" (" attr(title) ")"}pre{white-space:pre-wrap !important}pre,blockquote{border:1px solid #AEA79F;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}@page{size:a3}body{min-width:992px !important}.container{min-width:992px !important}.navbar{display:none}.badge{border:1px solid #000}.table{border-collapse:collapse !important}.table td,.table th{background-color:#fff !important}.table-bordered th,.table-bordered td{border:1px solid #dee2e6 !important}.table-dark{color:inherit}.table-dark th,.table-dark td,.table-dark thead th,.table-dark tbody+tbody{border-color:#dee2e6}.table .thead-dark th{color:inherit;border-color:#dee2e6}}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/static/css/index.css
|
CSS
|
body {
margin: 0;
padding: 0;
font-family: sans-serif;
}
.fa-chevron-left:before {
content: "\f053";
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/static/js/ExperimentList.js
|
JavaScript
|
// Toggle the experiment sidebar between its collapsed and expanded states,
// flipping the chevron icon and the list's scroll behaviour to match.
function collapse_experiment_list() {
    $("#sidebar").toggleClass("collapsed");
    $("#content").toggleClass("col-md-8");
    $(".collapser").toggleClass("fa-chevron-left fa-chevron-right");

    // When the list is scrollable, make it fully visible; otherwise
    // restore scrolling.
    var container = $(".experiment-list-container");
    if (container.css("overflow-y") === "scroll") {
        container.css("overflow-y", "visible");
    } else {
        container.css("overflow-y", "scroll");
    }
}
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/templates/index.html
|
HTML
|
<html>
<head>
<title>AutoMLBoard</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
{% load staticfiles %}
<!-- jquery and bootstrap dependency -->
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.11.0/umd/popper.min.js" integrity="sha384-b/U6ypiBEHpOf/4+1nzFpr53nxSS+GLCkfwBdFNTxtclqqenISfwAzpKaMNFNmj4" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta/js/bootstrap.min.js" integrity="sha384-h0AbiXch4ZDo7tp9hKZ4TsHbi047NrKGLO3SEJAg45jXxnGIfYzk4Si90RDIqNm1" crossorigin="anonymous"></script>
<!-- bootstrap table dependency -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.12.1/bootstrap-table.min.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.12.1/bootstrap-table.min.js"></script>
<link rel="stylesheet" href="/static/css/bootstrap.min.css">
<link rel="stylesheet" href="/static/css/App.css">
<link rel="stylesheet" href="/static/css/HomePage.css">
<link rel="stylesheet" href="/static/css/ExperimentView.css">
<script src="/static/js/ExperimentList.js"></script>
<link rel="stylesheet" href="/static/css/ExperimentList.css">
<!-- awesome dependency -->
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.2.0/css/all.css" integrity="sha384-hWVjflwFxL6sNzntih27bfxkr27PmbbK/iSvJ+a4+0owXq79v+lsFkW54bOGbiDQ" crossorigin="anonymous">
</head>
<body>
<nav class="navbar navbar-expand-lg navbar-dark bg-primary" style="margin-right: auto;margin-left: auto;">
<a class="navbar-brand" href="#" style="padding-left: 50px">AutoMLBoard</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarColor01" aria-controls="navbarColor01" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarColor01">
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="nav-link" href="/">Home <span class="sr-only">(current)</span></a>
</li>
<li class="nav-item">
<a class="nav-link" href="https://github.com/ray-project/ray">Github <span class="sr-only">(current)</span></a>
</li>
<li class="nav-item">
<a class="nav-link" href="http://ray.readthedocs.io/">Document</a>
</li>
</ul>
</div>
</nav>
<div class="container" style="max-width: none">
<div class="outer-container row" id = "row-main">
<div class="HomePage-experiment-list-container col-md-2 collapsed" id="sidebar">
<div>
<div class="collapsed-expander-container">
<div class="experiment-list-outer-container">
<div><h1 class="experiments-header">Experiments</h1>
<div class="collapser-container" onclick="collapse_experiment_list()">
<i class="collapser fa fa-chevron-right login-icon"></i>
</div>
<div class="experiment-list-container" style="height: 800px; overflow-y: visible">
<ul class="nav nav-pills flex-column">
{% for job in recent_jobs %}
<tr>
<li class="nav-item">
<a class="nav-link" href="job?job_id={{ job.job_id }}">{{ job.job_id }}</a>
</li>
</tr>
{% endfor %}
</ul>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="experiment-view-container" id="content">
<div class="ExperimentPage">
<div>
<div class="ExperimentView">
<h1>Summary</h1>
<hr class="divider"/>
<span class="metadata" style="line-height: 40px">
<span class="metadata-header">Log Path: </span>
{{ log_dir }}
</span>
<br>
<span class="metadata" style="line-height: 40px">
<span class="metadata-header">Reload Interval: </span>
{{ reload_interval }} s
</span>
</div>
<div class="ExperimentView-runs">
<div class="metadata" style="max-width: 900px; margin-top: 20px">
<span class="metadata">
<span class="metadata-header">Jobs:</span>
{{ job_num }}
</span>
<span class="metadata" style="margin-right: 0px">
<span class="metadata-header">Trials:
</span>
<span>{{ trial_num }} </span>
<span class="badge badge-pill badge-info"
style="margin-left: 10px; border-radius: 0.3em">{{ running_num }} Running</span>
<span class="badge badge-pill badge-success"
style="border-radius: 0.3em">{{ success_num }} Success</span>
<span class="badge badge-pill badge-danger"
style="border-radius: 0.3em">{{ failed_num }} Failed</span>
</span>
</div>
<table class="table table-hover"
id="job_table"
data-toggle="table"
data-show-columns="true"
data-minimum-count-columns="2"
data-id-field="id"
data-page-list="[10, 25, 50, 100, ALL]"
style="border: none; max-height: 800px">
<thead>
<tr>
<th class="bottom-row" data-field="Job ID" data-sortable="true">Job ID</th>
<th class="bottom-row" data-field="User" data-sortable="true">User</th>
<th class="bottom-row" data-field="Start Time" data-sortable="true">Start Time</th>
<th class="bottom-row" data-field="Status" data-sortable="true">Status (Succ / Run / Fail / Total)</th>
<th class="bottom-row" data-field="Progress" data-sortable="true">Progress</th>
<th class="bottom-row" data-field="Winner Trial" data-sortable="true">Winner Trial</th>
<th class="bottom-row" data-field="Winner Metric" data-sortable="true">Winner Metric</th>
</tr>
</thead>
<tbody>
{% for job in recent_jobs %}
<tr>
<td><a href="job?job_id={{ job.job_id }}">{{ job.job_id }}</a></td>
<td>{{ job.user }}</td>
<td>{{ job.start_time }}</td>
<td>{{ job.success_num }} / {{ job.running_num }} / {{ job.failed_num }} / {{ job.total_num }}</td>
<td>
<div class="progress">
<div class="progress-bar bg-success" role="progressbar"
style="width: {{ job.progress }}%;">
<span class="sr-only">{{ job.progress }}</span>
</div>
</div>
</td>
<td><a href="trial?trial_id={{ job.winner.trial_id }}&job_id={{ job.job_id }}">{{ job.winner.trial_id }}</td>
<td>{{ job.winner.metric}}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
</div>
<div class="experiment-view-right"></div>
</div>
</div>
</body>
</html>
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/templates/job.html
|
HTML
|
<html>
<head>
<title>AutoMLBoard</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
{% load staticfiles %}
<!-- jquery and bootstrap dependency -->
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.11.0/umd/popper.min.js" integrity="sha384-b/U6ypiBEHpOf/4+1nzFpr53nxSS+GLCkfwBdFNTxtclqqenISfwAzpKaMNFNmj4" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta/js/bootstrap.min.js" integrity="sha384-h0AbiXch4ZDo7tp9hKZ4TsHbi047NrKGLO3SEJAg45jXxnGIfYzk4Si90RDIqNm1" crossorigin="anonymous"></script>
<!-- bootstrap table dependency -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.12.1/bootstrap-table.min.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.12.1/bootstrap-table.min.js"></script>
<link rel="stylesheet" href="/static/css/bootstrap.min.css">
<link rel="stylesheet" href="/static/css/App.css">
<link rel="stylesheet" href="/static/css/HomePage.css">
<link rel="stylesheet" href="/static/css/ExperimentView.css">
<script src="/static/js/ExperimentList.js"></script>
<link rel="stylesheet" href="/static/css/ExperimentList.css">
<!-- awesome dependency -->
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.2.0/css/all.css" integrity="sha384-hWVjflwFxL6sNzntih27bfxkr27PmbbK/iSvJ+a4+0owXq79v+lsFkW54bOGbiDQ" crossorigin="anonymous">
</head>
<body>
<nav class="navbar navbar-expand-lg navbar-dark bg-primary" style="margin-right: auto;margin-left: auto;">
<a class="navbar-brand" href="#" style="padding-left: 50px">AutoMLBoard</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarColor01" aria-controls="navbarColor01" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarColor01">
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="nav-link" href="/">Home <span class="sr-only">(current)</span></a>
</li>
<li class="nav-item">
<a class="nav-link" href="https://github.com/ray-project/ray">Github</a>
</li>
<li class="nav-item">
<a class="nav-link" href="http://ray.readthedocs.io/">Document</a>
</li>
</ul>
</div>
</nav>
<div class="container" style="max-width: none">
<div class="outer-container row" id = "row-main">
<div class="HomePage-experiment-list-container col-md-2" id="sidebar">
<div>
<div class="collapsed-expander-container">
<div class="experiment-list-outer-container">
<div><h1 class="experiments-header">Experiments</h1>
<div class="collapser-container" onclick="collapse_experiment_list()">
<i class="collapser fa fa-chevron-left login-icon"></i>
</div>
<div class="experiment-list-container" style="height: 800px;">
<ul class="nav nav-pills flex-column">
{% for job in recent_jobs %}
<tr>
<li class="nav-item">
<a class="nav-link" href="job?job_id={{ job.job_id }}">{{ job.job_id }}</a>
</li>
</tr>
{% endfor %}
</ul>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="experiment-view-container col-md-8" id="content">
<div class="ExperimentPage">
<div>
<div class="ExperimentView">
<h1>{{ current_job.job_id }}</h1>
<hr class="divider"/>
<div class="metadata" style="max-width: 900px;">
<span class="metadata">
<span class="metadata-header">User:</span>
{{ current_job.user }}
</span>
<span class="metadata" style="margin-right: 0px">
<span class="metadata-header">Progress:
</span>
<span>{{ current_job.total_num }} Trials</span>
<span class="badge badge-pill badge-info" style="margin-left: 10px; border-radius: 0.3em">{{ current_job.running_num }} Running</span>
<span class="badge badge-pill badge-success" style="border-radius: 0.3em">{{ current_job.success_num }} Success</span>
<span class="badge badge-pill badge-danger" style="border-radius: 0.3em">{{ current_job.failed_num }} Failed</span>
<span class="progress"
style="width: 150px; float: right; margin-right: 120px; margin-top: 5px">
<span class="progress-bar bg-success" role="progressbar" style="width: {{ current_job.progress }}%;"></span>
</span>
</span>
<span class="metadata" style="line-height: 40px">
<span class="metadata-header">Start Time:</span>
{{ current_job.start_time }}
</span>
</div>
<div class="ExperimentView-runs">
<hr class="divider"/>
<table class="table table-hover"
id="trial_table"
data-toggle="table"
data-show-columns="true"
data-show-export="true"
data-minimum-count-columns="2"
data-id-field="id"
data-show-pagination-switch="true"
data-page-list="[10, 25, 50, 100, ALL]"
data-pagination="true"
style="border: none; max-height: 800px">
<thead>
<tr>
<th class="top-row" scope="colgroup" colspan="4">Trials</th>
<th class="top-row left-border" scope="colgroup" colspan="{{ param_num }}">Parameters</th>
<th class="top-row left-border" scope="colgroup" colspan="{{ metric_num }}">Metrics</th>
</tr>
<tr>
<th class="bottom-row" data-field="Trial-ID" data-sortable="true">Trial-ID</th>
<th class="bottom-row" data-field="Status" data-sortable="true">Status</th>
<th class="bottom-row" data-field="Start Time" data-sortable="true">Start Time</th>
<th class="bottom-row" data-field="End Time" data-sortable="true">End Time</th>
{% for param in param_keys %}
<th class="bottom-row" data-field="{{ param }}" data-sortable="true">{{ param }}</th>
{% endfor %}
{% for metric in metric_keys %}
<th class="bottom-row" data-field="{{ metric }}"
data-sortable="true">{{ metric }}</th>
{% endfor %}
</tr>
</thead>
<tbody>
{% for trial in recent_trials %}
<tr>
<td><a href="/trial?job_id={{ trial.job_id }}&trial_id={{ trial.trial_id }}">{{ trial.trial_id }}</a></td>
<td>{{ trial.trial_status}} <!--a href="#">(Kill)</a--></td>
<td>{{ trial.start_time }}</td>
<td>{{ trial.end_time }}</td>
{% for param in trial.params.items %}
<td>{{ param.1 }}</td>
{% endfor %}
<td>{{ trial.metrics.episode_reward }}</td>
<td>{{ trial.metrics.loss }}</td>
<td>{{ trial.metrics.accuracy }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
</div>
<div class="experiment-view-right"></div>
</div>
</div>
</body>
</html>
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/automlboard/templates/trial.html
|
HTML
|
<html>
<head>
<title>AutoMLBoard</title>
<meta name="viewport" content="width=device-width, initial-scale=1.0">
{% load staticfiles %}
<!-- jquery and bootstrap dependency -->
<script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha384-KJ3o2DKtIkvYIK3UENzmM7KCkRr/rE9/Qpg6aAZGJwFDMVNA/GpGFF93hXpG5KkN" crossorigin="anonymous"></script>
<script src="https://cdnjs.cloudflare.com/ajax/libs/popper.js/1.11.0/umd/popper.min.js" integrity="sha384-b/U6ypiBEHpOf/4+1nzFpr53nxSS+GLCkfwBdFNTxtclqqenISfwAzpKaMNFNmj4" crossorigin="anonymous"></script>
<script src="https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta/js/bootstrap.min.js" integrity="sha384-h0AbiXch4ZDo7tp9hKZ4TsHbi047NrKGLO3SEJAg45jXxnGIfYzk4Si90RDIqNm1" crossorigin="anonymous"></script>
<!-- bootstrap table dependency -->
<link rel="stylesheet" href="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.12.1/bootstrap-table.min.css">
<script src="https://cdnjs.cloudflare.com/ajax/libs/bootstrap-table/1.12.1/bootstrap-table.min.js"></script>
<link rel="stylesheet" href="/static/css/bootstrap.min.css">
<link rel="stylesheet" href="/static/css/App.css">
<link rel="stylesheet" href="/static/css/HomePage.css">
<link rel="stylesheet" href="/static/css/ExperimentView.css">
<script src="/static/js/ExperimentList.js"></script>
<link rel="stylesheet" href="/static/css/ExperimentList.css">
<!-- awesome dependency -->
<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.2.0/css/all.css" integrity="sha384-hWVjflwFxL6sNzntih27bfxkr27PmbbK/iSvJ+a4+0owXq79v+lsFkW54bOGbiDQ" crossorigin="anonymous">
</head>
<body>
<nav class="navbar navbar-expand-lg navbar-dark bg-primary" style="margin-right: auto;margin-left: auto;">
<a class="navbar-brand" href="#" style="padding-left: 50px">AutoMLBoard</a>
<button class="navbar-toggler" type="button" data-toggle="collapse" data-target="#navbarColor01" aria-controls="navbarColor01" aria-expanded="false" aria-label="Toggle navigation">
<span class="navbar-toggler-icon"></span>
</button>
<div class="collapse navbar-collapse" id="navbarColor01">
<ul class="navbar-nav mr-auto">
<li class="nav-item">
<a class="nav-link" href="/">Home <span class="sr-only">(current)</span></a>
</li>
<li class="nav-item">
<a class="nav-link" href="https://github.com/ray-project/ray">Github</a>
</li>
<li class="nav-item">
<a class="nav-link" href="http://ray.readthedocs.io/">Document</a>
</li>
</ul>
</div>
</nav>
<div class="container" style="max-width: none">
<div class="outer-container row" id = "row-main">
<div class="HomePage-experiment-list-container col-md-2" id="sidebar">
<div>
<div class="collapsed-expander-container">
<div class="experiment-list-outer-container">
<div><h1 class="experiments-header" style="padding-right: 80px">Trials</h1>
<div class="collapser-container" onclick="collapse_experiment_list()">
<i class="collapser fa fa-chevron-left login-icon"></i>
</div>
<div class="experiment-list-container" style="height: 800px;">
<ul class="nav nav-pills flex-column">
{% for trial in recent_trials %}
<tr>
<li class="nav-item">
<a class="nav-link" href="trial?job_id={{ trial.job_id }}&trial_id={{ trial.trial_id }}">Trial-{{ trial.trial_id }}</a>
</li>
</tr>
{% endfor %}
</ul>
</div>
</div>
</div>
</div>
</div>
</div>
<div class="experiment-view-container col-md-8" id="content">
<div class="ExperimentPage">
<div>
<div class="ExperimentView">
<h1>Trial-{{ current_trial.trial_id }}</h1>
<hr class="divider"/>
<div class="metadata" style="max-width: 900px;">
<span class="metadata">
<span class="metadata-header">Status:</span>
{{ current_trial.trial_status }}
</span>
<br>
<span class="metadata" style="line-height: 40px">
<span class="metadata-header">Start Time:</span>
{{ current_trial.start_time }}
</span>
<span class="metadata" style="line-height: 40px">
<span class="metadata-header">End Time:</span>
{{ current_trial.end_time }}
</span>
<br>
</div>
<div class="metadata" style="margin-top: 20px">
<button disabled="" type="button" class="btn btn-default"
style="width: 80px; max-height: 35px; margin-right: 10px">Kill</button>
<a href="/job?job_id={{ job_id }}">
<button type="button" class="btn-primary btn btn-default"
style="width: 80px; max-height: 35px; margin-right: 20px">Return</button>
</a>
</div>
<hr class="divider"/>
<div class="ExperimentView-runs">
<table class="table table-hover"
id="trial_table"
data-toggle="table"
data-show-columns="true"
data-show-export="true"
data-minimum-count-columns="2"
data-id-field="id"
data-show-pagination-switch="true"
data-page-list="[10, 25, 50, 100, ALL]"
data-pagination="true"
style="border: none; max-height: 800px">
<thead>
<tr class="active">
<th class="bottom-row" data-field="Trial ID" data-sortable="true">Trial Id</th>
<th class="bottom-row" data-field="Timesteps" data-sortable="true">Timesteps</th>
<th class="bottom-row" data-field="Train Iteration" data-sortable="true">Train Iteration</th>
<th class="bottom-row" data-field="Episode Reward Mean" data-sortable="true">Episode Reward Mean</th>
<th class="bottom-row" data-field="Episodes Total" data-sortable="true">Episodes Total</th>
<th class="bottom-row" data-field="Mean Accuracy" data-sortable="true">Mean Accuracy</th>
<th class="bottom-row" data-field="Mean Loss" data-sortable="true">Mean Loss</th>
<th class="bottom-row" data-field="Time Total" data-sortable="true">Time Total</th>
<th class="bottom-row" data-field="Date" data-sortable="true">Date</th>
<th class="bottom-row" data-field="Hostname" data-sortable="true">Hostname</th>
</tr>
</thead>
<tbody>
{% for result in recent_results %}
<tr>
<td>{{ result.trial_id }}</td>
<td>{{ result.timesteps_total }}</td>
<td>{{ result.trainning_iteration }}</td>
<td>{{ result.episode_reward_mean }}</td>
<td>{{ result.episodes_total }}</td>
<td>{{ result.mean_accuracy }}</td>
<td>{{ result.loss }}</td>
<td>{{ result.time_total_s }}</td>
<td>{{ result.date }}</td>
<td>{{ result.hostname }}</td>
</tr>
{% endfor %}
</tbody>
</table>
</div>
</div>
</div>
</div>
</div>
<div class="experiment-view-right"></div>
</div>
</div>
</body>
</html>
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/checkpoint_manager.py
|
Python
|
# coding: utf-8
import heapq
import logging
logger = logging.getLogger(__name__)
class Checkpoint:
    """A snapshot of trial state, kept either in memory or on disk.

    Attributes:
        storage (str): One of ``MEMORY`` or ``PERSISTENT``.
        value: A Python object when ``storage == MEMORY``; a path to
            persistent storage when ``storage == PERSISTENT``.
        result (dict): Training result associated with this checkpoint.
    """

    MEMORY = "memory"
    PERSISTENT = "persistent"

    def __init__(self, storage, value, result=None):
        self.storage = storage
        self.value = value
        # Normalize a missing/empty result so callers can always index it.
        self.result = result if result else {}

    @staticmethod
    def from_object(value=None):
        """Wrap an in-memory Python object as a MEMORY checkpoint."""
        return Checkpoint(Checkpoint.MEMORY, value)
class QueueItem:
    """Pairs a sortable priority with an arbitrary payload.

    Only ``__lt__`` is defined, which is all ``heapq`` requires to order
    items in a min-heap by priority.
    """

    def __init__(self, priority, value):
        self.priority = priority
        self.value = value

    def __lt__(self, other):
        # Order purely by priority; payloads are never compared.
        return self.priority < other.priority
class CheckpointManager:
    """Manages checkpoints on the driver for a trial.

    Keeps the newest checkpoint plus the ``keep_checkpoints_num`` best-scoring
    ones (by ``checkpoint_score_attr``); everything else is deleted via the
    supplied ``delete_fn``.
    """

    def __init__(self, keep_checkpoints_num, checkpoint_score_attr, delete_fn):
        """Initializes a new CheckpointManager.

        Args:
            keep_checkpoints_num (int): Keep at least this many checkpoints.
            checkpoint_score_attr (str): Attribute to use to determine which
                checkpoints to keep.
            delete_fn (function): Function that deletes checkpoints. Must be
                idempotent.
        """
        # None/0 means "keep everything": model that as infinite capacity.
        self.keep_checkpoints_num = keep_checkpoints_num or float("inf")
        assert self.keep_checkpoints_num > 0, (
            "keep_checkpoints_num must be greater than 0.")
        # A "min-" prefix means lower scores are better; strip it and record
        # the direction so _priority() can negate scores for the min-heap.
        self._checkpoint_score_desc = checkpoint_score_attr.startswith("min-")
        if self._checkpoint_score_desc:
            self._checkpoint_score_attr = checkpoint_score_attr[4:]
        else:
            self._checkpoint_score_attr = checkpoint_score_attr
        self.delete = delete_fn
        self.newest_checkpoint = Checkpoint(Checkpoint.MEMORY, None)
        # Min-heap of QueueItem; the root is the worst of the kept best set.
        self._best_checkpoints = []
        # Checkpoints currently tracked as "best" — O(1) membership test when
        # deciding whether an evicted/stale checkpoint may be deleted.
        self._membership = set()

    def on_checkpoint(self, checkpoint):
        """Starts tracking checkpoint metadata on checkpoint.

        Sets newest checkpoint. Deletes previous checkpoint as long as it isn't
        one of the best ones. Also deletes the worst checkpoint if at capacity.

        Args:
            checkpoint (Checkpoint): Trial state checkpoint.
        """
        old_checkpoint = self.newest_checkpoint
        self.newest_checkpoint = checkpoint
        try:
            # Raises KeyError when the score attribute is absent from result.
            queue_item = QueueItem(self._priority(checkpoint), checkpoint)
        except KeyError:
            # Still retire the previous newest checkpoint unless it is one of
            # the best ones; otherwise it would leak.
            if old_checkpoint not in self._membership:
                self.delete(old_checkpoint)
            logger.error("Result dict has no key: {}. "
                         "checkpoint_score_attr must be set to a key in the "
                         "result dict.".format(self._checkpoint_score_attr))
            return
        if len(self._best_checkpoints) < self.keep_checkpoints_num:
            # Below capacity: always keep.
            heapq.heappush(self._best_checkpoints, queue_item)
            self._membership.add(checkpoint)
        elif queue_item.priority >= self._best_checkpoints[0].priority:
            # At capacity and at least as good as the current worst: swap the
            # worst out for the new checkpoint.
            # NOTE(review): on a priority tie, heappushpop may return the item
            # just pushed, deleting the brand-new checkpoint — confirm this
            # edge case is intended.
            worst = heapq.heappushpop(self._best_checkpoints, queue_item).value
            self._membership.add(checkpoint)
            if worst in self._membership:
                self._membership.remove(worst)
            self.delete(worst)
        # Remove the old checkpoint if it isn't one of the best ones.
        if old_checkpoint.value and old_checkpoint not in self._membership:
            self.delete(old_checkpoint)

    def best_checkpoints(self):
        """Returns best checkpoints, sorted by score (ascending priority)."""
        checkpoints = sorted(self._best_checkpoints, key=lambda c: c.priority)
        return [queue_item.value for queue_item in checkpoints]

    def _priority(self, checkpoint):
        # Raises KeyError when _checkpoint_score_attr is missing from result;
        # negated when "min-" was requested so the min-heap still evicts the
        # logically-worst checkpoint first.
        priority = checkpoint.result[self._checkpoint_score_attr]
        return -priority if self._checkpoint_score_desc else priority

    def __getstate__(self):
        state = self.__dict__.copy()
        # Avoid serializing lambda since it may capture cyclical dependencies.
        state.pop("delete")
        return state

    def __setstate__(self, state):
        self.__dict__.update(state)
        # delete_fn is not restorable from pickled state; the owner must
        # reassign it after unpickling.
        self.delete = None
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/cluster_info.py
|
Python
|
import getpass
import os
def get_ssh_user():
    """Return the local username used to ssh into cluster workers."""
    return getpass.getuser()
def get_ssh_key():
    """Return the path of the ssh key used for syncing across nodes.

    The ``TUNE_CLUSTER_SSH_KEY`` environment variable takes precedence;
    otherwise ``~/ray_bootstrap_key.pem`` is tried. ``None`` is returned
    when the resolved path does not exist on disk.
    """
    default_key = os.path.expanduser("~/ray_bootstrap_key.pem")
    path = os.environ.get("TUNE_CLUSTER_SSH_KEY", default_key)
    return path if os.path.exists(path) else None
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/commands.py
|
Python
|
import click
import logging
import os
import subprocess
import operator
from datetime import datetime
import pandas as pd
from pandas.api.types import is_string_dtype, is_numeric_dtype
from ray.tune.result import (DEFAULT_EXPERIMENT_INFO_KEYS, DEFAULT_RESULT_KEYS,
CONFIG_PREFIX)
from ray.tune.analysis import Analysis
from ray.tune import TuneError
try:
from tabulate import tabulate
except ImportError:
tabulate = None
logger = logging.getLogger(__name__)
EDITOR = os.getenv("EDITOR", "vim")
TIMESTAMP_FORMAT = "%Y-%m-%d %H:%M:%S (%A)"
DEFAULT_CLI_KEYS = DEFAULT_EXPERIMENT_INFO_KEYS + DEFAULT_RESULT_KEYS
DEFAULT_PROJECT_INFO_KEYS = (
"name",
"total_trials",
"last_updated",
)
# Probe the terminal size once at import time so table output can be clipped
# to fit; fall back to 100x100 when `stty` fails (e.g. no attached tty).
try:
    TERM_HEIGHT, TERM_WIDTH = subprocess.check_output(["stty", "size"]).split()
    TERM_HEIGHT, TERM_WIDTH = int(TERM_HEIGHT), int(TERM_WIDTH)
except subprocess.CalledProcessError:
    TERM_HEIGHT, TERM_WIDTH = 100, 100

# Comparison operators accepted in `--filter "<col> <op> <value>"` strings,
# mapped to the corresponding binary functions.
OPERATORS = {
    "<": operator.lt,
    "<=": operator.le,
    "==": operator.eq,
    "!=": operator.ne,
    ">=": operator.ge,
    ">": operator.gt,
}
def _check_tabulate():
    """Raise ImportError when the optional ``tabulate`` package is absent."""
    if tabulate is not None:
        return
    raise ImportError(
        "Tabulate not installed. Please run `pip install tabulate`.")
def print_format_output(dataframe):
    """Prints output of given dataframe to fit into terminal.

    Returns:
        table (str): Rendered table (tabulate "psql" format) that was printed.
        dropped_cols (list): Columns dropped due to terminal size.
        empty_cols (list): Empty columns (dropped on default).
    """
    print_df = pd.DataFrame()
    dropped_cols = []
    empty_cols = []
    # column display priority is based on the info_keys passed in
    for i, col in enumerate(dataframe):
        if dataframe[col].isnull().all():
            # Don't add col to print_df if is fully empty
            empty_cols += [col]
            continue
        print_df[col] = dataframe[col]
        # Re-render after each added column; the position of the first newline
        # is the printed width of the table's top border.
        test_table = tabulate(print_df, headers="keys", tablefmt="psql")
        if str(test_table).index("\n") > TERM_WIDTH:
            # Drop all columns beyond terminal width
            print_df.drop(col, axis=1, inplace=True)
            dropped_cols += list(dataframe.columns)[i:]
            break
    table = tabulate(
        print_df, headers="keys", tablefmt="psql", showindex="never")
    print(table)
    if dropped_cols:
        click.secho("Dropped columns: {}".format(dropped_cols), fg="yellow")
        click.secho("Please increase your terminal size "
                    "to view remaining columns.")
    if empty_cols:
        click.secho("Empty columns: {}".format(empty_cols), fg="yellow")
    return table, dropped_cols, empty_cols
def list_trials(experiment_path,
                sort=None,
                output=None,
                filter_op=None,
                info_keys=None,
                limit=None,
                desc=False):
    """Lists trials in the directory subtree starting at the given path.

    Args:
        experiment_path (str): Directory where trials are located.
            Like Experiment.local_dir/Experiment.name/experiment*.json.
        sort (list): Keys to sort by.
        output (str): Name of file where output is saved.
        filter_op (str): Filter operation in the format
            "<column> <operator> <value>".
        info_keys (list): Keys that are displayed.
        limit (int): Number of rows to display.
        desc (bool): Sort ascending vs. descending.

    Raises:
        click.ClickException: On missing trial data, invalid keys/columns,
            unsupported filter dtype, or unsupported output filetype.
    """
    _check_tabulate()
    try:
        checkpoints_df = Analysis(experiment_path).dataframe()
    except TuneError:
        raise click.ClickException("No trial data found!")

    def key_filter(k):
        # Default view: well-known result keys plus any config/* columns.
        return k in DEFAULT_CLI_KEYS or k.startswith(CONFIG_PREFIX)

    col_keys = [k for k in checkpoints_df.columns if key_filter(k)]
    if info_keys:
        # Explicit key list overrides the default view; validate first so the
        # user gets a clear message for a typo.
        for k in info_keys:
            if k not in checkpoints_df.columns:
                raise click.ClickException("Provided key invalid: {}. "
                                           "Available keys: {}.".format(
                                               k, checkpoints_df.columns))
        col_keys = [k for k in checkpoints_df.columns if k in info_keys]
    if not col_keys:
        raise click.ClickException("No columns to output.")
    checkpoints_df = checkpoints_df[col_keys]
    if "last_update_time" in checkpoints_df:
        # Render epoch timestamps as human-readable local times; inf values
        # are treated as nulls and dropped before formatting.
        with pd.option_context("mode.use_inf_as_null", True):
            datetime_series = checkpoints_df["last_update_time"].dropna()
        datetime_series = datetime_series.apply(
            lambda t: datetime.fromtimestamp(t).strftime(TIMESTAMP_FORMAT))
        checkpoints_df["last_update_time"] = datetime_series
    if "logdir" in checkpoints_df:
        # logdir often too long to view in table, so drop experiment_path
        checkpoints_df["logdir"] = checkpoints_df["logdir"].str.replace(
            experiment_path, "")
    if filter_op:
        # Expected shape: "<column> <operator> <value>", space-separated.
        col, op, val = filter_op.split(" ")
        col_type = checkpoints_df[col].dtype
        if is_numeric_dtype(col_type):
            val = float(val)
        elif is_string_dtype(col_type):
            val = str(val)
        # TODO(Andrew): add support for datetime and boolean
        else:
            raise click.ClickException("Unsupported dtype for {}: {}".format(
                val, col_type))
        op = OPERATORS[op]
        filtered_index = op(checkpoints_df[col], val)
        checkpoints_df = checkpoints_df[filtered_index]
    if sort:
        for key in sort:
            if key not in checkpoints_df:
                raise click.ClickException("{} not in: {}".format(
                    key, list(checkpoints_df)))
        ascending = not desc
        checkpoints_df = checkpoints_df.sort_values(
            by=sort, ascending=ascending)
    if limit:
        checkpoints_df = checkpoints_df[:limit]
    print_format_output(checkpoints_df)
    if output:
        # Persist the (possibly filtered/sorted/truncated) view; format is
        # chosen by file extension.
        file_extension = os.path.splitext(output)[1].lower()
        if file_extension in (".p", ".pkl", ".pickle"):
            checkpoints_df.to_pickle(output)
        elif file_extension == ".csv":
            checkpoints_df.to_csv(output, index=False)
        else:
            raise click.ClickException(
                "Unsupported filetype: {}".format(output))
        click.secho("Output saved at {}".format(output), fg="green")
def list_experiments(project_path,
                     sort=None,
                     output=None,
                     filter_op=None,
                     info_keys=None,
                     limit=None,
                     desc=False):
    """Lists experiments in the directory subtree.

    Args:
        project_path (str): Directory where experiments are located.
            Corresponds to Experiment.local_dir.
        sort (list): Keys to sort by.
        output (str): Name of file where output is saved.
        filter_op (str): Filter operation in the format
            "<column> <operator> <value>".
        info_keys (list): Keys that are displayed.
        limit (int): Number of rows to display.
        desc (bool): Sort ascending vs. descending.

    Raises:
        click.ClickException: On missing experiments, invalid keys/columns,
            unsupported filter dtype, or unsupported output filetype.
    """
    _check_tabulate()
    # Each immediate subdirectory of project_path is treated as one
    # experiment.
    base, experiment_folders, _ = next(os.walk(project_path))
    experiment_data_collection = []
    for experiment_dir in experiment_folders:
        # A trial is counted for every directory under the experiment that
        # contains a result.json.
        num_trials = sum(
            "result.json" in files
            for _, _, files in os.walk(os.path.join(base, experiment_dir)))
        experiment_data = {"name": experiment_dir, "total_trials": num_trials}
        experiment_data_collection.append(experiment_data)
    if not experiment_data_collection:
        raise click.ClickException("No experiments found!")
    info_df = pd.DataFrame(experiment_data_collection)
    if not info_keys:
        info_keys = DEFAULT_PROJECT_INFO_KEYS
    # Keep only requested keys that actually exist in the collected data.
    col_keys = [k for k in list(info_keys) if k in info_df]
    if not col_keys:
        raise click.ClickException(
            "None of keys {} in experiment data!".format(info_keys))
    info_df = info_df[col_keys]
    if filter_op:
        # Expected shape: "<column> <operator> <value>", space-separated.
        col, op, val = filter_op.split(" ")
        col_type = info_df[col].dtype
        if is_numeric_dtype(col_type):
            val = float(val)
        elif is_string_dtype(col_type):
            val = str(val)
        # TODO(Andrew): add support for datetime and boolean
        else:
            raise click.ClickException("Unsupported dtype for {}: {}".format(
                val, col_type))
        op = OPERATORS[op]
        filtered_index = op(info_df[col], val)
        info_df = info_df[filtered_index]
    if sort:
        for key in sort:
            if key not in info_df:
                raise click.ClickException("{} not in: {}".format(
                    key, list(info_df)))
        ascending = not desc
        info_df = info_df.sort_values(by=sort, ascending=ascending)
    if limit:
        info_df = info_df[:limit]
    print_format_output(info_df)
    if output:
        # Persist the rendered view; format is chosen by file extension.
        file_extension = os.path.splitext(output)[1].lower()
        if file_extension in (".p", ".pkl", ".pickle"):
            info_df.to_pickle(output)
        elif file_extension == ".csv":
            info_df.to_csv(output, index=False)
        else:
            raise click.ClickException(
                "Unsupported filetype: {}".format(output))
        click.secho("Output saved at {}".format(output), fg="green")
def add_note(path, filename="note.txt"):
"""Opens a txt file at the given path where user can add and save notes.
Args:
path (str): Directory where note will be saved.
filename (str): Name of note. Defaults to "note.txt"
"""
path = os.path.expanduser(path)
assert os.path.isdir(path), "{} is not a valid directory.".format(path)
filepath = os.path.join(path, filename)
exists = os.path.isfile(filepath)
try:
subprocess.call([EDITOR, filepath])
except Exception as exc:
click.secho("Editing note failed: {}".format(str(exc)), fg="red")
if exists:
print("Note updated at:", filepath)
else:
print("Note created at:", filepath)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/config_parser.py
|
Python
|
import argparse
import json
import os
# For compatibility under py2 to consider unicode as str
from six import string_types
from ray.tune import TuneError
from ray.tune.trial import Trial
from ray.tune.resources import json_to_resources
from ray.tune.logger import _SafeFallbackEncoder
def make_parser(parser_creator=None, **kwargs):
    """Returns a base argument parser for the ray.tune tool.

    Args:
        parser_creator: A constructor for the parser class.
        kwargs: Non-positional args to be passed into the
            parser class constructor.

    Returns:
        An ArgumentParser (or parser_creator instance) with all tune
        command line flags registered.
    """
    if parser_creator:
        parser = parser_creator(**kwargs)
    else:
        parser = argparse.ArgumentParser(**kwargs)
    # Note: keep this in sync with rllib/train.py
    parser.add_argument(
        "--run",
        default=None,
        type=str,
        # Help text typo fixed: "built-on" -> "built-in".
        help="The algorithm or model to train. This may refer to the name "
        "of a built-in algorithm (e.g. RLLib's DQN or PPO), or a "
        "user-defined trainable function or class registered in the "
        "tune registry.")
    parser.add_argument(
        "--stop",
        default="{}",
        type=json.loads,
        help="The stopping criteria, specified in JSON. The keys may be any "
        "field returned by 'train()' e.g. "
        "'{\"time_total_s\": 600, \"training_iteration\": 100000}' to stop "
        "after 600 seconds or 100k iterations, whichever is reached first.")
    parser.add_argument(
        "--config",
        default="{}",
        type=json.loads,
        help="Algorithm-specific configuration (e.g. env, hyperparams), "
        "specified in JSON.")
    parser.add_argument(
        "--resources-per-trial",
        default=None,
        type=json_to_resources,
        help="Override the machine resources to allocate per trial, e.g. "
        "'{\"cpu\": 64, \"gpu\": 8}'. Note that GPUs will not be assigned "
        "unless you specify them here. For RLlib, you probably want to "
        "leave this alone and use RLlib configs to control parallelism.")
    parser.add_argument(
        "--num-samples",
        default=1,
        type=int,
        help="Number of times to repeat each trial.")
    # Checkpointing-related flags.
    parser.add_argument(
        "--checkpoint-freq",
        default=0,
        type=int,
        help="How many training iterations between checkpoints. "
        "A value of 0 (default) disables checkpointing.")
    parser.add_argument(
        "--checkpoint-at-end",
        action="store_true",
        help="Whether to checkpoint at the end of the experiment. "
        "Default is False.")
    parser.add_argument(
        "--no-sync-on-checkpoint",
        action="store_true",
        help="Disable sync-down of trial checkpoint, which is enabled by "
        "default to guarantee recoverability. If set, checkpoint syncing from "
        "worker to driver is asynchronous. Set this only if synchronous "
        "checkpointing is too slow and trial restoration failures can be "
        "tolerated")
    parser.add_argument(
        "--keep-checkpoints-num",
        default=None,
        type=int,
        help="Number of best checkpoints to keep. Others get "
        "deleted. Default (None) keeps all checkpoints.")
    parser.add_argument(
        "--checkpoint-score-attr",
        default="training_iteration",
        type=str,
        help="Specifies by which attribute to rank the best checkpoint. "
        "Default is increasing order. If attribute starts with min- it "
        "will rank attribute in decreasing order. Example: "
        "min-validation_loss")
    parser.add_argument(
        "--export-formats",
        default=None,
        # Help text grammar fixed: "that exported" -> "that are exported".
        help="List of formats that are exported at the end of the experiment. "
        "Default is None. For RLlib, 'checkpoint' and 'model' are "
        "supported for TensorFlow policy graphs.")
    parser.add_argument(
        "--max-failures",
        default=3,
        type=int,
        help="Try to recover a trial from its last checkpoint at least this "
        "many times. Only applies if checkpointing is enabled.")
    # Scheduler selection and configuration.
    parser.add_argument(
        "--scheduler",
        default="FIFO",
        type=str,
        help="FIFO (default), MedianStopping, AsyncHyperBand, "
        "HyperBand, or HyperOpt.")
    parser.add_argument(
        "--scheduler-config",
        default="{}",
        type=json.loads,
        help="Config options to pass to the scheduler.")
    # Note: this currently only makes sense when running a single trial
    parser.add_argument(
        "--restore",
        default=None,
        type=str,
        help="If specified, restore from this checkpoint.")
    return parser
def to_argv(config):
    """Converts configuration to a command line argument format.

    Args:
        config (dict): Flat mapping of option name -> value. Keys must use
            underscores; they are emitted as ``--dashed-names``.

    Returns:
        list: Flag/value strings suitable for ``parser.parse_known_args``.

    Raises:
        ValueError: If a key contains a dash.
    """
    argv = []
    for k, v in config.items():
        if "-" in k:
            raise ValueError("Use '_' instead of '-' in `{}`".format(k))
        if v is None:
            # None means "flag not set"; omit it entirely.
            continue
        # Booleans are argparse store_true flags: emit `--flag` only when
        # truthy, and never emit a value token after it.
        if not isinstance(v, bool) or v:
            argv.append("--{}".format(k.replace("_", "-")))
        # Python 3 only needs builtin `str` here; the previous six
        # `string_types` dependency was py2 compatibility.
        if isinstance(v, str):
            argv.append(v)
        elif isinstance(v, bool):
            pass
        else:
            # Non-scalar values round-trip as JSON, matching the
            # `type=json.loads` options registered in make_parser.
            argv.append(json.dumps(v, cls=_SafeFallbackEncoder))
    return argv
def create_trial_from_spec(spec, output_path, parser, **trial_kwargs):
    """Creates a Trial object from parsing the spec.

    Arguments:
        spec (dict): A resolved experiment specification. The keys here
            should correspond to the command line flags registered in
            ray.tune.config_parser.make_parser.
        output_path (str): A specific output path within the local_dir.
            Typically the name of the experiment.
        parser (ArgumentParser): An argument parser object from
            make_parser.
        trial_kwargs: Extra keyword arguments used in instantiating the Trial.

    Returns:
        A trial object with corresponding parameters to the specification.

    Raises:
        TuneError: If the spec cannot be parsed into command line args.
    """
    try:
        # Round-trip the spec through argparse so defaults (e.g.
        # checkpoint_freq, max_failures) are filled in uniformly.
        args, _ = parser.parse_known_args(to_argv(spec))
    except SystemExit:
        # argparse calls sys.exit() on bad input; surface a TuneError instead.
        raise TuneError("Error parsing args, see above message", spec)
    if "resources_per_trial" in spec:
        trial_kwargs["resources"] = json_to_resources(
            spec["resources_per_trial"])
    return Trial(
        # Submitting trial via server in py2.7 creates Unicode, which does not
        # convert to string in a straightforward manner.
        trainable_name=spec["run"],
        # json.load leads to str -> unicode in py2.7
        config=spec.get("config", {}),
        local_dir=os.path.join(spec["local_dir"], output_path),
        # json.load leads to str -> unicode in py2.7
        stopping_criterion=spec.get("stop", {}),
        remote_checkpoint_dir=spec.get("remote_checkpoint_dir"),
        checkpoint_freq=args.checkpoint_freq,
        checkpoint_at_end=args.checkpoint_at_end,
        sync_on_checkpoint=not args.no_sync_on_checkpoint,
        keep_checkpoints_num=args.keep_checkpoints_num,
        checkpoint_score_attr=args.checkpoint_score_attr,
        export_formats=spec.get("export_formats", []),
        # str(None) doesn't create None
        restore_path=spec.get("restore"),
        trial_name_creator=spec.get("trial_name_creator"),
        loggers=spec.get("loggers"),
        # str(None) doesn't create None
        sync_to_driver_fn=spec.get("sync_to_driver"),
        max_failures=args.max_failures,
        **trial_kwargs)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/durable_trainable.py
|
Python
|
import os
from ray.tune.trainable import Trainable, TrainableUtil
from ray.tune.syncer import get_cloud_sync_client
class DurableTrainable(Trainable):
    """Abstract class for a remote-storage backed fault-tolerant Trainable.

    Supports checkpointing to and restoring from remote storage. To use this
    class, implement the same private methods as ray.tune.Trainable (`_save`,
    `_train`, `_restore`, `reset_config`, `_setup`, `_stop`).

    .. warning:: This class is currently **experimental** and may
        be subject to change.

    Run this with Tune as follows. Setting `sync_to_driver=False` disables
    syncing to the driver to avoid keeping redundant checkpoints around, as
    well as preventing the driver from syncing up the same checkpoint.

    See ``tune/trainable.py``.

    Attributes:
        remote_checkpoint_dir (str): Upload directory (S3 or GS path).
        storage_client: Tune-internal interface for interacting with external
            storage.

    >>> tune.run(MyDurableTrainable, sync_to_driver=False)
    """

    def __init__(self, remote_checkpoint_dir, *args, **kwargs):
        """Initializes a DurableTrainable.

        Args:
            remote_checkpoint_dir (str): Upload directory (S3 or GS path).
        """
        super(DurableTrainable, self).__init__(*args, **kwargs)
        self.remote_checkpoint_dir = remote_checkpoint_dir
        self.storage_client = self._create_storage_client()

    def save(self, checkpoint_dir=None):
        """Saves the current model state to a checkpoint, persisted remotely.

        The storage client must provide durability for
        restoration to work. That is, once ``storage.client.wait()``
        returns after a checkpoint `sync up`, the checkpoint is considered
        committed and can be used to restore the trainable.

        Args:
            checkpoint_dir (Optional[str]): Optional dir to place the
                checkpoint. Must be ``logdir`` or a sub-directory.

        Returns:
            Checkpoint path or prefix that may be passed to restore().

        Raises:
            ValueError: If ``checkpoint_dir`` is outside ``self.logdir``.
        """
        if checkpoint_dir:
            # Bug fix: the original called the non-existent method
            # `str.starts_with` (always AttributeError) and, had it run,
            # would have rejected exactly the *valid* directories. Reject
            # only paths that are NOT logdir or one of its sub-directories.
            if not os.path.abspath(checkpoint_dir).startswith(
                    os.path.abspath(self.logdir)):
                raise ValueError("`checkpoint_dir` must be `self.logdir`, or "
                                 "a sub-directory.")
        checkpoint_path = super(DurableTrainable, self).save(checkpoint_dir)
        # Commit the checkpoint to remote storage before returning so the
        # returned path is durable (see class docstring).
        self.storage_client.sync_up(self.logdir, self.remote_checkpoint_dir)
        self.storage_client.wait()
        return checkpoint_path

    def restore(self, checkpoint_path):
        """Restores training state from a given checkpoint persisted remotely.

        These checkpoints are returned from calls to save().

        Args:
            checkpoint_path (str): Local path to checkpoint.
        """
        # Pull remote checkpoint data down into logdir before local restore.
        self.storage_client.sync_down(self.remote_checkpoint_dir, self.logdir)
        self.storage_client.wait()
        super(DurableTrainable, self).restore(checkpoint_path)

    def delete_checkpoint(self, checkpoint_path):
        """Deletes checkpoint from both local and remote storage.

        Args:
            checkpoint_path (str): Local path to checkpoint.
        """
        super(DurableTrainable, self).delete_checkpoint(checkpoint_path)
        local_dirpath = TrainableUtil.find_checkpoint_dir(checkpoint_path)
        self.storage_client.delete(self._storage_path(local_dirpath))

    def _create_storage_client(self):
        """Returns a storage client for the remote checkpoint directory."""
        return get_cloud_sync_client(self.remote_checkpoint_dir)

    def _storage_path(self, local_path):
        # Map a path under logdir to its corresponding remote path.
        rel_local_path = os.path.relpath(local_path, self.logdir)
        return os.path.join(self.remote_checkpoint_dir, rel_local_path)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/error.py
|
Python
|
class TuneError(Exception):
    """General error class raised by ray.tune."""


class AbortTrialExecution(TuneError):
    """Error that indicates a trial should not be retried."""
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/async_hyperband_example.py
|
Python
|
#!/usr/bin/env python
import argparse
import json
import os
import random
import numpy as np
import ray
from ray.tune import Trainable, run, sample_from
from ray.tune.schedulers import AsyncHyperBandScheduler
class MyTrainableClass(Trainable):
    """Example agent whose learning curve is a random sigmoid.

    The dummy hyperparameters "width" and "height" determine the slope and
    maximum reward value reached.
    """

    def _setup(self, config):
        # Number of completed training iterations.
        self.timestep = 0

    def _train(self):
        self.timestep += 1
        # Reward follows a tanh curve: slope set by "width", asymptote near
        # "height".
        width = self.config.get("width", 1)
        height = self.config.get("height", 1)
        reward = np.tanh(float(self.timestep) / width) * height
        # Here we use `episode_reward_mean`, but you can also report other
        # objectives such as loss or accuracy.
        return {"episode_reward_mean": reward}

    def _save(self, checkpoint_dir):
        # The iteration counter is the entire trainable state.
        checkpoint_file = os.path.join(checkpoint_dir, "checkpoint")
        with open(checkpoint_file, "w") as f:
            json.dump({"timestep": self.timestep}, f)
        return checkpoint_file

    def _restore(self, checkpoint_path):
        with open(checkpoint_path) as f:
            self.timestep = json.load(f)["timestep"]
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    parser.add_argument(
        "--ray-address",
        help="Address of Ray cluster for seamless distributed execution.")
    args, _ = parser.parse_known_args()
    # With address=None, ray.init starts a fresh local Ray instance.
    ray.init(address=args.ray_address)
    # asynchronous hyperband early stopping, configured with
    # `episode_reward_mean` as the
    # objective and `training_iteration` as the time unit,
    # which is automatically filled by Tune.
    ahb = AsyncHyperBandScheduler(
        time_attr="training_iteration",
        metric="episode_reward_mean",
        mode="max",
        grace_period=5,
        max_t=100)
    # 20 trials with random (width, height); smoke test stops after 1 iter.
    run(MyTrainableClass,
        name="asynchyperband_test",
        scheduler=ahb,
        stop={"training_iteration": 1 if args.smoke_test else 99999},
        num_samples=20,
        resources_per_trial={
            "cpu": 1,
            "gpu": 0
        },
        config={
            "width": sample_from(lambda spec: 10 + int(90 * random.random())),
            "height": sample_from(lambda spec: int(100 * random.random())),
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/ax_example.py
|
Python
|
"""This test checks that AxSearch is functional.
It also checks that it is usable with a separate scheduler.
"""
import numpy as np
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.ax import AxSearch
def hartmann6(x):
    """Six-dimensional Hartmann test function (negative-valued objective).

    Args:
        x: Sequence of 6 floats.

    Returns:
        The Hartmann-6 value at ``x`` (lower is better).
    """
    alpha = np.array([1.0, 1.2, 3.0, 3.2])
    A = np.array([
        [10, 3, 17, 3.5, 1.7, 8],
        [0.05, 10, 17, 0.1, 8, 14],
        [3, 3.5, 1.7, 10, 17, 8],
        [17, 8, 0.05, 10, 0.1, 14],
    ])
    P = 10**(-4) * np.array([
        [1312, 1696, 5569, 124, 8283, 5886],
        [2329, 4135, 8307, 3736, 1004, 9991],
        [2348, 1451, 3522, 2883, 3047, 6650],
        [4047, 8828, 8732, 5743, 1091, 381],
    ])
    # Vectorized form of:
    #   y = -sum_j alpha_j * exp(-sum_k A[j,k] * (x_k - P[j,k])^2)
    deltas = np.asarray(x) - P            # shape (4, 6)
    exponents = np.sum(A * deltas**2, axis=1)  # shape (4,)
    return -np.dot(alpha, np.exp(-exponents))
def easy_objective(config, reporter):
    """Tune function API trainable reporting the Hartmann-6 objective.

    Reads the point coordinates from config keys "x1".."x6" and reports
    ``config["iterations"]`` results, each with the objective value and the
    L2 norm of the point.
    """
    import time
    # Simulated startup cost.
    time.sleep(0.2)
    for step in range(config["iterations"]):
        point = np.array([config.get("x{}".format(d + 1)) for d in range(6)])
        reporter(
            timesteps_total=step,
            hartmann6=hartmann6(point),
            l2norm=np.sqrt((point**2).sum()))
        # Simulated per-step cost.
        time.sleep(0.02)
if __name__ == "__main__":
    import argparse
    from ax.service.ax_client import AxClient
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    config = {
        "num_samples": 10 if args.smoke_test else 50,
        "config": {
            "iterations": 100,
        },
        "stop": {
            "timesteps_total": 100
        }
    }
    # Six bounded inputs x1..x6 for the Hartmann-6 function, in Ax's
    # parameter-dict format.
    parameters = [
        {
            "name": "x1",
            "type": "range",
            "bounds": [0.0, 1.0],
            "value_type": "float", # Optional, defaults to "bounds".
            "log_scale": False, # Optional, defaults to False.
        },
        {
            "name": "x2",
            "type": "range",
            "bounds": [0.0, 1.0],
        },
        {
            "name": "x3",
            "type": "range",
            "bounds": [0.0, 1.0],
        },
        {
            "name": "x4",
            "type": "range",
            "bounds": [0.0, 1.0],
        },
        {
            "name": "x5",
            "type": "range",
            "bounds": [0.0, 1.0],
        },
        {
            "name": "x6",
            "type": "range",
            "bounds": [0.0, 1.0],
        },
    ]
    client = AxClient(enforce_sequential_optimization=False)
    client.create_experiment(
        parameters=parameters,
        objective_name="hartmann6",
        minimize=True,  # Optional, defaults to False.
        parameter_constraints=["x1 + x2 <= 2.0"],  # Optional.
        outcome_constraints=["l2norm <= 1.25"],  # Optional.
    )
    algo = AxSearch(client, max_concurrent=4)
    # Early-stop on the reported hartmann6 metric.
    scheduler = AsyncHyperBandScheduler(metric="hartmann6", mode="max")
    run(easy_objective,
        name="ax",
        search_alg=algo,
        scheduler=scheduler,
        **config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/bayesopt_example.py
|
Python
|
"""This test checks that BayesOpt is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.bayesopt import BayesOptSearch
def easy_objective(config, reporter):
    """Toy objective for BayesOpt: loss minimized at height=14, |width-3| max.

    Reports ``config["iterations"]`` identical loss values with a small
    artificial delay per step.
    """
    import time
    # Simulated startup cost.
    time.sleep(0.2)
    for step in range(config["iterations"]):
        reporter(
            timesteps_total=step,
            mean_loss=(config["height"] - 14)**2 - abs(config["width"] - 3))
        # Simulated per-step cost.
        time.sleep(0.02)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    # Search bounds for BayesOpt: width in [0, 20], height in [-100, 100].
    space = {"width": (0, 20), "height": (-100, 100)}
    config = {
        "num_samples": 10 if args.smoke_test else 1000,
        "config": {
            "iterations": 100,
        },
        "stop": {
            "timesteps_total": 100
        }
    }
    # Upper-confidence-bound acquisition; see BayesOptSearch docs for
    # the meaning of kappa/xi.
    algo = BayesOptSearch(
        space,
        max_concurrent=4,
        metric="mean_loss",
        mode="min",
        utility_kwargs={
            "kind": "ucb",
            "kappa": 2.5,
            "xi": 0.0
        })
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    run(easy_objective,
        name="my_exp",
        search_alg=algo,
        scheduler=scheduler,
        **config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/bohb_example.py
|
Python
|
#!/usr/bin/env python
import argparse
import json
import os
import numpy as np
import ray
from ray.tune import Trainable, run
from ray.tune.schedulers.hb_bohb import HyperBandForBOHB
from ray.tune.suggest.bohb import TuneBOHB
# Flags are parsed at import time so `args` is available to the
# __main__ block below.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
    "--ray-address",
    help="Address of Ray cluster for seamless distributed execution.")
args, _ = parser.parse_known_args()
class MyTrainableClass(Trainable):
    """Example agent whose learning curve is a random sigmoid.

    The dummy hyperparameters "width" and "height" determine the slope and
    maximum reward value reached.
    """

    def _setup(self, config):
        # Number of completed training iterations.
        self.timestep = 0

    def _train(self):
        self.timestep += 1
        # Reward follows a tanh curve: slope set by "width", asymptote near
        # "height".
        width = self.config.get("width", 1)
        height = self.config.get("height", 1)
        reward = np.tanh(float(self.timestep) / width) * height
        # Here we use `episode_reward_mean`, but you can also report other
        # objectives such as loss or accuracy.
        return {"episode_reward_mean": reward}

    def _save(self, checkpoint_dir):
        # The iteration counter is the entire trainable state.
        checkpoint_file = os.path.join(checkpoint_dir, "checkpoint")
        with open(checkpoint_file, "w") as f:
            json.dump({"timestep": self.timestep}, f)
        return checkpoint_file

    def _restore(self, checkpoint_path):
        with open(checkpoint_path) as f:
            self.timestep = json.load(f)["timestep"]
if __name__ == "__main__":
    import ConfigSpace as CS
    ray.init(address=args.ray_address)
    # BOHB uses ConfigSpace for their hyperparameter search space
    config_space = CS.ConfigurationSpace()
    config_space.add_hyperparameter(
        CS.UniformFloatHyperparameter("height", lower=10, upper=100))
    config_space.add_hyperparameter(
        CS.UniformFloatHyperparameter("width", lower=0, upper=100))
    # Scheduler and searcher must agree on metric/mode, so share them.
    experiment_metrics = dict(metric="episode_reward_mean", mode="max")
    bohb_hyperband = HyperBandForBOHB(
        time_attr="training_iteration",
        max_t=100,
        reduction_factor=4,
        **experiment_metrics)
    bohb_search = TuneBOHB(
        config_space, max_concurrent=4, **experiment_metrics)
    run(MyTrainableClass,
        name="bohb_test",
        scheduler=bohb_hyperband,
        search_alg=bohb_search,
        num_samples=10,
        stop={"training_iteration": 10 if args.smoke_test else 100})
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/durable_trainable_example.py
|
Python
|
import argparse
import numpy as np
import time
import logging
import os
import ray
from ray import tune
from ray.tune import DurableTrainable
from ray.tune.sync_client import get_sync_client
import cloudpickle
logger = logging.getLogger(__name__)
class MockDurableTrainable(DurableTrainable):
    """Mocks the storage client on initialization to store data locally.

    Rewrites the remote URI (e.g. ``s3://bucket/...``) into a /tmp path and
    replaces the cloud sync client with local rsync commands, so the example
    runs without cloud credentials.
    """

    def __init__(self, remote_checkpoint_dir, *args, **kwargs):
        # Mock the path as a local path.
        local_dir_suffix = remote_checkpoint_dir.split("://")[1]
        remote_checkpoint_dir = os.path.join("/tmp", local_dir_suffix)
        # Disallow malformed relative paths for delete safety.
        assert os.path.abspath(remote_checkpoint_dir).startswith("/tmp")
        # Bug fix: log the local variable. `self.remote_checkpoint_dir` is
        # only assigned by super().__init__() below, so reading it here
        # raised AttributeError.
        logger.info("Using %s as the mocked remote checkpoint directory.",
                    remote_checkpoint_dir)
        super(MockDurableTrainable, self).__init__(remote_checkpoint_dir,
                                                   *args, **kwargs)

    def _create_storage_client(self):
        # Local rsync-based stand-in for the cloud sync client.
        sync = "mkdir -p {target} && rsync -avz {source} {target}"
        delete = "rm -rf {target}"
        return get_sync_client(sync, delete)
class OptimusFn(object):
    """Synthetic learning curve: hyperbolic decay plus positive noise."""

    def __init__(self, params, max_t=10000):
        # params = (b0, b1, b2): shape parameters of the curve.
        self.params = params
        # Pre-draw one noise sample per possible step k < max_t.
        self.noise = np.random.normal(size=max_t) * 0.005

    def eval(self, k, add_noise=True):
        """Evaluate the curve at step ``k``, optionally with noise."""
        b0, b1, b2 = self.params
        score = (b0 * k / 100 + 0.1 * b1 + 0.5)**(-1) + b2 * 0.01
        if not add_noise:
            return score
        return score + abs(self.noise[k])
def get_optimus_trainable(parent_cls):
    """Builds an OptimusTrainable class deriving from ``parent_cls``.

    Args:
        parent_cls: Base Trainable class (DurableTrainable or
            MockDurableTrainable).

    Returns:
        The OptimusTrainable subclass.
    """
    class OptimusTrainable(parent_cls):
        def _setup(self, config):
            self.iter = 0
            if config.get("seed"):
                np.random.seed(config["seed"])
            # Optional artificial startup latency.
            time.sleep(config.get("startup_delay", 0))
            params = [config["param1"], config["param2"], config["param3"]]
            self.func = OptimusFn(params=params)
            self.initial_samples_per_step = 500
            # 1 KB of random bytes to give checkpoints some payload.
            self.mock_data = open("/dev/urandom", "rb").read(1024)
        def _train(self):
            self.iter += 1
            new_loss = self.func.eval(self.iter)
            time.sleep(0.5)
            return {
                "mean_loss": float(new_loss),
                "mean_accuracy": (2 - new_loss) / 2,
                "samples": self.initial_samples_per_step
            }
        def _save(self, checkpoint_dir):
            time.sleep(0.5)
            # Returning a dict lets Tune handle checkpoint serialization.
            return {
                "func": cloudpickle.dumps(self.func),
                "seed": np.random.get_state(),
                "data": self.mock_data,
                "iter": self.iter
            }
        def _restore(self, checkpoint):
            self.func = cloudpickle.loads(checkpoint["func"])
            # NOTE(review): restores into `self.data`, while _save reads
            # `self.mock_data` -- the value written by _setup is what gets
            # re-saved. Confirm whether this asymmetry is intentional.
            self.data = checkpoint["data"]
            self.iter = checkpoint["iter"]
            np.random.set_state(checkpoint["seed"])
    return OptimusTrainable
def parse():
    """Parse command line flags for the durable-trainable example."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--local", action="store_true", default=False)
    arg_parser.add_argument(
        "--mock-storage", action="store_true", default=False)
    arg_parser.add_argument("--remote-dir", type=str)
    return arg_parser.parse_args()
if __name__ == "__main__":
    args = parse()
    # --local starts a fresh Ray instance; otherwise attach to a cluster.
    address = None if args.local else "auto"
    ray.init(address=address)
    config = {
        "seed": None,
        "startup_delay": 0.001,
        "param1": tune.sample_from(lambda spec: np.random.exponential(0.1)),
        "param2": tune.sample_from(lambda _: np.random.rand()),
        "param3": tune.sample_from(lambda _: np.random.rand()),
    }
    parent = MockDurableTrainable if args.mock_storage else DurableTrainable
    # NOTE(review): args.remote_dir is parsed but unused; upload_dir below
    # is hard-coded -- confirm whether they should be linked.
    analysis = tune.run(
        get_optimus_trainable(parent),
        name="durableTrainable" + str(time.time()),
        config=config,
        num_samples=4,
        verbose=1,
        queue_trials=True,
        # fault tolerance parameters
        max_failures=-1,
        checkpoint_freq=20,
        sync_to_driver=False,
        sync_on_checkpoint=False,
        upload_dir="s3://ray-tune-test/exps/",
        checkpoint_score_attr="training_iteration",
    )
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/genetic_example.py
|
Python
|
"""This test checks that GeneticSearch is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.automl import GeneticSearch
from ray.tune.automl import ContinuousSpace, DiscreteSpace, SearchSpace
def michalewicz_function(config, reporter):
    """f(x) = -sum{sin(xi) * [sin(i*xi^2 / pi)]^(2m)}  (5-D, m = 10).

    Reads x1..x5 from ``config`` and reports the negated value once, since
    Tune maximizes `neg_mean_loss`.
    """
    import numpy as np
    point = np.array([config["x{}".format(d)] for d in range(1, 6)])
    sin_x = np.sin(point)
    z = (np.arange(1, 6) / np.pi * (point * point))
    sin_z = np.power(np.sin(z), 20)  # let m = 20
    y = np.dot(sin_x, sin_z)
    # Negate y since we want to minimize y value
    reporter(timesteps_total=1, neg_mean_loss=-y)
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    # Four continuous dimensions (each discretized into 100 points) plus one
    # discrete dimension.
    space = SearchSpace({
        ContinuousSpace("x1", 0, 4, 100),
        ContinuousSpace("x2", -2, 2, 100),
        ContinuousSpace("x3", 1, 5, 100),
        ContinuousSpace("x4", -3, 3, 100),
        DiscreteSpace("x5", [-1, 0, 1, 2, 3]),
    })
    config = {"stop": {"training_iteration": 100}}
    algo = GeneticSearch(
        space,
        reward_attr="neg_mean_loss",
        max_generation=2 if args.smoke_test else 10,
        population_size=10 if args.smoke_test else 50)
    scheduler = AsyncHyperBandScheduler(metric="neg_mean_loss", mode="max")
    run(michalewicz_function,
        name="my_exp",
        search_alg=algo,
        scheduler=scheduler,
        **config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/hyperband_example.py
|
Python
|
#!/usr/bin/env python
import argparse
import json
import os
import random
import numpy as np
import ray
from ray.tune import Trainable, run, Experiment, sample_from
from ray.tune.schedulers import HyperBandScheduler
class MyTrainableClass(Trainable):
    """Example agent whose learning curve is a random sigmoid.

    The dummy hyperparameters "width" and "height" determine the slope and
    maximum reward value reached.
    """

    def _setup(self, config):
        # Number of completed training iterations.
        self.timestep = 0

    def _train(self):
        self.timestep += 1
        # Reward follows a tanh curve: slope set by "width", asymptote near
        # "height".
        width = self.config.get("width", 1)
        height = self.config.get("height", 1)
        reward = np.tanh(float(self.timestep) / width) * height
        # Here we use `episode_reward_mean`, but you can also report other
        # objectives such as loss or accuracy.
        return {"episode_reward_mean": reward}

    def _save(self, checkpoint_dir):
        # The iteration counter is the entire trainable state.
        checkpoint_file = os.path.join(checkpoint_dir, "checkpoint")
        with open(checkpoint_file, "w") as f:
            json.dump({"timestep": self.timestep}, f)
        return checkpoint_file

    def _restore(self, checkpoint_path):
        with open(checkpoint_path) as f:
            self.timestep = json.load(f)["timestep"]
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    # Hyperband early stopping, configured with `episode_reward_mean` as the
    # objective and `training_iteration` as the time unit,
    # which is automatically filled by Tune.
    hyperband = HyperBandScheduler(
        time_attr="training_iteration",
        metric="episode_reward_mean",
        mode="max",
        max_t=100)
    # 20 trials with random (width, height); smoke test stops after 1 iter.
    exp = Experiment(
        name="hyperband_test",
        run=MyTrainableClass,
        num_samples=20,
        stop={"training_iteration": 1 if args.smoke_test else 99999},
        config={
            "width": sample_from(lambda spec: 10 + int(90 * random.random())),
            "height": sample_from(lambda spec: int(100 * random.random()))
        })
    run(exp, scheduler=hyperband)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/hyperopt_example.py
|
Python
|
"""This test checks that HyperOpt is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.hyperopt import HyperOptSearch
def easy_objective(config, reporter):
    """Toy objective for HyperOpt; also verifies that hp.choice values
    arrive as resolved strings rather than indices.
    """
    import time
    # Simulated startup cost.
    time.sleep(0.2)
    assert type(config["activation"]) == str, \
        "Config is incorrect: {}".format(type(config["activation"]))
    for step in range(config["iterations"]):
        reporter(
            timesteps_total=step,
            mean_loss=(config["height"] - 14)**2 - abs(config["width"] - 3))
        # Simulated per-step cost.
        time.sleep(0.02)
if __name__ == "__main__":
    import argparse
    from hyperopt import hp
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    space = {
        "width": hp.uniform("width", 0, 20),
        "height": hp.uniform("height", -100, 100),
        "activation": hp.choice("activation", ["relu", "tanh"])
    }
    # Known good points to warm-start the search. "activation" here is the
    # index into the hp.choice list above.
    current_best_params = [
        {
            "width": 1,
            "height": 2,
            "activation": 0  # Activation will be relu
        },
        {
            "width": 4,
            "height": 2,
            "activation": 1  # Activation will be tanh
        }
    ]
    config = {
        "num_samples": 10 if args.smoke_test else 1000,
        "config": {
            "iterations": 100,
        },
        "stop": {
            "timesteps_total": 100
        },
    }
    algo = HyperOptSearch(
        space,
        max_concurrent=4,
        metric="mean_loss",
        mode="min",
        points_to_evaluate=current_best_params)
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    run(easy_objective, search_alg=algo, scheduler=scheduler, **config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/lightgbm_example.py
|
Python
|
import lightgbm as lgb
import numpy as np
import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split
from ray import tune
def LightGBMCallback(env):
    """Assumes that `valid_0` is the target validation score.

    Used as a lightgbm training callback; forwards the first evaluation
    result of each boosting round to Tune.
    """
    # env.evaluation_result_list entries look like
    # (dataset_name, metric_name, value, is_higher_better).
    _, metric, score, _ = env.evaluation_result_list[0]
    tune.track.log(**{metric: score})
def train_breast_cancer(config):
    """Tune trainable: fit a LightGBM binary classifier on breast-cancer data.

    Args:
        config (dict): LightGBM training parameters, passed to lgb.train.
    """
    data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
    # 75/25 train/test split (random each trial).
    train_x, test_x, train_y, test_y = train_test_split(
        data, target, test_size=0.25)
    train_set = lgb.Dataset(train_x, label=train_y)
    test_set = lgb.Dataset(test_x, label=test_y)
    # LightGBMCallback reports per-round validation metrics to Tune.
    gbm = lgb.train(
        config,
        train_set,
        valid_sets=[test_set],
        verbose_eval=False,
        callbacks=[LightGBMCallback])
    preds = gbm.predict(test_x)
    # Round predicted probabilities to 0/1 class labels.
    pred_labels = np.rint(preds)
    tune.track.log(
        mean_accuracy=sklearn.metrics.accuracy_score(test_y, pred_labels),
        done=True)
if __name__ == "__main__":
    config = {
        "objective": "binary",
        "metric": "binary_error",
        "verbose": -1,
        "boosting_type": tune.grid_search(["gbdt", "dart"]),
        "num_leaves": tune.randint(10, 1000),
        "learning_rate": tune.loguniform(1e-8, 1e-1)
    }
    from ray.tune.schedulers import ASHAScheduler
    # ASHA early-stops poorly-performing configs, minimizing validation
    # binary error. num_samples=2 per grid point.
    tune.run(
        train_breast_cancer,
        config=config,
        num_samples=2,
        scheduler=ASHAScheduler(metric="binary_error", mode="min"))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/logging_example.py
|
Python
|
#!/usr/bin/env python
import argparse
import json
import os
import random
import numpy as np
from ray import tune
from ray.tune import Trainable, run
class TestLogger(tune.logger.Logger):
    """Minimal custom Tune logger: echoes every result dict to stdout."""
    def on_result(self, result):
        print("TestLogger", result)
def trial_str_creator(trial):
    """Return a custom display name for *trial*: "<trainable>_<trial id>_123"."""
    name_parts = (trial.trainable_name, trial.trial_id)
    return "{}_{}_123".format(*name_parts)
class MyTrainableClass(Trainable):
    """Example agent whose learning curve is a random sigmoid.

    The dummy hyperparameters "width" and "height" determine the slope and
    maximum reward value reached.
    """

    def _setup(self, config):
        # Number of completed training iterations.
        self.timestep = 0

    def _train(self):
        self.timestep += 1
        # Reward follows a tanh curve: slope set by "width", asymptote near
        # "height".
        width = self.config.get("width", 1)
        height = self.config.get("height", 1)
        reward = np.tanh(float(self.timestep) / width) * height
        # Here we use `episode_reward_mean`, but you can also report other
        # objectives such as loss or accuracy.
        return {"episode_reward_mean": reward}

    def _save(self, checkpoint_dir):
        # The iteration counter is the entire trainable state.
        checkpoint_file = os.path.join(checkpoint_dir, "checkpoint")
        with open(checkpoint_file, "w") as f:
            json.dump({"timestep": self.timestep}, f)
        return checkpoint_file

    def _restore(self, checkpoint_path):
        with open(checkpoint_path) as f:
            self.timestep = json.load(f)["timestep"]
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    # Demonstrates a custom logger (TestLogger) and trial name creator.
    trials = run(
        MyTrainableClass,
        name="hyperband_test",
        num_samples=5,
        trial_name_creator=trial_str_creator,
        loggers=[TestLogger],
        stop={"training_iteration": 1 if args.smoke_test else 99999},
        config={
            "width": tune.sample_from(
                lambda spec: 10 + int(90 * random.random())),
            "height": tune.sample_from(lambda spec: int(100 * random.random()))
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/mlflow_example.py
|
Python
|
#!/usr/bin/env python
"""Simple MLFLow Logger example.
This uses a simple MLFlow logger. One limitation of this is that there is
no artifact support; to save artifacts with Tune and MLFlow, you will need to
start a MLFlow run inside the Trainable function/class.
"""
import mlflow
from mlflow.tracking import MlflowClient
import time
import random
from ray import tune
from ray.tune.logger import MLFLowLogger, DEFAULT_LOGGERS
def easy_objective(config):
    """Report 20 steps of a toy loss via tune.track, then mark done."""
    for step in range(20):
        result = dict(
            timesteps_total=step,
            mean_loss=(config["height"] - 14)**2 - abs(config["width"] - 3))
        tune.track.log(**result)
        # Simulated per-step cost.
        time.sleep(0.02)
    tune.track.log(done=True)
if __name__ == "__main__":
    client = MlflowClient()
    experiment_id = client.create_experiment("test")
    # MLFLowLogger reads `mlflow_experiment_id` from each trial's config.
    trials = tune.run(
        easy_objective,
        name="mlflow",
        num_samples=5,
        loggers=DEFAULT_LOGGERS + (MLFLowLogger, ),
        config={
            "mlflow_experiment_id": experiment_id,
            "width": tune.sample_from(
                lambda spec: 10 + int(90 * random.random())),
            "height": tune.sample_from(lambda spec: int(100 * random.random()))
        })
    # Print all runs Tune logged to the MLflow experiment.
    df = mlflow.search_runs([experiment_id])
    print(df)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/mnist_pytorch.py
|
Python
|
# Original Code here:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
import os
import numpy as np
import argparse
from filelock import FileLock
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import ray
from ray import tune
from ray.tune import track
from ray.tune.schedulers import AsyncHyperBandScheduler
# Change these values if you want the training to run quicker or slower.
EPOCH_SIZE = 512
TEST_SIZE = 256
class ConvNet(nn.Module):
    """Tiny MNIST classifier: one 3x3 conv (3 filters) plus a linear head."""

    def __init__(self):
        super(ConvNet, self).__init__()
        # 1 input channel -> 3 feature maps. After the 3x3 conv on 28x28
        # input and 3x3 max-pool, the flattened size is 3 * 8 * 8 = 192.
        self.conv1 = nn.Conv2d(1, 3, kernel_size=3)
        self.fc = nn.Linear(192, 10)

    def forward(self, x):
        hidden = F.max_pool2d(self.conv1(x), 3)
        hidden = F.relu(hidden)
        flat = hidden.view(-1, 192)
        return F.log_softmax(self.fc(flat), dim=1)
def train(model, optimizer, train_loader, device=torch.device("cpu")):
    """Run one shortened training pass (at most ~EPOCH_SIZE samples)."""
    model.train()
    for step, (inputs, labels) in enumerate(train_loader):
        # Cap the work per call so each Tune iteration stays fast.
        if step * len(inputs) > EPOCH_SIZE:
            return
        inputs = inputs.to(device)
        labels = labels.to(device)
        optimizer.zero_grad()
        F.nll_loss(model(inputs), labels).backward()
        optimizer.step()
def test(model, data_loader, device=torch.device("cpu")):
    """Evaluate ``model`` on at most ~TEST_SIZE samples; return accuracy."""
    model.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for step, (inputs, labels) in enumerate(data_loader):
            # Only score a small slice of the data to keep iterations fast.
            if step * len(inputs) > TEST_SIZE:
                break
            inputs = inputs.to(device)
            labels = labels.to(device)
            predictions = model(inputs).argmax(dim=1)
            total += labels.size(0)
            correct += int((predictions == labels).sum())
    return correct / total
def get_data_loaders():
    """Build train/test MNIST DataLoaders (downloads to ~/data if missing)."""
    # Normalization constants — presumably the MNIST mean/std; verify if the
    # dataset is ever swapped out.
    mnist_transforms = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize((0.1307, ), (0.3081, ))])
    # We add FileLock here because multiple workers will want to
    # download data, and this may cause overwrites since
    # DataLoader is not threadsafe.
    with FileLock(os.path.expanduser("~/data.lock")):
        train_loader = torch.utils.data.DataLoader(
            datasets.MNIST(
                "~/data",
                train=True,
                download=True,
                transform=mnist_transforms),
            batch_size=64,
            shuffle=True)
    # Test split reuses the already-downloaded data, so no lock is needed.
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST("~/data", train=False, transform=mnist_transforms),
        batch_size=64,
        shuffle=True)
    return train_loader, test_loader
def train_mnist(config):
    """Function-API Tune trainable: train ConvNet, reporting accuracy.

    Loops forever; Tune terminates the trial via its ``stop`` criteria.
    """
    wants_gpu = config.get("use_gpu") and torch.cuda.is_available()
    device = torch.device("cuda" if wants_gpu else "cpu")
    train_loader, test_loader = get_data_loaders()
    model = ConvNet().to(device)
    optimizer = optim.SGD(
        model.parameters(), lr=config["lr"], momentum=config["momentum"])
    while True:
        train(model, optimizer, train_loader, device)
        # Report accuracy after each shortened epoch.
        track.log(mean_accuracy=test(model, test_loader, device))
if __name__ == "__main__":
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--cuda",
action="store_true",
default=False,
help="Enables GPU training")
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
"--ray-address",
help="Address of Ray cluster for seamless distributed execution.")
args = parser.parse_args()
if args.ray_address:
ray.init(address=args.ray_address)
sched = AsyncHyperBandScheduler(
time_attr="training_iteration", metric="mean_accuracy")
analysis = tune.run(
train_mnist,
name="exp",
scheduler=sched,
stop={
"mean_accuracy": 0.98,
"training_iteration": 5 if args.smoke_test else 100
},
resources_per_trial={
"cpu": 2,
"gpu": int(args.cuda)
},
num_samples=1 if args.smoke_test else 50,
config={
"lr": tune.sample_from(lambda spec: 10**(-10 * np.random.rand())),
"momentum": tune.uniform(0.1, 0.9),
"use_gpu": int(args.cuda)
})
print("Best config is:", analysis.get_best_config(metric="mean_accuracy"))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/mnist_pytorch_trainable.py
|
Python
|
# Original Code here:
# https://github.com/pytorch/examples/blob/master/mnist/main.py
from __future__ import print_function
import argparse
import os
import torch
import torch.optim as optim
import ray
from ray import tune
from ray.tune.schedulers import ASHAScheduler
from ray.tune.examples.mnist_pytorch import (train, test, get_data_loaders,
ConvNet)
# Change these values if you want the training to run quicker or slower.
EPOCH_SIZE = 512
TEST_SIZE = 256
# Training settings
# Module-level parser; parse_args() is called in the __main__ block below.
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
    "--use-gpu",
    action="store_true",
    default=False,
    help="enables CUDA training")
parser.add_argument(
    "--ray-address", type=str, help="The Redis address of the cluster.")
parser.add_argument(
    "--smoke-test", action="store_true", help="Finish quickly for testing")
# Below comments are for documentation purposes only.
# yapf: disable
# __trainable_example_begin__
class TrainMNIST(tune.Trainable):
    """Class-API Tune trainable: MNIST ConvNet with checkpoint support."""

    def _setup(self, config):
        # Called once per trial: build data loaders, model, and optimizer.
        use_cuda = config.get("use_gpu") and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        self.train_loader, self.test_loader = get_data_loaders()
        self.model = ConvNet().to(self.device)
        self.optimizer = optim.SGD(
            self.model.parameters(),
            lr=config.get("lr", 0.01),
            momentum=config.get("momentum", 0.9))

    def _train(self):
        # One Tune iteration: a shortened training pass, then evaluation.
        train(
            self.model, self.optimizer, self.train_loader, device=self.device)
        acc = test(self.model, self.test_loader, self.device)
        return {"mean_accuracy": acc}

    def _save(self, checkpoint_dir):
        # Persist only the model weights into the Tune-provided directory.
        checkpoint_path = os.path.join(checkpoint_dir, "model.pth")
        torch.save(self.model.state_dict(), checkpoint_path)
        return checkpoint_path

    def _restore(self, checkpoint_path):
        self.model.load_state_dict(torch.load(checkpoint_path))
# __trainable_example_end__
# yapf: enable
if __name__ == "__main__":
args = parser.parse_args()
ray.init(address=args.ray_address)
sched = ASHAScheduler(metric="mean_accuracy")
analysis = tune.run(
TrainMNIST,
scheduler=sched,
stop={
"mean_accuracy": 0.95,
"training_iteration": 3 if args.smoke_test else 20,
},
resources_per_trial={
"cpu": 3,
"gpu": int(args.use_gpu)
},
num_samples=1 if args.smoke_test else 20,
checkpoint_at_end=True,
checkpoint_freq=3,
config={
"args": args,
"lr": tune.uniform(0.001, 0.1),
"momentum": tune.uniform(0.1, 0.9),
})
print("Best config is:", analysis.get_best_config(metric="mean_accuracy"))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/nevergrad_example.py
|
Python
|
"""This test checks that Nevergrad is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.nevergrad import NevergradSearch
def easy_objective(config, reporter):
    """Report a fixed synthetic loss for ``config["iterations"]`` steps.

    The loss depends only on the sampled ``height``/``width`` values, so it
    is constant across steps within a trial.
    """
    import time
    time.sleep(0.2)
    loss = (config["height"] - 14)**2 - abs(config["width"] - 3)
    for step in range(config["iterations"]):
        reporter(timesteps_total=step, mean_loss=loss)
        time.sleep(0.02)
if __name__ == "__main__":
import argparse
from nevergrad.optimization import optimizerlib
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init()
config = {
"num_samples": 10 if args.smoke_test else 50,
"config": {
"iterations": 100,
},
"stop": {
"timesteps_total": 100
}
}
instrumentation = 2
parameter_names = ["height", "width"]
# With nevergrad v0.2.0+ the following is also possible:
# from nevergrad import instrumentation as inst
# instrumentation = inst.Instrumentation(
# height=inst.var.Array(1).bounded(0, 200).asfloat(),
# width=inst.var.OrderedDiscrete([0, 10, 20, 30, 40, 50]))
# parameter_names = None # names are provided by the instrumentation
optimizer = optimizerlib.OnePlusOne(instrumentation)
algo = NevergradSearch(
optimizer,
parameter_names,
max_concurrent=4,
metric="mean_loss",
mode="min")
scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
run(easy_objective,
name="nevergrad",
search_alg=algo,
scheduler=scheduler,
**config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/pbt_convnet_example.py
|
Python
|
#!/usr/bin/env python
# __tutorial_imports_begin__
import argparse
import os
import numpy as np
import torch
import torch.optim as optim
from torchvision import datasets
from ray.tune.examples.mnist_pytorch import train, test, ConvNet,\
get_data_loaders
import ray
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
from ray.tune.utils import validate_save_restore
from ray.tune.trial import ExportFormat
# __tutorial_imports_end__
# __trainable_begin__
class PytorchTrainble(tune.Trainable):
    """Train a Pytorch ConvNet with Trainable and PopulationBasedTraining
    scheduler. The example reuse some of the functions in mnist_pytorch,
    and is a good demo for how to add the tuning function without
    changing the original training code.
    """

    def _setup(self, config):
        # Build data, model, and optimizer once per trial.
        self.train_loader, self.test_loader = get_data_loaders()
        self.model = ConvNet()
        self.optimizer = optim.SGD(
            self.model.parameters(),
            lr=config.get("lr", 0.01),
            momentum=config.get("momentum", 0.9))

    def _train(self):
        # One Tune iteration: shortened training pass + evaluation.
        train(self.model, self.optimizer, self.train_loader)
        acc = test(self.model, self.test_loader)
        return {"mean_accuracy": acc}

    def _save(self, checkpoint_dir):
        # PBT uses these checkpoints when trials "exploit" each other.
        checkpoint_path = os.path.join(checkpoint_dir, "model.pth")
        torch.save(self.model.state_dict(), checkpoint_path)
        return checkpoint_path

    def _restore(self, checkpoint_path):
        self.model.load_state_dict(torch.load(checkpoint_path))

    def _export_model(self, export_formats, export_dir):
        # Invoked by tune.run(export_formats=...) at the end of training.
        if export_formats == [ExportFormat.MODEL]:
            path = os.path.join(export_dir, "exported_convnet.pt")
            torch.save(self.model.state_dict(), path)
            return {export_formats[0]: path}
        else:
            raise ValueError("unexpected formats: " + str(export_formats))

    def reset_config(self, new_config):
        # Enables reuse_actors=True: apply new hyperparameters in place
        # instead of tearing down and restarting the actor.
        for param_group in self.optimizer.param_groups:
            if "lr" in new_config:
                param_group["lr"] = new_config["lr"]
            if "momentum" in new_config:
                param_group["momentum"] = new_config["momentum"]
        self.config = new_config
        return True
# __trainable_end__
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init()
datasets.MNIST("~/data", train=True, download=True)
# check if PytorchTrainble will save/restore correctly before execution
validate_save_restore(PytorchTrainble)
validate_save_restore(PytorchTrainble, use_object_store=True)
# __pbt_begin__
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="mean_accuracy",
mode="max",
perturbation_interval=5,
hyperparam_mutations={
# distribution for resampling
"lr": lambda: np.random.uniform(0.0001, 1),
# allow perturbations within this set of categorical values
"momentum": [0.8, 0.9, 0.99],
})
# __pbt_end__
# __tune_begin__
class Stopper:
def __init__(self):
self.should_stop = False
def stop(self, trial_id, result):
max_iter = 5 if args.smoke_test else 100
if not self.should_stop and result["mean_accuracy"] > 0.96:
self.should_stop = True
return self.should_stop or result["training_iteration"] >= max_iter
stopper = Stopper()
analysis = tune.run(
PytorchTrainble,
name="pbt_test",
scheduler=scheduler,
reuse_actors=True,
verbose=1,
stop=stopper.stop,
export_formats=[ExportFormat.MODEL],
checkpoint_score_attr="mean_accuracy",
checkpoint_freq=5,
keep_checkpoints_num=4,
num_samples=4,
config={
"lr": tune.uniform(0.001, 1),
"momentum": tune.uniform(0.001, 1),
})
# __tune_end__
best_trial = analysis.get_best_trial("mean_accuracy")
best_checkpoint = max(
analysis.get_trial_checkpoints_paths(best_trial, "mean_accuracy"))
restored_trainable = PytorchTrainble()
restored_trainable.restore(best_checkpoint[0])
best_model = restored_trainable.model
# Note that test only runs on a small random set of the test data, thus the
# accuracy may be different from metrics shown in tuning process.
test_acc = test(best_model, get_data_loaders()[1])
print("best model accuracy: ", test_acc)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/pbt_dcgan_mnist/pbt_dcgan_mnist.py
|
Python
|
#!/usr/bin/env python
import ray
from ray import tune
from ray.tune.schedulers import PopulationBasedTraining
from ray.tune.trial import ExportFormat
import argparse
import os
from filelock import FileLock
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from torch.autograd import Variable
from torch.nn import functional as F
from scipy.stats import entropy
# Training parameters
dataroot = "/tmp/"
workers = 2
batch_size = 64
image_size = 32
# Number of channels in the training images. For color images this is 3
nc = 1
# Size of z latent vector (i.e. size of generator input)
nz = 100
# Size of feature maps in generator
ngf = 32
# Size of feature maps in discriminator
ndf = 32
# Beta1 hyperparam for Adam optimizers
beta1 = 0.5
# iterations of actual training in each Trainable _train
train_iterations_per_step = 5
def get_data_loader():
    """Return a shuffled DataLoader over MNIST resized to ``image_size``."""
    transform = transforms.Compose([
        transforms.Resize(image_size),
        transforms.ToTensor(),
        # Map pixel values into [-1, 1] to match the generator's tanh output.
        transforms.Normalize((0.5, ), (0.5, )),
    ])
    dataset = dset.MNIST(root=dataroot, download=True, transform=transform)
    return torch.utils.data.DataLoader(
        dataset, batch_size=batch_size, shuffle=True, num_workers=workers)
# __GANmodel_begin__
# custom weights initialization called on netG and netD
def weights_init(m):
    """DCGAN-style init: N(0, 0.02) for conv layers; N(1, 0.02) weights and
    zero bias for batch-norm layers. All other modules are left untouched."""
    layer_type = type(m).__name__
    if "Conv" in layer_type:
        nn.init.normal_(m.weight.data, 0.0, 0.02)
    elif "BatchNorm" in layer_type:
        nn.init.normal_(m.weight.data, 1.0, 0.02)
        nn.init.constant_(m.bias.data, 0)
# Generator Code
class Generator(nn.Module):
    """DCGAN generator: maps an (nz, 1, 1) noise vector to an nc-channel
    image through a stack of transposed convolutions ending in tanh."""

    def __init__(self):
        super(Generator, self).__init__()

        def up_block(in_ch, out_ch, kernel, stride, pad):
            # ConvTranspose -> BatchNorm -> ReLU, the standard DCGAN block.
            return [
                nn.ConvTranspose2d(
                    in_ch, out_ch, kernel, stride, pad, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.ReLU(True),
            ]

        layers = up_block(nz, ngf * 4, 4, 1, 0)
        layers += up_block(ngf * 4, ngf * 2, 4, 2, 1)
        layers += up_block(ngf * 2, ngf, 4, 2, 1)
        # Final layer produces the image; tanh maps values into [-1, 1].
        layers += [nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh()]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)
class Discriminator(nn.Module):
    """DCGAN discriminator: downsamples an nc-channel image to a single
    real/fake probability via strided convolutions ending in sigmoid."""

    def __init__(self):
        super(Discriminator, self).__init__()
        layers = [
            nn.Conv2d(nc, ndf, 4, 2, 1, bias=False),
            nn.LeakyReLU(0.2, inplace=True),
        ]
        for in_ch, out_ch in ((ndf, ndf * 2), (ndf * 2, ndf * 4)):
            layers += [
                nn.Conv2d(in_ch, out_ch, 4, 2, 1, bias=False),
                nn.BatchNorm2d(out_ch),
                nn.LeakyReLU(0.2, inplace=True),
            ]
        # Collapse to a single logit and squash to a probability.
        layers += [nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False), nn.Sigmoid()]
        self.main = nn.Sequential(*layers)

    def forward(self, input):
        return self.main(input)
# __GANmodel_end__
# __INCEPTION_SCORE_begin__
class Net(nn.Module):
    """LeNet-style MNIST classifier, used to compute the inception score."""

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
        self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
        self.conv2_drop = nn.Dropout2d()
        self.fc1 = nn.Linear(320, 50)
        self.fc2 = nn.Linear(50, 10)

    def forward(self, x):
        # Two conv + pool stages shrink a 28x28 input to 20 x 4 x 4 = 320.
        x = F.relu(F.max_pool2d(self.conv1(x), 2))
        x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
        flat = x.view(-1, 320)
        hidden = F.dropout(F.relu(self.fc1(flat)), training=self.training)
        return F.log_softmax(self.fc2(hidden), dim=1)
def inception_score(imgs, batch_size=32, splits=1):
    """Compute the inception score of ``imgs`` with the pretrained MNIST
    classifier shared via the Ray object store (``mnist_model_ref``).

    Returns (mean, std) of exp(mean KL(p(y|x) || p(y))) taken over
    ``splits`` equal partitions of the predictions.
    """
    N = len(imgs)
    dtype = torch.FloatTensor
    dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size)
    # Fetch the shared classifier; mnist_model_ref is ray.put() in __main__.
    cm = ray.get(mnist_model_ref)
    # Resize generated images to the classifier's expected 28x28 input.
    up = nn.Upsample(size=(28, 28), mode="bilinear").type(dtype)

    def get_pred(x):
        x = up(x)
        x = cm(x)
        # NOTE(review): F.softmax without dim= relies on the deprecated
        # implicit-dimension behavior — confirm it picks the class axis.
        return F.softmax(x).data.cpu().numpy()

    # Class probabilities p(y|x) for every image.
    preds = np.zeros((N, 10))
    for i, batch in enumerate(dataloader, 0):
        batch = batch.type(dtype)
        batchv = Variable(batch)
        batch_size_i = batch.size()[0]
        preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv)
    # Now compute the mean kl-div
    split_scores = []
    for k in range(splits):
        part = preds[k * (N // splits):(k + 1) * (N // splits), :]
        # Marginal distribution p(y) over this split.
        py = np.mean(part, axis=0)
        scores = []
        for i in range(part.shape[0]):
            pyx = part[i, :]
            scores.append(entropy(pyx, py))
        split_scores.append(np.exp(np.mean(scores)))
    return np.mean(split_scores), np.std(split_scores)
# __INCEPTION_SCORE_end__
def train(netD, netG, optimG, optimD, criterion, dataloader, iteration,
          device):
    """Run ``train_iterations_per_step`` GAN updates; return the final
    generator loss, discriminator loss, and inception score."""
    real_label = 1
    fake_label = 0
    for i, data in enumerate(dataloader, 0):
        if i >= train_iterations_per_step:
            break
        # --- Discriminator update: real batch first. ---
        netD.zero_grad()
        real_cpu = data[0].to(device)
        b_size = real_cpu.size(0)
        label = torch.full((b_size, ), real_label, device=device)
        output = netD(real_cpu).view(-1)
        errD_real = criterion(output, label)
        errD_real.backward()
        D_x = output.mean().item()
        # --- Discriminator update: fake batch (generator detached so only
        # D's gradients are computed here). ---
        noise = torch.randn(b_size, nz, 1, 1, device=device)
        fake = netG(noise)
        label.fill_(fake_label)
        output = netD(fake.detach()).view(-1)
        errD_fake = criterion(output, label)
        errD_fake.backward()
        D_G_z1 = output.mean().item()
        errD = errD_real + errD_fake
        optimD.step()
        # --- Generator update: fool D into labeling fakes as real. ---
        netG.zero_grad()
        label.fill_(real_label)
        output = netD(fake).view(-1)
        errG = criterion(output, label)
        errG.backward()
        D_G_z2 = output.mean().item()
        optimG.step()
    # Score the last fake batch produced in the loop.
    is_score, is_std = inception_score(fake)
    # Output training stats
    if iteration % 10 == 0:
        print("[%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z))"
              ": %.4f / %.4f \tInception score: %.4f" %
              (iteration, len(dataloader), errD.item(), errG.item(), D_x,
               D_G_z1, D_G_z2, is_score))
    return errG.item(), errD.item(), is_score
# __Trainable_begin__
class PytorchTrainable(tune.Trainable):
    """Tune trainable wrapping the DCGAN training loop for PBT."""

    def _setup(self, config):
        use_cuda = config.get("use_gpu") and torch.cuda.is_available()
        self.device = torch.device("cuda" if use_cuda else "cpu")
        # Both networks get the DCGAN weight initialization.
        self.netD = Discriminator().to(self.device)
        self.netD.apply(weights_init)
        self.netG = Generator().to(self.device)
        self.netG.apply(weights_init)
        self.criterion = nn.BCELoss()
        self.optimizerD = optim.Adam(
            self.netD.parameters(),
            lr=config.get("lr", 0.01),
            betas=(beta1, 0.999))
        self.optimizerG = optim.Adam(
            self.netG.parameters(),
            lr=config.get("lr", 0.01),
            betas=(beta1, 0.999))
        # Serialize the MNIST download across concurrent trial workers.
        with FileLock(os.path.expanduser("~/.data.lock")):
            self.dataloader = get_data_loader()

    def _train(self):
        # One Tune iteration == train_iterations_per_step GAN updates.
        lossG, lossD, is_score = train(
            self.netD, self.netG, self.optimizerG, self.optimizerD,
            self.criterion, self.dataloader, self._iteration, self.device)
        return {"lossg": lossG, "lossd": lossD, "is_score": is_score}

    def _save(self, checkpoint_dir):
        # Save both networks and both optimizers so PBT exploit/explore can
        # resume training exactly where the donor trial left off.
        path = os.path.join(checkpoint_dir, "checkpoint")
        torch.save({
            "netDmodel": self.netD.state_dict(),
            "netGmodel": self.netG.state_dict(),
            "optimD": self.optimizerD.state_dict(),
            "optimG": self.optimizerG.state_dict(),
        }, path)
        return checkpoint_dir

    def _restore(self, checkpoint_dir):
        path = os.path.join(checkpoint_dir, "checkpoint")
        checkpoint = torch.load(path)
        self.netD.load_state_dict(checkpoint["netDmodel"])
        self.netG.load_state_dict(checkpoint["netGmodel"])
        self.optimizerD.load_state_dict(checkpoint["optimD"])
        self.optimizerG.load_state_dict(checkpoint["optimG"])

    def reset_config(self, new_config):
        # Apply perturbed learning rates in place (enables reuse_actors).
        if "netD_lr" in new_config:
            for param_group in self.optimizerD.param_groups:
                param_group["lr"] = new_config["netD_lr"]
        if "netG_lr" in new_config:
            for param_group in self.optimizerG.param_groups:
                param_group["lr"] = new_config["netG_lr"]
        self.config = new_config
        return True

    def _export_model(self, export_formats, export_dir):
        if export_formats == [ExportFormat.MODEL]:
            path = os.path.join(export_dir, "exported_models")
            torch.save({
                "netDmodel": self.netD.state_dict(),
                "netGmodel": self.netG.state_dict()
            }, path)
            return {ExportFormat.MODEL: path}
        else:
            raise ValueError("unexpected formats: " + str(export_formats))
# __Trainable_end__
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
ray.init()
dataloader = get_data_loader()
if not args.smoke_test:
# Plot some training images
real_batch = next(iter(dataloader))
plt.figure(figsize=(8, 8))
plt.axis("off")
plt.title("Original Images")
plt.imshow(
np.transpose(
vutils.make_grid(
real_batch[0][:64], padding=2, normalize=True).cpu(),
(1, 2, 0)))
plt.show()
# load the pretrained mnist classification model for inception_score
mnist_cnn = Net()
model_path = os.path.join(
os.path.dirname(ray.__file__),
"tune/examples/pbt_dcgan_mnist/mnist_cnn.pt")
mnist_cnn.load_state_dict(torch.load(model_path))
mnist_cnn.eval()
mnist_model_ref = ray.put(mnist_cnn)
# __tune_begin__
scheduler = PopulationBasedTraining(
time_attr="training_iteration",
metric="is_score",
mode="max",
perturbation_interval=5,
hyperparam_mutations={
# distribution for resampling
"netG_lr": lambda: np.random.uniform(1e-2, 1e-5),
"netD_lr": lambda: np.random.uniform(1e-2, 1e-5),
})
tune_iter = 5 if args.smoke_test else 300
analysis = tune.run(
PytorchTrainable,
name="pbt_dcgan_mnist",
scheduler=scheduler,
reuse_actors=True,
verbose=1,
checkpoint_at_end=True,
stop={
"training_iteration": tune_iter,
},
num_samples=8,
export_formats=[ExportFormat.MODEL],
config={
"netG_lr": tune.sample_from(
lambda spec: random.choice([0.0001, 0.0002, 0.0005])),
"netD_lr": tune.sample_from(
lambda spec: random.choice([0.0001, 0.0002, 0.0005]))
})
# __tune_end__
# demo of the trained Generators
if not args.smoke_test:
logdirs = analysis.dataframe()["logdir"].tolist()
img_list = []
fixed_noise = torch.randn(64, nz, 1, 1)
for d in logdirs:
netG_path = os.path.join(d, "exported_models")
loadedG = Generator()
loadedG.load_state_dict(torch.load(netG_path)["netGmodel"])
with torch.no_grad():
fake = loadedG(fixed_noise).detach().cpu()
img_list.append(vutils.make_grid(fake, padding=2, normalize=True))
fig = plt.figure(figsize=(8, 8))
plt.axis("off")
ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)]
for i in img_list]
ani = animation.ArtistAnimation(
fig, ims, interval=1000, repeat_delay=1000, blit=True)
ani.save("./generated.gif", writer="imagemagick", dpi=72)
plt.show()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/pbt_example.py
|
Python
|
#!/usr/bin/env python
import numpy as np
import argparse
import random
import ray
from ray.tune import Trainable, run
from ray.tune.schedulers import PopulationBasedTraining
class PBTBenchmarkExample(Trainable):
    """Toy PBT problem for benchmarking adaptive learning rate.
    The goal is to optimize this trainable's accuracy. The accuracy increases
    fastest at the optimal lr, which is a function of the current accuracy.
    The optimal lr schedule for this problem is the triangle wave as follows.
    Note that many lr schedules for real models also follow this shape:
     best lr
      ^
      |    /\
      |   /  \
      |  /    \
      | /      \
      ------------> accuracy
    In this problem, using PBT with a population of 2-4 is sufficient to
    roughly approximate this lr schedule. Higher population sizes will yield
    faster convergence. Training will not converge without PBT.
    """

    def _setup(self, config):
        self.lr = config["lr"]
        self.accuracy = 0.0  # end = 1000

    def _train(self):
        midpoint = 100  # lr starts decreasing after acc > midpoint
        q_tolerance = 3  # penalize exceeding lr by more than this multiple
        noise_level = 2  # add gaussian noise to the acc increase
        # triangle wave:
        #  - start at 0.001 @ t=0,
        #  - peak at 0.01 @ t=midpoint,
        #  - end at 0.001 @ t=midpoint * 2,
        if self.accuracy < midpoint:
            optimal_lr = 0.01 * self.accuracy / midpoint
        else:
            optimal_lr = 0.01 - 0.01 * (self.accuracy - midpoint) / midpoint
        optimal_lr = min(0.01, max(0.001, optimal_lr))
        # compute accuracy increase: reward being close to the optimal lr,
        # penalize overshooting it by more than q_tolerance.
        q_err = max(self.lr, optimal_lr) / min(self.lr, optimal_lr)
        if q_err < q_tolerance:
            self.accuracy += (1.0 / q_err) * random.random()
        elif self.lr > optimal_lr:
            self.accuracy -= (q_err - q_tolerance) * random.random()
        self.accuracy += noise_level * np.random.normal()
        self.accuracy = max(0, self.accuracy)
        return {
            "mean_accuracy": self.accuracy,
            "cur_lr": self.lr,
            "optimal_lr": optimal_lr,  # for debugging
            "q_err": q_err,  # for debugging
            "done": self.accuracy > midpoint * 2,
        }

    def _save(self, checkpoint_dir):
        # In-memory checkpoint: just a dict, no files written.
        return {
            "accuracy": self.accuracy,
            "lr": self.lr,
        }

    def _restore(self, checkpoint):
        # NOTE(review): only accuracy (the "model state") is restored; lr is
        # deliberately left to the possibly-perturbed config, since PBT owns
        # the hyperparameters (see reset_config) — confirm before changing.
        self.accuracy = checkpoint["accuracy"]

    def reset_config(self, new_config):
        # Apply a perturbed lr in place (enables reuse_actors).
        self.lr = new_config["lr"]
        self.config = new_config
        return True
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
if args.smoke_test:
ray.init(num_cpus=2) # force pausing to happen for test
else:
ray.init()
pbt = PopulationBasedTraining(
time_attr="training_iteration",
metric="mean_accuracy",
mode="max",
perturbation_interval=20,
hyperparam_mutations={
# distribution for resampling
"lr": lambda: random.uniform(0.0001, 0.02),
# allow perturbations within this set of categorical values
"some_other_factor": [1, 2],
})
run(
PBTBenchmarkExample,
name="pbt_test",
scheduler=pbt,
reuse_actors=True,
verbose=False,
stop={
"training_iteration": 2000,
},
num_samples=4,
config={
"lr": 0.0001,
# note: this parameter is perturbed but has no effect on
# the model training in this example
"some_other_factor": 1,
})
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/pbt_memnn_example.py
|
Python
|
"""Example training a memory neural net on the bAbI dataset.
References Keras and is based off of https://keras.io/examples/babi_memnn/.
"""
from __future__ import print_function
from tensorflow.keras.models import Sequential, Model, load_model
from tensorflow.keras.layers import Embedding
from tensorflow.keras.layers import (Input, Activation, Dense, Permute,
Dropout)
from tensorflow.keras.layers import add, dot, concatenate
from tensorflow.keras.layers import LSTM
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.utils import get_file
from tensorflow.keras.preprocessing.sequence import pad_sequences
from filelock import FileLock
import os
import argparse
import tarfile
import numpy as np
import re
from ray.tune import Trainable
def tokenize(sent):
    """Return the tokens of a sentence including punctuation.
    >>> tokenize("Bob dropped the apple. Where is the apple?")
    ["Bob", "dropped", "the", "apple", ".", "Where", "is", "the", "apple", "?"]
    """
    # The separator group must NOT be optional: r"(\W+)?" can match the empty
    # string, and on Python 3.7+ re.split() then splits between every
    # character, shattering words into single letters. Splitting on the
    # non-empty r"(\W+)" keeps the punctuation (captured group) as tokens;
    # whitespace-only pieces are dropped by the strip() filter.
    return [x.strip() for x in re.split(r"(\W+)", sent) if x and x.strip()]
def parse_stories(lines, only_supporting=False):
    """Parse stories provided in the bAbi tasks format
    If only_supporting is true, only the sentences
    that support the answer are kept.
    """
    data = []
    story = []
    for line in lines:
        # Lines arrive as bytes (read from a tarfile member).
        line = line.decode("utf-8").strip()
        # Each line starts with a 1-based sentence id within its story.
        nid, line = line.split(" ", 1)
        nid = int(nid)
        if nid == 1:
            # Id 1 marks the beginning of a new story.
            story = []
        if "\t" in line:
            # Question lines are "question \t answer \t supporting-ids".
            q, a, supporting = line.split("\t")
            q = tokenize(q)
            if only_supporting:
                # Only select the related substory
                supporting = map(int, supporting.split())
                substory = [story[i - 1] for i in supporting]
            else:
                # Provide all the substories
                substory = [x for x in story if x]
            data.append((substory, q, a))
            # Placeholder keeps sentence ids aligned with story indices.
            story.append("")
        else:
            # Plain narrative sentence: tokenize and append to the story.
            sent = tokenize(line)
            story.append(sent)
    return data
def get_stories(f, only_supporting=False, max_length=None):
    """Read a bAbI task file and return (story, question, answer) triples.

    Each story is flattened into a single token list. If ``max_length`` is
    given, stories with that many tokens or more are discarded.
    """

    def flatten(token_lists):
        # Concatenate a list of token lists into one flat token list.
        return sum(token_lists, [])

    parsed = parse_stories(f.readlines(), only_supporting=only_supporting)
    result = []
    for story, question, answer in parsed:
        flat_story = flatten(story)
        if not max_length or len(flat_story) < max_length:
            result.append((flat_story, question, answer))
    return result
def vectorize_stories(word_idx, story_maxlen, query_maxlen, data):
    """Convert (story, query, answer) token triples into padded index arrays.

    Stories and queries become zero-padded integer matrices; answers become
    a plain integer vector.
    """
    inputs = [[word_idx[w] for w in story] for story, _, _ in data]
    queries = [[word_idx[w] for w in query] for _, query, _ in data]
    answers = [word_idx[answer] for _, _, answer in data]
    return (pad_sequences(inputs, maxlen=story_maxlen),
            pad_sequences(queries, maxlen=query_maxlen), np.array(answers))
def read_data():
    """Download the bAbI corpus (cached by Keras) and return the train/test
    stories for the single-supporting-fact QA1 challenge."""
    # Get the file
    try:
        path = get_file(
            "babi-tasks-v1-2.tar.gz",
            origin="https://s3.amazonaws.com/text-datasets/"
            "babi_tasks_1-20_v1-2.tar.gz")
    except Exception:
        print(
            "Error downloading dataset, please download it manually:\n"
            "$ wget http://www.thespermwhale.com/jaseweston/babi/tasks_1-20_v1-2"  # noqa: E501
            ".tar.gz\n"
            "$ mv tasks_1-20_v1-2.tar.gz ~/.keras/datasets/babi-tasks-v1-2.tar.gz"  # noqa: E501
        )
        raise
    # Choose challenge
    challenges = {
        # QA1 with 10,000 samples
        "single_supporting_fact_10k": "tasks_1-20_v1-2/en-10k/qa1_"
        "single-supporting-fact_{}.txt",
        # QA2 with 10,000 samples
        "two_supporting_facts_10k": "tasks_1-20_v1-2/en-10k/qa2_"
        "two-supporting-facts_{}.txt",
    }
    challenge_type = "single_supporting_fact_10k"
    challenge = challenges[challenge_type]
    # Read both splits straight out of the tar archive without extracting.
    with tarfile.open(path) as tar:
        train_stories = get_stories(tar.extractfile(challenge.format("train")))
        test_stories = get_stories(tar.extractfile(challenge.format("test")))
    return train_stories, test_stories
class MemNNModel(Trainable):
def build_model(self):
"""Helper method for creating the model"""
vocab = set()
for story, q, answer in self.train_stories + self.test_stories:
vocab |= set(story + q + [answer])
vocab = sorted(vocab)
# Reserve 0 for masking via pad_sequences
vocab_size = len(vocab) + 1
story_maxlen = max(
len(x) for x, _, _ in self.train_stories + self.test_stories)
query_maxlen = max(
len(x) for _, x, _ in self.train_stories + self.test_stories)
word_idx = {c: i + 1 for i, c in enumerate(vocab)}
self.inputs_train, self.queries_train, self.answers_train = (
vectorize_stories(word_idx, story_maxlen, query_maxlen,
self.train_stories))
self.inputs_test, self.queries_test, self.answers_test = (
vectorize_stories(word_idx, story_maxlen, query_maxlen,
self.test_stories))
# placeholders
input_sequence = Input((story_maxlen, ))
question = Input((query_maxlen, ))
# encoders
# embed the input sequence into a sequence of vectors
input_encoder_m = Sequential()
input_encoder_m.add(Embedding(input_dim=vocab_size, output_dim=64))
input_encoder_m.add(Dropout(self.config.get("dropout", 0.3)))
# output: (samples, story_maxlen, embedding_dim)
# embed the input into a sequence of vectors of size query_maxlen
input_encoder_c = Sequential()
input_encoder_c.add(
Embedding(input_dim=vocab_size, output_dim=query_maxlen))
input_encoder_c.add(Dropout(self.config.get("dropout", 0.3)))
# output: (samples, story_maxlen, query_maxlen)
# embed the question into a sequence of vectors
question_encoder = Sequential()
question_encoder.add(
Embedding(
input_dim=vocab_size, output_dim=64,
input_length=query_maxlen))
question_encoder.add(Dropout(self.config.get("dropout", 0.3)))
# output: (samples, query_maxlen, embedding_dim)
# encode input sequence and questions (which are indices)
# to sequences of dense vectors
input_encoded_m = input_encoder_m(input_sequence)
input_encoded_c = input_encoder_c(input_sequence)
question_encoded = question_encoder(question)
# compute a "match" between the first input vector sequence
# and the question vector sequence
# shape: `(samples, story_maxlen, query_maxlen)`
match = dot([input_encoded_m, question_encoded], axes=(2, 2))
match = Activation("softmax")(match)
# add the match matrix with the second input vector sequence
response = add(
[match, input_encoded_c]) # (samples, story_maxlen, query_maxlen)
response = Permute(
(2, 1))(response) # (samples, query_maxlen, story_maxlen)
# concatenate the match matrix with the question vector sequence
answer = concatenate([response, question_encoded])
# the original paper uses a matrix multiplication.
# we choose to use a RNN instead.
answer = LSTM(32)(answer) # (samples, 32)
# one regularization layer -- more would probably be needed.
answer = Dropout(self.config.get("dropout", 0.3))(answer)
answer = Dense(vocab_size)(answer) # (samples, vocab_size)
# we output a probability distribution over the vocabulary
answer = Activation("softmax")(answer)
# build the final model
model = Model([input_sequence, question], answer)
return model
def _setup(self, config):
with FileLock(os.path.expanduser("~/.tune.lock")):
self.train_stories, self.test_stories = read_data()
model = self.build_model()
rmsprop = RMSprop(
lr=self.config.get("lr", 1e-3), rho=self.config.get("rho", 0.9))
model.compile(
optimizer=rmsprop,
loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
self.model = model
def _train(self):
# train
self.model.fit(
[self.inputs_train, self.queries_train],
self.answers_train,
batch_size=self.config.get("batch_size", 32),
epochs=self.config.get("epochs", 1),
validation_data=([self.inputs_test, self.queries_test],
self.answers_test),
verbose=0)
_, accuracy = self.model.evaluate(
[self.inputs_train, self.queries_train],
self.answers_train,
verbose=0)
return {"mean_accuracy": accuracy}
def _save(self, checkpoint_dir):
file_path = checkpoint_dir + "/model"
self.model.save(file_path)
return file_path
def _restore(self, path):
    """Replace the in-memory model with the one saved at ``path``."""
    # See https://stackoverflow.com/a/42763323
    # Dropping the old model first avoids Keras name-scope clashes when
    # loading into the same process.
    del self.model
    self.model = load_model(path)
if __name__ == "__main__":
    import ray
    from ray.tune import Trainable, run
    from ray.tune.schedulers import PopulationBasedTraining
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    # PBT periodically clones promising trials and perturbs these
    # hyperparameters; each lambda draws a fresh resample value.
    pbt = PopulationBasedTraining(
        time_attr="training_iteration",
        metric="mean_accuracy",
        mode="max",
        perturbation_interval=5,
        hyperparam_mutations={
            "dropout": lambda: np.random.uniform(0, 1),
            "lr": lambda: 10**np.random.randint(-10, 0),
            "rho": lambda: np.random.uniform(0, 1)
        })
    # Launch 4 parallel trials of the memory-network Trainable; the
    # config below provides the starting hyperparameter values.
    results = run(
        MemNNModel,
        name="pbt_babi_memnn",
        scheduler=pbt,
        stop={"training_iteration": 10 if args.smoke_test else 100},
        num_samples=4,
        config={
            "batch_size": 32,
            "epochs": 1,
            "dropout": 0.3,
            "lr": 0.01,
            "rho": 0.9
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/pbt_ppo_example.py
|
Python
|
#!/usr/bin/env python
"""Example of using PBT with RLlib.
Note that this requires a cluster with at least 8 GPUs in order for all trials
to run concurrently, otherwise PBT will round-robin train the trials which
is less efficient (or you can set {"gpu": 0} to use CPUs for SGD instead).
Note that Tune in general does not need 8 GPUs, and this is just a more
computationally demainding example.
"""
import random
import ray
from ray.tune import run, sample_from
from ray.tune.schedulers import PopulationBasedTraining
if __name__ == "__main__":
# Postprocess the perturbed config to ensure it's still valid
def explore(config):
    """Clamp a PBT-perturbed PPO config back into a valid region.

    Mutates ``config`` in place and returns it.
    """
    # A train batch must contain at least two SGD minibatches.
    minimum_batch = config["sgd_minibatch_size"] * 2
    if config["train_batch_size"] < minimum_batch:
        config["train_batch_size"] = minimum_batch
    # Always run at least one SGD pass per train batch.
    config["num_sgd_iter"] = max(config["num_sgd_iter"], 1)
    return config
pbt = PopulationBasedTraining(
    time_attr="time_total_s",
    metric="episode_reward_mean",
    mode="max",
    perturbation_interval=120,
    resample_probability=0.25,
    # Specifies the mutations of these hyperparams
    hyperparam_mutations={
        "lambda": lambda: random.uniform(0.9, 1.0),
        "clip_param": lambda: random.uniform(0.01, 0.5),
        "lr": [1e-3, 5e-4, 1e-4, 5e-5, 1e-5],
        "num_sgd_iter": lambda: random.randint(1, 30),
        "sgd_minibatch_size": lambda: random.randint(128, 16384),
        "train_batch_size": lambda: random.randint(2000, 160000),
    },
    # ``explore`` re-validates a config after each perturbation.
    custom_explore_fn=explore)
ray.init()
# 8 concurrent PPO trials; batch-size hyperparameters start from a
# random choice and are then mutated by PBT.
run(
    "PPO",
    name="pbt_humanoid_test",
    scheduler=pbt,
    num_samples=8,
    config={
        "env": "Humanoid-v1",
        "kl_coeff": 1.0,
        "num_workers": 8,
        "num_gpus": 1,
        "model": {
            "free_log_std": True
        },
        # These params are tuned from a fixed starting value.
        "lambda": 0.95,
        "clip_param": 0.2,
        "lr": 1e-4,
        # These params start off randomly drawn from a set.
        "num_sgd_iter": sample_from(
            lambda spec: random.choice([10, 20, 30])),
        "sgd_minibatch_size": sample_from(
            lambda spec: random.choice([128, 512, 2048])),
        "train_batch_size": sample_from(
            lambda spec: random.choice([10000, 20000, 40000]))
    })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/pbt_tune_cifar10_with_keras.py
|
Python
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Train keras CNN on the CIFAR10 small images dataset.
The model comes from: https://zhuanlan.zhihu.com/p/29214791,
and it gets to about 87% validation accuracy in 100 epochs.
Note that the script requires a machine with 4 GPUs. You
can set {"gpu": 0} to use CPUs for training, although
it is less efficient.
"""
from __future__ import print_function
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.python.keras.datasets import cifar10
from tensorflow.python.keras.layers import Input, Dense, Dropout, Flatten
from tensorflow.python.keras.layers import Convolution2D, MaxPooling2D
from tensorflow.python.keras.models import Model, load_model
from tensorflow.python.keras.preprocessing.image import ImageDataGenerator
import ray
from ray.tune import grid_search, run, sample_from
from ray.tune import Trainable
from ray.tune.schedulers import PopulationBasedTraining
num_classes = 10
NUM_SAMPLES = 128
class Cifar10Model(Trainable):
    """Tune Trainable that trains a small VGG-style CNN on CIFAR-10.

    Hyperparameters read from ``self.config``: "lr", "decay", "dropout",
    "batch_size", "epochs".
    """

    def _read_data(self):
        """Load CIFAR-10; return ((x_train, y_train), (x_test, y_test))."""
        # The data, split between train and test sets:
        (x_train, y_train), (x_test, y_test) = cifar10.load_data()

        # Convert class vectors to binary class matrices.
        y_train = tf.keras.utils.to_categorical(y_train, num_classes)
        y_test = tf.keras.utils.to_categorical(y_test, num_classes)

        # Normalize pixel values into [0, 1].
        x_train = x_train.astype("float32")
        x_train /= 255
        x_test = x_test.astype("float32")
        x_test /= 255

        return (x_train, y_train), (x_test, y_test)

    def _build_model(self, input_shape):
        """Build the CNN for per-sample images of shape ``input_shape``."""
        # BUGFIX: ``input_shape`` was previously ignored and the input
        # layer hard-coded to (32, 32, 3); use the argument instead.
        x = Input(shape=input_shape)
        y = x
        # Three conv blocks (64 -> 128 -> 256 filters); each block is two
        # 3x3 convolutions followed by 2x2 max pooling.
        y = Convolution2D(
            filters=64,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=64,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Convolution2D(
            filters=128,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=128,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Convolution2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = Convolution2D(
            filters=256,
            kernel_size=3,
            strides=1,
            padding="same",
            activation="relu",
            kernel_initializer="he_normal")(y)
        y = MaxPooling2D(pool_size=2, strides=2, padding="same")(y)

        y = Flatten()(y)
        # Dropout rate is a tuned hyperparameter.
        y = Dropout(self.config.get("dropout", 0.5))(y)
        y = Dense(
            units=10, activation="softmax", kernel_initializer="he_normal")(y)

        model = Model(inputs=x, outputs=y, name="model1")
        return model

    def _setup(self, config):
        """Load data, then build and compile the model."""
        self.train_data, self.test_data = self._read_data()
        x_train = self.train_data[0]
        model = self._build_model(x_train.shape[1:])

        opt = tf.keras.optimizers.Adadelta(
            lr=self.config.get("lr", 1e-4),
            decay=self.config.get("decay", 1e-4))
        model.compile(
            loss="categorical_crossentropy",
            optimizer=opt,
            metrics=["accuracy"])
        self.model = model

    def _train(self):
        """Run one augmented training round and report test accuracy."""
        x_train, y_train = self.train_data
        # Subsample so a single Tune iteration stays fast.
        x_train, y_train = x_train[:NUM_SAMPLES], y_train[:NUM_SAMPLES]
        x_test, y_test = self.test_data
        x_test, y_test = x_test[:NUM_SAMPLES], y_test[:NUM_SAMPLES]

        aug_gen = ImageDataGenerator(
            # set input mean to 0 over the dataset
            featurewise_center=False,
            # set each sample mean to 0
            samplewise_center=False,
            # divide inputs by dataset std
            featurewise_std_normalization=False,
            # divide each input by its std
            samplewise_std_normalization=False,
            # apply ZCA whitening
            zca_whitening=False,
            # randomly rotate images in the range (degrees, 0 to 180)
            rotation_range=0,
            # randomly shift images horizontally (fraction of total width)
            width_shift_range=0.1,
            # randomly shift images vertically (fraction of total height)
            height_shift_range=0.1,
            # randomly flip images
            horizontal_flip=True,
            # randomly flip images
            vertical_flip=False,
        )

        aug_gen.fit(x_train)
        batch_size = self.config.get("batch_size", 64)
        gen = aug_gen.flow(x_train, y_train, batch_size=batch_size)
        self.model.fit_generator(
            generator=gen,
            epochs=self.config.get("epochs", 1),
            validation_data=None)

        # loss, accuracy
        _, accuracy = self.model.evaluate(x_test, y_test, verbose=0)
        return {"mean_accuracy": accuracy}

    def _save(self, checkpoint_dir):
        """Persist the Keras model; return the checkpoint path."""
        file_path = checkpoint_dir + "/model"
        self.model.save(file_path)
        return file_path

    def _restore(self, path):
        """Reload the model saved by ``_save``."""
        # See https://stackoverflow.com/a/42763323
        del self.model
        self.model = load_model(path)

    def _stop(self):
        # If need, save your model when exit.
        # saved_path = self.model.save(self.logdir)
        # print("save model at: ", saved_path)
        pass
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    # Static trial settings; "lr"/"dropout" are grid-searched and "decay"
    # derived from the sampled lr.
    train_spec = {
        "resources_per_trial": {
            "cpu": 1,
            "gpu": 1
        },
        "stop": {
            "mean_accuracy": 0.80,
            "training_iteration": 30,
        },
        "config": {
            "epochs": 1,
            "batch_size": 64,
            "lr": grid_search([10**-4, 10**-5]),
            "decay": sample_from(lambda spec: spec.config.lr / 100.0),
            "dropout": grid_search([0.25, 0.5]),
        },
        "num_samples": 4,
    }
    if args.smoke_test:
        # Collapse the grids so the smoke test runs a single config.
        train_spec["config"]["lr"] = 10**-4
        train_spec["config"]["dropout"] = 0.5
    ray.init()
    # PBT only mutates dropout during training.
    pbt = PopulationBasedTraining(
        time_attr="training_iteration",
        metric="mean_accuracy",
        mode="max",
        perturbation_interval=10,
        hyperparam_mutations={
            "dropout": lambda _: np.random.uniform(0, 1),
        })
    run(Cifar10Model, name="pbt_cifar10", scheduler=pbt, **train_spec)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/sigopt_example.py
|
Python
|
"""This test checks that SigOpt is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.sigopt import SigOptSearch
def easy_objective(config, reporter):
    """Toy objective: quadratic in ``height`` minus a ``width`` offset.

    Reports one result per iteration so schedulers can act on progress.
    """
    import time

    time.sleep(0.2)  # simulate a startup cost
    height_term = (config["height"] - 14)**2
    width_term = abs(config["width"] - 3)
    for step in range(config["iterations"]):
        reporter(timesteps_total=step, mean_loss=height_term - width_term)
        time.sleep(0.02)  # simulate per-step work
if __name__ == "__main__":
    import argparse
    import os
    # SigOpt is a hosted service; an API key is mandatory.
    assert "SIGOPT_KEY" in os.environ, \
        "SigOpt API key must be stored as environment variable at SIGOPT_KEY"
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    # Search space in SigOpt's native parameter format.
    space = [
        {
            "name": "width",
            "type": "int",
            "bounds": {
                "min": 0,
                "max": 20
            },
        },
        {
            "name": "height",
            "type": "int",
            "bounds": {
                "min": -100,
                "max": 100
            },
        },
    ]
    config = {
        "num_samples": 10 if args.smoke_test else 1000,
        "config": {
            "iterations": 100,
        },
        "stop": {
            "timesteps_total": 100
        },
    }
    algo = SigOptSearch(
        space,
        name="SigOpt Example Experiment",
        max_concurrent=1,
        metric="mean_loss",
        mode="min")
    # Pair the search algorithm with an early-stopping scheduler.
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    run(easy_objective,
        name="my_exp",
        search_alg=algo,
        scheduler=scheduler,
        **config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/skopt_example.py
|
Python
|
"""This test checks that Skopt is functional.
It also checks that it is usable with a separate scheduler.
"""
import ray
from ray.tune import run
from ray.tune.schedulers import AsyncHyperBandScheduler
from ray.tune.suggest.skopt import SkOptSearch
def easy_objective(config, reporter):
    """Synthetic objective used to exercise the SkOpt search algorithm."""
    import time

    time.sleep(0.2)  # pretend to do setup work
    # The loss does not change across iterations for a fixed config.
    loss = (config["height"] - 14)**2 - abs(config["width"] - 3)
    for timestep in range(config["iterations"]):
        reporter(timesteps_total=timestep, mean_loss=loss)
        time.sleep(0.02)
if __name__ == "__main__":
    import argparse
    from skopt import Optimizer
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--smoke-test", action="store_true", help="Finish quickly for testing")
    args, _ = parser.parse_known_args()
    ray.init()
    config = {
        "num_samples": 10 if args.smoke_test else 50,
        "config": {
            "iterations": 100,
        },
        "stop": {
            "timesteps_total": 100
        },
    }
    # Search space: width in [0, 20], height in [-100, 100].
    optimizer = Optimizer([(0, 20), (-100, 100)])
    # Warm-start the search with two already-evaluated points.
    previously_run_params = [[10, 0], [15, -20]]
    known_rewards = [-189, -1144]
    algo = SkOptSearch(
        optimizer, ["width", "height"],
        max_concurrent=4,
        metric="mean_loss",
        mode="min",
        points_to_evaluate=previously_run_params,
        evaluated_rewards=known_rewards)
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    run(easy_objective,
        name="skopt_exp_with_warmstart",
        search_alg=algo,
        scheduler=scheduler,
        **config)
    # Now run the experiment without known rewards
    algo = SkOptSearch(
        optimizer, ["width", "height"],
        max_concurrent=4,
        metric="mean_loss",
        mode="min",
        points_to_evaluate=previously_run_params)
    scheduler = AsyncHyperBandScheduler(metric="mean_loss", mode="min")
    run(easy_objective,
        name="skopt_exp",
        search_alg=algo,
        scheduler=scheduler,
        **config)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/tf_mnist_example.py
|
Python
|
#!/usr/bin/env python
# coding: utf-8
#
# This example showcases how to use TF2.0 APIs with Tune.
# Original code: https://www.tensorflow.org/tutorials/quickstart/advanced
#
# As of 10/12/2019: One caveat of using TF2.0 is that TF AutoGraph
# functionality does not interact nicely with Ray actors. One way to get around
# this is to `import tensorflow` inside the Tune Trainable.
#
import argparse
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras import Model
from tensorflow.keras.datasets.mnist import load_data
from ray import tune
# Cap the number of training batches per Tune iteration to keep
# MNISTTrainable._train fast.
MAX_TRAIN_BATCH = 10
parser = argparse.ArgumentParser()
parser.add_argument(
    "--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
class MyModel(Model):
    """Small MNIST CNN: one conv layer feeding a two-layer dense head."""

    def __init__(self, hiddens=128):
        super(MyModel, self).__init__()
        # Feature extractor followed by the classifier head.
        self.conv1 = Conv2D(32, 3, activation="relu")
        self.flatten = Flatten()
        self.d1 = Dense(hiddens, activation="relu")
        self.d2 = Dense(10, activation="softmax")

    def call(self, x):
        """Forward pass; returns per-class probabilities."""
        return self.d2(self.d1(self.flatten(self.conv1(x))))
class MNISTTrainable(tune.Trainable):
    """Tune Trainable wrapping the TF2 advanced-quickstart MNIST model."""

    def _setup(self, config):
        # IMPORTANT: See the above note.
        import tensorflow as tf
        (x_train, y_train), (x_test, y_test) = load_data()
        x_train, x_test = x_train / 255.0, x_test / 255.0
        # Add a channels dimension
        x_train = x_train[..., tf.newaxis]
        x_test = x_test[..., tf.newaxis]
        self.train_ds = tf.data.Dataset.from_tensor_slices((x_train, y_train))
        self.train_ds = self.train_ds.shuffle(10000).batch(
            config.get("batch", 32))
        self.test_ds = tf.data.Dataset.from_tensor_slices((x_test,
                                                           y_test)).batch(32)
        self.model = MyModel(hiddens=config.get("hiddens", 128))
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
        self.optimizer = tf.keras.optimizers.Adam()
        # Streaming metrics; reset at the start of every _train call.
        self.train_loss = tf.keras.metrics.Mean(name="train_loss")
        self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name="train_accuracy")
        self.test_loss = tf.keras.metrics.Mean(name="test_loss")
        self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(
            name="test_accuracy")

        # The compiled steps are defined as closures (not methods) so that
        # @tf.function captures the locally imported ``tf``.
        @tf.function
        def train_step(images, labels):
            with tf.GradientTape() as tape:
                predictions = self.model(images)
                loss = self.loss_object(labels, predictions)
            gradients = tape.gradient(loss, self.model.trainable_variables)
            self.optimizer.apply_gradients(
                zip(gradients, self.model.trainable_variables))
            self.train_loss(loss)
            self.train_accuracy(labels, predictions)

        @tf.function
        def test_step(images, labels):
            predictions = self.model(images)
            t_loss = self.loss_object(labels, predictions)
            self.test_loss(t_loss)
            self.test_accuracy(labels, predictions)

        self.tf_train_step = train_step
        self.tf_test_step = test_step

    def _train(self):
        # Fresh metric state for this reporting period.
        self.train_loss.reset_states()
        self.train_accuracy.reset_states()
        self.test_loss.reset_states()
        self.test_accuracy.reset_states()
        for idx, (images, labels) in enumerate(self.train_ds):
            if idx > MAX_TRAIN_BATCH:  # This is optional and can be removed.
                break
            self.tf_train_step(images, labels)
        for test_images, test_labels in self.test_ds:
            self.tf_test_step(test_images, test_labels)
        # It is important to return tf.Tensors as numpy objects.
        return {
            "epoch": self.iteration,
            "loss": self.train_loss.result().numpy(),
            "accuracy": self.train_accuracy.result().numpy() * 100,
            "test_loss": self.test_loss.result().numpy(),
            "mean_accuracy": self.test_accuracy.result().numpy() * 100
        }
if __name__ == "__main__":
    load_data()  # we download data on the driver to avoid race conditions.
    # Grid-search the hidden-layer width across three trials.
    tune.run(
        MNISTTrainable,
        stop={"training_iteration": 5 if args.smoke_test else 50},
        verbose=1,
        config={"hiddens": tune.grid_search([32, 64, 128])})
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/track_example.py
|
Python
|
import argparse
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import mnist
from ray.tune import track
from ray.tune.integration.keras import TuneReporterCallback
# CLI hyperparameters for the single tracked run.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--smoke-test", action="store_true", help="Finish quickly for testing")
parser.add_argument(
    "--lr",
    type=float,
    default=0.01,
    metavar="LR",
    help="learning rate (default: 0.01)")
parser.add_argument(
    "--momentum",
    type=float,
    default=0.5,
    metavar="M",
    help="SGD momentum (default: 0.5)")
parser.add_argument(
    "--hidden", type=int, default=64, help="Size of hidden layer.")
args, _ = parser.parse_known_args()
def train_mnist(args):
    """Train a small dense MNIST model once, streaming metrics via track."""
    track.init(trial_name="track-example", trial_config=vars(args))
    batch_size = 128
    num_classes = 10
    epochs = 1 if args.smoke_test else 12
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(args.hidden, activation="relu"),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(num_classes, activation="softmax")
    ])
    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=keras.optimizers.SGD(lr=args.lr, momentum=args.momentum),
        metrics=["accuracy"])
    # TuneReporterCallback forwards per-epoch Keras metrics to track.
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        validation_data=(x_test, y_test),
        callbacks=[TuneReporterCallback()])
    track.shutdown()
if __name__ == "__main__":
    # Single run with the CLI-provided hyperparameters.
    train_mnist(args)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/tune_cifar10_gluon.py
|
Python
|
from __future__ import print_function
import argparse
import random
import mxnet as mx
import numpy as np
from mxnet import gluon, init
from mxnet import autograd as ag
from mxnet.gluon import nn
from mxnet.gluon.data.vision import transforms
from gluoncv.model_zoo import get_model
from gluoncv.data import transforms as gcv_transforms
# Training settings
# CLI defaults; any key present in the Tune-sampled config overrides the
# matching attribute inside train_cifar10 (via vars(args).update(config)).
parser = argparse.ArgumentParser(description="CIFAR-10 Example")
parser.add_argument(
    "--model",
    required=True,
    type=str,
    default="resnet50_v1b",
    help="name of the pretrained model from gluoncv model zoo"
    "(default: resnet50_v1b).")
parser.add_argument(
    "--batch_size",
    type=int,
    default=64,
    metavar="N",
    help="input batch size for training (default: 64)")
parser.add_argument(
    "--epochs",
    type=int,
    default=1,
    metavar="N",
    help="number of epochs to train (default: 1)")
parser.add_argument(
    "--num_gpus",
    default=0,
    type=int,
    help="number of gpus to use, 0 indicates cpu only (default: 0)")
parser.add_argument(
    "--num_workers",
    default=4,
    type=int,
    help="number of preprocessing workers (default: 4)")
parser.add_argument(
    "--classes",
    type=int,
    default=10,
    metavar="N",
    help="number of outputs (default: 10)")
parser.add_argument(
    "--lr",
    default=0.001,
    type=float,
    help="initial learning rate (default: 0.001)")
parser.add_argument(
    "--momentum",
    default=0.9,
    type=float,
    help="initial momentum (default: 0.9)")
parser.add_argument(
    "--wd", default=1e-4, type=float, help="weight decay (default: 1e-4)")
parser.add_argument(
    "--expname", type=str, default="cifar10exp", help="experiments location")
parser.add_argument(
    "--num_samples",
    type=int,
    default=20,
    metavar="N",
    help="number of samples (default: 20)")
parser.add_argument(
    "--scheduler",
    type=str,
    default="fifo",
    help="FIFO or AsyncHyperBandScheduler.")
parser.add_argument(
    "--seed",
    type=int,
    default=1,
    metavar="S",
    help="random seed (default: 1)")
parser.add_argument(
    "--smoke_test", action="store_true", help="Finish quickly for testing")
args = parser.parse_args()
def train_cifar10(args, config, reporter):
    """Fine-tune a pretrained gluoncv model on CIFAR-10, reporting to Tune.

    Arguments:
        args: Parsed CLI namespace; updated in place with ``config``.
        config: Tune-sampled hyperparameters overriding the CLI defaults.
        reporter: Tune reporter called once per epoch with mean_loss and
            mean_accuracy.
    """
    vars(args).update(config)
    np.random.seed(args.seed)
    random.seed(args.seed)
    mx.random.seed(args.seed)

    # Set Hyper-params
    batch_size = args.batch_size * max(args.num_gpus, 1)
    ctx = [mx.gpu(i)
           for i in range(args.num_gpus)] if args.num_gpus > 0 else [mx.cpu()]

    # Define DataLoader
    transform_train = transforms.Compose([
        gcv_transforms.RandomCrop(32, pad=4),
        transforms.RandomFlipLeftRight(),
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465],
                             [0.2023, 0.1994, 0.2010])
    ])
    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize([0.4914, 0.4822, 0.4465],
                             [0.2023, 0.1994, 0.2010])
    ])
    train_data = gluon.data.DataLoader(
        gluon.data.vision.CIFAR10(train=True).transform_first(transform_train),
        batch_size=batch_size,
        shuffle=True,
        last_batch="discard",
        num_workers=args.num_workers)
    test_data = gluon.data.DataLoader(
        gluon.data.vision.CIFAR10(train=False).transform_first(transform_test),
        batch_size=batch_size,
        shuffle=False,
        num_workers=args.num_workers)

    # Load model architecture and Initialize the net with pretrained model
    finetune_net = get_model(args.model, pretrained=True)
    with finetune_net.name_scope():
        finetune_net.fc = nn.Dense(args.classes)
    finetune_net.fc.initialize(init.Xavier(), ctx=ctx)
    finetune_net.collect_params().reset_ctx(ctx)
    finetune_net.hybridize()

    # Define trainer
    trainer = gluon.Trainer(finetune_net.collect_params(), "sgd", {
        "learning_rate": args.lr,
        "momentum": args.momentum,
        "wd": args.wd
    })
    L = gluon.loss.SoftmaxCrossEntropyLoss()
    metric = mx.metric.Accuracy()

    def train(epoch):
        # One full pass over the training set. ``epoch`` is unused but kept
        # for readability of the driver loop below.
        for i, batch in enumerate(train_data):
            data = gluon.utils.split_and_load(
                batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
            label = gluon.utils.split_and_load(
                batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
            with ag.record():
                outputs = [finetune_net(X) for X in data]
                loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
            for l in loss:
                l.backward()
            trainer.step(batch_size)
        mx.nd.waitall()

    def test():
        # BUGFIX: reset the shared accuracy metric so each evaluation
        # reports per-epoch accuracy instead of a running average
        # accumulated over all previous epochs.
        metric.reset()
        test_loss = 0
        for i, batch in enumerate(test_data):
            data = gluon.utils.split_and_load(
                batch[0], ctx_list=ctx, batch_axis=0, even_split=False)
            label = gluon.utils.split_and_load(
                batch[1], ctx_list=ctx, batch_axis=0, even_split=False)
            outputs = [finetune_net(X) for X in data]
            loss = [L(yhat, y) for yhat, y in zip(outputs, label)]
            test_loss += sum(l.mean().asscalar() for l in loss) / len(loss)
            metric.update(label, outputs)
        _, test_acc = metric.get()
        test_loss /= len(test_data)
        reporter(mean_loss=test_loss, mean_accuracy=test_acc)

    for epoch in range(1, args.epochs + 1):
        train(epoch)
        test()
if __name__ == "__main__":
    args = parser.parse_args()
    import ray
    from ray import tune
    from ray.tune.schedulers import AsyncHyperBandScheduler, FIFOScheduler
    ray.init()
    # Pick the trial scheduler from the CLI flag.
    if args.scheduler == "fifo":
        sched = FIFOScheduler()
    elif args.scheduler == "asynchyperband":
        sched = AsyncHyperBandScheduler(
            time_attr="training_iteration",
            metric="mean_loss",
            mode="min",
            max_t=400,
            grace_period=60)
    else:
        raise NotImplementedError
    # The lambda closes over ``args`` so each trial receives CLI defaults
    # plus its sampled config.
    tune.register_trainable(
        "TRAIN_FN",
        lambda config, reporter: train_cifar10(args, config, reporter))
    tune.run(
        "TRAIN_FN",
        name=args.expname,
        verbose=2,
        scheduler=sched,
        stop={
            "mean_accuracy": 0.98,
            "training_iteration": 1 if args.smoke_test else args.epochs
        },
        resources_per_trial={
            "cpu": int(args.num_workers),
            "gpu": int(args.num_gpus)
        },
        num_samples=1 if args.smoke_test else args.num_samples,
        config={
            "lr": tune.sample_from(
                lambda spec: np.power(10.0, np.random.uniform(-4, -1))),
            "momentum": tune.sample_from(
                lambda spec: np.random.uniform(0.85, 0.95)),
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/tune_mnist_keras.py
|
Python
|
import argparse
import numpy as np
from tensorflow.keras.datasets import mnist
from ray.tune.integration.keras import TuneReporterCallback
# Only flag: --smoke-test shortens the run for CI.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--smoke-test", action="store_true", help="Finish quickly for testing")
args, _ = parser.parse_known_args()
def train_mnist(config, reporter):
    """Train an MNIST classifier with Tune-sampled hyperparameters.

    ``config`` supplies "lr", "momentum" and "hidden".
    NOTE(review): the sampled "threads" entry is never applied here —
    confirm whether it should configure TF threading.
    """
    # https://github.com/tensorflow/tensorflow/issues/32159
    import tensorflow as tf
    batch_size = 128
    num_classes = 10
    epochs = 12
    (x_train, y_train), (x_test, y_test) = mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0
    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(config["hidden"], activation="relu"),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(num_classes, activation="softmax")
    ])
    model.compile(
        loss="sparse_categorical_crossentropy",
        optimizer=tf.keras.optimizers.SGD(
            lr=config["lr"], momentum=config["momentum"]),
        metrics=["accuracy"])
    # The callback forwards each epoch's metrics through ``reporter``.
    model.fit(
        x_train,
        y_train,
        batch_size=batch_size,
        epochs=epochs,
        verbose=0,
        validation_data=(x_test, y_test),
        callbacks=[TuneReporterCallback(reporter)])
if __name__ == "__main__":
    import ray
    from ray import tune
    from ray.tune.schedulers import AsyncHyperBandScheduler
    mnist.load_data()  # we do this on the driver because it's not threadsafe
    ray.init()
    sched = AsyncHyperBandScheduler(
        time_attr="training_iteration",
        metric="mean_accuracy",
        mode="max",
        max_t=400,
        grace_period=20)
    # 10 trials, each with independently sampled lr/momentum/hidden.
    tune.run(
        train_mnist,
        name="exp",
        scheduler=sched,
        stop={
            "mean_accuracy": 0.99,
            "training_iteration": 5 if args.smoke_test else 300
        },
        num_samples=10,
        resources_per_trial={
            "cpu": 2,
            "gpu": 0
        },
        config={
            "threads": 2,
            "lr": tune.sample_from(lambda spec: np.random.uniform(0.001, 0.1)),
            "momentum": tune.sample_from(
                lambda spec: np.random.uniform(0.1, 0.9)),
            "hidden": tune.sample_from(
                lambda spec: np.random.randint(32, 512)),
        })
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/utils.py
|
Python
|
import tensorflow as tf
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder
def get_iris_data(test_size=0.2):
    """Load the iris dataset, one-hot encode the labels, and split it.

    Args:
        test_size (float): Fraction of samples reserved for the test split.

    Returns:
        Tuple ``(train_x, train_y, test_x, test_y)``.
    """
    iris_data = load_iris()
    x = iris_data.data
    y = iris_data.target.reshape(-1, 1)
    encoder = OneHotEncoder(sparse=False)
    y = encoder.fit_transform(y)
    # BUGFIX: ``test_size`` was accepted but never forwarded, so the split
    # always used sklearn's default (0.25) regardless of the argument.
    train_x, test_x, train_y, test_y = train_test_split(
        x, y, test_size=test_size)
    return train_x, train_y, test_x, test_y
def set_keras_threads(threads):
    """Cap TensorFlow's inter- and intra-op thread pools at ``threads``."""
    # We set threads here to avoid contention, as Keras
    # is heavily parallelized across multiple cores.
    tf.config.threading.set_inter_op_parallelism_threads(threads)
    tf.config.threading.set_intra_op_parallelism_threads(threads)
def TuneKerasCallback(*args, **kwargs):
    """Removed API stub; always raises, pointing callers at the new name."""
    message = ("TuneKerasCallback is now "
               "tune.integration.keras.TuneReporterCallback.")
    raise DeprecationWarning(message)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/examples/xgboost_example.py
|
Python
|
import xgboost as xgb
import numpy as np
import sklearn.datasets
import sklearn.metrics
from sklearn.model_selection import train_test_split
from ray import tune
def XGBCallback(env):
    # xgboost invokes this after each boosting round; forward the current
    # evaluation results (e.g. eval-logloss) to Tune.
    tune.track.log(**dict(env.evaluation_result_list))
def train_breast_cancer(config):
    """Train an XGBoost classifier on the breast-cancer dataset.

    Reports per-round eval metrics via XGBCallback and a final
    ``mean_accuracy`` through tune.track.
    """
    data, target = sklearn.datasets.load_breast_cancer(return_X_y=True)
    train_x, test_x, train_y, test_y = train_test_split(
        data, target, test_size=0.25)
    train_set = xgb.DMatrix(train_x, label=train_y)
    test_set = xgb.DMatrix(test_x, label=test_y)
    bst = xgb.train(
        config, train_set, evals=[(test_set, "eval")], callbacks=[XGBCallback])
    preds = bst.predict(test_set)
    # Round predicted probabilities to hard 0/1 labels.
    pred_labels = np.rint(preds)
    tune.track.log(
        mean_accuracy=sklearn.metrics.accuracy_score(test_y, pred_labels),
        done=True)
if __name__ == "__main__":
    num_threads = 2
    # Fixed XGBoost settings plus Tune-sampled tree hyperparameters.
    config = {
        "verbosity": 0,
        "num_threads": num_threads,
        "objective": "binary:logistic",
        "booster": "gbtree",
        "eval_metric": ["auc", "ams@0", "logloss"],
        "max_depth": tune.randint(1, 9),
        "eta": tune.loguniform(1e-4, 1e-1),
        "gamma": tune.loguniform(1e-8, 1.0),
        "grow_policy": tune.choice(["depthwise", "lossguide"])
    }
    from ray.tune.schedulers import ASHAScheduler
    # ASHA early-stops trials on the eval-logloss reported by XGBCallback.
    tune.run(
        train_breast_cancer,
        resources_per_trial={"cpu": num_threads},
        config=config,
        num_samples=2,
        scheduler=ASHAScheduler(metric="eval-logloss", mode="min"))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/experiment.py
|
Python
|
import copy
import inspect
import logging
import os
import six
import types
from ray.tune.error import TuneError
from ray.tune.registry import register_trainable, get_trainable_cls
from ray.tune.result import DEFAULT_RESULTS_DIR
from ray.tune.sample import sample_from
logger = logging.getLogger(__name__)
def _raise_deprecation_note(deprecated, replacement, soft=False):
"""User notification for deprecated parameter.
Arguments:
deprecated (str): Deprecated parameter.
replacement (str): Replacement parameter to use instead.
soft (bool): Fatal if True.
"""
error_msg = ("`{deprecated}` is deprecated. Please use `{replacement}`. "
"`{deprecated}` will be removed in future versions of "
"Ray.".format(deprecated=deprecated, replacement=replacement))
if soft:
logger.warning(error_msg)
else:
raise DeprecationWarning(error_msg)
def _raise_on_durable(trainable_name, sync_to_driver, upload_dir):
    """Validate sync settings when the trainable is a DurableTrainable.

    Raises:
        ValueError: if ``sync_to_driver`` is not explicitly False, or if
            ``upload_dir`` is missing — DurableTrainable syncs its own
            results to the upload_dir.
    """
    trainable_cls = get_trainable_cls(trainable_name)
    # Imported here to avoid a circular import at module load time.
    from ray.tune.durable_trainable import DurableTrainable
    if issubclass(trainable_cls, DurableTrainable):
        if sync_to_driver is not False:
            raise ValueError(
                "EXPERIMENTAL: DurableTrainable will automatically sync "
                "results to the provided upload_dir. "
                "Set `sync_to_driver=False` to avoid data inconsistencies.")
        if not upload_dir:
            raise ValueError(
                "EXPERIMENTAL: DurableTrainable will automatically sync "
                "results to the provided upload_dir. "
                "`upload_dir` must be provided.")
class Experiment:
    """Tracks experiment specifications.

    Implicitly registers the Trainable if needed.

    Examples:
        >>> experiment_spec = Experiment(
        >>>     "my_experiment_name",
        >>>     my_func,
        >>>     stop={"mean_accuracy": 100},
        >>>     config={
        >>>         "alpha": tune.grid_search([0.2, 0.4, 0.6]),
        >>>         "beta": tune.grid_search([1, 2]),
        >>>     },
        >>>     resources_per_trial={
        >>>         "cpu": 1,
        >>>         "gpu": 0
        >>>     },
        >>>     num_samples=10,
        >>>     local_dir="~/ray_results",
        >>>     checkpoint_freq=10,
        >>>     max_failures=2)
    """

    def __init__(self,
                 name,
                 run,
                 stop=None,
                 config=None,
                 resources_per_trial=None,
                 num_samples=1,
                 local_dir=None,
                 upload_dir=None,
                 trial_name_creator=None,
                 loggers=None,
                 sync_to_driver=None,
                 checkpoint_freq=0,
                 checkpoint_at_end=False,
                 sync_on_checkpoint=True,
                 keep_checkpoints_num=None,
                 checkpoint_score_attr=None,
                 export_formats=None,
                 max_failures=0,
                 restore=None,
                 repeat=None,
                 trial_resources=None,
                 sync_function=None):
        """Initialize a new Experiment.

        The args here take the same meaning as the command line flags defined
        in `tune.py:run`.
        """
        # Removed parameters fail fast with a pointer to the replacement.
        if repeat:
            _raise_deprecation_note("repeat", "num_samples", soft=False)
        if trial_resources:
            _raise_deprecation_note(
                "trial_resources", "resources_per_trial", soft=False)
        if sync_function:
            _raise_deprecation_note(
                "sync_function", "sync_to_driver", soft=False)

        stop = stop or {}
        if not isinstance(stop, dict) and not callable(stop):
            raise ValueError("Invalid stop criteria: {}. Must be a callable "
                             "or dict".format(stop))
        if callable(stop):
            # BUGFIX: inspect.getargspec() was deprecated and removed in
            # Python 3.11; getfullargspec() is the drop-in replacement for
            # counting positional parameters.
            nargs = len(inspect.getfullargspec(stop).args)
            # Bound methods declare ``self`` as an extra parameter.
            is_method = isinstance(stop, types.MethodType)
            if (is_method and nargs != 3) or (not is_method and nargs != 2):
                raise ValueError(
                    "Invalid stop criteria: {}. Callable "
                    "criteria must take exactly 2 parameters.".format(stop))

        config = config or {}
        self._run_identifier = Experiment.register_if_needed(run)
        self.name = name or self._run_identifier
        if upload_dir:
            self.remote_checkpoint_dir = os.path.join(upload_dir, self.name)
        else:
            self.remote_checkpoint_dir = None

        _raise_on_durable(self._run_identifier, sync_to_driver, upload_dir)

        # Normalized trial spec consumed by the trial runner.
        spec = {
            "run": self._run_identifier,
            "stop": stop,
            "config": config,
            "resources_per_trial": resources_per_trial,
            "num_samples": num_samples,
            "local_dir": os.path.abspath(
                os.path.expanduser(local_dir or DEFAULT_RESULTS_DIR)),
            "upload_dir": upload_dir,
            "remote_checkpoint_dir": self.remote_checkpoint_dir,
            "trial_name_creator": trial_name_creator,
            "loggers": loggers,
            "sync_to_driver": sync_to_driver,
            "checkpoint_freq": checkpoint_freq,
            "checkpoint_at_end": checkpoint_at_end,
            "sync_on_checkpoint": sync_on_checkpoint,
            "keep_checkpoints_num": keep_checkpoints_num,
            "checkpoint_score_attr": checkpoint_score_attr,
            "export_formats": export_formats or [],
            "max_failures": max_failures,
            "restore": os.path.abspath(os.path.expanduser(restore))
            if restore else None
        }
        self.spec = spec

    @classmethod
    def from_json(cls, name, spec):
        """Generates an Experiment object from JSON.

        Args:
            name (str): Name of Experiment.
            spec (dict): JSON configuration of experiment.
        """
        if "run" not in spec:
            raise TuneError("No trainable specified!")

        # Special case the `env` param for RLlib by automatically
        # moving it into the `config` section.
        if "env" in spec:
            spec["config"] = spec.get("config", {})
            spec["config"]["env"] = spec["env"]
            del spec["env"]

        spec = copy.deepcopy(spec)
        run_value = spec.pop("run")
        try:
            exp = cls(name, run_value, **spec)
        except TypeError:
            raise TuneError("Improper argument from JSON: {}.".format(spec))
        return exp

    @classmethod
    def register_if_needed(cls, run_object):
        """Registers Trainable or Function at runtime.

        Assumes already registered if run_object is a string.
        Also, does not inspect interface of given run_object.

        Arguments:
            run_object (str|function|class): Trainable to run. If string,
                assumes it is an ID and does not modify it. Otherwise,
                returns a string corresponding to the run_object name.

        Returns:
            A string representing the trainable identifier.
        """
        if isinstance(run_object, six.string_types):
            return run_object
        elif isinstance(run_object, sample_from):
            logger.warning("Not registering trainable. Resolving as variant.")
            return run_object
        elif isinstance(run_object, type) or callable(run_object):
            name = "DEFAULT"
            if hasattr(run_object, "__name__"):
                name = run_object.__name__
            else:
                logger.warning(
                    "No name detected on trainable. Using {}.".format(name))
            register_trainable(name, run_object)
            return name
        else:
            raise TuneError("Improper 'run' - not string nor trainable.")

    @property
    def local_dir(self):
        return self.spec.get("local_dir")

    @property
    def checkpoint_dir(self):
        # Experiment checkpoints live under <local_dir>/<name>.
        if self.local_dir:
            return os.path.join(self.local_dir, self.name)

    @property
    def run_identifier(self):
        """Returns a string representing the trainable identifier."""
        return self._run_identifier
def convert_to_experiment_list(experiments):
    """Produces a list of Experiment objects.

    Converts input from dict, single experiment, or list of
    experiments to list of experiments. If input is None,
    will return an empty list.

    Arguments:
        experiments (Experiment | list | dict): Experiments to run.

    Returns:
        List of experiments.

    Raises:
        TuneError: If the input cannot be interpreted as experiments.
    """
    exp_list = experiments
    # Normalize each supported input form to a list of Experiment objects.
    if experiments is None:
        exp_list = []
    elif isinstance(experiments, Experiment):
        exp_list = [experiments]
    elif isinstance(experiments, dict):
        # isinstance (rather than an exact `type(...) is dict` check) also
        # accepts dict subclasses such as collections.OrderedDict, which
        # previously raised TuneError despite being valid mappings.
        exp_list = [
            Experiment.from_json(name, spec)
            for name, spec in experiments.items()
        ]
    # Validate that the result is a homogeneous list of Experiments.
    if (isinstance(exp_list, list)
            and all(isinstance(exp, Experiment) for exp in exp_list)):
        if len(exp_list) > 1:
            logger.warning("All experiments will be "
                           "using the same SearchAlgorithm.")
    else:
        raise TuneError("Invalid argument: {}".format(experiments))
    return exp_list
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/function_runner.py
|
Python
|
import logging
import time
import inspect
import threading
import traceback
from six.moves import queue
from ray.tune import track
from ray.tune import TuneError
from ray.tune.trainable import Trainable
from ray.tune.result import TIME_THIS_ITER_S, RESULT_DUPLICATE
logger = logging.getLogger(__name__)
# Time between FunctionRunner checks when fetching
# new results after signaling the reporter to continue
RESULT_FETCH_TIMEOUT = 0.2
# Max seconds the runner thread blocks while pushing an error into the
# size-1 error queue (see _RunnerThread.run) before re-raising directly.
ERROR_REPORT_TIMEOUT = 10
# Max seconds the main thread blocks while draining the error queue
# (see FunctionRunner._report_thread_runner_error).
ERROR_FETCH_TIMEOUT = 1
class StatusReporter:
    """Callable handed into your function for reporting training status.

    Example:
        >>> def trainable_function(config, reporter):
        >>>     assert isinstance(reporter, StatusReporter)
        >>>     reporter(timesteps_this_iter=1)
    """

    def __init__(self, result_queue, continue_semaphore, logdir=None):
        self._queue = result_queue
        self._continue_semaphore = continue_semaphore
        self._logdir = logdir
        # Set by _start(); guards against reporting before the trial began.
        self._last_report_time = None

    def __call__(self, **kwargs):
        """Report updated training status.

        Pass in `done=True` when the training job is completed.

        Args:
            kwargs: Latest training result status.

        Example:
            >>> reporter(mean_accuracy=1, training_iteration=4)
            >>> reporter(mean_accuracy=1, training_iteration=4, done=True)

        Raises:
            StopIteration: A StopIteration exception is raised if the trial has
                been signaled to stop.
        """
        assert self._last_report_time is not None, (
            "StatusReporter._start() must be called before the first "
            "report __call__ is made to ensure correct runtime metrics.")
        # Time per iteration is measured here, in the reporter, so that
        # delays in logging results are not counted against the trial.
        now = time.time()
        if TIME_THIS_ITER_S not in kwargs:
            kwargs[TIME_THIS_ITER_S] = now - self._last_report_time
        self._last_report_time = now
        # Hand a copy to the main thread via the thread-safe queue.
        self._queue.put(kwargs.copy(), block=True)
        # Block until the FunctionRunner signals that the last result has
        # been returned to Tune and training may safely resume.
        self._continue_semaphore.acquire()

    def _start(self):
        self._last_report_time = time.time()

    @property
    def logdir(self):
        return self._logdir
class _RunnerThread(threading.Thread):
    """Daemon thread that supervises execution of the user's function."""

    def __init__(self, entrypoint, error_queue):
        super(_RunnerThread, self).__init__()
        self.daemon = True
        self._entrypoint = entrypoint
        self._error_queue = error_queue

    def run(self):
        try:
            self._entrypoint()
        except StopIteration:
            logger.debug(
                ("Thread runner raised StopIteration. Interperting it as a "
                 "signal to terminate the thread without error."))
        except Exception as e:
            logger.exception("Runner Thread raised error.")
            try:
                # report the error but avoid indefinite blocking which would
                # prevent the exception from being propagated in the unlikely
                # case that something went terribly wrong
                err_tb_str = traceback.format_exc()
                self._error_queue.put(
                    err_tb_str, block=True, timeout=ERROR_REPORT_TIMEOUT)
            except queue.Full:
                logger.critical(
                    ("Runner Thread was unable to report error to main "
                     "function runner thread. This means a previous error "
                     "was not processed. This should never happen."))
            raise e
class FunctionRunner(Trainable):
    """Trainable that runs a user function reporting results.
    This mode of execution does not support checkpoint/restore."""
    _name = "func"
    def _setup(self, config):
        """Creates the queues/semaphore and the (unstarted) runner thread.
        Args:
            config (dict): Trial configuration passed to the user function.
        """
        # Semaphore for notifying the reporter to continue with the computation
        # and to generate the next result.
        self._continue_semaphore = threading.Semaphore(0)
        # Queue for passing results between threads
        self._results_queue = queue.Queue(1)
        # Queue for passing errors back from the thread runner. The error queue
        # has a max size of one to prevent stacking error and force error
        # reporting to block until finished.
        self._error_queue = queue.Queue(1)
        self._status_reporter = StatusReporter(
            self._results_queue, self._continue_semaphore, self.logdir)
        self._last_result = {}
        # Shallow copy so the user function receives its own dict.
        config = config.copy()
        def entrypoint():
            return self._trainable_func(config, self._status_reporter)
        # the runner thread is not started until the first call to _train
        self._runner = _RunnerThread(entrypoint, self._error_queue)
    def _trainable_func(self):
        """Subclasses can override this to set the trainable func."""
        # NOTE(review): overriding subclasses (see wrap_function) define this
        # as (self, config, reporter), and `entrypoint` above calls it with
        # two arguments -- the base signature here does not match.
        raise NotImplementedError
    def _train(self):
        """Implements train() for a Function API.
        If the RunnerThread finishes without reporting "done",
        Tune will automatically provide a magic keyword __duplicate__
        along with a result with "done=True". The TrialRunner will handle the
        result accordingly (see tune/trial_runner.py).
        """
        if self._runner.is_alive():
            # if started and alive, inform the reporter to continue and
            # generate the next result
            self._continue_semaphore.release()
        else:
            # if not alive, try to start
            self._status_reporter._start()
            try:
                self._runner.start()
            except RuntimeError:
                # If this is reached, it means the thread was started and is
                # now done or has raised an exception.
                pass
        result = None
        while result is None and self._runner.is_alive():
            # fetch the next produced result
            try:
                result = self._results_queue.get(
                    block=True, timeout=RESULT_FETCH_TIMEOUT)
            except queue.Empty:
                pass
        # if no result were found, then the runner must no longer be alive
        if result is None:
            # Try one last time to fetch results in case results were reported
            # in between the time of the last check and the termination of the
            # thread runner.
            try:
                result = self._results_queue.get(block=False)
            except queue.Empty:
                pass
        # check if error occured inside the thread runner
        if result is None:
            # only raise an error from the runner if all results are consumed
            self._report_thread_runner_error(block=True)
            # Under normal conditions, this code should never be reached since
            # this branch should only be visited if the runner thread raised
            # an exception. If no exception were raised, it means that the
            # runner thread never reported any results which should not be
            # possible when wrapping functions with `wrap_function`.
            raise TuneError(
                ("Wrapped function ran until completion without reporting "
                 "results or raising an exception."))
        else:
            if not self._error_queue.empty():
                logger.warning(
                    ("Runner error waiting to be raised in main thread. "
                     "Logging all available results first."))
        # This keyword appears if the train_func using the Function API
        # finishes without "done=True". This duplicates the last result, but
        # the TrialRunner will not log this result again.
        if "__duplicate__" in result:
            new_result = self._last_result.copy()
            new_result.update(result)
            result = new_result
        self._last_result = result
        return result
    def _stop(self):
        """Warns about unconsumed results and surfaces any pending error."""
        # If everything stayed in synch properly, this should never happen.
        if not self._results_queue.empty():
            logger.warning(
                ("Some results were added after the trial stop condition. "
                 "These results won't be logged."))
        # Check for any errors that might have been missed.
        self._report_thread_runner_error()
    def _report_thread_runner_error(self, block=False):
        """Raises TuneError if the runner thread queued a traceback.
        Args:
            block (bool): Whether to wait up to ERROR_FETCH_TIMEOUT seconds
                for an error to appear before giving up silently.
        """
        try:
            err_tb_str = self._error_queue.get(
                block=block, timeout=ERROR_FETCH_TIMEOUT)
            raise TuneError(("Trial raised an exception. Traceback:\n{}"
                             .format(err_tb_str)))
        except queue.Empty:
            pass
def wrap_function(train_func):
    """Wrap `train_func` in a `FunctionRunner` subclass and return it.

    If `train_func` accepts a single argument (no explicit `reporter`
    parameter), the `tune.track` reporting API is used instead.
    """
    use_track = False
    try:
        arg_names = inspect.getfullargspec(train_func).args
        use_track = (len(arg_names) == 1 and "reporter" not in arg_names)
        if use_track:
            logger.info("tune.track signature detected.")
    except Exception:
        logger.info(
            "Function inspection failed - assuming reporter signature.")

    class WrappedFunc(FunctionRunner):
        def _trainable_func(self, config, reporter):
            output = train_func(config, reporter)
            # If train_func returns, we need to notify the main event loop
            # of the last result while avoiding double logging. This is done
            # with the keyword RESULT_DUPLICATE -- see tune/trial_runner.py.
            reporter(**{RESULT_DUPLICATE: True})
            return output

    class WrappedTrackFunc(FunctionRunner):
        def _trainable_func(self, config, reporter):
            track.init(_tune_reporter=reporter)
            output = train_func(config)
            reporter(**{RESULT_DUPLICATE: True})
            track.shutdown()
            return output

    return WrappedTrackFunc if use_track else WrappedFunc
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/integration/keras.py
|
Python
|
from tensorflow import keras
from ray.tune import track
class TuneReporterCallback(keras.callbacks.Callback):
    """Tune Callback for Keras.

    Reports `keras_info` and `mean_accuracy` to Tune after every batch or
    epoch, adding negated copies of loss metrics (prefixed "neg_") so that
    they can be maximized by schedulers.
    """

    def __init__(self, reporter=None, freq="batch", logs=None):
        """Initializer.

        Args:
            reporter (StatusReporter|tune.track.log|None): Tune object for
                returning results.
            freq (str): Sets the frequency of reporting intermediate results.
                One of ["batch", "epoch"].
            logs (dict|None): Unused; kept for backward compatibility.
        """
        self.reporter = reporter or track.log
        self.iteration = 0
        if freq not in ["batch", "epoch"]:
            raise ValueError("{} not supported as a frequency.".format(freq))
        self.freq = freq
        super(TuneReporterCallback, self).__init__()

    # NOTE: the `logs` defaults were changed from the mutable `logs={}` to
    # `logs=None`. With a shared default dict, the "neg_" keys added below
    # accumulated across calls and leaked between invocations.
    def on_batch_end(self, batch, logs=None):
        if self.freq == "batch":
            self._report(logs)

    def on_epoch_end(self, batch, logs=None):
        if self.freq == "epoch":
            self._report(logs)

    def _report(self, logs):
        """Adds neg_* copies of loss metrics and reports results to Tune."""
        logs = {} if logs is None else logs
        self.iteration += 1
        for metric in list(logs):
            if "loss" in metric and "neg_" not in metric:
                logs["neg_" + metric] = -logs[metric]
        if "acc" in logs:
            self.reporter(keras_info=logs, mean_accuracy=logs["acc"])
        else:
            self.reporter(keras_info=logs, mean_accuracy=logs.get("accuracy"))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/logger.py
|
Python
|
import csv
import json
import logging
import os
import yaml
import distutils.version
import numbers
import numpy as np
import ray.cloudpickle as cloudpickle
from ray.tune.result import (NODE_IP, TRAINING_ITERATION, TIME_TOTAL_S,
TIMESTEPS_TOTAL, EXPR_PARAM_FILE,
EXPR_PARAM_PICKLE_FILE, EXPR_PROGRESS_FILE,
EXPR_RESULT_FILE)
from ray.tune.syncer import get_node_syncer
from ray.tune.utils import flatten_dict
logger = logging.getLogger(__name__)
# Lazily-imported TensorFlow module; populated by tf2_compat_logger /
# TFLogger._init / TF2Logger._init (set to the v1 or v2 compat API).
tf = None
# Scalar types that are written to TensorBoard summaries.
VALID_SUMMARY_TYPES = [int, float, np.float32, np.float64, np.int32]
class Logger:
    """Logging interface for ray.tune.

    By default, the UnifiedLogger implementation is used which logs results in
    multiple formats (TensorBoard, rllab/viskit, plain json, custom loggers)
    at once.

    Arguments:
        config: Configuration passed to all logger creators.
        logdir: Directory for all logger creators to log to.
    """

    def __init__(self, config, logdir, trial=None):
        self.config = config
        self.logdir = logdir
        self.trial = trial
        self._init()

    def _init(self):
        """Subclass hook for initialization; no-op by default."""

    def on_result(self, result):
        """Given a result, appends it to the existing log."""
        raise NotImplementedError

    def update_config(self, config):
        """Updates the config for logger."""

    def close(self):
        """Releases all resources used by this logger."""

    def flush(self):
        """Flushes all disk writes to storage."""
class NoopLogger(Logger):
    """Logger that silently discards every result."""

    def on_result(self, result):
        pass
class MLFLowLogger(Logger):
    """MLFlow logger.

    Requires the experiment configuration to have a MLFlow Experiment ID
    or manually set the proper environment variables.
    """

    def _init(self):
        from mlflow.tracking import MlflowClient
        mlflow_client = MlflowClient()
        run = mlflow_client.create_run(self.config.get("mlflow_experiment_id"))
        self._run_id = run.info.run_id
        # Mirror the full trial config into MLflow as run parameters.
        for key, value in self.config.items():
            mlflow_client.log_param(self._run_id, key, value)
        self.client = mlflow_client

    def on_result(self, result):
        step = result.get(TRAINING_ITERATION)
        for key, value in result.items():
            # MLflow metrics must be numeric; skip everything non-float.
            if isinstance(value, float):
                self.client.log_metric(self._run_id, key, value, step=step)

    def close(self):
        self.client.set_terminated(self._run_id)
class JsonLogger(Logger):
    """Logs trial results as newline-delimited JSON (one object per line).

    The trial config is additionally persisted as JSON and as a pickle
    whenever it is (re)set via `update_config`.
    """

    def _init(self):
        self.update_config(self.config)
        result_path = os.path.join(self.logdir, EXPR_RESULT_FILE)
        self.local_out = open(result_path, "a")

    def on_result(self, result):
        # `self` is passed as the file-like object; see `write` below.
        json.dump(result, self, cls=_SafeFallbackEncoder)
        self.write("\n")
        self.local_out.flush()

    def write(self, b):
        self.local_out.write(b)

    def flush(self):
        self.local_out.flush()

    def close(self):
        self.local_out.close()

    def update_config(self, config):
        self.config = config
        config_out = os.path.join(self.logdir, EXPR_PARAM_FILE)
        with open(config_out, "w") as out:
            json.dump(
                self.config,
                out,
                indent=2,
                sort_keys=True,
                cls=_SafeFallbackEncoder)
        config_pkl = os.path.join(self.logdir, EXPR_PARAM_PICKLE_FILE)
        with open(config_pkl, "wb") as out:
            cloudpickle.dump(self.config, out)
def tf2_compat_logger(config, logdir, trial=None):
    """Chooses TensorBoard logger depending on imported TF version.

    Returns TF2Logger for TF >= 1.15 with v2 behavior enabled, otherwise
    TFLogger (also pinning the matching compat API on the module-global
    `tf`).
    """
    global tf
    if "RLLIB_TEST_NO_TF_IMPORT" in os.environ:
        logger.warning("Not importing TensorFlow for test purposes")
        tf = None
        raise RuntimeError("Not importing TensorFlow for test purposes")
    # The guard clause above raises, so the import can happen unconditionally.
    import tensorflow as tf
    use_tf2_api = (distutils.version.LooseVersion(tf.__version__) >=
                   distutils.version.LooseVersion("1.15.0"))
    if not use_tf2_api:
        return TFLogger(config, logdir, trial)
    # This is temporarily for RLlib because it disables v2 behavior...
    from tensorflow.python import tf2
    if not tf2.enabled():
        tf = tf.compat.v1
        return TFLogger(config, logdir, trial)
    tf = tf.compat.v2  # setting this for TF2.0
    return TF2Logger(config, logdir, trial)
class TF2Logger(Logger):
    """TensorBoard Logger for TF version >= 2.0.0.
    Automatically flattens nested dicts to show on TensorBoard:
        {"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
    If you need to do more advanced logging, it is recommended
    to use a Summary Writer in the Trainable yourself.
    """
    def _init(self):
        # TF is imported lazily (and pinned to the v2 compat API on the
        # module-global `tf`) so this module loads without TensorFlow.
        global tf
        if tf is None:
            import tensorflow as tf
            tf = tf.compat.v2  # setting this for TF2.0
        # The file writer is created on the first result (see on_result).
        self._file_writer = None
        # Hyperparameters are written at most once per trial.
        self._hp_logged = False
    def on_result(self, result):
        """Writes scalar summaries for one result dict."""
        if self._file_writer is None:
            from tensorflow.python.eager import context
            from tensorboard.plugins.hparams import api as hp
            self._context = context
            self._file_writer = tf.summary.create_file_writer(self.logdir)
        with tf.device("/CPU:0"):
            with tf.summary.record_if(True), self._file_writer.as_default():
                # Prefer total timesteps as the step axis; fall back to the
                # training iteration.
                step = result.get(
                    TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
                tmp = result.copy()
                if not self._hp_logged:
                    if self.trial and self.trial.evaluated_params:
                        try:
                            hp.hparams(
                                self.trial.evaluated_params,
                                trial_id=self.trial.trial_id)
                        except Exception as exc:
                            logger.error("HParams failed with %s", exc)
                    self._hp_logged = True
                for k in [
                        "config", "pid", "timestamp", TIME_TOTAL_S,
                        TRAINING_ITERATION
                ]:
                    if k in tmp:
                        del tmp[k]  # not useful to log these
                flat_result = flatten_dict(tmp, delimiter="/")
                path = ["ray", "tune"]
                for attr, value in flat_result.items():
                    if type(value) in VALID_SUMMARY_TYPES:
                        tf.summary.scalar(
                            "/".join(path + [attr]), value, step=step)
        self._file_writer.flush()
    def flush(self):
        if self._file_writer is not None:
            self._file_writer.flush()
    def close(self):
        if self._file_writer is not None:
            self._file_writer.close()
def to_tf_values(result, path):
    """Converts a result dict into a list of TF1 scalar Summary.Values.

    The result is flattened with "/" delimiters and non-scalar values are
    skipped.
    """
    values = []
    for attr, value in flatten_dict(result, delimiter="/").items():
        if type(value) in VALID_SUMMARY_TYPES:
            tag = "/".join(path + [attr])
            values.append(tf.Summary.Value(tag=tag, simple_value=value))
    return values
class TFLogger(Logger):
    """TensorBoard Logger for TF version < 2.0.0.
    Automatically flattens nested dicts to show on TensorBoard:
        {"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
    If you need to do more advanced logging, it is recommended
    to use a Summary Writer in the Trainable yourself.
    """
    def _init(self):
        # TF is imported lazily and the v1 compat API is pinned on the
        # module-global `tf`.
        global tf
        if tf is None:
            import tensorflow as tf
            tf = tf.compat.v1  # setting this for regular TF logger
        logger.debug("Initializing TFLogger instead of TF2Logger.")
        self._file_writer = tf.summary.FileWriter(self.logdir)
    def on_result(self, result):
        """Writes one result as TF1 summaries (metrics plus iteration)."""
        tmp = result.copy()
        for k in [
                "config", "pid", "timestamp", TIME_TOTAL_S, TRAINING_ITERATION
        ]:
            if k in tmp:
                del tmp[k]  # not useful to tf log these
        values = to_tf_values(tmp, ["ray", "tune"])
        train_stats = tf.Summary(value=values)
        # Prefer total timesteps as the step axis; fall back to iteration.
        t = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
        self._file_writer.add_summary(train_stats, t)
        # Also log the raw training iteration so it remains plottable even
        # when timesteps are used as the step axis.
        iteration_value = to_tf_values({
            TRAINING_ITERATION: result[TRAINING_ITERATION]
        }, ["ray", "tune"])
        iteration_stats = tf.Summary(value=iteration_value)
        self._file_writer.add_summary(iteration_stats, t)
        self._file_writer.flush()
    def flush(self):
        self._file_writer.flush()
    def close(self):
        self._file_writer.close()
class CSVLogger(Logger):
    """Logs results to progress.csv under the trial directory.

    Automatically flattens nested dicts in the result dict before writing
    to csv:
        {"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
    """

    def _init(self):
        """Opens the CSV in append mode; header is written with the first
        result unless the file already existed (trial restore)."""
        path = os.path.join(self.logdir, EXPR_PROGRESS_FILE)
        self._continuing = os.path.exists(path)
        self._file = open(path, "a")
        self._csv_out = None

    def on_result(self, result):
        tmp = dict(result)
        tmp.pop("config", None)
        row = flatten_dict(tmp, delimiter="/")
        if self._csv_out is None:
            # Columns are fixed by the first result seen; keys appearing
            # only in later results are dropped below.
            self._csv_out = csv.DictWriter(self._file, row.keys())
            if not self._continuing:
                self._csv_out.writeheader()
        fieldnames = self._csv_out.fieldnames
        self._csv_out.writerow(
            {k: v
             for k, v in row.items() if k in fieldnames})
        self._file.flush()

    def flush(self):
        self._file.flush()

    def close(self):
        self._file.close()
class TBXLogger(Logger):
    """TensorBoardX Logger.

    Note that hparams will be written only after a trial has terminated.
    This logger automatically flattens nested dicts to show on TensorBoard:
        {"a": {"b": 1, "c": 2}} -> {"a/b": 1, "a/c": 2}
    """

    def _init(self):
        try:
            from tensorboardX import SummaryWriter
        except ImportError:
            logger.error("pip install 'ray[tune]' to see TensorBoard files.")
            raise
        self._file_writer = SummaryWriter(self.logdir, flush_secs=30)
        self.last_result = None

    def on_result(self, result):
        step = result.get(TIMESTEPS_TOTAL) or result[TRAINING_ITERATION]
        scrubbed = result.copy()
        # These keys are redundant or non-numeric for scalar plots.
        for key in ("config", "pid", "timestamp", TIME_TOTAL_S,
                    TRAINING_ITERATION):
            scrubbed.pop(key, None)
        flat_result = flatten_dict(scrubbed, delimiter="/")
        prefix = ["ray", "tune"]
        valid_result = {}
        for attr, value in flat_result.items():
            if type(value) in VALID_SUMMARY_TYPES:
                full_tag = "/".join(prefix + [attr])
                valid_result[full_tag] = value
                self._file_writer.add_scalar(
                    full_tag, value, global_step=step)
        # Remember the scalars for the hparams summary written on close().
        self.last_result = valid_result
        self._file_writer.flush()

    def flush(self):
        if self._file_writer is not None:
            self._file_writer.flush()

    def close(self):
        if self._file_writer is not None:
            # hparams need a final result; only written once, at shutdown.
            if self.trial and self.trial.evaluated_params and self.last_result:
                self._try_log_hparams(self.last_result)
            self._file_writer.close()

    def _try_log_hparams(self, result):
        # TBX currently errors if the hparams value is None.
        scrubbed_params = {
            k: v
            for k, v in self.trial.evaluated_params.items() if v is not None
        }
        from tensorboardX.summary import hparams
        experiment_tag, session_start_tag, session_end_tag = hparams(
            hparam_dict=scrubbed_params, metric_dict=result)
        self._file_writer.file_writer.add_summary(experiment_tag)
        self._file_writer.file_writer.add_summary(session_start_tag)
        self._file_writer.file_writer.add_summary(session_end_tag)
# Loggers attached to every trial by UnifiedLogger unless overridden.
DEFAULT_LOGGERS = (JsonLogger, CSVLogger, TBXLogger)
class UnifiedLogger(Logger):
    """Unified result logger for TensorBoard, rllab/viskit, plain json.

    Arguments:
        config: Configuration passed to all logger creators.
        logdir: Directory for all logger creators to log to.
        loggers (list): List of logger creators. Defaults to CSV, Tensorboard,
            and JSON loggers.
        sync_function (func|str): Optional function for syncer to run.
            See ray/python/ray/tune/syncer.py
    """

    def __init__(self,
                 config,
                 logdir,
                 trial=None,
                 loggers=None,
                 sync_function=None):
        self._logger_cls_list = DEFAULT_LOGGERS if loggers is None else loggers
        self._sync_function = sync_function
        self._log_syncer = None
        super(UnifiedLogger, self).__init__(config, logdir, trial)

    def _init(self):
        # Instantiate each sub-logger; one failing to construct must not
        # prevent the others from working.
        self._loggers = []
        for logger_cls in self._logger_cls_list:
            try:
                self._loggers.append(
                    logger_cls(self.config, self.logdir, self.trial))
            except Exception as exc:
                logger.warning("Could not instantiate %s: %s.",
                               logger_cls.__name__, str(exc))
        self._log_syncer = get_node_syncer(
            self.logdir,
            remote_dir=self.logdir,
            sync_function=self._sync_function)

    def on_result(self, result):
        for sub_logger in self._loggers:
            sub_logger.on_result(result)
        self._log_syncer.set_worker_ip(result.get(NODE_IP))
        self._log_syncer.sync_down_if_needed()

    def update_config(self, config):
        for sub_logger in self._loggers:
            sub_logger.update_config(config)

    def close(self):
        for sub_logger in self._loggers:
            sub_logger.close()

    def flush(self, sync_down=True):
        for sub_logger in self._loggers:
            sub_logger.flush()
        if sync_down and not self._log_syncer.sync_down():
            logger.warning("Trial %s: Post-flush sync skipped.", self.trial)

    def sync_up(self):
        return self._log_syncer.sync_up()

    def sync_down(self):
        return self._log_syncer.sync_down()

    def wait(self):
        self._log_syncer.wait()

    def sync_results_to_new_location(self, worker_ip):
        """Sends the current log directory to the remote node.

        Syncing will not occur if the cluster is not started
        with the Ray autoscaler.
        """
        if worker_ip == self._log_syncer.worker_ip:
            logger.error(
                "Trial %s: Sync attempted to same IP %s. This "
                "should not occur.", self.trial, worker_ip)
            return
        logger.info("Trial %s: Syncing (blocking) results to %s",
                    self.trial, worker_ip)
        self._log_syncer.reset()
        self._log_syncer.set_worker_ip(worker_ip)
        if not self._log_syncer.sync_up():
            logger.error(
                "Trial %s: Sync up to new location skipped. "
                "This should not occur.", self.trial)
        self._log_syncer.wait()
class _SafeFallbackEncoder(json.JSONEncoder):
def __init__(self, nan_str="null", **kwargs):
super(_SafeFallbackEncoder, self).__init__(**kwargs)
self.nan_str = nan_str
def default(self, value):
try:
if np.isnan(value):
return self.nan_str
if (type(value).__module__ == np.__name__
and isinstance(value, np.ndarray)):
return value.tolist()
if issubclass(type(value), numbers.Integral):
return int(value)
if issubclass(type(value), numbers.Number):
return float(value)
return super(_SafeFallbackEncoder, self).default(value)
except Exception:
return str(value) # give up, just stringify it (ok for logs)
def pretty_print(result):
    """Renders a result dict as YAML, dropping config and None values."""
    result = result.copy()
    result.update(config=None)  # drop config from pretty print
    out = {k: v for k, v in result.items() if v is not None}
    # Round-trip through the fallback encoder to coerce odd values
    # (numpy scalars, NaN, ...) into plain JSON-safe types first.
    cleaned = json.dumps(out, cls=_SafeFallbackEncoder)
    return yaml.safe_dump(json.loads(cleaned), default_flow_style=False)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/progress_reporter.py
|
Python
|
from __future__ import print_function
import collections
from ray.tune.result import (DEFAULT_RESULT_KEYS, CONFIG_PREFIX,
EPISODE_REWARD_MEAN, MEAN_ACCURACY, MEAN_LOSS,
TRAINING_ITERATION, TIME_TOTAL_S, TIMESTEPS_TOTAL)
from ray.tune.utils import flatten_dict
try:
from tabulate import tabulate
except ImportError:
raise ImportError("ray.tune in ray > 0.7.5 requires 'tabulate'. "
"Please re-run 'pip install ray[tune]' or "
"'pip install ray[rllib]'.")
# Metric columns shown in the trial table when the caller does not pass
# an explicit `metrics` list (see trial_progress_str).
DEFAULT_PROGRESS_KEYS = DEFAULT_RESULT_KEYS + (EPISODE_REWARD_MEAN, )
# Truncated representations of column names (to accommodate small screens).
REPORTED_REPRESENTATIONS = {
    EPISODE_REWARD_MEAN: "reward",
    MEAN_ACCURACY: "acc",
    MEAN_LOSS: "loss",
    TIME_TOTAL_S: "total time (s)",
    TIMESTEPS_TOTAL: "timesteps",
    TRAINING_ITERATION: "iter",
}
class ProgressReporter:
    """Abstract interface for reporting trial-runner progress."""

    # TODO(ujvl): Expose ProgressReporter in tune.run for custom reporting.
    def report(self, trial_runner):
        """Reports progress across all trials of the trial runner.

        Args:
            trial_runner: Trial runner to report on.
        """
        raise NotImplementedError
class JupyterNotebookReporter(ProgressReporter):
    """Reporter that renders HTML progress inside a Jupyter notebook."""

    def __init__(self, overwrite):
        """Initializes a new JupyterNotebookReporter.

        Args:
            overwrite (bool): Flag for overwriting the last reported progress.
        """
        self.overwrite = overwrite

    def report(self, trial_runner):
        separator = "<br>"
        all_trials = trial_runner.get_trials()
        sections = [
            "== Status ==",
            memory_debug_str(),
            trial_runner.scheduler_alg.debug_string(),
            trial_runner.trial_executor.debug_string(),
            trial_progress_str(all_trials, fmt="html"),
            trial_errors_str(trial_runner.get_trials(), fmt="html"),
        ]
        from IPython.display import clear_output
        from IPython.core.display import display, HTML
        if self.overwrite:
            clear_output(wait=True)
        display(HTML(separator.join(sections) + separator))
class CLIReporter(ProgressReporter):
    """Reporter that prints plain-text progress to stdout."""

    def report(self, trial_runner):
        sections = [
            "== Status ==",
            memory_debug_str(),
            trial_runner.scheduler_alg.debug_string(),
            trial_runner.trial_executor.debug_string(),
            trial_progress_str(trial_runner.get_trials()),
            trial_errors_str(trial_runner.get_trials()),
        ]
        print("\n".join(sections) + "\n")
def memory_debug_str():
    """Returns a one-line summary of memory usage on this node.

    Falls back to an install hint when `psutil` is not available.
    """
    try:
        import psutil
        # Take a single snapshot so total/available are consistent.
        vm = psutil.virtual_memory()
        total_gb = vm.total / (1024**3)
        used_gb = total_gb - vm.available / (1024**3)
        if used_gb > total_gb * 0.9:
            warn = (": ***LOW MEMORY*** less than 10% of the memory on "
                    "this node is available for use. This can cause "
                    "unexpected crashes. Consider "
                    "reducing the memory used by your application "
                    "or reducing the Ray object store size by setting "
                    "`object_store_memory` when calling `ray.init`.")
        else:
            warn = ""
        return "Memory usage on this node: {}/{} GiB{}".format(
            round(used_gb, 1), round(total_gb, 1), warn)
    except ImportError:
        # Fixed: the fallback message previously ended with an unbalanced ")".
        return ("Unknown memory usage. Please run `pip install psutil` "
                "(or ray[debug]) to resolve this message.")
def trial_progress_str(trials, metrics=None, fmt="psql", max_rows=20):
    """Returns a human readable message for printing to the console.
    This contains a table where each row represents a trial, its parameters
    and the current values of its metrics.
    Args:
        trials (List[Trial]): List of trials to get progress string for.
        metrics (List[str]): Names of metrics to include. Defaults to
            metrics defined in DEFAULT_RESULT_KEYS.
        fmt (str): Output format (see tablefmt in tabulate API).
        max_rows (int): Maximum number of rows in the trial table.
    """
    messages = []
    delim = "<br>" if fmt == "html" else "\n"
    if len(trials) < 1:
        return delim.join(messages)
    num_trials = len(trials)
    # Group trials by status for the summary line and for fair truncation.
    trials_by_state = collections.defaultdict(list)
    for t in trials:
        trials_by_state[t.status].append(t)
    for local_dir in sorted({t.local_dir for t in trials}):
        messages.append("Result logdir: {}".format(local_dir))
    num_trials_strs = [
        "{} {}".format(len(trials_by_state[state]), state)
        for state in trials_by_state
    ]
    messages.append("Number of trials: {} ({})".format(
        num_trials, ", ".join(num_trials_strs)))
    if num_trials > max_rows:
        # TODO(ujvl): suggestion for users to view more rows.
        # Keep a fair sample of each state rather than just the first rows.
        trials_by_state_trunc = _fair_filter_trials(trials_by_state, max_rows)
        trials = []
        overflow_strs = []
        for state in trials_by_state:
            trials += trials_by_state_trunc[state]
            overflow = len(trials_by_state[state]) - len(
                trials_by_state_trunc[state])
            overflow_strs.append("{} {}".format(overflow, state))
        # Build overflow string.
        overflow = num_trials - max_rows
        overflow_str = ", ".join(overflow_strs)
        messages.append("Table truncated to {} rows. {} trials ({}) not "
                        "shown.".format(max_rows, overflow, overflow_str))
    # Pre-process trials to figure out what columns to show.
    keys = list(metrics or DEFAULT_PROGRESS_KEYS)
    # Only keep metric columns that at least one (shown) trial has reported
    # with a truthy value.
    keys = [k for k in keys if any(t.last_result.get(k) for t in trials)]
    # Build trial rows.
    params = list(set().union(*[t.evaluated_params for t in trials]))
    trial_table = [_get_trial_info(trial, params, keys) for trial in trials]
    # Parse columns.
    parsed_columns = [REPORTED_REPRESENTATIONS.get(k, k) for k in keys]
    columns = ["Trial name", "status", "loc"]
    columns += params + parsed_columns
    messages.append(
        tabulate(trial_table, headers=columns, tablefmt=fmt, showindex=False))
    return delim.join(messages)
def trial_errors_str(trials, fmt="psql", max_rows=20):
    """Returns a readable message regarding trial errors.

    Args:
        trials (List[Trial]): List of trials to get progress string for.
        fmt (str): Output format (see tablefmt in tabulate API).
        max_rows (int): Maximum number of rows in the error table.
    """
    delim = "<br>" if fmt == "html" else "\n"
    failed = [t for t in trials if t.error_file]
    num_failed = len(failed)
    if num_failed == 0:
        return ""
    messages = ["Number of errored trials: {}".format(num_failed)]
    if num_failed > max_rows:
        messages.append("Table truncated to {} rows ({} overflow)".format(
            max_rows, num_failed - max_rows))
    error_rows = [[str(t), t.num_failures, t.error_file]
                  for t in failed[:max_rows]]
    columns = ["Trial name", "# failures", "error file"]
    messages.append(
        tabulate(error_rows, headers=columns, tablefmt=fmt, showindex=False))
    return delim.join(messages)
def _fair_filter_trials(trials_by_state, max_trials):
"""Filters trials such that each state is represented fairly.
The oldest trials are truncated if necessary.
Args:
trials_by_state (Dict[str, List[Trial]]: Trials by state.
max_trials (int): Maximum number of trials to return.
Returns:
Dict mapping state to List of fairly represented trials.
"""
num_trials_by_state = collections.defaultdict(int)
no_change = False
# Determine number of trials to keep per state.
while max_trials > 0 and not no_change:
no_change = True
for state in trials_by_state:
if num_trials_by_state[state] < len(trials_by_state[state]):
no_change = False
max_trials -= 1
num_trials_by_state[state] += 1
# Sort by start time, descending.
sorted_trials_by_state = {
state: sorted(
trials_by_state[state],
reverse=True,
key=lambda t: t.start_time if t.start_time else float("-inf"))
for state in trials_by_state
}
# Truncate oldest trials.
filtered_trials = {
state: sorted_trials_by_state[state][:num_trials_by_state[state]]
for state in trials_by_state
}
return filtered_trials
def _get_trial_info(trial, parameters, metrics):
    """Return a table row for *trial*:

    name | status | loc | params... | metrics...

    Args:
        trial (Trial): Trial to get information for.
        parameters (List[str]): Names of trial parameters to include.
        metrics (List[str]): Names of metrics to include.
    """
    latest = flatten_dict(trial.last_result)
    row = [str(trial), trial.status, str(trial.location)]
    # Missing params/metrics come back as None, keeping columns aligned.
    row.extend(latest.get(CONFIG_PREFIX + p) for p in parameters)
    row.extend(latest.get(m) for m in metrics)
    return row
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/ray_trial_executor.py
|
Python
|
# coding: utf-8
import logging
import os
import random
import time
import traceback
from contextlib import contextmanager
import ray
from ray.exceptions import RayTimeoutError
from ray import ray_constants
from ray.resource_spec import ResourceSpec
from ray.tune.durable_trainable import DurableTrainable
from ray.tune.error import AbortTrialExecution, TuneError
from ray.tune.logger import NoopLogger
from ray.tune.resources import Resources
from ray.tune.trainable import TrainableUtil
from ray.tune.trial import Trial, Checkpoint, Location
from ray.tune.trial_executor import TrialExecutor
from ray.tune.utils import warn_if_slow
logger = logging.getLogger(__name__)

# Refresh resources every 500 ms
RESOURCE_REFRESH_PERIOD = 0.5

# Warn when the Tune event loop has been backlogged for this many seconds.
BOTTLENECK_WARN_PERIOD_S = 60

# ray.wait durations above this (seconds) count as a nontrivial wait.
NONTRIVIAL_WAIT_TIME_THRESHOLD_S = 1e-3

# Default timeout for blocking ray.get calls in this module.
DEFAULT_GET_TIMEOUT = 30.0  # seconds
class _LocalWrapper:
def __init__(self, result):
self._result = result
def unwrap(self):
"""Returns the wrapped result."""
return self._result
class RayTrialExecutor(TrialExecutor):
    """An implementation of TrialExecutor based on Ray.

    Maps in-flight remote training futures to Trials, manages actor
    lifecycle (creation, reuse, teardown), and keeps bookkeeping of the
    cluster resources committed to running trials.
    """

    def __init__(self,
                 queue_trials=False,
                 reuse_actors=False,
                 ray_auto_init=False,
                 refresh_period=RESOURCE_REFRESH_PERIOD):
        super(RayTrialExecutor, self).__init__(queue_trials)
        # Check for if we are launching a trial without resources in kick off
        # autoscaler.
        self._trial_queued = False
        # Maps remote futures (or _LocalWrapper in local mode) -> Trial.
        self._running = {}
        # Since trial resume after paused should not run
        # trial.train.remote(), thus no more new remote object id generated.
        # We use self._paused to store paused trials here.
        self._paused = {}
        self._reuse_actors = reuse_actors
        self._cached_actor = None
        self._avail_resources = Resources(cpu=0, gpu=0)
        self._committed_resources = Resources(cpu=0, gpu=0)
        self._resources_initialized = False
        self._refresh_period = refresh_period
        self._last_resource_refresh = float("-inf")
        self._last_nontrivial_wait = time.time()
        if not ray.is_initialized() and ray_auto_init:
            # FIX: original message lacked a space between sentences
            # ("automatically.For ...").
            logger.info("Initializing Ray automatically. "
                        "For cluster usage or custom Ray initialization, "
                        "call `ray.init(...)` before `tune.run`.")
            ray.init()

        if ray.is_initialized():
            self._update_avail_resources()

    def _setup_remote_runner(self, trial, reuse_allowed):
        """Creates (or reuses) the remote Trainable actor for `trial`."""
        trial.init_logger()
        # We checkpoint metadata here to try mitigating logdir duplication
        self.try_checkpoint_metadata(trial)
        remote_logdir = trial.logdir

        if (self._reuse_actors and reuse_allowed
                and self._cached_actor is not None):
            logger.debug("Trial %s: Reusing cached runner %s", trial,
                         self._cached_actor)
            existing_runner = self._cached_actor
            self._cached_actor = None
            trial.set_runner(existing_runner)
            if not self.reset_trial(trial, trial.config, trial.experiment_tag):
                raise AbortTrialExecution(
                    "Trainable runner reuse requires reset_config() to be "
                    "implemented and return True.")
            return existing_runner

        if self._cached_actor:
            # The cached actor cannot serve this trial; tear it down.
            logger.debug("Cannot reuse cached runner {} for new trial".format(
                self._cached_actor))
            with self._change_working_directory(trial):
                self._cached_actor.stop.remote()
                self._cached_actor.__ray_terminate__.remote()
            self._cached_actor = None

        cls = ray.remote(
            num_cpus=trial.resources.cpu,
            num_gpus=trial.resources.gpu,
            memory=trial.resources.memory,
            object_store_memory=trial.resources.object_store_memory,
            resources=trial.resources.custom_resources)(
                trial.get_trainable_cls())

        def logger_creator(config):
            # Set the working dir in the remote process, for user file writes
            os.makedirs(remote_logdir, exist_ok=True)
            if not ray.worker._mode() == ray.worker.LOCAL_MODE:
                os.chdir(remote_logdir)
            return NoopLogger(config, remote_logdir)

        # Clear the Trial's location (to be updated later on result)
        # since we don't know where the remote runner is placed.
        trial.set_location(Location())
        logger.debug("Trial %s: Setting up new remote runner.", trial)
        # Logging for trials is handled centrally by TrialRunner, so
        # configure the remote runner to use a noop-logger.
        kwargs = {
            "config": trial.config,
            "logger_creator": logger_creator,
        }
        if issubclass(trial.get_trainable_cls(), DurableTrainable):
            kwargs["remote_checkpoint_dir"] = trial.remote_checkpoint_dir
        with self._change_working_directory(trial):
            return cls.remote(**kwargs)

    def _train(self, trial):
        """Start one iteration of training and save remote id."""

        if self._find_item(self._paused, trial):
            raise TuneError(
                "Should not call `train` on PAUSED trial {}. "
                "This is an internal error - please file an issue "
                "on https://github.com/ray-project/ray/issues/.".format(
                    str(trial)))

        if self._find_item(self._running, trial):
            # FIX: the original called `logging.debug`, which goes to the
            # root logger; use this module's `logger` like everywhere else.
            logger.debug(
                "Trial {} already has a queued future. Skipping this "
                "`train` call. This may occur if a trial has "
                "been unpaused within a scheduler callback.".format(
                    str(trial)))
            return

        assert trial.status == Trial.RUNNING, trial.status
        with self._change_working_directory(trial):
            remote = trial.runner.train.remote()

        # Local Mode
        if isinstance(remote, dict):
            remote = _LocalWrapper(remote)

        self._running[remote] = trial
        trial_item = self._find_item(self._running, trial)
        assert len(trial_item) < 2, trial_item

    def _start_trial(self, trial, checkpoint=None, runner=None):
        """Starts trial and restores last result if trial was paused.

        Args:
            trial (Trial): The trial to start.
            checkpoint (Optional[Checkpoint]): The checkpoint to restore from.
                If None, and no trial checkpoint exists, the trial is started
                from the beginning.
            runner (Trainable): The remote runner to use. This can be the
                cached actor. If None, a new runner is created.

        See `RayTrialExecutor.restore` for possible errors raised.
        """
        prior_status = trial.status
        self.set_status(trial, Trial.RUNNING)
        trial.set_runner(
            runner or self._setup_remote_runner(
                trial,
                reuse_allowed=checkpoint is not None
                or trial.has_checkpoint()))
        self.restore(trial, checkpoint)

        previous_run = self._find_item(self._paused, trial)
        if prior_status == Trial.PAUSED and previous_run:
            # If Trial was in flight when paused, self._paused stores result.
            self._paused.pop(previous_run[0])
            self._running[previous_run[0]] = trial
        elif not trial.is_restoring:
            self._train(trial)

    def _stop_trial(self, trial, error=False, error_msg=None,
                    stop_logger=True):
        """Stops this trial.

        Stops this trial, releasing all allocating resources. If stopping the
        trial fails, the run will be marked as terminated in error, but no
        exception will be thrown.

        Args:
            error (bool): Whether to mark this trial as terminated in error.
            error_msg (str): Optional error message.
            stop_logger (bool): Whether to shut down the trial logger.
        """
        if stop_logger:
            trial.close_logger()

        self.set_status(trial, Trial.ERROR if error else Trial.TERMINATED)
        trial.set_location(Location())

        try:
            trial.write_error_log(error_msg)
            if hasattr(trial, "runner") and trial.runner:
                if (not error and self._reuse_actors
                        and self._cached_actor is None):
                    logger.debug("Reusing actor for %s", trial.runner)
                    self._cached_actor = trial.runner
                else:
                    logger.debug("Trial %s: Destroying actor.", trial)
                    with self._change_working_directory(trial):
                        trial.runner.stop.remote()
                        trial.runner.__ray_terminate__.remote()
        except Exception:
            logger.exception("Trial %s: Error stopping runner.", trial)
            self.set_status(trial, Trial.ERROR)
        finally:
            trial.set_runner(None)

    def start_trial(self, trial, checkpoint=None):
        """Starts the trial.

        Will not return resources if trial repeatedly fails on start.

        Args:
            trial (Trial): Trial to be started.
            checkpoint (Checkpoint): A Python object or path storing the state
                of trial.
        """
        self._commit_resources(trial.resources)
        try:
            self._start_trial(trial, checkpoint)
        except AbortTrialExecution:
            logger.exception("Trial %s: Error starting runner, aborting!",
                             trial)
            time.sleep(2)
            error_msg = traceback.format_exc()
            self._stop_trial(trial, error=True, error_msg=error_msg)
        except Exception:
            logger.exception("Trial %s: Unexpected error starting runner.",
                             trial)
            time.sleep(2)
            error_msg = traceback.format_exc()
            self._stop_trial(trial, error=True, error_msg=error_msg)
            # Note that we don't return the resources, since they may
            # have been lost. TODO(ujvl): is this the right thing to do?

    def _find_item(self, dictionary, item):
        """Returns the keys of `dictionary` whose value is `item` (identity)."""
        out = [rid for rid, t in dictionary.items() if t is item]
        return out

    def stop_trial(self, trial, error=False, error_msg=None, stop_logger=True):
        """Only returns resources if resources allocated."""
        prior_status = trial.status
        self._stop_trial(
            trial, error=error, error_msg=error_msg, stop_logger=stop_logger)
        if prior_status == Trial.RUNNING:
            logger.debug("Trial %s: Returning resources.", trial)
            self._return_resources(trial.resources)
            out = self._find_item(self._running, trial)
            for result_id in out:
                self._running.pop(result_id)

    def continue_training(self, trial):
        """Continues the training of this trial."""
        self._train(trial)

    def pause_trial(self, trial):
        """Pauses the trial.

        If trial is in-flight, preserves return value in separate queue
        before pausing, which is restored when Trial is resumed.
        """
        trial_future = self._find_item(self._running, trial)
        if trial_future:
            self._paused[trial_future[0]] = trial
        super(RayTrialExecutor, self).pause_trial(trial)

    def reset_trial(self, trial, new_config, new_experiment_tag):
        """Tries to invoke `Trainable.reset_config()` to reset trial.

        Args:
            trial (Trial): Trial to be reset.
            new_config (dict): New configuration for Trial
                trainable.
            new_experiment_tag (str): New experiment name
                for trial.

        Returns:
            True if `reset_config` is successful else False.
        """
        trial.experiment_tag = new_experiment_tag
        trial.config = new_config
        trainable = trial.runner
        with self._change_working_directory(trial):
            with warn_if_slow("reset_config"):
                try:
                    reset_val = ray.get(
                        trainable.reset_config.remote(new_config),
                        DEFAULT_GET_TIMEOUT)
                except RayTimeoutError:
                    logger.exception("Trial %s: reset_config timed out.",
                                     trial)
                    return False
        return reset_val

    def get_running_trials(self):
        """Returns the running trials."""
        return list(self._running.values())

    def get_alive_node_ips(self):
        """Returns the IPs of all currently alive Ray nodes."""
        nodes = ray.state.nodes()
        ip_addresses = set()
        for node in nodes:
            if node["alive"]:
                ip_addresses.add(node["NodeManagerAddress"])
        return ip_addresses

    def get_current_trial_ips(self):
        """Returns the node IPs of all running trials."""
        return {t.node_ip for t in self.get_running_trials()}

    def get_next_failed_trial(self):
        """Gets the first trial found to be running on a node presumed dead.

        Returns:
            A Trial object that is ready for failure processing. None if
            no failure detected.
        """
        if ray.worker._mode() != ray.worker.LOCAL_MODE:
            live_cluster_ips = self.get_alive_node_ips()
            if live_cluster_ips - self.get_current_trial_ips():
                for trial in self.get_running_trials():
                    if trial.node_ip and trial.node_ip not in live_cluster_ips:
                        return trial
        return None

    def get_next_available_trial(self):
        """Blocks until a result future completes and returns its trial."""
        shuffled_results = list(self._running.keys())
        random.shuffle(shuffled_results)
        # Note: We shuffle the results because `ray.wait` by default returns
        # the first available result, and we want to guarantee that slower
        # trials (i.e. trials that run remotely) also get fairly reported.
        # See https://github.com/ray-project/ray/issues/4211 for details.
        start = time.time()
        [result_id], _ = ray.wait(shuffled_results)
        wait_time = time.time() - start
        if wait_time > NONTRIVIAL_WAIT_TIME_THRESHOLD_S:
            self._last_nontrivial_wait = time.time()
        if time.time() - self._last_nontrivial_wait > BOTTLENECK_WARN_PERIOD_S:
            logger.warning(
                "Over the last {} seconds, the Tune event loop has been "
                "backlogged processing new results. Consider increasing your "
                "period of result reporting to improve performance.".format(
                    BOTTLENECK_WARN_PERIOD_S))

            self._last_nontrivial_wait = time.time()
        return self._running[result_id]

    def fetch_result(self, trial):
        """Fetches one result of the running trials.

        Returns:
            Result of the most recent trial training run.
        """
        trial_future = self._find_item(self._running, trial)
        if not trial_future:
            raise ValueError("Trial was not running.")
        self._running.pop(trial_future[0])
        with warn_if_slow("fetch_result"):
            result = ray.get(trial_future[0], DEFAULT_GET_TIMEOUT)

        # For local mode
        if isinstance(result, _LocalWrapper):
            result = result.unwrap()
        return result

    def _commit_resources(self, resources):
        """Adds `resources` to the committed (in-use) resource tally."""
        committed = self._committed_resources
        all_keys = set(resources.custom_resources).union(
            set(committed.custom_resources))

        custom_resources = {
            k: committed.get(k) + resources.get_res_total(k)
            for k in all_keys
        }

        self._committed_resources = Resources(
            committed.cpu + resources.cpu_total(),
            committed.gpu + resources.gpu_total(),
            committed.memory + resources.memory_total(),
            committed.object_store_memory +
            resources.object_store_memory_total(),
            custom_resources=custom_resources)

    def _return_resources(self, resources):
        """Releases `resources` back from the committed resource tally.

        FIX: the original only subtracted CPU/GPU/custom resources;
        memory and object_store_memory were added in `_commit_resources`
        but never released, so long runs with memory-reserving trials
        would gradually starve `has_resources`.
        """
        committed = self._committed_resources
        all_keys = set(resources.custom_resources).union(
            set(committed.custom_resources))

        custom_resources = {
            k: committed.get(k) - resources.get_res_total(k)
            for k in all_keys
        }
        self._committed_resources = Resources(
            committed.cpu - resources.cpu_total(),
            committed.gpu - resources.gpu_total(),
            committed.memory - resources.memory_total(),
            committed.object_store_memory -
            resources.object_store_memory_total(),
            custom_resources=custom_resources)

        assert self._committed_resources.is_nonnegative(), (
            "Resource invalid: {}".format(resources))

    def _update_avail_resources(self, num_retries=5):
        """Refreshes `self._avail_resources` from the cluster state."""
        resources = None
        for i in range(num_retries):
            try:
                resources = ray.cluster_resources()
            except Exception:
                # TODO(rliaw): Remove this when local mode is fixed.
                # https://github.com/ray-project/ray/issues/4147
                logger.debug("Using resources for local machine.")
                resources = ResourceSpec().resolve(True).to_resource_dict()
            if resources:
                # FIX: stop retrying once a non-empty resource dict is
                # obtained (the original always looped num_retries times).
                break
            logger.warning(
                "Cluster resources not detected or are 0. Retrying...")
            time.sleep(0.5)

        if not resources:
            # NOTE: This hides the possibility that Ray may be waiting for
            # clients to connect.
            resources.setdefault("CPU", 0)
            resources.setdefault("GPU", 0)
            logger.warning("Cluster resources cannot be detected or are 0. "
                           "You can resume this experiment by passing in "
                           "`resume=True` to `run`.")

        resources = resources.copy()
        num_cpus = resources.pop("CPU", 0)
        num_gpus = resources.pop("GPU", 0)
        memory = ray_constants.from_memory_units(resources.pop("memory", 0))
        object_store_memory = ray_constants.from_memory_units(
            resources.pop("object_store_memory", 0))
        # Whatever is left over is treated as custom resources.
        custom_resources = resources

        self._avail_resources = Resources(
            int(num_cpus),
            int(num_gpus),
            memory=int(memory),
            object_store_memory=int(object_store_memory),
            custom_resources=custom_resources)
        self._last_resource_refresh = time.time()
        self._resources_initialized = True

    def has_resources(self, resources):
        """Returns whether this runner has at least the specified resources.

        This refreshes the Ray cluster resources if the time since last update
        has exceeded self._refresh_period. This also assumes that the
        cluster is not resizing very frequently.
        """
        if time.time() - self._last_resource_refresh > self._refresh_period:
            self._update_avail_resources()

        currently_available = Resources.subtract(self._avail_resources,
                                                 self._committed_resources)

        have_space = (
            resources.cpu_total() <= currently_available.cpu
            and resources.gpu_total() <= currently_available.gpu
            and resources.memory_total() <= currently_available.memory
            and resources.object_store_memory_total() <=
            currently_available.object_store_memory and all(
                resources.get_res_total(res) <= currently_available.get(res)
                for res in resources.custom_resources))

        if have_space:
            # The assumption right now is that we block all trials if one
            # trial is queued.
            self._trial_queued = False
            return True

        can_overcommit = self._queue_trials and not self._trial_queued
        if can_overcommit:
            self._trial_queued = True
            logger.warning(
                "Allowing trial to start even though the "
                "cluster does not have enough free resources. Trial actors "
                "may appear to hang until enough resources are added to the "
                "cluster (e.g., via autoscaling). You can disable this "
                "behavior by specifying `queue_trials=False` in "
                "ray.tune.run().")
            return True

        return False

    def debug_string(self):
        """Returns a human readable message for printing to the console."""
        if self._resources_initialized:
            status = ("Resources requested: {}/{} CPUs, {}/{} GPUs, "
                      "{}/{} GiB heap, {}/{} GiB objects".format(
                          self._committed_resources.cpu,
                          self._avail_resources.cpu,
                          self._committed_resources.gpu,
                          self._avail_resources.gpu,
                          _to_gb(self._committed_resources.memory),
                          _to_gb(self._avail_resources.memory),
                          _to_gb(
                              self._committed_resources.object_store_memory),
                          _to_gb(self._avail_resources.object_store_memory)))
            customs = ", ".join([
                "{}/{} {}".format(
                    self._committed_resources.get_res_total(name),
                    self._avail_resources.get_res_total(name), name)
                for name in self._avail_resources.custom_resources
                if not name.startswith(ray.resource_spec.NODE_ID_PREFIX)
            ])
            if customs:
                status += " ({})".format(customs)
            return status
        else:
            return "Resources requested: ?"

    def resource_string(self):
        """Returns a string describing the total resources available."""
        if self._resources_initialized:
            res_str = ("{} CPUs, {} GPUs, "
                       "{} GiB heap, {} GiB objects".format(
                           self._avail_resources.cpu,
                           self._avail_resources.gpu,
                           _to_gb(self._avail_resources.memory),
                           _to_gb(self._avail_resources.object_store_memory)))
            if self._avail_resources.custom_resources:
                custom = ", ".join(
                    "{} {}".format(
                        self._avail_resources.get_res_total(name), name)
                    for name in self._avail_resources.custom_resources)
                res_str += " ({})".format(custom)
            return res_str
        else:
            return "? CPUs, ? GPUs"

    def on_step_begin(self, trial_runner):
        """Before step() called, update the available resources."""
        self._update_avail_resources()

    def save(self, trial, storage=Checkpoint.PERSISTENT, result=None):
        """Saves the trial's state to a checkpoint.

        Args:
            trial (Trial): The state of this trial to be saved.
            storage (str): Where to store the checkpoint. Defaults to
                PERSISTENT.
            result (dict): The state of this trial as a dictionary to be saved.
                If result is None, the trial's last result will be used.

        Returns:
            Checkpoint future, or None if an Exception occurs.
        """
        result = result or trial.last_result

        with self._change_working_directory(trial):
            if storage == Checkpoint.MEMORY:
                value = trial.runner.save_to_object.remote()
                checkpoint = Checkpoint(storage, value, result)
            else:
                with warn_if_slow("save_checkpoint_to_storage"):
                    # TODO(ujvl): Make this asynchronous.
                    value = ray.get(trial.runner.save.remote())
                    checkpoint = Checkpoint(storage, value, result)

        with warn_if_slow("on_checkpoint", DEFAULT_GET_TIMEOUT) as profile:
            try:
                trial.on_checkpoint(checkpoint)
            except Exception:
                logger.exception("Trial %s: Error handling checkpoint %s",
                                 trial, checkpoint.value)
                return None
        if profile.too_slow and trial.sync_on_checkpoint:
            logger.warning(
                "Consider turning off forced head-worker trial checkpoint "
                "syncs by setting sync_on_checkpoint=False. Note that this "
                "might result in faulty trial restoration for some worker "
                "failure modes.")
        return checkpoint.value

    def restore(self, trial, checkpoint=None):
        """Restores training state from a given model checkpoint.

        Raises:
            RuntimeError: This error is raised if no runner is found.
            AbortTrialExecution: This error is raised if the trial is
                ineligible for restoration, given the Tune input arguments.
        """
        if checkpoint is None or checkpoint.value is None:
            checkpoint = trial.checkpoint
        if checkpoint.value is None:
            return
        if trial.runner is None:
            raise RuntimeError(
                "Trial {}: Unable to restore - no runner found.".format(trial))
        value = checkpoint.value
        if checkpoint.storage == Checkpoint.MEMORY:
            logger.debug("Trial %s: Attempting restore from object", trial)
            # Note that we don't store the remote since in-memory checkpoints
            # don't guarantee fault tolerance and don't need to be waited on.
            with self._change_working_directory(trial):
                trial.runner.restore_from_object.remote(value)
        else:
            logger.debug("Trial %s: Attempting restore from %s", trial, value)
            if issubclass(trial.get_trainable_cls(), DurableTrainable):
                with self._change_working_directory(trial):
                    remote = trial.runner.restore.remote(value)
            elif trial.sync_on_checkpoint:
                # This provides FT backwards compatibility in the
                # case where a DurableTrainable is not provided.
                logger.warning("Trial %s: Reading checkpoint into memory.",
                               trial)
                data_dict = TrainableUtil.pickle_checkpoint(value)
                with self._change_working_directory(trial):
                    remote = trial.runner.restore_from_object.remote(data_dict)
            else:
                # FIX: original message lacked a space between "trial" and
                # "restoration".
                raise AbortTrialExecution(
                    "Pass in `sync_on_checkpoint=True` for driver-based trial "
                    "restoration. Pass in an `upload_dir` and a Trainable "
                    "extending `DurableTrainable` for remote storage-based "
                    "restoration")
            self._running[remote] = trial
            trial.restoring_from = checkpoint

    def export_trial_if_needed(self, trial):
        """Exports model of this trial based on trial.export_formats.

        Return:
            A dict that maps ExportFormats to successfully exported models.
        """
        if trial.export_formats and len(trial.export_formats) > 0:
            with self._change_working_directory(trial):
                return ray.get(
                    trial.runner.export_model.remote(trial.export_formats),
                    DEFAULT_GET_TIMEOUT)
        return {}

    def has_gpus(self):
        """Returns True if the cluster reports any GPUs."""
        if self._resources_initialized:
            self._update_avail_resources()
            return self._avail_resources.gpu > 0

    @contextmanager
    def _change_working_directory(self, trial):
        """Context manager changing working directory to trial logdir.

        Used in local mode.

        For non-local mode it is no-op.
        """
        if ray.worker._mode() == ray.worker.LOCAL_MODE:
            old_dir = os.getcwd()
            try:
                os.chdir(trial.logdir)
                yield
            finally:
                os.chdir(old_dir)
        else:
            yield
def _to_gb(n_bytes):
return round(n_bytes / (1024**3), 2)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/registry.py
|
Python
|
import logging
from types import FunctionType
import ray
import ray.cloudpickle as pickle
from ray.experimental.internal_kv import _internal_kv_initialized, \
_internal_kv_get, _internal_kv_put
# Registry categories: every registered object is stored under one of these.
TRAINABLE_CLASS = "trainable_class"
ENV_CREATOR = "env_creator"
RLLIB_MODEL = "rllib_model"
RLLIB_PREPROCESSOR = "rllib_preprocessor"
RLLIB_ACTION_DIST = "rllib_action_dist"

# All categories accepted by _Registry.register().
KNOWN_CATEGORIES = [
    TRAINABLE_CLASS, ENV_CREATOR, RLLIB_MODEL, RLLIB_PREPROCESSOR,
    RLLIB_ACTION_DIST
]

logger = logging.getLogger(__name__)
def has_trainable(trainable_name):
    """Return True if *trainable_name* is a registered trainable."""
    return _global_registry.contains(TRAINABLE_CLASS, trainable_name)
def get_trainable_cls(trainable_name):
    """Look up the trainable class registered under *trainable_name*.

    Raises a TuneError (via validate_trainable) if the name is unknown.
    """
    validate_trainable(trainable_name)
    return _global_registry.get(TRAINABLE_CLASS, trainable_name)
def validate_trainable(trainable_name):
    """Raise TuneError unless *trainable_name* is (or becomes) registered."""
    if has_trainable(trainable_name):
        return
    # Importing rllib has the side effect of registering its agents.
    from ray import rllib  # noqa: F401
    from ray.tune.error import TuneError
    if not has_trainable(trainable_name):
        raise TuneError("Unknown trainable: " + trainable_name)
def register_trainable(name, trainable):
    """Register a trainable function or class.

    Args:
        name (str): Name to register.
        trainable (obj): Function or tune.Trainable class. Functions must
            take (config, status_reporter) as arguments and will be
            automatically converted into a class during registration.
    """
    from ray.tune.trainable import Trainable
    from ray.tune.function_runner import wrap_function

    if isinstance(trainable, type):
        # Already a class; used as-is (validated below).
        logger.debug("Detected class for trainable.")
    elif isinstance(trainable, FunctionType):
        logger.debug("Detected function for trainable.")
        trainable = wrap_function(trainable)
    elif callable(trainable):
        logger.warning(
            "Detected unknown callable for trainable. Converting to class.")
        trainable = wrap_function(trainable)

    if not issubclass(trainable, Trainable):
        raise TypeError("Second argument must be convertable to Trainable",
                        trainable)
    _global_registry.register(TRAINABLE_CLASS, name, trainable)
def register_env(name, env_creator):
    """Register a custom environment for use with RLlib.

    Args:
        name (str): Name to register.
        env_creator (obj): Function that creates an env.

    Raises:
        TypeError: If ``env_creator`` is not a plain function.
    """
    if isinstance(env_creator, FunctionType):
        _global_registry.register(ENV_CREATOR, name, env_creator)
        return
    raise TypeError("Second argument must be a function.", env_creator)
def _make_key(category, key):
"""Generate a binary key for the given category and key.
Args:
category (str): The category of the item
key (str): The unique identifier for the item
Returns:
The key to use for storing a the value.
"""
return (b"TuneRegistry:" + category.encode("ascii") + b"/" +
key.encode("ascii"))
class _Registry:
    """Pickle-based registry, buffered locally until the internal KV is up.

    Entries are staged in ``self._to_flush`` and pushed to the
    cluster-wide internal KV store once Ray is initialized.
    """

    def __init__(self):
        # (category, key) -> pickled value, pending upload.
        self._to_flush = {}

    def register(self, category, key, value):
        """Stage *value* under (category, key); flush if the KV is ready."""
        if category not in KNOWN_CATEGORIES:
            from ray.tune import TuneError
            raise TuneError("Unknown category {} not among {}".format(
                category, KNOWN_CATEGORIES))
        self._to_flush[(category, key)] = pickle.dumps(value)
        if _internal_kv_initialized():
            self.flush_values()

    def contains(self, category, key):
        """Return True if a value is registered under (category, key)."""
        if not _internal_kv_initialized():
            return (category, key) in self._to_flush
        return _internal_kv_get(_make_key(category, key)) is not None

    def get(self, category, key):
        """Fetch and unpickle the value registered under (category, key)."""
        if not _internal_kv_initialized():
            return pickle.loads(self._to_flush[(category, key)])
        value = _internal_kv_get(_make_key(category, key))
        if value is None:
            raise ValueError(
                "Registry value for {}/{} doesn't exist.".format(
                    category, key))
        return pickle.loads(value)

    def flush_values(self):
        """Upload all staged entries to the internal KV store."""
        for (category, key), value in self._to_flush.items():
            _internal_kv_put(_make_key(category, key), value, overwrite=True)
        self._to_flush.clear()
# Shared process-wide registry. Registrations may happen before ray.init(),
# so the buffered values are flushed into the internal KV store once the
# worker finishes initialization.
_global_registry = _Registry()
ray.worker._post_init_hooks.append(_global_registry.flush_values)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/resources.py
|
Python
|
from collections import namedtuple
import logging
import json
from numbers import Number
# For compatibility under py2 to consider unicode as str
from six import string_types
import ray
from ray.tune import TuneError
logger = logging.getLogger(__name__)
class Resources(
        namedtuple("Resources", [
            "cpu", "gpu", "memory", "object_store_memory", "extra_cpu",
            "extra_gpu", "extra_memory", "extra_object_store_memory",
            "custom_resources", "extra_custom_resources"
        ])):
    """Ray resources required to schedule a trial.

    Attributes:
        cpu (float): Number of CPUs to allocate to the trial.
        gpu (float): Number of GPUs to allocate to the trial.
        memory (float): Memory to reserve for the trial.
        object_store_memory (float): Object store memory to reserve.
        extra_cpu (float): Extra CPUs to reserve in case the trial needs to
            launch additional Ray actors that use CPUs.
        extra_gpu (float): Extra GPUs to reserve in case the trial needs to
            launch additional Ray actors that use GPUs.
        extra_memory (float): Memory to reserve for the trial launching
            additional Ray actors that use memory.
        extra_object_store_memory (float): Object store memory to reserve for
            the trial launching additional Ray actors that use object store
            memory.
        custom_resources (dict): Mapping of resource to quantity to allocate
            to the trial.
        extra_custom_resources (dict): Extra custom resources to reserve in
            case the trial needs to launch additional Ray actors that use
            any of these custom resources.
    """

    # No per-instance __dict__; all data lives in the namedtuple fields.
    __slots__ = ()

    def __new__(cls,
                cpu,
                gpu,
                memory=0,
                object_store_memory=0,
                extra_cpu=0,
                extra_gpu=0,
                extra_memory=0,
                extra_object_store_memory=0,
                custom_resources=None,
                extra_custom_resources=None):
        custom_resources = custom_resources or {}
        extra_custom_resources = extra_custom_resources or {}
        # Give both custom-resource dicts the same key set by zero-filling
        # keys that appear in only one of them.
        leftovers = set(custom_resources) ^ set(extra_custom_resources)

        for value in leftovers:
            custom_resources.setdefault(value, 0)
            extra_custom_resources.setdefault(value, 0)

        # All quantities are rounded to two decimals for stable display
        # and comparison.
        cpu = round(cpu, 2)
        gpu = round(gpu, 2)
        memory = round(memory, 2)
        object_store_memory = round(object_store_memory, 2)
        extra_cpu = round(extra_cpu, 2)
        extra_gpu = round(extra_gpu, 2)
        extra_memory = round(extra_memory, 2)
        extra_object_store_memory = round(extra_object_store_memory, 2)
        custom_resources = {
            resource: round(value, 2)
            for resource, value in custom_resources.items()
        }
        extra_custom_resources = {
            resource: round(value, 2)
            for resource, value in extra_custom_resources.items()
        }

        # Validate that every quantity is numeric before construction.
        all_values = [
            cpu, gpu, memory, object_store_memory, extra_cpu, extra_gpu,
            extra_memory, extra_object_store_memory
        ]
        all_values += list(custom_resources.values())
        all_values += list(extra_custom_resources.values())
        assert len(custom_resources) == len(extra_custom_resources)
        for entry in all_values:
            assert isinstance(entry, Number), ("Improper resource value.",
                                               entry)
        return super(Resources, cls).__new__(
            cls, cpu, gpu, memory, object_store_memory, extra_cpu, extra_gpu,
            extra_memory, extra_object_store_memory, custom_resources,
            extra_custom_resources)

    def summary_string(self):
        """Return a human-readable one-line summary of these resources."""
        summary = "{} CPUs, {} GPUs".format(self.cpu + self.extra_cpu,
                                            self.gpu + self.extra_gpu)
        if self.memory or self.extra_memory:
            summary += ", {} GiB heap".format(
                round((self.memory + self.extra_memory) / (1024**3), 2))
        if self.object_store_memory or self.extra_object_store_memory:
            summary += ", {} GiB objects".format(
                round(
                    (self.object_store_memory + self.extra_object_store_memory)
                    / (1024**3), 2))
        custom_summary = ", ".join([
            "{} {}".format(self.get_res_total(res), res)
            for res in self.custom_resources
            # Hide Ray's internal per-node marker resources.
            if not res.startswith(ray.resource_spec.NODE_ID_PREFIX)
        ])
        if custom_summary:
            summary += " ({})".format(custom_summary)
        return summary

    def cpu_total(self):
        # Total CPUs including the extra reservation.
        return self.cpu + self.extra_cpu

    def gpu_total(self):
        # Total GPUs including the extra reservation.
        return self.gpu + self.extra_gpu

    def memory_total(self):
        # Total heap memory including the extra reservation.
        return self.memory + self.extra_memory

    def object_store_memory_total(self):
        # Total object store memory including the extra reservation.
        return self.object_store_memory + self.extra_object_store_memory

    def get_res_total(self, key):
        # Total (base + extra) quantity of the custom resource `key`.
        return self.custom_resources.get(
            key, 0) + self.extra_custom_resources.get(key, 0)

    def get(self, key):
        # Base (non-extra) quantity of the custom resource `key`.
        return self.custom_resources.get(key, 0)

    def is_nonnegative(self):
        # NOTE(review): memory/object_store_memory fields are not checked
        # here -- only CPU/GPU and custom resources. Confirm whether this
        # omission is intentional.
        all_values = [self.cpu, self.gpu, self.extra_cpu, self.extra_gpu]
        all_values += list(self.custom_resources.values())
        all_values += list(self.extra_custom_resources.values())
        return all(v >= 0 for v in all_values)

    @classmethod
    def subtract(cls, original, to_remove):
        """Return a new Resources equal to `original` minus `to_remove`,
        field by field (custom resources are subtracted per key)."""
        cpu = original.cpu - to_remove.cpu
        gpu = original.gpu - to_remove.gpu
        memory = original.memory - to_remove.memory
        object_store_memory = (
            original.object_store_memory - to_remove.object_store_memory)
        extra_cpu = original.extra_cpu - to_remove.extra_cpu
        extra_gpu = original.extra_gpu - to_remove.extra_gpu
        extra_memory = original.extra_memory - to_remove.extra_memory
        extra_object_store_memory = (original.extra_object_store_memory -
                                     to_remove.extra_object_store_memory)
        all_resources = set(original.custom_resources).union(
            set(to_remove.custom_resources))
        new_custom_res = {
            k: original.custom_resources.get(k, 0) -
            to_remove.custom_resources.get(k, 0)
            for k in all_resources
        }
        extra_custom_res = {
            k: original.extra_custom_resources.get(k, 0) -
            to_remove.extra_custom_resources.get(k, 0)
            for k in all_resources
        }
        return Resources(cpu, gpu, memory, object_store_memory, extra_cpu,
                         extra_gpu, extra_memory, extra_object_store_memory,
                         new_custom_res, extra_custom_res)

    def to_json(self):
        """Serialize into a JSON-friendly dict (see resources_to_json)."""
        return resources_to_json(self)
def json_to_resources(data):
    """Parse a JSON string or dict into a ``Resources`` object.

    Returns None for None or the literal string "null". Raises TuneError
    for the removed driver-limit fields and ValueError for any key that
    is not a ``Resources`` field.
    """
    if data is None or data == "null":
        return None
    if isinstance(data, string_types):
        data = json.loads(data)

    removed_fields = ("driver_cpu_limit", "driver_gpu_limit")
    for field in data:
        if field in removed_fields:
            raise TuneError(
                "The field `{}` is no longer supported. Use `extra_cpu` "
                "or `extra_gpu` instead.".format(field))
        if field not in Resources._fields:
            raise ValueError(
                "Unknown resource field {}, must be one of {}".format(
                    field, Resources._fields))

    value = data.get
    return Resources(
        value("cpu", 1), value("gpu", 0), value("memory", 0),
        value("object_store_memory", 0), value("extra_cpu", 0),
        value("extra_gpu", 0), value("extra_memory", 0),
        value("extra_object_store_memory", 0), value("custom_resources"),
        value("extra_custom_resources"))
def resources_to_json(resources):
    """Convert a ``Resources`` object into a JSON-serializable dict.

    Returns None when ``resources`` is None. The custom resource dicts
    are shallow-copied so mutating the result cannot affect the source.
    """
    if resources is None:
        return None
    scalar_fields = ("cpu", "gpu", "memory", "object_store_memory",
                     "extra_cpu", "extra_gpu", "extra_memory",
                     "extra_object_store_memory")
    payload = {field: getattr(resources, field) for field in scalar_fields}
    payload["custom_resources"] = resources.custom_resources.copy()
    payload["extra_custom_resources"] = resources.extra_custom_resources.copy()
    return payload
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/result.py
|
Python
|
import os

# Canonical keys of the result dict that Tune trainables report.
# yapf: disable
# __sphinx_doc_begin__
# (Optional/Auto-filled) training is terminated. Filled only if not provided.
DONE = "done"

# (Optional) Enum for user controlled checkpoint
SHOULD_CHECKPOINT = "should_checkpoint"

# (Auto-filled) The hostname of the machine hosting the training process.
HOSTNAME = "hostname"

# (Auto-filled) The auto-assigned id of the trial.
TRIAL_ID = "trial_id"

# (Auto-filled) The human-readable experiment tag assigned to the trial.
EXPERIMENT_TAG = "experiment_tag"

# (Auto-filled) The node ip of the machine hosting the training process.
NODE_IP = "node_ip"

# (Auto-filled) The pid of the training process.
PID = "pid"

# (Optional) Mean reward for current training iteration
EPISODE_REWARD_MEAN = "episode_reward_mean"

# (Optional) Mean loss for training iteration
MEAN_LOSS = "mean_loss"

# (Optional) Mean accuracy for training iteration
MEAN_ACCURACY = "mean_accuracy"

# Number of episodes in this iteration.
EPISODES_THIS_ITER = "episodes_this_iter"

# (Optional/Auto-filled) Accumulated number of episodes for this trial.
EPISODES_TOTAL = "episodes_total"

# Number of timesteps in this iteration.
TIMESTEPS_THIS_ITER = "timesteps_this_iter"

# (Auto-filled) Accumulated number of timesteps for this entire trial.
TIMESTEPS_TOTAL = "timesteps_total"

# (Auto-filled) Time in seconds this iteration took to run.
# This may be overridden to override the system-computed time difference.
TIME_THIS_ITER_S = "time_this_iter_s"

# (Auto-filled) Accumulated time in seconds for this entire trial.
TIME_TOTAL_S = "time_total_s"

# (Auto-filled) The index of this training iteration.
TRAINING_ITERATION = "training_iteration"
# __sphinx_doc_end__
# yapf: enable

# Default columns shown by reporters for experiment metadata and results.
DEFAULT_EXPERIMENT_INFO_KEYS = ("trainable_name", EXPERIMENT_TAG, TRIAL_ID)

DEFAULT_RESULT_KEYS = (TRAINING_ITERATION, TIME_TOTAL_S, TIMESTEPS_TOTAL,
                       MEAN_ACCURACY, MEAN_LOSS)

# __duplicate__ is a magic keyword used internally to
# avoid double-logging results when using the Function API.
RESULT_DUPLICATE = "__duplicate__"

# Where Tune writes result files by default; TEST_TMPDIR and
# TUNE_RESULT_DIR environment variables take precedence, in that order.
DEFAULT_RESULTS_DIR = (os.environ.get("TEST_TMPDIR")
                       or os.environ.get("TUNE_RESULT_DIR")
                       or os.path.expanduser("~/ray_results"))

# Meta file about status under each experiment directory, can be
# parsed by automlboard if exists.
JOB_META_FILE = "job_status.json"

# Meta file about status under each trial directory, can be parsed
# by automlboard if exists.
EXPR_META_FILE = "trial_status.json"

# File that stores parameters of the trial.
EXPR_PARAM_FILE = "params.json"

# Pickle File that stores parameters of the trial.
EXPR_PARAM_PICKLE_FILE = "params.pkl"

# File that stores the progress of the trial.
EXPR_PROGRESS_FILE = "progress.csv"

# File that stores results of the trial.
EXPR_RESULT_FILE = "result.json"

# Config prefix when using Analysis.
CONFIG_PREFIX = "config/"
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/sample.py
|
Python
|
import logging
import numpy as np
logger = logging.getLogger(__name__)
class sample_from:
    """Marks a config value as lazily drawn by calling ``func``.

    Arguments:
        func: A callable invoked to draw a sample; it receives the spec
            being resolved as its single argument.
    """

    def __init__(self, func):
        self.func = func

    def __str__(self):
        return "tune.sample_from({})".format(self.func)

    def __repr__(self):
        return "tune.sample_from({!r})".format(self.func)
def function(func):
    """Deprecated no-op wrapper kept for backwards compatibility.

    Logs a deprecation warning and returns ``func`` unchanged.
    """
    message = ("DeprecationWarning: wrapping {} with tune.function() is no "
               "longer needed".format(func))
    logger.warning(message)
    return func
def uniform(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.uniform``.

    ``tune.uniform(1, 10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.uniform(1, 10))``
    """
    def draw(_):
        return np.random.uniform(*args, **kwargs)

    return sample_from(draw)
def loguniform(min_bound, max_bound, base=10):
    """Sugar for sampling in different orders of magnitude.

    Args:
        min_bound (float): Lower boundary of the output interval (1e-4)
        max_bound (float): Upper boundary of the output interval (1e-2)
        base (float): Base of the log. Defaults to 10.
    """
    # Sample uniformly in log space, then map back via exponentiation.
    log_lo = np.log(min_bound) / np.log(base)
    log_hi = np.log(max_bound) / np.log(base)
    return sample_from(lambda _: base**np.random.uniform(log_lo, log_hi))
def choice(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.choice``.

    ``tune.choice(10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.choice(10))``
    """
    def draw(_):
        return np.random.choice(*args, **kwargs)

    return sample_from(draw)
def randint(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.randint``.

    ``tune.randint(10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.randint(10))``
    """
    def draw(_):
        return np.random.randint(*args, **kwargs)

    return sample_from(draw)
def randn(*args, **kwargs):
    """Wraps tune.sample_from around ``np.random.randn``.

    ``tune.randn(10)`` is equivalent to
    ``tune.sample_from(lambda _: np.random.randn(10))``
    """
    def draw(_):
        return np.random.randn(*args, **kwargs)

    return sample_from(draw)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/schedulers/__init__.py
|
Python
|
from ray.tune.schedulers.trial_scheduler import TrialScheduler, FIFOScheduler
from ray.tune.schedulers.hyperband import HyperBandScheduler
from ray.tune.schedulers.hb_bohb import HyperBandForBOHB
from ray.tune.schedulers.async_hyperband import (AsyncHyperBandScheduler,
ASHAScheduler)
from ray.tune.schedulers.median_stopping_rule import MedianStoppingRule
from ray.tune.schedulers.pbt import PopulationBasedTraining
# Explicit public API of ray.tune.schedulers: all scheduler classes
# re-exported from their implementation modules above.
__all__ = [
    "TrialScheduler", "HyperBandScheduler", "AsyncHyperBandScheduler",
    "ASHAScheduler", "MedianStoppingRule", "FIFOScheduler",
    "PopulationBasedTraining", "HyperBandForBOHB"
]
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/schedulers/async_hyperband.py
|
Python
|
import logging
import numpy as np
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
logger = logging.getLogger(__name__)
class AsyncHyperBandScheduler(FIFOScheduler):
    """Implements the Async Successive Halving.

    This should provide similar theoretical performance as HyperBand but
    avoid straggler issues that HyperBand faces. One implementation detail
    is when using multiple brackets, trial allocation to bracket is done
    randomly with over a softmax probability.

    See https://arxiv.org/abs/1810.05934

    Args:
        time_attr (str): A training result attr to use for comparing time.
            Note that you can pass in something non-temporal such as
            `training_iteration` as a measure of progress, the only requirement
            is that the attribute should increase monotonically.
        metric (str): The training result objective value attribute. Stopping
            procedures will use this attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        max_t (float): max time units per trial. Trials will be stopped after
            max_t time units (determined by time_attr) have passed.
        grace_period (float): Only stop trials at least this old in time.
            The units are the same as the attribute named by `time_attr`.
        reduction_factor (float): Used to set halving rate and amount. This
            is simply a unit-less scalar.
        brackets (int): Number of brackets. Each bracket has a different
            halving rate, specified by the reduction factor.
        reward_attr (str): Deprecated alias for ``metric``; forces mode=max.
    """

    def __init__(self,
                 time_attr="training_iteration",
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 max_t=100,
                 grace_period=1,
                 reduction_factor=4,
                 brackets=1):
        assert max_t > 0, "Max (time_attr) not valid!"
        assert max_t >= grace_period, "grace_period must be <= max_t!"
        assert grace_period > 0, "grace_period must be positive!"
        assert reduction_factor > 1, "Reduction Factor not valid!"
        assert brackets > 0, "brackets must be positive!"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Legacy shim: `reward_attr` predates metric/mode and always
        # implied maximization.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        FIFOScheduler.__init__(self)
        self._reduction_factor = reduction_factor
        self._max_t = max_t

        self._trial_info = {}  # Stores Trial -> Bracket

        # Tracks state for new trial add
        self._brackets = [
            _Bracket(grace_period, max_t, reduction_factor, s)
            for s in range(brackets)
        ]
        self._counter = 0  # appears unused here — TODO confirm it can be removed
        self._num_stopped = 0
        self._metric = metric
        # Sign multiplier so brackets can always treat "larger is better".
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr

    def on_trial_add(self, trial_runner, trial):
        # Assign the trial to a bracket sampled from a softmax over the
        # brackets' rung counts, biasing allocation toward larger brackets.
        sizes = np.array([len(b._rungs) for b in self._brackets])
        probs = np.e**(sizes - sizes.max())
        normalized = probs / probs.sum()
        idx = np.random.choice(len(self._brackets), p=normalized)
        self._trial_info[trial.trial_id] = self._brackets[idx]

    def on_trial_result(self, trial_runner, trial, result):
        """Return CONTINUE/STOP for ``trial`` based on its latest result.

        Results missing the time or metric attribute are ignored
        (trial continues). Trials past ``max_t`` are always stopped.
        """
        action = TrialScheduler.CONTINUE
        if self._time_attr not in result or self._metric not in result:
            return action
        if result[self._time_attr] >= self._max_t:
            action = TrialScheduler.STOP
        else:
            bracket = self._trial_info[trial.trial_id]
            action = bracket.on_result(trial, result[self._time_attr],
                                       self._metric_op * result[self._metric])
        if action == TrialScheduler.STOP:
            self._num_stopped += 1
        return action

    def on_trial_complete(self, trial_runner, trial, result):
        # Record the final result into the bracket, then drop bookkeeping.
        if self._time_attr not in result or self._metric not in result:
            return
        bracket = self._trial_info[trial.trial_id]
        bracket.on_result(trial, result[self._time_attr],
                          self._metric_op * result[self._metric])
        del self._trial_info[trial.trial_id]

    def on_trial_remove(self, trial_runner, trial):
        del self._trial_info[trial.trial_id]

    def debug_string(self):
        """One-line summary plus per-bracket rung states, for reporters."""
        out = "Using AsyncHyperBand: num_stopped={}".format(self._num_stopped)
        out += "\n" + "\n".join([b.debug_str() for b in self._brackets])
        return out
class _Bracket():
"""Bookkeeping system to track the cutoffs.
Rungs are created in reversed order so that we can more easily find
the correct rung corresponding to the current iteration of the result.
Example:
>>> b = _Bracket(1, 10, 2, 3)
>>> b.on_result(trial1, 1, 2) # CONTINUE
>>> b.on_result(trial2, 1, 4) # CONTINUE
>>> b.cutoff(b._rungs[-1][1]) == 3.0 # rungs are reversed
>>> b.on_result(trial3, 1, 1) # STOP
>>> b.cutoff(b._rungs[0][1]) == 2.0
"""
def __init__(self, min_t, max_t, reduction_factor, s):
self.rf = reduction_factor
MAX_RUNGS = int(np.log(max_t / min_t) / np.log(self.rf) - s + 1)
self._rungs = [(min_t * self.rf**(k + s), {})
for k in reversed(range(MAX_RUNGS))]
def cutoff(self, recorded):
if not recorded:
return None
return np.percentile(list(recorded.values()), (1 - 1 / self.rf) * 100)
def on_result(self, trial, cur_iter, cur_rew):
action = TrialScheduler.CONTINUE
for milestone, recorded in self._rungs:
if cur_iter < milestone or trial.trial_id in recorded:
continue
else:
cutoff = self.cutoff(recorded)
if cutoff is not None and cur_rew < cutoff:
action = TrialScheduler.STOP
if cur_rew is None:
logger.warning("Reward attribute is None! Consider"
" reporting using a different field.")
else:
recorded[trial.trial_id] = cur_rew
break
return action
def debug_str(self):
iters = " | ".join([
"Iter {:.3f}: {}".format(milestone, self.cutoff(recorded))
for milestone, recorded in self._rungs
])
return "Bracket: " + iters
# Public alias: ASHA is the published name of this algorithm.
ASHAScheduler = AsyncHyperBandScheduler

if __name__ == "__main__":
    # Manual smoke test: print the empty scheduler state, then the cutoff
    # computed over 20 synthetic recorded rewards in the first bracket.
    sched = AsyncHyperBandScheduler(
        grace_period=1, max_t=10, reduction_factor=2)
    print(sched.debug_string())
    bracket = sched._brackets[0]
    print(bracket.cutoff({str(i): i for i in range(20)}))
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/schedulers/hb_bohb.py
|
Python
|
import logging
from ray.tune.schedulers.trial_scheduler import TrialScheduler
from ray.tune.schedulers.hyperband import HyperBandScheduler, Bracket
from ray.tune.trial import Trial
logger = logging.getLogger(__name__)
class HyperBandForBOHB(HyperBandScheduler):
    """Extends HyperBand early stopping algorithm for BOHB.

    This implementation removes the ``HyperBandScheduler`` pipelining. This
    class introduces key changes:

    1. Trials are now placed so that the bracket with the largest size is
    filled first.

    2. Trials will be paused even if the bracket is not filled. This allows
    BOHB to insert new trials into the training.

    See ray.tune.schedulers.HyperBandScheduler for parameter docstring.
    """

    def on_trial_add(self, trial_runner, trial):
        """Adds new trial.

        On a new trial add, if current bracket is not filled, add to current
        bracket. Else, if current band is not filled, create new bracket, add
        to current bracket. Else, create new iteration, create new bracket,
        add to bracket.
        """
        cur_bracket = self._state["bracket"]
        cur_band = self._hyperbands[self._state["band_idx"]]
        if cur_bracket is None or cur_bracket.filled():
            retry = True
            while retry:
                # if current iteration is filled, create new iteration
                if self._cur_band_filled():
                    cur_band = []
                    self._hyperbands.append(cur_band)
                    self._state["band_idx"] += 1

                # MAIN CHANGE HERE - largest bracket first!
                # cur_band will always be less than s_max_1 or else filled
                s = self._s_max_1 - len(cur_band) - 1
                assert s >= 0, "Current band is filled!"
                if self._get_r0(s) == 0:
                    # A bracket whose initial resource allocation rounds
                    # to zero cannot run anything; retry with a new one.
                    logger.debug("BOHB: Bracket too small - Retrying...")
                    cur_bracket = None
                else:
                    retry = False
                    cur_bracket = Bracket(self._time_attr, self._get_n0(s),
                                          self._get_r0(s), self._max_t_attr,
                                          self._eta, s)
                cur_band.append(cur_bracket)
                self._state["bracket"] = cur_bracket

        self._state["bracket"].add_trial(trial)
        self._trial_info[trial] = cur_bracket, self._state["band_idx"]

    def on_trial_result(self, trial_runner, trial, result):
        """If bracket is finished, all trials will be stopped.

        If a given trial finishes and bracket iteration is not done,
        the trial will be paused and resources will be given up.

        This scheduler will not start trials but will stop trials.
        The current running trial will not be handled,
        as the trialrunner will be given control to handle it."""
        result["hyperband_info"] = {}
        bracket, _ = self._trial_info[trial]
        bracket.update_trial_stats(trial, result)

        if bracket.continue_trial(trial):
            return TrialScheduler.CONTINUE

        result["hyperband_info"]["budget"] = bracket._cumul_r

        # MAIN CHANGE HERE!
        # Pause this trial until every *other* live trial in the bracket
        # is also paused (or the bracket fills); only then process halving.
        statuses = [(t, t.status) for t in bracket._live_trials]
        if not bracket.filled() or any(status != Trial.PAUSED
                                       for t, status in statuses
                                       if t is not trial):
            trial_runner._search_alg.on_pause(trial.trial_id)
            return TrialScheduler.PAUSE
        action = self._process_bracket(trial_runner, bracket)
        return action

    def _unpause_trial(self, trial_runner, trial):
        # Also notify the search algorithm (BOHB) that the trial resumed.
        trial_runner.trial_executor.unpause_trial(trial)
        trial_runner._search_alg.on_unpause(trial.trial_id)

    def choose_trial_to_run(self, trial_runner):
        """Fair scheduling within iteration by completion percentage.

        List of trials not used since all trials are tracked as state
        of scheduler. If iteration is occupied (ie, no trials to run),
        then look into next iteration.
        """
        for hyperband in self._hyperbands:
            # band will have None entries if no resources
            # are to be allocated to that bracket.
            scrubbed = [b for b in hyperband if b is not None]
            for bracket in scrubbed:
                for trial in bracket.current_trials():
                    if (trial.status == Trial.PENDING
                            and trial_runner.has_resources(trial.resources)):
                        return trial
        # MAIN CHANGE HERE!
        # If nothing is running at all, force-process a bracket containing
        # paused trials so the runner can make progress on the retry.
        if not any(t.status == Trial.RUNNING
                   for t in trial_runner.get_trials()):
            for hyperband in self._hyperbands:
                for bracket in hyperband:
                    if bracket and any(trial.status == Trial.PAUSED
                                       for trial in bracket.current_trials()):
                        # This will change the trial state and let the
                        # trial runner retry.
                        self._process_bracket(trial_runner, bracket)
        # MAIN CHANGE HERE!
        return None
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/schedulers/hyperband.py
|
Python
|
import collections
import numpy as np
import logging
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
from ray.tune.trial import Trial
from ray.tune.error import TuneError
logger = logging.getLogger(__name__)
# Implementation notes:
# This implementation contains 3 logical levels.
# Each HyperBand iteration is a "band". There can be multiple
# bands running at once, and there can be 1 band that is incomplete.
#
# In each band, there are at most `s` + 1 brackets.
# `s` is a value determined by given parameters, and assigned on
# a cyclic basis.
#
# In each bracket, there are at most `n(s)` trials, indicating that
# `n` is a function of `s`. These trials go through a series of
# halving procedures, dropping lowest performers. Multiple
# brackets are running at once.
#
# Trials added will be inserted into the most recent bracket
# and band and will spill over to new brackets/bands accordingly.
#
# This maintains the bracket size and max trial count per band
# to 5 and 117 respectively, which correspond to that of
# `max_attr=81, eta=3` from the blog post. Trials will fill up
# from smallest bracket to largest, with largest
# having the most rounds of successive halving.
class HyperBandScheduler(FIFOScheduler):
    """Implements the HyperBand early stopping algorithm.

    HyperBandScheduler early stops trials using the HyperBand optimization
    algorithm. It divides trials into brackets of varying sizes, and
    periodically early stops low-performing trials within each bracket.

    To use this implementation of HyperBand with Tune, all you need
    to do is specify the max length of time a trial can run `max_t`, the time
    units `time_attr`, the name of the reported objective value `metric`,
    and if `metric` is to be maximized or minimized (`mode`).
    We automatically determine reasonable values for the other
    HyperBand parameters based on the given values.

    For example, to limit trials to 10 minutes and early stop based on the
    `episode_mean_reward` attr, construct:

    ``HyperBand('time_total_s', 'episode_reward_mean', max_t=600)``

    Note that Tune's stopping criteria will be applied in conjunction with
    HyperBand's early stopping mechanisms.

    See also: https://people.eecs.berkeley.edu/~kjamieson/hyperband.html

    Args:
        time_attr (str): The training result attr to use for comparing time.
            Note that you can pass in something non-temporal such as
            `training_iteration` as a measure of progress, the only requirement
            is that the attribute should increase monotonically.
        metric (str): The training result objective value attribute. Stopping
            procedures will use this attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        max_t (int): max time units per trial. Trials will be stopped after
            max_t time units (determined by time_attr) have passed.
            The scheduler will terminate trials after this time has passed.
            Note that this is different from the semantics of `max_t` as
            mentioned in the original HyperBand paper.
        reduction_factor (float): Same as `eta`. Determines how sharp
            the difference is between bracket space-time allocation ratios.
        reward_attr (str): Deprecated alias for ``metric``; forces mode=max.
    """

    def __init__(self,
                 time_attr="training_iteration",
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 max_t=81,
                 reduction_factor=3):
        assert max_t > 0, "Max (time_attr) not valid!"
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Legacy shim: `reward_attr` predates metric/mode and always
        # implied maximization.
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        FIFOScheduler.__init__(self)
        self._eta = reduction_factor
        # s_max_1 = number of brackets per band (s_max + 1 in the paper).
        self._s_max_1 = int(
            np.round(np.log(max_t) / np.log(reduction_factor))) + 1
        self._max_t_attr = max_t
        # bracket max trials
        self._get_n0 = lambda s: int(
            np.ceil(self._s_max_1 / (s + 1) * self._eta**s))
        # bracket initial iterations
        self._get_r0 = lambda s: int((max_t * self._eta**(-s)))
        self._hyperbands = [[]]  # list of hyperband iterations
        self._trial_info = {}  # Stores Trial -> Bracket, Band Iteration

        # Tracks state for new trial add
        self._state = {"bracket": None, "band_idx": 0}
        self._num_stopped = 0
        self._metric = metric
        # Sign multiplier so halving can always treat "larger is better".
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr

    def on_trial_add(self, trial_runner, trial):
        """Adds new trial.

        On a new trial add, if current bracket is not filled,
        add to current bracket. Else, if current band is not filled,
        create new bracket, add to current bracket.
        Else, create new iteration, create new bracket, add to bracket."""
        cur_bracket = self._state["bracket"]
        cur_band = self._hyperbands[self._state["band_idx"]]
        if cur_bracket is None or cur_bracket.filled():
            retry = True
            while retry:
                # if current iteration is filled, create new iteration
                if self._cur_band_filled():
                    cur_band = []
                    self._hyperbands.append(cur_band)
                    self._state["band_idx"] += 1

                # cur_band will always be less than s_max_1 or else filled
                s = len(cur_band)
                assert s < self._s_max_1, "Current band is filled!"
                if self._get_r0(s) == 0:
                    # Zero initial iterations means the bracket can't run
                    # any trial; append a None placeholder and retry.
                    logger.info("Bracket too small - Retrying...")
                    cur_bracket = None
                else:
                    retry = False
                    cur_bracket = Bracket(self._time_attr, self._get_n0(s),
                                          self._get_r0(s), self._max_t_attr,
                                          self._eta, s)
                cur_band.append(cur_bracket)
                self._state["bracket"] = cur_bracket

        self._state["bracket"].add_trial(trial)
        self._trial_info[trial] = cur_bracket, self._state["band_idx"]

    def _cur_band_filled(self):
        """Checks if the current band is filled.

        The size of the current band should be equal to s_max_1"""
        cur_band = self._hyperbands[self._state["band_idx"]]
        return len(cur_band) == self._s_max_1

    def on_trial_result(self, trial_runner, trial, result):
        """If bracket is finished, all trials will be stopped.

        If a given trial finishes and bracket iteration is not done,
        the trial will be paused and resources will be given up.

        This scheduler will not start trials but will stop trials.
        The current running trial will not be handled,
        as the trialrunner will be given control to handle it."""
        bracket, _ = self._trial_info[trial]
        bracket.update_trial_stats(trial, result)

        if bracket.continue_trial(trial):
            return TrialScheduler.CONTINUE

        action = self._process_bracket(trial_runner, bracket)
        return action

    def _process_bracket(self, trial_runner, bracket):
        """This is called whenever a trial makes progress.

        When all live trials in the bracket have no more iterations left,
        Trials will be successively halved. If bracket is done, all
        non-running trials will be stopped and cleaned up,
        and during each halving phase, bad trials will be stopped while good
        trials will return to "PENDING"."""
        action = TrialScheduler.PAUSE
        if bracket.cur_iter_done():
            if bracket.finished():
                bracket.cleanup_full(trial_runner)
                return TrialScheduler.STOP

            good, bad = bracket.successive_halving(self._metric,
                                                   self._metric_op)
            # kill bad trials
            self._num_stopped += len(bad)
            for t in bad:
                if t.status == Trial.PAUSED:
                    trial_runner.stop_trial(t)
                elif t.status == Trial.RUNNING:
                    bracket.cleanup_trial(t)
                    action = TrialScheduler.STOP
                else:
                    raise TuneError("Trial with unexpected status encountered")

            # ready the good trials - if trial is too far ahead, don't continue
            for t in good:
                if t.status not in [Trial.PAUSED, Trial.RUNNING]:
                    raise TuneError("Trial with unexpected status encountered")
                if bracket.continue_trial(t):
                    if t.status == Trial.PAUSED:
                        self._unpause_trial(trial_runner, t)
                    elif t.status == Trial.RUNNING:
                        action = TrialScheduler.CONTINUE
        return action

    def on_trial_remove(self, trial_runner, trial):
        """Notification when trial terminates.

        Trial info is removed from bracket. Triggers halving if bracket is
        not finished."""
        bracket, _ = self._trial_info[trial]
        bracket.cleanup_trial(trial)
        if not bracket.finished():
            self._process_bracket(trial_runner, bracket)

    def on_trial_complete(self, trial_runner, trial, result):
        """Cleans up trial info from bracket if trial completed early."""
        self.on_trial_remove(trial_runner, trial)

    def on_trial_error(self, trial_runner, trial):
        """Cleans up trial info from bracket if trial errored early."""
        self.on_trial_remove(trial_runner, trial)

    def choose_trial_to_run(self, trial_runner):
        """Fair scheduling within iteration by completion percentage.

        List of trials not used since all trials are tracked as state
        of scheduler. If iteration is occupied (ie, no trials to run),
        then look into next iteration.
        """
        for hyperband in self._hyperbands:
            # band will have None entries if no resources
            # are to be allocated to that bracket.
            scrubbed = [b for b in hyperband if b is not None]
            # Least-complete brackets get first pick of resources.
            for bracket in sorted(
                    scrubbed, key=lambda b: b.completion_percentage()):
                for trial in bracket.current_trials():
                    if (trial.status == Trial.PENDING
                            and trial_runner.has_resources(trial.resources)):
                        return trial
        return None

    def debug_string(self):
        """This provides a progress notification for the algorithm.

        For each bracket, the algorithm will output a string as follows:

            Bracket(Max Size (n)=5, Milestone (r)=33, completed=14.6%):
            {PENDING: 2, RUNNING: 3, TERMINATED: 2}

        "Max Size" indicates the max number of pending/running experiments
        set according to the Hyperband algorithm.

        "Milestone" indicates the iterations a trial will run for before
        the next halving will occur.

        "Completed" indicates an approximate progress metric. Some brackets,
        like ones that are unfilled, will not reach 100%.
        """
        out = "Using HyperBand: "
        out += "num_stopped={} total_brackets={}".format(
            self._num_stopped, sum(len(band) for band in self._hyperbands))
        for i, band in enumerate(self._hyperbands):
            out += "\nRound #{}:".format(i)
            for bracket in band:
                out += "\n  {}".format(bracket)
        return out

    def state(self):
        """Return a small dict snapshot of scheduler progress counters."""
        return {
            "num_brackets": sum(len(band) for band in self._hyperbands),
            "num_stopped": self._num_stopped
        }

    def _unpause_trial(self, trial_runner, trial):
        # Hook point: overridden by HyperBandForBOHB to also notify the
        # search algorithm.
        trial_runner.trial_executor.unpause_trial(trial)
class Bracket:
    """Logical object for tracking Hyperband bracket progress. Keeps track
    of proper parameters as designated by HyperBand.

    Also keeps track of progress to ensure good scheduling.
    """

    def __init__(self, time_attr, max_trials, init_t_attr, max_t_attr, eta, s):
        self._live_trials = {}  # maps trial -> current result
        self._all_trials = []
        self._time_attr = time_attr  # result attribute used to measure budget
        self._n = self._n0 = max_trials
        self._r = self._r0 = init_t_attr
        self._max_t_attr = max_t_attr
        # Cumulative budget milestone a trial must reach this round.
        self._cumul_r = self._r0

        self._eta = eta
        self._halves = s  # number of halving rounds remaining

        self._total_work = self._calculate_total_work(self._n0, self._r0, s)
        self._completed_progress = 0

    def add_trial(self, trial):
        """Add trial to bracket assuming bracket is not filled.

        At a later iteration, a newly added trial will be given equal
        opportunity to catch up."""
        assert not self.filled(), "Cannot add trial to filled bracket!"
        self._live_trials[trial] = None
        self._all_trials.append(trial)

    def cur_iter_done(self):
        """Checks if all iterations have completed.

        TODO(rliaw): also check that `t.iterations == self._r`"""
        return all(
            self._get_result_time(result) >= self._cumul_r
            for result in self._live_trials.values())

    def finished(self):
        # Done when no halving rounds remain and the last round completed.
        return self._halves == 0 and self.cur_iter_done()

    def current_trials(self):
        """Return the list of trials still live in this bracket."""
        return list(self._live_trials)

    def continue_trial(self, trial):
        """True while the trial has not yet reached the current milestone."""
        result = self._live_trials[trial]
        if self._get_result_time(result) < self._cumul_r:
            return True
        else:
            return False

    def filled(self):
        """Checks if bracket is filled.

        Only let new trials be added at current level minimizing the need
        to backtrack and bookkeep previous medians."""
        return len(self._live_trials) == self._n

    def successive_halving(self, metric, metric_op):
        """Advance one halving round; return (good, bad) trial lists.

        Trials are ranked by ``metric_op * result[metric]`` (larger is
        better); the top 1/eta fraction survive.
        """
        assert self._halves > 0
        self._halves -= 1
        self._n /= self._eta
        self._n = int(np.ceil(self._n))

        self._r *= self._eta
        self._r = int(min(self._r, self._max_t_attr - self._cumul_r))
        self._cumul_r = self._r
        sorted_trials = sorted(
            self._live_trials,
            key=lambda t: metric_op * self._live_trials[t][metric])

        good, bad = sorted_trials[-self._n:], sorted_trials[:-self._n]
        return good, bad

    def update_trial_stats(self, trial, result):
        """Update result for trial. Called after trial has finished
        an iteration - will decrement iteration count.

        TODO(rliaw): The other alternative is to keep the trials
        in and make sure they're not set as pending later."""

        assert trial in self._live_trials
        assert self._get_result_time(result) >= 0

        delta = self._get_result_time(result) - \
            self._get_result_time(self._live_trials[trial])
        assert delta >= 0
        self._completed_progress += delta
        self._live_trials[trial] = result

    def cleanup_trial(self, trial):
        """Clean up statistics tracking for terminated trials (either by force
        or otherwise).

        This may cause bad trials to continue for a long time, in the case
        where all the good trials finish early and there are only bad trials
        left in a bracket with a large max-iteration."""
        assert trial in self._live_trials
        del self._live_trials[trial]

    def cleanup_full(self, trial_runner):
        """Cleans up bracket after bracket is completely finished.

        Lets the last trial continue to run until termination condition
        kicks in."""
        for trial in self.current_trials():
            if (trial.status == Trial.PAUSED):
                trial_runner.stop_trial(trial)

    def completion_percentage(self):
        """Returns a progress metric.

        This will not be always finish with 100 since dead trials
        are dropped."""
        if self.finished():
            return 1.0
        return self._completed_progress / self._total_work

    def _get_result_time(self, result):
        # None result (no report yet) counts as zero elapsed budget.
        if result is None:
            return 0
        return result[self._time_attr]

    def _calculate_total_work(self, n, r, s):
        # Sum of (trials x iterations) over every halving round, mirroring
        # the arithmetic in successive_halving().
        work = 0
        cumulative_r = r
        for i in range(s + 1):
            work += int(n) * int(r)
            n /= self._eta
            n = int(np.ceil(n))
            r *= self._eta
            r = int(min(r, self._max_t_attr - cumulative_r))
        return work

    def __repr__(self):
        status = ", ".join([
            "Max Size (n)={}".format(self._n),
            "Milestone (r)={}".format(self._cumul_r),
            "completed={:.1%}".format(self.completion_percentage())
        ])
        counts = collections.Counter([t.status for t in self._all_trials])
        trial_statuses = ", ".join(
            sorted("{}: {}".format(k, v) for k, v in counts.items()))
        return "Bracket({}): {{{}}} ".format(status, trial_statuses)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/schedulers/median_stopping_rule.py
|
Python
|
import collections
import logging
import numpy as np
from ray.tune.trial import Trial
from ray.tune.schedulers.trial_scheduler import FIFOScheduler, TrialScheduler
logger = logging.getLogger(__name__)
class MedianStoppingRule(FIFOScheduler):
    """Implements the median stopping rule as described in the Vizier paper:
    https://research.google.com/pubs/pub46180.html
    Args:
        time_attr (str): The training result attr to use for comparing time.
            Note that you can pass in something non-temporal such as
            `training_iteration` as a measure of progress, the only requirement
            is that the attribute should increase monotonically.
        metric (str): The training result objective value attribute. Stopping
            procedures will use this attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        grace_period (float): Only stop trials at least this old in time.
            The mean will only be computed from this time onwards. The units
            are the same as the attribute named by `time_attr`.
        min_samples_required (int): Minimum number of trials to compute median
            over.
        min_time_slice (float): Each trial runs at least this long before
            yielding (assuming it isn't stopped). Note: trials ONLY yield if
            there are not enough samples to evaluate performance for the
            current result AND there are other trials waiting to run.
            The units are the same as the attribute named by `time_attr`.
        hard_stop (bool): If False, pauses trials instead of stopping
            them. When all other trials are complete, paused trials will be
            resumed and allowed to run FIFO.
    """
    def __init__(self,
                 time_attr="time_total_s",
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 grace_period=60.0,
                 min_samples_required=3,
                 min_time_slice=0,
                 hard_stop=True):
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Legacy `reward_attr` wins over `metric`/`mode` for
        # backwards compatibility (always implies maximization).
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        FIFOScheduler.__init__(self)
        # Trials already decided against (STOP or PAUSE was returned once).
        self._stopped_trials = set()
        self._grace_period = grace_period
        self._min_samples_required = min_samples_required
        self._min_time_slice = min_time_slice
        self._metric = metric
        assert mode in {"min", "max"}, "`mode` must be 'min' or 'max'."
        self._worst = float("-inf") if mode == "max" else float("inf")
        # `max` for maximization, `min` for minimization -- used to compare
        # a trial's best result against the population median.
        self._compare_op = max if mode == "max" else min
        self._time_attr = time_attr
        self._hard_stop = hard_stop
        self._trial_state = {}
        # Last `time_attr` value at which each trial was paused (for the
        # min_time_slice yielding logic).
        self._last_pause = collections.defaultdict(lambda: float("-inf"))
        # All results reported so far, keyed by trial.
        self._results = collections.defaultdict(list)
    def on_trial_result(self, trial_runner, trial, result):
        """Callback for early stopping.
        This stopping rule stops a running trial if the trial's best objective
        value by step `t` is strictly worse than the median of the running
        averages of all completed trials' objectives reported up to step `t`.
        """
        # Ignore results that lack either the time or the objective metric.
        if self._time_attr not in result or self._metric not in result:
            return TrialScheduler.CONTINUE
        if trial in self._stopped_trials:
            assert not self._hard_stop
            # Fall back to FIFO
            return TrialScheduler.CONTINUE
        time = result[self._time_attr]
        self._results[trial].append(result)
        # Never stop a trial before it has passed the grace period.
        if time < self._grace_period:
            return TrialScheduler.CONTINUE
        # Peer trials that have progressed at least as far as this one.
        trials = self._trials_beyond_time(time)
        trials.remove(trial)
        if len(trials) < self._min_samples_required:
            action = self._on_insufficient_samples(trial_runner, trial, time)
            if action == TrialScheduler.PAUSE:
                self._last_pause[trial] = time
                action_str = "Yielding time to other trials."
            else:
                action_str = "Continuing anyways."
            logger.debug(
                "MedianStoppingRule: insufficient samples={} to evaluate "
                "trial {} at t={}. {}".format(
                    len(trials), trial.trial_id, time, action_str))
            return action
        median_result = self._median_result(trials, time)
        best_result = self._best_result(trial)
        logger.debug("Trial {} best res={} vs median res={} at t={}".format(
            trial, best_result, median_result, time))
        # Stop/pause if the trial's best result is strictly worse than the
        # median of its peers' running means.
        if self._compare_op(median_result, best_result) != best_result:
            logger.debug("MedianStoppingRule: early stopping {}".format(trial))
            self._stopped_trials.add(trial)
            if self._hard_stop:
                return TrialScheduler.STOP
            else:
                return TrialScheduler.PAUSE
        else:
            return TrialScheduler.CONTINUE
    def on_trial_complete(self, trial_runner, trial, result):
        # Keep the final result so completed trials still contribute to
        # the median computation.
        self._results[trial].append(result)
    def debug_string(self):
        """One-line summary for console output."""
        return "Using MedianStoppingRule: num_stopped={}.".format(
            len(self._stopped_trials))
    def _on_insufficient_samples(self, trial_runner, trial, time):
        """Decide whether to PAUSE (yield) when too few peers exist.

        Pauses only if this trial has run at least `min_time_slice` since
        its last pause AND some other trial is waiting to run.
        """
        pause = time - self._last_pause[trial] > self._min_time_slice
        pause = pause and [
            t for t in trial_runner.get_trials()
            if t.status in (Trial.PENDING, Trial.PAUSED)
        ]
        return TrialScheduler.PAUSE if pause else TrialScheduler.CONTINUE
    def _trials_beyond_time(self, time):
        """Return trials whose latest reported time is at least `time`."""
        trials = [
            trial for trial in self._results
            if self._results[trial][-1][self._time_attr] >= time
        ]
        return trials
    def _median_result(self, trials, time):
        """Median of the running means of `trials` up to `time`."""
        return np.median([self._running_mean(trial, time) for trial in trials])
    def _running_mean(self, trial, time):
        """Mean objective of `trial` over [grace_period, time]."""
        results = self._results[trial]
        # TODO(ekl) we could do interpolation to be more precise, but for now
        # assume len(results) is large and the time diffs are roughly equal
        scoped_results = [
            r for r in results
            if self._grace_period <= r[self._time_attr] <= time
        ]
        return np.mean([r[self._metric] for r in scoped_results])
    def _best_result(self, trial):
        """Best (per `mode`) objective value the trial has reported so far."""
        results = self._results[trial]
        return self._compare_op([r[self._metric] for r in results])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/schedulers/pbt.py
|
Python
|
import copy
import itertools
import logging
import json
import math
import os
import random
import shutil
from ray.tune.error import TuneError
from ray.tune.result import TRAINING_ITERATION
from ray.tune.logger import _SafeFallbackEncoder
from ray.tune.schedulers import FIFOScheduler, TrialScheduler
from ray.tune.suggest.variant_generator import format_vars
from ray.tune.trial import Trial, Checkpoint
logger = logging.getLogger(__name__)
class PBTTrialState:
    """Internal PBT bookkeeping kept for each trial in the population."""

    def __init__(self, trial):
        # Original experiment tag, used when building the perturbed tag.
        self.orig_tag = trial.experiment_tag
        # Most recent (mode-adjusted) score; None until first result.
        self.last_score = None
        # In-memory checkpoint saved when the trial was in the top quantile.
        self.last_checkpoint = None
        # `time_attr` value at which the last perturbation happened.
        self.last_perturbation_time = 0

    def __repr__(self):
        snapshot = (self.last_score, self.last_checkpoint,
                    self.last_perturbation_time)
        return str(snapshot)
def explore(config, mutations, resample_probability, custom_explore_fn):
    """Return a config perturbed as specified.
    Args:
        config (dict): Original hyperparameter configuration.
        mutations (dict): Specification of mutations to perform as documented
            in the PopulationBasedTraining scheduler.
        resample_probability (float): Probability of allowing resampling of a
            particular variable.
        custom_explore_fn (func): Custom explore fn applied after built-in
            config perturbations are.

    Returns:
        dict: A deep copy of `config` with the mutated keys replaced.
    """
    # Deep copy so nested sub-configs of the original are never mutated.
    new_config = copy.deepcopy(config)
    for key, distribution in mutations.items():
        if isinstance(distribution, dict):
            # Nested spec: recurse into the sub-config. The custom explore
            # fn is only applied once, at the top level (hence None here).
            new_config.update({
                key: explore(config[key], mutations[key], resample_probability,
                             None)
            })
        elif isinstance(distribution, list):
            # Categorical: either resample uniformly, or shift to an
            # adjacent value (clamped to the ends of the list).
            if random.random() < resample_probability or \
                    config[key] not in distribution:
                new_config[key] = random.choice(distribution)
            elif random.random() > 0.5:
                new_config[key] = distribution[max(
                    0,
                    distribution.index(config[key]) - 1)]
            else:
                new_config[key] = distribution[min(
                    len(distribution) - 1,
                    distribution.index(config[key]) + 1)]
        else:
            # Continuous: `distribution` is a zero-arg sampling function.
            # Either resample, or scale the current value by 1.2 / 0.8.
            if random.random() < resample_probability:
                new_config[key] = distribution()
            elif random.random() > 0.5:
                new_config[key] = config[key] * 1.2
            else:
                new_config[key] = config[key] * 0.8
            # Preserve integer-ness of the original value (truncates).
            if type(config[key]) is int:
                new_config[key] = int(new_config[key])
    if custom_explore_fn:
        new_config = custom_explore_fn(new_config)
        assert new_config is not None, \
            "Custom explore fn failed to return new config"
    logger.info("[explore] perturbed config from {} -> {}".format(
        config, new_config))
    return new_config
def make_experiment_tag(orig_tag, config, mutations):
    """Append the perturbed params to the trial name shown in the console."""
    # Only keys listed in `mutations` are displayed; values come from the
    # (possibly perturbed) `config`.
    resolved_vars = {("config", key): config[key] for key in mutations.keys()}
    return "{}@perturbed[{}]".format(orig_tag, format_vars(resolved_vars))
class PopulationBasedTraining(FIFOScheduler):
    """Implements the Population Based Training (PBT) algorithm.
    https://deepmind.com/blog/population-based-training-neural-networks
    PBT trains a group of models (or agents) in parallel. Periodically, poorly
    performing models clone the state of the top performers, and a random
    mutation is applied to their hyperparameters in the hopes of
    outperforming the current top models.
    Unlike other hyperparameter search algorithms, PBT mutates hyperparameters
    during training time. This enables very fast hyperparameter discovery and
    also automatically discovers good annealing schedules.
    This Tune PBT implementation considers all trials added as part of the
    PBT population. If the number of trials exceeds the cluster capacity,
    they will be time-multiplexed as to balance training progress across the
    population. To run multiple trials, use `tune.run(num_samples=<int>)`.
    In {LOG_DIR}/{MY_EXPERIMENT_NAME}/, all mutations are logged in
    `pbt_global.txt` and individual policy perturbations are recorded
    in pbt_policy_{i}.txt. Tune logs: [target trial tag, clone trial tag,
    target trial iteration, clone trial iteration, old config, new config]
    on each perturbation step.
    Args:
        time_attr (str): The training result attr to use for comparing time.
            Note that you can pass in something non-temporal such as
            `training_iteration` as a measure of progress, the only requirement
            is that the attribute should increase monotonically.
        metric (str): The training result objective value attribute. Stopping
            procedures will use this attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        perturbation_interval (float): Models will be considered for
            perturbation at this interval of `time_attr`. Note that
            perturbation incurs checkpoint overhead, so you shouldn't set this
            to be too frequent.
        hyperparam_mutations (dict): Hyperparams to mutate. The format is
            as follows: for each key, either a list or function can be
            provided. A list specifies an allowed set of categorical values.
            A function specifies the distribution of a continuous parameter.
            You must specify at least one of `hyperparam_mutations` or
            `custom_explore_fn`.
        quantile_fraction (float): Parameters are transferred from the top
            `quantile_fraction` fraction of trials to the bottom
            `quantile_fraction` fraction. Needs to be between 0 and 0.5.
            Setting it to 0 essentially implies doing no exploitation at all.
        resample_probability (float): The probability of resampling from the
            original distribution when applying `hyperparam_mutations`. If not
            resampled, the value will be perturbed by a factor of 1.2 or 0.8
            if continuous, or changed to an adjacent value if discrete.
        custom_explore_fn (func): You can also specify a custom exploration
            function. This function is invoked as `f(config)` after built-in
            perturbations from `hyperparam_mutations` are applied, and should
            return `config` updated as needed. You must specify at least one of
            `hyperparam_mutations` or `custom_explore_fn`.
        log_config (bool): Whether to log the ray config of each model to
            local_dir at each exploit. Allows config schedule to be
            reconstructed.
    Example:
        >>> pbt = PopulationBasedTraining(
        >>>     time_attr="training_iteration",
        >>>     metric="episode_reward_mean",
        >>>     mode="max",
        >>>     perturbation_interval=10,  # every 10 `time_attr` units
        >>>                                # (training_iterations in this case)
        >>>     hyperparam_mutations={
        >>>         # Perturb factor1 by scaling it by 0.8 or 1.2. Resampling
        >>>         # resets it to a value sampled from the lambda function.
        >>>         "factor_1": lambda: random.uniform(0.0, 20.0),
        >>>         # Perturb factor2 by changing it to an adjacent value, e.g.
        >>>         # 10 -> 1 or 10 -> 100. Resampling will choose at random.
        >>>         "factor_2": [1, 10, 100, 1000, 10000],
        >>>     })
        >>> tune.run({...}, num_samples=8, scheduler=pbt)
    """

    def __init__(self,
                 time_attr="time_total_s",
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 perturbation_interval=60.0,
                 hyperparam_mutations=None,
                 quantile_fraction=0.25,
                 resample_probability=0.25,
                 custom_explore_fn=None,
                 log_config=True):
        # `None` (instead of a mutable `{}` default) avoids sharing one dict
        # across instances; semantics are unchanged for callers.
        if hyperparam_mutations is None:
            hyperparam_mutations = {}
        for value in hyperparam_mutations.values():
            if not (isinstance(value, list) or callable(value)):
                raise TypeError("`hyperparam_mutation` values must be either "
                                "a List or callable.")
        if not hyperparam_mutations and not custom_explore_fn:
            raise TuneError(
                "You must specify at least one of `hyperparam_mutations` or "
                "`custom_explore_fn` to use PBT.")
        if quantile_fraction > 0.5 or quantile_fraction < 0:
            raise TuneError(
                "You must set `quantile_fraction` to a value between 0 and"
                "0.5. Current value: '{}'".format(quantile_fraction))
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        # Legacy `reward_attr` overrides `metric`/`mode` (always maximizes).
        if reward_attr is not None:
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        FIFOScheduler.__init__(self)
        self._metric = metric
        # Scores are multiplied by this so that "higher is better" internally.
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        self._time_attr = time_attr
        self._perturbation_interval = perturbation_interval
        self._hyperparam_mutations = hyperparam_mutations
        self._quantile_fraction = quantile_fraction
        self._resample_probability = resample_probability
        self._trial_state = {}
        self._custom_explore_fn = custom_explore_fn
        self._log_config = log_config
        # Metrics
        self._num_checkpoints = 0
        self._num_perturbations = 0

    def on_trial_add(self, trial_runner, trial):
        """Start tracking PBT state for a newly added trial."""
        self._trial_state[trial] = PBTTrialState(trial)

    def on_trial_result(self, trial_runner, trial, result):
        """Checkpoint top trials and exploit/explore for bottom trials.

        Returns PAUSE when other trials are waiting so the population is
        time-multiplexed fairly; otherwise CONTINUE.
        """
        # Ignore results lacking the time or objective attribute.
        if self._time_attr not in result or self._metric not in result:
            return TrialScheduler.CONTINUE
        time = result[self._time_attr]
        state = self._trial_state[trial]
        if time - state.last_perturbation_time < self._perturbation_interval:
            return TrialScheduler.CONTINUE  # avoid checkpoint overhead
        score = self._metric_op * result[self._metric]
        state.last_score = score
        state.last_perturbation_time = time
        lower_quantile, upper_quantile = self._quantiles()
        if trial in upper_quantile:
            # The trial last result is only updated after the scheduler
            # callback. So, we override with the current result.
            state.last_checkpoint = trial_runner.trial_executor.save(
                trial, Checkpoint.MEMORY, result=result)
            self._num_checkpoints += 1
        else:
            state.last_checkpoint = None  # not a top trial
        if trial in lower_quantile:
            trial_to_clone = random.choice(upper_quantile)
            assert trial is not trial_to_clone
            self._exploit(trial_runner.trial_executor, trial, trial_to_clone)
        # Distinct loop name so the `trial` argument is not shadowed.
        for other_trial in trial_runner.get_trials():
            if other_trial.status in [Trial.PENDING, Trial.PAUSED]:
                return TrialScheduler.PAUSE  # yield time to other trials
        return TrialScheduler.CONTINUE

    def _log_config_on_step(self, trial_state, new_state, trial,
                            trial_to_clone, new_config):
        """Logs transition during exploit step.
        For each step, logs: [target trial tag, clone trial tag, target trial
        iteration, clone trial iteration, old config, new config].
        """
        trial_name, trial_to_clone_name = (trial_state.orig_tag,
                                           new_state.orig_tag)
        # The numeric prefix of the tag identifies the trial's policy file.
        trial_id = "".join(itertools.takewhile(str.isdigit, trial_name))
        trial_to_clone_id = "".join(
            itertools.takewhile(str.isdigit, trial_to_clone_name))
        trial_path = os.path.join(trial.local_dir,
                                  "pbt_policy_" + trial_id + ".txt")
        trial_to_clone_path = os.path.join(
            trial_to_clone.local_dir,
            "pbt_policy_" + trial_to_clone_id + ".txt")
        policy = [
            trial_name, trial_to_clone_name,
            trial.last_result.get(TRAINING_ITERATION, 0),
            trial_to_clone.last_result.get(TRAINING_ITERATION, 0),
            trial_to_clone.config, new_config
        ]
        # Log to global file.
        with open(os.path.join(trial.local_dir, "pbt_global.txt"), "a+") as f:
            print(json.dumps(policy, cls=_SafeFallbackEncoder), file=f)
        # Overwrite state in target trial from trial_to_clone.
        if os.path.exists(trial_to_clone_path):
            shutil.copyfile(trial_to_clone_path, trial_path)
        # Log new exploit in target trial log.
        with open(trial_path, "a+") as f:
            f.write(json.dumps(policy, cls=_SafeFallbackEncoder) + "\n")

    def _exploit(self, trial_executor, trial, trial_to_clone):
        """Transfers perturbed state from trial_to_clone -> trial.
        If specified, also logs the updated hyperparam state.
        """
        trial_state = self._trial_state[trial]
        new_state = self._trial_state[trial_to_clone]
        # Can't exploit a trial whose checkpoint was cleared (not a top
        # trial at its last perturbation).
        if not new_state.last_checkpoint:
            logger.info("[pbt]: no checkpoint for trial."
                        " Skip exploit for Trial {}".format(trial))
            return
        new_config = explore(trial_to_clone.config, self._hyperparam_mutations,
                             self._resample_probability,
                             self._custom_explore_fn)
        logger.info("[exploit] transferring weights from trial "
                    "{} (score {}) -> {} (score {})".format(
                        trial_to_clone, new_state.last_score, trial,
                        trial_state.last_score))
        if self._log_config:
            self._log_config_on_step(trial_state, new_state, trial,
                                     trial_to_clone, new_config)
        new_tag = make_experiment_tag(trial_state.orig_tag, new_config,
                                      self._hyperparam_mutations)
        # Try an in-place reset first; fall back to stop + restart with the
        # new config when the executor can't reset the trial.
        reset_successful = trial_executor.reset_trial(trial, new_config,
                                                      new_tag)
        if reset_successful:
            trial_executor.restore(
                trial, Checkpoint.from_object(new_state.last_checkpoint))
        else:
            trial_executor.stop_trial(trial, stop_logger=False)
            trial.config = new_config
            trial.experiment_tag = new_tag
            trial_executor.start_trial(
                trial, Checkpoint.from_object(new_state.last_checkpoint))
        self._num_perturbations += 1
        # Transfer over the last perturbation time as well
        trial_state.last_perturbation_time = new_state.last_perturbation_time

    def _quantiles(self):
        """Returns trials in the lower and upper `quantile` of the population.
        If there is not enough data to compute this, returns empty lists.
        """
        trials = []
        for trial, state in self._trial_state.items():
            if state.last_score is not None and not trial.is_finished():
                trials.append(trial)
        trials.sort(key=lambda t: self._trial_state[t].last_score)
        if len(trials) <= 1:
            return [], []
        else:
            num_trials_in_quantile = int(
                math.ceil(len(trials) * self._quantile_fraction))
            # Cap so the two quantiles can never overlap.
            if num_trials_in_quantile > len(trials) / 2:
                num_trials_in_quantile = int(math.floor(len(trials) / 2))
            return (trials[:num_trials_in_quantile],
                    trials[-num_trials_in_quantile:])

    def choose_trial_to_run(self, trial_runner):
        """Ensures all trials get fair share of time (as defined by time_attr).
        This enables the PBT scheduler to support a greater number of
        concurrent trials than can fit in the cluster at any given time.
        """
        candidates = []
        for trial in trial_runner.get_trials():
            if trial.status in [Trial.PENDING, Trial.PAUSED] and \
                    trial_runner.has_resources(trial.resources):
                candidates.append(trial)
        # Least-recently-perturbed first, i.e. the trial furthest behind.
        candidates.sort(
            key=lambda trial: self._trial_state[trial].last_perturbation_time)
        return candidates[0] if candidates else None

    def reset_stats(self):
        """Reset the perturbation/checkpoint counters (used by tests)."""
        self._num_perturbations = 0
        self._num_checkpoints = 0

    def last_scores(self, trials):
        """Return the last recorded scores of the given unfinished trials."""
        scores = []
        for trial in trials:
            state = self._trial_state[trial]
            if state.last_score is not None and not trial.is_finished():
                scores.append(state.last_score)
        return scores

    def debug_string(self):
        """One-line summary for console output."""
        return "PopulationBasedTraining: {} checkpoints, {} perturbs".format(
            self._num_checkpoints, self._num_perturbations)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/schedulers/trial_scheduler.py
|
Python
|
from ray.tune.trial import Trial
class TrialScheduler:
    """Interface for implementing a Trial Scheduler class.

    Result callbacks return one of the three status constants below to tell
    the trial runner what to do with the trial.
    """
    CONTINUE = "CONTINUE"  #: Status for continuing trial execution
    PAUSE = "PAUSE"  #: Status for pausing trial execution
    STOP = "STOP"  #: Status for stopping trial execution
    def on_trial_add(self, trial_runner, trial):
        """Called when a new trial is added to the trial runner."""
        raise NotImplementedError
    def on_trial_error(self, trial_runner, trial):
        """Notification for the error of trial.
        This will only be called when the trial is in the RUNNING state."""
        raise NotImplementedError
    def on_trial_result(self, trial_runner, trial, result):
        """Called on each intermediate result returned by a trial.
        At this point, the trial scheduler can make a decision by returning
        one of CONTINUE, PAUSE, and STOP. This will only be called when the
        trial is in the RUNNING state."""
        raise NotImplementedError
    def on_trial_complete(self, trial_runner, trial, result):
        """Notification for the completion of trial.
        This will only be called when the trial is in the RUNNING state and
        either completes naturally or by manual termination."""
        raise NotImplementedError
    def on_trial_remove(self, trial_runner, trial):
        """Called to remove trial.
        This is called when the trial is in PAUSED or PENDING state. Otherwise,
        call `on_trial_complete`."""
        raise NotImplementedError
    def choose_trial_to_run(self, trial_runner):
        """Called to choose a new trial to run.
        This should return one of the trials in trial_runner that is in
        the PENDING or PAUSED state. This function must be idempotent.
        If no trial is ready, return None."""
        raise NotImplementedError
    def debug_string(self):
        """Returns a human readable message for printing to the console."""
        raise NotImplementedError
class FIFOScheduler(TrialScheduler):
    """Simple scheduler that just runs trials in submission order."""

    def on_trial_add(self, trial_runner, trial):
        pass

    def on_trial_error(self, trial_runner, trial):
        pass

    def on_trial_result(self, trial_runner, trial, result):
        # FIFO never stops or pauses a trial based on its results.
        return TrialScheduler.CONTINUE

    def on_trial_complete(self, trial_runner, trial, result):
        pass

    def on_trial_remove(self, trial_runner, trial):
        pass

    def choose_trial_to_run(self, trial_runner):
        """Pick the first runnable PENDING trial, else the first PAUSED one."""
        # Two passes over the trials: pending ones take priority.
        for wanted_status in (Trial.PENDING, Trial.PAUSED):
            for candidate in trial_runner.get_trials():
                if (candidate.status == wanted_status
                        and trial_runner.has_resources(candidate.resources)):
                    return candidate
        return None

    def debug_string(self):
        return "Using FIFO scheduling algorithm."
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/scripts.py
|
Python
|
import click
import ray.tune.commands as commands
# Top-level command group for the Tune CLI; subcommands and their aliases
# are attached below via `cli.add_command`. (No docstring on purpose --
# a docstring would become click's help text.)
@click.group()
def cli():
    pass
@cli.command()
@click.argument("experiment_path", required=True, type=str)
@click.option(
    "--sort", default=None, type=str, help="Select which column to sort on.")
@click.option(
    "--output",
    "-o",
    default=None,
    type=str,
    help="Select file to output information to.")
@click.option(
    "--filter",
    "filter_op",
    default=None,
    type=str,
    help="Select filter in the format '<column> <operator> <value>'.")
@click.option(
    "--columns",
    default=None,
    type=str,
    help="Select columns to be displayed.")
@click.option(
    "--limit",
    default=None,
    type=int,
    help="Select number of rows to display.")
@click.option(
    "--desc", default=False, type=bool, help="Sort ascending vs. descending.")
def list_trials(experiment_path, sort, output, filter_op, columns, limit,
                desc):
    """Lists trials in the directory subtree starting at the given path."""
    # Comma-separated CLI values become lists; empty/None pass through as-is.
    sort = sort.split(",") if sort else sort
    columns = columns.split(",") if columns else columns
    commands.list_trials(experiment_path, sort, output, filter_op, columns,
                         limit, desc)
@cli.command()
@click.argument("project_path", required=True, type=str)
@click.option(
    "--sort", default=None, type=str, help="Select which column to sort on.")
@click.option(
    "--output",
    "-o",
    default=None,
    type=str,
    help="Select file to output information to.")
@click.option(
    "--filter",
    "filter_op",
    default=None,
    type=str,
    help="Select filter in the format '<column> <operator> <value>'.")
@click.option(
    "--columns",
    default=None,
    type=str,
    help="Select columns to be displayed.")
@click.option(
    "--limit",
    default=None,
    type=int,
    help="Select number of rows to display.")
@click.option(
    "--desc", default=False, type=bool, help="Sort ascending vs. descending.")
def list_experiments(project_path, sort, output, filter_op, columns, limit,
                     desc):
    """Lists experiments in the directory subtree."""
    # Comma-separated CLI values become lists; empty/None pass through as-is.
    sort = sort.split(",") if sort else sort
    columns = columns.split(",") if columns else columns
    commands.list_experiments(project_path, sort, output, filter_op, columns,
                              limit, desc)
@cli.command()
@click.argument("path", required=True, type=str)
@click.option(
    "--filename",
    default="note.txt",
    type=str,
    help="Specify filename for note.")
def add_note(path, filename):
    """Adds user notes as a text file at the given path."""
    # Thin CLI wrapper; the actual file handling lives in tune.commands.
    commands.add_note(path, filename)
# Register each subcommand under its canonical name and a short alias.
# Registration order matches the original explicit add_command calls.
for _alias, _command in [
        ("ls", list_trials),
        ("list-trials", list_trials),
        ("lsx", list_experiments),
        ("list-experiments", list_experiments),
        ("add-note", add_note),
]:
    cli.add_command(_command, name=_alias)
def main():
    # Console-script entry point: dispatch straight to the click group.
    return cli()
if __name__ == "__main__":
    main()
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/__init__.py
|
Python
|
from ray.tune.suggest.search import SearchAlgorithm
from ray.tune.suggest.basic_variant import BasicVariantGenerator
from ray.tune.suggest.suggestion import SuggestionAlgorithm
from ray.tune.suggest.variant_generator import grid_search
from ray.tune.suggest.bohb import TuneBOHB
# Public API of `ray.tune.suggest`. The deprecated factory shims defined
# below are intentionally excluded from this list.
__all__ = [
    "SearchAlgorithm", "BasicVariantGenerator", "SuggestionAlgorithm",
    "grid_search", "TuneBOHB"
]
def BayesOptSearch(*args, **kwargs):
    # Relocation shim: raising (rather than warnings.warn) makes the old
    # import path fail loudly instead of silently misbehaving.
    raise DeprecationWarning("""This class has been moved. Please import via
    `from ray.tune.suggest.bayesopt import BayesOptSearch`""")
def HyperOptSearch(*args, **kwargs):
    # Relocation shim: raising makes the old import path fail loudly.
    raise DeprecationWarning("""This class has been moved. Please import via
    `from ray.tune.suggest.hyperopt import HyperOptSearch`""")
def NevergradSearch(*args, **kwargs):
    # Relocation shim: raising makes the old import path fail loudly.
    raise DeprecationWarning("""This class has been moved. Please import via
    `from ray.tune.suggest.nevergrad import NevergradSearch`""")
def SkOptSearch(*args, **kwargs):
    # Relocation shim: raising makes the old import path fail loudly.
    raise DeprecationWarning("""This class has been moved. Please import via
    `from ray.tune.suggest.skopt import SkOptSearch`""")
def SigOptSearch(*args, **kwargs):
    # Relocation shim: raising makes the old import path fail loudly.
    raise DeprecationWarning("""This class has been moved. Please import via
    `from ray.tune.suggest.sigopt import SigOptSearch`""")
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/ax.py
|
Python
|
try:
import ax
except ImportError:
ax = None
import logging
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class AxSearch(SuggestionAlgorithm):
    """A wrapper around Ax to provide trial suggestions.
    Requires Ax to be installed. Ax is an open source tool from
    Facebook for configuring and optimizing experiments. More information
    can be found in https://ax.dev/.
    Parameters:
        ax_client: An Ax ``AxClient`` instance. The search space, objective
            and outcome constraints are taken from the client's experiment;
            the objective metric must be present in the dict
            reported/returned by the Trainable.
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10. Forced to 1 when the client enforces sequential
            optimization.
        **kwargs: Forwarded to ``SuggestionAlgorithm``.
    Example:
        >>> client = AxClient(enforce_sequential_optimization=False)
        >>> client.create_experiment(parameters=parameters,
        >>>                          objective_name="hartmann6")
        >>> algo = AxSearch(client, max_concurrent=4)
    """
    def __init__(self, ax_client, max_concurrent=10, **kwargs):
        assert ax is not None, "Ax must be installed!"
        assert type(max_concurrent) is int and max_concurrent > 0
        self._ax = ax_client
        exp = self._ax.experiment
        # Objective name comes from the client's optimization config.
        self._objective_name = exp.optimization_config.objective.metric.name
        # Ax's sequential mode is incompatible with concurrent suggestions.
        if self._ax._enforce_sequential_optimization:
            logger.warning("Detected sequential enforcement. Setting max "
                           "concurrency to 1.")
            max_concurrent = 1
        self._max_concurrent = max_concurrent
        self._parameters = list(exp.parameters)
        # Maps Tune trial_id -> Ax trial index for live trials.
        self._live_index_mapping = {}
        super(AxSearch, self).__init__(**kwargs)
    def _suggest(self, trial_id):
        """Ask Ax for the next parameter configuration.

        Returns None (no suggestion) while at the concurrency limit.
        """
        if self._num_live_trials() >= self._max_concurrent:
            return None
        parameters, trial_index = self._ax.get_next_trial()
        self._live_index_mapping[trial_id] = trial_index
        return parameters
    def on_trial_result(self, trial_id, result):
        # Intermediate results are not reported to Ax; only completion is.
        pass
    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial.
        Data of form key value dictionary of metric names and values.
        """
        if result:
            self._process_result(trial_id, result, early_terminated)
        self._live_index_mapping.pop(trial_id)
    def _process_result(self, trial_id, result, early_terminated=False):
        """Report the final metrics of a trial back to Ax."""
        # NOTE(review): `self._use_early_stopped` is not set anywhere in this
        # class -- presumably initialized by SuggestionAlgorithm; confirm.
        if early_terminated and self._use_early_stopped is False:
            return
        ax_trial_index = self._live_index_mapping[trial_id]
        # Ax expects (mean, sem) tuples; 0.0 declares the values noiseless.
        metric_dict = {
            self._objective_name: (result[self._objective_name], 0.0)
        }
        outcome_names = [
            oc.metric.name for oc in
            self._ax.experiment.optimization_config.outcome_constraints
        ]
        metric_dict.update({on: (result[on], 0.0) for on in outcome_names})
        self._ax.complete_trial(
            trial_index=ax_trial_index, raw_data=metric_dict)
    def _num_live_trials(self):
        """Number of trials currently tracked as running."""
        return len(self._live_index_mapping)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/basic_variant.py
|
Python
|
import itertools
import random
from ray.tune.error import TuneError
from ray.tune.experiment import convert_to_experiment_list
from ray.tune.config_parser import make_parser, create_trial_from_spec
from ray.tune.suggest.variant_generator import (generate_variants, format_vars,
flatten_resolved_vars)
from ray.tune.suggest.search import SearchAlgorithm
class BasicVariantGenerator(SearchAlgorithm):
    """Uses Tune's variant generation for resolving variables.
    See also: `ray.tune.suggest.variant_generator`.
    Example:
        >>> searcher = BasicVariantGenerator()
        >>> searcher.add_configurations({"experiment": { ... }})
        >>> list_of_trials = searcher.next_trials()
        >>> searcher.is_finished == True
    """
    def __init__(self, shuffle=False):
        """Initializes the Variant Generator.
        Arguments:
            shuffle (bool): Shuffles the generated list of configurations.
        """
        self._parser = make_parser()
        # Lazily-chained generator of Trial objects; starts out empty.
        self._trial_generator = []
        # Running count used to build unique experiment tags.
        self._counter = 0
        self._finished = False
        self._shuffle = shuffle
    def add_configurations(self, experiments):
        """Chains generator given experiment specifications.
        Arguments:
            experiments (Experiment | list | dict): Experiments to run.
        """
        experiment_list = convert_to_experiment_list(experiments)
        for experiment in experiment_list:
            # Chain rather than replace, so multiple calls accumulate.
            self._trial_generator = itertools.chain(
                self._trial_generator,
                self._generate_trials(experiment.spec, experiment.name))
    def next_trials(self):
        """Provides Trial objects to be queued into the TrialRunner.
        Returns:
            trials (list): Returns a list of trials.
        """
        # Materializes the entire generator at once and marks the search
        # finished -- later add_configurations calls won't be consumed.
        trials = list(self._trial_generator)
        if self._shuffle:
            random.shuffle(trials)
        self._finished = True
        return trials
    def _generate_trials(self, unresolved_spec, output_path=""):
        """Generates Trial objects with the variant generation process.
        Uses a fixed point iteration to resolve variants. All trials
        should be able to be generated at once.
        See also: `ray.tune.suggest.variant_generator`.
        Yields:
            Trial object
        """
        if "run" not in unresolved_spec:
            raise TuneError("Must specify `run` in {}".format(unresolved_spec))
        # Each sample re-resolves the spec, producing fresh variants.
        for _ in range(unresolved_spec.get("num_samples", 1)):
            for resolved_vars, spec in generate_variants(unresolved_spec):
                experiment_tag = str(self._counter)
                if resolved_vars:
                    experiment_tag += "_{}".format(format_vars(resolved_vars))
                self._counter += 1
                yield create_trial_from_spec(
                    spec,
                    output_path,
                    self._parser,
                    evaluated_params=flatten_resolved_vars(resolved_vars),
                    experiment_tag=experiment_tag)
    def is_finished(self):
        """Whether next_trials() has already been consumed."""
        return self._finished
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/bayesopt.py
|
Python
|
import copy
import logging
import pickle
try: # Python 3 only -- needed for lint test.
import bayes_opt as byo
except ImportError:
byo = None
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class BayesOptSearch(SuggestionAlgorithm):
    """A wrapper around BayesOpt to provide trial suggestions.

    Requires BayesOpt to be installed. You can install BayesOpt with the
    command: `pip install bayesian-optimization`.

    Parameters:
        space (dict): Continuous search space. Parameters will be sampled from
            this space which will be used to run trials.
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        utility_kwargs (dict): Parameters to define the utility function. Must
            provide values for the keys `kind`, `kappa`, and `xi`.
        random_state (int): Used to initialize BayesOpt.
        verbose (int): Sets verbosity level for BayesOpt packages.
        use_early_stopped_trials (bool): Whether to use early terminated
            trial results in the optimization process.

    Example:
        >>> space = {
        >>>     'width': (0, 20),
        >>>     'height': (-100, 100),
        >>> }
        >>> algo = BayesOptSearch(
        >>>     space, max_concurrent=4, metric="mean_loss", mode="min")
    """

    def __init__(self,
                 space,
                 max_concurrent=10,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 utility_kwargs=None,
                 random_state=1,
                 verbose=0,
                 **kwargs):
        assert byo is not None, (
            "BayesOpt must be installed!. You can install BayesOpt with"
            " the command: `pip install bayesian-optimization`.")
        # isinstance is the idiomatic type check (was `type(...) is int`).
        assert isinstance(max_concurrent, int) and max_concurrent > 0
        assert utility_kwargs is not None, (
            "Must define arguments for the utility function!")
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        if reward_attr is not None:
            # Legacy parameter: maps onto metric/mode and warns.
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        self._max_concurrent = max_concurrent
        self._metric = metric
        # BayesOpt maximizes its target internally, so negate for "min".
        if mode == "max":
            self._metric_op = 1.
        elif mode == "min":
            self._metric_op = -1.
        # Maps trial_id -> suggested parameter dict for in-flight trials.
        self._live_trial_mapping = {}
        self.optimizer = byo.BayesianOptimization(
            f=None, pbounds=space, verbose=verbose, random_state=random_state)
        self.utility = byo.UtilityFunction(**utility_kwargs)
        super(BayesOptSearch, self).__init__(**kwargs)

    def _suggest(self, trial_id):
        """Returns a new parameter suggestion, or None when at capacity."""
        if self._num_live_trials() >= self._max_concurrent:
            return None
        new_trial = self.optimizer.suggest(self.utility)
        self._live_trial_mapping[trial_id] = new_trial
        # Deep copy so callers cannot mutate the params we later register.
        return copy.deepcopy(new_trial)

    def on_trial_result(self, trial_id, result):
        # Intermediate results are not fed back to BayesOpt.
        pass

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial."""
        if result:
            self._process_result(trial_id, result, early_terminated)
        del self._live_trial_mapping[trial_id]

    def _process_result(self, trial_id, result, early_terminated=False):
        """Registers the trial's final objective value with the optimizer."""
        if early_terminated and self._use_early_stopped is False:
            return
        self.optimizer.register(
            params=self._live_trial_mapping[trial_id],
            target=self._metric_op * result[self._metric])

    def _num_live_trials(self):
        return len(self._live_trial_mapping)

    def save(self, checkpoint_dir):
        """Pickles the optimizer state to the file path `checkpoint_dir`."""
        with open(checkpoint_dir, "wb") as output:
            pickle.dump(self.optimizer, output)

    def restore(self, checkpoint_dir):
        """Restores the optimizer state from the file path `checkpoint_dir`."""
        # Renamed from `input`, which shadowed the builtin.
        with open(checkpoint_dir, "rb") as f:
            self.optimizer = pickle.load(f)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/bohb.py
|
Python
|
"""BOHB (Bayesian Optimization with HyperBand)"""
import copy
import logging
from ray.tune.suggest import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class _BOHBJobWrapper():
"""Mock object for HpBandSter to process."""
def __init__(self, loss, budget, config):
self.result = {"loss": loss}
self.kwargs = {"budget": budget, "config": config.copy()}
self.exception = None
class TuneBOHB(SuggestionAlgorithm):
    """BOHB suggestion component.

    Requires HpBandSter and ConfigSpace to be installed. You can install
    HpBandSter and ConfigSpace with: `pip install hpbandster ConfigSpace`.

    This should be used in conjunction with HyperBandForBOHB.

    Args:
        space (ConfigurationSpace): Continuous ConfigSpace search space.
            Parameters will be sampled from this space which will be used
            to run trials.
        bohb_config (dict): configuration for HpBandSter BOHB algorithm
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.

    Example:
        >>> import ConfigSpace as CS
        >>> config_space = CS.ConfigurationSpace()
        >>> config_space.add_hyperparameter(
                CS.UniformFloatHyperparameter('width', lower=0, upper=20))
        >>> config_space.add_hyperparameter(
                CS.UniformFloatHyperparameter('height', lower=-100, upper=100))
        >>> config_space.add_hyperparameter(
                CS.CategoricalHyperparameter(
                    name='activation', choices=['relu', 'tanh']))
        >>> algo = TuneBOHB(
                config_space, max_concurrent=4, metric='mean_loss', mode='min')
        >>> bohb = HyperBandForBOHB(
                time_attr='training_iteration',
                metric='mean_loss',
                mode='min',
                max_t=100)
        >>> run(MyTrainableClass, scheduler=bohb, search_alg=algo)
    """

    def __init__(self,
                 space,
                 bohb_config=None,
                 max_concurrent=10,
                 metric="neg_mean_loss",
                 mode="max"):
        # The original `assert BOHB is not None` could never fire: a failed
        # import raises before the assert runs. Re-raise with instructions.
        try:
            from hpbandster.optimizers.config_generators.bohb import BOHB
        except ImportError:
            raise ImportError(
                "HpBandSter must be installed! You can install HpBandSter "
                "with: `pip install hpbandster ConfigSpace`.")
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        self._max_concurrent = max_concurrent
        # Maps trial_id -> suggested config for all known trials.
        self.trial_to_params = {}
        # Bookkeeping sets used to enforce the concurrency limit.
        self.running = set()
        self.paused = set()
        self.metric = metric
        # BOHB minimizes its loss, so negate when maximizing the metric.
        if mode == "max":
            self._metric_op = -1.
        elif mode == "min":
            self._metric_op = 1.
        bohb_config = bohb_config or {}
        self.bohber = BOHB(space, **bohb_config)
        super(TuneBOHB, self).__init__()

    def _suggest(self, trial_id):
        """Returns a new config suggestion, or None when at capacity."""
        if len(self.running) < self._max_concurrent:
            # This parameter is not used in hpbandster implementation.
            config, info = self.bohber.get_config(None)
            self.trial_to_params[trial_id] = copy.deepcopy(config)
            self.running.add(trial_id)
            return config
        return None

    def on_trial_result(self, trial_id, result):
        """Feeds budget-annotated intermediate results back to BOHB."""
        if trial_id not in self.paused:
            self.running.add(trial_id)
        if "hyperband_info" not in result:
            logger.warning("BOHB Info not detected in result. Are you using "
                           "HyperBandForBOHB as a scheduler?")
        elif "budget" in result.get("hyperband_info", {}):
            hbs_wrapper = self.to_wrapper(trial_id, result)
            self.bohber.new_result(hbs_wrapper)

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Drops all bookkeeping for the finished trial."""
        del self.trial_to_params[trial_id]
        # discard() is a no-op when absent (was `if ... in: ... remove`).
        self.paused.discard(trial_id)
        self.running.discard(trial_id)

    def to_wrapper(self, trial_id, result):
        """Packs a result into the job shape HpBandSter expects."""
        return _BOHBJobWrapper(self._metric_op * result[self.metric],
                               result["hyperband_info"]["budget"],
                               self.trial_to_params[trial_id])

    def on_pause(self, trial_id):
        self.paused.add(trial_id)
        self.running.remove(trial_id)

    def on_unpause(self, trial_id):
        self.paused.remove(trial_id)
        self.running.add(trial_id)
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
|
python/ray/tune/suggest/hyperopt.py
|
Python
|
import numpy as np
import copy
import logging
from functools import partial
import pickle
try:
hyperopt_logger = logging.getLogger("hyperopt")
hyperopt_logger.setLevel(logging.WARNING)
import hyperopt as hpo
except ImportError:
hpo = None
from ray.tune.error import TuneError
from ray.tune.suggest.suggestion import SuggestionAlgorithm
logger = logging.getLogger(__name__)
class HyperOptSearch(SuggestionAlgorithm):
    """A wrapper around HyperOpt to provide trial suggestions.

    Requires HyperOpt to be installed from source.
    Uses the Tree-structured Parzen Estimators algorithm, although can be
    trivially extended to support any algorithm HyperOpt uses. Externally
    added trials will not be tracked by HyperOpt. Trials of the current run
    can be saved using save method, trials of a previous run can be loaded
    using restore method, thus enabling a warm start feature.

    Parameters:
        space (dict): HyperOpt configuration. Parameters will be sampled
            from this configuration and will be used to override
            parameters generated in the variant generation process.
        max_concurrent (int): Number of maximum concurrent trials. Defaults
            to 10.
        metric (str): The training result objective value attribute.
        mode (str): One of {min, max}. Determines whether objective is
            minimizing or maximizing the metric attribute.
        points_to_evaluate (list): Initial parameter suggestions to be run
            first. This is for when you already have some good parameters
            you want hyperopt to run first to help the TPE algorithm
            make better suggestions for future parameters. Needs to be
            a list of dict of hyperopt-named variables.
            Choice variables should be indicated by their index in the
            list (see example)
        n_initial_points (int): number of random evaluations of the
            objective function before starting to aproximate it with
            tree parzen estimators. Defaults to 20.
        random_state_seed (int, array_like, None): seed for reproducible
            results. Defaults to None.
        gamma (float in range (0,1)): parameter governing the tree parzen
            estimators suggestion algorithm. Defaults to 0.25.
        use_early_stopped_trials (bool): Whether to use early terminated
            trial results in the optimization process.

    Example:
        >>> space = {
        >>>     'width': hp.uniform('width', 0, 20),
        >>>     'height': hp.uniform('height', -100, 100),
        >>>     'activation': hp.choice("activation", ["relu", "tanh"])
        >>> }
        >>> current_best_params = [{
        >>>     'width': 10,
        >>>     'height': 0,
        >>>     'activation': 0, # The index of "relu"
        >>> }]
        >>> algo = HyperOptSearch(
        >>>     space, max_concurrent=4, metric="mean_loss", mode="min",
        >>>     points_to_evaluate=current_best_params)
    """

    def __init__(self,
                 space,
                 max_concurrent=10,
                 reward_attr=None,
                 metric="episode_reward_mean",
                 mode="max",
                 points_to_evaluate=None,
                 n_initial_points=20,
                 random_state_seed=None,
                 gamma=0.25,
                 **kwargs):
        assert hpo is not None, "HyperOpt must be installed!"
        from hyperopt.fmin import generate_trials_to_calculate
        # isinstance is the idiomatic type check (was `type(...) is int`).
        assert isinstance(max_concurrent, int) and max_concurrent > 0
        assert mode in ["min", "max"], "`mode` must be 'min' or 'max'!"
        if reward_attr is not None:
            # Legacy parameter: maps onto metric/mode and warns.
            mode = "max"
            metric = reward_attr
            logger.warning(
                "`reward_attr` is deprecated and will be removed in a future "
                "version of Tune. "
                "Setting `metric={}` and `mode=max`.".format(reward_attr))
        self._max_concurrent = max_concurrent
        self._metric = metric
        # hyperopt internally minimizes, so "max" => -1
        if mode == "max":
            self._metric_op = -1.
        elif mode == "min":
            self._metric_op = 1.
        if n_initial_points is None:
            self.algo = hpo.tpe.suggest
        else:
            self.algo = partial(
                hpo.tpe.suggest, n_startup_jobs=n_initial_points)
        if gamma is not None:
            self.algo = partial(self.algo, gamma=gamma)
        self.domain = hpo.Domain(lambda spc: spc, space)
        if points_to_evaluate is None:
            self._hpopt_trials = hpo.Trials()
            self._points_to_evaluate = 0
        else:
            assert isinstance(points_to_evaluate, list)
            self._hpopt_trials = generate_trials_to_calculate(
                points_to_evaluate)
            self._hpopt_trials.refresh()
            self._points_to_evaluate = len(points_to_evaluate)
        # Maps trial_id -> (hyperopt tid, hyperopt trial doc).
        self._live_trial_mapping = {}
        if random_state_seed is None:
            self.rstate = np.random.RandomState()
        else:
            self.rstate = np.random.RandomState(random_state_seed)
        super(HyperOptSearch, self).__init__(**kwargs)

    def _suggest(self, trial_id):
        """Returns a new parameter suggestion, or None when at capacity."""
        if self._num_live_trials() >= self._max_concurrent:
            return None
        if self._points_to_evaluate > 0:
            # NOTE(review): pre-seeded points are consumed from the back of
            # the list (last first) — confirm this ordering is intended.
            new_trial = self._hpopt_trials.trials[self._points_to_evaluate - 1]
            self._points_to_evaluate -= 1
        else:
            new_ids = self._hpopt_trials.new_trial_ids(1)
            self._hpopt_trials.refresh()
            # Get new suggestion from Hyperopt
            new_trials = self.algo(new_ids, self.domain, self._hpopt_trials,
                                   self.rstate.randint(2**31 - 1))
            self._hpopt_trials.insert_trial_docs(new_trials)
            self._hpopt_trials.refresh()
            new_trial = new_trials[0]
        self._live_trial_mapping[trial_id] = (new_trial["tid"], new_trial)
        # Taken from HyperOpt.base.evaluate
        config = hpo.base.spec_from_misc(new_trial["misc"])
        ctrl = hpo.base.Ctrl(self._hpopt_trials, current_trial=new_trial)
        memo = self.domain.memo_from_config(config)
        hpo.utils.use_obj_for_literal_in_memo(self.domain.expr, ctrl,
                                              hpo.base.Ctrl, memo)
        suggested_config = hpo.pyll.rec_eval(
            self.domain.expr,
            memo=memo,
            print_node_on_error=self.domain.rec_eval_print_node_on_error)
        return copy.deepcopy(suggested_config)

    def on_trial_result(self, trial_id, result):
        """Refreshes the hyperopt trial's bookkeeping timestamps."""
        ho_trial = self._get_hyperopt_trial(trial_id)
        if ho_trial is None:
            return
        now = hpo.utils.coarse_utcnow()
        ho_trial["book_time"] = now
        ho_trial["refresh_time"] = now

    def on_trial_complete(self,
                          trial_id,
                          result=None,
                          error=False,
                          early_terminated=False):
        """Notification for the completion of trial.

        The result is internally negated when interacting with HyperOpt
        so that HyperOpt can "maximize" this value, as it minimizes on default.
        """
        ho_trial = self._get_hyperopt_trial(trial_id)
        if ho_trial is None:
            return
        ho_trial["refresh_time"] = hpo.utils.coarse_utcnow()
        if error:
            ho_trial["state"] = hpo.base.JOB_STATE_ERROR
            ho_trial["misc"]["error"] = (str(TuneError), "Tune Error")
            self._hpopt_trials.refresh()
        else:
            self._process_result(trial_id, result, early_terminated)
        del self._live_trial_mapping[trial_id]

    def _process_result(self, trial_id, result, early_terminated=False):
        """Records the final (negated) objective value with hyperopt."""
        ho_trial = self._get_hyperopt_trial(trial_id)
        ho_trial["refresh_time"] = hpo.utils.coarse_utcnow()
        if early_terminated and self._use_early_stopped is False:
            # Mark as errored so TPE ignores the early-stopped result.
            ho_trial["state"] = hpo.base.JOB_STATE_ERROR
            ho_trial["misc"]["error"] = (str(TuneError), "Tune Removed")
            return
        ho_trial["state"] = hpo.base.JOB_STATE_DONE
        hp_result = self._to_hyperopt_result(result)
        ho_trial["result"] = hp_result
        self._hpopt_trials.refresh()

    def _to_hyperopt_result(self, result):
        return {"loss": self._metric_op * result[self._metric], "status": "ok"}

    def _get_hyperopt_trial(self, trial_id):
        """Returns the hyperopt trial doc for `trial_id`, or None if unknown."""
        if trial_id not in self._live_trial_mapping:
            return None
        hyperopt_tid = self._live_trial_mapping[trial_id][0]
        return [
            t for t in self._hpopt_trials.trials if t["tid"] == hyperopt_tid
        ][0]

    def _num_live_trials(self):
        return len(self._live_trial_mapping)

    def save(self, checkpoint_dir):
        """Pickles trials plus RNG state to the file path `checkpoint_dir`."""
        trials_object = (self._hpopt_trials, self.rstate.get_state())
        # snake_case (was camelCase `outputFile`).
        with open(checkpoint_dir, "wb") as output_file:
            pickle.dump(trials_object, output_file)

    def restore(self, checkpoint_dir):
        """Restores trials plus RNG state from the file path `checkpoint_dir`."""
        with open(checkpoint_dir, "rb") as input_file:
            trials_object = pickle.load(input_file)
        self._hpopt_trials = trials_object[0]
        self.rstate.set_state(trials_object[1])
|
zhuohan123/hoplite-rllib
| 3
|
Python
|
zhuohan123
|
Zhuohan Li
|
vLLM / Meta
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.