Datasets:
File size: 7,993 Bytes
bcba0e4 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 |
import wandb
import d3rlpy
import argparse
import traceback
from d3rlpy.dataset import ReplayBuffer, InfiniteBuffer
from d3rlpy.preprocessing import StandardObservationScaler
from d3rlpy.logging import CombineAdapterFactory, FileAdapterFactory, TensorboardAdapterFactory
from fcev import FCEVEnv, load_drive_cycle
from d3rlpy.algos import (
TD3PlusBCConfig, IQLConfig, CQLConfig, BCQConfig,
CalQLConfig, AWACConfig, ReBRACConfig, TACRConfig,
PLASConfig, PRDCConfig, BEARConfig
)
from typing import Any, Optional
from d3rlpy.logging import WanDBAdapter
from d3rlpy.logging.logger import (
AlgProtocol,
LoggerAdapter,
LoggerAdapterFactory,
SaveProtocol,
)
# ---------- WandB Logger Factory ----------
class GWanDBAdapterFactory(LoggerAdapterFactory):
    r"""WandB Logger Adapter Factory class.

    Creates :class:`WanDBAdapter` instances for experiment tracking.

    Args:
        project (Optional[str], optional): The name of the WandB project.
            Defaults to None.
        experiment_name (Optional[str], optional): Explicit run name. When
            given, it takes precedence over the experiment name that the
            trainer passes to :meth:`create`. Defaults to None.
    """

    _project: Optional[str]
    _experiment_name: Optional[str]

    def __init__(
        self,
        project: Optional[str] = None,
        experiment_name: Optional[str] = None,
    ) -> None:
        self._project = project
        # BUG FIX: the original accepted ``experiment_name`` but silently
        # discarded it; store it so ``create`` can honor the caller's choice.
        self._experiment_name = experiment_name

    def create(
        self, algo: AlgProtocol, experiment_name: str, n_steps_per_epoch: int
    ) -> LoggerAdapter:
        return WanDBAdapter(
            algo=algo,
            # Prefer the explicitly configured run name; fall back to the
            # name supplied by the training loop.
            experiment_name=self._experiment_name or experiment_name,
            n_steps_per_epoch=n_steps_per_epoch,
            project=self._project,
        )
# ---------- Algorithm Config Dictionary ----------
def get_algo_configs():
    """Return a mapping from algorithm name to its d3rlpy config.

    Every algorithm shares the same regularized MLP encoder (batch norm +
    dropout 0.2) for both actor and critic. All algorithms standardize
    observations except ReBRAC, which instead uses a quantile-regression
    Q-function and no observation scaler (matching the original setup).

    Returns:
        dict[str, d3rlpy config]: insertion-ordered name -> config mapping.
    """

    def _encoder():
        # Fresh encoder factory per config: batch norm + dropout regularization.
        return d3rlpy.models.DefaultEncoderFactory(
            use_batch_norm=True, dropout_rate=0.2
        )

    def _cfg(config_cls, **extra):
        # Build a config with the shared encoder settings plus any overrides.
        return config_cls(
            actor_encoder_factory=_encoder(),
            critic_encoder_factory=_encoder(),
            **extra,
        )

    def _scaled(config_cls):
        # Fresh scaler instance per config, as in the original hand-written dict.
        return _cfg(config_cls, observation_scaler=StandardObservationScaler())

    # Insertion order is preserved deliberately: it defines the CLI
    # `choices` ordering in the argument parser.
    algo_configs = {
        name: _scaled(cls)
        for name, cls in (
            ("TD3PlusBC", TD3PlusBCConfig),
            ("IQL", IQLConfig),
            ("CQL", CQLConfig),
            ("BCQ", BCQConfig),
            ("CalQL", CalQLConfig),
            ("AWAC", AWACConfig),
        )
    }
    # ReBRAC deliberately differs: QR Q-function, no observation scaler.
    algo_configs["ReBRAC"] = _cfg(
        ReBRACConfig, q_func_factory=d3rlpy.models.QRQFunctionFactory()
    )
    for name, cls in (
        ("TACR", TACRConfig),
        ("PLAS", PLASConfig),
        ("PRDC", PRDCConfig),
        ("BEAR", BEARConfig),
    ):
        algo_configs[name] = _scaled(cls)
    return algo_configs
# ---------- Training Function ----------
def train(args):
    """Train one offline RL algorithm on the FCEV dataset.

    Args:
        args: Parsed CLI namespace with fields ``algo``, ``dataset_path``,
            ``drive_cycle``, ``n_steps``, ``n_steps_per_epoch``, ``device``,
            ``wandb`` and ``wandb_project``.

    Raises:
        ValueError: If ``args.algo`` is not a supported algorithm name.
    """
    algo_configs = get_algo_configs()
    if args.algo not in algo_configs:
        raise ValueError(f"Unsupported algorithm: {args.algo}")

    # Load the offline dataset into an unbounded replay buffer.
    with open(args.dataset_path, "rb") as f:
        dataset = ReplayBuffer.load(f, InfiniteBuffer())

    # Environment is used only for online evaluation rollouts during training.
    env = FCEVEnv(load_drive_cycle(args.drive_cycle))

    config = algo_configs[args.algo]
    algo = config.create(device=args.device)

    # File + TensorBoard logging always; WandB only when explicitly enabled.
    logger_adapters = [
        FileAdapterFactory(root_dir=f"d3rlpy_logs/{args.algo}"),
        TensorboardAdapterFactory(root_dir=f"tensorboard_logs/{args.algo}"),
    ]
    if args.wandb:
        logger_adapters.append(
            GWanDBAdapterFactory(
                experiment_name=f"{args.algo}-run", project=args.wandb_project
            )
        )
    logger_adapter = CombineAdapterFactory(logger_adapters)

    try:
        print(f"\n🚀 Starting training: {args.algo}")
        algo.fit(
            dataset,
            n_steps=args.n_steps,
            n_steps_per_epoch=args.n_steps_per_epoch,
            logger_adapter=logger_adapter,
            evaluators={
                'init_value': d3rlpy.metrics.InitialStateValueEstimationEvaluator(),
                'soft_opc': d3rlpy.metrics.SoftOPCEvaluator(return_threshold=100),
                'action': d3rlpy.metrics.ContinuousActionDiffEvaluator(),
                'environment': d3rlpy.metrics.EnvironmentEvaluator(env),
                'Advantage': d3rlpy.metrics.DiscountedSumOfAdvantageEvaluator()
            },
        )
        print(f"\n✅ Training finished for: {args.algo}")
    except Exception:
        # Best-effort benchmark run: report the failure, don't crash the CLI.
        print(f"\n❌ Training failed: {args.algo}")
        print(traceback.format_exc())
    finally:
        # BUG FIX: only close the WandB run when WandB logging was enabled;
        # ``finally`` also guarantees the run is closed if ``fit`` raises.
        if args.wandb:
            wandb.finish()
# ---------- Main CLI ----------
if __name__ == "__main__":
    # CLI entry point: build the argument parser, then launch training.
    cli = argparse.ArgumentParser(description="Offline RL training for FCEV")
    cli.add_argument("--algo", type=str, default="AWAC",
                     choices=list(get_algo_configs().keys()),
                     help="Name of the offline RL algorithm")
    # Remaining value-taking options as (flag, type, default, help) specs,
    # declared in the same order so `--help` output is unchanged.
    for flag, value_type, default, help_text in (
        ("--dataset-path", str, "datasets/fcev-mpc-v1.h5",
         "Path to the .h5 dataset file"),
        ("--drive-cycle", str, "CLTC-P-PartI.csv",
         "Path to the drive cycle CSV file"),
        ("--n-steps", int, 10000,
         "Total number of training steps"),
        ("--n-steps-per-epoch", int, 100,
         "Steps per epoch"),
        ("--device", str, "cuda:0",
         "Training device (e.g., 'cpu', 'cuda:0')"),
    ):
        cli.add_argument(flag, type=value_type, default=default, help=help_text)
    cli.add_argument("--wandb", action="store_true",
                     help="Enable WandB logging")
    cli.add_argument("--wandb-project", type=str, default="fcev-offline-benchmark",
                     help="WandB project name (used only if --wandb is enabled)")
    train(cli.parse_args())