import os

import minari
import numpy as np

import d3rlpy.dataset
import d3rlpy.logging
from d3rlpy.algos import (
    SACConfig, TD3PlusBCConfig, IQLConfig, CQLConfig, BCQConfig, CalQLConfig,
    AWACConfig, ReBRACConfig, TACRConfig, PLASConfig, PRDCConfig, BEARConfig,
    DecisionTransformerConfig,
)
from d3rlpy.dataset import MDPDataset
from fcev import FCEVEnv, load_drive_cycle

def load_minari_as_d3rlpy(name="fcev-mpc-v1", num=None):
    """Load Minari dataset with a custom reward function.



    Args:

        name (str): Dataset name.

        num (int, optional): Number of episodes to sample.

        beta (float): Logistic function slope.

        c (float): Offset for logistic transformation.



    Returns:

        MDPDataset: Dataset with custom rewards.

    """
    dataset = minari.load_dataset(name)
    # sample_episodes draws episodes at random; when no count is given,
    # iterate over every episode deterministically instead.
    episodes = dataset.sample_episodes(num) if num else list(dataset.iterate_episodes())

    all_obs = []
    all_actions = []
    all_rewards = []
    all_terminals = []
    all_timeouts = []

    for ep in episodes:
        # Minari stores one more observation than actions (the final state);
        # trim every array to the action count so they align one-to-one.
        n = len(ep.actions)
        all_obs.append(ep.observations[:n])
        all_actions.append(ep.actions[:n])
        all_rewards.append(ep.rewards[:n])
        all_terminals.append(ep.terminations[:n])
        # Keep truncation flags as timeouts so time-limit episode boundaries
        # survive concatenation instead of merging into the next episode.
        all_timeouts.append(ep.truncations[:n])

    return MDPDataset(
        observations=np.vstack(all_obs),
        actions=np.vstack(all_actions),
        rewards=np.hstack(all_rewards),
        terminals=np.hstack(all_terminals),
        timeouts=np.hstack(all_timeouts),
    )
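

# Optional reward shaping (illustrative sketch): squash raw rewards into
# (0, 1) with a logistic function of slope `beta` and offset `c`. The exact
# formula below is an assumption for illustration, not a verified part of
# this pipeline; apply it to the reward arrays before constructing the
# MDPDataset if shaped rewards are desired.
def logistic_reward(rewards, beta=1.0, c=0.0):
    """Logistic reward transform: 1 / (1 + exp(-beta * (r - c)))."""
    rewards = np.asarray(rewards, dtype=np.float64)
    return 1.0 / (1.0 + np.exp(-beta * (rewards - c)))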


# Select the source dataset
env_name = "fcev-mpc-v1"
# env_name = "fcev-rule-v1"

# Build the MDPDataset from the Minari episodes
dataset = load_minari_as_d3rlpy(env_name)

# Persist the dataset to disk, then reload it as a d3rlpy ReplayBuffer
# (ReplayBuffer.dump/load operate on binary file objects, not paths)
os.makedirs("datasets", exist_ok=True)
with open(f"datasets/{env_name}.h5", "w+b") as f:
    dataset.dump(f)
with open(f"datasets/{env_name}.h5", "rb") as f:
    dataset = d3rlpy.dataset.ReplayBuffer.load(f, d3rlpy.dataset.InfiniteBuffer())
# dataset = d3rlpy.datasets.get_minari("fcev-mpc-v1")
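
# Quick sanity check on the reloaded buffer (optional sketch); `episodes`
# and `transition_count` are standard d3rlpy ReplayBuffer attributes.
print(f"episodes: {len(dataset.episodes)}, transitions: {dataset.transition_count}")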

# Select the algorithm (uncomment exactly one)
# algo = SACConfig(compile_graph=True).create()
# algo = TD3PlusBCConfig(compile_graph=True).create()
algo = CQLConfig(compile_graph=True).create()
# algo = BCQConfig(compile_graph=True).create()
# algo = IQLConfig(compile_graph=True).create()
# algo = CalQLConfig(compile_graph=True).create()
# algo = DecisionTransformerConfig(compile_graph=True).create()

# Build the networks against the environment's observation/action spec
algo.build_with_env(env=FCEVEnv(load_drive_cycle("CLTC-P-PartI.csv")))

# Combine file, TensorBoard, and Weights & Biases logging adapters
logger_adapter = d3rlpy.logging.CombineAdapterFactory([
    d3rlpy.logging.FileAdapterFactory(root_dir="d3rlpy_logs"),
    d3rlpy.logging.TensorboardAdapterFactory(root_dir="tensorboard_logs"),
    d3rlpy.logging.WanDBAdapterFactory()
])

# Train the algorithm offline, routing metrics through the combined logger
algo.fit(
    dataset,
    n_steps=10000,
    n_steps_per_epoch=1000,
    logger_adapter=logger_adapter,
)
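
# Optional online evaluation (sketch): d3rlpy can roll the current policy out
# in an environment each epoch via the `evaluators` argument. Wiring
# EnvironmentEvaluator to FCEVEnv here is an assumption about the intended
# setup, not part of the original script.
# from d3rlpy.metrics import EnvironmentEvaluator
# algo.fit(
#     dataset,
#     n_steps=10000,
#     n_steps_per_epoch=1000,
#     logger_adapter=logger_adapter,
#     evaluators={"environment": EnvironmentEvaluator(
#         FCEVEnv(load_drive_cycle("CLTC-P-PartI.csv"))
#     )},
# )
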
# algo = TD3PlusBCConfig(actor_learning_rate=1e-4, alpha=2.5).create()
# algo.fit(dataset, n_steps=200000, n_steps_per_epoch=1000)
# algo.save("td3bc_model.d3")
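
# Optional post-training persistence (sketch): `save`, `save_policy`, and
# `load_learnable` are d3rlpy v2 APIs; the file names are placeholders.
# algo.save("d3rlpy_logs/cql_fcev.d3")                  # full learnable state
# algo.save_policy("d3rlpy_logs/cql_fcev_policy.onnx")  # deployable ONNX policy
# reloaded = d3rlpy.load_learnable("d3rlpy_logs/cql_fcev.d3")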