{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8921821117401123,
"min": 0.8921821117401123,
"max": 2.866762161254883,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8567.625,
"min": 8567.625,
"max": 29453.115234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.007867813110352,
"min": 0.25780078768730164,
"max": 13.007867813110352,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2536.5341796875,
"min": 50.01335144042969,
"max": 2642.181640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06841181528123379,
"min": 0.06310368753084,
"max": 0.07741665172929346,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27364726112493515,
"min": 0.2532981200738326,
"max": 0.3870832586464673,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2013024192522554,
"min": 0.11823406295996963,
"max": 0.29022657310261446,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8052096770090216,
"min": 0.47293625183987853,
"max": 1.4511328655130722,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.431818181818183,
"min": 3.5454545454545454,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1119.0,
"min": 156.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.431818181818183,
"min": 3.5454545454545454,
"max": 25.527272727272727,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1119.0,
"min": 156.0,
"max": 1404.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1692638393",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1692638867"
},
"total": 473.66726973799996,
"count": 1,
"self": 0.7959081019999985,
"children": {
"run_training.setup": {
"total": 0.041467202000035286,
"count": 1,
"self": 0.041467202000035286
},
"TrainerController.start_learning": {
"total": 472.8298944339999,
"count": 1,
"self": 0.5427615870087266,
"children": {
"TrainerController._reset_env": {
"total": 3.909542052000006,
"count": 1,
"self": 3.909542052000006
},
"TrainerController.advance": {
"total": 468.1515399979912,
"count": 18220,
"self": 0.26763193498743476,
"children": {
"env_step": {
"total": 467.8839080630038,
"count": 18220,
"self": 344.34932016601476,
"children": {
"SubprocessEnvManager._take_step": {
"total": 123.2496864489899,
"count": 18220,
"self": 1.7516417269922613,
"children": {
"TorchPolicy.evaluate": {
"total": 121.49804472199764,
"count": 18220,
"self": 121.49804472199764
}
}
},
"workers": {
"total": 0.2849014479991183,
"count": 18220,
"self": 0.0,
"children": {
"worker_root": {
"total": 471.15185718402506,
"count": 18220,
"is_parallel": true,
"self": 222.15420415701078,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020991400000411886,
"count": 1,
"is_parallel": true,
"self": 0.0006376100001261875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001461529999915001,
"count": 10,
"is_parallel": true,
"self": 0.001461529999915001
}
}
},
"UnityEnvironment.step": {
"total": 0.03438927399997738,
"count": 1,
"is_parallel": true,
"self": 0.0005897349999486323,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003235610000729139,
"count": 1,
"is_parallel": true,
"self": 0.0003235610000729139
},
"communicator.exchange": {
"total": 0.03137701899993317,
"count": 1,
"is_parallel": true,
"self": 0.03137701899993317
},
"steps_from_proto": {
"total": 0.00209895900002266,
"count": 1,
"is_parallel": true,
"self": 0.0004882880003833634,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016106709996392965,
"count": 10,
"is_parallel": true,
"self": 0.0016106709996392965
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 248.99765302701428,
"count": 18219,
"is_parallel": true,
"self": 10.554917989000273,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.324247468998692,
"count": 18219,
"is_parallel": true,
"self": 5.324247468998692
},
"communicator.exchange": {
"total": 196.90412630801222,
"count": 18219,
"is_parallel": true,
"self": 196.90412630801222
},
"steps_from_proto": {
"total": 36.21436126100309,
"count": 18219,
"is_parallel": true,
"self": 6.439612423993367,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.774748837009724,
"count": 182190,
"is_parallel": true,
"self": 29.774748837009724
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00015919900010885613,
"count": 1,
"self": 0.00015919900010885613,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 464.543044689996,
"count": 426289,
"is_parallel": true,
"self": 9.599563429980435,
"children": {
"process_trajectory": {
"total": 252.30833339801507,
"count": 426289,
"is_parallel": true,
"self": 250.7263421250152,
"children": {
"RLTrainer._checkpoint": {
"total": 1.5819912729998578,
"count": 4,
"is_parallel": true,
"self": 1.5819912729998578
}
}
},
"_update_policy": {
"total": 202.6351478620005,
"count": 90,
"is_parallel": true,
"self": 83.97347864999574,
"children": {
"TorchPPOOptimizer.update": {
"total": 118.66166921200477,
"count": 4587,
"is_parallel": true,
"self": 118.66166921200477
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22589159799986192,
"count": 1,
"self": 0.001189087999819094,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22470251000004282,
"count": 1,
"self": 0.22470251000004282
}
}
}
}
}
}
}