{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5363530516624451,
"min": 0.5098602175712585,
"max": 1.4904824495315552,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16030.51953125,
"min": 15516.0654296875,
"max": 45215.27734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989873.0,
"min": 29952.0,
"max": 989873.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.2863658666610718,
"min": -0.11008022725582123,
"max": 0.2863658666610718,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 73.30966186523438,
"min": -26.529335021972656,
"max": 73.30966186523438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.035201944410800934,
"min": 0.009661422111093998,
"max": 0.49104270339012146,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 9.011697769165039,
"min": 2.482985496520996,
"max": 116.37712097167969,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07229158546505023,
"min": 0.06509584695434541,
"max": 0.07502237057474907,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0120821965107032,
"min": 0.4668491066158508,
"max": 1.0349062754927825,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011381422780148028,
"min": 0.0008753815623843848,
"max": 0.012184750249695604,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1593399189220724,
"min": 0.008954203185399903,
"max": 0.17058650349573845,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.397690391278571e-06,
"min": 7.397690391278571e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010356766547789999,
"min": 0.00010356766547789999,
"max": 0.0035068622310460004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246586428571429,
"min": 0.10246586428571429,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4345221000000001,
"min": 1.3691136000000002,
"max": 2.568954,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002563398421428572,
"min": 0.0002563398421428572,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035887577900000005,
"min": 0.0035887577900000005,
"max": 0.1169185046,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01444873958826065,
"min": 0.01444873958826065,
"max": 0.599606454372406,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2022823542356491,
"min": 0.2022823542356491,
"max": 4.197245121002197,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 561.6923076923077,
"min": 538.8,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29208.0,
"min": 15984.0,
"max": 32392.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1689884325919242,
"min": -1.0000000521540642,
"max": 1.1689884325919242,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 60.787398494780064,
"min": -32.000001668930054,
"max": 60.787398494780064,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1689884325919242,
"min": -1.0000000521540642,
"max": 1.1689884325919242,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 60.787398494780064,
"min": -32.000001668930054,
"max": 60.787398494780064,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08480149387973003,
"min": 0.08480149387973003,
"max": 12.10956524219364,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.409677681745961,
"min": 4.409677681745961,
"max": 193.75304387509823,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1716189009",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1716191413"
},
"total": 2404.174309156,
"count": 1,
"self": 0.4918073620001451,
"children": {
"run_training.setup": {
"total": 0.09776135299989619,
"count": 1,
"self": 0.09776135299989619
},
"TrainerController.start_learning": {
"total": 2403.584740441,
"count": 1,
"self": 1.6561197580076623,
"children": {
"TrainerController._reset_env": {
"total": 2.8340315890000056,
"count": 1,
"self": 2.8340315890000056
},
"TrainerController.advance": {
"total": 2399.009469369993,
"count": 63355,
"self": 1.755647314989801,
"children": {
"env_step": {
"total": 1681.47908929098,
"count": 63355,
"self": 1529.2082466999316,
"children": {
"SubprocessEnvManager._take_step": {
"total": 151.26146766402417,
"count": 63355,
"self": 5.55854456204429,
"children": {
"TorchPolicy.evaluate": {
"total": 145.70292310197988,
"count": 62573,
"self": 145.70292310197988
}
}
},
"workers": {
"total": 1.0093749270242824,
"count": 63355,
"self": 0.0,
"children": {
"worker_root": {
"total": 2398.1954402269134,
"count": 63355,
"is_parallel": true,
"self": 1009.8575869189265,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0023526039999524073,
"count": 1,
"is_parallel": true,
"self": 0.0006996519998665462,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001652952000085861,
"count": 8,
"is_parallel": true,
"self": 0.001652952000085861
}
}
},
"UnityEnvironment.step": {
"total": 0.05133290699995996,
"count": 1,
"is_parallel": true,
"self": 0.0006438919998572601,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004856650000419904,
"count": 1,
"is_parallel": true,
"self": 0.0004856650000419904
},
"communicator.exchange": {
"total": 0.04848320799999328,
"count": 1,
"is_parallel": true,
"self": 0.04848320799999328
},
"steps_from_proto": {
"total": 0.0017201420000674261,
"count": 1,
"is_parallel": true,
"self": 0.00036116499984473194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013589770002226942,
"count": 8,
"is_parallel": true,
"self": 0.0013589770002226942
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1388.337853307987,
"count": 63354,
"is_parallel": true,
"self": 39.690104823929005,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.719607527992252,
"count": 63354,
"is_parallel": true,
"self": 25.719607527992252
},
"communicator.exchange": {
"total": 1213.4963820550252,
"count": 63354,
"is_parallel": true,
"self": 1213.4963820550252
},
"steps_from_proto": {
"total": 109.43175890104078,
"count": 63354,
"is_parallel": true,
"self": 22.658127316101854,
"children": {
"_process_rank_one_or_two_observation": {
"total": 86.77363158493893,
"count": 506832,
"is_parallel": true,
"self": 86.77363158493893
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 715.7747327640229,
"count": 63355,
"self": 3.2379490790193586,
"children": {
"process_trajectory": {
"total": 142.69964709800377,
"count": 63355,
"self": 142.48994722900386,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20969986899990545,
"count": 2,
"self": 0.20969986899990545
}
}
},
"_update_policy": {
"total": 569.8371365869998,
"count": 443,
"self": 334.9204535690062,
"children": {
"TorchPPOOptimizer.update": {
"total": 234.9166830179936,
"count": 22803,
"self": 234.9166830179936
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.699998943484388e-07,
"count": 1,
"self": 8.699998943484388e-07
},
"TrainerController._save_models": {
"total": 0.08511885399957464,
"count": 1,
"self": 0.0014500249999400694,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08366882899963457,
"count": 1,
"self": 0.08366882899963457
}
}
}
}
}
}
}