{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.31421124935150146,
"min": 0.2840237617492676,
"max": 1.3432326316833496,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9431.365234375,
"min": 8525.2568359375,
"max": 40748.3046875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989908.0,
"min": 29952.0,
"max": 989908.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.28630736470222473,
"min": -0.10326258093118668,
"max": 0.3394848704338074,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 73.86730194091797,
"min": -24.886281967163086,
"max": 86.90812683105469,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.002211504615843296,
"min": -0.003026550868526101,
"max": 0.19428789615631104,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.5705682039260864,
"min": -0.753611147403717,
"max": 46.04623031616211,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06974054059003071,
"min": 0.06547258155443982,
"max": 0.07400301405891577,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0461081088504607,
"min": 0.4902440781697387,
"max": 1.0932508326813524,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010319827537303808,
"min": 0.0010141509631897306,
"max": 0.011688448390607186,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1547974130595571,
"min": 0.013114335913794281,
"max": 0.16533768097410667,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.47305750901333e-06,
"min": 7.47305750901333e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011209586263519996,
"min": 0.00011209586263519996,
"max": 0.003633732788755799,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249098666666664,
"min": 0.10249098666666664,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373647999999998,
"min": 1.371456,
"max": 2.6112442000000007,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025884956799999997,
"min": 0.00025884956799999997,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038827435199999994,
"min": 0.0038827435199999994,
"max": 0.12114329557999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010354802943766117,
"min": 0.010210125707089901,
"max": 0.45836424827575684,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15532204508781433,
"min": 0.132731631398201,
"max": 3.208549737930298,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 525.5192307692307,
"min": 495.7049180327869,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 27327.0,
"min": 15984.0,
"max": 33967.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0897307371577392,
"min": -1.0000000521540642,
"max": 1.1325817946683276,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 56.665998332202435,
"min": -29.099601708352566,
"max": 67.75399824976921,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0897307371577392,
"min": -1.0000000521540642,
"max": 1.1325817946683276,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 56.665998332202435,
"min": -29.099601708352566,
"max": 67.75399824976921,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.056008590235321135,
"min": 0.056008590235321135,
"max": 8.996327891945839,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.912446692236699,
"min": 2.912446692236699,
"max": 143.94124627113342,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674421870",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training2 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674423853"
},
"total": 1983.170290858,
"count": 1,
"self": 0.4334071639996182,
"children": {
"run_training.setup": {
"total": 0.10060317799980112,
"count": 1,
"self": 0.10060317799980112
},
"TrainerController.start_learning": {
"total": 1982.6362805160006,
"count": 1,
"self": 1.353520445984941,
"children": {
"TrainerController._reset_env": {
"total": 5.930512423999971,
"count": 1,
"self": 5.930512423999971
},
"TrainerController.advance": {
"total": 1975.2708701840156,
"count": 63585,
"self": 1.3766213879198403,
"children": {
"env_step": {
"total": 1327.3217640259427,
"count": 63585,
"self": 1217.1686995927785,
"children": {
"SubprocessEnvManager._take_step": {
"total": 109.33425272204022,
"count": 63585,
"self": 4.618678569950134,
"children": {
"TorchPolicy.evaluate": {
"total": 104.71557415209008,
"count": 62570,
"self": 35.004307358019105,
"children": {
"TorchPolicy.sample_actions": {
"total": 69.71126679407098,
"count": 62570,
"self": 69.71126679407098
}
}
}
}
},
"workers": {
"total": 0.818811711123999,
"count": 63585,
"self": 0.0,
"children": {
"worker_root": {
"total": 1977.8371454249473,
"count": 63585,
"is_parallel": true,
"self": 863.7658797191134,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002054730999589083,
"count": 1,
"is_parallel": true,
"self": 0.0007944589970065863,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012602720025824965,
"count": 8,
"is_parallel": true,
"self": 0.0012602720025824965
}
}
},
"UnityEnvironment.step": {
"total": 0.048239406999528,
"count": 1,
"is_parallel": true,
"self": 0.0004901860002064495,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045722699996986194,
"count": 1,
"is_parallel": true,
"self": 0.00045722699996986194
},
"communicator.exchange": {
"total": 0.045628173999830324,
"count": 1,
"is_parallel": true,
"self": 0.045628173999830324
},
"steps_from_proto": {
"total": 0.0016638199995213654,
"count": 1,
"is_parallel": true,
"self": 0.0004661449993363931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011976750001849723,
"count": 8,
"is_parallel": true,
"self": 0.0011976750001849723
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1114.0712657058339,
"count": 63584,
"is_parallel": true,
"self": 28.284180096286946,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.93000472285894,
"count": 63584,
"is_parallel": true,
"self": 23.93000472285894
},
"communicator.exchange": {
"total": 956.1459612529579,
"count": 63584,
"is_parallel": true,
"self": 956.1459612529579
},
"steps_from_proto": {
"total": 105.71111963373005,
"count": 63584,
"is_parallel": true,
"self": 23.549288799286842,
"children": {
"_process_rank_one_or_two_observation": {
"total": 82.16183083444321,
"count": 508672,
"is_parallel": true,
"self": 82.16183083444321
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 646.5724847701531,
"count": 63585,
"self": 2.5010763138561742,
"children": {
"process_trajectory": {
"total": 150.38760613227169,
"count": 63585,
"self": 150.19726568227088,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19034045000080368,
"count": 2,
"self": 0.19034045000080368
}
}
},
"_update_policy": {
"total": 493.68380232402524,
"count": 453,
"self": 183.87012862614483,
"children": {
"TorchPPOOptimizer.update": {
"total": 309.8136736978804,
"count": 22758,
"self": 309.8136736978804
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.640007192501798e-07,
"count": 1,
"self": 9.640007192501798e-07
},
"TrainerController._save_models": {
"total": 0.08137649799937208,
"count": 1,
"self": 0.001334700998995686,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08004179700037639,
"count": 1,
"self": 0.08004179700037639
}
}
}
}
}
}
}