{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 1.0307931900024414,
"min": 0.9445683360099792,
"max": 1.4617441892623901,
"count": 3
},
"Pyramids.Policy.Entropy.sum": {
"value": 30874.318359375,
"min": 28291.7109375,
"max": 44343.47265625,
"count": 3
},
"Pyramids.Step.mean": {
"value": 89888.0,
"min": 29952.0,
"max": 89888.0,
"count": 3
},
"Pyramids.Step.sum": {
"value": 89888.0,
"min": 29952.0,
"max": 89888.0,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09728601574897766,
"min": -0.23202307522296906,
"max": -0.09728601574897766,
"count": 3
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -23.348644256591797,
"min": -54.98946762084961,
"max": -23.348644256591797,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.1619967669248581,
"min": 0.1619967669248581,
"max": 0.29302218556404114,
"count": 3
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 38.87922286987305,
"min": 38.87922286987305,
"max": 70.13727569580078,
"count": 3
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06808813516296819,
"min": 0.06808813516296819,
"max": 0.0733561234391459,
"count": 3
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.6808813516296819,
"min": 0.5073471987587714,
"max": 0.6808813516296819,
"count": 3
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0008742923530273486,
"min": 0.0008742923530273486,
"max": 0.008278111200221852,
"count": 3
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.008742923530273486,
"min": 0.008742923530273486,
"max": 0.05794677840155297,
"count": 3
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.80432739856e-05,
"min": 7.80432739856e-05,
"max": 0.0002515063018788571,
"count": 3
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.000780432739856,
"min": 0.000780432739856,
"max": 0.0017605441131519997,
"count": 3
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.12601440000000003,
"min": 0.12601440000000003,
"max": 0.1838354285714286,
"count": 3
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.2601440000000002,
"min": 1.234912,
"max": 1.2868480000000002,
"count": 3
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0026088385599999995,
"min": 0.0026088385599999995,
"max": 0.008385159314285713,
"count": 3
},
"Pyramids.Policy.Beta.sum": {
"value": 0.026088385599999996,
"min": 0.026088385599999996,
"max": 0.058696115199999996,
"count": 3
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.10714844614267349,
"min": 0.10714844614267349,
"max": 0.45613914728164673,
"count": 3
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 1.0714844465255737,
"min": 1.0714844465255737,
"max": 3.192974090576172,
"count": 3
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 997.5,
"min": 978.8787878787879,
"max": 999.0,
"count": 3
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31920.0,
"min": 15984.0,
"max": 32303.0,
"count": 3
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9359438039828092,
"min": -1.0000000521540642,
"max": -0.9192424714565277,
"count": 3
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -29.950201727449894,
"min": -30.335001558065414,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9359438039828092,
"min": -1.0000000521540642,
"max": -0.9192424714565277,
"count": 3
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -29.950201727449894,
"min": -30.335001558065414,
"max": -16.000000834465027,
"count": 3
},
"Pyramids.Policy.RndReward.mean": {
"value": 1.252864717738703,
"min": 1.252864717738703,
"max": 9.333736169151962,
"count": 3
},
"Pyramids.Policy.RndReward.sum": {
"value": 40.09167096763849,
"min": 40.09167096763849,
"max": 149.3397787064314,
"count": 3
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1715938812",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1715939086"
},
"total": 274.692146288,
"count": 1,
"self": 0.529008402000045,
"children": {
"run_training.setup": {
"total": 0.09010310100006791,
"count": 1,
"self": 0.09010310100006791
},
"TrainerController.start_learning": {
"total": 274.0730347849999,
"count": 1,
"self": 0.20235360701713034,
"children": {
"TrainerController._reset_env": {
"total": 2.4943681779998315,
"count": 1,
"self": 2.4943681779998315
},
"TrainerController.advance": {
"total": 271.2882538579829,
"count": 6259,
"self": 0.21224610498484253,
"children": {
"env_step": {
"total": 166.11827408500085,
"count": 6259,
"self": 151.3339588529982,
"children": {
"SubprocessEnvManager._take_step": {
"total": 14.662845279993007,
"count": 6259,
"self": 0.6101162039808514,
"children": {
"TorchPolicy.evaluate": {
"total": 14.052729076012156,
"count": 6256,
"self": 14.052729076012156
}
}
},
"workers": {
"total": 0.12146995200964739,
"count": 6259,
"self": 0.0,
"children": {
"worker_root": {
"total": 273.394026755999,
"count": 6259,
"is_parallel": true,
"self": 137.35727957400013,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002981971000053818,
"count": 1,
"is_parallel": true,
"self": 0.0010457759999553673,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019361950000984507,
"count": 8,
"is_parallel": true,
"self": 0.0019361950000984507
}
}
},
"UnityEnvironment.step": {
"total": 0.05428578600003675,
"count": 1,
"is_parallel": true,
"self": 0.000679080000054455,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045194300014372857,
"count": 1,
"is_parallel": true,
"self": 0.00045194300014372857
},
"communicator.exchange": {
"total": 0.0513332470000023,
"count": 1,
"is_parallel": true,
"self": 0.0513332470000023
},
"steps_from_proto": {
"total": 0.0018215159998362651,
"count": 1,
"is_parallel": true,
"self": 0.00038358700066964957,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014379289991666155,
"count": 8,
"is_parallel": true,
"self": 0.0014379289991666155
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 136.03674718199886,
"count": 6258,
"is_parallel": true,
"self": 4.4181820919980055,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.277102920002335,
"count": 6258,
"is_parallel": true,
"self": 2.277102920002335
},
"communicator.exchange": {
"total": 117.40567068799737,
"count": 6258,
"is_parallel": true,
"self": 117.40567068799737
},
"steps_from_proto": {
"total": 11.93579148200115,
"count": 6258,
"is_parallel": true,
"self": 2.5136160779773036,
"children": {
"_process_rank_one_or_two_observation": {
"total": 9.422175404023847,
"count": 50064,
"is_parallel": true,
"self": 9.422175404023847
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 104.95773366799722,
"count": 6259,
"self": 0.26564680700585086,
"children": {
"process_trajectory": {
"total": 15.797466327991287,
"count": 6259,
"self": 15.797466327991287
},
"_update_policy": {
"total": 88.89462053300008,
"count": 29,
"self": 35.65047338499721,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.24414714800287,
"count": 2289,
"self": 53.24414714800287
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.314999963142327e-06,
"count": 1,
"self": 1.314999963142327e-06
},
"TrainerController._save_models": {
"total": 0.08805782700005693,
"count": 1,
"self": 0.0016399819999151077,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08641784500014182,
"count": 1,
"self": 0.08641784500014182
}
}
}
}
}
}
}