{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3023781478404999,
"min": 0.3023781478404999,
"max": 1.3851267099380493,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9047.154296875,
"min": 9047.154296875,
"max": 42019.203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989888.0,
"min": 29873.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989888.0,
"min": 29873.0,
"max": 989888.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.3720914125442505,
"min": -0.09277643263339996,
"max": 0.3720914125442505,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 97.11585998535156,
"min": -22.359119415283203,
"max": 97.11585998535156,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.43772897124290466,
"min": -0.43772897124290466,
"max": 0.33005502820014954,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -114.24726104736328,
"min": -114.24726104736328,
"max": 79.54325866699219,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06873782694714868,
"min": 0.06451474706382396,
"max": 0.07308701650019313,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0310674042072303,
"min": 0.5116091155013519,
"max": 1.0862417079430695,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.027784618391888213,
"min": 0.0009425128522200676,
"max": 0.044604256470587904,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.4167692758783232,
"min": 0.011310154226640811,
"max": 0.6244595905882306,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.447137517653333e-06,
"min": 7.447137517653333e-06,
"max": 0.0002952363015879,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001117070627648,
"min": 0.0001117070627648,
"max": 0.003634545488484899,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248234666666668,
"min": 0.10248234666666668,
"max": 0.1984121,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5372352000000002,
"min": 1.3888847,
"max": 2.6115151,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025798643200000004,
"min": 0.00025798643200000004,
"max": 0.009841368789999999,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038697964800000003,
"min": 0.0038697964800000003,
"max": 0.12117035848999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011309059336781502,
"min": 0.011277731508016586,
"max": 0.5924831032752991,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16963589191436768,
"min": 0.16314435005187988,
"max": 4.147381782531738,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 493.0769230769231,
"min": 459.3968253968254,
"max": 996.1,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 32050.0,
"min": 16544.0,
"max": 33496.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.1991599783301354,
"min": -0.93032005255421,
"max": 1.4135936210079798,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 77.9453985914588,
"min": -28.575801625847816,
"max": 89.05639812350273,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.1991599783301354,
"min": -0.93032005255421,
"max": 1.4135936210079798,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 77.9453985914588,
"min": -28.575801625847816,
"max": 89.05639812350273,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05725687068143788,
"min": 0.05432561445466952,
"max": 11.161639002316138,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.721696594293462,
"min": 3.4639352806843817,
"max": 189.74786303937435,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679878663",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679880906"
},
"total": 2242.628757988,
"count": 1,
"self": 0.5904768440004773,
"children": {
"run_training.setup": {
"total": 0.1733352730000206,
"count": 1,
"self": 0.1733352730000206
},
"TrainerController.start_learning": {
"total": 2241.8649458709997,
"count": 1,
"self": 1.8260159289520743,
"children": {
"TrainerController._reset_env": {
"total": 9.386494696,
"count": 1,
"self": 9.386494696
},
"TrainerController.advance": {
"total": 2230.5558218750475,
"count": 63570,
"self": 1.9545012550024694,
"children": {
"env_step": {
"total": 1575.3374323760445,
"count": 63570,
"self": 1445.327589238058,
"children": {
"SubprocessEnvManager._take_step": {
"total": 128.90409589900486,
"count": 63570,
"self": 5.530283208047365,
"children": {
"TorchPolicy.evaluate": {
"total": 123.3738126909575,
"count": 62561,
"self": 123.3738126909575
}
}
},
"workers": {
"total": 1.105747238981678,
"count": 63570,
"self": 0.0,
"children": {
"worker_root": {
"total": 2236.1136092039274,
"count": 63570,
"is_parallel": true,
"self": 926.9765567779423,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005914359999962926,
"count": 1,
"is_parallel": true,
"self": 0.0039002400000072157,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0020141199999557102,
"count": 8,
"is_parallel": true,
"self": 0.0020141199999557102
}
}
},
"UnityEnvironment.step": {
"total": 0.05079742499998474,
"count": 1,
"is_parallel": true,
"self": 0.0005627999999546773,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005408300000340205,
"count": 1,
"is_parallel": true,
"self": 0.0005408300000340205
},
"communicator.exchange": {
"total": 0.0478714489999561,
"count": 1,
"is_parallel": true,
"self": 0.0478714489999561
},
"steps_from_proto": {
"total": 0.0018223460000399427,
"count": 1,
"is_parallel": true,
"self": 0.0004176430000484288,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001404702999991514,
"count": 8,
"is_parallel": true,
"self": 0.001404702999991514
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1309.137052425985,
"count": 63569,
"is_parallel": true,
"self": 34.42142797188035,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.62193662304412,
"count": 63569,
"is_parallel": true,
"self": 25.62193662304412
},
"communicator.exchange": {
"total": 1145.0924451590486,
"count": 63569,
"is_parallel": true,
"self": 1145.0924451590486
},
"steps_from_proto": {
"total": 104.00124267201193,
"count": 63569,
"is_parallel": true,
"self": 23.37512846807755,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.62611420393438,
"count": 508552,
"is_parallel": true,
"self": 80.62611420393438
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 653.2638882440007,
"count": 63570,
"self": 3.3727580900273324,
"children": {
"process_trajectory": {
"total": 131.09013973797465,
"count": 63570,
"self": 130.87539030797444,
"children": {
"RLTrainer._checkpoint": {
"total": 0.21474943000021085,
"count": 2,
"self": 0.21474943000021085
}
}
},
"_update_policy": {
"total": 518.8009904159987,
"count": 457,
"self": 329.9478973829904,
"children": {
"TorchPPOOptimizer.update": {
"total": 188.85309303300835,
"count": 22803,
"self": 188.85309303300835
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0650001058820635e-06,
"count": 1,
"self": 1.0650001058820635e-06
},
"TrainerController._save_models": {
"total": 0.09661230599976989,
"count": 1,
"self": 0.0014541739997184777,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09515813200005141,
"count": 1,
"self": 0.09515813200005141
}
}
}
}
}
}
}