{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6620796918869019,
"min": 0.6620796918869019,
"max": 1.4667741060256958,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 19978.916015625,
"min": 19960.2265625,
"max": 44496.05859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989987.0,
"min": 29952.0,
"max": 989987.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.20519033074378967,
"min": -0.17344701290130615,
"max": 0.20654046535491943,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 52.528724670410156,
"min": -41.10694122314453,
"max": 52.874359130859375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 1.3986117839813232,
"min": -0.06036427989602089,
"max": 1.3986117839813232,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 358.04461669921875,
"min": -15.030705451965332,
"max": 358.04461669921875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0692680958971588,
"min": 0.06470004747573399,
"max": 0.07248837278178268,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9697533425602232,
"min": 0.49441283309398,
"max": 1.0483101284513465,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.3603130001878022,
"min": 8.47830240978031e-05,
"max": 0.3603130001878022,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 5.044382002629231,
"min": 0.0011869623373692434,
"max": 5.044382002629231,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.609126035085715e-06,
"min": 7.609126035085715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001065277644912,
"min": 0.0001065277644912,
"max": 0.0031178401607199996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253634285714286,
"min": 0.10253634285714286,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355088,
"min": 1.3691136000000002,
"max": 2.3588663,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026338065142857153,
"min": 0.00026338065142857153,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036873291200000013,
"min": 0.0036873291200000013,
"max": 0.103944072,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013422289863228798,
"min": 0.013422289863228798,
"max": 0.43161314725875854,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.18791206181049347,
"min": 0.18791206181049347,
"max": 3.021291971206665,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 636.6086956521739,
"min": 611.156862745098,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29284.0,
"min": 15984.0,
"max": 32901.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0154477882644404,
"min": -1.0000000521540642,
"max": 1.0154477882644404,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 46.71059826016426,
"min": -32.000001668930054,
"max": 48.823198437690735,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0154477882644404,
"min": -1.0000000521540642,
"max": 1.0154477882644404,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 46.71059826016426,
"min": -32.000001668930054,
"max": 48.823198437690735,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.08974283203133382,
"min": 0.08752535250467047,
"max": 8.214335225522518,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.128170273441356,
"min": 4.128170273441356,
"max": 131.4293636083603,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1684171069",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1684174108"
},
"total": 3039.81514055,
"count": 1,
"self": 0.577137847999893,
"children": {
"run_training.setup": {
"total": 0.08268272599980264,
"count": 1,
"self": 0.08268272599980264
},
"TrainerController.start_learning": {
"total": 3039.155319976,
"count": 1,
"self": 2.2148949709726367,
"children": {
"TrainerController._reset_env": {
"total": 1.527361185000018,
"count": 1,
"self": 1.527361185000018
},
"TrainerController.advance": {
"total": 3035.3060140780276,
"count": 63286,
"self": 2.2841829730673453,
"children": {
"env_step": {
"total": 1866.624345296985,
"count": 63286,
"self": 1726.2285102259793,
"children": {
"SubprocessEnvManager._take_step": {
"total": 139.0217970800138,
"count": 63286,
"self": 6.890456470957815,
"children": {
"TorchPolicy.evaluate": {
"total": 132.131340609056,
"count": 62563,
"self": 132.131340609056
}
}
},
"workers": {
"total": 1.3740379909918374,
"count": 63286,
"self": 0.0,
"children": {
"worker_root": {
"total": 3032.9997046860194,
"count": 63286,
"is_parallel": true,
"self": 1470.041201775975,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0036436600000797625,
"count": 1,
"is_parallel": true,
"self": 0.0010930240000561753,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025506360000235873,
"count": 8,
"is_parallel": true,
"self": 0.0025506360000235873
}
}
},
"UnityEnvironment.step": {
"total": 0.11045893999994405,
"count": 1,
"is_parallel": true,
"self": 0.0008369439999569295,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0006263870000111638,
"count": 1,
"is_parallel": true,
"self": 0.0006263870000111638
},
"communicator.exchange": {
"total": 0.10653362799985189,
"count": 1,
"is_parallel": true,
"self": 0.10653362799985189
},
"steps_from_proto": {
"total": 0.0024619810001240694,
"count": 1,
"is_parallel": true,
"self": 0.0005753020000156539,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018866790001084155,
"count": 8,
"is_parallel": true,
"self": 0.0018866790001084155
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1562.9585029100444,
"count": 63285,
"is_parallel": true,
"self": 44.509151482126526,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.205985459940848,
"count": 63285,
"is_parallel": true,
"self": 24.205985459940848
},
"communicator.exchange": {
"total": 1362.4870685549581,
"count": 63285,
"is_parallel": true,
"self": 1362.4870685549581
},
"steps_from_proto": {
"total": 131.75629741301896,
"count": 63285,
"is_parallel": true,
"self": 27.93870645329048,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.81759095972848,
"count": 506280,
"is_parallel": true,
"self": 103.81759095972848
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1166.3974858079755,
"count": 63286,
"self": 4.097018040994044,
"children": {
"process_trajectory": {
"total": 144.0392915559869,
"count": 63286,
"self": 143.7545519799869,
"children": {
"RLTrainer._checkpoint": {
"total": 0.28473957599999267,
"count": 2,
"self": 0.28473957599999267
}
}
},
"_update_policy": {
"total": 1018.2611762109946,
"count": 434,
"self": 419.7806967830088,
"children": {
"TorchPPOOptimizer.update": {
"total": 598.4804794279858,
"count": 22914,
"self": 598.4804794279858
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2210002751089633e-06,
"count": 1,
"self": 1.2210002751089633e-06
},
"TrainerController._save_models": {
"total": 0.10704852099934214,
"count": 1,
"self": 0.0015458779998880345,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10550264299945411,
"count": 1,
"self": 0.10550264299945411
}
}
}
}
}
}
}