{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.352596640586853,
"min": 0.352596640586853,
"max": 1.4438633918762207,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10532.7666015625,
"min": 10532.7666015625,
"max": 43801.0390625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989883.0,
"min": 29952.0,
"max": 989883.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5242827534675598,
"min": -0.15232543647289276,
"max": 0.5568318963050842,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 145.226318359375,
"min": -36.10112762451172,
"max": 154.7992706298828,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.0017738514579832554,
"min": -0.011245288886129856,
"max": 0.3378306031227112,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.49135684967041016,
"min": -2.9575109481811523,
"max": 81.079345703125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06509389588332158,
"min": 0.06509389588332158,
"max": 0.07197656412193153,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9113145423665022,
"min": 0.4926972930435354,
"max": 1.0577035829243386,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017509505700824473,
"min": 0.0005202187562566717,
"max": 0.017509505700824473,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.24513307981154261,
"min": 0.004161750050053373,
"max": 0.261881070214383,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.679090297478569e-06,
"min": 7.679090297478569e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010750726416469996,
"min": 0.00010750726416469996,
"max": 0.0033829412723530004,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1025596642857143,
"min": 0.1025596642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358353000000001,
"min": 1.3691136000000002,
"max": 2.5276470000000004,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026571046214285713,
"min": 0.00026571046214285713,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00371994647,
"min": 0.00371994647,
"max": 0.1127919353,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012241525575518608,
"min": 0.011378294788300991,
"max": 0.44877880811691284,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17138135433197021,
"min": 0.15929612517356873,
"max": 3.141451597213745,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 335.35869565217394,
"min": 335.35869565217394,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30853.0,
"min": 15984.0,
"max": 32565.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5776608474552631,
"min": -1.0000000521540642,
"max": 1.6140636154873804,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 145.1447979658842,
"min": -32.000001668930054,
"max": 145.1447979658842,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5776608474552631,
"min": -1.0000000521540642,
"max": 1.6140636154873804,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 145.1447979658842,
"min": -32.000001668930054,
"max": 145.1447979658842,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04284386810151931,
"min": 0.04284386810151931,
"max": 8.549296743236482,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.9416358653397765,
"min": 3.606773059553234,
"max": 136.78874789178371,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678922190",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678924315"
},
"total": 2125.309630403,
"count": 1,
"self": 0.49277456599975267,
"children": {
"run_training.setup": {
"total": 0.3357330300000285,
"count": 1,
"self": 0.3357330300000285
},
"TrainerController.start_learning": {
"total": 2124.4811228070002,
"count": 1,
"self": 1.2732231900035913,
"children": {
"TrainerController._reset_env": {
"total": 8.843219354999974,
"count": 1,
"self": 8.843219354999974
},
"TrainerController.advance": {
"total": 2114.270565774996,
"count": 63677,
"self": 1.4622285509631183,
"children": {
"env_step": {
"total": 1496.5163646940314,
"count": 63677,
"self": 1388.630765608039,
"children": {
"SubprocessEnvManager._take_step": {
"total": 107.09315168199248,
"count": 63677,
"self": 4.674478920987667,
"children": {
"TorchPolicy.evaluate": {
"total": 102.41867276100481,
"count": 62554,
"self": 102.41867276100481
}
}
},
"workers": {
"total": 0.7924474039998586,
"count": 63677,
"self": 0.0,
"children": {
"worker_root": {
"total": 2119.658654461017,
"count": 63677,
"is_parallel": true,
"self": 845.986704066017,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005844532000082836,
"count": 1,
"is_parallel": true,
"self": 0.003904330000068512,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019402020000143239,
"count": 8,
"is_parallel": true,
"self": 0.0019402020000143239
}
}
},
"UnityEnvironment.step": {
"total": 0.0513837070000136,
"count": 1,
"is_parallel": true,
"self": 0.0005448789999036308,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004396599999836326,
"count": 1,
"is_parallel": true,
"self": 0.0004396599999836326
},
"communicator.exchange": {
"total": 0.04881509200004075,
"count": 1,
"is_parallel": true,
"self": 0.04881509200004075
},
"steps_from_proto": {
"total": 0.001584076000085588,
"count": 1,
"is_parallel": true,
"self": 0.0003718990000152189,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001212177000070369,
"count": 8,
"is_parallel": true,
"self": 0.001212177000070369
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1273.6719503949998,
"count": 63676,
"is_parallel": true,
"self": 31.06672961307595,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.704181733008454,
"count": 63676,
"is_parallel": true,
"self": 22.704181733008454
},
"communicator.exchange": {
"total": 1127.127074955953,
"count": 63676,
"is_parallel": true,
"self": 1127.127074955953
},
"steps_from_proto": {
"total": 92.77396409296227,
"count": 63676,
"is_parallel": true,
"self": 19.833332882013565,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.94063121094871,
"count": 509408,
"is_parallel": true,
"self": 72.94063121094871
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 616.2919725300015,
"count": 63677,
"self": 2.5091048979732022,
"children": {
"process_trajectory": {
"total": 117.79389257802438,
"count": 63677,
"self": 117.57311557502442,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2207770029999665,
"count": 2,
"self": 0.2207770029999665
}
}
},
"_update_policy": {
"total": 495.98897505400396,
"count": 443,
"self": 312.9837423480067,
"children": {
"TorchPPOOptimizer.update": {
"total": 183.0052327059973,
"count": 22836,
"self": 183.0052327059973
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.030000021273736e-06,
"count": 1,
"self": 1.030000021273736e-06
},
"TrainerController._save_models": {
"total": 0.09411345700027596,
"count": 1,
"self": 0.0016304280002259475,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09248302900005001,
"count": 1,
"self": 0.09248302900005001
}
}
}
}
}
}
}