{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5929673910140991,
"min": 0.5615445375442505,
"max": 1.4988300800323486,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17722.609375,
"min": 16756.48828125,
"max": 45468.5078125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989893.0,
"min": 29952.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989893.0,
"min": 29952.0,
"max": 989893.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.38267001509666443,
"min": -0.10155199468135834,
"max": 0.45119205117225647,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 101.40755462646484,
"min": -24.372478485107422,
"max": 122.72423553466797,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00165123853366822,
"min": -0.008666027337312698,
"max": 0.25182202458381653,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.4375782012939453,
"min": -2.357159376144409,
"max": 60.163490295410156,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07173747104631426,
"min": 0.06428959868841971,
"max": 0.07406589521286695,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0043245946483996,
"min": 0.4882517525989458,
"max": 1.0274214771052357,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.012457457060615223,
"min": 0.00010062138897817122,
"max": 0.01599858179231108,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1744043988486131,
"min": 0.0013080780567162257,
"max": 0.2239801450923551,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.47569036527857e-06,
"min": 7.47569036527857e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010465966511389998,
"min": 0.00010465966511389998,
"max": 0.0035065481311506996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.1024918642857143,
"min": 0.1024918642857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348861000000002,
"min": 1.3886848,
"max": 2.5688492999999992,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025893724214285713,
"min": 0.00025893724214285713,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00362512139,
"min": 0.00362512139,
"max": 0.11690804507000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008416557684540749,
"min": 0.008416557684540749,
"max": 0.360109806060791,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11783181130886078,
"min": 0.11783181130886078,
"max": 2.520768642425537,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 425.92537313432837,
"min": 386.21794871794873,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28537.0,
"min": 15984.0,
"max": 33267.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.3650596784566766,
"min": -1.0000000521540642,
"max": 1.4855435637709422,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 91.45899845659733,
"min": -31.995201662182808,
"max": 115.87239797413349,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.3650596784566766,
"min": -1.0000000521540642,
"max": 1.4855435637709422,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 91.45899845659733,
"min": -31.995201662182808,
"max": 115.87239797413349,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.037229162332890274,
"min": 0.037229162332890274,
"max": 6.975258087739348,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.4943538763036486,
"min": 2.4943538763036486,
"max": 111.60412940382957,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676350452",
"python_version": "3.9.9 (main, Feb 14 2023, 11:37:38) \n[GCC 11.3.0]",
"command_line_arguments": "/home/gpu/venv/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=../training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids --no-graphics --force",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cu117",
"numpy_version": "1.19.0",
"end_time_seconds": "1676352088"
},
"total": 1635.848218699999,
"count": 1,
"self": 0.21994133599946508,
"children": {
"run_training.setup": {
"total": 0.015377457999420585,
"count": 1,
"self": 0.015377457999420585
},
"TrainerController.start_learning": {
"total": 1635.6128999060002,
"count": 1,
"self": 0.9468453748759202,
"children": {
"TrainerController._reset_env": {
"total": 1.3867732119997527,
"count": 1,
"self": 1.3867732119997527
},
"TrainerController.advance": {
"total": 1633.2186109721224,
"count": 63533,
"self": 0.8816795949951484,
"children": {
"env_step": {
"total": 834.9302232409846,
"count": 63533,
"self": 674.4003693069626,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.93001061904215,
"count": 63533,
"self": 3.3149341811968043,
"children": {
"TorchPolicy.evaluate": {
"total": 156.61507643784535,
"count": 62561,
"self": 87.15355175480363,
"children": {
"TorchPolicy.sample_actions": {
"total": 69.46152468304172,
"count": 62561,
"self": 69.46152468304172
}
}
}
}
},
"workers": {
"total": 0.5998433149798075,
"count": 63533,
"self": 0.0,
"children": {
"worker_root": {
"total": 1633.252459567957,
"count": 63533,
"is_parallel": true,
"self": 1021.1662403918835,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001194802000100026,
"count": 1,
"is_parallel": true,
"self": 0.0002632239993545227,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009315780007455032,
"count": 8,
"is_parallel": true,
"self": 0.0009315780007455032
}
}
},
"UnityEnvironment.step": {
"total": 0.02418118700006744,
"count": 1,
"is_parallel": true,
"self": 0.00020050600141985342,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00021195499903114978,
"count": 1,
"is_parallel": true,
"self": 0.00021195499903114978
},
"communicator.exchange": {
"total": 0.022794288999648415,
"count": 1,
"is_parallel": true,
"self": 0.022794288999648415
},
"steps_from_proto": {
"total": 0.0009744369999680202,
"count": 1,
"is_parallel": true,
"self": 0.0001843860009103082,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000790050999057712,
"count": 8,
"is_parallel": true,
"self": 0.000790050999057712
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 612.0862191760734,
"count": 63532,
"is_parallel": true,
"self": 12.482786769078302,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 8.382901202967332,
"count": 63532,
"is_parallel": true,
"self": 8.382901202967332
},
"communicator.exchange": {
"total": 534.8648343010609,
"count": 63532,
"is_parallel": true,
"self": 534.8648343010609
},
"steps_from_proto": {
"total": 56.35569690296688,
"count": 63532,
"is_parallel": true,
"self": 9.318342507762281,
"children": {
"_process_rank_one_or_two_observation": {
"total": 47.0373543952046,
"count": 508256,
"is_parallel": true,
"self": 47.0373543952046
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 797.4067081361427,
"count": 63533,
"self": 1.792593487063641,
"children": {
"process_trajectory": {
"total": 158.85940002108327,
"count": 63533,
"self": 158.7259573790834,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13344264199986355,
"count": 2,
"self": 0.13344264199986355
}
}
},
"_update_policy": {
"total": 636.7547146279958,
"count": 446,
"self": 190.72909291985525,
"children": {
"TorchPPOOptimizer.update": {
"total": 446.02562170814053,
"count": 22887,
"self": 446.02562170814053
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.100006769178435e-07,
"count": 1,
"self": 7.100006769178435e-07
},
"TrainerController._save_models": {
"total": 0.06066963700141059,
"count": 1,
"self": 0.0006440550005208934,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0600255820008897,
"count": 1,
"self": 0.0600255820008897
}
}
}
}
}
}
}