{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.27756866812705994,
"min": 0.27756866812705994,
"max": 1.4939614534378052,
"count": 50
},
"Pyramids.Policy.Entropy.sum": {
"value": 8322.619140625,
"min": 8322.619140625,
"max": 45320.81640625,
"count": 50
},
"Pyramids.Step.mean": {
"value": 1499942.0,
"min": 29952.0,
"max": 1499942.0,
"count": 50
},
"Pyramids.Step.sum": {
"value": 1499942.0,
"min": 29952.0,
"max": 1499942.0,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5789384245872498,
"min": -0.0975242480635643,
"max": 0.5831009149551392,
"count": 50
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 162.10275268554688,
"min": -23.50334358215332,
"max": 162.68515014648438,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.0047641322016716,
"min": -0.00961232464760542,
"max": 0.2847445607185364,
"count": 50
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.3339570760726929,
"min": -2.6433892250061035,
"max": 68.33869171142578,
"count": 50
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06626441871061112,
"min": 0.06472257043822624,
"max": 0.07405200430449053,
"count": 50
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9277018619485556,
"min": 0.481576595329626,
"max": 1.0788347453926692,
"count": 50
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014320910561950296,
"min": 0.0001412396246198773,
"max": 0.015177880950310887,
"count": 50
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20049274786730414,
"min": 0.001836115120058405,
"max": 0.2276682142546633,
"count": 50
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.9522133016761895e-06,
"min": 2.9522133016761895e-06,
"max": 0.00029676708679192377,
"count": 50
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.1330986223466654e-05,
"min": 4.1330986223466654e-05,
"max": 0.003821436126188,
"count": 50
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10098403809523811,
"min": 0.10098403809523811,
"max": 0.19892236190476195,
"count": 50
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4137765333333334,
"min": 1.3924565333333336,
"max": 2.673812,
"count": 50
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00010830540571428568,
"min": 0.00010830540571428568,
"max": 0.009892343954285714,
"count": 50
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0015162756799999995,
"min": 0.0015162756799999995,
"max": 0.1273938188,
"count": 50
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009133702144026756,
"min": 0.008654032833874226,
"max": 0.4274999797344208,
"count": 50
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1278718262910843,
"min": 0.12115645408630371,
"max": 2.992499828338623,
"count": 50
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 341.95348837209303,
"min": 325.032967032967,
"max": 999.0,
"count": 50
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29408.0,
"min": 15984.0,
"max": 32524.0,
"count": 50
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5882650993244594,
"min": -1.0000000521540642,
"max": 1.6081120736978867,
"count": 50
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 136.5907985419035,
"min": -30.99220161139965,
"max": 146.33819870650768,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5882650993244594,
"min": -1.0000000521540642,
"max": 1.6081120736978867,
"count": 50
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 136.5907985419035,
"min": -30.99220161139965,
"max": 146.33819870650768,
"count": 50
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03212499187142131,
"min": 0.030447889234842526,
"max": 8.614150084555149,
"count": 50
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.7627493009422324,
"min": 2.547172557468002,
"max": 137.82640135288239,
"count": 50
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687142688",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687146127"
},
"total": 3438.3760360919996,
"count": 1,
"self": 0.5281753519993799,
"children": {
"run_training.setup": {
"total": 0.03801211300014984,
"count": 1,
"self": 0.03801211300014984
},
"TrainerController.start_learning": {
"total": 3437.809848627,
"count": 1,
"self": 2.3642726932612277,
"children": {
"TrainerController._reset_env": {
"total": 3.9950029499996162,
"count": 1,
"self": 3.9950029499996162
},
"TrainerController.advance": {
"total": 3431.3458559627406,
"count": 95861,
"self": 2.254608118867509,
"children": {
"env_step": {
"total": 2461.6358360100976,
"count": 95861,
"self": 2283.958441310114,
"children": {
"SubprocessEnvManager._take_step": {
"total": 176.373868001946,
"count": 95861,
"self": 7.673953184113998,
"children": {
"TorchPolicy.evaluate": {
"total": 168.699914817832,
"count": 93805,
"self": 168.699914817832
}
}
},
"workers": {
"total": 1.3035266980377855,
"count": 95861,
"self": 0.0,
"children": {
"worker_root": {
"total": 3429.7770765410396,
"count": 95861,
"is_parallel": true,
"self": 1328.769679007819,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018518609995226143,
"count": 1,
"is_parallel": true,
"self": 0.0006043900002623559,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012474709992602584,
"count": 8,
"is_parallel": true,
"self": 0.0012474709992602584
}
}
},
"UnityEnvironment.step": {
"total": 0.05019675099993037,
"count": 1,
"is_parallel": true,
"self": 0.0005551630001718877,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005188309996810858,
"count": 1,
"is_parallel": true,
"self": 0.0005188309996810858
},
"communicator.exchange": {
"total": 0.04711564199988061,
"count": 1,
"is_parallel": true,
"self": 0.04711564199988061
},
"steps_from_proto": {
"total": 0.002007115000196791,
"count": 1,
"is_parallel": true,
"self": 0.0004147039999224944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015924110002742964,
"count": 8,
"is_parallel": true,
"self": 0.0015924110002742964
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2101.0073975332207,
"count": 95860,
"is_parallel": true,
"self": 50.66098508576124,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 36.23023858304714,
"count": 95860,
"is_parallel": true,
"self": 36.23023858304714
},
"communicator.exchange": {
"total": 1851.3092562163101,
"count": 95860,
"is_parallel": true,
"self": 1851.3092562163101
},
"steps_from_proto": {
"total": 162.80691764810217,
"count": 95860,
"is_parallel": true,
"self": 32.76465410011588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 130.0422635479863,
"count": 766880,
"is_parallel": true,
"self": 130.0422635479863
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 967.4554118337755,
"count": 95861,
"self": 4.311192843687422,
"children": {
"process_trajectory": {
"total": 176.3111501010826,
"count": 95861,
"self": 175.99676295108384,
"children": {
"RLTrainer._checkpoint": {
"total": 0.3143871499987654,
"count": 3,
"self": 0.3143871499987654
}
}
},
"_update_policy": {
"total": 786.8330688890055,
"count": 688,
"self": 504.9381683639567,
"children": {
"TorchPPOOptimizer.update": {
"total": 281.8949005250488,
"count": 34221,
"self": 281.8949005250488
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4869992810417898e-06,
"count": 1,
"self": 1.4869992810417898e-06
},
"TrainerController._save_models": {
"total": 0.1047155339992969,
"count": 1,
"self": 0.001472164999540837,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10324336899975606,
"count": 1,
"self": 0.10324336899975606
}
}
}
}
}
}
}