{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.34832340478897095,
"min": 0.34832340478897095,
"max": 1.4622364044189453,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10405.1171875,
"min": 10405.1171875,
"max": 44358.40234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989979.0,
"min": 29952.0,
"max": 989979.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6554292440414429,
"min": -0.11360462754964828,
"max": 0.6554292440414429,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 191.38534545898438,
"min": -27.37871551513672,
"max": 191.38534545898438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.016062632203102112,
"min": -0.004301663022488356,
"max": 0.5802025198936462,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.690288543701172,
"min": -1.0668123960494995,
"max": 137.50799560546875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06672628333866928,
"min": 0.06493013817124026,
"max": 0.07394129455067562,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9341679667413699,
"min": 0.5006143359645355,
"max": 1.0700096035434399,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016255145767077227,
"min": 0.0007360366355607995,
"max": 0.01666574471681896,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22757204073908116,
"min": 0.009568476262290394,
"max": 0.24998617075228444,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.220240450428572e-06,
"min": 7.220240450428572e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010108336630600001,
"min": 0.00010108336630600001,
"max": 0.0034917745360751994,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10240671428571431,
"min": 0.10240671428571431,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4336940000000002,
"min": 1.3886848,
"max": 2.4851279,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025043075714285714,
"min": 0.00025043075714285714,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035060306,
"min": 0.0035060306,
"max": 0.11640608752,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01262574177235365,
"min": 0.01262574177235365,
"max": 0.5261805057525635,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.17676039040088654,
"min": 0.17676039040088654,
"max": 3.6832635402679443,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 290.578431372549,
"min": 290.578431372549,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29639.0,
"min": 15984.0,
"max": 32974.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6920823342367715,
"min": -1.0000000521540642,
"max": 1.6920823342367715,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 172.5923980921507,
"min": -29.783601693809032,
"max": 172.5923980921507,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6920823342367715,
"min": -1.0000000521540642,
"max": 1.6920823342367715,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 172.5923980921507,
"min": -29.783601693809032,
"max": 172.5923980921507,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03795710252654994,
"min": 0.03795710252654994,
"max": 12.040004938840866,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8716244577080943,
"min": 3.8716244577080943,
"max": 192.64007902145386,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689293779",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689296174"
},
"total": 2394.7370083289998,
"count": 1,
"self": 0.48947118399928513,
"children": {
"run_training.setup": {
"total": 0.04142073899993193,
"count": 1,
"self": 0.04142073899993193
},
"TrainerController.start_learning": {
"total": 2394.2061164060005,
"count": 1,
"self": 1.6857448490013667,
"children": {
"TrainerController._reset_env": {
"total": 5.473299649000182,
"count": 1,
"self": 5.473299649000182
},
"TrainerController.advance": {
"total": 2386.945634285999,
"count": 64012,
"self": 1.6537900949761024,
"children": {
"env_step": {
"total": 1706.0167622909635,
"count": 64012,
"self": 1580.1740359468063,
"children": {
"SubprocessEnvManager._take_step": {
"total": 124.87054938201709,
"count": 64012,
"self": 5.269762554961289,
"children": {
"TorchPolicy.evaluate": {
"total": 119.6007868270558,
"count": 62555,
"self": 119.6007868270558
}
}
},
"workers": {
"total": 0.9721769621401108,
"count": 64012,
"self": 0.0,
"children": {
"worker_root": {
"total": 2388.2770991890216,
"count": 64012,
"is_parallel": true,
"self": 938.1089773259591,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002809275999879901,
"count": 1,
"is_parallel": true,
"self": 0.000700279000284354,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002108996999595547,
"count": 8,
"is_parallel": true,
"self": 0.002108996999595547
}
}
},
"UnityEnvironment.step": {
"total": 0.05233556499979386,
"count": 1,
"is_parallel": true,
"self": 0.000573656999222294,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000528467000094679,
"count": 1,
"is_parallel": true,
"self": 0.000528467000094679
},
"communicator.exchange": {
"total": 0.049266879000242625,
"count": 1,
"is_parallel": true,
"self": 0.049266879000242625
},
"steps_from_proto": {
"total": 0.001966562000234262,
"count": 1,
"is_parallel": true,
"self": 0.0003941660011150816,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015723959991191805,
"count": 8,
"is_parallel": true,
"self": 0.0015723959991191805
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1450.1681218630624,
"count": 64011,
"is_parallel": true,
"self": 36.008163250022335,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.14850788496551,
"count": 64011,
"is_parallel": true,
"self": 25.14850788496551
},
"communicator.exchange": {
"total": 1272.351497321029,
"count": 64011,
"is_parallel": true,
"self": 1272.351497321029
},
"steps_from_proto": {
"total": 116.65995340704558,
"count": 64011,
"is_parallel": true,
"self": 23.16387124790117,
"children": {
"_process_rank_one_or_two_observation": {
"total": 93.49608215914441,
"count": 512088,
"is_parallel": true,
"self": 93.49608215914441
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 679.2750819000594,
"count": 64012,
"self": 3.0479437191411307,
"children": {
"process_trajectory": {
"total": 122.30620107891082,
"count": 64012,
"self": 122.0765762099104,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22962486900041768,
"count": 2,
"self": 0.22962486900041768
}
}
},
"_update_policy": {
"total": 553.9209371020074,
"count": 449,
"self": 356.27039495005147,
"children": {
"TorchPPOOptimizer.update": {
"total": 197.65054215195596,
"count": 22782,
"self": 197.65054215195596
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0980002116411924e-06,
"count": 1,
"self": 1.0980002116411924e-06
},
"TrainerController._save_models": {
"total": 0.10143652399983694,
"count": 1,
"self": 0.0018505890002415981,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09958593499959534,
"count": 1,
"self": 0.09958593499959534
}
}
}
}
}
}
}