{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.6116529703140259,
"min": 0.6061592102050781,
"max": 1.3944107294082642,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 18339.802734375,
"min": 18320.556640625,
"max": 42300.84375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989975.0,
"min": 29952.0,
"max": 989975.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989975.0,
"min": 29952.0,
"max": 989975.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.33403143286705017,
"min": -0.11313650012016296,
"max": 0.40177735686302185,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 86.18010711669922,
"min": -27.152759552001953,
"max": 107.27455139160156,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.013097880408167839,
"min": -0.04308406636118889,
"max": 0.3253285586833954,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -3.3792531490325928,
"min": -11.503445625305176,
"max": 78.078857421875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06867977549873536,
"min": 0.06520782027610615,
"max": 0.07517814151864355,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.961516856982295,
"min": 0.4930097880163069,
"max": 1.0359264655722369,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011767660245823208,
"min": 6.925681392715787e-05,
"max": 0.01328377440998641,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.16474724344152492,
"min": 0.0009003385810530523,
"max": 0.19925661614979615,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2890047132214335e-06,
"min": 7.2890047132214335e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010204606598510007,
"min": 0.00010204606598510007,
"max": 0.0033317517894161,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242963571428569,
"min": 0.10242963571428569,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4340148999999998,
"min": 1.3691136000000002,
"max": 2.4430834999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025272060785714304,
"min": 0.00025272060785714304,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035380885100000023,
"min": 0.0035380885100000023,
"max": 0.11106733161000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009169643744826317,
"min": 0.009169643744826317,
"max": 0.4085240960121155,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12837500870227814,
"min": 0.12837500870227814,
"max": 2.859668731689453,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 539.3962264150944,
"min": 453.34285714285716,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28588.0,
"min": 15984.0,
"max": 32800.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.0816258972993604,
"min": -1.0000000521540642,
"max": 1.3184847183904405,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 58.40779845416546,
"min": -32.000001668930054,
"max": 89.03579786419868,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.0816258972993604,
"min": -1.0000000521540642,
"max": 1.3184847183904405,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 58.40779845416546,
"min": -32.000001668930054,
"max": 89.03579786419868,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05281426646763942,
"min": 0.04624830833802532,
"max": 8.107680436223745,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8519703892525285,
"min": 2.8519703892525285,
"max": 129.72288697957993,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1704915982",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1704918133"
},
"total": 2150.4950036,
"count": 1,
"self": 0.47779296200042154,
"children": {
"run_training.setup": {
"total": 0.05154780199995912,
"count": 1,
"self": 0.05154780199995912
},
"TrainerController.start_learning": {
"total": 2149.965662836,
"count": 1,
"self": 1.3095709379576874,
"children": {
"TrainerController._reset_env": {
"total": 2.8784918490000564,
"count": 1,
"self": 2.8784918490000564
},
"TrainerController.advance": {
"total": 2145.693191206042,
"count": 63485,
"self": 1.4609100699981354,
"children": {
"env_step": {
"total": 1506.1341400329957,
"count": 63485,
"self": 1377.4946018808655,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.80377161902743,
"count": 63485,
"self": 4.630873852981495,
"children": {
"TorchPolicy.evaluate": {
"total": 123.17289776604593,
"count": 62568,
"self": 123.17289776604593
}
}
},
"workers": {
"total": 0.835766533102742,
"count": 63485,
"self": 0.0,
"children": {
"worker_root": {
"total": 2145.0528999210155,
"count": 63485,
"is_parallel": true,
"self": 884.8097471549979,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002271119999932125,
"count": 1,
"is_parallel": true,
"self": 0.0006082789998345106,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016628410000976146,
"count": 8,
"is_parallel": true,
"self": 0.0016628410000976146
}
}
},
"UnityEnvironment.step": {
"total": 0.08595335400013937,
"count": 1,
"is_parallel": true,
"self": 0.0006425349999972241,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004172520000338409,
"count": 1,
"is_parallel": true,
"self": 0.0004172520000338409
},
"communicator.exchange": {
"total": 0.08312986800001454,
"count": 1,
"is_parallel": true,
"self": 0.08312986800001454
},
"steps_from_proto": {
"total": 0.0017636990000937658,
"count": 1,
"is_parallel": true,
"self": 0.0003620420000061131,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014016570000876527,
"count": 8,
"is_parallel": true,
"self": 0.0014016570000876527
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1260.2431527660176,
"count": 63484,
"is_parallel": true,
"self": 34.6561118289078,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 24.146154590015612,
"count": 63484,
"is_parallel": true,
"self": 24.146154590015612
},
"communicator.exchange": {
"total": 1104.5051646560983,
"count": 63484,
"is_parallel": true,
"self": 1104.5051646560983
},
"steps_from_proto": {
"total": 96.93572169099593,
"count": 63484,
"is_parallel": true,
"self": 19.135609518872798,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.80011217212314,
"count": 507872,
"is_parallel": true,
"self": 77.80011217212314
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 638.0981411030482,
"count": 63485,
"self": 2.4847003570605466,
"children": {
"process_trajectory": {
"total": 125.06147663298748,
"count": 63485,
"self": 124.87426974898722,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1872068840002612,
"count": 2,
"self": 0.1872068840002612
}
}
},
"_update_policy": {
"total": 510.55196411300017,
"count": 442,
"self": 304.9086849919977,
"children": {
"TorchPPOOptimizer.update": {
"total": 205.64327912100248,
"count": 22770,
"self": 205.64327912100248
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.890001481631771e-07,
"count": 1,
"self": 8.890001481631771e-07
},
"TrainerController._save_models": {
"total": 0.08440795400019852,
"count": 1,
"self": 0.0013986420003675448,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08300931199983097,
"count": 1,
"self": 0.08300931199983097
}
}
}
}
}
}
}