{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5407678484916687,
"min": 0.5407678484916687,
"max": 1.4623218774795532,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16024.0322265625,
"min": 16024.0322265625,
"max": 44360.99609375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989890.0,
"min": 29952.0,
"max": 989890.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989890.0,
"min": 29952.0,
"max": 989890.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.503671407699585,
"min": -0.10175586491823196,
"max": 0.5733243227005005,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 137.50228881835938,
"min": -24.726675033569336,
"max": 158.23751831054688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.024476993829011917,
"min": -0.23295113444328308,
"max": 0.3963066041469574,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.682219505310059,
"min": -61.26614761352539,
"max": 93.92466735839844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0724736359247583,
"min": 0.06483398421689691,
"max": 0.07342556430880316,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0146309029466163,
"min": 0.49362125625815795,
"max": 1.0838274830117978,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014799059452863212,
"min": 8.526768997569097e-05,
"max": 0.017005501146195458,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20718683234008498,
"min": 0.0011937476596596737,
"max": 0.25508251719293185,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.418604670021428e-06,
"min": 7.418604670021428e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001038604653803,
"min": 0.0001038604653803,
"max": 0.003505248231584,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247283571428575,
"min": 0.10247283571428575,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346197000000005,
"min": 1.3691136000000002,
"max": 2.568416,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025703628785714284,
"min": 0.00025703628785714284,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035985080299999997,
"min": 0.0035985080299999997,
"max": 0.1168647584,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.008443105034530163,
"min": 0.008443105034530163,
"max": 0.42193976044654846,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.11820347607135773,
"min": 0.11820347607135773,
"max": 2.953578233718872,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 380.987012987013,
"min": 329.25274725274727,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29336.0,
"min": 15984.0,
"max": 32652.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4631376487094085,
"min": -1.0000000521540642,
"max": 1.6460577524370616,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 112.66159895062447,
"min": -32.000001668930054,
"max": 148.14519771933556,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4631376487094085,
"min": -1.0000000521540642,
"max": 1.6460577524370616,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 112.66159895062447,
"min": -32.000001668930054,
"max": 148.14519771933556,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03328385552113223,
"min": 0.030734434690303816,
"max": 8.5698985401541,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.5628568751271814,
"min": 2.5628568751271814,
"max": 137.1183766424656,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1728152304",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1728155667"
},
"total": 3363.203998131,
"count": 1,
"self": 1.0263237119997939,
"children": {
"run_training.setup": {
"total": 0.13654329300015888,
"count": 1,
"self": 0.13654329300015888
},
"TrainerController.start_learning": {
"total": 3362.041131126,
"count": 1,
"self": 2.2724594359974617,
"children": {
"TrainerController._reset_env": {
"total": 4.304678932999877,
"count": 1,
"self": 4.304678932999877
},
"TrainerController.advance": {
"total": 3355.365311287003,
"count": 63737,
"self": 2.6751873471212093,
"children": {
"env_step": {
"total": 2189.66683478899,
"count": 63737,
"self": 2015.2593802531205,
"children": {
"SubprocessEnvManager._take_step": {
"total": 172.95193703896825,
"count": 63737,
"self": 7.6877488959742095,
"children": {
"TorchPolicy.evaluate": {
"total": 165.26418814299404,
"count": 62573,
"self": 165.26418814299404
}
}
},
"workers": {
"total": 1.455517496901166,
"count": 63737,
"self": 0.0,
"children": {
"worker_root": {
"total": 3354.4901359989726,
"count": 63737,
"is_parallel": true,
"self": 1533.6672468509812,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002672122999911153,
"count": 1,
"is_parallel": true,
"self": 0.000864783999759311,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001807339000151842,
"count": 8,
"is_parallel": true,
"self": 0.001807339000151842
}
}
},
"UnityEnvironment.step": {
"total": 0.09196317099986118,
"count": 1,
"is_parallel": true,
"self": 0.0033734350001850544,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005936099998962163,
"count": 1,
"is_parallel": true,
"self": 0.0005936099998962163
},
"communicator.exchange": {
"total": 0.08572852999986935,
"count": 1,
"is_parallel": true,
"self": 0.08572852999986935
},
"steps_from_proto": {
"total": 0.0022675959999105544,
"count": 1,
"is_parallel": true,
"self": 0.0005178000001251348,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017497959997854196,
"count": 8,
"is_parallel": true,
"self": 0.0017497959997854196
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1820.8228891479914,
"count": 63736,
"is_parallel": true,
"self": 52.39772183494438,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.95655688103011,
"count": 63736,
"is_parallel": true,
"self": 30.95655688103011
},
"communicator.exchange": {
"total": 1604.6491993810014,
"count": 63736,
"is_parallel": true,
"self": 1604.6491993810014
},
"steps_from_proto": {
"total": 132.81941105101555,
"count": 63736,
"is_parallel": true,
"self": 28.75444018814369,
"children": {
"_process_rank_one_or_two_observation": {
"total": 104.06497086287186,
"count": 509888,
"is_parallel": true,
"self": 104.06497086287186
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1163.0232891508917,
"count": 63737,
"self": 4.66205958491696,
"children": {
"process_trajectory": {
"total": 174.85832384197738,
"count": 63737,
"self": 174.6094879619768,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2488358800005699,
"count": 2,
"self": 0.2488358800005699
}
}
},
"_update_policy": {
"total": 983.5029057239974,
"count": 448,
"self": 393.25808448401494,
"children": {
"TorchPPOOptimizer.update": {
"total": 590.2448212399825,
"count": 22845,
"self": 590.2448212399825
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6429994502686895e-06,
"count": 1,
"self": 1.6429994502686895e-06
},
"TrainerController._save_models": {
"total": 0.0986798270005238,
"count": 1,
"self": 0.004116694000003918,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09456313300051988,
"count": 1,
"self": 0.09456313300051988
}
}
}
}
}
}
}