{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2737155854701996,
"min": 0.2737155854701996,
"max": 1.4636592864990234,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 8128.2578125,
"min": 8128.2578125,
"max": 44401.56640625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989901.0,
"min": 29952.0,
"max": 989901.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5711452960968018,
"min": -0.08777591586112976,
"max": 0.6317324042320251,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 159.92068481445312,
"min": -21.153995513916016,
"max": 180.6754608154297,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.02830958366394043,
"min": -0.026873625814914703,
"max": 0.5821145176887512,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 7.92668342590332,
"min": -6.825901031494141,
"max": 137.9611358642578,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07000973117135886,
"min": 0.0641350160225818,
"max": 0.07408802489324187,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9801362363990241,
"min": 0.4999073677463457,
"max": 1.0664408130687664,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016412346961650264,
"min": 0.00028157564408214135,
"max": 0.017568348714294624,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22977285746310372,
"min": 0.0019710295085749894,
"max": 0.2550734743175174,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.258404723421429e-06,
"min": 7.258404723421429e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001016176661279,
"min": 0.0001016176661279,
"max": 0.0032175469274845,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241943571428573,
"min": 0.10241943571428573,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338721000000003,
"min": 1.327104,
"max": 2.5725155000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025170162785714297,
"min": 0.00025170162785714297,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035238227900000014,
"min": 0.0035238227900000014,
"max": 0.10729429845,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.01111177634447813,
"min": 0.01111177634447813,
"max": 0.36652323603630066,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.15556487441062927,
"min": 0.15556487441062927,
"max": 2.5656626224517822,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 323.70103092783506,
"min": 308.12371134020617,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31399.0,
"min": 15984.0,
"max": 32415.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.614426787981053,
"min": -1.0000000521540642,
"max": 1.6738291482130687,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 156.59939843416214,
"min": -32.000001668930054,
"max": 160.6875982284546,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.614426787981053,
"min": -1.0000000521540642,
"max": 1.6738291482130687,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 156.59939843416214,
"min": -32.000001668930054,
"max": 160.6875982284546,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03706810462248653,
"min": 0.03706810462248653,
"max": 7.271036705002189,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.5956061483811936,
"min": 3.294545262819156,
"max": 116.33658728003502,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751144409",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751146638"
},
"total": 2229.4143894649997,
"count": 1,
"self": 0.47602322199963965,
"children": {
"run_training.setup": {
"total": 0.020007694000014453,
"count": 1,
"self": 0.020007694000014453
},
"TrainerController.start_learning": {
"total": 2228.918358549,
"count": 1,
"self": 1.2853784940202786,
"children": {
"TrainerController._reset_env": {
"total": 2.2730405299998893,
"count": 1,
"self": 2.2730405299998893
},
"TrainerController.advance": {
"total": 2225.2807002079803,
"count": 63948,
"self": 1.362135683867109,
"children": {
"env_step": {
"total": 1564.0361288090637,
"count": 63948,
"self": 1417.730686335073,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.51534128001504,
"count": 63948,
"self": 4.537451229932458,
"children": {
"TorchPolicy.evaluate": {
"total": 140.97789005008258,
"count": 62538,
"self": 140.97789005008258
}
}
},
"workers": {
"total": 0.7901011939757154,
"count": 63948,
"self": 0.0,
"children": {
"worker_root": {
"total": 2223.8238351910613,
"count": 63948,
"is_parallel": true,
"self": 917.0646645521256,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017428329999802372,
"count": 1,
"is_parallel": true,
"self": 0.000544438999668273,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011983940003119642,
"count": 8,
"is_parallel": true,
"self": 0.0011983940003119642
}
}
},
"UnityEnvironment.step": {
"total": 0.056696920999911526,
"count": 1,
"is_parallel": true,
"self": 0.0005280889997720806,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046302300006573205,
"count": 1,
"is_parallel": true,
"self": 0.00046302300006573205
},
"communicator.exchange": {
"total": 0.054181516000198826,
"count": 1,
"is_parallel": true,
"self": 0.054181516000198826
},
"steps_from_proto": {
"total": 0.0015242929998748878,
"count": 1,
"is_parallel": true,
"self": 0.00032026299959397875,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001204030000280909,
"count": 8,
"is_parallel": true,
"self": 0.001204030000280909
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1306.7591706389358,
"count": 63947,
"is_parallel": true,
"self": 32.28764648497713,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.044011226985504,
"count": 63947,
"is_parallel": true,
"self": 23.044011226985504
},
"communicator.exchange": {
"total": 1156.2718901660107,
"count": 63947,
"is_parallel": true,
"self": 1156.2718901660107
},
"steps_from_proto": {
"total": 95.15562276096239,
"count": 63947,
"is_parallel": true,
"self": 18.655950168900517,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.49967259206187,
"count": 511576,
"is_parallel": true,
"self": 76.49967259206187
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 659.8824357150495,
"count": 63948,
"self": 2.5481671930326684,
"children": {
"process_trajectory": {
"total": 126.94658287701031,
"count": 63948,
"self": 126.75597779000964,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19060508700067658,
"count": 2,
"self": 0.19060508700067658
}
}
},
"_update_policy": {
"total": 530.3876856450065,
"count": 434,
"self": 294.2917523529993,
"children": {
"TorchPPOOptimizer.update": {
"total": 236.0959332920072,
"count": 22806,
"self": 236.0959332920072
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.249999154941179e-07,
"count": 1,
"self": 8.249999154941179e-07
},
"TrainerController._save_models": {
"total": 0.07923849199960387,
"count": 1,
"self": 0.0013186989999667276,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07791979299963714,
"count": 1,
"self": 0.07791979299963714
}
}
}
}
}
}
}