{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.8482744097709656,
"min": 0.8238581418991089,
"max": 1.3770806789398193,
"count": 16
},
"Pyramids.Policy.Entropy.sum": {
"value": 25231.07421875,
"min": 24465.291015625,
"max": 41775.12109375,
"count": 16
},
"Pyramids.Step.mean": {
"value": 479953.0,
"min": 29952.0,
"max": 479953.0,
"count": 16
},
"Pyramids.Step.sum": {
"value": 479953.0,
"min": 29952.0,
"max": 479953.0,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.1001254990696907,
"min": -0.09756134450435638,
"max": 0.26537954807281494,
"count": 16
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 24.931249618530273,
"min": -23.512283325195312,
"max": 62.894954681396484,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.025898238644003868,
"min": 0.023176442831754684,
"max": 0.47287121415138245,
"count": 16
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.4486613273620605,
"min": 5.655052185058594,
"max": 112.07048034667969,
"count": 16
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06744945801295171,
"min": 0.06413263848596983,
"max": 0.07580344352214742,
"count": 16
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.944292412181324,
"min": 0.530624104655032,
"max": 1.0091975809579405,
"count": 16
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.008235233433030694,
"min": 0.00034471488094860544,
"max": 0.011257351826661132,
"count": 16
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.11529326806242973,
"min": 0.0041365785713832655,
"max": 0.11529326806242973,
"count": 16
},
"Pyramids.Policy.LearningRate.mean": {
"value": 2.0728635947628568e-05,
"min": 2.0728635947628568e-05,
"max": 0.00029030126037577137,
"count": 16
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00029020090326679996,
"min": 0.00029020090326679996,
"max": 0.0030851756716082,
"count": 16
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10690951428571428,
"min": 0.10690951428571428,
"max": 0.19676708571428575,
"count": 16
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4967332,
"min": 1.3773696000000002,
"max": 2.3389688,
"count": 16
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0007002604771428573,
"min": 0.0007002604771428573,
"max": 0.00967703186285714,
"count": 16
},
"Pyramids.Policy.Beta.sum": {
"value": 0.009803646680000002,
"min": 0.009803646680000002,
"max": 0.10286634082000001,
"count": 16
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.024802012369036674,
"min": 0.024802012369036674,
"max": 0.5258089900016785,
"count": 16
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.34722816944122314,
"min": 0.34722816944122314,
"max": 3.6806631088256836,
"count": 16
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 777.725,
"min": 777.725,
"max": 999.0,
"count": 16
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31109.0,
"min": 15984.0,
"max": 32685.0,
"count": 16
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.47200495805591347,
"min": -1.0000000521540642,
"max": 0.47200495805591347,
"count": 16
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 18.880198322236538,
"min": -31.998801693320274,
"max": 18.880198322236538,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.47200495805591347,
"min": -1.0000000521540642,
"max": 0.47200495805591347,
"count": 16
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 18.880198322236538,
"min": -31.998801693320274,
"max": 18.880198322236538,
"count": 16
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.20378975550411269,
"min": 0.20378975550411269,
"max": 11.0823162663728,
"count": 16
},
"Pyramids.Policy.RndReward.sum": {
"value": 8.151590220164508,
"min": 8.151590220164508,
"max": 177.3170602619648,
"count": 16
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1728710997",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.4.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1728712039"
},
"total": 1042.6718296469999,
"count": 1,
"self": 0.494355411000015,
"children": {
"run_training.setup": {
"total": 0.056343589999983124,
"count": 1,
"self": 0.056343589999983124
},
"TrainerController.start_learning": {
"total": 1042.121130646,
"count": 1,
"self": 0.6655041160024666,
"children": {
"TrainerController._reset_env": {
"total": 2.556922285999974,
"count": 1,
"self": 2.556922285999974
},
"TrainerController.advance": {
"total": 1038.8155495239973,
"count": 31575,
"self": 0.6855038910177882,
"children": {
"env_step": {
"total": 692.0674005139838,
"count": 31575,
"self": 615.8416372670064,
"children": {
"SubprocessEnvManager._take_step": {
"total": 75.82687134398492,
"count": 31575,
"self": 2.2725660590109555,
"children": {
"TorchPolicy.evaluate": {
"total": 73.55430528497396,
"count": 31295,
"self": 73.55430528497396
}
}
},
"workers": {
"total": 0.39889190299248867,
"count": 31575,
"self": 0.0,
"children": {
"worker_root": {
"total": 1039.6019978100187,
"count": 31575,
"is_parallel": true,
"self": 482.5742403670206,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00209915400000682,
"count": 1,
"is_parallel": true,
"self": 0.0006228100000953418,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001476343999911478,
"count": 8,
"is_parallel": true,
"self": 0.001476343999911478
}
}
},
"UnityEnvironment.step": {
"total": 0.057373281000025145,
"count": 1,
"is_parallel": true,
"self": 0.0006079620000605246,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004718740000271282,
"count": 1,
"is_parallel": true,
"self": 0.0004718740000271282
},
"communicator.exchange": {
"total": 0.05478363999998237,
"count": 1,
"is_parallel": true,
"self": 0.05478363999998237
},
"steps_from_proto": {
"total": 0.0015098049999551222,
"count": 1,
"is_parallel": true,
"self": 0.0003008470000054331,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012089579999496891,
"count": 8,
"is_parallel": true,
"self": 0.0012089579999496891
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 557.0277574429981,
"count": 31574,
"is_parallel": true,
"self": 16.579979727009686,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 11.314982642003486,
"count": 31574,
"is_parallel": true,
"self": 11.314982642003486
},
"communicator.exchange": {
"total": 482.1058485129947,
"count": 31574,
"is_parallel": true,
"self": 482.1058485129947
},
"steps_from_proto": {
"total": 47.02694656099021,
"count": 31574,
"is_parallel": true,
"self": 9.431337442942493,
"children": {
"_process_rank_one_or_two_observation": {
"total": 37.59560911804772,
"count": 252592,
"is_parallel": true,
"self": 37.59560911804772
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 346.0626451189957,
"count": 31575,
"self": 1.2273609859848307,
"children": {
"process_trajectory": {
"total": 65.45234130701198,
"count": 31575,
"self": 65.26030450401191,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19203680300006454,
"count": 1,
"self": 0.19203680300006454
}
}
},
"_update_policy": {
"total": 279.3829428259989,
"count": 218,
"self": 157.43463760499918,
"children": {
"TorchPPOOptimizer.update": {
"total": 121.94830522099971,
"count": 11418,
"self": 121.94830522099971
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0000001111620804e-06,
"count": 1,
"self": 1.0000001111620804e-06
},
"TrainerController._save_models": {
"total": 0.08315372000015486,
"count": 1,
"self": 0.001286994000111008,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08186672600004385,
"count": 1,
"self": 0.08186672600004385
}
}
}
}
}
}
}