{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4918648600578308,
"min": 0.4918648600578308,
"max": 1.472496509552002,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 14661.5078125,
"min": 14661.5078125,
"max": 44669.65234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989878.0,
"min": 29952.0,
"max": 989878.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989878.0,
"min": 29952.0,
"max": 989878.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.21501803398132324,
"min": -0.10819876194000244,
"max": 0.3403164744377136,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 54.39956283569336,
"min": -26.07590103149414,
"max": 87.80165100097656,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006905943620949984,
"min": -0.03808030113577843,
"max": 0.05363503098487854,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.7472037076950073,
"min": -9.520074844360352,
"max": 12.926042556762695,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06911271545760131,
"min": 0.06091887155100411,
"max": 0.07247982541541062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9675780164064183,
"min": 0.49829920096926383,
"max": 1.0469261228087512,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.010311447635221025,
"min": 0.00034951855390376096,
"max": 0.012614916255427766,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.14436026689309434,
"min": 0.004543741200748893,
"max": 0.17660882757598872,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.685411723942859e-06,
"min": 7.685411723942859e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010759576413520002,
"min": 0.00010759576413520002,
"max": 0.0033761512746162997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10256177142857144,
"min": 0.10256177142857144,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4358648,
"min": 1.3886848,
"max": 2.4253837000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002659209657142858,
"min": 0.0002659209657142858,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037228935200000006,
"min": 0.0037228935200000006,
"max": 0.11255583163000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009169014170765877,
"min": 0.008785725571215153,
"max": 0.22358380258083344,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.12836620211601257,
"min": 0.12390249222517014,
"max": 1.5650866031646729,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 654.7391304347826,
"min": 514.1272727272727,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30118.0,
"min": 15984.0,
"max": 32359.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8233651901068895,
"min": -1.0000000521540642,
"max": 1.2329249790470516,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 37.874798744916916,
"min": -30.260001629590988,
"max": 69.04379882663488,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8233651901068895,
"min": -1.0000000521540642,
"max": 1.2329249790470516,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 37.874798744916916,
"min": -30.260001629590988,
"max": 69.04379882663488,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.06280911366775399,
"min": 0.04967818662147953,
"max": 5.181517615914345,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.8892192287166836,
"min": 2.617512088007061,
"max": 82.90428185462952,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740278399",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740280617"
},
"total": 2217.888209089,
"count": 1,
"self": 0.4877483300001586,
"children": {
"run_training.setup": {
"total": 0.021597989999918354,
"count": 1,
"self": 0.021597989999918354
},
"TrainerController.start_learning": {
"total": 2217.378862769,
"count": 1,
"self": 1.349649126955228,
"children": {
"TrainerController._reset_env": {
"total": 2.729297740999982,
"count": 1,
"self": 2.729297740999982
},
"TrainerController.advance": {
"total": 2213.204841660045,
"count": 63356,
"self": 1.4580236540627993,
"children": {
"env_step": {
"total": 1536.1341472100512,
"count": 63356,
"self": 1378.540254067045,
"children": {
"SubprocessEnvManager._take_step": {
"total": 156.80327432705076,
"count": 63356,
"self": 4.7712816460045815,
"children": {
"TorchPolicy.evaluate": {
"total": 152.03199268104618,
"count": 62568,
"self": 152.03199268104618
}
}
},
"workers": {
"total": 0.7906188159554404,
"count": 63356,
"self": 0.0,
"children": {
"worker_root": {
"total": 2212.33750645606,
"count": 63356,
"is_parallel": true,
"self": 945.7078903730601,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002474361000167846,
"count": 1,
"is_parallel": true,
"self": 0.0007019540003057045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017724069998621417,
"count": 8,
"is_parallel": true,
"self": 0.0017724069998621417
}
}
},
"UnityEnvironment.step": {
"total": 0.08311290899996493,
"count": 1,
"is_parallel": true,
"self": 0.000540854000064428,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042226000005030073,
"count": 1,
"is_parallel": true,
"self": 0.00042226000005030073
},
"communicator.exchange": {
"total": 0.08046750499988775,
"count": 1,
"is_parallel": true,
"self": 0.08046750499988775
},
"steps_from_proto": {
"total": 0.0016822899999624497,
"count": 1,
"is_parallel": true,
"self": 0.0003603830000429298,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013219069999195199,
"count": 8,
"is_parallel": true,
"self": 0.0013219069999195199
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1266.629616083,
"count": 63355,
"is_parallel": true,
"self": 31.10217069198552,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.086398218965996,
"count": 63355,
"is_parallel": true,
"self": 23.086398218965996
},
"communicator.exchange": {
"total": 1116.4700419270218,
"count": 63355,
"is_parallel": true,
"self": 1116.4700419270218
},
"steps_from_proto": {
"total": 95.97100524502662,
"count": 63355,
"is_parallel": true,
"self": 19.602799986098944,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.36820525892767,
"count": 506840,
"is_parallel": true,
"self": 76.36820525892767
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 675.6126707959309,
"count": 63356,
"self": 2.5572727070280052,
"children": {
"process_trajectory": {
"total": 126.01283364390133,
"count": 63356,
"self": 125.81075853490142,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20207510899990666,
"count": 2,
"self": 0.20207510899990666
}
}
},
"_update_policy": {
"total": 547.0425644450015,
"count": 442,
"self": 300.6203574320259,
"children": {
"TorchPPOOptimizer.update": {
"total": 246.4222070129756,
"count": 22770,
"self": 246.4222070129756
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.989997357071843e-07,
"count": 1,
"self": 7.989997357071843e-07
},
"TrainerController._save_models": {
"total": 0.09507344199982981,
"count": 1,
"self": 0.0014438369998970302,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09362960499993278,
"count": 1,
"self": 0.09362960499993278
}
}
}
}
}
}
}