{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.4741351306438446,
"min": 0.4741351306438446,
"max": 1.4752534627914429,
"count": 38
},
"Pyramids.Policy.Entropy.sum": {
"value": 14170.951171875,
"min": 14170.951171875,
"max": 44753.2890625,
"count": 38
},
"Pyramids.Step.mean": {
"value": 1139906.0,
"min": 29952.0,
"max": 1139906.0,
"count": 38
},
"Pyramids.Step.sum": {
"value": 1139906.0,
"min": 29952.0,
"max": 1139906.0,
"count": 38
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5179052352905273,
"min": -0.11418596655130386,
"max": 0.5179052352905273,
"count": 38
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 142.42393493652344,
"min": -27.518817901611328,
"max": 142.42393493652344,
"count": 38
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.031623564660549164,
"min": 0.01680169440805912,
"max": 0.3216843605041504,
"count": 38
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 8.696480751037598,
"min": 4.334836959838867,
"max": 76.47903442382812,
"count": 38
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06969120601605114,
"min": 0.06413979248209115,
"max": 0.07418953383226322,
"count": 38
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.975676884224716,
"min": 0.5053599941739598,
"max": 1.0806849657058004,
"count": 38
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.018262600138162575,
"min": 5.9414489272486543e-05,
"max": 0.018262600138162575,
"count": 38
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.25567640193427604,
"min": 0.0007723883605423251,
"max": 0.25567640193427604,
"count": 38
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00018755980176579285,
"min": 0.00018755980176579285,
"max": 0.00029838354339596195,
"count": 38
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0026258372247211,
"min": 0.0020886848037717336,
"max": 0.003646478684507133,
"count": 38
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.16251992142857144,
"min": 0.16251992142857144,
"max": 0.19946118095238097,
"count": 38
},
"Pyramids.Policy.Epsilon.sum": {
"value": 2.2752789,
"min": 1.3962282666666668,
"max": 2.6110292333333334,
"count": 38
},
"Pyramids.Policy.Beta.mean": {
"value": 0.006255740150714286,
"min": 0.006255740150714286,
"max": 0.009946171977142856,
"count": 38
},
"Pyramids.Policy.Beta.sum": {
"value": 0.08758036211,
"min": 0.06962320384,
"max": 0.12155773738,
"count": 38
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.02799767069518566,
"min": 0.02446014992892742,
"max": 0.48671993613243103,
"count": 38
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.39196738600730896,
"min": 0.3424420952796936,
"max": 3.4070396423339844,
"count": 38
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 330.3707865168539,
"min": 330.3707865168539,
"max": 999.0,
"count": 38
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29403.0,
"min": 15984.0,
"max": 32821.0,
"count": 38
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6706291942783955,
"min": -1.0000000521540642,
"max": 1.6706291942783955,
"count": 38
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 148.6859982907772,
"min": -31.99760167300701,
"max": 148.6859982907772,
"count": 38
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6706291942783955,
"min": -1.0000000521540642,
"max": 1.6706291942783955,
"count": 38
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 148.6859982907772,
"min": -31.99760167300701,
"max": 148.6859982907772,
"count": 38
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09393738361410378,
"min": 0.09393738361410378,
"max": 10.751796618103981,
"count": 38
},
"Pyramids.Policy.RndReward.sum": {
"value": 8.360427141655236,
"min": 7.830947146052495,
"max": 172.0287458896637,
"count": 38
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 38
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1712402254",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1712404694"
},
"total": 2440.48331886,
"count": 1,
"self": 0.38190787200028353,
"children": {
"run_training.setup": {
"total": 0.05751398000001018,
"count": 1,
"self": 0.05751398000001018
},
"TrainerController.start_learning": {
"total": 2440.043897008,
"count": 1,
"self": 1.571812851019331,
"children": {
"TrainerController._reset_env": {
"total": 3.2215791130000184,
"count": 1,
"self": 3.2215791130000184
},
"TrainerController.advance": {
"total": 2435.0980102049807,
"count": 72833,
"self": 1.600132085981386,
"children": {
"env_step": {
"total": 1714.80622496499,
"count": 72833,
"self": 1563.7804019949933,
"children": {
"SubprocessEnvManager._take_step": {
"total": 150.0812637979838,
"count": 72833,
"self": 5.3667055199405524,
"children": {
"TorchPolicy.evaluate": {
"total": 144.71455827804326,
"count": 71885,
"self": 144.71455827804326
}
}
},
"workers": {
"total": 0.9445591720129016,
"count": 72832,
"self": 0.0,
"children": {
"worker_root": {
"total": 2434.47097356699,
"count": 72832,
"is_parallel": true,
"self": 1007.1148519589115,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005741585999999188,
"count": 1,
"is_parallel": true,
"self": 0.004066848000007894,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016747379999912937,
"count": 8,
"is_parallel": true,
"self": 0.0016747379999912937
}
}
},
"UnityEnvironment.step": {
"total": 0.05333513399997969,
"count": 1,
"is_parallel": true,
"self": 0.0006377299999655861,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000481374000003143,
"count": 1,
"is_parallel": true,
"self": 0.000481374000003143
},
"communicator.exchange": {
"total": 0.0505824109999935,
"count": 1,
"is_parallel": true,
"self": 0.0505824109999935
},
"steps_from_proto": {
"total": 0.001633619000017461,
"count": 1,
"is_parallel": true,
"self": 0.000342312000043421,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00129130699997404,
"count": 8,
"is_parallel": true,
"self": 0.00129130699997404
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1427.3561216080784,
"count": 72831,
"is_parallel": true,
"self": 40.51816200402686,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 26.899065164009016,
"count": 72831,
"is_parallel": true,
"self": 26.899065164009016
},
"communicator.exchange": {
"total": 1246.5510601810377,
"count": 72831,
"is_parallel": true,
"self": 1246.5510601810377
},
"steps_from_proto": {
"total": 113.3878342590049,
"count": 72831,
"is_parallel": true,
"self": 22.825436815857074,
"children": {
"_process_rank_one_or_two_observation": {
"total": 90.56239744314783,
"count": 582648,
"is_parallel": true,
"self": 90.56239744314783
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 718.6916531540096,
"count": 72832,
"self": 2.8914169819819335,
"children": {
"process_trajectory": {
"total": 144.06817854202606,
"count": 72832,
"self": 143.77846502102622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2897135209998396,
"count": 2,
"self": 0.2897135209998396
}
}
},
"_update_policy": {
"total": 571.7320576300016,
"count": 502,
"self": 335.7737611589828,
"children": {
"TorchPPOOptimizer.update": {
"total": 235.95829647101883,
"count": 26238,
"self": 235.95829647101883
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.4229999578674324e-06,
"count": 1,
"self": 1.4229999578674324e-06
},
"TrainerController._save_models": {
"total": 0.15249341599974287,
"count": 1,
"self": 0.0022893529994689743,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1502040630002739,
"count": 1,
"self": 0.1502040630002739
}
}
}
}
}
}
}