{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.1704377681016922,
"min": 0.16250012814998627,
"max": 1.3789162635803223,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5061.31982421875,
"min": 4864.60400390625,
"max": 41830.8046875,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999914.0,
"min": 29952.0,
"max": 2999914.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999914.0,
"min": 29952.0,
"max": 2999914.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7902837991714478,
"min": -0.09778378903865814,
"max": 0.8584803938865662,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 237.87542724609375,
"min": -23.565893173217773,
"max": 262.69500732421875,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.00298650492914021,
"min": -0.018346533179283142,
"max": 0.24226222932338715,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 0.898938000202179,
"min": -5.320494651794434,
"max": 58.14293670654297,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0656459979031102,
"min": 0.0640618006432369,
"max": 0.07418233807034352,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9190439706435427,
"min": 0.4868394951367866,
"max": 1.112735071055153,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014464565969925996,
"min": 6.014493335350987e-05,
"max": 0.017031274515284675,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20250392357896393,
"min": 0.0007818841335956283,
"max": 0.24928042040133447,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.5315923466452327e-06,
"min": 1.5315923466452327e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.144229285303326e-05,
"min": 2.144229285303326e-05,
"max": 0.003969168476943867,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10051049761904762,
"min": 0.10051049761904762,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4071469666666667,
"min": 1.3962282666666668,
"max": 2.7375194333333335,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 6.099871214285698e-05,
"min": 6.099871214285698e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008539819699999977,
"min": 0.0008539819699999977,
"max": 0.13231330772,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.006842241622507572,
"min": 0.006842241622507572,
"max": 0.45617565512657166,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.09579138457775116,
"min": 0.09579138457775116,
"max": 3.1932296752929688,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 218.77037037037036,
"min": 205.33812949640287,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29534.0,
"min": 15984.0,
"max": 32976.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7664088795582453,
"min": -1.0000000521540642,
"max": 1.7946618575629572,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 238.46519874036312,
"min": -29.98700162023306,
"max": 255.99759805202484,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7664088795582453,
"min": -1.0000000521540642,
"max": 1.7946618575629572,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 238.46519874036312,
"min": -29.98700162023306,
"max": 255.99759805202484,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.0155968772913588,
"min": 0.015450548466813215,
"max": 8.832182018086314,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.105578434333438,
"min": 2.105578434333438,
"max": 141.31491228938103,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673979155",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673985886"
},
"total": 6731.185879846999,
"count": 1,
"self": 0.4362290599992775,
"children": {
"run_training.setup": {
"total": 0.11607115999959206,
"count": 1,
"self": 0.11607115999959206
},
"TrainerController.start_learning": {
"total": 6730.6335796270005,
"count": 1,
"self": 3.603178385767933,
"children": {
"TrainerController._reset_env": {
"total": 6.405750881999666,
"count": 1,
"self": 6.405750881999666
},
"TrainerController.advance": {
"total": 6720.532759976234,
"count": 194738,
"self": 3.622378233094423,
"children": {
"env_step": {
"total": 4727.929942035064,
"count": 194738,
"self": 4427.488266751005,
"children": {
"SubprocessEnvManager._take_step": {
"total": 298.2716086270757,
"count": 194738,
"self": 12.487329158991088,
"children": {
"TorchPolicy.evaluate": {
"total": 285.7842794680846,
"count": 187547,
"self": 96.50915453912148,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.27512492896312,
"count": 187547,
"self": 189.27512492896312
}
}
}
}
},
"workers": {
"total": 2.1700666569840905,
"count": 194738,
"self": 0.0,
"children": {
"worker_root": {
"total": 6717.929572420756,
"count": 194738,
"is_parallel": true,
"self": 2577.9597060974784,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016657129999657627,
"count": 1,
"is_parallel": true,
"self": 0.0005343929997252417,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001131320000240521,
"count": 8,
"is_parallel": true,
"self": 0.001131320000240521
}
}
},
"UnityEnvironment.step": {
"total": 0.044550435000019206,
"count": 1,
"is_parallel": true,
"self": 0.00046901699988666223,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004629760005627759,
"count": 1,
"is_parallel": true,
"self": 0.0004629760005627759
},
"communicator.exchange": {
"total": 0.04197596399990289,
"count": 1,
"is_parallel": true,
"self": 0.04197596399990289
},
"steps_from_proto": {
"total": 0.0016424779996668804,
"count": 1,
"is_parallel": true,
"self": 0.00040788099886412965,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012345970008027507,
"count": 8,
"is_parallel": true,
"self": 0.0012345970008027507
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4139.9698663232775,
"count": 194737,
"is_parallel": true,
"self": 83.69388864580469,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 66.25068691022989,
"count": 194737,
"is_parallel": true,
"self": 66.25068691022989
},
"communicator.exchange": {
"total": 3691.435993178892,
"count": 194737,
"is_parallel": true,
"self": 3691.435993178892
},
"steps_from_proto": {
"total": 298.58929758835075,
"count": 194737,
"is_parallel": true,
"self": 64.67071543008478,
"children": {
"_process_rank_one_or_two_observation": {
"total": 233.91858215826596,
"count": 1557896,
"is_parallel": true,
"self": 233.91858215826596
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1988.9804397080752,
"count": 194738,
"self": 6.85351151818486,
"children": {
"process_trajectory": {
"total": 439.5669318208784,
"count": 194738,
"self": 438.98961023787797,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5773215830004119,
"count": 6,
"self": 0.5773215830004119
}
}
},
"_update_policy": {
"total": 1542.559996369012,
"count": 1391,
"self": 590.9275996289753,
"children": {
"TorchPPOOptimizer.update": {
"total": 951.6323967400367,
"count": 68307,
"self": 951.6323967400367
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5519999578827992e-06,
"count": 1,
"self": 1.5519999578827992e-06
},
"TrainerController._save_models": {
"total": 0.0918888309988688,
"count": 1,
"self": 0.0015667959978600265,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09032203500100877,
"count": 1,
"self": 0.09032203500100877
}
}
}
}
}
}
}