{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0551000833511353,
"min": 1.0551000833511353,
"max": 2.862008571624756,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10085.701171875,
"min": 10085.701171875,
"max": 29498.72265625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.656046867370605,
"min": 0.32267439365386963,
"max": 12.656046867370605,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2467.92919921875,
"min": 62.59883499145508,
"max": 2567.480712890625,
"count": 20
},
"SnowballTarget.Policy.RndValueEstimate.mean": {
"value": -0.0009955825516954064,
"min": -0.03700552135705948,
"max": 0.00973495189100504,
"count": 20
},
"SnowballTarget.Policy.RndValueEstimate.sum": {
"value": -0.19413858652114868,
"min": -7.179071426391602,
"max": 1.898315668106079,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06684095613537447,
"min": 0.061452012961358285,
"max": 0.07469023812383038,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2673638245414979,
"min": 0.24580805184543314,
"max": 0.3529809728708785,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.10505503836069621,
"min": 0.06902955410837688,
"max": 0.14385717073638066,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.42022015344278485,
"min": 0.27611821643350754,
"max": 0.6739003345662472,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Losses.RNDLoss.mean": {
"value": 0.004172837361693382,
"min": 0.004172837361693382,
"max": 0.41886237263679504,
"count": 20
},
"SnowballTarget.Losses.RNDLoss.sum": {
"value": 0.01669134944677353,
"min": 0.01669134944677353,
"max": 1.6754494905471802,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.272727272727273,
"min": 3.8181818181818183,
"max": 25.272727272727273,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1112.0,
"min": 168.0,
"max": 1374.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.272727272727273,
"min": 3.8181818181818183,
"max": 25.272727272727273,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1112.0,
"min": 168.0,
"max": 1374.0,
"count": 20
},
"SnowballTarget.Policy.RndReward.mean": {
"value": 0.004277452380020722,
"min": 0.004277452380020722,
"max": 0.5791144296950237,
"count": 20
},
"SnowballTarget.Policy.RndReward.sum": {
"value": 0.18820790472091176,
"min": 0.18820790472091176,
"max": 25.481034906581044,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1719419975",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn /content/ml-agents/config/ppo/SnowballTarget.yaml --env=/content/training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1719420664"
},
"total": 688.6761523559999,
"count": 1,
"self": 1.0541900560001523,
"children": {
"run_training.setup": {
"total": 0.06820072099981189,
"count": 1,
"self": 0.06820072099981189
},
"TrainerController.start_learning": {
"total": 687.5537615789999,
"count": 1,
"self": 1.3037707329103796,
"children": {
"TrainerController._reset_env": {
"total": 2.538256335999904,
"count": 1,
"self": 2.538256335999904
},
"TrainerController.advance": {
"total": 683.5392678530907,
"count": 18211,
"self": 0.47428005311121524,
"children": {
"env_step": {
"total": 683.0649877999795,
"count": 18211,
"self": 540.2831138090742,
"children": {
"SubprocessEnvManager._take_step": {
"total": 142.32900804891506,
"count": 18211,
"self": 2.697478199809666,
"children": {
"TorchPolicy.evaluate": {
"total": 139.6315298491054,
"count": 18211,
"self": 139.6315298491054
}
}
},
"workers": {
"total": 0.45286594199023966,
"count": 18211,
"self": 0.0,
"children": {
"worker_root": {
"total": 685.089505107042,
"count": 18211,
"is_parallel": true,
"self": 353.2331788320653,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002701184000216017,
"count": 1,
"is_parallel": true,
"self": 0.0007869420014685602,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019142419987474568,
"count": 10,
"is_parallel": true,
"self": 0.0019142419987474568
}
}
},
"UnityEnvironment.step": {
"total": 0.07612469800005783,
"count": 1,
"is_parallel": true,
"self": 0.0009638999990784214,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004944689999319962,
"count": 1,
"is_parallel": true,
"self": 0.0004944689999319962
},
"communicator.exchange": {
"total": 0.07223946200065257,
"count": 1,
"is_parallel": true,
"self": 0.07223946200065257
},
"steps_from_proto": {
"total": 0.00242686700039485,
"count": 1,
"is_parallel": true,
"self": 0.0004909889994451078,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019358780009497423,
"count": 10,
"is_parallel": true,
"self": 0.0019358780009497423
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 331.8563262749767,
"count": 18210,
"is_parallel": true,
"self": 16.039744162993884,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.922684798957562,
"count": 18210,
"is_parallel": true,
"self": 7.922684798957562
},
"communicator.exchange": {
"total": 261.850786741049,
"count": 18210,
"is_parallel": true,
"self": 261.850786741049
},
"steps_from_proto": {
"total": 46.043110571976285,
"count": 18210,
"is_parallel": true,
"self": 9.297504443178696,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.74560612879759,
"count": 182100,
"is_parallel": true,
"self": 36.74560612879759
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002805759995681001,
"count": 1,
"self": 0.0002805759995681001,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 677.6107948141916,
"count": 712972,
"is_parallel": true,
"self": 17.137674548855102,
"children": {
"process_trajectory": {
"total": 360.8141546473289,
"count": 712972,
"is_parallel": true,
"self": 359.4495456923296,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3646089549993121,
"count": 4,
"is_parallel": true,
"self": 1.3646089549993121
}
}
},
"_update_policy": {
"total": 299.65896561800764,
"count": 90,
"is_parallel": true,
"self": 130.54220798797542,
"children": {
"TorchPPOOptimizer.update": {
"total": 169.11675763003223,
"count": 4587,
"is_parallel": true,
"self": 169.11675763003223
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.17218608099938137,
"count": 1,
"self": 0.002373593997617718,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16981248700176366,
"count": 1,
"self": 0.16981248700176366
}
}
}
}
}
}
}