{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.897840678691864,
"min": 0.8958286643028259,
"max": 2.8876895904541016,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8928.1279296875,
"min": 8928.1279296875,
"max": 29985.76953125,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.678810477256775,
"min": 0.0441976860165596,
"max": 1.7111351490020752,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 426.4178466796875,
"min": 10.784235000610352,
"max": 434.0635986328125,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.818181818181817,
"min": 3.3863636363636362,
"max": 26.818181818181817,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1475.0,
"min": 149.0,
"max": 1475.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.818181818181817,
"min": 3.3863636363636362,
"max": 26.818181818181817,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1475.0,
"min": 149.0,
"max": 1475.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06639447424201232,
"min": 0.061644420944958256,
"max": 0.0827343492790078,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06639447424201232,
"min": 0.061644420944958256,
"max": 0.14820478036559354,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.0990071992855519,
"min": 0.04816095922690104,
"max": 0.13071894684496024,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.0990071992855519,
"min": 0.04816095922690104,
"max": 0.2323015054377417,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.6688986656000035e-06,
"min": 2.6688986656000035e-06,
"max": 0.00019651520174240002,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.6688986656000035e-06,
"min": 2.6688986656000035e-06,
"max": 0.0003562816218592001,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10133440000000003,
"min": 0.10133440000000003,
"max": 0.19825760000000003,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10133440000000003,
"min": 0.10133440000000003,
"max": 0.37814080000000005,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.658656000000014e-05,
"min": 7.658656000000014e-05,
"max": 0.00491305424,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 7.658656000000014e-05,
"min": 7.658656000000014e-05,
"max": 0.00890922592,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679308086",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679309101"
},
"total": 1014.584843874,
"count": 1,
"self": 0.75333521799962,
"children": {
"run_training.setup": {
"total": 0.10383334700009073,
"count": 1,
"self": 0.10383334700009073
},
"TrainerController.start_learning": {
"total": 1013.7276753090002,
"count": 1,
"self": 0.9041088700003002,
"children": {
"TrainerController._reset_env": {
"total": 9.800832706000165,
"count": 1,
"self": 9.800832706000165
},
"TrainerController.advance": {
"total": 1002.7952113799997,
"count": 45496,
"self": 1.0438958690706386,
"children": {
"env_step": {
"total": 715.6815380199466,
"count": 45496,
"self": 583.3141892819549,
"children": {
"SubprocessEnvManager._take_step": {
"total": 131.77234663299578,
"count": 45496,
"self": 8.27268163597887,
"children": {
"TorchPolicy.evaluate": {
"total": 123.49966499701691,
"count": 45496,
"self": 123.49966499701691
}
}
},
"workers": {
"total": 0.595002104995956,
"count": 45496,
"self": 0.0,
"children": {
"worker_root": {
"total": 1010.1917472619491,
"count": 45496,
"is_parallel": true,
"self": 497.79621540197195,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005107242000121914,
"count": 1,
"is_parallel": true,
"self": 0.003657038999563156,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014502030005587585,
"count": 10,
"is_parallel": true,
"self": 0.0014502030005587585
}
}
},
"UnityEnvironment.step": {
"total": 0.03424643199991806,
"count": 1,
"is_parallel": true,
"self": 0.0005840389999320905,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039740500005791546,
"count": 1,
"is_parallel": true,
"self": 0.00039740500005791546
},
"communicator.exchange": {
"total": 0.03152023100005863,
"count": 1,
"is_parallel": true,
"self": 0.03152023100005863
},
"steps_from_proto": {
"total": 0.0017447569998694235,
"count": 1,
"is_parallel": true,
"self": 0.0003754489994207688,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013693080004486546,
"count": 10,
"is_parallel": true,
"self": 0.0013693080004486546
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 512.3955318599772,
"count": 45495,
"is_parallel": true,
"self": 24.035349170944983,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.22412833103499,
"count": 45495,
"is_parallel": true,
"self": 13.22412833103499
},
"communicator.exchange": {
"total": 398.15062383999225,
"count": 45495,
"is_parallel": true,
"self": 398.15062383999225
},
"steps_from_proto": {
"total": 76.98543051800493,
"count": 45495,
"is_parallel": true,
"self": 14.80028442994103,
"children": {
"_process_rank_one_or_two_observation": {
"total": 62.1851460880639,
"count": 454950,
"is_parallel": true,
"self": 62.1851460880639
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 286.06977749098246,
"count": 45496,
"self": 1.1056014859568677,
"children": {
"process_trajectory": {
"total": 68.62288061602635,
"count": 45496,
"self": 67.87542460302643,
"children": {
"RLTrainer._checkpoint": {
"total": 0.747456012999919,
"count": 5,
"self": 0.747456012999919
}
}
},
"_update_policy": {
"total": 216.34129538899924,
"count": 59,
"self": 115.256303510007,
"children": {
"TorchPPOOptimizer.update": {
"total": 101.08499187899224,
"count": 11472,
"self": 101.08499187899224
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2860000424552709e-06,
"count": 1,
"self": 1.2860000424552709e-06
},
"TrainerController._save_models": {
"total": 0.22752106699999786,
"count": 1,
"self": 0.001221148000240646,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2262999189997572,
"count": 1,
"self": 0.2262999189997572
}
}
}
}
}
}
}