{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 2.7334444522857666,
"min": 2.7334444522857666,
"max": 2.8872103691101074,
"count": 4
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 14222.111328125,
"min": 14222.111328125,
"max": 14958.63671875,
"count": 4
},
"SnowballTarget.Step.mean": {
"value": 19992.0,
"min": 4976.0,
"max": 19992.0,
"count": 4
},
"SnowballTarget.Step.sum": {
"value": 19992.0,
"min": 4976.0,
"max": 19992.0,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.535324215888977,
"min": -0.013077957555651665,
"max": 1.535324215888977,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 164.27969360351562,
"min": -1.268561840057373,
"max": 164.27969360351562,
"count": 4
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 4
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 6567.0,
"min": 4378.0,
"max": 6567.0,
"count": 4
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06965600378473526,
"min": 0.06534318111017437,
"max": 0.07049389890580285,
"count": 4
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.20896801135420578,
"min": 0.13068636222034874,
"max": 0.20896801135420578,
"count": 4
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22691513685619127,
"min": 0.07384172623392707,
"max": 0.22691513685619127,
"count": 4
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6807454105685738,
"min": 0.14768345246785414,
"max": 0.6807454105685738,
"count": 4
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.732008755999999e-05,
"min": 3.732008755999999e-05,
"max": 0.00025182001606,
"count": 4
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.00011196026267999998,
"min": 0.00011196026267999998,
"max": 0.00050364003212,
"count": 4
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.11244000000000003,
"min": 0.11244000000000003,
"max": 0.18394000000000002,
"count": 4
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.33732000000000006,
"min": 0.27988,
"max": 0.36788000000000004,
"count": 4
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0006307560000000001,
"min": 0.0006307560000000001,
"max": 0.004198606,
"count": 4
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0018922680000000003,
"min": 0.0018922680000000003,
"max": 0.008397212,
"count": 4
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 6.818181818181818,
"min": 2.1818181818181817,
"max": 6.818181818181818,
"count": 4
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 225.0,
"min": 48.0,
"max": 225.0,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 6.818181818181818,
"min": 2.1818181818181817,
"max": 6.818181818181818,
"count": 4
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 225.0,
"min": 48.0,
"max": 225.0,
"count": 4
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 4
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1711452545",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1711452594"
},
"total": 49.244143297999926,
"count": 1,
"self": 0.4797363210000185,
"children": {
"run_training.setup": {
"total": 0.05510140699993826,
"count": 1,
"self": 0.05510140699993826
},
"TrainerController.start_learning": {
"total": 48.70930556999997,
"count": 1,
"self": 0.08352489500134652,
"children": {
"TrainerController._reset_env": {
"total": 2.7749784160000672,
"count": 1,
"self": 2.7749784160000672
},
"TrainerController.advance": {
"total": 45.757378888998346,
"count": 1875,
"self": 0.029272224001488212,
"children": {
"env_step": {
"total": 45.72810666499686,
"count": 1875,
"self": 28.434824671993738,
"children": {
"SubprocessEnvManager._take_step": {
"total": 17.265607215001637,
"count": 1875,
"self": 0.1434248820000903,
"children": {
"TorchPolicy.evaluate": {
"total": 17.122182333001547,
"count": 1875,
"self": 17.122182333001547
}
}
},
"workers": {
"total": 0.02767477800148299,
"count": 1875,
"self": 0.0,
"children": {
"worker_root": {
"total": 48.514558945003046,
"count": 1875,
"is_parallel": true,
"self": 26.66132022099464,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005856837999999698,
"count": 1,
"is_parallel": true,
"self": 0.004240566000362378,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016162719996373198,
"count": 10,
"is_parallel": true,
"self": 0.0016162719996373198
}
}
},
"UnityEnvironment.step": {
"total": 0.035760776999950394,
"count": 1,
"is_parallel": true,
"self": 0.0006055730000298354,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00039558699995723146,
"count": 1,
"is_parallel": true,
"self": 0.00039558699995723146
},
"communicator.exchange": {
"total": 0.03289135299996815,
"count": 1,
"is_parallel": true,
"self": 0.03289135299996815
},
"steps_from_proto": {
"total": 0.001868263999995179,
"count": 1,
"is_parallel": true,
"self": 0.0003552369998942595,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015130270001009194,
"count": 10,
"is_parallel": true,
"self": 0.0015130270001009194
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 21.853238724008406,
"count": 1874,
"is_parallel": true,
"self": 1.0358339570163935,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.5561837939937959,
"count": 1874,
"is_parallel": true,
"self": 0.5561837939937959
},
"communicator.exchange": {
"total": 16.87313842699541,
"count": 1874,
"is_parallel": true,
"self": 16.87313842699541
},
"steps_from_proto": {
"total": 3.3880825460028063,
"count": 1874,
"is_parallel": true,
"self": 0.625689298028874,
"children": {
"_process_rank_one_or_two_observation": {
"total": 2.7623932479739324,
"count": 18740,
"is_parallel": true,
"self": 2.7623932479739324
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00031160800017460133,
"count": 1,
"self": 0.00031160800017460133,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 45.21625030796008,
"count": 71794,
"is_parallel": true,
"self": 1.4649785019389583,
"children": {
"process_trajectory": {
"total": 24.961383864020945,
"count": 71794,
"is_parallel": true,
"self": 24.29891412902134,
"children": {
"RLTrainer._checkpoint": {
"total": 0.662469734999604,
"count": 4,
"is_parallel": true,
"self": 0.662469734999604
}
}
},
"_update_policy": {
"total": 18.789887942000178,
"count": 9,
"is_parallel": true,
"self": 5.5344027060002645,
"children": {
"TorchPPOOptimizer.update": {
"total": 13.255485235999913,
"count": 456,
"is_parallel": true,
"self": 13.255485235999913
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09311176200003501,
"count": 1,
"self": 0.0009881400001177099,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0921236219999173,
"count": 1,
"self": 0.0921236219999173
}
}
}
}
}
}
}