{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.6791611909866333,
"min": 1.6791611909866333,
"max": 2.8879926204681396,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 15958.748046875,
"min": 15958.748046875,
"max": 29480.62890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 7.565064907073975,
"min": -0.004969883244484663,
"max": 7.565064907073975,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1475.1876220703125,
"min": -0.9641573429107666,
"max": 1475.4478759765625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 19.34090909090909,
"min": 2.8636363636363638,
"max": 19.34090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 851.0,
"min": 126.0,
"max": 1043.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 19.34090909090909,
"min": 2.8636363636363638,
"max": 19.34090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 851.0,
"min": 126.0,
"max": 1043.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.03458931947620038,
"min": 0.02771801657646018,
"max": 0.04313195290445697,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.03458931947620038,
"min": 0.02771801657646018,
"max": 0.06832397965298892,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.28088264985411776,
"min": 0.10080790417451485,
"max": 0.30106635070314597,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.28088264985411776,
"min": 0.10080790417451485,
"max": 0.5976092494001576,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.622009675600001e-05,
"min": 1.622009675600001e-05,
"max": 0.00047822000435599995,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.622009675600001e-05,
"min": 1.622009675600001e-05,
"max": 0.0006264400747119999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.103244,
"min": 0.103244,
"max": 0.19564399999999998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.103244,
"min": 0.103244,
"max": 0.325288,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001718756000000001,
"min": 0.0001718756000000001,
"max": 0.0047826356000000006,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001718756000000001,
"min": 0.0001718756000000001,
"max": 0.006271871200000001,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1730084369",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1730084775"
},
"total": 405.2463201180001,
"count": 1,
"self": 0.7334652440001719,
"children": {
"run_training.setup": {
"total": 0.05396722699993006,
"count": 1,
"self": 0.05396722699993006
},
"TrainerController.start_learning": {
"total": 404.458887647,
"count": 1,
"self": 0.3851750219953374,
"children": {
"TrainerController._reset_env": {
"total": 2.1370442049999383,
"count": 1,
"self": 2.1370442049999383
},
"TrainerController.advance": {
"total": 401.80188542700466,
"count": 18192,
"self": 0.3932898789744286,
"children": {
"env_step": {
"total": 311.41756820403657,
"count": 18192,
"self": 236.5261631480737,
"children": {
"SubprocessEnvManager._take_step": {
"total": 74.66253532699602,
"count": 18192,
"self": 1.3036248579844596,
"children": {
"TorchPolicy.evaluate": {
"total": 73.35891046901156,
"count": 18192,
"self": 73.35891046901156
}
}
},
"workers": {
"total": 0.2288697289668562,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 402.938812309,
"count": 18192,
"is_parallel": true,
"self": 195.8295069429521,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022905369999079994,
"count": 1,
"is_parallel": true,
"self": 0.0007049930002267502,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015855439996812493,
"count": 10,
"is_parallel": true,
"self": 0.0015855439996812493
}
}
},
"UnityEnvironment.step": {
"total": 0.0377944239999124,
"count": 1,
"is_parallel": true,
"self": 0.0007113409999419673,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00041207100002793595,
"count": 1,
"is_parallel": true,
"self": 0.00041207100002793595
},
"communicator.exchange": {
"total": 0.03455096399989088,
"count": 1,
"is_parallel": true,
"self": 0.03455096399989088
},
"steps_from_proto": {
"total": 0.002120048000051611,
"count": 1,
"is_parallel": true,
"self": 0.0005152099995484605,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016048380005031504,
"count": 10,
"is_parallel": true,
"self": 0.0016048380005031504
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 207.10930536604792,
"count": 18191,
"is_parallel": true,
"self": 10.320279233983683,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.416297755037476,
"count": 18191,
"is_parallel": true,
"self": 5.416297755037476
},
"communicator.exchange": {
"total": 158.97646661502176,
"count": 18191,
"is_parallel": true,
"self": 158.97646661502176
},
"steps_from_proto": {
"total": 32.396261762005,
"count": 18191,
"is_parallel": true,
"self": 6.003698664935428,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.39256309706957,
"count": 181910,
"is_parallel": true,
"self": 26.39256309706957
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 89.99102734399366,
"count": 18192,
"self": 0.4674197110205114,
"children": {
"process_trajectory": {
"total": 27.43932491297369,
"count": 18192,
"self": 26.973073096974304,
"children": {
"RLTrainer._checkpoint": {
"total": 0.46625181599938514,
"count": 4,
"self": 0.46625181599938514
}
}
},
"_update_policy": {
"total": 62.084282719999464,
"count": 22,
"self": 42.795274675993596,
"children": {
"TorchPPOOptimizer.update": {
"total": 19.289008044005868,
"count": 1122,
"self": 19.289008044005868
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2989999049750622e-06,
"count": 1,
"self": 1.2989999049750622e-06
},
"TrainerController._save_models": {
"total": 0.1347816940001394,
"count": 1,
"self": 0.0013300320001690125,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13345166199997038,
"count": 1,
"self": 0.13345166199997038
}
}
}
}
}
}
}