{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.6453768610954285,
"min": 0.6052058935165405,
"max": 0.8181990385055542,
"count": 29
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6559.6103515625,
"min": 2979.062744140625,
"max": 8338.6083984375,
"count": 29
},
"SnowballTarget.Step.mean": {
"value": 689960.0,
"min": 409984.0,
"max": 689960.0,
"count": 29
},
"SnowballTarget.Step.sum": {
"value": 689960.0,
"min": 409984.0,
"max": 689960.0,
"count": 29
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.772141456604004,
"min": 13.346248626708984,
"max": 13.857466697692871,
"count": 29
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2685.567626953125,
"min": 799.4398803710938,
"max": 2823.4833984375,
"count": 29
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 29
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 2189.0,
"max": 10945.0,
"count": 29
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06500108055679091,
"min": 0.0592020819404199,
"max": 0.08497456358679756,
"count": 29
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26000432222716363,
"min": 0.08497456358679756,
"max": 0.3639923398230024,
"count": 29
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.15492069965922364,
"min": 0.1400732405337633,
"max": 0.19679481974419424,
"count": 29
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6196827986368946,
"min": 0.16414106776937842,
"max": 0.9178970949906928,
"count": 29
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.44892685036e-05,
"min": 9.44892685036e-05,
"max": 0.00017731924089360002,
"count": 29
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0003779570740144,
"min": 0.00017731924089360002,
"max": 0.000876696207768,
"count": 29
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1314964,
"min": 0.1314964,
"max": 0.1591064,
"count": 29
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5259856,
"min": 0.1591064,
"max": 0.7922319999999999,
"count": 29
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0015816703600000004,
"min": 0.0015816703600000004,
"max": 0.0029594093600000004,
"count": 29
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0063266814400000015,
"min": 0.0029594093600000004,
"max": 0.014632376800000003,
"count": 29
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.09090909090909,
"min": 25.59090909090909,
"max": 27.181818181818183,
"count": 29
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1192.0,
"min": 282.0,
"max": 1489.0,
"count": 29
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 27.09090909090909,
"min": 25.59090909090909,
"max": 27.181818181818183,
"count": 29
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1192.0,
"min": 282.0,
"max": 1489.0,
"count": 29
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 29
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 29
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731518013",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --resume --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731518853"
},
"total": 840.429411397,
"count": 1,
"self": 0.36528794899959394,
"children": {
"run_training.setup": {
"total": 0.11104295700033617,
"count": 1,
"self": 0.11104295700033617
},
"TrainerController.start_learning": {
"total": 839.953080491,
"count": 1,
"self": 1.281243647989868,
"children": {
"TrainerController._reset_env": {
"total": 3.2445078089999697,
"count": 1,
"self": 3.2445078089999697
},
"TrainerController.advance": {
"total": 835.2928897720103,
"count": 26535,
"self": 0.6298937230021693,
"children": {
"env_step": {
"total": 834.6629960490081,
"count": 26535,
"self": 640.1278579970003,
"children": {
"SubprocessEnvManager._take_step": {
"total": 193.92481113003896,
"count": 26535,
"self": 2.9560653630492197,
"children": {
"TorchPolicy.evaluate": {
"total": 190.96874576698974,
"count": 26535,
"self": 190.96874576698974
}
}
},
"workers": {
"total": 0.6103269219688627,
"count": 26534,
"self": 0.0,
"children": {
"worker_root": {
"total": 837.2221901299704,
"count": 26534,
"is_parallel": true,
"self": 405.7934578399522,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0037471790001291083,
"count": 1,
"is_parallel": true,
"self": 0.0010018210005000583,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00274535799962905,
"count": 10,
"is_parallel": true,
"self": 0.00274535799962905
}
}
},
"UnityEnvironment.step": {
"total": 0.07911048000005394,
"count": 1,
"is_parallel": true,
"self": 0.0007877650000409631,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004636660000869597,
"count": 1,
"is_parallel": true,
"self": 0.0004636660000869597
},
"communicator.exchange": {
"total": 0.07536837199995716,
"count": 1,
"is_parallel": true,
"self": 0.07536837199995716
},
"steps_from_proto": {
"total": 0.002490676999968855,
"count": 1,
"is_parallel": true,
"self": 0.0005000929995730985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019905840003957564,
"count": 10,
"is_parallel": true,
"self": 0.0019905840003957564
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 431.42873229001816,
"count": 26533,
"is_parallel": true,
"self": 20.579289500986306,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.255141167016063,
"count": 26533,
"is_parallel": true,
"self": 10.255141167016063
},
"communicator.exchange": {
"total": 338.9035427789836,
"count": 26533,
"is_parallel": true,
"self": 338.9035427789836
},
"steps_from_proto": {
"total": 61.6907588430322,
"count": 26533,
"is_parallel": true,
"self": 12.478091371013306,
"children": {
"_process_rank_one_or_two_observation": {
"total": 49.21266747201889,
"count": 265330,
"is_parallel": true,
"self": 49.21266747201889
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001709950001895777,
"count": 1,
"self": 0.0001709950001895777,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 825.7275560776652,
"count": 1094165,
"is_parallel": true,
"self": 25.041207345781913,
"children": {
"process_trajectory": {
"total": 433.45666232688745,
"count": 1094165,
"is_parallel": true,
"self": 432.42008302688737,
"children": {
"RLTrainer._checkpoint": {
"total": 1.036579300000085,
"count": 5,
"is_parallel": true,
"self": 1.036579300000085
}
}
},
"_update_policy": {
"total": 367.22968640499585,
"count": 132,
"is_parallel": true,
"self": 92.73948547499185,
"children": {
"TorchPPOOptimizer.update": {
"total": 274.490200930004,
"count": 6729,
"is_parallel": true,
"self": 274.490200930004
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.13426826699969752,
"count": 1,
"self": 0.0030220349995033757,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13124623200019414,
"count": 1,
"self": 0.13124623200019414
}
}
}
}
}
}
}