{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.3074700832366943,
"min": 1.3074700832366943,
"max": 2.8770358562469482,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12584.3994140625,
"min": 12584.3994140625,
"max": 29653.609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.963115692138672,
"min": -0.003124536946415901,
"max": 11.963115692138672,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2332.8076171875,
"min": -0.6061601638793945,
"max": 2392.39453125,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.mean": {
"value": 0.3639466464519501,
"min": -0.07460642606019974,
"max": 0.41845494508743286,
"count": 20
},
"SnowballTarget.Policy.CuriosityValueEstimate.sum": {
"value": 70.96959686279297,
"min": -14.473647117614746,
"max": 82.07357025146484,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.568181818181817,
"min": 3.3181818181818183,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1081.0,
"min": 146.0,
"max": 1359.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.568181818181817,
"min": 3.3181818181818183,
"max": 24.90909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1081.0,
"min": 146.0,
"max": 1359.0,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.mean": {
"value": 7.2439395473762,
"min": 5.678277009590105,
"max": 9.186523964459246,
"count": 20
},
"SnowballTarget.Policy.CuriosityReward.sum": {
"value": 318.73334008455276,
"min": 249.84418842196465,
"max": 505.2588180452585,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06141591183417511,
"min": 0.06141591183417511,
"max": 0.07391346620086765,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.12283182366835022,
"min": 0.12283182366835022,
"max": 0.21428069352302426,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.10879980183261281,
"min": 0.05808106478795064,
"max": 0.1463760170428192,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.21759960366522563,
"min": 0.11616212957590127,
"max": 0.43907720792819477,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 6.432097856000005e-06,
"min": 6.432097856000005e-06,
"max": 0.00029023200325600004,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.286419571200001e-05,
"min": 1.286419571200001e-05,
"max": 0.0007419960526679999,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10214399999999998,
"min": 0.10214399999999998,
"max": 0.196744,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20428799999999997,
"min": 0.20428799999999997,
"max": 0.547332,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001169856000000001,
"min": 0.0001169856000000001,
"max": 0.0048375256,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0002339712000000002,
"min": 0.0002339712000000002,
"max": 0.0123718668,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.mean": {
"value": 0.04639660934095874,
"min": 0.044086100873263444,
"max": 0.09217024550818026,
"count": 20
},
"SnowballTarget.Losses.CuriosityForwardLoss.sum": {
"value": 0.09279321868191748,
"min": 0.08817220174652689,
"max": 0.18434049101636052,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.mean": {
"value": 1.0736719608306884,
"min": 1.0736719608306884,
"max": 2.7185221451064057,
"count": 20
},
"SnowballTarget.Losses.CuriosityInverseLoss.sum": {
"value": 2.1473439216613768,
"min": 2.1473439216613768,
"max": 6.719442791097304,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700685888",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700686510"
},
"total": 621.9481216020001,
"count": 1,
"self": 0.4259320410001237,
"children": {
"run_training.setup": {
"total": 0.04874042599999484,
"count": 1,
"self": 0.04874042599999484
},
"TrainerController.start_learning": {
"total": 621.473449135,
"count": 1,
"self": 0.5055729260038788,
"children": {
"TrainerController._reset_env": {
"total": 9.9770176909999,
"count": 1,
"self": 9.9770176909999
},
"TrainerController.advance": {
"total": 610.9056673859961,
"count": 18207,
"self": 0.2384794209779102,
"children": {
"env_step": {
"total": 610.6671879650182,
"count": 18207,
"self": 483.9036989760127,
"children": {
"SubprocessEnvManager._take_step": {
"total": 126.50123197499033,
"count": 18207,
"self": 1.3555897769945204,
"children": {
"TorchPolicy.evaluate": {
"total": 125.14564219799581,
"count": 18207,
"self": 125.14564219799581
}
}
},
"workers": {
"total": 0.26225701401517654,
"count": 18207,
"self": 0.0,
"children": {
"worker_root": {
"total": 620.2787489810087,
"count": 18207,
"is_parallel": true,
"self": 393.52211029504315,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006363141000065298,
"count": 1,
"is_parallel": true,
"self": 0.00502303899997969,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013401020000856079,
"count": 10,
"is_parallel": true,
"self": 0.0013401020000856079
}
}
},
"UnityEnvironment.step": {
"total": 0.04159457400010069,
"count": 1,
"is_parallel": true,
"self": 0.0005807310001273436,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024865599993972864,
"count": 1,
"is_parallel": true,
"self": 0.00024865599993972864
},
"communicator.exchange": {
"total": 0.03881610100006583,
"count": 1,
"is_parallel": true,
"self": 0.03881610100006583
},
"steps_from_proto": {
"total": 0.0019490859999677923,
"count": 1,
"is_parallel": true,
"self": 0.00038047000043661683,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015686159995311755,
"count": 10,
"is_parallel": true,
"self": 0.0015686159995311755
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 226.75663868596553,
"count": 18206,
"is_parallel": true,
"self": 10.217233721012008,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.213032212976486,
"count": 18206,
"is_parallel": true,
"self": 5.213032212976486
},
"communicator.exchange": {
"total": 178.97839152798338,
"count": 18206,
"is_parallel": true,
"self": 178.97839152798338
},
"steps_from_proto": {
"total": 32.347981223993656,
"count": 18206,
"is_parallel": true,
"self": 5.907664024002088,
"children": {
"_process_rank_one_or_two_observation": {
"total": 26.44031719999157,
"count": 182060,
"is_parallel": true,
"self": 26.44031719999157
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00010651999991750927,
"count": 1,
"self": 0.00010651999991750927,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 606.8234466279862,
"count": 513941,
"is_parallel": true,
"self": 10.547591210862947,
"children": {
"process_trajectory": {
"total": 308.6046761361224,
"count": 513941,
"is_parallel": true,
"self": 307.88478620612227,
"children": {
"RLTrainer._checkpoint": {
"total": 0.7198899300001358,
"count": 4,
"is_parallel": true,
"self": 0.7198899300001358
}
}
},
"_update_policy": {
"total": 287.67117928100083,
"count": 45,
"is_parallel": true,
"self": 184.5105077260066,
"children": {
"TorchPPOOptimizer.update": {
"total": 103.16067155499422,
"count": 7645,
"is_parallel": true,
"self": 103.16067155499422
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08508461200017337,
"count": 1,
"self": 0.0007858080002733914,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08429880399989997,
"count": 1,
"self": 0.08429880399989997
}
}
}
}
}
}
}