{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9328919649124146,
"min": 0.9328919649124146,
"max": 2.8860585689544678,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9522.9609375,
"min": 8491.7744140625,
"max": 31406.125,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499912.0,
"min": 9952.0,
"max": 499912.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499912.0,
"min": 9952.0,
"max": 499912.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.057124137878418,
"min": -0.07539340108633041,
"max": 14.057124137878418,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1419.76953125,
"min": -7.313159942626953,
"max": 1441.7701416015625,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.0188679245283,
"min": 2.5090909090909093,
"max": 28.163636363636364,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1485.0,
"min": 120.0,
"max": 1549.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.0188679245283,
"min": 2.5090909090909093,
"max": 28.163636363636364,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1485.0,
"min": 120.0,
"max": 1549.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04795419881051845,
"min": 0.042951618692056985,
"max": 0.055461305127520226,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.04795419881051845,
"min": 0.042951618692056985,
"max": 0.10035613556563691,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17515234026838752,
"min": 0.09697852073346867,
"max": 0.3035049795227892,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.17515234026838752,
"min": 0.09697852073346867,
"max": 0.5870711494894588,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 4.320098559999991e-06,
"min": 4.320098559999991e-06,
"max": 0.00029472000176,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 4.320098559999991e-06,
"min": 4.320098559999991e-06,
"max": 0.00051024002992,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10143999999999999,
"min": 0.10143999999999999,
"max": 0.19823999999999997,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10143999999999999,
"min": 0.10143999999999999,
"max": 0.3700800000000001,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 3.865599999999992e-05,
"min": 3.865599999999992e-05,
"max": 0.0019649760000000002,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 3.865599999999992e-05,
"min": 3.865599999999992e-05,
"max": 0.003404592,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1765805827",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1765806953"
},
"total": 1125.639104655,
"count": 1,
"self": 0.752796533000037,
"children": {
"run_training.setup": {
"total": 0.051349824999988414,
"count": 1,
"self": 0.051349824999988414
},
"TrainerController.start_learning": {
"total": 1124.834958297,
"count": 1,
"self": 0.8157271730119646,
"children": {
"TrainerController._reset_env": {
"total": 3.4518225419999453,
"count": 1,
"self": 3.4518225419999453
},
"TrainerController.advance": {
"total": 1120.4128791819878,
"count": 45528,
"self": 0.8359274260046732,
"children": {
"env_step": {
"total": 764.143059406003,
"count": 45528,
"self": 581.1986654199925,
"children": {
"SubprocessEnvManager._take_step": {
"total": 182.44652199600193,
"count": 45528,
"self": 2.8903641720295354,
"children": {
"TorchPolicy.evaluate": {
"total": 179.5561578239724,
"count": 45528,
"self": 179.5561578239724
}
}
},
"workers": {
"total": 0.497871990008548,
"count": 45528,
"self": 0.0,
"children": {
"worker_root": {
"total": 1120.4063604880234,
"count": 45528,
"is_parallel": true,
"self": 608.5320408800181,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.012187391999987085,
"count": 1,
"is_parallel": true,
"self": 0.00859386300010101,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0035935289998860753,
"count": 10,
"is_parallel": true,
"self": 0.0035935289998860753
}
}
},
"UnityEnvironment.step": {
"total": 0.03627521999999317,
"count": 1,
"is_parallel": true,
"self": 0.0006504289999611501,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00038971100002527237,
"count": 1,
"is_parallel": true,
"self": 0.00038971100002527237
},
"communicator.exchange": {
"total": 0.03334481600006711,
"count": 1,
"is_parallel": true,
"self": 0.03334481600006711
},
"steps_from_proto": {
"total": 0.0018902639999396342,
"count": 1,
"is_parallel": true,
"self": 0.00037186900010510726,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001518394999834527,
"count": 10,
"is_parallel": true,
"self": 0.001518394999834527
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 511.8743196080053,
"count": 45527,
"is_parallel": true,
"self": 24.79781625901603,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.0982564519893,
"count": 45527,
"is_parallel": true,
"self": 13.0982564519893
},
"communicator.exchange": {
"total": 389.94771327601416,
"count": 45527,
"is_parallel": true,
"self": 389.94771327601416
},
"steps_from_proto": {
"total": 84.0305336209858,
"count": 45527,
"is_parallel": true,
"self": 15.412565856010929,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.61796776497488,
"count": 455270,
"is_parallel": true,
"self": 68.61796776497488
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 355.43389234998017,
"count": 45528,
"self": 0.9394323679572381,
"children": {
"process_trajectory": {
"total": 68.7981408870229,
"count": 45528,
"self": 67.53076395902292,
"children": {
"RLTrainer._checkpoint": {
"total": 1.267376927999976,
"count": 10,
"self": 1.267376927999976
}
}
},
"_update_policy": {
"total": 285.69631909500004,
"count": 56,
"self": 156.8650124159917,
"children": {
"TorchPPOOptimizer.update": {
"total": 128.83130667900832,
"count": 9520,
"self": 128.83130667900832
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1730001006071689e-06,
"count": 1,
"self": 1.1730001006071689e-06
},
"TrainerController._save_models": {
"total": 0.15452822700012803,
"count": 1,
"self": 0.0009037219999754598,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15362450500015257,
"count": 1,
"self": 0.15362450500015257
}
}
}
}
}
}
}