{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8480338454246521,
"min": 0.8480338454246521,
"max": 0.8480338454246521,
"count": 1
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8656.7294921875,
"min": 8656.7294921875,
"max": 8656.7294921875,
"count": 1
},
"SnowballTarget.Step.mean": {
"value": 209936.0,
"min": 209936.0,
"max": 209936.0,
"count": 1
},
"SnowballTarget.Step.sum": {
"value": 209936.0,
"min": 209936.0,
"max": 209936.0,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.852452278137207,
"min": 12.852452278137207,
"max": 12.852452278137207,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2467.6708984375,
"min": 2467.6708984375,
"max": 2467.6708984375,
"count": 1
},
"SnowballTarget.Policy.CuriosityValueEstimate.mean": {
"value": -0.26364466547966003,
"min": -0.26364466547966003,
"max": -0.26364466547966003,
"count": 1
},
"SnowballTarget.Policy.CuriosityValueEstimate.sum": {
"value": -50.61977767944336,
"min": -50.61977767944336,
"max": -50.61977767944336,
"count": 1
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07305969190729586,
"min": 0.07305969190729586,
"max": 0.07305969190729586,
"count": 1
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29223876762918344,
"min": 0.29223876762918344,
"max": 0.29223876762918344,
"count": 1
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.10914913157238534,
"min": 0.10914913157238534,
"max": 0.10914913157238534,
"count": 1
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4365965262895414,
"min": 0.4365965262895414,
"max": 0.4365965262895414,
"count": 1
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00026917141027619996,
"min": 0.00026917141027619996,
"max": 0.00026917141027619996,
"count": 1
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0010766856411047999,
"min": 0.0010766856411047999,
"max": 0.0010766856411047999,
"count": 1
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.1897238,
"min": 0.1897238,
"max": 0.1897238,
"count": 1
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.7588952,
"min": 0.7588952,
"max": 0.7588952,
"count": 1
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00448721762,
"min": 0.00448721762,
"max": 0.00448721762,
"count": 1
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.01794887048,
"min": 0.01794887048,
"max": 0.01794887048,
"count": 1
},
"SnowballTarget.Losses.CuriosityForwardLoss.mean": {
"value": 0.30136276854603894,
"min": 0.30136276854603894,
"max": 0.30136276854603894,
"count": 1
},
"SnowballTarget.Losses.CuriosityForwardLoss.sum": {
"value": 1.2054510741841558,
"min": 1.2054510741841558,
"max": 1.2054510741841558,
"count": 1
},
"SnowballTarget.Losses.CuriosityInverseLoss.mean": {
"value": 1.2597437747115012,
"min": 1.2597437747115012,
"max": 1.2597437747115012,
"count": 1
},
"SnowballTarget.Losses.CuriosityInverseLoss.sum": {
"value": 5.038975098846005,
"min": 5.038975098846005,
"max": 5.038975098846005,
"count": 1
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 1
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 8756.0,
"count": 1
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.34090909090909,
"min": 25.34090909090909,
"max": 25.34090909090909,
"count": 1
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1115.0,
"min": 1115.0,
"max": 1115.0,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.34090909090909,
"min": 25.34090909090909,
"max": 25.34090909090909,
"count": 1
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1115.0,
"min": 1115.0,
"max": 1115.0,
"count": 1
},
"SnowballTarget.Policy.CuriosityReward.mean": {
"value": 1.6820884825323115,
"min": 1.6820884825323115,
"max": 1.6820884825323115,
"count": 1
},
"SnowballTarget.Policy.CuriosityReward.sum": {
"value": 74.01189323142171,
"min": 74.01189323142171,
"max": 74.01189323142171,
"count": 1
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 1
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1756802225",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1756802256"
},
"total": 31.338433906999853,
"count": 1,
"self": 0.2968910239997058,
"children": {
"run_training.setup": {
"total": 0.02272881499993673,
"count": 1,
"self": 0.02272881499993673
},
"TrainerController.start_learning": {
"total": 31.01881406800021,
"count": 1,
"self": 0.027829952990032325,
"children": {
"TrainerController._reset_env": {
"total": 1.81535464100034,
"count": 1,
"self": 1.81535464100034
},
"TrainerController.advance": {
"total": 28.672942395009613,
"count": 1121,
"self": 0.02096365402076117,
"children": {
"env_step": {
"total": 18.185425256999224,
"count": 1121,
"self": 14.027776797000115,
"children": {
"SubprocessEnvManager._take_step": {
"total": 4.145900908994918,
"count": 1121,
"self": 0.07304100900091726,
"children": {
"TorchPolicy.evaluate": {
"total": 4.072859899994,
"count": 1121,
"self": 4.072859899994
}
}
},
"workers": {
"total": 0.011747551004191337,
"count": 1120,
"self": 0.0,
"children": {
"worker_root": {
"total": 30.443461416006812,
"count": 1120,
"is_parallel": true,
"self": 18.139483271002064,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0020680239999819605,
"count": 1,
"is_parallel": true,
"self": 0.0006200059992806928,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014480180007012677,
"count": 10,
"is_parallel": true,
"self": 0.0014480180007012677
}
}
},
"UnityEnvironment.step": {
"total": 0.03450543100007053,
"count": 1,
"is_parallel": true,
"self": 0.0005361779999475402,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004192500000499422,
"count": 1,
"is_parallel": true,
"self": 0.0004192500000499422
},
"communicator.exchange": {
"total": 0.03177147599990349,
"count": 1,
"is_parallel": true,
"self": 0.03177147599990349
},
"steps_from_proto": {
"total": 0.0017785270001695608,
"count": 1,
"is_parallel": true,
"self": 0.00034121399903597194,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014373130011335888,
"count": 10,
"is_parallel": true,
"self": 0.0014373130011335888
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 12.303978145004749,
"count": 1119,
"is_parallel": true,
"self": 0.5808981090235648,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.31569907898665406,
"count": 1119,
"is_parallel": true,
"self": 0.31569907898665406
},
"communicator.exchange": {
"total": 9.53153811498396,
"count": 1119,
"is_parallel": true,
"self": 9.53153811498396
},
"steps_from_proto": {
"total": 1.8758428420105702,
"count": 1119,
"is_parallel": true,
"self": 0.3269197190279556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1.5489231229826146,
"count": 11190,
"is_parallel": true,
"self": 1.5489231229826146
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 10.466553483989628,
"count": 1120,
"self": 0.023239342988290446,
"children": {
"process_trajectory": {
"total": 2.119433444000606,
"count": 1120,
"self": 2.119433444000606
},
"_update_policy": {
"total": 8.323880697000732,
"count": 5,
"self": 4.957675552002456,
"children": {
"TorchPPOOptimizer.update": {
"total": 3.3662051449982755,
"count": 252,
"self": 3.3662051449982755
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.8000000636675395e-06,
"count": 1,
"self": 1.8000000636675395e-06
},
"TrainerController._save_models": {
"total": 0.5026852790001612,
"count": 1,
"self": 0.0018124950001947582,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5008727839999665,
"count": 1,
"self": 0.5008727839999665
}
}
}
}
}
}
}