{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7481241822242737,
"min": 0.7275084257125854,
"max": 2.878674268722534,
"count": 47
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7110.17236328125,
"min": 7110.17236328125,
"max": 29385.5078125,
"count": 47
},
"SnowballTarget.Step.mean": {
"value": 469944.0,
"min": 9952.0,
"max": 469944.0,
"count": 47
},
"SnowballTarget.Step.sum": {
"value": 469944.0,
"min": 9952.0,
"max": 469944.0,
"count": 47
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.457496643066406,
"min": 0.33969926834106445,
"max": 13.665112495422363,
"count": 47
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2610.75439453125,
"min": 65.90165710449219,
"max": 2798.97265625,
"count": 47
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06545934811013057,
"min": 0.06072266933670286,
"max": 0.07486839363335486,
"count": 47
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2618373924405223,
"min": 0.253462643431632,
"max": 0.37091525558036453,
"count": 47
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.16715518008990615,
"min": 0.10786164894599613,
"max": 0.2755243507962601,
"count": 47
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6686207203596246,
"min": 0.43144659578398453,
"max": 1.3011655334164114,
"count": 47
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.0002302182232606,
"min": 0.0002302182232606,
"max": 0.00029918820027059994,
"count": 47
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0009208728930424,
"min": 0.0009208728930424,
"max": 0.0014885160038279998,
"count": 47
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.17673940000000002,
"min": 0.17673940000000002,
"max": 0.1997294,
"count": 47
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.7069576000000001,
"min": 0.7069576000000001,
"max": 0.996172,
"count": 47
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0038392960600000005,
"min": 0.0038392960600000005,
"max": 0.004986497059999999,
"count": 47
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.015357184240000002,
"min": 0.015357184240000002,
"max": 0.024808982800000004,
"count": 47
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 47
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 47
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.522727272727273,
"min": 2.7045454545454546,
"max": 26.927272727272726,
"count": 47
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1167.0,
"min": 119.0,
"max": 1481.0,
"count": 47
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.522727272727273,
"min": 2.7045454545454546,
"max": 26.927272727272726,
"count": 47
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1167.0,
"min": 119.0,
"max": 1481.0,
"count": 47
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 47
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 47
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744705912",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744706970"
},
"total": 1058.07574952,
"count": 1,
"self": 0.4432100240001091,
"children": {
"run_training.setup": {
"total": 0.022422150000011243,
"count": 1,
"self": 0.022422150000011243
},
"TrainerController.start_learning": {
"total": 1057.610117346,
"count": 1,
"self": 0.9030628089963102,
"children": {
"TrainerController._reset_env": {
"total": 3.233324046000007,
"count": 1,
"self": 3.233324046000007
},
"TrainerController.advance": {
"total": 1053.4709160990037,
"count": 43583,
"self": 0.9520847240057719,
"children": {
"env_step": {
"total": 750.5101634279881,
"count": 43583,
"self": 569.6815519260022,
"children": {
"SubprocessEnvManager._take_step": {
"total": 180.27819443399682,
"count": 43583,
"self": 3.188406766999151,
"children": {
"TorchPolicy.evaluate": {
"total": 177.08978766699767,
"count": 43583,
"self": 177.08978766699767
}
}
},
"workers": {
"total": 0.5504170679891445,
"count": 43582,
"self": 0.0,
"children": {
"worker_root": {
"total": 1054.7774350150146,
"count": 43582,
"is_parallel": true,
"self": 554.8248011610128,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005352161999951477,
"count": 1,
"is_parallel": true,
"self": 0.0037272519998623466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016249100000891303,
"count": 10,
"is_parallel": true,
"self": 0.0016249100000891303
}
}
},
"UnityEnvironment.step": {
"total": 0.06631748900002776,
"count": 1,
"is_parallel": true,
"self": 0.0006457819999923231,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003638060000525911,
"count": 1,
"is_parallel": true,
"self": 0.0003638060000525911
},
"communicator.exchange": {
"total": 0.06165754299996706,
"count": 1,
"is_parallel": true,
"self": 0.06165754299996706
},
"steps_from_proto": {
"total": 0.0036503580000157854,
"count": 1,
"is_parallel": true,
"self": 0.0003982170001108898,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0032521409999048956,
"count": 10,
"is_parallel": true,
"self": 0.0032521409999048956
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 499.95263385400176,
"count": 43581,
"is_parallel": true,
"self": 23.758844507941774,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 13.128302615001587,
"count": 43581,
"is_parallel": true,
"self": 13.128302615001587
},
"communicator.exchange": {
"total": 384.41075515101795,
"count": 43581,
"is_parallel": true,
"self": 384.41075515101795
},
"steps_from_proto": {
"total": 78.65473158004045,
"count": 43581,
"is_parallel": true,
"self": 14.338957949076757,
"children": {
"_process_rank_one_or_two_observation": {
"total": 64.31577363096369,
"count": 435810,
"is_parallel": true,
"self": 64.31577363096369
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 302.00866794700977,
"count": 43582,
"self": 1.1423924239913958,
"children": {
"process_trajectory": {
"total": 66.26860182901692,
"count": 43582,
"self": 65.35715842901686,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9114434000000529,
"count": 9,
"self": 0.9114434000000529
}
}
},
"_update_policy": {
"total": 234.59767369400146,
"count": 217,
"self": 94.00690392100944,
"children": {
"TorchPPOOptimizer.update": {
"total": 140.59076977299202,
"count": 11064,
"self": 140.59076977299202
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.2509999578469433e-06,
"count": 1,
"self": 1.2509999578469433e-06
},
"TrainerController._save_models": {
"total": 0.002813140999933239,
"count": 1,
"self": 3.2273999977405765e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0027808669999558333,
"count": 1,
"self": 0.0027808669999558333
}
}
}
}
}
}
}