{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7406790852546692,
"min": 0.7193822860717773,
"max": 2.8380188941955566,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7039.4140625,
"min": 7036.32861328125,
"max": 28970.49609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.209427833557129,
"min": 0.428206205368042,
"max": 13.209427833557129,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2575.83837890625,
"min": 83.07200622558594,
"max": 2656.27392578125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07169464517866407,
"min": 0.05979355345274705,
"max": 0.07344373334333297,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2867785807146563,
"min": 0.24911936978876875,
"max": 0.3660912525842386,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17857725994990153,
"min": 0.12100692785358713,
"max": 0.29005720882731323,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7143090397996061,
"min": 0.4840277114143485,
"max": 1.4020979211610907,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.045454545454547,
"min": 3.5,
"max": 26.045454545454547,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1146.0,
"min": 154.0,
"max": 1426.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.045454545454547,
"min": 3.5,
"max": 26.045454545454547,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1146.0,
"min": 154.0,
"max": 1426.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1752858676",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1752859228"
},
"total": 551.19814807,
"count": 1,
"self": 0.5416120009999759,
"children": {
"run_training.setup": {
"total": 0.04889609200006362,
"count": 1,
"self": 0.04889609200006362
},
"TrainerController.start_learning": {
"total": 550.607639977,
"count": 1,
"self": 0.6110976759907771,
"children": {
"TrainerController._reset_env": {
"total": 4.375727056999949,
"count": 1,
"self": 4.375727056999949
},
"TrainerController.advance": {
"total": 545.5435773080092,
"count": 18192,
"self": 0.6820056610105212,
"children": {
"env_step": {
"total": 385.29502050000804,
"count": 18192,
"self": 329.9452590800229,
"children": {
"SubprocessEnvManager._take_step": {
"total": 54.97994992799829,
"count": 18192,
"self": 1.9205838680057923,
"children": {
"TorchPolicy.evaluate": {
"total": 53.0593660599925,
"count": 18192,
"self": 53.0593660599925
}
}
},
"workers": {
"total": 0.3698114919868658,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 548.4451183860098,
"count": 18192,
"is_parallel": true,
"self": 264.15737461901836,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009406017999936012,
"count": 1,
"is_parallel": true,
"self": 0.006156075000035344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.003249942999900668,
"count": 10,
"is_parallel": true,
"self": 0.003249942999900668
}
}
},
"UnityEnvironment.step": {
"total": 0.04741251200005081,
"count": 1,
"is_parallel": true,
"self": 0.0007066069999837055,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005074809999996432,
"count": 1,
"is_parallel": true,
"self": 0.0005074809999996432
},
"communicator.exchange": {
"total": 0.043843567000067196,
"count": 1,
"is_parallel": true,
"self": 0.043843567000067196
},
"steps_from_proto": {
"total": 0.0023548570000002655,
"count": 1,
"is_parallel": true,
"self": 0.00044354800024848373,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019113089997517818,
"count": 10,
"is_parallel": true,
"self": 0.0019113089997517818
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 284.2877437669914,
"count": 18191,
"is_parallel": true,
"self": 13.402903483985483,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.625340082991443,
"count": 18191,
"is_parallel": true,
"self": 7.625340082991443
},
"communicator.exchange": {
"total": 221.7128444380196,
"count": 18191,
"is_parallel": true,
"self": 221.7128444380196
},
"steps_from_proto": {
"total": 41.546655761994884,
"count": 18191,
"is_parallel": true,
"self": 8.02677741803575,
"children": {
"_process_rank_one_or_two_observation": {
"total": 33.519878343959135,
"count": 181910,
"is_parallel": true,
"self": 33.519878343959135
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 159.5665511469906,
"count": 18192,
"self": 0.7781809699897622,
"children": {
"process_trajectory": {
"total": 30.45892117700066,
"count": 18192,
"self": 30.05372774200066,
"children": {
"RLTrainer._checkpoint": {
"total": 0.40519343500000105,
"count": 4,
"self": 0.40519343500000105
}
}
},
"_update_policy": {
"total": 128.32944900000018,
"count": 90,
"self": 49.83190056700471,
"children": {
"TorchPPOOptimizer.update": {
"total": 78.49754843299547,
"count": 4587,
"self": 78.49754843299547
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0159999419556698e-06,
"count": 1,
"self": 1.0159999419556698e-06
},
"TrainerController._save_models": {
"total": 0.07723692000013216,
"count": 1,
"self": 0.0011502630002269143,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07608665699990524,
"count": 1,
"self": 0.07608665699990524
}
}
}
}
}
}
}