{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.3314012587070465,
"min": 0.3270619213581085,
"max": 2.8598127365112305,
"count": 200
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 3412.107421875,
"min": 3157.62744140625,
"max": 29192.96875,
"count": 200
},
"SnowballTarget.Step.mean": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Step.sum": {
"value": 1999992.0,
"min": 9952.0,
"max": 1999992.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.344200134277344,
"min": 0.4010504186153412,
"max": 14.36646556854248,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2940.56103515625,
"min": 77.80377960205078,
"max": 2945.12548828125,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0689329136448328,
"min": 0.058687101516018934,
"max": 0.0782569578932096,
"count": 200
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.34466456822416397,
"min": 0.23474840606407574,
"max": 0.3795769412787266,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.14288388405944788,
"min": 0.12275751165899576,
"max": 0.28038673731041885,
"count": 200
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7144194202972394,
"min": 0.5008412361711118,
"max": 1.3989377880797667,
"count": 200
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.032997656000039e-07,
"min": 7.032997656000039e-07,
"max": 0.00029918820027059994,
"count": 200
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.5164988280000196e-06,
"min": 3.5164988280000196e-06,
"max": 0.0014885160038279998,
"count": 200
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10023440000000002,
"min": 0.10023440000000002,
"max": 0.1997294,
"count": 200
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5011720000000001,
"min": 0.4029176,
"max": 0.996172,
"count": 200
},
"SnowballTarget.Policy.Beta.mean": {
"value": 2.1696560000000067e-05,
"min": 2.1696560000000067e-05,
"max": 0.004986497059999999,
"count": 200
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00010848280000000034,
"min": 0.00010848280000000034,
"max": 0.024808982800000004,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 200
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.345454545454544,
"min": 3.25,
"max": 28.545454545454547,
"count": 200
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1559.0,
"min": 143.0,
"max": 1565.0,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.345454545454544,
"min": 3.25,
"max": 28.545454545454547,
"count": 200
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1559.0,
"min": 143.0,
"max": 1565.0,
"count": 200
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 200
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739171466",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739175649"
},
"total": 4183.661267745,
"count": 1,
"self": 0.43648233499970956,
"children": {
"run_training.setup": {
"total": 0.02431995300003109,
"count": 1,
"self": 0.02431995300003109
},
"TrainerController.start_learning": {
"total": 4183.200465457,
"count": 1,
"self": 3.2531446710881937,
"children": {
"TrainerController._reset_env": {
"total": 3.105925941999999,
"count": 1,
"self": 3.105925941999999
},
"TrainerController.advance": {
"total": 4176.7512573539125,
"count": 181864,
"self": 3.368483168943385,
"children": {
"env_step": {
"total": 2926.1088045370384,
"count": 181864,
"self": 2217.6007750281565,
"children": {
"SubprocessEnvManager._take_step": {
"total": 706.5077714289263,
"count": 181864,
"self": 12.2194095379989,
"children": {
"TorchPolicy.evaluate": {
"total": 694.2883618909274,
"count": 181864,
"self": 694.2883618909274
}
}
},
"workers": {
"total": 2.000258079955529,
"count": 181864,
"self": 0.0,
"children": {
"worker_root": {
"total": 4171.048414529961,
"count": 181864,
"is_parallel": true,
"self": 2229.178157453185,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005796984999960841,
"count": 1,
"is_parallel": true,
"self": 0.004292478000138544,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001504506999822297,
"count": 10,
"is_parallel": true,
"self": 0.001504506999822297
}
}
},
"UnityEnvironment.step": {
"total": 0.03626193700006297,
"count": 1,
"is_parallel": true,
"self": 0.0005792990000372811,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004130410000016127,
"count": 1,
"is_parallel": true,
"self": 0.0004130410000016127
},
"communicator.exchange": {
"total": 0.03317575500000203,
"count": 1,
"is_parallel": true,
"self": 0.03317575500000203
},
"steps_from_proto": {
"total": 0.0020938420000220503,
"count": 1,
"is_parallel": true,
"self": 0.000398240999970767,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016956010000512833,
"count": 10,
"is_parallel": true,
"self": 0.0016956010000512833
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1941.8702570767757,
"count": 181863,
"is_parallel": true,
"self": 94.18546899662215,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 51.843831393067035,
"count": 181863,
"is_parallel": true,
"self": 51.843831393067035
},
"communicator.exchange": {
"total": 1496.533544256004,
"count": 181863,
"is_parallel": true,
"self": 1496.533544256004
},
"steps_from_proto": {
"total": 299.30741243108264,
"count": 181863,
"is_parallel": true,
"self": 52.39333896434232,
"children": {
"_process_rank_one_or_two_observation": {
"total": 246.91407346674032,
"count": 1818630,
"is_parallel": true,
"self": 246.91407346674032
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1247.2739696479312,
"count": 181864,
"self": 3.991147702065291,
"children": {
"process_trajectory": {
"total": 266.2548088678577,
"count": 181864,
"self": 262.2578423578582,
"children": {
"RLTrainer._checkpoint": {
"total": 3.996966509999538,
"count": 40,
"self": 3.996966509999538
}
}
},
"_update_policy": {
"total": 977.0280130780081,
"count": 909,
"self": 390.18840093102665,
"children": {
"TorchPPOOptimizer.update": {
"total": 586.8396121469815,
"count": 46356,
"self": 586.8396121469815
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0949997886200435e-06,
"count": 1,
"self": 1.0949997886200435e-06
},
"TrainerController._save_models": {
"total": 0.0901363949997176,
"count": 1,
"self": 0.0009759069998835912,
"children": {
"RLTrainer._checkpoint": {
"total": 0.089160487999834,
"count": 1,
"self": 0.089160487999834
}
}
}
}
}
}
}