{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7776843905448914,
"min": 0.7776843905448914,
"max": 2.84915828704834,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7391.1123046875,
"min": 7391.1123046875,
"max": 29084.20703125,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.852338790893555,
"min": 0.4086481034755707,
"max": 12.852338790893555,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2506.2060546875,
"min": 79.2777328491211,
"max": 2602.386962890625,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07370762677273174,
"min": 0.06243369443223392,
"max": 0.07729937483215027,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.29483050709092695,
"min": 0.24973477772893568,
"max": 0.384961966329309,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.18538151659509716,
"min": 0.12491344298948259,
"max": 0.2957010899104324,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7415260663803886,
"min": 0.49965377195793037,
"max": 1.4785054495521621,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.59090909090909,
"min": 3.7045454545454546,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1126.0,
"min": 163.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.59090909090909,
"min": 3.7045454545454546,
"max": 25.59090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1126.0,
"min": 163.0,
"max": 1395.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1744793781",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1744794229"
},
"total": 447.50007808,
"count": 1,
"self": 0.4346174429999792,
"children": {
"run_training.setup": {
"total": 0.022820045999992544,
"count": 1,
"self": 0.022820045999992544
},
"TrainerController.start_learning": {
"total": 447.042640591,
"count": 1,
"self": 0.36909088900114284,
"children": {
"TrainerController._reset_env": {
"total": 3.3600756359999764,
"count": 1,
"self": 3.3600756359999764
},
"TrainerController.advance": {
"total": 443.2090903889989,
"count": 18192,
"self": 0.38308835100053784,
"children": {
"env_step": {
"total": 316.9699314239966,
"count": 18192,
"self": 241.10148272199575,
"children": {
"SubprocessEnvManager._take_step": {
"total": 75.64711696199811,
"count": 18192,
"self": 1.3291381989939737,
"children": {
"TorchPolicy.evaluate": {
"total": 74.31797876300413,
"count": 18192,
"self": 74.31797876300413
}
}
},
"workers": {
"total": 0.22133174000276767,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 445.5731239069939,
"count": 18192,
"is_parallel": true,
"self": 233.7211670220005,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.009555677999969703,
"count": 1,
"is_parallel": true,
"self": 0.006995405999930426,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025602720000392765,
"count": 10,
"is_parallel": true,
"self": 0.0025602720000392765
}
}
},
"UnityEnvironment.step": {
"total": 0.03668225799998481,
"count": 1,
"is_parallel": true,
"self": 0.0005773380000277939,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040522499995176986,
"count": 1,
"is_parallel": true,
"self": 0.00040522499995176986
},
"communicator.exchange": {
"total": 0.033818902000007256,
"count": 1,
"is_parallel": true,
"self": 0.033818902000007256
},
"steps_from_proto": {
"total": 0.0018807929999979933,
"count": 1,
"is_parallel": true,
"self": 0.00040552899997692293,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014752640000210704,
"count": 10,
"is_parallel": true,
"self": 0.0014752640000210704
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 211.8519568849934,
"count": 18191,
"is_parallel": true,
"self": 9.982251717991403,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.617068619006147,
"count": 18191,
"is_parallel": true,
"self": 5.617068619006147
},
"communicator.exchange": {
"total": 163.07606065199906,
"count": 18191,
"is_parallel": true,
"self": 163.07606065199906
},
"steps_from_proto": {
"total": 33.17657589599679,
"count": 18191,
"is_parallel": true,
"self": 5.989651999985256,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.186923896011535,
"count": 181910,
"is_parallel": true,
"self": 27.186923896011535
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 125.85607061400174,
"count": 18192,
"self": 0.4770062790025804,
"children": {
"process_trajectory": {
"total": 28.26575067599896,
"count": 18192,
"self": 27.81763648799887,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4481141880000905,
"count": 4,
"self": 0.4481141880000905
}
}
},
"_update_policy": {
"total": 97.1133136590002,
"count": 90,
"self": 38.76932686699831,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.343986792001886,
"count": 4587,
"self": 58.343986792001886
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0129999736818718e-06,
"count": 1,
"self": 1.0129999736818718e-06
},
"TrainerController._save_models": {
"total": 0.10438266400001339,
"count": 1,
"self": 0.0009925880000309917,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1033900759999824,
"count": 1,
"self": 0.1033900759999824
}
}
}
}
}
}
}