{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0986477136611938,
"min": 1.0986477136611938,
"max": 2.873309850692749,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10501.9736328125,
"min": 10501.9736328125,
"max": 29393.9609375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 8.215096473693848,
"min": 0.30705520510673523,
"max": 8.215096473693848,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1601.9437255859375,
"min": 59.56871032714844,
"max": 1657.724365234375,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07136622569626472,
"min": 0.06426109877205567,
"max": 0.07412953849829958,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28546490278505887,
"min": 0.2649621272869933,
"max": 0.36382579032272355,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21123314042593916,
"min": 0.11189294867582764,
"max": 0.2317389920646069,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8449325617037566,
"min": 0.44757179470331054,
"max": 1.1586949603230345,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 9.429097306000008e-06,
"min": 9.429097306000008e-06,
"max": 0.0003405290027059999,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.771638922400003e-05,
"min": 3.771638922400003e-05,
"max": 0.00161602003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10134700000000002,
"min": 0.10134700000000002,
"max": 0.148647,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.4053880000000001,
"min": 0.4053880000000001,
"max": 0.73086,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 24.568181818181817,
"min": 3.4545454545454546,
"max": 24.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1081.0,
"min": 152.0,
"max": 1328.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 24.568181818181817,
"min": 3.4545454545454546,
"max": 24.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1081.0,
"min": 152.0,
"max": 1328.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1702031226",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1702031758"
},
"total": 531.9665597500001,
"count": 1,
"self": 0.4962068030002911,
"children": {
"run_training.setup": {
"total": 0.06357163999996374,
"count": 1,
"self": 0.06357163999996374
},
"TrainerController.start_learning": {
"total": 531.4067813069998,
"count": 1,
"self": 0.6790782640000543,
"children": {
"TrainerController._reset_env": {
"total": 4.764014189000022,
"count": 1,
"self": 4.764014189000022
},
"TrainerController.advance": {
"total": 525.8652131089997,
"count": 18200,
"self": 0.3232452120018934,
"children": {
"env_step": {
"total": 525.5419678969978,
"count": 18200,
"self": 357.70478318199184,
"children": {
"SubprocessEnvManager._take_step": {
"total": 167.49639028799425,
"count": 18200,
"self": 1.69389377498851,
"children": {
"TorchPolicy.evaluate": {
"total": 165.80249651300574,
"count": 18200,
"self": 165.80249651300574
}
}
},
"workers": {
"total": 0.3407944270117014,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 529.9182251560046,
"count": 18200,
"is_parallel": true,
"self": 260.02869286500606,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005932075000032455,
"count": 1,
"is_parallel": true,
"self": 0.0044597569999496045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014723180000828506,
"count": 10,
"is_parallel": true,
"self": 0.0014723180000828506
}
}
},
"UnityEnvironment.step": {
"total": 0.03737989900002958,
"count": 1,
"is_parallel": true,
"self": 0.0007096120001506279,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00040048999994724,
"count": 1,
"is_parallel": true,
"self": 0.00040048999994724
},
"communicator.exchange": {
"total": 0.034087128000010125,
"count": 1,
"is_parallel": true,
"self": 0.034087128000010125
},
"steps_from_proto": {
"total": 0.0021826689999215887,
"count": 1,
"is_parallel": true,
"self": 0.0004181289997404747,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001764540000181114,
"count": 10,
"is_parallel": true,
"self": 0.001764540000181114
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 269.88953229099855,
"count": 18199,
"is_parallel": true,
"self": 11.89432997697952,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.874545148006291,
"count": 18199,
"is_parallel": true,
"self": 5.874545148006291
},
"communicator.exchange": {
"total": 213.81577096,
"count": 18199,
"is_parallel": true,
"self": 213.81577096
},
"steps_from_proto": {
"total": 38.304886206012725,
"count": 18199,
"is_parallel": true,
"self": 7.445713530009698,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.859172676003027,
"count": 181990,
"is_parallel": true,
"self": 30.859172676003027
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00016846600010467228,
"count": 1,
"self": 0.00016846600010467228,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 521.1614732539387,
"count": 555738,
"is_parallel": true,
"self": 12.647322576887632,
"children": {
"process_trajectory": {
"total": 294.7357675620509,
"count": 555738,
"is_parallel": true,
"self": 294.21722451605103,
"children": {
"RLTrainer._checkpoint": {
"total": 0.518543045999877,
"count": 4,
"is_parallel": true,
"self": 0.518543045999877
}
}
},
"_update_policy": {
"total": 213.7783831150001,
"count": 90,
"is_parallel": true,
"self": 67.43026347599607,
"children": {
"TorchPPOOptimizer.update": {
"total": 146.34811963900404,
"count": 4587,
"is_parallel": true,
"self": 146.34811963900404
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09830727899998237,
"count": 1,
"self": 0.0009912799998801347,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09731599900010224,
"count": 1,
"self": 0.09731599900010224
}
}
}
}
}
}
}