{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0536994934082031,
"min": 1.0536994934082031,
"max": 2.85956072807312,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10060.72265625,
"min": 10060.72265625,
"max": 29284.76171875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.688569068908691,
"min": 0.3652726113796234,
"max": 12.688569068908691,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2474.27099609375,
"min": 70.86288452148438,
"max": 2557.008056640625,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.058308800625787166,
"min": 0.058308800625787166,
"max": 0.07311528069654813,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.23323520250314866,
"min": 0.23323520250314866,
"max": 0.3526300441530218,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21606520556059539,
"min": 0.10765716270429501,
"max": 0.30788690414206654,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8642608222423815,
"min": 0.43062865081718005,
"max": 1.310740615515148,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.272727272727273,
"min": 3.1136363636363638,
"max": 25.454545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1112.0,
"min": 137.0,
"max": 1400.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.272727272727273,
"min": 3.1136363636363638,
"max": 25.454545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1112.0,
"min": 137.0,
"max": 1400.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673506322",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673506746"
},
"total": 424.228387118,
"count": 1,
"self": 0.3775776439999845,
"children": {
"run_training.setup": {
"total": 0.10958906799999113,
"count": 1,
"self": 0.10958906799999113
},
"TrainerController.start_learning": {
"total": 423.741220406,
"count": 1,
"self": 0.48090048000437946,
"children": {
"TrainerController._reset_env": {
"total": 6.699862620999966,
"count": 1,
"self": 6.699862620999966
},
"TrainerController.advance": {
"total": 416.43914259099563,
"count": 18200,
"self": 0.27688694298728933,
"children": {
"env_step": {
"total": 416.16225564800834,
"count": 18200,
"self": 271.0453229280163,
"children": {
"SubprocessEnvManager._take_step": {
"total": 144.85216106700182,
"count": 18200,
"self": 1.3544988329966827,
"children": {
"TorchPolicy.evaluate": {
"total": 143.49766223400513,
"count": 18200,
"self": 30.954714091006622,
"children": {
"TorchPolicy.sample_actions": {
"total": 112.54294814299851,
"count": 18200,
"self": 112.54294814299851
}
}
}
}
},
"workers": {
"total": 0.2647716529902482,
"count": 18200,
"self": 0.0,
"children": {
"worker_root": {
"total": 422.54919662999407,
"count": 18200,
"is_parallel": true,
"self": 205.39985900399404,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0069939850000082515,
"count": 1,
"is_parallel": true,
"self": 0.00359992900018824,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0033940559998200115,
"count": 10,
"is_parallel": true,
"self": 0.0033940559998200115
}
}
},
"UnityEnvironment.step": {
"total": 0.03891223200002969,
"count": 1,
"is_parallel": true,
"self": 0.0004893679999895539,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002165949999834993,
"count": 1,
"is_parallel": true,
"self": 0.0002165949999834993
},
"communicator.exchange": {
"total": 0.03641321199995673,
"count": 1,
"is_parallel": true,
"self": 0.03641321199995673
},
"steps_from_proto": {
"total": 0.0017930570000999069,
"count": 1,
"is_parallel": true,
"self": 0.00041361100011272356,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013794459999871833,
"count": 10,
"is_parallel": true,
"self": 0.0013794459999871833
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 217.14933762600003,
"count": 18199,
"is_parallel": true,
"self": 8.299447517020326,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.980577718991412,
"count": 18199,
"is_parallel": true,
"self": 4.980577718991412
},
"communicator.exchange": {
"total": 173.14411489700035,
"count": 18199,
"is_parallel": true,
"self": 173.14411489700035
},
"steps_from_proto": {
"total": 30.725197492987945,
"count": 18199,
"is_parallel": true,
"self": 6.568943460025366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.15625403296258,
"count": 181990,
"is_parallel": true,
"self": 24.15625403296258
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 4.654999997910636e-05,
"count": 1,
"self": 4.654999997910636e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 413.6988770770123,
"count": 327760,
"is_parallel": true,
"self": 8.43599835601799,
"children": {
"process_trajectory": {
"total": 235.33835700499435,
"count": 327760,
"is_parallel": true,
"self": 234.64650410999434,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6918528950000109,
"count": 4,
"is_parallel": true,
"self": 0.6918528950000109
}
}
},
"_update_policy": {
"total": 169.92452171599996,
"count": 90,
"is_parallel": true,
"self": 44.76850057400384,
"children": {
"TorchPPOOptimizer.update": {
"total": 125.15602114199612,
"count": 4587,
"is_parallel": true,
"self": 125.15602114199612
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12126816400007101,
"count": 1,
"self": 0.0008872840001004079,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1203808799999706,
"count": 1,
"self": 0.1203808799999706
}
}
}
}
}
}
}