{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.8513259887695312,
"min": 1.8513259887695312,
"max": 2.8851559162139893,
"count": 10
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 18816.876953125,
"min": 18425.265625,
"max": 29578.619140625,
"count": 10
},
"SnowballTarget.Step.mean": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Step.sum": {
"value": 99960.0,
"min": 9952.0,
"max": 99960.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 5.951883792877197,
"min": 0.22479069232940674,
"max": 5.951883792877197,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1214.184326171875,
"min": 43.60939407348633,
"max": 1214.184326171875,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 10
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04912459823614578,
"min": 0.04417074829349682,
"max": 0.051318535677789136,
"count": 10
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.24562299118072892,
"min": 0.17821466329041868,
"max": 0.25575042429894285,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2910540713618199,
"min": 0.10612709319684654,
"max": 0.2910914970561862,
"count": 10
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.4552703568090994,
"min": 0.4245083727873862,
"max": 1.4552703568090994,
"count": 10
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0976094512e-05,
"min": 1.0976094512e-05,
"max": 0.000189176005412,
"count": 10
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.488047256e-05,
"min": 5.488047256e-05,
"max": 0.00084688007656,
"count": 10
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.105488,
"min": 0.105488,
"max": 0.194588,
"count": 10
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.52744,
"min": 0.461552,
"max": 0.92344,
"count": 10
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0002838512,
"min": 0.0002838512,
"max": 0.004729941200000001,
"count": 10
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.001419256,
"min": 0.001419256,
"max": 0.021179656,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 12.690909090909091,
"min": 2.909090909090909,
"max": 12.690909090909091,
"count": 10
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 698.0,
"min": 128.0,
"max": 698.0,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 12.690909090909091,
"min": 2.909090909090909,
"max": 12.690909090909091,
"count": 10
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 698.0,
"min": 128.0,
"max": 698.0,
"count": 10
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 10
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1681809601",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1681809830"
},
"total": 228.13922394999997,
"count": 1,
"self": 0.4300480540000535,
"children": {
"run_training.setup": {
"total": 0.10884999699999298,
"count": 1,
"self": 0.10884999699999298
},
"TrainerController.start_learning": {
"total": 227.60032589899993,
"count": 1,
"self": 0.3145094609994885,
"children": {
"TrainerController._reset_env": {
"total": 3.857699626999988,
"count": 1,
"self": 3.857699626999988
},
"TrainerController.advance": {
"total": 223.29690562000053,
"count": 9134,
"self": 0.1393002480050427,
"children": {
"env_step": {
"total": 223.1576053719955,
"count": 9134,
"self": 155.3706981869973,
"children": {
"SubprocessEnvManager._take_step": {
"total": 67.64372732100367,
"count": 9134,
"self": 0.8939866110007415,
"children": {
"TorchPolicy.evaluate": {
"total": 66.74974071000292,
"count": 9134,
"self": 66.74974071000292
}
}
},
"workers": {
"total": 0.14317986399453275,
"count": 9134,
"self": 0.0,
"children": {
"worker_root": {
"total": 226.84481095200647,
"count": 9134,
"is_parallel": true,
"self": 101.29701592200615,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005631859999994049,
"count": 1,
"is_parallel": true,
"self": 0.004141775999869424,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014900840001246252,
"count": 10,
"is_parallel": true,
"self": 0.0014900840001246252
}
}
},
"UnityEnvironment.step": {
"total": 0.06570462499996665,
"count": 1,
"is_parallel": true,
"self": 0.0005607649999319619,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046496200002366095,
"count": 1,
"is_parallel": true,
"self": 0.00046496200002366095
},
"communicator.exchange": {
"total": 0.0628620200000114,
"count": 1,
"is_parallel": true,
"self": 0.0628620200000114
},
"steps_from_proto": {
"total": 0.0018168779999996332,
"count": 1,
"is_parallel": true,
"self": 0.00037561400017693813,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001441263999822695,
"count": 10,
"is_parallel": true,
"self": 0.001441263999822695
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 125.54779503000032,
"count": 9133,
"is_parallel": true,
"self": 5.102451592016564,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.85767077799278,
"count": 9133,
"is_parallel": true,
"self": 2.85767077799278
},
"communicator.exchange": {
"total": 100.72151432799535,
"count": 9133,
"is_parallel": true,
"self": 100.72151432799535
},
"steps_from_proto": {
"total": 16.86615833199562,
"count": 9133,
"is_parallel": true,
"self": 3.3350439480026353,
"children": {
"_process_rank_one_or_two_observation": {
"total": 13.531114383992985,
"count": 91330,
"is_parallel": true,
"self": 13.531114383992985
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00034630599998308753,
"count": 1,
"self": 0.00034630599998308753,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 221.49969147301124,
"count": 214872,
"is_parallel": true,
"self": 5.3244156440053985,
"children": {
"process_trajectory": {
"total": 131.60382649300573,
"count": 214872,
"is_parallel": true,
"self": 130.3316676180059,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2721588749998318,
"count": 4,
"is_parallel": true,
"self": 1.2721588749998318
}
}
},
"_update_policy": {
"total": 84.57144933600011,
"count": 45,
"is_parallel": true,
"self": 37.399699890001784,
"children": {
"TorchPPOOptimizer.update": {
"total": 47.17174944599833,
"count": 1080,
"is_parallel": true,
"self": 47.17174944599833
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1308648849999372,
"count": 1,
"self": 0.0009586850000005143,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1299061999999367,
"count": 1,
"self": 0.1299061999999367
}
}
}
}
}
}
}