{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0897388458251953,
"min": 1.0897388458251953,
"max": 2.8685219287872314,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10404.826171875,
"min": 10404.826171875,
"max": 29344.98046875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.668253898620605,
"min": 0.4068361222743988,
"max": 12.668253898620605,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2470.3095703125,
"min": 78.92620849609375,
"max": 2558.044921875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0668627093694465,
"min": 0.06276919788569121,
"max": 0.07470760182403346,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.267450837477786,
"min": 0.25107679154276485,
"max": 0.36535158983886007,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17748954782591148,
"min": 0.11097887231895298,
"max": 0.28823358543655453,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7099581913036459,
"min": 0.4439154892758119,
"max": 1.2711580267139508,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.386363636363637,
"min": 3.2954545454545454,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1117.0,
"min": 145.0,
"max": 1379.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.386363636363637,
"min": 3.2954545454545454,
"max": 25.386363636363637,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1117.0,
"min": 145.0,
"max": 1379.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675080105",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675080632"
},
"total": 527.287254353,
"count": 1,
"self": 0.5403309509999872,
"children": {
"run_training.setup": {
"total": 0.1275153459999956,
"count": 1,
"self": 0.1275153459999956
},
"TrainerController.start_learning": {
"total": 526.619408056,
"count": 1,
"self": 0.8797183360002236,
"children": {
"TrainerController._reset_env": {
"total": 6.835419184000045,
"count": 1,
"self": 6.835419184000045
},
"TrainerController.advance": {
"total": 518.7747847949997,
"count": 18203,
"self": 0.4275439469979574,
"children": {
"env_step": {
"total": 518.3472408480018,
"count": 18203,
"self": 388.14625897499286,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.80268081100922,
"count": 18203,
"self": 2.4114931530205013,
"children": {
"TorchPolicy.evaluate": {
"total": 127.39118765798872,
"count": 18203,
"self": 21.213432814980592,
"children": {
"TorchPolicy.sample_actions": {
"total": 106.17775484300813,
"count": 18203,
"self": 106.17775484300813
}
}
}
}
},
"workers": {
"total": 0.39830106199968895,
"count": 18203,
"self": 0.0,
"children": {
"worker_root": {
"total": 524.6767807730049,
"count": 18203,
"is_parallel": true,
"self": 229.10292340300992,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00736441199995852,
"count": 1,
"is_parallel": true,
"self": 0.00442984000000024,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0029345719999582798,
"count": 10,
"is_parallel": true,
"self": 0.0029345719999582798
}
}
},
"UnityEnvironment.step": {
"total": 0.03854647400004296,
"count": 1,
"is_parallel": true,
"self": 0.000413764000086303,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002643199999852186,
"count": 1,
"is_parallel": true,
"self": 0.0002643199999852186
},
"communicator.exchange": {
"total": 0.03583943600000339,
"count": 1,
"is_parallel": true,
"self": 0.03583943600000339
},
"steps_from_proto": {
"total": 0.002028953999968053,
"count": 1,
"is_parallel": true,
"self": 0.0004855209999732324,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015434329999948204,
"count": 10,
"is_parallel": true,
"self": 0.0015434329999948204
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 295.573857369995,
"count": 18202,
"is_parallel": true,
"self": 12.389078882008107,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.059626485992737,
"count": 18202,
"is_parallel": true,
"self": 7.059626485992737
},
"communicator.exchange": {
"total": 234.7635471889957,
"count": 18202,
"is_parallel": true,
"self": 234.7635471889957
},
"steps_from_proto": {
"total": 41.36160481299845,
"count": 18202,
"is_parallel": true,
"self": 9.4408201000291,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.92078471296935,
"count": 182020,
"is_parallel": true,
"self": 31.92078471296935
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.6902999909216305e-05,
"count": 1,
"self": 3.6902999909216305e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 512.8735209549103,
"count": 665708,
"is_parallel": true,
"self": 17.10184296693012,
"children": {
"process_trajectory": {
"total": 286.6530636339801,
"count": 665708,
"is_parallel": true,
"self": 285.7571846029802,
"children": {
"RLTrainer._checkpoint": {
"total": 0.895879030999879,
"count": 4,
"is_parallel": true,
"self": 0.895879030999879
}
}
},
"_update_policy": {
"total": 209.1186143540001,
"count": 90,
"is_parallel": true,
"self": 50.46818784399676,
"children": {
"TorchPPOOptimizer.update": {
"total": 158.65042651000334,
"count": 4587,
"is_parallel": true,
"self": 158.65042651000334
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12944883800003026,
"count": 1,
"self": 0.0011721949999810022,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12827664300004926,
"count": 1,
"self": 0.12827664300004926
}
}
}
}
}
}
}