{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.2860615253448486,
"min": 1.2419261932373047,
"max": 2.8552134037017822,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 12293.4619140625,
"min": 12293.4619140625,
"max": 29240.240234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 64.10287475585938,
"min": 0.505084216594696,
"max": 64.10287475585938,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 12500.060546875,
"min": 97.98633575439453,
"max": 12500.060546875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06621475017734565,
"min": 0.06200313892891156,
"max": 0.07302246726484395,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2648590007093826,
"min": 0.25388889340170223,
"max": 0.3490614113193884,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.8442495814433285,
"min": 0.18865362484939396,
"max": 0.8442495814433285,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 3.376998325773314,
"min": 0.7546144993975759,
"max": 3.8259938470288812,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.25,
"min": 3.7954545454545454,
"max": 22.25,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 979.0,
"min": 167.0,
"max": 1215.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.25,
"min": 3.7954545454545454,
"max": 22.25,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 979.0,
"min": 167.0,
"max": 1215.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689435683",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689436156"
},
"total": 473.12772343000006,
"count": 1,
"self": 0.7914289590000294,
"children": {
"run_training.setup": {
"total": 0.062190289999989545,
"count": 1,
"self": 0.062190289999989545
},
"TrainerController.start_learning": {
"total": 472.27410418100004,
"count": 1,
"self": 0.5438083420038993,
"children": {
"TrainerController._reset_env": {
"total": 4.9842468819999795,
"count": 1,
"self": 4.9842468819999795
},
"TrainerController.advance": {
"total": 466.5209851369961,
"count": 18213,
"self": 0.28948835098293557,
"children": {
"env_step": {
"total": 466.23149678601317,
"count": 18213,
"self": 340.9007732520104,
"children": {
"SubprocessEnvManager._take_step": {
"total": 125.05958207200382,
"count": 18213,
"self": 1.8583368140090215,
"children": {
"TorchPolicy.evaluate": {
"total": 123.2012452579948,
"count": 18213,
"self": 123.2012452579948
}
}
},
"workers": {
"total": 0.27114146199892275,
"count": 18213,
"self": 0.0,
"children": {
"worker_root": {
"total": 470.6075418089979,
"count": 18213,
"is_parallel": true,
"self": 219.0485352079976,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006492166000043653,
"count": 1,
"is_parallel": true,
"self": 0.004237468000098943,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022546979999447103,
"count": 10,
"is_parallel": true,
"self": 0.0022546979999447103
}
}
},
"UnityEnvironment.step": {
"total": 0.04228143400001727,
"count": 1,
"is_parallel": true,
"self": 0.00047545299997864277,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003390110000509594,
"count": 1,
"is_parallel": true,
"self": 0.0003390110000509594
},
"communicator.exchange": {
"total": 0.03978238299998793,
"count": 1,
"is_parallel": true,
"self": 0.03978238299998793
},
"steps_from_proto": {
"total": 0.0016845869999997376,
"count": 1,
"is_parallel": true,
"self": 0.00034923200001912846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013353549999806091,
"count": 10,
"is_parallel": true,
"self": 0.0013353549999806091
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 251.5590066010003,
"count": 18212,
"is_parallel": true,
"self": 10.772451217007642,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.313815581998654,
"count": 18212,
"is_parallel": true,
"self": 5.313815581998654
},
"communicator.exchange": {
"total": 199.52079520099505,
"count": 18212,
"is_parallel": true,
"self": 199.52079520099505
},
"steps_from_proto": {
"total": 35.95194460099896,
"count": 18212,
"is_parallel": true,
"self": 6.4805869980046396,
"children": {
"_process_rank_one_or_two_observation": {
"total": 29.471357602994317,
"count": 182120,
"is_parallel": true,
"self": 29.471357602994317
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00013210600002366846,
"count": 1,
"self": 0.00013210600002366846,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 463.12736518802564,
"count": 437211,
"is_parallel": true,
"self": 9.38639839897985,
"children": {
"process_trajectory": {
"total": 252.11443535504526,
"count": 437211,
"is_parallel": true,
"self": 250.35231033304535,
"children": {
"RLTrainer._checkpoint": {
"total": 1.762125021999907,
"count": 4,
"is_parallel": true,
"self": 1.762125021999907
}
}
},
"_update_policy": {
"total": 201.62653143400053,
"count": 90,
"is_parallel": true,
"self": 77.4614314459987,
"children": {
"TorchPPOOptimizer.update": {
"total": 124.16509998800183,
"count": 4584,
"is_parallel": true,
"self": 124.16509998800183
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.22493171400003575,
"count": 1,
"self": 0.0011633039999878747,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22376841000004788,
"count": 1,
"self": 0.22376841000004788
}
}
}
}
}
}
}