{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0585618019104004,
"min": 1.0534974336624146,
"max": 2.8751986026763916,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9385.208984375,
"min": 9385.208984375,
"max": 31975.083984375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 1.3437092304229736,
"min": 0.1186748743057251,
"max": 1.3437092304229736,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 135.71463012695312,
"min": 11.511463165283203,
"max": 135.71463012695312,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 6766.0,
"max": 13134.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.403846153846153,
"min": 3.25,
"max": 26.403846153846153,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1373.0,
"min": 143.0,
"max": 1410.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.403846153846153,
"min": 3.25,
"max": 26.403846153846153,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1373.0,
"min": 143.0,
"max": 1410.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06918457031377008,
"min": 0.06340250090711705,
"max": 0.0716663325315717,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2767382812550803,
"min": 0.26945240781217356,
"max": 0.3550604389867801,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.08374894166831687,
"min": 0.04444083816590993,
"max": 0.10852076508427416,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.33499576667326747,
"min": 0.1777633526636397,
"max": 0.54101474932471,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.336097332000003e-06,
"min": 5.336097332000003e-06,
"max": 0.00019450000275,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.1344389328000012e-05,
"min": 2.1344389328000012e-05,
"max": 0.0009231440384280002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10266800000000001,
"min": 0.10266800000000001,
"max": 0.19725000000000004,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41067200000000004,
"min": 0.41067200000000004,
"max": 0.961572,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014313320000000008,
"min": 0.00014313320000000008,
"max": 0.004862775,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005725328000000003,
"min": 0.0005725328000000003,
"max": 0.0230824428,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687944727",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687945242"
},
"total": 515.735848857,
"count": 1,
"self": 0.43163641799992547,
"children": {
"run_training.setup": {
"total": 0.04230669300000045,
"count": 1,
"self": 0.04230669300000045
},
"TrainerController.start_learning": {
"total": 515.261905746,
"count": 1,
"self": 0.6076182450001397,
"children": {
"TrainerController._reset_env": {
"total": 4.131036575999929,
"count": 1,
"self": 4.131036575999929
},
"TrainerController.advance": {
"total": 510.32035672200027,
"count": 18224,
"self": 0.3265962289996196,
"children": {
"env_step": {
"total": 509.99376049300065,
"count": 18224,
"self": 324.5830188959908,
"children": {
"SubprocessEnvManager._take_step": {
"total": 185.1161776719963,
"count": 18224,
"self": 1.6514881780188944,
"children": {
"TorchPolicy.evaluate": {
"total": 183.4646894939774,
"count": 18224,
"self": 183.4646894939774
}
}
},
"workers": {
"total": 0.29456392501356277,
"count": 18224,
"self": 0.0,
"children": {
"worker_root": {
"total": 513.6363847340235,
"count": 18224,
"is_parallel": true,
"self": 253.0477841340197,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005616421000013361,
"count": 1,
"is_parallel": true,
"self": 0.0038329379998458535,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017834830001675073,
"count": 10,
"is_parallel": true,
"self": 0.0017834830001675073
}
}
},
"UnityEnvironment.step": {
"total": 0.05398624699989796,
"count": 1,
"is_parallel": true,
"self": 0.0005126499999050793,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037754699997094576,
"count": 1,
"is_parallel": true,
"self": 0.00037754699997094576
},
"communicator.exchange": {
"total": 0.05103094300000066,
"count": 1,
"is_parallel": true,
"self": 0.05103094300000066
},
"steps_from_proto": {
"total": 0.0020651070000212712,
"count": 1,
"is_parallel": true,
"self": 0.00037329399992813705,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016918130000931342,
"count": 10,
"is_parallel": true,
"self": 0.0016918130000931342
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 260.5886006000038,
"count": 18223,
"is_parallel": true,
"self": 10.827706262984066,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.657928923012605,
"count": 18223,
"is_parallel": true,
"self": 5.657928923012605
},
"communicator.exchange": {
"total": 205.9016679619889,
"count": 18223,
"is_parallel": true,
"self": 205.9016679619889
},
"steps_from_proto": {
"total": 38.20129745201825,
"count": 18223,
"is_parallel": true,
"self": 7.05853067891519,
"children": {
"_process_rank_one_or_two_observation": {
"total": 31.142766773103062,
"count": 182230,
"is_parallel": true,
"self": 31.142766773103062
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0002090189998398273,
"count": 1,
"self": 0.0002090189998398273,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 510.49272270501854,
"count": 46181,
"is_parallel": true,
"self": 1.138427116009325,
"children": {
"process_trajectory": {
"total": 94.51433682700917,
"count": 46181,
"is_parallel": true,
"self": 91.90544054800921,
"children": {
"RLTrainer._checkpoint": {
"total": 2.608896278999964,
"count": 4,
"is_parallel": true,
"self": 2.608896278999964
}
}
},
"_update_policy": {
"total": 414.83995876200004,
"count": 90,
"is_parallel": true,
"self": 125.08211349099997,
"children": {
"TorchPPOOptimizer.update": {
"total": 289.75784527100006,
"count": 6120,
"is_parallel": true,
"self": 289.75784527100006
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.20268518399984714,
"count": 1,
"self": 0.0007686559999910969,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20191652799985604,
"count": 1,
"self": 0.20191652799985604
}
}
}
}
}
}
}