{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8731207847595215,
"min": 0.8715961575508118,
"max": 2.8693485260009766,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8365.3701171875,
"min": 8365.3701171875,
"max": 29416.560546875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 6.442963600158691,
"min": 0.06724988669157028,
"max": 6.442963600158691,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 1256.3779296875,
"min": 13.046478271484375,
"max": 1306.3614501953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.454545454545453,
"min": 3.5454545454545454,
"max": 25.509090909090908,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1120.0,
"min": 156.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.454545454545453,
"min": 3.5454545454545454,
"max": 25.509090909090908,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1120.0,
"min": 156.0,
"max": 1403.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.034210455457832724,
"min": 0.028208378084400465,
"max": 0.0352522443679239,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.06842091091566545,
"min": 0.05641675616880093,
"max": 0.1057567331037717,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.13462310014292597,
"min": 0.079695287020877,
"max": 0.1987219991783301,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.26924620028585194,
"min": 0.159390574041754,
"max": 0.5961659975349903,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.0720097856000008e-05,
"min": 1.0720097856000008e-05,
"max": 0.000483720003256,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.1440195712000017e-05,
"min": 2.1440195712000017e-05,
"max": 0.0012366600526679998,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10428799999999999,
"min": 0.10428799999999999,
"max": 0.29348799999999997,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20857599999999998,
"min": 0.20857599999999998,
"max": 0.794664,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00015986560000000012,
"min": 0.00015986560000000012,
"max": 0.006772405600000001,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00031973120000000025,
"min": 0.00031973120000000025,
"max": 0.0173185068,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1687290284",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1687290755"
},
"total": 471.5478387570001,
"count": 1,
"self": 0.43755301500010546,
"children": {
"run_training.setup": {
"total": 0.040696868999987146,
"count": 1,
"self": 0.040696868999987146
},
"TrainerController.start_learning": {
"total": 471.069588873,
"count": 1,
"self": 0.5572934599981636,
"children": {
"TrainerController._reset_env": {
"total": 4.027367986999991,
"count": 1,
"self": 4.027367986999991
},
"TrainerController.advance": {
"total": 466.2969910570017,
"count": 18218,
"self": 0.2567138620020728,
"children": {
"env_step": {
"total": 466.04027719499965,
"count": 18218,
"self": 338.4191967200124,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.36943891998982,
"count": 18218,
"self": 1.8134467679924455,
"children": {
"TorchPolicy.evaluate": {
"total": 125.55599215199737,
"count": 18218,
"self": 125.55599215199737
}
}
},
"workers": {
"total": 0.251641554997434,
"count": 18218,
"self": 0.0,
"children": {
"worker_root": {
"total": 469.47005129198976,
"count": 18218,
"is_parallel": true,
"self": 229.97332278299268,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005415299999981471,
"count": 1,
"is_parallel": true,
"self": 0.003890445000024556,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001524854999956915,
"count": 10,
"is_parallel": true,
"self": 0.001524854999956915
}
}
},
"UnityEnvironment.step": {
"total": 0.04492465599997786,
"count": 1,
"is_parallel": true,
"self": 0.0005923900000084359,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004077079999547095,
"count": 1,
"is_parallel": true,
"self": 0.0004077079999547095
},
"communicator.exchange": {
"total": 0.04194489400003931,
"count": 1,
"is_parallel": true,
"self": 0.04194489400003931
},
"steps_from_proto": {
"total": 0.0019796639999754007,
"count": 1,
"is_parallel": true,
"self": 0.0003473470000585621,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016323169999168385,
"count": 10,
"is_parallel": true,
"self": 0.0016323169999168385
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 239.49672850899708,
"count": 18217,
"is_parallel": true,
"self": 10.106359760009923,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.067650246997005,
"count": 18217,
"is_parallel": true,
"self": 5.067650246997005
},
"communicator.exchange": {
"total": 190.6459399589951,
"count": 18217,
"is_parallel": true,
"self": 190.6459399589951
},
"steps_from_proto": {
"total": 33.676778542995066,
"count": 18217,
"is_parallel": true,
"self": 6.076048975002436,
"children": {
"_process_rank_one_or_two_observation": {
"total": 27.60072956799263,
"count": 182170,
"is_parallel": true,
"self": 27.60072956799263
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00019385100006275024,
"count": 1,
"self": 0.00019385100006275024,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 461.8160306720341,
"count": 553600,
"is_parallel": true,
"self": 11.900291695057888,
"children": {
"process_trajectory": {
"total": 298.69797242497634,
"count": 553600,
"is_parallel": true,
"self": 297.2430913569765,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4548810679998496,
"count": 4,
"is_parallel": true,
"self": 1.4548810679998496
}
}
},
"_update_policy": {
"total": 151.2177665519999,
"count": 45,
"is_parallel": true,
"self": 96.8147014110013,
"children": {
"TorchPPOOptimizer.update": {
"total": 54.40306514099859,
"count": 1800,
"is_parallel": true,
"self": 54.40306514099859
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.18774251800005004,
"count": 1,
"self": 0.0010076959999878454,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1867348220000622,
"count": 1,
"self": 0.1867348220000622
}
}
}
}
}
}
}