{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.703129291534424,
"min": 2.703129291534424,
"max": 2.7793354988098145,
"count": 14
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 53543.5859375,
"min": 42438.78125,
"max": 65375.390625,
"count": 14
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 69.6029411764706,
"min": 56.020833333333336,
"max": 98.76,
"count": 14
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 18932.0,
"min": 10756.0,
"max": 20432.0,
"count": 14
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1278.8971824827797,
"min": 1268.108171043482,
"max": 1281.830568073172,
"count": 14
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 173930.01681765803,
"min": 121738.38442017426,
"max": 188599.82272090457,
"count": 14
},
"SoccerTwos.Step.mean": {
"value": 2889962.0,
"min": 2759977.0,
"max": 2889962.0,
"count": 14
},
"SoccerTwos.Step.sum": {
"value": 2889962.0,
"min": 2759977.0,
"max": 2889962.0,
"count": 14
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.08530861884355545,
"min": -0.034373119473457336,
"max": 0.08530861884355545,
"count": 14
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 11.687280654907227,
"min": -3.437312126159668,
"max": 11.687280654907227,
"count": 14
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.08278385549783707,
"min": -0.03185887262225151,
"max": 0.08508288115262985,
"count": 14
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 11.341388702392578,
"min": -3.185887098312378,
"max": 11.341388702392578,
"count": 14
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 14
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 14
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0462510959945456,
"min": -0.18280000131107071,
"max": 0.19943925654776742,
"count": 14
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 6.336400151252747,
"min": -19.393999814987183,
"max": 21.340000450611115,
"count": 14
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0462510959945456,
"min": -0.18280000131107071,
"max": 0.19943925654776742,
"count": 14
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 6.336400151252747,
"min": -19.393999814987183,
"max": 21.340000450611115,
"count": 14
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 14
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.015285970742782713,
"min": 0.015285970742782713,
"max": 0.01743163846937629,
"count": 3
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.015285970742782713,
"min": 0.015285970742782713,
"max": 0.01743163846937629,
"count": 3
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06671442395697037,
"min": 0.06047525306542714,
"max": 0.06671442395697037,
"count": 3
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06671442395697037,
"min": 0.06047525306542714,
"max": 0.06671442395697037,
"count": 3
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.0683846140280366,
"min": 0.06145328295727571,
"max": 0.0683846140280366,
"count": 3
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.0683846140280366,
"min": 0.06145328295727571,
"max": 0.0683846140280366,
"count": 3
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.000278414512195165,
"min": 0.000278414512195165,
"max": 0.00027903186698937987,
"count": 3
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.000278414512195165,
"min": 0.000278414512195165,
"max": 0.00027903186698937987,
"count": 3
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.2392072525,
"min": 0.2392072525,
"max": 0.23951593,
"count": 3
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.2392072525,
"min": 0.2392072525,
"max": 0.23951593,
"count": 3
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.0046409612665,
"min": 0.0046409612665,
"max": 0.0046512299379999995,
"count": 3
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.0046409612665,
"min": 0.0046409612665,
"max": 0.0046512299379999995,
"count": 3
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676277774",
"python_version": "3.9.0 (default, Nov 15 2020, 14:28:56) \n[GCC 7.3.0]",
"command_line_arguments": "/home/olav/dev/anaconda3/envs/rl/bin/mlagents-learn hectorjelly_keano2.yaml --env=./ml-agents/training-envs-executables/SoccerTwos.x86_64 --run-id='keano010' --no-graphics --results-dir=./ml-agents/results --resume",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1676277848"
},
"total": 73.968246739998,
"count": 1,
"self": 0.08385452099173563,
"children": {
"run_training.setup": {
"total": 0.00852127600228414,
"count": 1,
"self": 0.00852127600228414
},
"TrainerController.start_learning": {
"total": 73.87587094300397,
"count": 1,
"self": 0.11096652713604271,
"children": {
"TrainerController._reset_env": {
"total": 1.9044654190001893,
"count": 2,
"self": 1.9044654190001893
},
"TrainerController.advance": {
"total": 71.72593067586422,
"count": 9017,
"self": 0.10724471049616113,
"children": {
"env_step": {
"total": 39.63989065177884,
"count": 9017,
"self": 12.972749723419838,
"children": {
"SubprocessEnvManager._take_step": {
"total": 26.602160032780375,
"count": 9951,
"self": 0.7460532058612444,
"children": {
"TorchPolicy.evaluate": {
"total": 25.85610682691913,
"count": 18440,
"self": 25.85610682691913
}
}
},
"workers": {
"total": 0.0649808955786284,
"count": 9017,
"self": 0.0,
"children": {
"worker_root": {
"total": 216.40812493732665,
"count": 9948,
"is_parallel": true,
"self": 152.2895102654511,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001390852004988119,
"count": 2,
"is_parallel": true,
"self": 0.00037795299431309104,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001012899010675028,
"count": 8,
"is_parallel": true,
"self": 0.001012899010675028
}
}
},
"UnityEnvironment.step": {
"total": 0.014402177999727428,
"count": 1,
"is_parallel": true,
"self": 0.0003619839990278706,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003440830041654408,
"count": 1,
"is_parallel": true,
"self": 0.0003440830041654408
},
"communicator.exchange": {
"total": 0.012468519998947158,
"count": 1,
"is_parallel": true,
"self": 0.012468519998947158
},
"steps_from_proto": {
"total": 0.0012275909975869581,
"count": 2,
"is_parallel": true,
"self": 0.00027497202245285735,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009526189751341008,
"count": 8,
"is_parallel": true,
"self": 0.0009526189751341008
}
}
}
}
}
}
},
"steps_from_proto": {
"total": 0.0031462580082006752,
"count": 6,
"is_parallel": true,
"self": 0.0005912540218560025,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025550039863446727,
"count": 24,
"is_parallel": true,
"self": 0.0025550039863446727
}
}
},
"UnityEnvironment.step": {
"total": 64.11546841386735,
"count": 9947,
"is_parallel": true,
"self": 4.476060561763006,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 2.7713833530287957,
"count": 9947,
"is_parallel": true,
"self": 2.7713833530287957
},
"communicator.exchange": {
"total": 43.584730614573346,
"count": 9947,
"is_parallel": true,
"self": 43.584730614573346
},
"steps_from_proto": {
"total": 13.2832938845022,
"count": 19894,
"is_parallel": true,
"self": 2.5061248958445503,
"children": {
"_process_rank_one_or_two_observation": {
"total": 10.77716898865765,
"count": 79576,
"is_parallel": true,
"self": 10.77716898865765
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 31.978795313589217,
"count": 9017,
"self": 1.2593850398625364,
"children": {
"process_trajectory": {
"total": 13.47246632372844,
"count": 9017,
"self": 13.47246632372844
},
"_update_policy": {
"total": 17.24694394999824,
"count": 3,
"self": 12.398393929011945,
"children": {
"TorchPOCAOptimizer.update": {
"total": 4.848550020986295,
"count": 180,
"self": 4.848550020986295
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.100017344579101e-07,
"count": 1,
"self": 6.100017344579101e-07
},
"TrainerController._save_models": {
"total": 0.13450771100178827,
"count": 1,
"self": 0.0010589190060272813,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13344879199576098,
"count": 1,
"self": 0.13344879199576098
}
}
}
}
}
}
}