{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 3.1757891178131104,
"min": 3.052255868911743,
"max": 3.2957327365875244,
"count": 103
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 63210.90625,
"min": 21711.99609375,
"max": 113837.3984375,
"count": 103
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 408.44444444444446,
"max": 999.0,
"count": 103
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19980.0,
"min": 14704.0,
"max": 26728.0,
"count": 103
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1190.533017805937,
"min": 1189.491936846409,
"max": 1201.026640494582,
"count": 78
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 7143.198106835623,
"min": 2379.5120552007315,
"max": 16797.025016783464,
"count": 78
},
"SoccerTwos.Step.mean": {
"value": 1029712.0,
"min": 9380.0,
"max": 1029712.0,
"count": 103
},
"SoccerTwos.Step.sum": {
"value": 1029712.0,
"min": 9380.0,
"max": 1029712.0,
"count": 103
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.003727235598489642,
"min": -0.10226910561323166,
"max": 0.02684769406914711,
"count": 103
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 0.03727235645055771,
"min": -2.147559881210327,
"max": 0.34388789534568787,
"count": 103
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.004195396788418293,
"min": -0.10219008475542068,
"max": 0.024234874173998833,
"count": 103
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 0.04195396602153778,
"min": -2.145991802215576,
"max": 0.31505337357521057,
"count": 103
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 103
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 103
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.0,
"min": -0.5,
"max": 0.34271429093288525,
"count": 103
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 4.798000073060393,
"count": 103
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.0,
"min": -0.5,
"max": 0.34271429093288525,
"count": 103
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 0.0,
"min": -6.0,
"max": 4.798000073060393,
"count": 103
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 103
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 103
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.016643856981924426,
"min": 0.012222096690675244,
"max": 0.023487966150666277,
"count": 48
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.016643856981924426,
"min": 0.012222096690675244,
"max": 0.023487966150666277,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 8.994354948299587e-06,
"min": 3.119201634641892e-06,
"max": 0.006860534210378925,
"count": 48
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 8.994354948299587e-06,
"min": 3.119201634641892e-06,
"max": 0.006860534210378925,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 8.417320426209093e-06,
"min": 3.3641932607982502e-06,
"max": 0.006350147669824461,
"count": 48
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 8.417320426209093e-06,
"min": 3.3641932607982502e-06,
"max": 0.006350147669824461,
"count": 48
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 48
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 48
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 48
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 48
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 48
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 48
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685949145",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\m.haurel\\Anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1685951497"
},
"total": 2352.6081789,
"count": 1,
"self": 0.10230349999983446,
"children": {
"run_training.setup": {
"total": 0.09347729999999999,
"count": 1,
"self": 0.09347729999999999
},
"TrainerController.start_learning": {
"total": 2352.4123981000002,
"count": 1,
"self": 1.1738023000089015,
"children": {
"TrainerController._reset_env": {
"total": 2.7633410000001457,
"count": 6,
"self": 2.7633410000001457
},
"TrainerController.advance": {
"total": 2348.367615499991,
"count": 67789,
"self": 1.1453414000502562,
"children": {
"env_step": {
"total": 846.22593609999,
"count": 67789,
"self": 637.7993366999939,
"children": {
"SubprocessEnvManager._take_step": {
"total": 207.6809375999962,
"count": 67789,
"self": 6.689052299961844,
"children": {
"TorchPolicy.evaluate": {
"total": 200.99188530003437,
"count": 134638,
"self": 200.99188530003437
}
}
},
"workers": {
"total": 0.7456617999999322,
"count": 67789,
"self": 0.0,
"children": {
"worker_root": {
"total": 2348.396970900011,
"count": 67789,
"is_parallel": true,
"self": 1848.5679559000198,
"children": {
"steps_from_proto": {
"total": 0.01018230000011533,
"count": 12,
"is_parallel": true,
"self": 0.001959699999309006,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.008222600000806324,
"count": 48,
"is_parallel": true,
"self": 0.008222600000806324
}
}
},
"UnityEnvironment.step": {
"total": 499.81883269999133,
"count": 67789,
"is_parallel": true,
"self": 26.514116499926104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.818813300014646,
"count": 67789,
"is_parallel": true,
"self": 21.818813300014646
},
"communicator.exchange": {
"total": 365.4592283000048,
"count": 67789,
"is_parallel": true,
"self": 365.4592283000048
},
"steps_from_proto": {
"total": 86.02667460004581,
"count": 135578,
"is_parallel": true,
"self": 17.92055120011389,
"children": {
"_process_rank_one_or_two_observation": {
"total": 68.10612339993192,
"count": 542312,
"is_parallel": true,
"self": 68.10612339993192
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1500.996337999951,
"count": 67788,
"self": 8.665577099961183,
"children": {
"process_trajectory": {
"total": 174.07086839998914,
"count": 67788,
"self": 173.82330519998885,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2475632000002861,
"count": 2,
"self": 0.2475632000002861
}
}
},
"_update_policy": {
"total": 1318.2598925000007,
"count": 48,
"self": 128.793430000002,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1189.4664624999987,
"count": 1440,
"self": 1189.4664624999987
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.000000318337698e-07,
"count": 1,
"self": 9.000000318337698e-07
},
"TrainerController._save_models": {
"total": 0.10763839999981428,
"count": 1,
"self": 0.004882400000042253,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10275599999977203,
"count": 1,
"self": 0.10275599999977203
}
}
}
}
}
}
}