{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8647285103797913,
"min": 0.8647285103797913,
"max": 2.872610330581665,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8313.5,
"min": 8313.5,
"max": 29418.40234375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.731921195983887,
"min": 0.5242366790771484,
"max": 12.731921195983887,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2482.724609375,
"min": 101.70191955566406,
"max": 2574.3408203125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06269247598615324,
"min": 0.06269247598615324,
"max": 0.07597458879919906,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.25076990394461296,
"min": 0.25076990394461296,
"max": 0.3798729439959953,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.21681054889717521,
"min": 0.13437334983495483,
"max": 0.28053072287755854,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8672421955887009,
"min": 0.5374933993398193,
"max": 1.4026536143877926,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.106097298000005e-06,
"min": 8.106097298000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.242438919200002e-05,
"min": 3.242438919200002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10270200000000002,
"min": 0.10270200000000002,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41080800000000006,
"min": 0.41080800000000006,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00014482980000000007,
"min": 0.00014482980000000007,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005793192000000003,
"min": 0.0005793192000000003,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.204545454545453,
"min": 3.8181818181818183,
"max": 25.204545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1109.0,
"min": 168.0,
"max": 1379.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.204545454545453,
"min": 3.8181818181818183,
"max": 25.204545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1109.0,
"min": 168.0,
"max": 1379.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1683718779",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1683719379"
},
"total": 600.2722447160002,
"count": 1,
"self": 0.8064988080000148,
"children": {
"run_training.setup": {
"total": 0.04701621000003797,
"count": 1,
"self": 0.04701621000003797
},
"TrainerController.start_learning": {
"total": 599.4187296980001,
"count": 1,
"self": 0.8033095919952302,
"children": {
"TrainerController._reset_env": {
"total": 4.913949671999944,
"count": 1,
"self": 4.913949671999944
},
"TrainerController.advance": {
"total": 593.462478595005,
"count": 18220,
"self": 0.39846388299099544,
"children": {
"env_step": {
"total": 593.064014712014,
"count": 18220,
"self": 432.31090582602815,
"children": {
"SubprocessEnvManager._take_step": {
"total": 160.37322787599635,
"count": 18220,
"self": 2.3274540930007106,
"children": {
"TorchPolicy.evaluate": {
"total": 158.04577378299564,
"count": 18220,
"self": 158.04577378299564
}
}
},
"workers": {
"total": 0.3798810099895036,
"count": 18220,
"self": 0.0,
"children": {
"worker_root": {
"total": 597.2713996790116,
"count": 18220,
"is_parallel": true,
"self": 271.28018864502087,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0056444690000034825,
"count": 1,
"is_parallel": true,
"self": 0.003990217000023222,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016542519999802607,
"count": 10,
"is_parallel": true,
"self": 0.0016542519999802607
}
}
},
"UnityEnvironment.step": {
"total": 0.06789969700002985,
"count": 1,
"is_parallel": true,
"self": 0.0006702200000745506,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00042276099998161953,
"count": 1,
"is_parallel": true,
"self": 0.00042276099998161953
},
"communicator.exchange": {
"total": 0.058703828000034264,
"count": 1,
"is_parallel": true,
"self": 0.058703828000034264
},
"steps_from_proto": {
"total": 0.008102887999939412,
"count": 1,
"is_parallel": true,
"self": 0.000501078999946003,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007601808999993409,
"count": 10,
"is_parallel": true,
"self": 0.007601808999993409
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 325.9912110339907,
"count": 18219,
"is_parallel": true,
"self": 12.404892447988573,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.91822566000792,
"count": 18219,
"is_parallel": true,
"self": 6.91822566000792
},
"communicator.exchange": {
"total": 261.08298435400843,
"count": 18219,
"is_parallel": true,
"self": 261.08298435400843
},
"steps_from_proto": {
"total": 45.58510857198576,
"count": 18219,
"is_parallel": true,
"self": 9.204859688981855,
"children": {
"_process_rank_one_or_two_observation": {
"total": 36.3802488830039,
"count": 182190,
"is_parallel": true,
"self": 36.3802488830039
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001993379999021272,
"count": 1,
"self": 0.0001993379999021272,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 588.2775542139818,
"count": 558314,
"is_parallel": true,
"self": 14.003330464023975,
"children": {
"process_trajectory": {
"total": 330.5854625669581,
"count": 558314,
"is_parallel": true,
"self": 328.623389816958,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9620727500000612,
"count": 4,
"is_parallel": true,
"self": 1.9620727500000612
}
}
},
"_update_policy": {
"total": 243.68876118299977,
"count": 90,
"is_parallel": true,
"self": 90.19283626999754,
"children": {
"TorchPPOOptimizer.update": {
"total": 153.49592491300223,
"count": 4584,
"is_parallel": true,
"self": 153.49592491300223
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.23879250100003446,
"count": 1,
"self": 0.0011919539999780682,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2376005470000564,
"count": 1,
"self": 0.2376005470000564
}
}
}
}
}
}
}