{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.7158788442611694,
"min": 0.6993654370307922,
"max": 2.860241174697876,
"count": 30
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7339.18994140625,
"min": 6780.953125,
"max": 29354.65625,
"count": 30
},
"SnowballTarget.Step.mean": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Step.sum": {
"value": 299968.0,
"min": 9952.0,
"max": 299968.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.269784927368164,
"min": 0.27604958415031433,
"max": 13.291929244995117,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2707.0361328125,
"min": 53.553619384765625,
"max": 2724.845458984375,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 30
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06749971089382453,
"min": 0.06312681999804458,
"max": 0.07445974530442162,
"count": 30
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3374985544691227,
"min": 0.2525072799921783,
"max": 0.36952820682542065,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20504871250367632,
"min": 0.12077927482017661,
"max": 0.27141019994137333,
"count": 30
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0252435625183816,
"min": 0.48311709928070645,
"max": 1.3570509997068667,
"count": 30
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 5.288098237333331e-06,
"min": 5.288098237333331e-06,
"max": 0.00029458800180399996,
"count": 30
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.6440491186666655e-05,
"min": 2.6440491186666655e-05,
"max": 0.0014234400255199997,
"count": 30
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10176266666666667,
"min": 0.10176266666666667,
"max": 0.198196,
"count": 30
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5088133333333333,
"min": 0.42025066666666666,
"max": 0.97448,
"count": 30
},
"SnowballTarget.Policy.Beta.mean": {
"value": 9.795706666666662e-05,
"min": 9.795706666666662e-05,
"max": 0.0049099804000000006,
"count": 30
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0004897853333333331,
"min": 0.0004897853333333331,
"max": 0.023726551999999998,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.163636363636364,
"min": 3.409090909090909,
"max": 26.431818181818183,
"count": 30
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1439.0,
"min": 150.0,
"max": 1439.0,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.163636363636364,
"min": 3.409090909090909,
"max": 26.431818181818183,
"count": 30
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1439.0,
"min": 150.0,
"max": 1439.0,
"count": 30
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 30
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1724167000",
"python_version": "3.10.12 (main, Jul 29 2024, 16:56:48) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.3.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1724167889"
},
"total": 889.365831131,
"count": 1,
"self": 0.5465748420001546,
"children": {
"run_training.setup": {
"total": 0.12914078000000018,
"count": 1,
"self": 0.12914078000000018
},
"TrainerController.start_learning": {
"total": 888.690115509,
"count": 1,
"self": 1.2355751429927295,
"children": {
"TrainerController._reset_env": {
"total": 3.9544220670000527,
"count": 1,
"self": 3.9544220670000527
},
"TrainerController.advance": {
"total": 883.3992562720073,
"count": 27333,
"self": 0.6195305070118593,
"children": {
"env_step": {
"total": 882.7797257649954,
"count": 27333,
"self": 684.5732354549532,
"children": {
"SubprocessEnvManager._take_step": {
"total": 197.58984855401786,
"count": 27333,
"self": 3.31205037402691,
"children": {
"TorchPolicy.evaluate": {
"total": 194.27779817999095,
"count": 27333,
"self": 194.27779817999095
}
}
},
"workers": {
"total": 0.6166417560243644,
"count": 27333,
"self": 0.0,
"children": {
"worker_root": {
"total": 885.6508180489927,
"count": 27333,
"is_parallel": true,
"self": 415.83291945696845,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.010056153999983053,
"count": 1,
"is_parallel": true,
"self": 0.007404675999794108,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002651478000188945,
"count": 10,
"is_parallel": true,
"self": 0.002651478000188945
}
}
},
"UnityEnvironment.step": {
"total": 0.05038445000002412,
"count": 1,
"is_parallel": true,
"self": 0.0009023380000598991,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004935370000112016,
"count": 1,
"is_parallel": true,
"self": 0.0004935370000112016
},
"communicator.exchange": {
"total": 0.046692716999984896,
"count": 1,
"is_parallel": true,
"self": 0.046692716999984896
},
"steps_from_proto": {
"total": 0.0022958579999681206,
"count": 1,
"is_parallel": true,
"self": 0.00044052200001942765,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001855335999948693,
"count": 10,
"is_parallel": true,
"self": 0.001855335999948693
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 469.8178985920243,
"count": 27332,
"is_parallel": true,
"self": 22.07962725902513,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 10.896487218998288,
"count": 27332,
"is_parallel": true,
"self": 10.896487218998288
},
"communicator.exchange": {
"total": 369.5647601339869,
"count": 27332,
"is_parallel": true,
"self": 369.5647601339869
},
"steps_from_proto": {
"total": 67.27702398001395,
"count": 27332,
"is_parallel": true,
"self": 13.141225008010622,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.13579897200333,
"count": 273320,
"is_parallel": true,
"self": 54.13579897200333
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0010648590000528202,
"count": 1,
"self": 0.0010648590000528202,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 873.2384768689427,
"count": 1161476,
"is_parallel": true,
"self": 25.968936939973105,
"children": {
"process_trajectory": {
"total": 465.7745810979703,
"count": 1161476,
"is_parallel": true,
"self": 464.33072010497034,
"children": {
"RLTrainer._checkpoint": {
"total": 1.443860992999987,
"count": 6,
"is_parallel": true,
"self": 1.443860992999987
}
}
},
"_update_policy": {
"total": 381.4949588309993,
"count": 136,
"is_parallel": true,
"self": 108.55094060999829,
"children": {
"TorchPPOOptimizer.update": {
"total": 272.944018221001,
"count": 6933,
"is_parallel": true,
"self": 272.944018221001
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.09979716799989546,
"count": 1,
"self": 0.0013788069998099672,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0984183610000855,
"count": 1,
"self": 0.0984183610000855
}
}
}
}
}
}
}