{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9284960031509399,
"min": 0.9284960031509399,
"max": 2.866152286529541,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8824.42578125,
"min": 8824.42578125,
"max": 29257.681640625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.097471237182617,
"min": 0.18841716647148132,
"max": 13.097471237182617,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2554.0068359375,
"min": 36.55292892456055,
"max": 2654.40283203125,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07075534365453766,
"min": 0.06311876177913327,
"max": 0.07416931517375404,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.28302137461815063,
"min": 0.25247504711653307,
"max": 0.3652134789587637,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20408615692719526,
"min": 0.11328667096163639,
"max": 0.3025279199229736,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.816344627708781,
"min": 0.45314668384654555,
"max": 1.4447184339457868,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 26.09090909090909,
"min": 3.159090909090909,
"max": 26.09090909090909,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1148.0,
"min": 139.0,
"max": 1422.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 26.09090909090909,
"min": 3.159090909090909,
"max": 26.09090909090909,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1148.0,
"min": 139.0,
"max": 1422.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739269668",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739270081"
},
"total": 412.986179819,
"count": 1,
"self": 0.44139238999991903,
"children": {
"run_training.setup": {
"total": 0.024023426999974618,
"count": 1,
"self": 0.024023426999974618
},
"TrainerController.start_learning": {
"total": 412.5207640020001,
"count": 1,
"self": 0.3019091959697562,
"children": {
"TrainerController._reset_env": {
"total": 3.522105096999894,
"count": 1,
"self": 3.522105096999894
},
"TrainerController.advance": {
"total": 408.6110043610306,
"count": 18192,
"self": 0.34584552101432564,
"children": {
"env_step": {
"total": 287.08830194903317,
"count": 18192,
"self": 218.47415363400023,
"children": {
"SubprocessEnvManager._take_step": {
"total": 68.42678944698378,
"count": 18192,
"self": 1.1948726379912387,
"children": {
"TorchPolicy.evaluate": {
"total": 67.23191680899254,
"count": 18192,
"self": 67.23191680899254
}
}
},
"workers": {
"total": 0.18735886804915935,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 411.010822681988,
"count": 18192,
"is_parallel": true,
"self": 219.38780775800933,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005318173999967257,
"count": 1,
"is_parallel": true,
"self": 0.0037966080001297087,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015215659998375486,
"count": 10,
"is_parallel": true,
"self": 0.0015215659998375486
}
}
},
"UnityEnvironment.step": {
"total": 0.058729175000053147,
"count": 1,
"is_parallel": true,
"self": 0.0005813730001591466,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00037887600001340616,
"count": 1,
"is_parallel": true,
"self": 0.00037887600001340616
},
"communicator.exchange": {
"total": 0.0559685629998512,
"count": 1,
"is_parallel": true,
"self": 0.0559685629998512
},
"steps_from_proto": {
"total": 0.0018003630000293924,
"count": 1,
"is_parallel": true,
"self": 0.0003799070000241045,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001420456000005288,
"count": 10,
"is_parallel": true,
"self": 0.001420456000005288
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 191.62301492397864,
"count": 18191,
"is_parallel": true,
"self": 9.481189966960073,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.314903814012496,
"count": 18191,
"is_parallel": true,
"self": 5.314903814012496
},
"communicator.exchange": {
"total": 146.80906143398602,
"count": 18191,
"is_parallel": true,
"self": 146.80906143398602
},
"steps_from_proto": {
"total": 30.01785970902006,
"count": 18191,
"is_parallel": true,
"self": 5.19137117092464,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.82648853809542,
"count": 181910,
"is_parallel": true,
"self": 24.82648853809542
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 121.17685689098312,
"count": 18192,
"self": 0.3663920200103803,
"children": {
"process_trajectory": {
"total": 26.248867436971977,
"count": 18192,
"self": 25.715260669972395,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5336067669995828,
"count": 4,
"self": 0.5336067669995828
}
}
},
"_update_policy": {
"total": 94.56159743400076,
"count": 90,
"self": 37.97110824801007,
"children": {
"TorchPPOOptimizer.update": {
"total": 56.59048918599069,
"count": 4587,
"self": 56.59048918599069
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.590000106778461e-07,
"count": 1,
"self": 8.590000106778461e-07
},
"TrainerController._save_models": {
"total": 0.0857444889998078,
"count": 1,
"self": 0.0008655679998810228,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08487892099992678,
"count": 1,
"self": 0.08487892099992678
}
}
}
}
}
}
}