{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8076406121253967,
"min": 0.8076406121253967,
"max": 2.8677656650543213,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 7720.236328125,
"min": 7720.236328125,
"max": 29431.87890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.973052024841309,
"min": 0.3029351234436035,
"max": 12.973052024841309,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2529.7451171875,
"min": 58.76941680908203,
"max": 2630.764892578125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0656531876962867,
"min": 0.06359905676214005,
"max": 0.07575434307321248,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2626127507851468,
"min": 0.2543962270485602,
"max": 0.3787717153660624,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.22919858831400963,
"min": 0.11734187111427424,
"max": 0.27680956762210995,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9167943532560385,
"min": 0.469367484457097,
"max": 1.3840478381105497,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.454545454545453,
"min": 3.5454545454545454,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1120.0,
"min": 156.0,
"max": 1407.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.454545454545453,
"min": 3.5454545454545454,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1120.0,
"min": 156.0,
"max": 1407.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1751681054",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.1+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1751681656"
},
"total": 601.9221062020001,
"count": 1,
"self": 0.5394454160002624,
"children": {
"run_training.setup": {
"total": 0.0275811959999146,
"count": 1,
"self": 0.0275811959999146
},
"TrainerController.start_learning": {
"total": 601.35507959,
"count": 1,
"self": 0.9645992230103957,
"children": {
"TrainerController._reset_env": {
"total": 2.17607069099995,
"count": 1,
"self": 2.17607069099995
},
"TrainerController.advance": {
"total": 598.1364124299896,
"count": 18202,
"self": 0.4540480029874061,
"children": {
"env_step": {
"total": 597.6823644270022,
"count": 18202,
"self": 463.39356516798216,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.89007067300724,
"count": 18202,
"self": 2.5014697150025995,
"children": {
"TorchPolicy.evaluate": {
"total": 131.38860095800464,
"count": 18202,
"self": 131.38860095800464
}
}
},
"workers": {
"total": 0.39872858601279404,
"count": 18202,
"self": 0.0,
"children": {
"worker_root": {
"total": 599.116084046003,
"count": 18202,
"is_parallel": true,
"self": 276.8646271190165,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002754729999992378,
"count": 1,
"is_parallel": true,
"self": 0.000895887000183393,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001858842999808985,
"count": 10,
"is_parallel": true,
"self": 0.001858842999808985
}
}
},
"UnityEnvironment.step": {
"total": 0.04453338099995108,
"count": 1,
"is_parallel": true,
"self": 0.0006901849999394472,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004301310000300873,
"count": 1,
"is_parallel": true,
"self": 0.0004301310000300873
},
"communicator.exchange": {
"total": 0.04120604800004912,
"count": 1,
"is_parallel": true,
"self": 0.04120604800004912
},
"steps_from_proto": {
"total": 0.0022070169999324207,
"count": 1,
"is_parallel": true,
"self": 0.00042593399996349035,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017810829999689304,
"count": 10,
"is_parallel": true,
"self": 0.0017810829999689304
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 322.2514569269865,
"count": 18201,
"is_parallel": true,
"self": 13.74442500898192,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.487013504001084,
"count": 18201,
"is_parallel": true,
"self": 7.487013504001084
},
"communicator.exchange": {
"total": 257.27287328400064,
"count": 18201,
"is_parallel": true,
"self": 257.27287328400064
},
"steps_from_proto": {
"total": 43.74714513000288,
"count": 18201,
"is_parallel": true,
"self": 8.795492252999452,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.951652877003426,
"count": 182010,
"is_parallel": true,
"self": 34.951652877003426
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00025124800004050485,
"count": 1,
"self": 0.00025124800004050485,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 591.165092812098,
"count": 796274,
"is_parallel": true,
"self": 18.080837445200814,
"children": {
"process_trajectory": {
"total": 319.550494337897,
"count": 796274,
"is_parallel": true,
"self": 318.91754650189694,
"children": {
"RLTrainer._checkpoint": {
"total": 0.6329478360000849,
"count": 4,
"is_parallel": true,
"self": 0.6329478360000849
}
}
},
"_update_policy": {
"total": 253.53376102900017,
"count": 90,
"is_parallel": true,
"self": 66.02715142599197,
"children": {
"TorchPPOOptimizer.update": {
"total": 187.5066096030082,
"count": 4587,
"is_parallel": true,
"self": 187.5066096030082
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.07774599799995485,
"count": 1,
"self": 0.001092300999971485,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07665369699998337,
"count": 1,
"self": 0.07665369699998337
}
}
}
}
}
}
}