{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.40640127658844,
"min": 1.40640127658844,
"max": 1.428126573562622,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 69888.296875,
"min": 68543.796875,
"max": 77244.953125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 110.65548098434004,
"min": 90.71272727272728,
"max": 367.1617647058824,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49463.0,
"min": 48890.0,
"max": 50134.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999959.0,
"min": 49619.0,
"max": 1999959.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999959.0,
"min": 49619.0,
"max": 1999959.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.259868621826172,
"min": 0.024314260110259056,
"max": 2.3957583904266357,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1010.1612548828125,
"min": 3.2824251651763916,
"max": 1269.986083984375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.449675091694399,
"min": 1.8349797264293388,
"max": 3.8419793492869325,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1542.0047659873962,
"min": 247.72226306796074,
"max": 1987.8381706476212,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.449675091694399,
"min": 1.8349797264293388,
"max": 3.8419793492869325,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1542.0047659873962,
"min": 247.72226306796074,
"max": 1987.8381706476212,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018353146503310803,
"min": 0.014020303914690481,
"max": 0.02018960216276658,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.03670629300662161,
"min": 0.02870938188716536,
"max": 0.056857699262521544,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04550258523474137,
"min": 0.021403061443318924,
"max": 0.0599461384738485,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.09100517046948274,
"min": 0.04280612288663785,
"max": 0.1792838905006647,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 4.5810984730000025e-06,
"min": 4.5810984730000025e-06,
"max": 0.00029534332655222503,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.162196946000005e-06,
"min": 9.162196946000005e-06,
"max": 0.0008441842686052498,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10152699999999999,
"min": 0.10152699999999999,
"max": 0.19844777500000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.20305399999999998,
"min": 0.20305399999999998,
"max": 0.5813947500000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 8.619730000000005e-05,
"min": 8.619730000000005e-05,
"max": 0.0049225439725,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0001723946000000001,
"min": 0.0001723946000000001,
"max": 0.014071598024999996,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1708942296",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1708944779"
},
"total": 2483.358001142,
"count": 1,
"self": 0.4916626450003605,
"children": {
"run_training.setup": {
"total": 0.07332805600003667,
"count": 1,
"self": 0.07332805600003667
},
"TrainerController.start_learning": {
"total": 2482.7930104409998,
"count": 1,
"self": 4.536968991000776,
"children": {
"TrainerController._reset_env": {
"total": 3.458884448000049,
"count": 1,
"self": 3.458884448000049
},
"TrainerController.advance": {
"total": 2474.6863484459986,
"count": 231401,
"self": 4.876908049086978,
"children": {
"env_step": {
"total": 1997.2040966239822,
"count": 231401,
"self": 1651.8817307028116,
"children": {
"SubprocessEnvManager._take_step": {
"total": 342.2658061671119,
"count": 231401,
"self": 18.13718358905828,
"children": {
"TorchPolicy.evaluate": {
"total": 324.12862257805364,
"count": 222910,
"self": 324.12862257805364
}
}
},
"workers": {
"total": 3.056559754058526,
"count": 231401,
"self": 0.0,
"children": {
"worker_root": {
"total": 2475.3701928259857,
"count": 231401,
"is_parallel": true,
"self": 1131.2825527150012,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010329130000172881,
"count": 1,
"is_parallel": true,
"self": 0.0002971339999930933,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007357790000241948,
"count": 2,
"is_parallel": true,
"self": 0.0007357790000241948
}
}
},
"UnityEnvironment.step": {
"total": 0.030860660000030293,
"count": 1,
"is_parallel": true,
"self": 0.00038811500007795985,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00024359400003959308,
"count": 1,
"is_parallel": true,
"self": 0.00024359400003959308
},
"communicator.exchange": {
"total": 0.029449403999933566,
"count": 1,
"is_parallel": true,
"self": 0.029449403999933566
},
"steps_from_proto": {
"total": 0.0007795469999791749,
"count": 1,
"is_parallel": true,
"self": 0.00020119499993143108,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005783520000477438,
"count": 2,
"is_parallel": true,
"self": 0.0005783520000477438
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1344.0876401109845,
"count": 231400,
"is_parallel": true,
"self": 42.27545100105522,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 89.56751782291815,
"count": 231400,
"is_parallel": true,
"self": 89.56751782291815
},
"communicator.exchange": {
"total": 1116.525193694033,
"count": 231400,
"is_parallel": true,
"self": 1116.525193694033
},
"steps_from_proto": {
"total": 95.71947759297814,
"count": 231400,
"is_parallel": true,
"self": 35.50327804002052,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.21619955295762,
"count": 462800,
"is_parallel": true,
"self": 60.21619955295762
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 472.6053437729296,
"count": 231401,
"self": 7.076953399923923,
"children": {
"process_trajectory": {
"total": 153.01565766700685,
"count": 231401,
"self": 151.68784104100712,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3278166259997306,
"count": 10,
"self": 1.3278166259997306
}
}
},
"_update_policy": {
"total": 312.5127327059988,
"count": 96,
"self": 250.51810205399522,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.99463065200359,
"count": 2880,
"self": 61.99463065200359
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0580001799098682e-06,
"count": 1,
"self": 1.0580001799098682e-06
},
"TrainerController._save_models": {
"total": 0.11080749800021295,
"count": 1,
"self": 0.0018707379999796103,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10893676000023333,
"count": 1,
"self": 0.10893676000023333
}
}
}
}
}
}
}