{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4045560359954834,
"min": 1.4045560359954834,
"max": 1.425719141960144,
"count": 32
},
"Huggy.Policy.Entropy.sum": {
"value": 69602.7734375,
"min": 68251.296875,
"max": 76696.25,
"count": 32
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 76.3560371517028,
"min": 75.96923076923076,
"max": 397.92857142857144,
"count": 32
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49326.0,
"min": 49302.0,
"max": 50139.0,
"count": 32
},
"Huggy.Step.mean": {
"value": 1599963.0,
"min": 49919.0,
"max": 1599963.0,
"count": 32
},
"Huggy.Step.sum": {
"value": 1599963.0,
"min": 49919.0,
"max": 1599963.0,
"count": 32
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.451035499572754,
"min": -0.030836891382932663,
"max": 2.5037331581115723,
"count": 32
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1583.368896484375,
"min": -3.854611396789551,
"max": 1585.8370361328125,
"count": 32
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.8952538632202445,
"min": 1.809530202627182,
"max": 4.063939113365977,
"count": 32
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2516.333995640278,
"min": 226.19127532839775,
"max": 2551.130595564842,
"count": 32
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.8952538632202445,
"min": 1.809530202627182,
"max": 4.063939113365977,
"count": 32
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2516.333995640278,
"min": 226.19127532839775,
"max": 2551.130595564842,
"count": 32
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.014203194167506202,
"min": 0.013755661541099092,
"max": 0.02048794745824125,
"count": 32
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.028406388335012404,
"min": 0.027511323082198183,
"max": 0.05948689594612612,
"count": 32
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05697564588238796,
"min": 0.02355757883439461,
"max": 0.06127499112238487,
"count": 32
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.11395129176477592,
"min": 0.04711515766878922,
"max": 0.17602411955595015,
"count": 32
},
"Huggy.Policy.LearningRate.mean": {
"value": 6.381360372882498e-05,
"min": 6.381360372882498e-05,
"max": 0.00029533845155385,
"count": 32
},
"Huggy.Policy.LearningRate.sum": {
"value": 0.00012762720745764997,
"min": 0.00012762720745764997,
"max": 0.0008438011687329501,
"count": 32
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.121271175,
"min": 0.121271175,
"max": 0.19844615000000004,
"count": 32
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.24254235,
"min": 0.24254235,
"max": 0.5812670500000001,
"count": 32
},
"Huggy.Policy.Beta.mean": {
"value": 0.0010714316325000002,
"min": 0.0010714316325000002,
"max": 0.004922462885000001,
"count": 32
},
"Huggy.Policy.Beta.sum": {
"value": 0.0021428632650000003,
"min": 0.0021428632650000003,
"max": 0.014065225795,
"count": 32
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 32
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 32
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1741739045",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/root/autodl-tmp/miniconda3/envs/hfrl/bin/mlagents-learn ml-agents/config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy2 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1741741669"
},
"total": 2623.398121515289,
"count": 1,
"self": 0.3876295741647482,
"children": {
"run_training.setup": {
"total": 0.0327600073069334,
"count": 1,
"self": 0.0327600073069334
},
"TrainerController.start_learning": {
"total": 2622.9777319338173,
"count": 1,
"self": 3.44746271148324,
"children": {
"TrainerController._reset_env": {
"total": 2.869343815371394,
"count": 1,
"self": 2.869343815371394
},
"TrainerController.advance": {
"total": 2616.5745798535645,
"count": 189235,
"self": 3.2792701479047537,
"children": {
"env_step": {
"total": 2260.752312771976,
"count": 189235,
"self": 1977.9905481878668,
"children": {
"SubprocessEnvManager._take_step": {
"total": 280.4348835349083,
"count": 189235,
"self": 11.372258115559816,
"children": {
"TorchPolicy.evaluate": {
"total": 269.0626254193485,
"count": 181239,
"self": 269.0626254193485
}
}
},
"workers": {
"total": 2.3268810492008924,
"count": 189235,
"self": 0.0,
"children": {
"worker_root": {
"total": 2613.1423880588263,
"count": 189235,
"is_parallel": true,
"self": 900.9339048750699,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0012963395565748215,
"count": 1,
"is_parallel": true,
"self": 0.00035632215440273285,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009400174021720886,
"count": 2,
"is_parallel": true,
"self": 0.0009400174021720886
}
}
},
"UnityEnvironment.step": {
"total": 0.031056590378284454,
"count": 1,
"is_parallel": true,
"self": 0.0003698933869600296,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00036738812923431396,
"count": 1,
"is_parallel": true,
"self": 0.00036738812923431396
},
"communicator.exchange": {
"total": 0.028848940506577492,
"count": 1,
"is_parallel": true,
"self": 0.028848940506577492
},
"steps_from_proto": {
"total": 0.001470368355512619,
"count": 1,
"is_parallel": true,
"self": 0.0003799349069595337,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010904334485530853,
"count": 2,
"is_parallel": true,
"self": 0.0010904334485530853
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1712.2084831837565,
"count": 189234,
"is_parallel": true,
"self": 52.45978309959173,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 112.28371349535882,
"count": 189234,
"is_parallel": true,
"self": 112.28371349535882
},
"communicator.exchange": {
"total": 1427.5770109836012,
"count": 189234,
"is_parallel": true,
"self": 1427.5770109836012
},
"steps_from_proto": {
"total": 119.8879756052047,
"count": 189234,
"is_parallel": true,
"self": 45.31914711929858,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.56882848590612,
"count": 378468,
"is_parallel": true,
"self": 74.56882848590612
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 352.54299693368375,
"count": 189235,
"self": 4.600446682423353,
"children": {
"process_trajectory": {
"total": 124.12604659982026,
"count": 189235,
"self": 123.48473713174462,
"children": {
"RLTrainer._checkpoint": {
"total": 0.641309468075633,
"count": 8,
"self": 0.641309468075633
}
}
},
"_update_policy": {
"total": 223.81650365144014,
"count": 79,
"self": 181.82267162017524,
"children": {
"TorchPPOOptimizer.update": {
"total": 41.9938320312649,
"count": 2340,
"self": 41.9938320312649
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0356307029724121e-06,
"count": 1,
"self": 1.0356307029724121e-06
},
"TrainerController._save_models": {
"total": 0.08634451776742935,
"count": 1,
"self": 0.0009008254855871201,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08544369228184223,
"count": 1,
"self": 0.08544369228184223
}
}
}
}
}
}
}