{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407737135887146,
"min": 1.407737135887146,
"max": 1.4283111095428467,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71475.0390625,
"min": 69567.625,
"max": 76670.21875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.62323943661971,
"min": 75.46788990825688,
"max": 394.755905511811,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49202.0,
"min": 48942.0,
"max": 50153.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999945.0,
"min": 49545.0,
"max": 1999945.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999945.0,
"min": 49545.0,
"max": 1999945.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4941837787628174,
"min": 0.08929670602083206,
"max": 2.5023579597473145,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1416.6964111328125,
"min": 11.251384735107422,
"max": 1618.9415283203125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.867357066924303,
"min": 1.775967292960674,
"max": 4.051920339561278,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2196.6588140130043,
"min": 223.77187891304493,
"max": 2527.5081028342247,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.867357066924303,
"min": 1.775967292960674,
"max": 4.051920339561278,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2196.6588140130043,
"min": 223.77187891304493,
"max": 2527.5081028342247,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.017274779772318043,
"min": 0.01404156981850267,
"max": 0.020442034504473364,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051824339316954134,
"min": 0.028143292541305223,
"max": 0.05664533710029597,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.04885669015347958,
"min": 0.02034824586783846,
"max": 0.0634689266482989,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.14657007046043874,
"min": 0.04069649173567692,
"max": 0.18914192902545135,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.8076987308000004e-06,
"min": 3.8076987308000004e-06,
"max": 0.0002953704015432,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1423096192400002e-05,
"min": 1.1423096192400002e-05,
"max": 0.0008438301187232999,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10126920000000002,
"min": 0.10126920000000002,
"max": 0.19845680000000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30380760000000007,
"min": 0.20767205000000005,
"max": 0.5812767,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.333308000000003e-05,
"min": 7.333308000000003e-05,
"max": 0.004922994320000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002199992400000001,
"min": 0.0002199992400000001,
"max": 0.014065707330000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673507702",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673509924"
},
"total": 2222.627279431,
"count": 1,
"self": 0.40044932599994354,
"children": {
"run_training.setup": {
"total": 0.10836598599962599,
"count": 1,
"self": 0.10836598599962599
},
"TrainerController.start_learning": {
"total": 2222.1184641190002,
"count": 1,
"self": 3.8752618601074573,
"children": {
"TrainerController._reset_env": {
"total": 8.89318898900001,
"count": 1,
"self": 8.89318898900001
},
"TrainerController.advance": {
"total": 2209.239492424893,
"count": 233144,
"self": 4.040084092038342,
"children": {
"env_step": {
"total": 1743.1198361158258,
"count": 233144,
"self": 1460.388395498423,
"children": {
"SubprocessEnvManager._take_step": {
"total": 280.13124156619233,
"count": 233144,
"self": 14.405490250146613,
"children": {
"TorchPolicy.evaluate": {
"total": 265.7257513160457,
"count": 223068,
"self": 65.67057021317123,
"children": {
"TorchPolicy.sample_actions": {
"total": 200.05518110287449,
"count": 223068,
"self": 200.05518110287449
}
}
}
}
},
"workers": {
"total": 2.6001990512104385,
"count": 233144,
"self": 0.0,
"children": {
"worker_root": {
"total": 2214.085864111855,
"count": 233144,
"is_parallel": true,
"self": 1017.4238119805605,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002046966000307293,
"count": 1,
"is_parallel": true,
"self": 0.00029127500056347344,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017556909997438197,
"count": 2,
"is_parallel": true,
"self": 0.0017556909997438197
}
}
},
"UnityEnvironment.step": {
"total": 0.029703636000704137,
"count": 1,
"is_parallel": true,
"self": 0.0002774230015347712,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00022655799966742052,
"count": 1,
"is_parallel": true,
"self": 0.00022655799966742052
},
"communicator.exchange": {
"total": 0.028259757999876456,
"count": 1,
"is_parallel": true,
"self": 0.028259757999876456
},
"steps_from_proto": {
"total": 0.0009398969996254891,
"count": 1,
"is_parallel": true,
"self": 0.0004271789994163555,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005127180002091336,
"count": 2,
"is_parallel": true,
"self": 0.0005127180002091336
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1196.6620521312943,
"count": 233143,
"is_parallel": true,
"self": 34.0133164375593,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.39001766980073,
"count": 233143,
"is_parallel": true,
"self": 77.39001766980073
},
"communicator.exchange": {
"total": 988.8165375190329,
"count": 233143,
"is_parallel": true,
"self": 988.8165375190329
},
"steps_from_proto": {
"total": 96.44218050490144,
"count": 233143,
"is_parallel": true,
"self": 42.30157201637212,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.140608488529324,
"count": 466286,
"is_parallel": true,
"self": 54.140608488529324
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 462.0795722170287,
"count": 233144,
"self": 5.807121670744891,
"children": {
"process_trajectory": {
"total": 147.85930028327584,
"count": 233144,
"self": 146.69630041627715,
"children": {
"RLTrainer._checkpoint": {
"total": 1.162999866998689,
"count": 10,
"self": 1.162999866998689
}
}
},
"_update_policy": {
"total": 308.413150263008,
"count": 97,
"self": 255.5242802840021,
"children": {
"TorchPPOOptimizer.update": {
"total": 52.888869979005904,
"count": 2910,
"self": 52.888869979005904
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.299994078697637e-07,
"count": 1,
"self": 8.299994078697637e-07
},
"TrainerController._save_models": {
"total": 0.11052001500047481,
"count": 1,
"self": 0.001972339000531065,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10854767599994375,
"count": 1,
"self": 0.10854767599994375
}
}
}
}
}
}
}