{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.401271104812622,
"min": 1.401271104812622,
"max": 1.4247978925704956,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70786.609375,
"min": 68869.9296875,
"max": 76701.7265625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 82.61371237458194,
"min": 74.8,
"max": 393.6746031746032,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49403.0,
"min": 48781.0,
"max": 50211.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999944.0,
"min": 49935.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999944.0,
"min": 49935.0,
"max": 1999944.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4648239612579346,
"min": 0.15126533806324005,
"max": 2.5035064220428467,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1473.9647216796875,
"min": 19.210697174072266,
"max": 1603.172119140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7600274683041714,
"min": 1.7693142158778634,
"max": 4.064420328938902,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2248.4964260458946,
"min": 224.70290541648865,
"max": 2562.709665775299,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7600274683041714,
"min": 1.7693142158778634,
"max": 4.064420328938902,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2248.4964260458946,
"min": 224.70290541648865,
"max": 2562.709665775299,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.015879956372534962,
"min": 0.013000445743576467,
"max": 0.02029799896020753,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.047639869117604886,
"min": 0.026000891487152934,
"max": 0.05927636543201516,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.055224999992383846,
"min": 0.021245359008510906,
"max": 0.06401197032796012,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16567499997715154,
"min": 0.04249071801702181,
"max": 0.19203591098388037,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6417987860999927e-06,
"min": 3.6417987860999927e-06,
"max": 0.00029535165154944993,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0925396358299978e-05,
"min": 1.0925396358299978e-05,
"max": 0.0008441656686114501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10121390000000001,
"min": 0.10121390000000001,
"max": 0.19845055,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036417,
"min": 0.20758734999999998,
"max": 0.58138855,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.057360999999988e-05,
"min": 7.057360999999988e-05,
"max": 0.0049226824450000015,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021172082999999965,
"min": 0.00021172082999999965,
"max": 0.014071288645000003,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673887152",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673889469"
},
"total": 2316.818680417,
"count": 1,
"self": 0.43675061900012224,
"children": {
"run_training.setup": {
"total": 0.11803073599998015,
"count": 1,
"self": 0.11803073599998015
},
"TrainerController.start_learning": {
"total": 2316.263899062,
"count": 1,
"self": 3.895096869144254,
"children": {
"TrainerController._reset_env": {
"total": 11.470378738000022,
"count": 1,
"self": 11.470378738000022
},
"TrainerController.advance": {
"total": 2300.7747537758555,
"count": 233069,
"self": 4.409879334877587,
"children": {
"env_step": {
"total": 1812.896476675992,
"count": 233069,
"self": 1525.7646598369868,
"children": {
"SubprocessEnvManager._take_step": {
"total": 284.34526538693774,
"count": 233069,
"self": 14.675529557985726,
"children": {
"TorchPolicy.evaluate": {
"total": 269.669735828952,
"count": 223022,
"self": 66.98246026799592,
"children": {
"TorchPolicy.sample_actions": {
"total": 202.6872755609561,
"count": 223022,
"self": 202.6872755609561
}
}
}
}
},
"workers": {
"total": 2.786551452067556,
"count": 233069,
"self": 0.0,
"children": {
"worker_root": {
"total": 2308.2862707009626,
"count": 233069,
"is_parallel": true,
"self": 1051.3667119860434,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021972320000145373,
"count": 1,
"is_parallel": true,
"self": 0.00045715999999629275,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017400720000182446,
"count": 2,
"is_parallel": true,
"self": 0.0017400720000182446
}
}
},
"UnityEnvironment.step": {
"total": 0.029885167000031743,
"count": 1,
"is_parallel": true,
"self": 0.00028927200003181497,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0001928959999872859,
"count": 1,
"is_parallel": true,
"self": 0.0001928959999872859
},
"communicator.exchange": {
"total": 0.028597122999997282,
"count": 1,
"is_parallel": true,
"self": 0.028597122999997282
},
"steps_from_proto": {
"total": 0.0008058760000153598,
"count": 1,
"is_parallel": true,
"self": 0.00027432499996393744,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005315510000514223,
"count": 2,
"is_parallel": true,
"self": 0.0005315510000514223
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1256.9195587149193,
"count": 233068,
"is_parallel": true,
"self": 36.64620629607248,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.20174580287073,
"count": 233068,
"is_parallel": true,
"self": 80.20174580287073
},
"communicator.exchange": {
"total": 1039.2389494489978,
"count": 233068,
"is_parallel": true,
"self": 1039.2389494489978
},
"steps_from_proto": {
"total": 100.83265716697832,
"count": 233068,
"is_parallel": true,
"self": 40.153329505806994,
"children": {
"_process_rank_one_or_two_observation": {
"total": 60.67932766117133,
"count": 466136,
"is_parallel": true,
"self": 60.67932766117133
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 483.4683977649859,
"count": 233069,
"self": 6.051274024920929,
"children": {
"process_trajectory": {
"total": 151.52955540306698,
"count": 233069,
"self": 150.25107906606763,
"children": {
"RLTrainer._checkpoint": {
"total": 1.278476336999347,
"count": 10,
"self": 1.278476336999347
}
}
},
"_update_policy": {
"total": 325.887568336998,
"count": 97,
"self": 272.0473575640006,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.84021077299741,
"count": 2910,
"self": 53.84021077299741
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.1649999578366987e-06,
"count": 1,
"self": 1.1649999578366987e-06
},
"TrainerController._save_models": {
"total": 0.12366851400020096,
"count": 1,
"self": 0.0021323320002011315,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12153618199999983,
"count": 1,
"self": 0.12153618199999983
}
}
}
}
}
}
}