{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4089908599853516,
"min": 1.4089908599853516,
"max": 1.4286081790924072,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71733.1328125,
"min": 69138.109375,
"max": 75911.4140625,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.22279792746114,
"min": 74.80424886191199,
"max": 403.1209677419355,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49344.0,
"min": 48776.0,
"max": 50014.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999986.0,
"min": 49482.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999986.0,
"min": 49482.0,
"max": 1999986.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4575352668762207,
"min": 0.11437847465276718,
"max": 2.523136854171753,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1422.9129638671875,
"min": 14.068552017211914,
"max": 1648.468994140625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.7954320578171594,
"min": 1.9219772631559915,
"max": 3.996921558296535,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2197.5551614761353,
"min": 236.40320336818695,
"max": 2621.980542242527,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.7954320578171594,
"min": 1.9219772631559915,
"max": 3.996921558296535,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2197.5551614761353,
"min": 236.40320336818695,
"max": 2621.980542242527,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01441182480015818,
"min": 0.013456550870129529,
"max": 0.0202193695595876,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.04323547440047454,
"min": 0.026913101740259057,
"max": 0.05565977489459329,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05430406447913912,
"min": 0.021423793770372866,
"max": 0.06220193120340506,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16291219343741736,
"min": 0.04284758754074573,
"max": 0.18660579361021518,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.4233988589000077e-06,
"min": 3.4233988589000077e-06,
"max": 0.000295348126550625,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0270196576700023e-05,
"min": 1.0270196576700023e-05,
"max": 0.0008440401186533001,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10114110000000004,
"min": 0.10114110000000004,
"max": 0.19844937500000004,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3034233000000001,
"min": 0.207443,
"max": 0.5813466999999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.694089000000011e-05,
"min": 6.694089000000011e-05,
"max": 0.0049226238125,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00020082267000000033,
"min": 0.00020082267000000033,
"max": 0.01406920033,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1673940976",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1673943185"
},
"total": 2208.899489445,
"count": 1,
"self": 0.3890462910003407,
"children": {
"run_training.setup": {
"total": 0.10588949800001046,
"count": 1,
"self": 0.10588949800001046
},
"TrainerController.start_learning": {
"total": 2208.4045536559997,
"count": 1,
"self": 3.6718158409425996,
"children": {
"TrainerController._reset_env": {
"total": 10.807640351000032,
"count": 1,
"self": 10.807640351000032
},
"TrainerController.advance": {
"total": 2193.803664413057,
"count": 233052,
"self": 3.825498914099626,
"children": {
"env_step": {
"total": 1714.2868432879918,
"count": 233052,
"self": 1443.7482235489201,
"children": {
"SubprocessEnvManager._take_step": {
"total": 267.9379304760064,
"count": 233052,
"self": 14.033431763137173,
"children": {
"TorchPolicy.evaluate": {
"total": 253.9044987128692,
"count": 223058,
"self": 64.4238268527605,
"children": {
"TorchPolicy.sample_actions": {
"total": 189.4806718601087,
"count": 223058,
"self": 189.4806718601087
}
}
}
}
},
"workers": {
"total": 2.600689263065192,
"count": 233052,
"self": 0.0,
"children": {
"worker_root": {
"total": 2200.3472323559213,
"count": 233052,
"is_parallel": true,
"self": 1011.2540562859062,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0025633019999986573,
"count": 1,
"is_parallel": true,
"self": 0.00033146100008707435,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002231840999911583,
"count": 2,
"is_parallel": true,
"self": 0.002231840999911583
}
}
},
"UnityEnvironment.step": {
"total": 0.03057272700004887,
"count": 1,
"is_parallel": true,
"self": 0.00032907600007092697,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00019034100000681065,
"count": 1,
"is_parallel": true,
"self": 0.00019034100000681065
},
"communicator.exchange": {
"total": 0.029113465000023098,
"count": 1,
"is_parallel": true,
"self": 0.029113465000023098
},
"steps_from_proto": {
"total": 0.0009398449999480363,
"count": 1,
"is_parallel": true,
"self": 0.0002724469999293433,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000667398000018693,
"count": 2,
"is_parallel": true,
"self": 0.000667398000018693
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1189.093176070015,
"count": 233051,
"is_parallel": true,
"self": 34.46944611091794,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 74.79437374604981,
"count": 233051,
"is_parallel": true,
"self": 74.79437374604981
},
"communicator.exchange": {
"total": 987.8386840929874,
"count": 233051,
"is_parallel": true,
"self": 987.8386840929874
},
"steps_from_proto": {
"total": 91.99067212005991,
"count": 233051,
"is_parallel": true,
"self": 37.948377674984044,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.04229444507587,
"count": 466102,
"is_parallel": true,
"self": 54.04229444507587
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 475.6913222109656,
"count": 233052,
"self": 5.9311087639434845,
"children": {
"process_trajectory": {
"total": 151.92154480702231,
"count": 233052,
"self": 150.76748182102244,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1540629859998717,
"count": 10,
"self": 1.1540629859998717
}
}
},
"_update_policy": {
"total": 317.8386686399998,
"count": 97,
"self": 264.35880230098064,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.47986633901917,
"count": 2910,
"self": 53.47986633901917
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.000001798791345e-07,
"count": 1,
"self": 8.000001798791345e-07
},
"TrainerController._save_models": {
"total": 0.12143225099998745,
"count": 1,
"self": 0.00201408300017647,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11941816799981098,
"count": 1,
"self": 0.11941816799981098
}
}
}
}
}
}
}