{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.2638106644153595,
"min": 0.2638106644153595,
"max": 1.446632981300354,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 7990.2978515625,
"min": 7928.2802734375,
"max": 43885.05859375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989874.0,
"min": 29952.0,
"max": 989874.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989874.0,
"min": 29952.0,
"max": 989874.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6766624450683594,
"min": -0.08274572342634201,
"max": 0.6766624450683594,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 190.81881713867188,
"min": -19.94171905517578,
"max": 190.81881713867188,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.023598728701472282,
"min": -0.012456174939870834,
"max": 0.3302382230758667,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 6.654841423034668,
"min": -3.338254928588867,
"max": 79.58740997314453,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06886424190274232,
"min": 0.06532289323513397,
"max": 0.07181334247623182,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0329636285411348,
"min": 0.47673062887578005,
"max": 1.0756257738491208,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014594345993181275,
"min": 0.001543593980523438,
"max": 0.015934420966530322,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21891518989771913,
"min": 0.015541871055763202,
"max": 0.23901631449795482,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.5023574992466726e-06,
"min": 7.5023574992466726e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011253536248870009,
"min": 0.00011253536248870009,
"max": 0.0033754480748507,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250075333333335,
"min": 0.10250075333333335,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5375113000000002,
"min": 1.3886848,
"max": 2.4846047,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025982525800000015,
"min": 0.00025982525800000015,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038973788700000025,
"min": 0.0038973788700000025,
"max": 0.11253241506999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.015087615698575974,
"min": 0.015087615698575974,
"max": 0.48296722769737244,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2263142317533493,
"min": 0.2187182903289795,
"max": 3.380770683288574,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 282.28846153846155,
"min": 282.28846153846155,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29358.0,
"min": 15984.0,
"max": 33093.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.698471137465766,
"min": -1.0000000521540642,
"max": 1.698471137465766,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 176.64099829643965,
"min": -30.72800175845623,
"max": 176.64099829643965,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.698471137465766,
"min": -1.0000000521540642,
"max": 1.698471137465766,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 176.64099829643965,
"min": -30.72800175845623,
"max": 176.64099829643965,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.044099136679925814,
"min": 0.044099136679925814,
"max": 9.73774465546012,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.5863102147122845,
"min": 4.5863102147122845,
"max": 155.8039144873619,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1695999831",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1696002358"
},
"total": 2527.2707236970004,
"count": 1,
"self": 0.5276847190002627,
"children": {
"run_training.setup": {
"total": 0.07260956699974486,
"count": 1,
"self": 0.07260956699974486
},
"TrainerController.start_learning": {
"total": 2526.6704294110004,
"count": 1,
"self": 1.9116735739767137,
"children": {
"TrainerController._reset_env": {
"total": 5.716541465999853,
"count": 1,
"self": 5.716541465999853
},
"TrainerController.advance": {
"total": 2518.938434671024,
"count": 63916,
"self": 1.7979187530268064,
"children": {
"env_step": {
"total": 1808.8100078860343,
"count": 63916,
"self": 1674.148304615917,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.55172982204613,
"count": 63916,
"self": 5.588812453088394,
"children": {
"TorchPolicy.evaluate": {
"total": 127.96291736895773,
"count": 62569,
"self": 127.96291736895773
}
}
},
"workers": {
"total": 1.1099734480712868,
"count": 63916,
"self": 0.0,
"children": {
"worker_root": {
"total": 2520.3103077089136,
"count": 63916,
"is_parallel": true,
"self": 983.4499135998758,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002761620999990555,
"count": 1,
"is_parallel": true,
"self": 0.0007690390007155656,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019925819992749894,
"count": 8,
"is_parallel": true,
"self": 0.0019925819992749894
}
}
},
"UnityEnvironment.step": {
"total": 0.05808792800007723,
"count": 1,
"is_parallel": true,
"self": 0.0006532289994538587,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005823199999213102,
"count": 1,
"is_parallel": true,
"self": 0.0005823199999213102
},
"communicator.exchange": {
"total": 0.053722664000360965,
"count": 1,
"is_parallel": true,
"self": 0.053722664000360965
},
"steps_from_proto": {
"total": 0.0031297150003410934,
"count": 1,
"is_parallel": true,
"self": 0.00043906900100409985,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0026906459993369936,
"count": 8,
"is_parallel": true,
"self": 0.0026906459993369936
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1536.8603941090378,
"count": 63915,
"is_parallel": true,
"self": 38.2559088559633,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.383993455010568,
"count": 63915,
"is_parallel": true,
"self": 25.383993455010568
},
"communicator.exchange": {
"total": 1354.5754040499887,
"count": 63915,
"is_parallel": true,
"self": 1354.5754040499887
},
"steps_from_proto": {
"total": 118.64508774807518,
"count": 63915,
"is_parallel": true,
"self": 23.81704980688164,
"children": {
"_process_rank_one_or_two_observation": {
"total": 94.82803794119354,
"count": 511320,
"is_parallel": true,
"self": 94.82803794119354
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 708.3305080319628,
"count": 63916,
"self": 3.43714574104024,
"children": {
"process_trajectory": {
"total": 119.970798255928,
"count": 63916,
"self": 119.74919112892758,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22160712700042495,
"count": 2,
"self": 0.22160712700042495
}
}
},
"_update_policy": {
"total": 584.9225640349946,
"count": 449,
"self": 377.742723362971,
"children": {
"TorchPPOOptimizer.update": {
"total": 207.1798406720236,
"count": 22818,
"self": 207.1798406720236
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.099994713324122e-07,
"count": 1,
"self": 9.099994713324122e-07
},
"TrainerController._save_models": {
"total": 0.10377879000043322,
"count": 1,
"self": 0.0014161700000840938,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10236262000034912,
"count": 1,
"self": 0.10236262000034912
}
}
}
}
}
}
}