{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.17474807798862457,
"min": 0.17474807798862457,
"max": 1.4941887855529785,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5239.646484375,
"min": 5239.646484375,
"max": 45327.7109375,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999961.0,
"min": 29952.0,
"max": 2999961.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999961.0,
"min": 29952.0,
"max": 2999961.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6794767379760742,
"min": -0.10855518281459808,
"max": 0.7530921697616577,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 196.3687744140625,
"min": -25.72757911682129,
"max": 223.6683807373047,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.006858796812593937,
"min": -0.020373791456222534,
"max": 0.2177567183971405,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 1.9821922779083252,
"min": -5.236064434051514,
"max": 52.4793701171875,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06675986004556353,
"min": 0.065122923737699,
"max": 0.0736049485345748,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9346380406378895,
"min": 0.5096716401167214,
"max": 1.0635929871156502,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014018459578136995,
"min": 0.00016288555255408605,
"max": 0.015118839174272796,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19625843409391794,
"min": 0.0021175121832031187,
"max": 0.22037165285334645,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4788637927928594e-06,
"min": 1.4788637927928594e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.070409309910003e-05,
"min": 2.070409309910003e-05,
"max": 0.003969190076936667,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049292142857146,
"min": 0.10049292142857146,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4069009000000003,
"min": 1.3962282666666668,
"max": 2.7230633333333336,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.924285071428578e-05,
"min": 5.924285071428578e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008293999100000009,
"min": 0.0008293999100000009,
"max": 0.132314027,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.004748155362904072,
"min": 0.004564561881124973,
"max": 0.32770994305610657,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.06647417694330215,
"min": 0.06390386819839478,
"max": 2.2939696311950684,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 265.5982142857143,
"min": 235.7295081967213,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29747.0,
"min": 15984.0,
"max": 34559.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6986803420420205,
"min": -1.0000000521540642,
"max": 1.7642032362338973,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 190.25219830870628,
"min": -30.99840161949396,
"max": 216.99699805676937,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6986803420420205,
"min": -1.0000000521540642,
"max": 1.7642032362338973,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 190.25219830870628,
"min": -30.99840161949396,
"max": 216.99699805676937,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013078147336013249,
"min": 0.012025743967281347,
"max": 6.187570055015385,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.464752501633484,
"min": 1.3535099465807434,
"max": 99.00112088024616,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705310939",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/mnt/wsl/PHYSICALDRIVE0p1/akira/Works/src/github.com/Unity-Technologies/ml-agents/.venv/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705317629"
},
"total": 6690.736504244,
"count": 1,
"self": 0.37448220999067416,
"children": {
"run_training.setup": {
"total": 0.01567573800275568,
"count": 1,
"self": 0.01567573800275568
},
"TrainerController.start_learning": {
"total": 6690.346346296006,
"count": 1,
"self": 4.080947478192684,
"children": {
"TrainerController._reset_env": {
"total": 4.796052055004111,
"count": 1,
"self": 4.796052055004111
},
"TrainerController.advance": {
"total": 6681.402287860808,
"count": 193094,
"self": 4.191292090370553,
"children": {
"env_step": {
"total": 4429.072117945572,
"count": 193094,
"self": 3694.300870279345,
"children": {
"SubprocessEnvManager._take_step": {
"total": 732.0460590446091,
"count": 193094,
"self": 12.514113700701273,
"children": {
"TorchPolicy.evaluate": {
"total": 719.5319453439079,
"count": 187566,
"self": 719.5319453439079
}
}
},
"workers": {
"total": 2.7251886216181447,
"count": 193094,
"self": 0.0,
"children": {
"worker_root": {
"total": 6681.673903074407,
"count": 193094,
"is_parallel": true,
"self": 3268.7607938800647,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001483286003349349,
"count": 1,
"is_parallel": true,
"self": 0.0005199290098971687,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0009633569934521802,
"count": 8,
"is_parallel": true,
"self": 0.0009633569934521802
}
}
},
"UnityEnvironment.step": {
"total": 0.029584228999738116,
"count": 1,
"is_parallel": true,
"self": 0.00021731199376517907,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002830170051311143,
"count": 1,
"is_parallel": true,
"self": 0.0002830170051311143
},
"communicator.exchange": {
"total": 0.028369558000122197,
"count": 1,
"is_parallel": true,
"self": 0.028369558000122197
},
"steps_from_proto": {
"total": 0.0007143420007196255,
"count": 1,
"is_parallel": true,
"self": 0.0002014119891100563,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005129300116095692,
"count": 8,
"is_parallel": true,
"self": 0.0005129300116095692
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3412.913109194342,
"count": 193093,
"is_parallel": true,
"self": 43.85012592727435,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 37.678215693027596,
"count": 193093,
"is_parallel": true,
"self": 37.678215693027596
},
"communicator.exchange": {
"total": 3189.1914329608626,
"count": 193093,
"is_parallel": true,
"self": 3189.1914329608626
},
"steps_from_proto": {
"total": 142.19333461317729,
"count": 193093,
"is_parallel": true,
"self": 38.878965809955844,
"children": {
"_process_rank_one_or_two_observation": {
"total": 103.31436880322144,
"count": 1544744,
"is_parallel": true,
"self": 103.31436880322144
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2248.1388778248656,
"count": 193094,
"self": 8.226687879221572,
"children": {
"process_trajectory": {
"total": 415.617537879647,
"count": 193094,
"self": 415.17937027964217,
"children": {
"RLTrainer._checkpoint": {
"total": 0.43816760000481736,
"count": 6,
"self": 0.43816760000481736
}
}
},
"_update_policy": {
"total": 1824.294652065997,
"count": 1394,
"self": 886.2588591961539,
"children": {
"TorchPPOOptimizer.update": {
"total": 938.0357928698431,
"count": 68364,
"self": 938.0357928698431
}
}
}
}
}
}
},
"trainer_threads": {
"total": 7.999988156370819e-07,
"count": 1,
"self": 7.999988156370819e-07
},
"TrainerController._save_models": {
"total": 0.06705810200219275,
"count": 1,
"self": 0.0009000069985631853,
"children": {
"RLTrainer._checkpoint": {
"total": 0.06615809500362957,
"count": 1,
"self": 0.06615809500362957
}
}
}
}
}
}
}