Pyramids / run_logs/timers.json
ToonAga's picture
First Push
76b638c verified
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.17828094959259033,
"min": 0.1721799373626709,
"max": 1.4769920110702515,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 5265.7060546875,
"min": 5180.63916015625,
"max": 44806.03125,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999954.0,
"min": 29952.0,
"max": 2999954.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999954.0,
"min": 29952.0,
"max": 2999954.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7533966898918152,
"min": -0.13449205458164215,
"max": 0.8544591665267944,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 226.01901245117188,
"min": -32.4125862121582,
"max": 261.4645080566406,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.002585420850664377,
"min": -0.016605578362941742,
"max": 0.4719714820384979,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -0.7756262421607971,
"min": -4.549928665161133,
"max": 111.85723876953125,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07147376345112987,
"min": 0.06372184326637878,
"max": 0.07485158303322914,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.000632688315818,
"min": 0.4676359217426098,
"max": 1.0945590881941218,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.015439929482887414,
"min": 7.127000181848545e-05,
"max": 0.01702970204531171,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2161590127604238,
"min": 0.0009265100236403108,
"max": 0.23841582863436392,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4941280734190472e-06,
"min": 1.4941280734190472e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.091779302786666e-05,
"min": 2.091779302786666e-05,
"max": 0.0039695232768256,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049800952380951,
"min": 0.10049800952380951,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4069721333333332,
"min": 1.3962282666666668,
"max": 2.7824777333333333,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.9751151428571415e-05,
"min": 5.9751151428571415e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008365161199999998,
"min": 0.0008365161199999998,
"max": 0.13232512256,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.005526099819689989,
"min": 0.005032707937061787,
"max": 0.4690413475036621,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.07736539840698242,
"min": 0.07534351944923401,
"max": 3.2832894325256348,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 241.43846153846152,
"min": 210.74100719424462,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31387.0,
"min": 15984.0,
"max": 33090.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7117395186147024,
"min": -1.0000000521540642,
"max": 1.7897214177463736,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 220.81439790129662,
"min": -29.690401628613472,
"max": 250.5609984844923,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7117395186147024,
"min": -1.0000000521540642,
"max": 1.7897214177463736,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 220.81439790129662,
"min": -29.690401628613472,
"max": 250.5609984844923,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.013703771141969577,
"min": 0.01276355956272064,
"max": 9.776673128828406,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 1.7677864773140755,
"min": 1.5491279620910063,
"max": 156.4267700612545,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1723559769",
"python_version": "3.10.1 | packaged by conda-forge | (main, Dec 22 2021, 01:39:36) [GCC 9.4.0]",
"command_line_arguments": "/home/aa/miniconda3/envs/rl-unit5/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.1+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1723561760"
},
"total": 1991.055388856912,
"count": 1,
"self": 0.1671973499469459,
"children": {
"run_training.setup": {
"total": 0.009938825154677033,
"count": 1,
"self": 0.009938825154677033
},
"TrainerController.start_learning": {
"total": 1990.8782526818104,
"count": 1,
"self": 2.1196501951199025,
"children": {
"TrainerController._reset_env": {
"total": 0.8935073090251535,
"count": 1,
"self": 0.8935073090251535
},
"TrainerController.advance": {
"total": 1987.8339933347888,
"count": 194560,
"self": 1.9496149611659348,
"children": {
"env_step": {
"total": 1358.5622360771522,
"count": 194560,
"self": 1210.746600769693,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.5065441785846,
"count": 194560,
"self": 6.1542078596539795,
"children": {
"TorchPolicy.evaluate": {
"total": 140.3523363189306,
"count": 187548,
"self": 140.3523363189306
}
}
},
"workers": {
"total": 1.3090911288745701,
"count": 194560,
"self": 0.0,
"children": {
"worker_root": {
"total": 1988.1316426536068,
"count": 194560,
"is_parallel": true,
"self": 912.8895388829987,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0007532248273491859,
"count": 1,
"is_parallel": true,
"self": 0.00022136885672807693,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000531855970621109,
"count": 8,
"is_parallel": true,
"self": 0.000531855970621109
}
}
},
"UnityEnvironment.step": {
"total": 0.015873197931796312,
"count": 1,
"is_parallel": true,
"self": 0.00015640282072126865,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00013201613910496235,
"count": 1,
"is_parallel": true,
"self": 0.00013201613910496235
},
"communicator.exchange": {
"total": 0.01516228006221354,
"count": 1,
"is_parallel": true,
"self": 0.01516228006221354
},
"steps_from_proto": {
"total": 0.00042249890975654125,
"count": 1,
"is_parallel": true,
"self": 0.00011073984205722809,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00031175906769931316,
"count": 8,
"is_parallel": true,
"self": 0.00031175906769931316
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1075.242103770608,
"count": 194559,
"is_parallel": true,
"self": 27.32880406617187,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 17.399412733502686,
"count": 194559,
"is_parallel": true,
"self": 17.399412733502686
},
"communicator.exchange": {
"total": 958.0143073941581,
"count": 194559,
"is_parallel": true,
"self": 958.0143073941581
},
"steps_from_proto": {
"total": 72.49957957677543,
"count": 194559,
"is_parallel": true,
"self": 16.784605825319886,
"children": {
"_process_rank_one_or_two_observation": {
"total": 55.714973751455545,
"count": 1556472,
"is_parallel": true,
"self": 55.714973751455545
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 627.3221422964707,
"count": 194560,
"self": 3.886689353734255,
"children": {
"process_trajectory": {
"total": 133.90792788635008,
"count": 194560,
"self": 133.6866648425348,
"children": {
"RLTrainer._checkpoint": {
"total": 0.22126304381527007,
"count": 6,
"self": 0.22126304381527007
}
}
},
"_update_policy": {
"total": 489.52752505638637,
"count": 1400,
"self": 295.3779928740114,
"children": {
"TorchPPOOptimizer.update": {
"total": 194.14953218237497,
"count": 68394,
"self": 194.14953218237497
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.068723112344742e-07,
"count": 1,
"self": 5.068723112344742e-07
},
"TrainerController._save_models": {
"total": 0.0311013360042125,
"count": 1,
"self": 0.0006687080021947622,
"children": {
"RLTrainer._checkpoint": {
"total": 0.030432628002017736,
"count": 1,
"self": 0.030432628002017736
}
}
}
}
}
}
}