{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.555293619632721,
"min": 0.5478405356407166,
"max": 1.4640631675720215,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16569.9609375,
"min": 16569.9609375,
"max": 44413.8203125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989983.0,
"min": 29952.0,
"max": 989983.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.47768712043762207,
"min": -0.09551462531089783,
"max": 0.47768712043762207,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 128.49783325195312,
"min": -22.92350959777832,
"max": 128.49783325195312,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": -0.18849189579486847,
"min": -0.18849189579486847,
"max": 0.4504363536834717,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": -50.70431900024414,
"min": -50.70431900024414,
"max": 106.75341796875,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0687378419499423,
"min": 0.06450924142457856,
"max": 0.07425150116149849,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9623297872991922,
"min": 0.5156247117202033,
"max": 1.0716776510040895,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.017136993261547533,
"min": 0.00015477263839662266,
"max": 0.017136993261547533,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2399179056616655,
"min": 0.0020120442991560946,
"max": 0.2399179056616655,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.2756975748e-06,
"min": 7.2756975748e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0001018597660472,
"min": 0.0001018597660472,
"max": 0.0032593763135413,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10242520000000001,
"min": 0.10242520000000001,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4339528000000001,
"min": 1.3886848,
"max": 2.4017298999999994,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025227748000000006,
"min": 0.00025227748000000006,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035318847200000005,
"min": 0.0035318847200000005,
"max": 0.10866722413,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007798521313816309,
"min": 0.007798521313816309,
"max": 0.39074942469596863,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1091792955994606,
"min": 0.1091792955994606,
"max": 2.735245943069458,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 403.1095890410959,
"min": 403.1095890410959,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29427.0,
"min": 15984.0,
"max": 32577.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.5146712140269476,
"min": -1.0000000521540642,
"max": 1.5146712140269476,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 110.57099862396717,
"min": -31.996001660823822,
"max": 110.57099862396717,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.5146712140269476,
"min": -1.0000000521540642,
"max": 1.5146712140269476,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 110.57099862396717,
"min": -31.996001660823822,
"max": 110.57099862396717,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03264087562613534,
"min": 0.03264087562613534,
"max": 8.224545539356768,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.3827839207078796,
"min": 2.3827839207078796,
"max": 131.5927286297083,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1700124804",
"python_version": "3.10.12 (main, Jun 11 2023, 05:26:28) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.0+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1700126962"
},
"total": 2157.65470841,
"count": 1,
"self": 1.2295993869993254,
"children": {
"run_training.setup": {
"total": 0.044638752000082604,
"count": 1,
"self": 0.044638752000082604
},
"TrainerController.start_learning": {
"total": 2156.3804702710004,
"count": 1,
"self": 1.29497244900449,
"children": {
"TrainerController._reset_env": {
"total": 3.494260913000062,
"count": 1,
"self": 3.494260913000062
},
"TrainerController.advance": {
"total": 2151.4895569799955,
"count": 63567,
"self": 1.3314210029284368,
"children": {
"env_step": {
"total": 1508.1229794910787,
"count": 63567,
"self": 1379.7883147500656,
"children": {
"SubprocessEnvManager._take_step": {
"total": 127.5350869339934,
"count": 63567,
"self": 4.498344671013228,
"children": {
"TorchPolicy.evaluate": {
"total": 123.03674226298017,
"count": 62561,
"self": 123.03674226298017
}
}
},
"workers": {
"total": 0.7995778070196593,
"count": 63567,
"self": 0.0,
"children": {
"worker_root": {
"total": 2151.9283808810546,
"count": 63567,
"is_parallel": true,
"self": 886.807323686108,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018003120001139905,
"count": 1,
"is_parallel": true,
"self": 0.0005816060004235624,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012187059996904281,
"count": 8,
"is_parallel": true,
"self": 0.0012187059996904281
}
}
},
"UnityEnvironment.step": {
"total": 0.06916476099991087,
"count": 1,
"is_parallel": true,
"self": 0.003667680999797085,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004441769999630196,
"count": 1,
"is_parallel": true,
"self": 0.0004441769999630196
},
"communicator.exchange": {
"total": 0.0632852470000671,
"count": 1,
"is_parallel": true,
"self": 0.0632852470000671
},
"steps_from_proto": {
"total": 0.0017676560000836616,
"count": 1,
"is_parallel": true,
"self": 0.0004185489997325931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013491070003510686,
"count": 8,
"is_parallel": true,
"self": 0.0013491070003510686
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1265.1210571949466,
"count": 63566,
"is_parallel": true,
"self": 34.13266680804372,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.371631291993026,
"count": 63566,
"is_parallel": true,
"self": 23.371631291993026
},
"communicator.exchange": {
"total": 1111.6869079539651,
"count": 63566,
"is_parallel": true,
"self": 1111.6869079539651
},
"steps_from_proto": {
"total": 95.92985114094472,
"count": 63566,
"is_parallel": true,
"self": 18.96251521185468,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.96733592909004,
"count": 508528,
"is_parallel": true,
"self": 76.96733592909004
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 642.0351564859882,
"count": 63567,
"self": 2.400863965998724,
"children": {
"process_trajectory": {
"total": 122.89141309398656,
"count": 63567,
"self": 122.69936202798726,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19205106599929422,
"count": 2,
"self": 0.19205106599929422
}
}
},
"_update_policy": {
"total": 516.7428794260029,
"count": 443,
"self": 312.9288214150133,
"children": {
"TorchPPOOptimizer.update": {
"total": 203.81405801098958,
"count": 22824,
"self": 203.81405801098958
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3280000530357938e-06,
"count": 1,
"self": 1.3280000530357938e-06
},
"TrainerController._save_models": {
"total": 0.10167860100000325,
"count": 1,
"self": 0.0018223049996777263,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09985629600032553,
"count": 1,
"self": 0.09985629600032553
}
}
}
}
}
}
}