{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3480006158351898,
"min": 0.3480006158351898,
"max": 1.5122617483139038,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 10445.5869140625,
"min": 10445.5869140625,
"max": 45875.97265625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989937.0,
"min": 29952.0,
"max": 989937.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989937.0,
"min": 29952.0,
"max": 989937.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5045989155769348,
"min": -0.13163955509662628,
"max": 0.5440557599067688,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 135.23251342773438,
"min": -31.19857406616211,
"max": 151.79156494140625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.018216941505670547,
"min": -0.009632612578570843,
"max": 0.2034500688314438,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.882140159606934,
"min": -2.6008052825927734,
"max": 48.82801818847656,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0680510714275038,
"min": 0.06350886450603692,
"max": 0.07337358330204594,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.020766071412557,
"min": 0.484897939534791,
"max": 1.0685566518987604,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013217109531979077,
"min": 0.0010246546285109213,
"max": 0.01527340855093263,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19825664297968615,
"min": 0.012295855542131057,
"max": 0.2138277197130568,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.573877475406666e-06,
"min": 7.573877475406666e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011360816213109998,
"min": 0.00011360816213109998,
"max": 0.0033822767725744996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10252459333333334,
"min": 0.10252459333333334,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5378689,
"min": 1.3886848,
"max": 2.5274254999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.000262206874,
"min": 0.000262206874,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00393310311,
"min": 0.00393310311,
"max": 0.11276980745000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.011645362712442875,
"min": 0.011645362712442875,
"max": 0.38643667101860046,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1746804416179657,
"min": 0.16425588726997375,
"max": 2.705056667327881,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 373.41333333333336,
"min": 358.4712643678161,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28006.0,
"min": 15984.0,
"max": 32330.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4932053163647652,
"min": -1.0000000521540642,
"max": 1.5966588036978946,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 111.99039872735739,
"min": -29.85000167042017,
"max": 136.81219808757305,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4932053163647652,
"min": -1.0000000521540642,
"max": 1.5966588036978946,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 111.99039872735739,
"min": -29.85000167042017,
"max": 136.81219808757305,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04467218621672752,
"min": 0.044351103500832385,
"max": 8.524537837132812,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.350413966254564,
"min": 3.350413966254564,
"max": 136.39260539412498,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1747983685",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1747985909"
},
"total": 2223.98099359,
"count": 1,
"self": 0.8867171479996614,
"children": {
"run_training.setup": {
"total": 0.01979575699988345,
"count": 1,
"self": 0.01979575699988345
},
"TrainerController.start_learning": {
"total": 2223.0744806850003,
"count": 1,
"self": 1.420863693989304,
"children": {
"TrainerController._reset_env": {
"total": 2.7430320220000795,
"count": 1,
"self": 2.7430320220000795
},
"TrainerController.advance": {
"total": 2218.79092631701,
"count": 63779,
"self": 1.473064826040627,
"children": {
"env_step": {
"total": 1538.2724813219306,
"count": 63779,
"self": 1383.1349463218776,
"children": {
"SubprocessEnvManager._take_step": {
"total": 154.34305719901477,
"count": 63779,
"self": 4.849897313121346,
"children": {
"TorchPolicy.evaluate": {
"total": 149.49315988589342,
"count": 62547,
"self": 149.49315988589342
}
}
},
"workers": {
"total": 0.7944778010382834,
"count": 63779,
"self": 0.0,
"children": {
"worker_root": {
"total": 2217.978245469938,
"count": 63779,
"is_parallel": true,
"self": 950.3227692099413,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018480929998077045,
"count": 1,
"is_parallel": true,
"self": 0.000583169999117672,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012649230006900325,
"count": 8,
"is_parallel": true,
"self": 0.0012649230006900325
}
}
},
"UnityEnvironment.step": {
"total": 0.07819981100010409,
"count": 1,
"is_parallel": true,
"self": 0.00051503400072761,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004637869997168309,
"count": 1,
"is_parallel": true,
"self": 0.0004637869997168309
},
"communicator.exchange": {
"total": 0.07561661299996558,
"count": 1,
"is_parallel": true,
"self": 0.07561661299996558
},
"steps_from_proto": {
"total": 0.0016043769996940682,
"count": 1,
"is_parallel": true,
"self": 0.0003500529987832124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012543240009108558,
"count": 8,
"is_parallel": true,
"self": 0.0012543240009108558
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1267.6554762599967,
"count": 63778,
"is_parallel": true,
"self": 31.759362979138132,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.653061181043086,
"count": 63778,
"is_parallel": true,
"self": 23.653061181043086
},
"communicator.exchange": {
"total": 1114.6381054679205,
"count": 63778,
"is_parallel": true,
"self": 1114.6381054679205
},
"steps_from_proto": {
"total": 97.60494663189502,
"count": 63778,
"is_parallel": true,
"self": 19.630318693072695,
"children": {
"_process_rank_one_or_two_observation": {
"total": 77.97462793882232,
"count": 510224,
"is_parallel": true,
"self": 77.97462793882232
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 679.0453801690387,
"count": 63779,
"self": 2.7417782090133187,
"children": {
"process_trajectory": {
"total": 132.5244476210246,
"count": 63779,
"self": 132.2791393360244,
"children": {
"RLTrainer._checkpoint": {
"total": 0.24530828500019197,
"count": 2,
"self": 0.24530828500019197
}
}
},
"_update_policy": {
"total": 543.7791543390008,
"count": 447,
"self": 303.9167075650048,
"children": {
"TorchPPOOptimizer.update": {
"total": 239.862446773996,
"count": 22773,
"self": 239.862446773996
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0870007827179506e-06,
"count": 1,
"self": 1.0870007827179506e-06
},
"TrainerController._save_models": {
"total": 0.11965756500012503,
"count": 1,
"self": 0.0017907800001921714,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11786678499993286,
"count": 1,
"self": 0.11786678499993286
}
}
}
}
}
}
}