{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.44011664390563965,
"min": 0.44011664390563965,
"max": 1.4029858112335205,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13126.0390625,
"min": 13126.0390625,
"max": 42560.9765625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989933.0,
"min": 29952.0,
"max": 989933.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989933.0,
"min": 29952.0,
"max": 989933.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5777921676635742,
"min": -0.09090372920036316,
"max": 0.5817480087280273,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 161.20401000976562,
"min": -21.907798767089844,
"max": 164.0529327392578,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008946703746914864,
"min": -0.006395567674189806,
"max": 0.5310140252113342,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.4961302280426025,
"min": -1.733198881149292,
"max": 125.8503189086914,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07004542917794813,
"min": 0.06468445404676632,
"max": 0.07393118554765314,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.980636008491274,
"min": 0.5009785290682373,
"max": 1.0530875019721149,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.016674809737499647,
"min": 0.0007331115997585904,
"max": 0.017010113332833584,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.23344733632499504,
"min": 0.00885103431025326,
"max": 0.2381415866596702,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.508368925814283e-06,
"min": 7.508368925814283e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010511716496139996,
"min": 0.00010511716496139996,
"max": 0.0035077886307372,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10250275714285716,
"min": 0.10250275714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4350386000000002,
"min": 1.3886848,
"max": 2.5692627999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0002600254385714285,
"min": 0.0002600254385714285,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036403561399999994,
"min": 0.0036403561399999994,
"max": 0.11694935371999998,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.009841570630669594,
"min": 0.009841570630669594,
"max": 0.5599178671836853,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1377819925546646,
"min": 0.1377819925546646,
"max": 3.9194250106811523,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 329.7931034482759,
"min": 306.72340425531917,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 28692.0,
"min": 15984.0,
"max": 32327.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6228045285256072,
"min": -1.0000000521540642,
"max": 1.691548374872054,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 142.80679851025343,
"min": -29.81740166991949,
"max": 157.313998863101,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6228045285256072,
"min": -1.0000000521540642,
"max": 1.691548374872054,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 142.80679851025343,
"min": -29.81740166991949,
"max": 157.313998863101,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.033978092623080804,
"min": 0.03256384932914228,
"max": 11.95184032060206,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.9900721508311108,
"min": 2.9585949972533854,
"max": 191.22944512963295,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748551908",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748554121"
},
"total": 2213.004740252,
"count": 1,
"self": 1.2636512169997332,
"children": {
"run_training.setup": {
"total": 0.019928865000110818,
"count": 1,
"self": 0.019928865000110818
},
"TrainerController.start_learning": {
"total": 2211.72116017,
"count": 1,
"self": 1.309291178028161,
"children": {
"TrainerController._reset_env": {
"total": 2.295816489000117,
"count": 1,
"self": 2.295816489000117
},
"TrainerController.advance": {
"total": 2207.980407852972,
"count": 63914,
"self": 1.377729167965299,
"children": {
"env_step": {
"total": 1534.303165643989,
"count": 63914,
"self": 1387.1106705290988,
"children": {
"SubprocessEnvManager._take_step": {
"total": 146.45163331298045,
"count": 63914,
"self": 4.562752177008406,
"children": {
"TorchPolicy.evaluate": {
"total": 141.88888113597204,
"count": 62575,
"self": 141.88888113597204
}
}
},
"workers": {
"total": 0.7408618019096593,
"count": 63914,
"self": 0.0,
"children": {
"worker_root": {
"total": 2206.84294106294,
"count": 63914,
"is_parallel": true,
"self": 927.3783936069462,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018533410000145523,
"count": 1,
"is_parallel": true,
"self": 0.0006273300000430027,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012260109999715496,
"count": 8,
"is_parallel": true,
"self": 0.0012260109999715496
}
}
},
"UnityEnvironment.step": {
"total": 0.049522432999992816,
"count": 1,
"is_parallel": true,
"self": 0.0005058610001924535,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004622219998964283,
"count": 1,
"is_parallel": true,
"self": 0.0004622219998964283
},
"communicator.exchange": {
"total": 0.046969683999805056,
"count": 1,
"is_parallel": true,
"self": 0.046969683999805056
},
"steps_from_proto": {
"total": 0.0015846660000988777,
"count": 1,
"is_parallel": true,
"self": 0.0003319250001823093,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012527409999165684,
"count": 8,
"is_parallel": true,
"self": 0.0012527409999165684
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1279.4645474559939,
"count": 63913,
"is_parallel": true,
"self": 31.87360894899416,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.079376943929446,
"count": 63913,
"is_parallel": true,
"self": 23.079376943929446
},
"communicator.exchange": {
"total": 1130.8581935390255,
"count": 63913,
"is_parallel": true,
"self": 1130.8581935390255
},
"steps_from_proto": {
"total": 93.65336802404477,
"count": 63913,
"is_parallel": true,
"self": 18.664885612872013,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.98848241117275,
"count": 511304,
"is_parallel": true,
"self": 74.98848241117275
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 672.2995130410177,
"count": 63914,
"self": 2.4277108120402318,
"children": {
"process_trajectory": {
"total": 127.85003238897275,
"count": 63914,
"self": 127.59933200497312,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2507003839996287,
"count": 2,
"self": 0.2507003839996287
}
}
},
"_update_policy": {
"total": 542.0217698400047,
"count": 452,
"self": 299.9707779410321,
"children": {
"TorchPPOOptimizer.update": {
"total": 242.0509918989726,
"count": 22791,
"self": 242.0509918989726
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.5699997675255872e-06,
"count": 1,
"self": 1.5699997675255872e-06
},
"TrainerController._save_models": {
"total": 0.13564308000013625,
"count": 1,
"self": 0.0019106629997622804,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13373241700037397,
"count": 1,
"self": 0.13373241700037397
}
}
}
}
}
}
}