{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.38729771971702576,
"min": 0.3853414058685303,
"max": 1.3900351524353027,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11470.208984375,
"min": 11399.9404296875,
"max": 42168.10546875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989963.0,
"min": 29952.0,
"max": 989963.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.5025095343589783,
"min": -0.10628219693899155,
"max": 0.5982186794281006,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 136.18008422851562,
"min": -25.614009857177734,
"max": 166.30479431152344,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.008587466552853584,
"min": -0.00292215496301651,
"max": 0.44154250621795654,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.3272035121917725,
"min": -0.7919039726257324,
"max": 104.64557647705078,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07042836474994103,
"min": 0.06463789745092592,
"max": 0.07465106075978838,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9859971064991745,
"min": 0.4973175405287492,
"max": 1.1197659113968257,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01439024162530023,
"min": 0.00019515904372693473,
"max": 0.016481910407438755,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20146338275420322,
"min": 0.002341908524723217,
"max": 0.2457869820160947,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.71258314345714e-06,
"min": 7.71258314345714e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010797616400839996,
"min": 0.00010797616400839996,
"max": 0.0032249629250124,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10257082857142859,
"min": 0.10257082857142859,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4359916000000001,
"min": 1.3886848,
"max": 2.3176271999999996,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026682577428571426,
"min": 0.00026682577428571426,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0037355608399999994,
"min": 0.0037355608399999994,
"max": 0.10751126124000003,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.00984563585370779,
"min": 0.00984563585370779,
"max": 0.4996545910835266,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.13783890008926392,
"min": 0.13783890008926392,
"max": 3.497582197189331,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 395.46153846153845,
"min": 323.31111111111113,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30846.0,
"min": 15984.0,
"max": 32297.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4295417528740968,
"min": -1.0000000521540642,
"max": 1.6322333147128423,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 112.93379847705364,
"min": -31.998401671648026,
"max": 146.9009983241558,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4295417528740968,
"min": -1.0000000521540642,
"max": 1.6322333147128423,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 112.93379847705364,
"min": -31.998401671648026,
"max": 146.9009983241558,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.039737957967754714,
"min": 0.0360181934740265,
"max": 9.992280322127044,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.1392986794526223,
"min": 3.1346675031236373,
"max": 159.8764851540327,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739062090",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id={PYARAMID_RUN_ID} --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739065477"
},
"total": 3386.905189491,
"count": 1,
"self": 0.6986803169997984,
"children": {
"run_training.setup": {
"total": 0.028693217000068216,
"count": 1,
"self": 0.028693217000068216
},
"TrainerController.start_learning": {
"total": 3386.177815957,
"count": 1,
"self": 2.3784632969682207,
"children": {
"TrainerController._reset_env": {
"total": 3.4407767619999277,
"count": 1,
"self": 3.4407767619999277
},
"TrainerController.advance": {
"total": 3380.271053250032,
"count": 63713,
"self": 2.553238244053773,
"children": {
"env_step": {
"total": 2248.3026288589876,
"count": 63713,
"self": 2076.2170634709446,
"children": {
"SubprocessEnvManager._take_step": {
"total": 170.6862976220459,
"count": 63713,
"self": 7.384994348035889,
"children": {
"TorchPolicy.evaluate": {
"total": 163.30130327401002,
"count": 62550,
"self": 163.30130327401002
}
}
},
"workers": {
"total": 1.3992677659969104,
"count": 63713,
"self": 0.0,
"children": {
"worker_root": {
"total": 3378.145833700032,
"count": 63713,
"is_parallel": true,
"self": 1485.3943185409953,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005820688999961021,
"count": 1,
"is_parallel": true,
"self": 0.004089155000087885,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001731533999873136,
"count": 8,
"is_parallel": true,
"self": 0.001731533999873136
}
}
},
"UnityEnvironment.step": {
"total": 0.0637089049999986,
"count": 1,
"is_parallel": true,
"self": 0.0007021860000122615,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005613700000139943,
"count": 1,
"is_parallel": true,
"self": 0.0005613700000139943
},
"communicator.exchange": {
"total": 0.0603707120000081,
"count": 1,
"is_parallel": true,
"self": 0.0603707120000081
},
"steps_from_proto": {
"total": 0.002074636999964241,
"count": 1,
"is_parallel": true,
"self": 0.0006135500000254979,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014610869999387432,
"count": 8,
"is_parallel": true,
"self": 0.0014610869999387432
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1892.751515159037,
"count": 63712,
"is_parallel": true,
"self": 47.48118881803293,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 30.854301166966593,
"count": 63712,
"is_parallel": true,
"self": 30.854301166966593
},
"communicator.exchange": {
"total": 1686.2732888519913,
"count": 63712,
"is_parallel": true,
"self": 1686.2732888519913
},
"steps_from_proto": {
"total": 128.14273632204583,
"count": 63712,
"is_parallel": true,
"self": 27.797276882953838,
"children": {
"_process_rank_one_or_two_observation": {
"total": 100.345459439092,
"count": 509696,
"is_parallel": true,
"self": 100.345459439092
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1129.4151861469902,
"count": 63713,
"self": 4.574359873966614,
"children": {
"process_trajectory": {
"total": 167.2293419900226,
"count": 63713,
"self": 166.81339157402238,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4159504160002143,
"count": 2,
"self": 0.4159504160002143
}
}
},
"_update_policy": {
"total": 957.611484283001,
"count": 440,
"self": 373.78398477501935,
"children": {
"TorchPPOOptimizer.update": {
"total": 583.8274995079817,
"count": 22845,
"self": 583.8274995079817
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.319996934209485e-07,
"count": 1,
"self": 9.319996934209485e-07
},
"TrainerController._save_models": {
"total": 0.08752171600008296,
"count": 1,
"self": 0.0022351530001287756,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08528656299995419,
"count": 1,
"self": 0.08528656299995419
}
}
}
}
}
}
}