{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3775785565376282,
"min": 0.3775785565376282,
"max": 1.5146287679672241,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11351.521484375,
"min": 11351.521484375,
"max": 45947.77734375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989955.0,
"min": 29952.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989955.0,
"min": 29952.0,
"max": 989955.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6721531748771667,
"min": -0.1570003777742386,
"max": 0.7361451983451843,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 199.62948608398438,
"min": -37.20909118652344,
"max": 218.6351318359375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.015921317040920258,
"min": 0.008380566723644733,
"max": 0.4915061891078949,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.728631019592285,
"min": 2.2878947257995605,
"max": 116.48696899414062,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0687326513157348,
"min": 0.06595088119159781,
"max": 0.07412121547422848,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9622571184202873,
"min": 0.4968420624242555,
"max": 1.0616614596253973,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.019259122636174738,
"min": 0.0003473202248639351,
"max": 0.019259122636174738,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.2696277169064463,
"min": 0.002431241574047546,
"max": 0.2748525777960643,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.476718936364288e-06,
"min": 7.476718936364288e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010467406510910003,
"min": 0.00010467406510910003,
"max": 0.0029060685313106,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10249220714285716,
"min": 0.10249220714285716,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4348909000000003,
"min": 1.327104,
"max": 2.3589431000000003,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025897149357142867,
"min": 0.00025897149357142867,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036256009100000017,
"min": 0.0036256009100000017,
"max": 0.09690207106,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.014376168139278889,
"min": 0.014048464596271515,
"max": 0.5358132123947144,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.201266348361969,
"min": 0.1966785043478012,
"max": 3.75069260597229,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 263.5206611570248,
"min": 258.0431034482759,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31886.0,
"min": 15984.0,
"max": 32156.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7029449886331955,
"min": -1.0000000521540642,
"max": 1.7074620570859005,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 204.35339863598347,
"min": -32.000001668930054,
"max": 204.35339863598347,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7029449886331955,
"min": -1.0000000521540642,
"max": 1.7074620570859005,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 204.35339863598347,
"min": -32.000001668930054,
"max": 204.35339863598347,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03962243022203135,
"min": 0.03962243022203135,
"max": 10.700059106573462,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.754691626643762,
"min": 4.155050105415285,
"max": 171.2009457051754,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1748526446",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.7.0+cu126",
"numpy_version": "1.23.5",
"end_time_seconds": "1748528797"
},
"total": 2350.835716681,
"count": 1,
"self": 0.47593455700007326,
"children": {
"run_training.setup": {
"total": 0.01975483199998962,
"count": 1,
"self": 0.01975483199998962
},
"TrainerController.start_learning": {
"total": 2350.340027292,
"count": 1,
"self": 1.3189322769712817,
"children": {
"TrainerController._reset_env": {
"total": 2.210383768000156,
"count": 1,
"self": 2.210383768000156
},
"TrainerController.advance": {
"total": 2346.695886305029,
"count": 64056,
"self": 1.3804383810329455,
"children": {
"env_step": {
"total": 1629.3943670890292,
"count": 64056,
"self": 1454.7156881320452,
"children": {
"SubprocessEnvManager._take_step": {
"total": 173.91967040697114,
"count": 64056,
"self": 4.6095383479805605,
"children": {
"TorchPolicy.evaluate": {
"total": 169.31013205899058,
"count": 62551,
"self": 169.31013205899058
}
}
},
"workers": {
"total": 0.7590085500128225,
"count": 64056,
"self": 0.0,
"children": {
"worker_root": {
"total": 2345.406777398982,
"count": 64056,
"is_parallel": true,
"self": 1001.5302680030304,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001868879000085144,
"count": 1,
"is_parallel": true,
"self": 0.0006391449999227916,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012297340001623525,
"count": 8,
"is_parallel": true,
"self": 0.0012297340001623525
}
}
},
"UnityEnvironment.step": {
"total": 0.04880538000020351,
"count": 1,
"is_parallel": true,
"self": 0.0005489040001975809,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005844830000114598,
"count": 1,
"is_parallel": true,
"self": 0.0005844830000114598
},
"communicator.exchange": {
"total": 0.04515022999999019,
"count": 1,
"is_parallel": true,
"self": 0.04515022999999019
},
"steps_from_proto": {
"total": 0.0025217630000042845,
"count": 1,
"is_parallel": true,
"self": 0.00036936700007572654,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002152395999928558,
"count": 8,
"is_parallel": true,
"self": 0.002152395999928558
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1343.8765093959514,
"count": 64055,
"is_parallel": true,
"self": 31.58826856795349,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.676375130039105,
"count": 64055,
"is_parallel": true,
"self": 22.676375130039105
},
"communicator.exchange": {
"total": 1193.9500731339765,
"count": 64055,
"is_parallel": true,
"self": 1193.9500731339765
},
"steps_from_proto": {
"total": 95.66179256398232,
"count": 64055,
"is_parallel": true,
"self": 19.45874564981432,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.203046914168,
"count": 512440,
"is_parallel": true,
"self": 76.203046914168
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 715.9210808349669,
"count": 64056,
"self": 2.47736825300467,
"children": {
"process_trajectory": {
"total": 163.39659018295788,
"count": 64056,
"self": 163.14358308795772,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2530070950001573,
"count": 2,
"self": 0.2530070950001573
}
}
},
"_update_policy": {
"total": 550.0471223990044,
"count": 426,
"self": 295.91472856799214,
"children": {
"TorchPPOOptimizer.update": {
"total": 254.13239383101222,
"count": 22881,
"self": 254.13239383101222
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.529994713375345e-07,
"count": 1,
"self": 9.529994713375345e-07
},
"TrainerController._save_models": {
"total": 0.11482398899988766,
"count": 1,
"self": 0.0014732519994140603,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1133507370004736,
"count": 1,
"self": 0.1133507370004736
}
}
}
}
}
}
}