{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3710872530937195,
"min": 0.35479119420051575,
"max": 1.5143848657608032,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 11174.1796875,
"min": 10626.7060546875,
"max": 45940.37890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989995.0,
"min": 29952.0,
"max": 989995.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989995.0,
"min": 29952.0,
"max": 989995.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.557255744934082,
"min": -0.14526034891605377,
"max": 0.557255744934082,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 157.1461181640625,
"min": -34.42670440673828,
"max": 157.1461181640625,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.03877447172999382,
"min": -0.006362123414874077,
"max": 0.36770954728126526,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 10.934401512145996,
"min": -1.7177733182907104,
"max": 87.14716339111328,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0672745228661808,
"min": 0.06501670798158611,
"max": 0.07346477177524312,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9418433201265312,
"min": 0.4956838974572622,
"max": 1.0732063263179346,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.01629458488845945,
"min": 0.0006526181452389054,
"max": 0.01629458488845945,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.22812418843843227,
"min": 0.00848403588810577,
"max": 0.22812418843843227,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.612726033885715e-06,
"min": 7.612726033885715e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010657816447440001,
"min": 0.00010657816447440001,
"max": 0.0035084789305070995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10253754285714287,
"min": 0.10253754285714287,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355256,
"min": 1.3886848,
"max": 2.5694928999999997,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026350053142857146,
"min": 0.00026350053142857146,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.00368900744,
"min": 0.00368900744,
"max": 0.11697234071000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013825265690684319,
"min": 0.013825265690684319,
"max": 0.38135233521461487,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.19355371594429016,
"min": 0.19355371594429016,
"max": 2.669466257095337,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 334.0113636363636,
"min": 334.0113636363636,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29393.0,
"min": 15984.0,
"max": 33057.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6432522562417118,
"min": -1.0000000521540642,
"max": 1.6432522562417118,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 144.60619854927063,
"min": -30.46620174497366,
"max": 144.60619854927063,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6432522562417118,
"min": -1.0000000521540642,
"max": 1.6432522562417118,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 144.60619854927063,
"min": -30.46620174497366,
"max": 144.60619854927063,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.047644139063446674,
"min": 0.047644139063446674,
"max": 7.428159176371992,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.1926842375833075,
"min": 4.1926842375833075,
"max": 118.85054682195187,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1703515782",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1703517998"
},
"total": 2215.7548088470003,
"count": 1,
"self": 0.4794476430006398,
"children": {
"run_training.setup": {
"total": 0.07070936500008429,
"count": 1,
"self": 0.07070936500008429
},
"TrainerController.start_learning": {
"total": 2215.204651839,
"count": 1,
"self": 1.359752002966161,
"children": {
"TrainerController._reset_env": {
"total": 2.3331976160000067,
"count": 1,
"self": 2.3331976160000067
},
"TrainerController.advance": {
"total": 2211.4257492580336,
"count": 63766,
"self": 1.4513701620035135,
"children": {
"env_step": {
"total": 1570.8221303140824,
"count": 63766,
"self": 1436.3440708690725,
"children": {
"SubprocessEnvManager._take_step": {
"total": 133.61226920101285,
"count": 63766,
"self": 4.84103026803723,
"children": {
"TorchPolicy.evaluate": {
"total": 128.77123893297562,
"count": 62554,
"self": 128.77123893297562
}
}
},
"workers": {
"total": 0.8657902439970258,
"count": 63766,
"self": 0.0,
"children": {
"worker_root": {
"total": 2209.7449179168934,
"count": 63766,
"is_parallel": true,
"self": 896.1744166428964,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017345950000162702,
"count": 1,
"is_parallel": true,
"self": 0.0005544049997752154,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011801900002410548,
"count": 8,
"is_parallel": true,
"self": 0.0011801900002410548
}
}
},
"UnityEnvironment.step": {
"total": 0.05237587900001017,
"count": 1,
"is_parallel": true,
"self": 0.0006416359999548149,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005207960000461753,
"count": 1,
"is_parallel": true,
"self": 0.0005207960000461753
},
"communicator.exchange": {
"total": 0.04948841400005222,
"count": 1,
"is_parallel": true,
"self": 0.04948841400005222
},
"steps_from_proto": {
"total": 0.0017250329999569658,
"count": 1,
"is_parallel": true,
"self": 0.000370162000081109,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013548709998758568,
"count": 8,
"is_parallel": true,
"self": 0.0013548709998758568
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1313.570501273997,
"count": 63765,
"is_parallel": true,
"self": 34.69091422898123,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 25.305957065010944,
"count": 63765,
"is_parallel": true,
"self": 25.305957065010944
},
"communicator.exchange": {
"total": 1152.4164550949642,
"count": 63765,
"is_parallel": true,
"self": 1152.4164550949642
},
"steps_from_proto": {
"total": 101.1571748850406,
"count": 63765,
"is_parallel": true,
"self": 20.606153722172394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 80.55102116286821,
"count": 510120,
"is_parallel": true,
"self": 80.55102116286821
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 639.152248781948,
"count": 63766,
"self": 2.610875363061041,
"children": {
"process_trajectory": {
"total": 128.0266002138983,
"count": 63766,
"self": 127.8289334128981,
"children": {
"RLTrainer._checkpoint": {
"total": 0.19766680100019585,
"count": 2,
"self": 0.19766680100019585
}
}
},
"_update_policy": {
"total": 508.5147732049886,
"count": 450,
"self": 305.11276038298047,
"children": {
"TorchPPOOptimizer.update": {
"total": 203.40201282200815,
"count": 22803,
"self": 203.40201282200815
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.61000296229031e-07,
"count": 1,
"self": 9.61000296229031e-07
},
"TrainerController._save_models": {
"total": 0.08595200099989597,
"count": 1,
"self": 0.0016194410000025528,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08433255999989342,
"count": 1,
"self": 0.08433255999989342
}
}
}
}
}
}
}