{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.079076886177063,
"min": 1.079076886177063,
"max": 1.8403242826461792,
"count": 16
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 10314.8955078125,
"min": 10314.8955078125,
"max": 16503.14453125,
"count": 16
},
"SnowballTarget.Step.mean": {
"value": 199960.0,
"min": 49976.0,
"max": 199960.0,
"count": 16
},
"SnowballTarget.Step.sum": {
"value": 199960.0,
"min": 49976.0,
"max": 199960.0,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.485359191894531,
"min": 5.538041591644287,
"max": 11.485359191894531,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2239.64501953125,
"min": 875.0105590820312,
"max": 2325.2490234375,
"count": 16
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 16
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 6567.0,
"max": 10945.0,
"count": 16
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.07394885309737648,
"min": 0.06376243962067212,
"max": 0.0745559073780789,
"count": 16
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2957954123895059,
"min": 0.22012230727009674,
"max": 0.36464871082417255,
"count": 16
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.23174436163047657,
"min": 0.19375390019815636,
"max": 0.2945854187459119,
"count": 16
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.9269774465219063,
"min": 0.7750156007926254,
"max": 1.39026350352694,
"count": 16
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 1.3690097262000011e-05,
"min": 1.3690097262000011e-05,
"max": 0.000384940023012,
"count": 16
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 5.4760389048000044e-05,
"min": 5.4760389048000044e-05,
"max": 0.0018147001370599997,
"count": 16
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10273800000000001,
"min": 0.10273800000000001,
"max": 0.176988,
"count": 16
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41095200000000004,
"min": 0.41095200000000004,
"max": 0.8629400000000002,
"count": 16
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001466262000000001,
"min": 0.0001466262000000001,
"max": 0.0038517012000000004,
"count": 16
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005865048000000004,
"min": 0.0005865048000000004,
"max": 0.018160706000000002,
"count": 16
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.318181818181817,
"min": 12.242424242424242,
"max": 22.818181818181817,
"count": 16
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 982.0,
"min": 404.0,
"max": 1255.0,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.318181818181817,
"min": 12.242424242424242,
"max": 22.818181818181817,
"count": 16
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 982.0,
"min": 404.0,
"max": 1255.0,
"count": 16
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 16
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1705324491",
"python_version": "3.10.12 (main, Nov 20 2023, 15:14:05) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --resume",
"mlagents_version": "1.1.0.dev0",
"mlagents_envs_version": "1.1.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.1.2+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1705325032"
},
"total": 541.0458172010003,
"count": 1,
"self": 0.5848272070002167,
"children": {
"run_training.setup": {
"total": 0.08440067599985923,
"count": 1,
"self": 0.08440067599985923
},
"TrainerController.start_learning": {
"total": 540.3765893180002,
"count": 1,
"self": 0.8068774269977439,
"children": {
"TrainerController._reset_env": {
"total": 2.6759859320000032,
"count": 1,
"self": 2.6759859320000032
},
"TrainerController.advance": {
"total": 536.7887518100024,
"count": 14401,
"self": 0.4164147919973402,
"children": {
"env_step": {
"total": 536.372337018005,
"count": 14401,
"self": 425.9195179250073,
"children": {
"SubprocessEnvManager._take_step": {
"total": 110.07432684703781,
"count": 14401,
"self": 2.1850499850625056,
"children": {
"TorchPolicy.evaluate": {
"total": 107.8892768619753,
"count": 14401,
"self": 107.8892768619753
}
}
},
"workers": {
"total": 0.3784922459599329,
"count": 14401,
"self": 0.0,
"children": {
"worker_root": {
"total": 538.4271402390737,
"count": 14401,
"is_parallel": true,
"self": 263.97798676404864,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002854946999832464,
"count": 1,
"is_parallel": true,
"self": 0.0008778519991210487,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001977095000711415,
"count": 10,
"is_parallel": true,
"self": 0.001977095000711415
}
}
},
"UnityEnvironment.step": {
"total": 0.047459396999784076,
"count": 1,
"is_parallel": true,
"self": 0.0007988570000634354,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00048310300007869955,
"count": 1,
"is_parallel": true,
"self": 0.00048310300007869955
},
"communicator.exchange": {
"total": 0.043685710999852745,
"count": 1,
"is_parallel": true,
"self": 0.043685710999852745
},
"steps_from_proto": {
"total": 0.002491725999789196,
"count": 1,
"is_parallel": true,
"self": 0.0005077320001873886,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0019839939996018074,
"count": 10,
"is_parallel": true,
"self": 0.0019839939996018074
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 274.449153475025,
"count": 14400,
"is_parallel": true,
"self": 12.664060205113856,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 6.569676703962159,
"count": 14400,
"is_parallel": true,
"self": 6.569676703962159
},
"communicator.exchange": {
"total": 217.42812241693628,
"count": 14400,
"is_parallel": true,
"self": 217.42812241693628
},
"steps_from_proto": {
"total": 37.78729414901272,
"count": 14400,
"is_parallel": true,
"self": 7.661794797009861,
"children": {
"_process_rank_one_or_two_observation": {
"total": 30.125499352002862,
"count": 144000,
"is_parallel": true,
"self": 30.125499352002862
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00022824500001661363,
"count": 1,
"self": 0.00022824500001661363,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 530.6407981292173,
"count": 717106,
"is_parallel": true,
"self": 15.713632937146485,
"children": {
"process_trajectory": {
"total": 273.5499356680716,
"count": 717106,
"is_parallel": true,
"self": 272.69790154907196,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8520341189996543,
"count": 4,
"is_parallel": true,
"self": 0.8520341189996543
}
}
},
"_update_policy": {
"total": 241.37722952399918,
"count": 71,
"is_parallel": true,
"self": 68.29432320798605,
"children": {
"TorchPPOOptimizer.update": {
"total": 173.08290631601312,
"count": 4824,
"is_parallel": true,
"self": 173.08290631601312
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1047459040000831,
"count": 1,
"self": 0.0018855130001611542,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10286039099992195,
"count": 1,
"self": 0.10286039099992195
}
}
}
}
}
}
}