{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.9193209409713745,
"min": 0.9193209409713745,
"max": 2.8601338863372803,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8808.013671875,
"min": 8808.013671875,
"max": 29322.09375,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.930448532104492,
"min": 0.22459860146045685,
"max": 12.930448532104492,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2521.4375,
"min": 43.57212829589844,
"max": 2634.97998046875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06553240325507326,
"min": 0.0625059275756654,
"max": 0.0755836467352114,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.26212961302029303,
"min": 0.2570274986014114,
"max": 0.36655959638957547,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2123196436932274,
"min": 0.10607056042485341,
"max": 0.3009559067294878,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8492785747729096,
"min": 0.42428224169941364,
"max": 1.4137468755829568,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.34090909090909,
"min": 3.0681818181818183,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1115.0,
"min": 135.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.34090909090909,
"min": 3.0681818181818183,
"max": 25.563636363636363,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1115.0,
"min": 135.0,
"max": 1406.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1685236643",
"python_version": "3.10.11 (main, Apr 5 2023, 14:15:10) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1685237090"
},
"total": 446.565222761,
"count": 1,
"self": 0.4924770999999737,
"children": {
"run_training.setup": {
"total": 0.04067739499998879,
"count": 1,
"self": 0.04067739499998879
},
"TrainerController.start_learning": {
"total": 446.032068266,
"count": 1,
"self": 0.501746198001797,
"children": {
"TrainerController._reset_env": {
"total": 5.0707979490000525,
"count": 1,
"self": 5.0707979490000525
},
"TrainerController.advance": {
"total": 440.2443522819981,
"count": 18209,
"self": 0.2430332640110464,
"children": {
"env_step": {
"total": 440.00131901798704,
"count": 18209,
"self": 319.69615874396186,
"children": {
"SubprocessEnvManager._take_step": {
"total": 120.05974389300707,
"count": 18209,
"self": 1.6906239799897094,
"children": {
"TorchPolicy.evaluate": {
"total": 118.36911991301736,
"count": 18209,
"self": 118.36911991301736
}
}
},
"workers": {
"total": 0.24541638101811714,
"count": 18209,
"self": 0.0,
"children": {
"worker_root": {
"total": 444.4375564279884,
"count": 18209,
"is_parallel": true,
"self": 210.28989729300383,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.00642241399998511,
"count": 1,
"is_parallel": true,
"self": 0.004432661000464577,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001989752999520533,
"count": 10,
"is_parallel": true,
"self": 0.001989752999520533
}
}
},
"UnityEnvironment.step": {
"total": 0.033211131999905774,
"count": 1,
"is_parallel": true,
"self": 0.0005359399998496883,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002468390000558429,
"count": 1,
"is_parallel": true,
"self": 0.0002468390000558429
},
"communicator.exchange": {
"total": 0.030493062999994436,
"count": 1,
"is_parallel": true,
"self": 0.030493062999994436
},
"steps_from_proto": {
"total": 0.001935290000005807,
"count": 1,
"is_parallel": true,
"self": 0.00036360099977628124,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015716890002295258,
"count": 10,
"is_parallel": true,
"self": 0.0015716890002295258
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 234.14765913498456,
"count": 18208,
"is_parallel": true,
"self": 9.329205166987549,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.02749712099569,
"count": 18208,
"is_parallel": true,
"self": 5.02749712099569
},
"communicator.exchange": {
"total": 188.20071838499177,
"count": 18208,
"is_parallel": true,
"self": 188.20071838499177
},
"steps_from_proto": {
"total": 31.59023846200955,
"count": 18208,
"is_parallel": true,
"self": 5.945309370931454,
"children": {
"_process_rank_one_or_two_observation": {
"total": 25.644929091078097,
"count": 182080,
"is_parallel": true,
"self": 25.644929091078097
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.00012192400004096271,
"count": 1,
"self": 0.00012192400004096271,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 436.647018992942,
"count": 434431,
"is_parallel": true,
"self": 9.40867747597133,
"children": {
"process_trajectory": {
"total": 238.5617264489706,
"count": 434431,
"is_parallel": true,
"self": 237.21917949297062,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3425469559999783,
"count": 4,
"is_parallel": true,
"self": 1.3425469559999783
}
}
},
"_update_policy": {
"total": 188.67661506800005,
"count": 90,
"is_parallel": true,
"self": 71.67159794699535,
"children": {
"TorchPPOOptimizer.update": {
"total": 117.0050171210047,
"count": 4587,
"is_parallel": true,
"self": 117.0050171210047
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.21504991300002985,
"count": 1,
"self": 0.001092966999976852,
"children": {
"RLTrainer._checkpoint": {
"total": 0.213956946000053,
"count": 1,
"self": 0.213956946000053
}
}
}
}
}
}
}