{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8138673901557922,
"min": 0.8005954027175903,
"max": 0.8235114812850952,
"count": 19
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8361.673828125,
"min": 6552.91015625,
"max": 8427.986328125,
"count": 19
},
"SnowballTarget.Step.mean": {
"value": 999992.0,
"min": 819976.0,
"max": 999992.0,
"count": 19
},
"SnowballTarget.Step.sum": {
"value": 999992.0,
"min": 819976.0,
"max": 999992.0,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.193025588989258,
"min": 14.180804252624512,
"max": 14.40643310546875,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2909.5703125,
"min": 2146.55859375,
"max": 2930.494873046875,
"count": 19
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 19
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 6567.0,
"max": 10945.0,
"count": 19
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 27.87272727272727,
"min": 26.745454545454546,
"max": 27.87272727272727,
"count": 19
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1533.0,
"min": 894.0,
"max": 1533.0,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.485926123098892,
"min": 27.33385359807448,
"max": 28.485926123098892,
"count": 19
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1566.7259367704391,
"min": 913.6679664850235,
"max": 1566.7259367704391,
"count": 19
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 19
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 19
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.04594731732751384,
"min": 0.044273665204544906,
"max": 0.053823788311671124,
"count": 18
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.04594731732751384,
"min": 0.044273665204544906,
"max": 0.09959654017517702,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.24564173311898202,
"min": 0.23254177736885406,
"max": 0.3044277711002797,
"count": 18
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.24564173311898202,
"min": 0.23254177736885406,
"max": 0.6088555422005594,
"count": 18
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 2.2209925999999891e-07,
"min": 2.2209925999999891e-07,
"max": 5.254402485599999e-06,
"count": 18
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 2.2209925999999891e-07,
"min": 2.2209925999999891e-07,
"max": 1.0508804971199998e-05,
"count": 18
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10036999999999997,
"min": 0.10036999999999997,
"max": 0.10875720000000003,
"count": 18
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.10036999999999997,
"min": 0.10036999999999997,
"max": 0.21751440000000005,
"count": 18
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00015792599999999923,
"min": 0.00015792599999999923,
"max": 0.0035111285599999987,
"count": 18
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.00015792599999999923,
"min": 0.00015792599999999923,
"max": 0.007022257119999997,
"count": 18
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1731662610",
"python_version": "3.10.12 (main, Sep 11 2024, 15:47:36) [GCC 11.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget3.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget3 --no-graphics --resume",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.5.0+cu121",
"numpy_version": "1.23.5",
"end_time_seconds": "1731663067"
},
"total": 456.3888227039997,
"count": 1,
"self": 0.7171841759991366,
"children": {
"run_training.setup": {
"total": 0.0854702910000924,
"count": 1,
"self": 0.0854702910000924
},
"TrainerController.start_learning": {
"total": 455.5861682370005,
"count": 1,
"self": 0.4975255839553938,
"children": {
"TrainerController._reset_env": {
"total": 2.162518302000535,
"count": 1,
"self": 2.162518302000535
},
"TrainerController.advance": {
"total": 452.8023012030444,
"count": 17141,
"self": 0.24580430904461537,
"children": {
"env_step": {
"total": 452.5564968939998,
"count": 17141,
"self": 307.00740788777057,
"children": {
"SubprocessEnvManager._take_step": {
"total": 145.2975660201364,
"count": 17141,
"self": 1.3256831071548731,
"children": {
"TorchPolicy.evaluate": {
"total": 143.97188291298153,
"count": 17141,
"self": 143.97188291298153
}
}
},
"workers": {
"total": 0.251522986092823,
"count": 17141,
"self": 0.0,
"children": {
"worker_root": {
"total": 454.5149997389226,
"count": 17141,
"is_parallel": true,
"self": 258.6602134329305,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0021595989992420073,
"count": 1,
"is_parallel": true,
"self": 0.0006339549991025706,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015256440001394367,
"count": 10,
"is_parallel": true,
"self": 0.0015256440001394367
}
}
},
"UnityEnvironment.step": {
"total": 0.03778493000027083,
"count": 1,
"is_parallel": true,
"self": 0.0006341050011542393,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003842309997708071,
"count": 1,
"is_parallel": true,
"self": 0.0003842309997708071
},
"communicator.exchange": {
"total": 0.03484560299966688,
"count": 1,
"is_parallel": true,
"self": 0.03484560299966688
},
"steps_from_proto": {
"total": 0.0019209909996789065,
"count": 1,
"is_parallel": true,
"self": 0.0003637410009105224,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001557249998768384,
"count": 10,
"is_parallel": true,
"self": 0.001557249998768384
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 195.8547863059921,
"count": 17140,
"is_parallel": true,
"self": 9.158067409049181,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 4.742068792984355,
"count": 17140,
"is_parallel": true,
"self": 4.742068792984355
},
"communicator.exchange": {
"total": 152.02819800496036,
"count": 17140,
"is_parallel": true,
"self": 152.02819800496036
},
"steps_from_proto": {
"total": 29.926452098998197,
"count": 17140,
"is_parallel": true,
"self": 5.547426242139409,
"children": {
"_process_rank_one_or_two_observation": {
"total": 24.37902585685879,
"count": 171400,
"is_parallel": true,
"self": 24.37902585685879
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.567599979869556e-05,
"count": 1,
"self": 8.567599979869556e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 445.006484520859,
"count": 905101,
"is_parallel": true,
"self": 18.988185667437392,
"children": {
"process_trajectory": {
"total": 295.59006721842434,
"count": 905101,
"is_parallel": true,
"self": 294.65669273542426,
"children": {
"RLTrainer._checkpoint": {
"total": 0.9333744830000796,
"count": 4,
"is_parallel": true,
"self": 0.9333744830000796
}
}
},
"_update_policy": {
"total": 130.42823163499725,
"count": 21,
"is_parallel": true,
"self": 68.5583731740171,
"children": {
"TorchPPOOptimizer.update": {
"total": 61.86985846098014,
"count": 3480,
"is_parallel": true,
"self": 61.86985846098014
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.12373747200035723,
"count": 1,
"self": 0.0019653560002552695,
"children": {
"RLTrainer._checkpoint": {
"total": 0.12177211600010196,
"count": 1,
"self": 0.12177211600010196
}
}
}
}
}
}
}