{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8727447986602783,
"min": 0.8727447986602783,
"max": 2.8613224029541016,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8294.56640625,
"min": 8294.56640625,
"max": 29208.37890625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.785255432128906,
"min": 0.3782866299152374,
"max": 12.785255432128906,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2493.124755859375,
"min": 73.38760375976562,
"max": 2589.20849609375,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06388506883048845,
"min": 0.06035856703459229,
"max": 0.07357280870201066,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2555402753219538,
"min": 0.24143426813836916,
"max": 0.36199274625830064,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2165125553660533,
"min": 0.12498371009532289,
"max": 0.2683227203640284,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.8660502214642132,
"min": 0.49993484038129155,
"max": 1.3416136018201419,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.65909090909091,
"min": 3.6363636363636362,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1129.0,
"min": 160.0,
"max": 1360.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.65909090909091,
"min": 3.6363636363636362,
"max": 25.65909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1129.0,
"min": 160.0,
"max": 1360.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1757330305",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/home/test/Saranya/reinforcement_learning/rl/bin/mlagents-learn ./ml-agents/config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.8.0+cu128",
"numpy_version": "1.23.5",
"end_time_seconds": "1757330475"
},
"total": 169.5981690690005,
"count": 1,
"self": 0.21805955700165214,
"children": {
"run_training.setup": {
"total": 0.014224560999537061,
"count": 1,
"self": 0.014224560999537061
},
"TrainerController.start_learning": {
"total": 169.36588495099932,
"count": 1,
"self": 0.20889247895775043,
"children": {
"TrainerController._reset_env": {
"total": 1.0328936349997093,
"count": 1,
"self": 1.0328936349997093
},
"TrainerController.advance": {
"total": 168.049249900042,
"count": 18192,
"self": 0.19215139793050184,
"children": {
"env_step": {
"total": 116.75762241404755,
"count": 18192,
"self": 100.69143021109903,
"children": {
"SubprocessEnvManager._take_step": {
"total": 15.947935846978908,
"count": 18192,
"self": 0.6236601839309515,
"children": {
"TorchPolicy.evaluate": {
"total": 15.324275663047956,
"count": 18192,
"self": 15.324275663047956
}
}
},
"workers": {
"total": 0.11825635596960637,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 168.9586544641079,
"count": 18192,
"is_parallel": true,
"self": 82.16869625320487,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0010454580005898606,
"count": 1,
"is_parallel": true,
"self": 0.0002933020005002618,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007521560000895988,
"count": 10,
"is_parallel": true,
"self": 0.0007521560000895988
}
}
},
"UnityEnvironment.step": {
"total": 0.015403387999867846,
"count": 1,
"is_parallel": true,
"self": 0.0003095739994023461,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.000148124000588723,
"count": 1,
"is_parallel": true,
"self": 0.000148124000588723
},
"communicator.exchange": {
"total": 0.014037715000085882,
"count": 1,
"is_parallel": true,
"self": 0.014037715000085882
},
"steps_from_proto": {
"total": 0.0009079749997908948,
"count": 1,
"is_parallel": true,
"self": 0.00019619500108092325,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0007117799987099716,
"count": 10,
"is_parallel": true,
"self": 0.0007117799987099716
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 86.78995821090302,
"count": 18191,
"is_parallel": true,
"self": 3.6155736228738533,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 1.8516307510935803,
"count": 18191,
"is_parallel": true,
"self": 1.8516307510935803
},
"communicator.exchange": {
"total": 71.07513371305322,
"count": 18191,
"is_parallel": true,
"self": 71.07513371305322
},
"steps_from_proto": {
"total": 10.24762012388237,
"count": 18191,
"is_parallel": true,
"self": 2.0109742474969607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 8.23664587638541,
"count": 181910,
"is_parallel": true,
"self": 8.23664587638541
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 51.099476088063966,
"count": 18192,
"self": 0.23307140512224578,
"children": {
"process_trajectory": {
"total": 11.187480078947374,
"count": 18192,
"self": 11.054367379947507,
"children": {
"RLTrainer._checkpoint": {
"total": 0.13311269899986655,
"count": 4,
"self": 0.13311269899986655
}
}
},
"_update_policy": {
"total": 39.678924603994346,
"count": 90,
"self": 18.539226840989613,
"children": {
"TorchPPOOptimizer.update": {
"total": 21.139697763004733,
"count": 4587,
"self": 21.139697763004733
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0650001058820635e-06,
"count": 1,
"self": 1.0650001058820635e-06
},
"TrainerController._save_models": {
"total": 0.07484787199973653,
"count": 1,
"self": 0.00042503999975451734,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07442283199998201,
"count": 1,
"self": 0.07442283199998201
}
}
}
}
}
}
}