{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.0103477239608765,
"min": 1.0103477239608765,
"max": 2.862990140914917,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 9602.3447265625,
"min": 9602.3447265625,
"max": 29225.404296875,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.203608512878418,
"min": 0.14564645290374756,
"max": 12.203608512878418,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2379.70361328125,
"min": 28.255413055419922,
"max": 2461.5263671875,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.15909090909091,
"min": 2.659090909090909,
"max": 25.15909090909091,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1107.0,
"min": 117.0,
"max": 1344.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.15909090909091,
"min": 2.659090909090909,
"max": 25.15909090909091,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1107.0,
"min": 117.0,
"max": 1344.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.05581574634796947,
"min": 0.04164787709466028,
"max": 0.05581574634796947,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.11163149269593894,
"min": 0.08329575418932056,
"max": 0.16213553127240363,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.20467460286967895,
"min": 0.09264251597992637,
"max": 0.297377472743392,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.4093492057393579,
"min": 0.18528503195985274,
"max": 0.8552808494252316,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 7.504097856000006e-06,
"min": 7.504097856000006e-06,
"max": 0.000338604003256,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.5008195712000011e-05,
"min": 1.5008195712000011e-05,
"max": 0.0008656620526680001,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10321600000000003,
"min": 0.10321600000000003,
"max": 0.24511600000000003,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.20643200000000006,
"min": 0.20643200000000006,
"max": 0.6709980000000001,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 7.410560000000005e-05,
"min": 7.410560000000005e-05,
"max": 0.0029026456,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0001482112000000001,
"min": 0.0001482112000000001,
"max": 0.0074252268,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1740589767",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1740590226"
},
"total": 459.3139847,
"count": 1,
"self": 0.5785673819998465,
"children": {
"run_training.setup": {
"total": 0.021315510000022186,
"count": 1,
"self": 0.021315510000022186
},
"TrainerController.start_learning": {
"total": 458.7141018080001,
"count": 1,
"self": 0.3675232179946306,
"children": {
"TrainerController._reset_env": {
"total": 2.0096789959998205,
"count": 1,
"self": 2.0096789959998205
},
"TrainerController.advance": {
"total": 456.2478828340056,
"count": 18192,
"self": 0.40215587000329833,
"children": {
"env_step": {
"total": 328.62178444798406,
"count": 18192,
"self": 250.32102939198126,
"children": {
"SubprocessEnvManager._take_step": {
"total": 78.07909805400323,
"count": 18192,
"self": 1.3718122779944224,
"children": {
"TorchPolicy.evaluate": {
"total": 76.7072857760088,
"count": 18192,
"self": 76.7072857760088
}
}
},
"workers": {
"total": 0.22165700199957428,
"count": 18192,
"self": 0.0,
"children": {
"worker_root": {
"total": 457.257652848004,
"count": 18192,
"is_parallel": true,
"self": 237.40227198500816,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022847979998914525,
"count": 1,
"is_parallel": true,
"self": 0.0006958409999242576,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015889569999671949,
"count": 10,
"is_parallel": true,
"self": 0.0015889569999671949
}
}
},
"UnityEnvironment.step": {
"total": 0.06750377299999855,
"count": 1,
"is_parallel": true,
"self": 0.0006104439999035094,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005445830001917784,
"count": 1,
"is_parallel": true,
"self": 0.0005445830001917784
},
"communicator.exchange": {
"total": 0.06428691899986916,
"count": 1,
"is_parallel": true,
"self": 0.06428691899986916
},
"steps_from_proto": {
"total": 0.0020618270000341,
"count": 1,
"is_parallel": true,
"self": 0.00042797900050572935,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0016338479995283706,
"count": 10,
"is_parallel": true,
"self": 0.0016338479995283706
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 219.85538086299584,
"count": 18191,
"is_parallel": true,
"self": 10.406624329931674,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 5.978913874002501,
"count": 18191,
"is_parallel": true,
"self": 5.978913874002501
},
"communicator.exchange": {
"total": 168.4059613090235,
"count": 18191,
"is_parallel": true,
"self": 168.4059613090235
},
"steps_from_proto": {
"total": 35.06388135003817,
"count": 18191,
"is_parallel": true,
"self": 6.2768995181143055,
"children": {
"_process_rank_one_or_two_observation": {
"total": 28.786981831923868,
"count": 181910,
"is_parallel": true,
"self": 28.786981831923868
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 127.22394251601827,
"count": 18192,
"self": 0.44014550303336364,
"children": {
"process_trajectory": {
"total": 28.84959258898516,
"count": 18192,
"self": 28.60243235898497,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2471602300001905,
"count": 2,
"self": 0.2471602300001905
}
}
},
"_update_policy": {
"total": 97.93420442399974,
"count": 45,
"self": 54.94231066199427,
"children": {
"TorchPPOOptimizer.update": {
"total": 42.991893762005475,
"count": 3056,
"self": 42.991893762005475
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.51999936660286e-07,
"count": 1,
"self": 9.51999936660286e-07
},
"TrainerController._save_models": {
"total": 0.08901580800011288,
"count": 1,
"self": 0.0007264780001605686,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08828932999995232,
"count": 1,
"self": 0.08828932999995232
}
}
}
}
}
}
}