{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.708141565322876,
"min": 0.6551461219787598,
"max": 2.8875668048858643,
"count": 50
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 6815.86279296875,
"min": 6255.3349609375,
"max": 29571.572265625,
"count": 50
},
"SnowballTarget.Step.mean": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Step.sum": {
"value": 499976.0,
"min": 9952.0,
"max": 499976.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 13.123973846435547,
"min": 0.2898223102092743,
"max": 13.277969360351562,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2690.41455078125,
"min": 56.225528717041016,
"max": 2717.501953125,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 50
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 10945.0,
"min": 8756.0,
"max": 10945.0,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.014798153882535794,
"min": 0.012497158107968668,
"max": 0.025443847368781768,
"count": 50
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.07399076941267897,
"min": 0.05013531381458354,
"max": 0.12721923684390885,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.2044389699896177,
"min": 0.08696539141237737,
"max": 0.3059546291828156,
"count": 50
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 1.0221948499480884,
"min": 0.3478615656495095,
"max": 1.5297731459140778,
"count": 50
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 3.0528989824000023e-06,
"min": 3.0528989824000023e-06,
"max": 0.0002967528010823999,
"count": 50
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 1.526449491200001e-05,
"min": 1.526449491200001e-05,
"max": 0.001454064015312,
"count": 50
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10101760000000001,
"min": 0.10101760000000001,
"max": 0.19891760000000003,
"count": 50
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.5050880000000001,
"min": 0.4119904000000001,
"max": 0.9846880000000001,
"count": 50
},
"SnowballTarget.Policy.Beta.mean": {
"value": 6.0778240000000044e-05,
"min": 6.0778240000000044e-05,
"max": 0.00494598824,
"count": 50
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0003038912000000002,
"min": 0.0003038912000000002,
"max": 0.024235931199999998,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.69090909090909,
"min": 2.2045454545454546,
"max": 26.38888888888889,
"count": 50
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1413.0,
"min": 97.0,
"max": 1436.0,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.69090909090909,
"min": 2.2045454545454546,
"max": 26.38888888888889,
"count": 50
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1413.0,
"min": 97.0,
"max": 1436.0,
"count": 50
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 50
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1675522831",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1675523866"
},
"total": 1035.810128657,
"count": 1,
"self": 0.38698001399984605,
"children": {
"run_training.setup": {
"total": 0.10912781000001814,
"count": 1,
"self": 0.10912781000001814
},
"TrainerController.start_learning": {
"total": 1035.314020833,
"count": 1,
"self": 1.2451135469898418,
"children": {
"TrainerController._reset_env": {
"total": 10.595690019000017,
"count": 1,
"self": 10.595690019000017
},
"TrainerController.advance": {
"total": 1023.3591618480103,
"count": 45474,
"self": 0.6327646199879382,
"children": {
"env_step": {
"total": 1022.7263972280224,
"count": 45474,
"self": 683.9843644000368,
"children": {
"SubprocessEnvManager._take_step": {
"total": 338.1040110310145,
"count": 45474,
"self": 3.523703360018885,
"children": {
"TorchPolicy.evaluate": {
"total": 334.58030767099564,
"count": 45474,
"self": 78.83782503900017,
"children": {
"TorchPolicy.sample_actions": {
"total": 255.74248263199547,
"count": 45474,
"self": 255.74248263199547
}
}
}
}
},
"workers": {
"total": 0.6380217969709747,
"count": 45474,
"self": 0.0,
"children": {
"worker_root": {
"total": 1032.2931027280104,
"count": 45474,
"is_parallel": true,
"self": 458.22532116501304,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.007232008000016776,
"count": 1,
"is_parallel": true,
"self": 0.004536718999872846,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00269528900014393,
"count": 10,
"is_parallel": true,
"self": 0.00269528900014393
}
}
},
"UnityEnvironment.step": {
"total": 0.04282062400000086,
"count": 1,
"is_parallel": true,
"self": 0.000332410999988042,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002895599999988008,
"count": 1,
"is_parallel": true,
"self": 0.0002895599999988008
},
"communicator.exchange": {
"total": 0.04058204100005014,
"count": 1,
"is_parallel": true,
"self": 0.04058204100005014
},
"steps_from_proto": {
"total": 0.0016166119999638795,
"count": 1,
"is_parallel": true,
"self": 0.0004042539999886685,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001212357999975211,
"count": 10,
"is_parallel": true,
"self": 0.001212357999975211
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 574.0677815629973,
"count": 45473,
"is_parallel": true,
"self": 20.520609007996768,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 12.009731023997745,
"count": 45473,
"is_parallel": true,
"self": 12.009731023997745
},
"communicator.exchange": {
"total": 469.22703291598646,
"count": 45473,
"is_parallel": true,
"self": 469.22703291598646
},
"steps_from_proto": {
"total": 72.3104086150164,
"count": 45473,
"is_parallel": true,
"self": 15.644043280944686,
"children": {
"_process_rank_one_or_two_observation": {
"total": 56.66636533407171,
"count": 454730,
"is_parallel": true,
"self": 56.66636533407171
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 3.542499985087488e-05,
"count": 1,
"self": 3.542499985087488e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 1012.743429081972,
"count": 1246553,
"is_parallel": true,
"self": 30.328763108005887,
"children": {
"process_trajectory": {
"total": 758.3694598919669,
"count": 1246553,
"is_parallel": true,
"self": 756.3947013539673,
"children": {
"RLTrainer._checkpoint": {
"total": 1.9747585379996622,
"count": 10,
"is_parallel": true,
"self": 1.9747585379996622
}
}
},
"_update_policy": {
"total": 224.04520608199914,
"count": 227,
"is_parallel": true,
"self": 155.18561309799856,
"children": {
"TorchPPOOptimizer.update": {
"total": 68.85959298400059,
"count": 1362,
"is_parallel": true,
"self": 68.85959298400059
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.1140199940000457,
"count": 1,
"self": 0.000812998000128573,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11320699599991713,
"count": 1,
"self": 0.11320699599991713
}
}
}
}
}
}
}