{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.5378670692443848,
"min": 0.25169727206230164,
"max": 0.8338563442230225,
"count": 205
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 5455.0478515625,
"min": 2408.742919921875,
"max": 8392.9716796875,
"count": 205
},
"SnowballTarget.Step.mean": {
"value": 4049936.0,
"min": 2009960.0,
"max": 4049936.0,
"count": 205
},
"SnowballTarget.Step.sum": {
"value": 4049936.0,
"min": 2009960.0,
"max": 4049936.0,
"count": 205
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 14.551513671875,
"min": 9.390125274658203,
"max": 14.746602058410645,
"count": 205
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2822.99365234375,
"min": 1924.9755859375,
"max": 3022.7861328125,
"count": 205
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 205
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 205
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.06759569437018204,
"min": 0.06010027918062977,
"max": 0.08732905397454149,
"count": 205
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.27038277748072814,
"min": 0.24040111672251907,
"max": 0.3776409532326986,
"count": 205
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.1507860389264191,
"min": 0.1247357524858386,
"max": 0.7414896927627863,
"count": 205
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.6031441557056764,
"min": 0.4989430099433544,
"max": 3.7074484638139316,
"count": 205
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 0.00019097208090280006,
"min": 0.00019097208090280006,
"max": 0.0005988520401148,
"count": 205
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 0.0007638883236112002,
"min": 0.0007638883236112002,
"max": 0.0029755602024440003,
"count": 205
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.11909720000000001,
"min": 0.11909720000000001,
"max": 0.15988520000000003,
"count": 205
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.47638880000000006,
"min": 0.47638880000000006,
"max": 0.7975560000000002,
"count": 205
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0009629502800000004,
"min": 0.0009629502800000004,
"max": 0.00299827148,
"count": 205
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0038518011200000014,
"min": 0.0038518011200000014,
"max": 0.014898044400000001,
"count": 205
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 28.636363636363637,
"min": 10.872727272727273,
"max": 29.145454545454545,
"count": 205
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1260.0,
"min": 579.0,
"max": 1603.0,
"count": 205
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 28.636363636363637,
"min": 10.872727272727273,
"max": 29.145454545454545,
"count": 205
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1260.0,
"min": 579.0,
"max": 1603.0,
"count": 205
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 205
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 205
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1759641718",
"python_version": "3.10.12 (main, Aug 15 2025, 14:32:43) [GCC 11.4.0]",
"command_line_arguments": "/content/drive/MyDrive/ColabNotebooks/DeepRL/unity_ml_agents/ml-agents/ml-agents/mlagents/trainers/learn.py config/ppo/SnowballTarget.yaml --env=training-envs-executables/linux/SnowballTarget/SnowballTarget.x86_64 --run-id=SnowballTarget1 --resume --no-graphics --time-scale=20",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.2.2+cu118",
"numpy_version": "1.23.5",
"end_time_seconds": "1759648498"
},
"total": 6780.427215903001,
"count": 1,
"self": 0.023770914998749504,
"children": {
"run_training.setup": {
"total": 0.03404056700128422,
"count": 1,
"self": 0.03404056700128422
},
"TrainerController.start_learning": {
"total": 6780.369404421001,
"count": 1,
"self": 9.438188089317919,
"children": {
"TrainerController._reset_env": {
"total": 4.192950361999465,
"count": 1,
"self": 4.192950361999465
},
"TrainerController.advance": {
"total": 6766.522339763684,
"count": 187259,
"self": 4.6685647021568,
"children": {
"env_step": {
"total": 6761.853775061527,
"count": 187259,
"self": 5480.371305527964,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1276.6703044718233,
"count": 187259,
"self": 25.201322214867105,
"children": {
"TorchPolicy.evaluate": {
"total": 1251.4689822569562,
"count": 187259,
"self": 1251.4689822569562
}
}
},
"workers": {
"total": 4.812165061739506,
"count": 187259,
"self": 0.0,
"children": {
"worker_root": {
"total": 6743.494264420429,
"count": 187259,
"is_parallel": true,
"self": 2837.1724640803022,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002290477999849827,
"count": 1,
"is_parallel": true,
"self": 0.0007419679986924166,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015485100011574104,
"count": 10,
"is_parallel": true,
"self": 0.0015485100011574104
}
}
},
"UnityEnvironment.step": {
"total": 0.04480024599979515,
"count": 1,
"is_parallel": true,
"self": 0.0006656960013060598,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004623589993570931,
"count": 1,
"is_parallel": true,
"self": 0.0004623589993570931
},
"communicator.exchange": {
"total": 0.0414822330003517,
"count": 1,
"is_parallel": true,
"self": 0.0414822330003517
},
"steps_from_proto": {
"total": 0.002189957998780301,
"count": 1,
"is_parallel": true,
"self": 0.00042489800034672953,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0017650599984335713,
"count": 10,
"is_parallel": true,
"self": 0.0017650599984335713
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3906.321800340127,
"count": 187258,
"is_parallel": true,
"self": 164.54901427960613,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 83.51316091107583,
"count": 187258,
"is_parallel": true,
"self": 83.51316091107583
},
"communicator.exchange": {
"total": 3053.320768955846,
"count": 187258,
"is_parallel": true,
"self": 3053.320768955846
},
"steps_from_proto": {
"total": 604.9388561935993,
"count": 187258,
"is_parallel": true,
"self": 116.33661882368142,
"children": {
"_process_rank_one_or_two_observation": {
"total": 488.60223736991793,
"count": 1872580,
"is_parallel": true,
"self": 488.60223736991793
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 6.0630000007222407e-05,
"count": 1,
"self": 6.0630000007222407e-05,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 6710.570346064002,
"count": 5475989,
"is_parallel": true,
"self": 143.0828769158761,
"children": {
"process_trajectory": {
"total": 3645.7056422951628,
"count": 5475990,
"is_parallel": true,
"self": 3644.880095744167,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8255465509955684,
"count": 4,
"is_parallel": true,
"self": 0.8255465509955684
}
}
},
"_update_policy": {
"total": 2921.7818268529627,
"count": 935,
"is_parallel": true,
"self": 622.6791587437492,
"children": {
"TorchPPOOptimizer.update": {
"total": 2299.1026681092135,
"count": 47717,
"is_parallel": true,
"self": 2299.1026681092135
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.2158655759994872,
"count": 1,
"self": 0.020677766000517295,
"children": {
"RLTrainer._checkpoint": {
"total": 0.1951878099989699,
"count": 1,
"self": 0.1951878099989699
}
}
}
}
}
}
}