{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 1.5577729940414429,
"min": 1.5577729940414429,
"max": 2.8711555004119873,
"count": 24
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8327.8544921875,
"min": 7152.794921875,
"max": 17492.96484375,
"count": 24
},
"SnowballTarget.Step.mean": {
"value": 119952.0,
"min": 4976.0,
"max": 119952.0,
"count": 24
},
"SnowballTarget.Step.sum": {
"value": 119952.0,
"min": 4976.0,
"max": 119952.0,
"count": 24
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 11.330068588256836,
"min": 0.4573516249656677,
"max": 11.330068588256836,
"count": 24
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 815.7649536132812,
"min": 32.929317474365234,
"max": 902.7938842773438,
"count": 24
},
"SnowballTarget.Policy.CuriosityValueEstimate.mean": {
"value": 0.11255823075771332,
"min": 0.10959342122077942,
"max": 0.25435778498649597,
"count": 24
},
"SnowballTarget.Policy.CuriosityValueEstimate.sum": {
"value": 8.104192733764648,
"min": 7.890726089477539,
"max": 18.31376075744629,
"count": 24
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 24
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 6368.0,
"min": 4378.0,
"max": 6567.0,
"count": 24
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0906716643233696,
"min": 0.08936119078574505,
"max": 0.10692422976172196,
"count": 24
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.3626866572934784,
"min": 0.2680835723572351,
"max": 0.41460953043600357,
"count": 24
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.12022656089237826,
"min": 0.05834233026278092,
"max": 0.14807391013288293,
"count": 24
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.48090624356951306,
"min": 0.23336932105112368,
"max": 0.5922956405315317,
"count": 24
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.146764630000004e-06,
"min": 8.146764630000004e-06,
"max": 0.00039203555754666665,
"count": 24
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.2587058520000015e-05,
"min": 3.2587058520000015e-05,
"max": 0.00149925335852,
"count": 24
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10203666666666666,
"min": 0.10203666666666666,
"max": 0.1980088888888889,
"count": 24
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.40814666666666666,
"min": 0.31902666666666674,
"max": 0.7748133333333332,
"count": 24
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.00021346300000000008,
"min": 0.00021346300000000008,
"max": 0.009801088,
"count": 24
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0008538520000000003,
"min": 0.0008538520000000003,
"max": 0.037483852000000005,
"count": 24
},
"SnowballTarget.Losses.CuriosityForwardLoss.mean": {
"value": 0.049340017580563,
"min": 0.049340017580563,
"max": 0.11238793617953258,
"count": 24
},
"SnowballTarget.Losses.CuriosityForwardLoss.sum": {
"value": 0.197360070322252,
"min": 0.15563410755797008,
"max": 0.33716380853859773,
"count": 24
},
"SnowballTarget.Losses.CuriosityInverseLoss.mean": {
"value": 1.4141540664244727,
"min": 1.4141540664244727,
"max": 2.8354673816503912,
"count": 24
},
"SnowballTarget.Losses.CuriosityInverseLoss.sum": {
"value": 5.656616265697891,
"min": 4.2828076974346505,
"max": 10.669374022168101,
"count": 24
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 22.454545454545453,
"min": 2.909090909090909,
"max": 22.454545454545453,
"count": 24
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 494.0,
"min": 64.0,
"max": 711.0,
"count": 24
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 22.454545454545453,
"min": 2.909090909090909,
"max": 22.454545454545453,
"count": 24
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 494.0,
"min": 64.0,
"max": 711.0,
"count": 24
},
"SnowballTarget.Policy.CuriosityReward.mean": {
"value": 0.20842078646687281,
"min": 0.20536660502495413,
"max": 0.302854514691386,
"count": 24
},
"SnowballTarget.Policy.CuriosityReward.sum": {
"value": 4.585257302271202,
"min": 4.518065310548991,
"max": 7.958207841962576,
"count": 24
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 24
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 24
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1739206661",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics --force",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1739207027"
},
"total": 366.6684852830001,
"count": 1,
"self": 0.43187780299990663,
"children": {
"run_training.setup": {
"total": 0.029345969000132754,
"count": 1,
"self": 0.029345969000132754
},
"TrainerController.start_learning": {
"total": 366.20726151100007,
"count": 1,
"self": 0.6093387509949935,
"children": {
"TrainerController._reset_env": {
"total": 2.414866170999858,
"count": 1,
"self": 2.414866170999858
},
"TrainerController.advance": {
"total": 363.0920417230052,
"count": 11005,
"self": 0.2074296979983501,
"children": {
"env_step": {
"total": 362.88461202500685,
"count": 11005,
"self": 284.4297636560018,
"children": {
"SubprocessEnvManager._take_step": {
"total": 78.26171221600043,
"count": 11005,
"self": 0.991260860004104,
"children": {
"TorchPolicy.evaluate": {
"total": 77.27045135599633,
"count": 11005,
"self": 77.27045135599633
}
}
},
"workers": {
"total": 0.19313615300461606,
"count": 11005,
"self": 0.0,
"children": {
"worker_root": {
"total": 365.18384611800525,
"count": 11005,
"is_parallel": true,
"self": 220.45321479002064,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0028145039998435095,
"count": 1,
"is_parallel": true,
"self": 0.0007361789998867607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002078324999956749,
"count": 10,
"is_parallel": true,
"self": 0.002078324999956749
}
}
},
"UnityEnvironment.step": {
"total": 0.06449263800004701,
"count": 1,
"is_parallel": true,
"self": 0.0005959709999387997,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003966610001953086,
"count": 1,
"is_parallel": true,
"self": 0.0003966610001953086
},
"communicator.exchange": {
"total": 0.06152797400000054,
"count": 1,
"is_parallel": true,
"self": 0.06152797400000054
},
"steps_from_proto": {
"total": 0.0019720319999123603,
"count": 1,
"is_parallel": true,
"self": 0.00038622000010946067,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0015858119998028997,
"count": 10,
"is_parallel": true,
"self": 0.0015858119998028997
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 144.7306313279846,
"count": 11004,
"is_parallel": true,
"self": 6.164258635984197,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 3.3917431570159806,
"count": 11004,
"is_parallel": true,
"self": 3.3917431570159806
},
"communicator.exchange": {
"total": 114.39247847900333,
"count": 11004,
"is_parallel": true,
"self": 114.39247847900333
},
"steps_from_proto": {
"total": 20.782151055981103,
"count": 11004,
"is_parallel": true,
"self": 4.146583734015621,
"children": {
"_process_rank_one_or_two_observation": {
"total": 16.635567321965482,
"count": 110040,
"is_parallel": true,
"self": 16.635567321965482
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.001371276000099897,
"count": 1,
"self": 0.001371276000099897,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 363.6459302829876,
"count": 6012,
"is_parallel": true,
"self": 0.1291725459764166,
"children": {
"process_trajectory": {
"total": 56.10390000901111,
"count": 6012,
"is_parallel": true,
"self": 54.67397870901141,
"children": {
"RLTrainer._checkpoint": {
"total": 1.4299212999997053,
"count": 8,
"is_parallel": true,
"self": 1.4299212999997053
}
}
},
"_update_policy": {
"total": 307.41285772800006,
"count": 82,
"is_parallel": true,
"self": 152.3872712640043,
"children": {
"TorchPPOOptimizer.update": {
"total": 155.02558646399575,
"count": 7300,
"is_parallel": true,
"self": 155.02558646399575
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.08964358999992328,
"count": 1,
"self": 0.000892784999905416,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08875080500001786,
"count": 1,
"self": 0.08875080500001786
}
}
}
}
}
}
}