{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.432526558637619,
"min": 0.32225361466407776,
"max": 1.5993752479553223,
"count": 150
},
"Pyramids.Policy.Entropy.sum": {
"value": 4401.39013671875,
"min": 3195.638916015625,
"max": 16377.6025390625,
"count": 150
},
"Pyramids.Step.mean": {
"value": 1499903.0,
"min": 9984.0,
"max": 1499903.0,
"count": 150
},
"Pyramids.Step.sum": {
"value": 1499903.0,
"min": 9984.0,
"max": 1499903.0,
"count": 150
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.04954734072089195,
"min": -0.10484987497329712,
"max": 0.18645791709423065,
"count": 150
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 4.062881946563721,
"min": -8.283140182495117,
"max": 15.66246509552002,
"count": 150
},
"Pyramids.Policy.CuriosityValueEstimate.mean": {
"value": 0.11475102603435516,
"min": 0.08723638951778412,
"max": 0.5798850059509277,
"count": 150
},
"Pyramids.Policy.CuriosityValueEstimate.sum": {
"value": 9.409584045410156,
"min": 7.153383731842041,
"max": 46.97068405151367,
"count": 150
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06473862011140832,
"min": 0.05774385195642632,
"max": 0.07875350401352715,
"count": 150
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.2589544804456333,
"min": 0.13296349911252037,
"max": 0.38936668815657355,
"count": 150
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0037324324090756518,
"min": 7.420701751145458e-05,
"max": 0.009053264769439314,
"count": 150
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.014929729636302607,
"min": 0.00029682807004581833,
"max": 0.03491397371423469,
"count": 150
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.0306996564666704e-06,
"min": 1.0306996564666704e-06,
"max": 0.00029877120040959997,
"count": 150
},
"Pyramids.Policy.LearningRate.sum": {
"value": 4.122798625866682e-06,
"min": 4.122798625866682e-06,
"max": 0.0013251332582889333,
"count": 150
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10034353333333335,
"min": 0.10034353333333335,
"max": 0.1995904,
"count": 150
},
"Pyramids.Policy.Epsilon.sum": {
"value": 0.4013741333333334,
"min": 0.3847936,
"max": 0.9417110666666667,
"count": 150
},
"Pyramids.Policy.Beta.mean": {
"value": 4.431898000000013e-05,
"min": 4.431898000000013e-05,
"max": 0.009959080959999998,
"count": 150
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0001772759200000005,
"min": 0.0001772759200000005,
"max": 0.044176935560000005,
"count": 150
},
"Pyramids.Losses.CuriosityForwardLoss.mean": {
"value": 0.061895945167634636,
"min": 0.06048533997188012,
"max": 0.9530652779309701,
"count": 150
},
"Pyramids.Losses.CuriosityForwardLoss.sum": {
"value": 0.24758378067053854,
"min": 0.24758378067053854,
"max": 1.9061305558619401,
"count": 150
},
"Pyramids.Losses.CuriosityInverseLoss.mean": {
"value": 0.06055828338685387,
"min": 0.04053960538779696,
"max": 0.9808244205390414,
"count": 150
},
"Pyramids.Losses.CuriosityInverseLoss.sum": {
"value": 0.2422331335474155,
"min": 0.16215842155118784,
"max": 1.9616488410780828,
"count": 150
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 150
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 150
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 628.2727272727273,
"min": 364.0,
"max": 999.0,
"count": 145
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 6911.0,
"min": 364.0,
"max": 15984.0,
"count": 145
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8261090652509169,
"min": -1.0000000521540642,
"max": 1.6359999477863312,
"count": 146
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 9.087199717760086,
"min": -16.000000834465027,
"max": 9.087199717760086,
"count": 146
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8261090652509169,
"min": -1.0000000521540642,
"max": 1.6359999477863312,
"count": 146
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 9.087199717760086,
"min": -16.000000834465027,
"max": 9.087199717760086,
"count": 146
},
"Pyramids.Policy.CuriosityReward.mean": {
"value": 0.8066543743691661,
"min": 0.8066543743691661,
"max": 7.507157441228628,
"count": 146
},
"Pyramids.Policy.CuriosityReward.sum": {
"value": 8.873198118060827,
"min": 1.2870761454105377,
"max": 120.11451905965805,
"count": 146
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1771968355",
"python_version": "3.10.12 (main, Jan 26 2026, 14:55:28) [GCC 11.4.0]",
"command_line_arguments": "/kaggle/working/mlagents-env/bin/mlagents-learn /kaggle/working/ml-agents/config/ppo/Pyramids.yaml --env=./training-envs-executables/Pyramids/Pyramids/Pyramids --run-id=Pyramids1 --no-graphics --force",
"mlagents_version": "1.1.0",
"mlagents_envs_version": "1.1.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.1+cpu",
"numpy_version": "1.23.5",
"end_time_seconds": "1771971846"
},
"total": 3490.8813013159997,
"count": 1,
"self": 0.3800962890009032,
"children": {
"run_training.setup": {
"total": 0.016093584999907762,
"count": 1,
"self": 0.016093584999907762
},
"TrainerController.start_learning": {
"total": 3490.485111441999,
"count": 1,
"self": 1.8422932399635101,
"children": {
"TrainerController._reset_env": {
"total": 0.8198842850006258,
"count": 1,
"self": 0.8198842850006258
},
"TrainerController.advance": {
"total": 3487.768661795034,
"count": 94781,
"self": 1.8979434687980756,
"children": {
"env_step": {
"total": 2328.233717913111,
"count": 94781,
"self": 2197.3696197046556,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.72657525722298,
"count": 94781,
"self": 7.196937510988391,
"children": {
"TorchPolicy.evaluate": {
"total": 122.52963774623458,
"count": 93819,
"self": 122.52963774623458
}
}
},
"workers": {
"total": 1.137522951232313,
"count": 94781,
"self": 0.0,
"children": {
"worker_root": {
"total": 3487.0163712979956,
"count": 94781,
"is_parallel": true,
"self": 1445.7082351739773,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0022598160003326484,
"count": 1,
"is_parallel": true,
"self": 0.0007911549992059008,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014686610011267476,
"count": 8,
"is_parallel": true,
"self": 0.0014686610011267476
}
}
},
"UnityEnvironment.step": {
"total": 0.04300834900004702,
"count": 1,
"is_parallel": true,
"self": 0.0003477970003586961,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0003681289999803994,
"count": 1,
"is_parallel": true,
"self": 0.0003681289999803994
},
"communicator.exchange": {
"total": 0.04089825099981681,
"count": 1,
"is_parallel": true,
"self": 0.04089825099981681
},
"steps_from_proto": {
"total": 0.001394171999891114,
"count": 1,
"is_parallel": true,
"self": 0.00042417899931024294,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000969993000580871,
"count": 8,
"is_parallel": true,
"self": 0.000969993000580871
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 2041.3081361240183,
"count": 94780,
"is_parallel": true,
"self": 39.858876127959775,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 29.71144846707284,
"count": 94780,
"is_parallel": true,
"self": 29.71144846707284
},
"communicator.exchange": {
"total": 1845.155440685765,
"count": 94780,
"is_parallel": true,
"self": 1845.155440685765
},
"steps_from_proto": {
"total": 126.58237084322082,
"count": 94780,
"is_parallel": true,
"self": 26.328388871829702,
"children": {
"_process_rank_one_or_two_observation": {
"total": 100.25398197139111,
"count": 758240,
"is_parallel": true,
"self": 100.25398197139111
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1157.637000413125,
"count": 94781,
"self": 3.5209788969887086,
"children": {
"process_trajectory": {
"total": 164.55201621413744,
"count": 94781,
"self": 164.14582424913533,
"children": {
"RLTrainer._checkpoint": {
"total": 0.4061919650021082,
"count": 7,
"self": 0.4061919650021082
}
}
},
"_update_policy": {
"total": 989.5640053019988,
"count": 662,
"self": 713.7821614979439,
"children": {
"TorchPPOOptimizer.update": {
"total": 275.7818438040549,
"count": 34125,
"self": 275.7818438040549
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0059993655886501e-06,
"count": 1,
"self": 1.0059993655886501e-06
},
"TrainerController._save_models": {
"total": 0.054271116001473274,
"count": 1,
"self": 0.000733008000679547,
"children": {
"RLTrainer._checkpoint": {
"total": 0.05353810800079373,
"count": 1,
"self": 0.05353810800079373
}
}
}
}
}
}
}