{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.535670280456543,
"min": 0.5250978469848633,
"max": 1.464095115661621,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 16112.9619140625,
"min": 15719.796875,
"max": 44414.7890625,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989964.0,
"min": 29952.0,
"max": 989964.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989964.0,
"min": 29952.0,
"max": 989964.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.38181909918785095,
"min": -0.1483517736196518,
"max": 0.39710476994514465,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 100.41842651367188,
"min": -35.15937042236328,
"max": 105.62986755371094,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.011703135445713997,
"min": 0.009123816154897213,
"max": 0.22145621478557587,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.0779247283935547,
"min": 2.372192144393921,
"max": 52.48512268066406,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.0683729722624689,
"min": 0.06230263852228831,
"max": 0.072528962395271,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9572216116745645,
"min": 0.4927181337084273,
"max": 1.0554828427993925,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013990169560461905,
"min": 0.00012293459437223657,
"max": 0.014769640864133612,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.19586237384646665,
"min": 0.0015981497268390754,
"max": 0.20677497209787057,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.622861744792859e-06,
"min": 7.622861744792859e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010672006442710003,
"min": 0.00010672006442710003,
"max": 0.0035074811308396995,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10254092142857142,
"min": 0.10254092142857142,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4355729,
"min": 1.3886848,
"max": 2.5691603,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00026383805071428576,
"min": 0.00026383805071428576,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.003693732710000001,
"min": 0.003693732710000001,
"max": 0.11693911397000002,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.012064195238053799,
"min": 0.012064195238053799,
"max": 0.33062949776649475,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.16889873147010803,
"min": 0.16889873147010803,
"max": 2.314406394958496,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 463.8955223880597,
"min": 437.9855072463768,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31081.0,
"min": 15984.0,
"max": 35404.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.38680594225428,
"min": -1.0000000521540642,
"max": 1.4170492576516194,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 92.91599813103676,
"min": -31.993601635098457,
"max": 97.77639877796173,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.38680594225428,
"min": -1.0000000521540642,
"max": 1.4170492576516194,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 92.91599813103676,
"min": -31.993601635098457,
"max": 97.77639877796173,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.05788842873185623,
"min": 0.05788842873185623,
"max": 6.564672093372792,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.8785247250343673,
"min": 3.8120617750973906,
"max": 105.03475349396467,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1674119817",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1674121831"
},
"total": 2014.0848039549999,
"count": 1,
"self": 0.4336514240001179,
"children": {
"run_training.setup": {
"total": 0.102516426999955,
"count": 1,
"self": 0.102516426999955
},
"TrainerController.start_learning": {
"total": 2013.5486361039998,
"count": 1,
"self": 1.2334205319275497,
"children": {
"TrainerController._reset_env": {
"total": 6.042525527999942,
"count": 1,
"self": 6.042525527999942
},
"TrainerController.advance": {
"total": 2006.1903550880722,
"count": 63472,
"self": 1.2827735951016166,
"children": {
"env_step": {
"total": 1375.2022578350088,
"count": 63472,
"self": 1272.4462254000016,
"children": {
"SubprocessEnvManager._take_step": {
"total": 101.98675042603236,
"count": 63472,
"self": 4.168451589010829,
"children": {
"TorchPolicy.evaluate": {
"total": 97.81829883702153,
"count": 62559,
"self": 32.6591787429536,
"children": {
"TorchPolicy.sample_actions": {
"total": 65.15912009406793,
"count": 62559,
"self": 65.15912009406793
}
}
}
}
},
"workers": {
"total": 0.7692820089748693,
"count": 63472,
"self": 0.0,
"children": {
"worker_root": {
"total": 2009.4306138529628,
"count": 63472,
"is_parallel": true,
"self": 833.8099237929182,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0016853880001690413,
"count": 1,
"is_parallel": true,
"self": 0.0006233879996671021,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0010620000005019392,
"count": 8,
"is_parallel": true,
"self": 0.0010620000005019392
}
}
},
"UnityEnvironment.step": {
"total": 0.04208939199997985,
"count": 1,
"is_parallel": true,
"self": 0.0004637339995952061,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004066140002123575,
"count": 1,
"is_parallel": true,
"self": 0.0004066140002123575
},
"communicator.exchange": {
"total": 0.03972162600007323,
"count": 1,
"is_parallel": true,
"self": 0.03972162600007323
},
"steps_from_proto": {
"total": 0.00149741800009906,
"count": 1,
"is_parallel": true,
"self": 0.00039529400009996607,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001102123999999094,
"count": 8,
"is_parallel": true,
"self": 0.001102123999999094
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1175.6206900600446,
"count": 63471,
"is_parallel": true,
"self": 27.139882453022437,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 21.2648697889947,
"count": 63471,
"is_parallel": true,
"self": 21.2648697889947
},
"communicator.exchange": {
"total": 1032.236780324039,
"count": 63471,
"is_parallel": true,
"self": 1032.236780324039
},
"steps_from_proto": {
"total": 94.9791574939884,
"count": 63471,
"is_parallel": true,
"self": 20.733960698200008,
"children": {
"_process_rank_one_or_two_observation": {
"total": 74.24519679578839,
"count": 507768,
"is_parallel": true,
"self": 74.24519679578839
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 629.7053236579618,
"count": 63472,
"self": 2.4036915959711678,
"children": {
"process_trajectory": {
"total": 136.87390040098876,
"count": 63472,
"self": 136.57839326198905,
"children": {
"RLTrainer._checkpoint": {
"total": 0.29550713899971015,
"count": 2,
"self": 0.29550713899971015
}
}
},
"_update_policy": {
"total": 490.42773166100187,
"count": 443,
"self": 183.7625554219885,
"children": {
"TorchPPOOptimizer.update": {
"total": 306.66517623901336,
"count": 22803,
"self": 306.66517623901336
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.810000847210176e-07,
"count": 1,
"self": 9.810000847210176e-07
},
"TrainerController._save_models": {
"total": 0.08233397499998318,
"count": 1,
"self": 0.0015697759999966365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08076419899998655,
"count": 1,
"self": 0.08076419899998655
}
}
}
}
}
}
}