{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.13540564477443695,
"min": 0.13540564477443695,
"max": 1.4734561443328857,
"count": 100
},
"Pyramids.Policy.Entropy.sum": {
"value": 4103.33251953125,
"min": 4079.176513671875,
"max": 44698.765625,
"count": 100
},
"Pyramids.Step.mean": {
"value": 2999904.0,
"min": 29952.0,
"max": 2999904.0,
"count": 100
},
"Pyramids.Step.sum": {
"value": 2999904.0,
"min": 29952.0,
"max": 2999904.0,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.8861145377159119,
"min": -0.1472976803779602,
"max": 0.9121769666671753,
"count": 100
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 268.4927062988281,
"min": -34.909549713134766,
"max": 279.12615966796875,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.06771577149629593,
"min": 0.013790382072329521,
"max": 0.25470271706581116,
"count": 100
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 20.517879486083984,
"min": 4.178485870361328,
"max": 61.638057708740234,
"count": 100
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07044521735130654,
"min": 0.06400929516114826,
"max": 0.07556259549095386,
"count": 100
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9862330429182916,
"min": 0.4823238184259423,
"max": 1.0714634416896538,
"count": 100
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.013740267741304091,
"min": 0.0004607026081294152,
"max": 0.016847438853888196,
"count": 100
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.1923637483782573,
"min": 0.005989133905682398,
"max": 0.2471406059436655,
"count": 100
},
"Pyramids.Policy.LearningRate.mean": {
"value": 1.4850423621619094e-06,
"min": 1.4850423621619094e-06,
"max": 0.00029838354339596195,
"count": 100
},
"Pyramids.Policy.LearningRate.sum": {
"value": 2.079059307026673e-05,
"min": 2.079059307026673e-05,
"max": 0.0039694875768374995,
"count": 100
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10049498095238096,
"min": 0.10049498095238096,
"max": 0.19946118095238097,
"count": 100
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4069297333333335,
"min": 1.3962282666666668,
"max": 2.797502366666667,
"count": 100
},
"Pyramids.Policy.Beta.mean": {
"value": 5.944859714285731e-05,
"min": 5.944859714285731e-05,
"max": 0.009946171977142856,
"count": 100
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0008322803600000023,
"min": 0.0008322803600000023,
"max": 0.13232393374999998,
"count": 100
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.041759099811315536,
"min": 0.03174985945224762,
"max": 0.4781936705112457,
"count": 100
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.5846273899078369,
"min": 0.4444980323314667,
"max": 3.347355604171753,
"count": 100
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 217.45985401459853,
"min": 190.36423841059602,
"max": 999.0,
"count": 100
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29792.0,
"min": 15984.0,
"max": 33443.0,
"count": 100
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7669709961375464,
"min": -1.0000000521540642,
"max": 1.8096357429067031,
"count": 100
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 243.8419974669814,
"min": -31.997201666235924,
"max": 273.25499717891216,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7669709961375464,
"min": -1.0000000521540642,
"max": 1.8096357429067031,
"count": 100
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 243.8419974669814,
"min": -31.997201666235924,
"max": 273.25499717891216,
"count": 100
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09221154288626048,
"min": 0.06875119469109146,
"max": 9.790997877717018,
"count": 100
},
"Pyramids.Policy.RndReward.sum": {
"value": 12.725192918303946,
"min": 9.605980330263264,
"max": 156.6559660434723,
"count": 100
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1676823589",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1676830965"
},
"total": 7376.287325714,
"count": 1,
"self": 0.48540138699900126,
"children": {
"run_training.setup": {
"total": 0.10568284400005723,
"count": 1,
"self": 0.10568284400005723
},
"TrainerController.start_learning": {
"total": 7375.696241483001,
"count": 1,
"self": 4.072606585128597,
"children": {
"TrainerController._reset_env": {
"total": 7.205507098999988,
"count": 1,
"self": 7.205507098999988
},
"TrainerController.advance": {
"total": 7364.325274402875,
"count": 195877,
"self": 4.1785884261980755,
"children": {
"env_step": {
"total": 5152.578531978866,
"count": 195877,
"self": 4820.022500557094,
"children": {
"SubprocessEnvManager._take_step": {
"total": 330.0811124039583,
"count": 195877,
"self": 13.50141495498383,
"children": {
"TorchPolicy.evaluate": {
"total": 316.57969744897446,
"count": 187567,
"self": 105.91716787001542,
"children": {
"TorchPolicy.sample_actions": {
"total": 210.66252957895904,
"count": 187567,
"self": 210.66252957895904
}
}
}
}
},
"workers": {
"total": 2.474919017814045,
"count": 195877,
"self": 0.0,
"children": {
"worker_root": {
"total": 7360.406783949097,
"count": 195877,
"is_parallel": true,
"self": 2884.0379565203966,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018848560002879822,
"count": 1,
"is_parallel": true,
"self": 0.000689021999733086,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011958340005548962,
"count": 8,
"is_parallel": true,
"self": 0.0011958340005548962
}
}
},
"UnityEnvironment.step": {
"total": 0.04550718300015433,
"count": 1,
"is_parallel": true,
"self": 0.000539817000117182,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004312930000196502,
"count": 1,
"is_parallel": true,
"self": 0.0004312930000196502
},
"communicator.exchange": {
"total": 0.042946002000007866,
"count": 1,
"is_parallel": true,
"self": 0.042946002000007866
},
"steps_from_proto": {
"total": 0.0015900710000096296,
"count": 1,
"is_parallel": true,
"self": 0.0004072860010637669,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011827849989458628,
"count": 8,
"is_parallel": true,
"self": 0.0011827849989458628
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4476.368827428701,
"count": 195876,
"is_parallel": true,
"self": 93.17836234240804,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 67.47899630618804,
"count": 195876,
"is_parallel": true,
"self": 67.47899630618804
},
"communicator.exchange": {
"total": 4037.357925805171,
"count": 195876,
"is_parallel": true,
"self": 4037.357925805171
},
"steps_from_proto": {
"total": 278.35354297493404,
"count": 195876,
"is_parallel": true,
"self": 64.9154730451096,
"children": {
"_process_rank_one_or_two_observation": {
"total": 213.43806992982445,
"count": 1567008,
"is_parallel": true,
"self": 213.43806992982445
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2207.5681539978114,
"count": 195877,
"self": 8.149826396926073,
"children": {
"process_trajectory": {
"total": 487.6857130179087,
"count": 195877,
"self": 487.1499923049082,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5357207130005008,
"count": 6,
"self": 0.5357207130005008
}
}
},
"_update_policy": {
"total": 1711.7326145829766,
"count": 1403,
"self": 670.911606885827,
"children": {
"TorchPPOOptimizer.update": {
"total": 1040.8210076971495,
"count": 68403,
"self": 1040.8210076971495
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.649985829833895e-07,
"count": 1,
"self": 8.649985829833895e-07
},
"TrainerController._save_models": {
"total": 0.0928525309991528,
"count": 1,
"self": 0.0016688710002199514,
"children": {
"RLTrainer._checkpoint": {
"total": 0.09118365999893285,
"count": 1,
"self": 0.09118365999893285
}
}
}
}
}
}
}