{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.20153234899044037,
"min": 0.19174011051654816,
"max": 1.5280029773712158,
"count": 73
},
"Pyramids.Policy.Entropy.sum": {
"value": 6107.236328125,
"min": 5813.56005859375,
"max": 46353.5,
"count": 73
},
"Pyramids.Step.mean": {
"value": 2189886.0,
"min": 29909.0,
"max": 2189886.0,
"count": 73
},
"Pyramids.Step.sum": {
"value": 2189886.0,
"min": 29909.0,
"max": 2189886.0,
"count": 73
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.7948275804519653,
"min": -0.09267928451299667,
"max": 0.8700828552246094,
"count": 73
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 229.70516967773438,
"min": -22.33570671081543,
"max": 269.7256774902344,
"count": 73
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.007939147762954235,
"min": -0.02261369861662388,
"max": 0.15529946982860565,
"count": 73
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 2.2944138050079346,
"min": -6.354449272155762,
"max": 37.271873474121094,
"count": 73
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.07060907667787163,
"min": 0.06561050651084986,
"max": 0.07470018518090779,
"count": 73
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.4942635367451014,
"min": 0.3652114047526589,
"max": 0.5899645246778771,
"count": 73
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.011441139300219184,
"min": 0.0009819429486738072,
"max": 0.01617671375528776,
"count": 73
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.08008797510153429,
"min": 0.005891657692042844,
"max": 0.11758436653083967,
"count": 73
},
"Pyramids.Policy.LearningRate.mean": {
"value": 0.00014132182476947502,
"min": 0.00014132182476947502,
"max": 0.00030861980294522495,
"count": 73
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.0009892527733863252,
"min": 0.0009892527733863252,
"max": 0.002302997419597625,
"count": 73
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.14558766785714286,
"min": 0.14558766785714286,
"max": 0.19955477500000002,
"count": 73
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.019113675,
"min": 0.997773875,
"max": 1.5429023750000002,
"count": 73
},
"Pyramids.Policy.Beta.mean": {
"value": 0.0045642080189285715,
"min": 0.0045642080189285715,
"max": 0.0099555220225,
"count": 73
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0319494561325,
"min": 0.0319494561325,
"max": 0.0742959472625,
"count": 73
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.007443276233971119,
"min": 0.0069568161852657795,
"max": 0.3445335924625397,
"count": 73
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.05210293456912041,
"min": 0.048697713762521744,
"max": 1.722667932510376,
"count": 73
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 273.11009174311926,
"min": 216.12686567164178,
"max": 999.0,
"count": 73
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 29769.0,
"min": 16836.0,
"max": 32393.0,
"count": 73
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.7085320983047878,
"min": -0.9999750521965325,
"max": 1.78346322181032,
"count": 73
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 186.22999871522188,
"min": -31.99920167028904,
"max": 245.7855984866619,
"count": 73
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.7085320983047878,
"min": -0.9999750521965325,
"max": 1.78346322181032,
"count": 73
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 186.22999871522188,
"min": -31.99920167028904,
"max": 245.7855984866619,
"count": 73
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.021169403688063623,
"min": 0.016048717665102297,
"max": 8.485743029152646,
"count": 73
},
"Pyramids.Policy.RndReward.sum": {
"value": 2.307465001998935,
"min": 2.080391679715831,
"max": 144.25763149559498,
"count": 73
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 73
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 73
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677601982",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training 1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677608164"
},
"total": 6181.976544378999,
"count": 1,
"self": 0.8422480380004345,
"children": {
"run_training.setup": {
"total": 0.10476226299988411,
"count": 1,
"self": 0.10476226299988411
},
"TrainerController.start_learning": {
"total": 6181.029534077999,
"count": 1,
"self": 3.4949724138241436,
"children": {
"TrainerController._reset_env": {
"total": 6.062431775999812,
"count": 1,
"self": 6.062431775999812
},
"TrainerController.advance": {
"total": 6171.464550249175,
"count": 142560,
"self": 3.8609167509021063,
"children": {
"env_step": {
"total": 4254.0593055442005,
"count": 142560,
"self": 3964.166890189564,
"children": {
"SubprocessEnvManager._take_step": {
"total": 287.76362109690353,
"count": 142560,
"self": 12.084583657525855,
"children": {
"TorchPolicy.evaluate": {
"total": 275.6790374393777,
"count": 137141,
"self": 93.73339672437396,
"children": {
"TorchPolicy.sample_actions": {
"total": 181.94564071500372,
"count": 137141,
"self": 181.94564071500372
}
}
}
}
},
"workers": {
"total": 2.1287942577328067,
"count": 142559,
"self": 0.0,
"children": {
"worker_root": {
"total": 6167.988362214017,
"count": 142559,
"is_parallel": true,
"self": 2508.2833278801304,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0019709560001501814,
"count": 1,
"is_parallel": true,
"self": 0.0007427409982483368,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012282150019018445,
"count": 8,
"is_parallel": true,
"self": 0.0012282150019018445
}
}
},
"UnityEnvironment.step": {
"total": 0.05069202100003167,
"count": 1,
"is_parallel": true,
"self": 0.0005407120002018928,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005146240000613034,
"count": 1,
"is_parallel": true,
"self": 0.0005146240000613034
},
"communicator.exchange": {
"total": 0.04789565600003698,
"count": 1,
"is_parallel": true,
"self": 0.04789565600003698
},
"steps_from_proto": {
"total": 0.0017410289997314976,
"count": 1,
"is_parallel": true,
"self": 0.00045208499977889005,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012889439999526076,
"count": 8,
"is_parallel": true,
"self": 0.0012889439999526076
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 3659.7050343338865,
"count": 142558,
"is_parallel": true,
"self": 70.50940164768053,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 53.252057974014406,
"count": 142558,
"is_parallel": true,
"self": 53.252057974014406
},
"communicator.exchange": {
"total": 3311.9736428790393,
"count": 142558,
"is_parallel": true,
"self": 3311.9736428790393
},
"steps_from_proto": {
"total": 223.96993183315226,
"count": 142558,
"is_parallel": true,
"self": 55.492057234002004,
"children": {
"_process_rank_one_or_two_observation": {
"total": 168.47787459915025,
"count": 1140464,
"is_parallel": true,
"self": 168.47787459915025
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1913.5443279540723,
"count": 142559,
"self": 6.642013524338381,
"children": {
"process_trajectory": {
"total": 441.19942892274685,
"count": 142559,
"self": 440.72070650874775,
"children": {
"RLTrainer._checkpoint": {
"total": 0.478722413999094,
"count": 4,
"self": 0.478722413999094
}
}
},
"_update_policy": {
"total": 1465.702885506987,
"count": 521,
"self": 572.6332086719758,
"children": {
"TorchPPOOptimizer.update": {
"total": 893.0696768350113,
"count": 50697,
"self": 893.0696768350113
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.6230005712714046e-06,
"count": 1,
"self": 1.6230005712714046e-06
},
"TrainerController._save_models": {
"total": 0.007578016000479693,
"count": 1,
"self": 4.705900028056931e-05,
"children": {
"RLTrainer._checkpoint": {
"total": 0.007530957000199123,
"count": 1,
"self": 0.007530957000199123
}
}
}
}
}
}
}