ppo-Pyramids / run_logs /timers.json
{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.47019147872924805,
"min": 0.47019147872924805,
"max": 1.4967389106750488,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 13947.759765625,
"min": 13947.759765625,
"max": 45405.0703125,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989977.0,
"min": 29952.0,
"max": 989977.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989977.0,
"min": 29952.0,
"max": 989977.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.49538129568099976,
"min": -0.10629834979772568,
"max": 0.5803999304771423,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 134.74371337890625,
"min": -25.724201202392578,
"max": 161.93157958984375,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01387109886854887,
"min": -0.03978590667247772,
"max": 0.11708737909793854,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 3.7729389667510986,
"min": -9.42926025390625,
"max": 28.100971221923828,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06516321028040785,
"min": 0.06516321028040785,
"max": 0.0744888767572246,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9122849439257099,
"min": 0.5002769117085357,
"max": 1.053547180616685,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014955064537519188,
"min": 0.0005392633678588749,
"max": 0.01625847792907041,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.20937090352526863,
"min": 0.007010423782165373,
"max": 0.23562632080574983,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.258747580449997e-06,
"min": 7.258747580449997e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010162246612629996,
"min": 0.00010162246612629996,
"max": 0.0034932868355710997,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10241955000000004,
"min": 0.10241955000000004,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4338737000000006,
"min": 1.3691136000000002,
"max": 2.484064,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025171304500000007,
"min": 0.00025171304500000007,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035239826300000007,
"min": 0.0035239826300000007,
"max": 0.11645644710999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.010213752277195454,
"min": 0.010213752277195454,
"max": 0.24146492779254913,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.1429925262928009,
"min": 0.1429925262928009,
"max": 1.6902544498443604,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 378.2560975609756,
"min": 324.10227272727275,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 31017.0,
"min": 15984.0,
"max": 33128.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.4754864027102788,
"min": -1.0000000521540642,
"max": 1.6758977154439145,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 119.51439861953259,
"min": -32.000001668930054,
"max": 148.4319978877902,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.4754864027102788,
"min": -1.0000000521540642,
"max": 1.6758977154439145,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 119.51439861953259,
"min": -32.000001668930054,
"max": 148.4319978877902,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.03981199028652397,
"min": 0.03493330160894071,
"max": 4.9287842540070415,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 3.2247712132084416,
"min": 3.062824132866808,
"max": 78.86054806411266,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678708494",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1678710636"
},
"total": 2142.3020889259997,
"count": 1,
"self": 0.8344891609995102,
"children": {
"run_training.setup": {
"total": 0.10071282800004155,
"count": 1,
"self": 0.10071282800004155
},
"TrainerController.start_learning": {
"total": 2141.366886937,
"count": 1,
"self": 1.328769133944661,
"children": {
"TrainerController._reset_env": {
"total": 5.8561510479999015,
"count": 1,
"self": 5.8561510479999015
},
"TrainerController.advance": {
"total": 2134.021762569055,
"count": 63703,
"self": 1.3644642280373773,
"children": {
"env_step": {
"total": 1505.0004920089812,
"count": 63703,
"self": 1397.5307845460097,
"children": {
"SubprocessEnvManager._take_step": {
"total": 106.64765105298943,
"count": 63703,
"self": 4.593527341959316,
"children": {
"TorchPolicy.evaluate": {
"total": 102.05412371103012,
"count": 62561,
"self": 102.05412371103012
}
}
},
"workers": {
"total": 0.8220564099819967,
"count": 63703,
"self": 0.0,
"children": {
"worker_root": {
"total": 2136.6348991780037,
"count": 63703,
"is_parallel": true,
"self": 852.1371668120391,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0017750990000422462,
"count": 1,
"is_parallel": true,
"self": 0.0005693850000625389,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012057139999797073,
"count": 8,
"is_parallel": true,
"self": 0.0012057139999797073
}
}
},
"UnityEnvironment.step": {
"total": 0.04507985500003997,
"count": 1,
"is_parallel": true,
"self": 0.0005694600001788785,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00045566900007543154,
"count": 1,
"is_parallel": true,
"self": 0.00045566900007543154
},
"communicator.exchange": {
"total": 0.04252236299998913,
"count": 1,
"is_parallel": true,
"self": 0.04252236299998913
},
"steps_from_proto": {
"total": 0.0015323629997965327,
"count": 1,
"is_parallel": true,
"self": 0.0003359309998813842,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0011964319999151485,
"count": 8,
"is_parallel": true,
"self": 0.0011964319999151485
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1284.4977323659646,
"count": 63702,
"is_parallel": true,
"self": 30.77022109893005,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.210098435990403,
"count": 63702,
"is_parallel": true,
"self": 22.210098435990403
},
"communicator.exchange": {
"total": 1140.1662154540086,
"count": 63702,
"is_parallel": true,
"self": 1140.1662154540086
},
"steps_from_proto": {
"total": 91.35119737703553,
"count": 63702,
"is_parallel": true,
"self": 19.329220477013678,
"children": {
"_process_rank_one_or_two_observation": {
"total": 72.02197690002185,
"count": 509616,
"is_parallel": true,
"self": 72.02197690002185
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 627.6568063320367,
"count": 63703,
"self": 2.457332148986552,
"children": {
"process_trajectory": {
"total": 117.24018147905508,
"count": 63703,
"self": 116.93804312105522,
"children": {
"RLTrainer._checkpoint": {
"total": 0.30213835799986555,
"count": 2,
"self": 0.30213835799986555
}
}
},
"_update_policy": {
"total": 507.95929270399506,
"count": 448,
"self": 322.4392480210265,
"children": {
"TorchPPOOptimizer.update": {
"total": 185.52004468296855,
"count": 22791,
"self": 185.52004468296855
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.3670000953425188e-06,
"count": 1,
"self": 1.3670000953425188e-06
},
"TrainerController._save_models": {
"total": 0.16020281900000555,
"count": 1,
"self": 0.0020388020002428675,
"children": {
"RLTrainer._checkpoint": {
"total": 0.15816401699976268,
"count": 1,
"self": 0.15816401699976268
}
}
}
}
}
}
}