{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.3324046730995178,
"min": 0.3324046730995178,
"max": 1.51145339012146,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 9918.955078125,
"min": 9918.955078125,
"max": 45851.44921875,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989947.0,
"min": 29952.0,
"max": 989947.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989947.0,
"min": 29952.0,
"max": 989947.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.6356512904167175,
"min": -0.12760119140148163,
"max": 0.6810547709465027,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 179.88931274414062,
"min": -30.24148178100586,
"max": 196.14376831054688,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.01655861921608448,
"min": 0.008748773485422134,
"max": 0.29381561279296875,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 4.686089038848877,
"min": 2.484651565551758,
"max": 71.10337829589844,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06834278703317978,
"min": 0.06520326914959841,
"max": 0.07376530641528155,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 1.0251418054976966,
"min": 0.5041903745809212,
"max": 1.0809117345197592,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.014557177390735079,
"min": 0.0011839439733153808,
"max": 0.01680611529906348,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.21835766086102618,
"min": 0.013023383706469188,
"max": 0.2352856141868887,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.462617512493338e-06,
"min": 7.462617512493338e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00011193926268740007,
"min": 0.00011193926268740007,
"max": 0.0033830093723302996,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10248750666666667,
"min": 0.10248750666666667,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.5373126000000001,
"min": 1.3886848,
"max": 2.5276697,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025850191600000015,
"min": 0.00025850191600000015,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0038775287400000026,
"min": 0.0038775287400000026,
"max": 0.11279420303000001,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.013626319356262684,
"min": 0.013626319356262684,
"max": 0.39074864983558655,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.20439478754997253,
"min": 0.2029009461402893,
"max": 2.7352404594421387,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 309.29896907216494,
"min": 273.4036697247706,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30002.0,
"min": 15984.0,
"max": 33059.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 1.6288247281282218,
"min": -1.0000000521540642,
"max": 1.7265963141524463,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 157.99599862843752,
"min": -30.326601669192314,
"max": 188.19899824261665,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 1.6288247281282218,
"min": -1.0000000521540642,
"max": 1.7265963141524463,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 157.99599862843752,
"min": -30.326601669192314,
"max": 188.19899824261665,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.04358661355487954,
"min": 0.04135681849447375,
"max": 7.892207018099725,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 4.227901514823316,
"min": 4.227901514823316,
"max": 126.2753122895956,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677315674",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677317365"
},
"total": 1690.7915436910002,
"count": 1,
"self": 0.32780665300015244,
"children": {
"run_training.setup": {
"total": 0.10649745099999564,
"count": 1,
"self": 0.10649745099999564
},
"TrainerController.start_learning": {
"total": 1690.357239587,
"count": 1,
"self": 1.0787967830854086,
"children": {
"TrainerController._reset_env": {
"total": 6.212022681999997,
"count": 1,
"self": 6.212022681999997
},
"TrainerController.advance": {
"total": 1682.9902817549146,
"count": 64136,
"self": 1.1560017909623639,
"children": {
"env_step": {
"total": 1067.4114520379549,
"count": 64136,
"self": 973.2023379759084,
"children": {
"SubprocessEnvManager._take_step": {
"total": 93.53008218293621,
"count": 64136,
"self": 4.033235828915167,
"children": {
"TorchPolicy.evaluate": {
"total": 89.49684635402105,
"count": 62555,
"self": 30.844650641965472,
"children": {
"TorchPolicy.sample_actions": {
"total": 58.65219571205557,
"count": 62555,
"self": 58.65219571205557
}
}
}
}
},
"workers": {
"total": 0.6790318791101981,
"count": 64136,
"self": 0.0,
"children": {
"worker_root": {
"total": 1687.9845482259846,
"count": 64136,
"is_parallel": true,
"self": 800.3079692010381,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.005298490000086531,
"count": 1,
"is_parallel": true,
"self": 0.0040711020003527665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012273879997337644,
"count": 8,
"is_parallel": true,
"self": 0.0012273879997337644
}
}
},
"UnityEnvironment.step": {
"total": 0.034978249000005235,
"count": 1,
"is_parallel": true,
"self": 0.0003267969998432818,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004731770000034885,
"count": 1,
"is_parallel": true,
"self": 0.0004731770000034885
},
"communicator.exchange": {
"total": 0.03320013400002608,
"count": 1,
"is_parallel": true,
"self": 0.03320013400002608
},
"steps_from_proto": {
"total": 0.000978141000132382,
"count": 1,
"is_parallel": true,
"self": 0.00029064899968034297,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0006874920004520391,
"count": 8,
"is_parallel": true,
"self": 0.0006874920004520391
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 887.6765790249465,
"count": 64135,
"is_parallel": true,
"self": 21.3382354580001,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 15.761239877952903,
"count": 64135,
"is_parallel": true,
"self": 15.761239877952903
},
"communicator.exchange": {
"total": 790.2521644959859,
"count": 64135,
"is_parallel": true,
"self": 790.2521644959859
},
"steps_from_proto": {
"total": 60.324939193007594,
"count": 64135,
"is_parallel": true,
"self": 14.645400092902264,
"children": {
"_process_rank_one_or_two_observation": {
"total": 45.67953910010533,
"count": 513080,
"is_parallel": true,
"self": 45.67953910010533
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 614.4228279259974,
"count": 64136,
"self": 2.097375646993214,
"children": {
"process_trajectory": {
"total": 136.3131885740038,
"count": 64136,
"self": 136.14465382500407,
"children": {
"RLTrainer._checkpoint": {
"total": 0.16853474899971843,
"count": 2,
"self": 0.16853474899971843
}
}
},
"_update_policy": {
"total": 476.01226370500035,
"count": 452,
"self": 174.74040301798823,
"children": {
"TorchPPOOptimizer.update": {
"total": 301.2718606870121,
"count": 22782,
"self": 301.2718606870121
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.450001587334555e-07,
"count": 1,
"self": 8.450001587334555e-07
},
"TrainerController._save_models": {
"total": 0.0761375219999536,
"count": 1,
"self": 0.0012819069997931365,
"children": {
"RLTrainer._checkpoint": {
"total": 0.07485561500016047,
"count": 1,
"self": 0.07485561500016047
}
}
}
}
}
}
}