{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.407385230064392,
"min": 1.407385230064392,
"max": 1.4286208152770996,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71016.65625,
"min": 68816.328125,
"max": 76900.2421875,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 86.28646748681898,
"min": 80.79638752052546,
"max": 420.7583333333333,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49097.0,
"min": 48805.0,
"max": 50491.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999978.0,
"min": 49861.0,
"max": 1999978.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999978.0,
"min": 49861.0,
"max": 1999978.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3829455375671387,
"min": 0.11452861875295639,
"max": 2.4751226902008057,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1355.89599609375,
"min": 13.628905296325684,
"max": 1420.844970703125,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.692687800982296,
"min": 1.8203454638729577,
"max": 3.983334034737445,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2101.1393587589264,
"min": 216.62111020088196,
"max": 2254.5428445339203,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.692687800982296,
"min": 1.8203454638729577,
"max": 3.983334034737445,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2101.1393587589264,
"min": 216.62111020088196,
"max": 2254.5428445339203,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016835935235980693,
"min": 0.013674838062070194,
"max": 0.020494382902365438,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.050507805707942076,
"min": 0.02734967612414039,
"max": 0.06148314870709631,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05420520905819204,
"min": 0.021051808384557565,
"max": 0.06217266486750709,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.16261562717457612,
"min": 0.04210361676911513,
"max": 0.18651799460252128,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.1521989493000017e-06,
"min": 3.1521989493000017e-06,
"max": 0.000295353376548875,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.456596847900005e-06,
"min": 9.456596847900005e-06,
"max": 0.0008441014686328499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10105070000000001,
"min": 0.10105070000000001,
"max": 0.198451125,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30315210000000004,
"min": 0.20723029999999998,
"max": 0.58136715,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.242993000000006e-05,
"min": 6.242993000000006e-05,
"max": 0.004922711137500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018728979000000017,
"min": 0.00018728979000000017,
"max": 0.014070220785,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671591443",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671593718"
},
"total": 2274.2501381449997,
"count": 1,
"self": 0.40284632299972145,
"children": {
"run_training.setup": {
"total": 0.10425803600003292,
"count": 1,
"self": 0.10425803600003292
},
"TrainerController.start_learning": {
"total": 2273.743033786,
"count": 1,
"self": 4.020578486115028,
"children": {
"TrainerController._reset_env": {
"total": 7.212611756000001,
"count": 1,
"self": 7.212611756000001
},
"TrainerController.advance": {
"total": 2262.3924549968856,
"count": 232190,
"self": 4.4264747478014215,
"children": {
"env_step": {
"total": 1789.8019461929634,
"count": 232190,
"self": 1504.9399870609475,
"children": {
"SubprocessEnvManager._take_step": {
"total": 282.2085959090193,
"count": 232190,
"self": 14.905137277972699,
"children": {
"TorchPolicy.evaluate": {
"total": 267.3034586310466,
"count": 222916,
"self": 66.88175811211897,
"children": {
"TorchPolicy.sample_actions": {
"total": 200.42170051892765,
"count": 222916,
"self": 200.42170051892765
}
}
}
}
},
"workers": {
"total": 2.6533632229966315,
"count": 232190,
"self": 0.0,
"children": {
"worker_root": {
"total": 2265.7549310248987,
"count": 232190,
"is_parallel": true,
"self": 1030.5650891028354,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018612349999784783,
"count": 1,
"is_parallel": true,
"self": 0.0003726740000047357,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014885609999737426,
"count": 2,
"is_parallel": true,
"self": 0.0014885609999737426
}
}
},
"UnityEnvironment.step": {
"total": 0.029002387999980783,
"count": 1,
"is_parallel": true,
"self": 0.00034525899991422193,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018692200001169113,
"count": 1,
"is_parallel": true,
"self": 0.00018692200001169113
},
"communicator.exchange": {
"total": 0.02768522300004861,
"count": 1,
"is_parallel": true,
"self": 0.02768522300004861
},
"steps_from_proto": {
"total": 0.0007849840000062613,
"count": 1,
"is_parallel": true,
"self": 0.0002712179999662112,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005137660000400501,
"count": 2,
"is_parallel": true,
"self": 0.0005137660000400501
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1235.1898419220634,
"count": 232189,
"is_parallel": true,
"self": 34.57662860623668,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 81.41248065389766,
"count": 232189,
"is_parallel": true,
"self": 81.41248065389766
},
"communicator.exchange": {
"total": 1022.9128659509945,
"count": 232189,
"is_parallel": true,
"self": 1022.9128659509945
},
"steps_from_proto": {
"total": 96.28786671093428,
"count": 232189,
"is_parallel": true,
"self": 41.94052951774597,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.34733719318831,
"count": 464378,
"is_parallel": true,
"self": 54.34733719318831
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 468.16403405612095,
"count": 232190,
"self": 6.082626476103314,
"children": {
"process_trajectory": {
"total": 150.0246399030184,
"count": 232190,
"self": 148.73563660401862,
"children": {
"RLTrainer._checkpoint": {
"total": 1.2890032989997735,
"count": 10,
"self": 1.2890032989997735
}
}
},
"_update_policy": {
"total": 312.05676767699924,
"count": 97,
"self": 258.99595881799166,
"children": {
"TorchPPOOptimizer.update": {
"total": 53.06080885900758,
"count": 2910,
"self": 53.06080885900758
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0989997463184409e-06,
"count": 1,
"self": 1.0989997463184409e-06
},
"TrainerController._save_models": {
"total": 0.11738744799959022,
"count": 1,
"self": 0.0023860229994170368,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11500142500017319,
"count": 1,
"self": 0.11500142500017319
}
}
}
}
}
}
}