{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4088997840881348,
"min": 1.4088997840881348,
"max": 1.4293849468231201,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 71173.390625,
"min": 68708.484375,
"max": 77460.8125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 89.82913669064749,
"min": 82.51752921535893,
"max": 417.3057851239669,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49945.0,
"min": 48757.0,
"max": 50494.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999766.0,
"min": 49909.0,
"max": 1999766.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999766.0,
"min": 49909.0,
"max": 1999766.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3853249549865723,
"min": 0.06508781015872955,
"max": 2.497054100036621,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1326.24072265625,
"min": 7.810537338256836,
"max": 1455.6171875,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.576727960499928,
"min": 1.9918422761062782,
"max": 3.980106412526384,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1988.66074603796,
"min": 239.02107313275337,
"max": 2266.0469810962677,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.576727960499928,
"min": 1.9918422761062782,
"max": 3.980106412526384,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1988.66074603796,
"min": 239.02107313275337,
"max": 2266.0469810962677,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.018183682954663206,
"min": 0.012962108511257814,
"max": 0.020143634332149912,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.05455104886398962,
"min": 0.02684174941581053,
"max": 0.05669651345233433,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05252561618884405,
"min": 0.021694910743584237,
"max": 0.05782596686234077,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15757684856653215,
"min": 0.043389821487168474,
"max": 0.16872400778035324,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.5612488129499906e-06,
"min": 3.5612488129499906e-06,
"max": 0.0002953502265499249,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.0683746438849972e-05,
"min": 1.0683746438849972e-05,
"max": 0.0008444419685193499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10118704999999999,
"min": 0.10118704999999999,
"max": 0.19845007500000006,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30356114999999995,
"min": 0.207528,
"max": 0.5814806499999999,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.923379499999987e-05,
"min": 6.923379499999987e-05,
"max": 0.004922658742500001,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.0002077013849999996,
"min": 0.0002077013849999996,
"max": 0.014075884435000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677505234",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677507738"
},
"total": 2504.2590566030003,
"count": 1,
"self": 0.5461561239999355,
"children": {
"run_training.setup": {
"total": 0.10802378700009285,
"count": 1,
"self": 0.10802378700009285
},
"TrainerController.start_learning": {
"total": 2503.6048766920003,
"count": 1,
"self": 4.299206847007554,
"children": {
"TrainerController._reset_env": {
"total": 9.881195365000053,
"count": 1,
"self": 9.881195365000053
},
"TrainerController.advance": {
"total": 2489.309720845992,
"count": 232281,
"self": 4.775477011854036,
"children": {
"env_step": {
"total": 1947.0487787110733,
"count": 232281,
"self": 1630.716323953216,
"children": {
"SubprocessEnvManager._take_step": {
"total": 313.4341197818668,
"count": 232281,
"self": 16.78508923581819,
"children": {
"TorchPolicy.evaluate": {
"total": 296.6490305460486,
"count": 223004,
"self": 74.23264798502544,
"children": {
"TorchPolicy.sample_actions": {
"total": 222.41638256102317,
"count": 223004,
"self": 222.41638256102317
}
}
}
}
},
"workers": {
"total": 2.8983349759904513,
"count": 232281,
"self": 0.0,
"children": {
"worker_root": {
"total": 2494.8128548090203,
"count": 232281,
"is_parallel": true,
"self": 1170.8455450849983,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.000929632999941532,
"count": 1,
"is_parallel": true,
"self": 0.0003463419999434336,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0005832909999980984,
"count": 2,
"is_parallel": true,
"self": 0.0005832909999980984
}
}
},
"UnityEnvironment.step": {
"total": 0.04412417900005039,
"count": 1,
"is_parallel": true,
"self": 0.0003936900001235699,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0004614409999703639,
"count": 1,
"is_parallel": true,
"self": 0.0004614409999703639
},
"communicator.exchange": {
"total": 0.04173676200002774,
"count": 1,
"is_parallel": true,
"self": 0.04173676200002774
},
"steps_from_proto": {
"total": 0.0015322859999287175,
"count": 1,
"is_parallel": true,
"self": 0.00031940799999574665,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012128779999329709,
"count": 2,
"is_parallel": true,
"self": 0.0012128779999329709
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1323.967309724022,
"count": 232280,
"is_parallel": true,
"self": 39.0108326120378,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 84.45363922600188,
"count": 232280,
"is_parallel": true,
"self": 84.45363922600188
},
"communicator.exchange": {
"total": 1104.714281621005,
"count": 232280,
"is_parallel": true,
"self": 1104.714281621005
},
"steps_from_proto": {
"total": 95.78855626497739,
"count": 232280,
"is_parallel": true,
"self": 41.04114127498269,
"children": {
"_process_rank_one_or_two_observation": {
"total": 54.747414989994695,
"count": 464560,
"is_parallel": true,
"self": 54.747414989994695
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 537.4854651230647,
"count": 232281,
"self": 6.882245262039191,
"children": {
"process_trajectory": {
"total": 174.06454335102342,
"count": 232281,
"self": 172.69775888902268,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3667844620007372,
"count": 10,
"self": 1.3667844620007372
}
}
},
"_update_policy": {
"total": 356.5386765100021,
"count": 97,
"self": 298.3400421330052,
"children": {
"TorchPPOOptimizer.update": {
"total": 58.19863437699689,
"count": 2910,
"self": 58.19863437699689
}
}
}
}
}
}
},
"trainer_threads": {
"total": 9.330001375928987e-07,
"count": 1,
"self": 9.330001375928987e-07
},
"TrainerController._save_models": {
"total": 0.11475270100027046,
"count": 1,
"self": 0.0020421880003596016,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11271051299991086,
"count": 1,
"self": 0.11271051299991086
}
}
}
}
}
}
}