{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4014904499053955,
"min": 1.4014904499053955,
"max": 1.4277653694152832,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70734.625,
"min": 68627.984375,
"max": 77093.34375,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 98.98203592814372,
"min": 86.84035087719299,
"max": 421.99159663865544,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49590.0,
"min": 48852.0,
"max": 50242.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999345.0,
"min": 49989.0,
"max": 1999345.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999345.0,
"min": 49989.0,
"max": 1999345.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.3980538845062256,
"min": -0.00964298564940691,
"max": 2.4573490619659424,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1201.425048828125,
"min": -1.137872338294983,
"max": 1365.904052734375,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.667439023653666,
"min": 1.8024333730592565,
"max": 3.8627371399038766,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 1837.3869508504868,
"min": 212.68713802099228,
"max": 2136.50817579031,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.667439023653666,
"min": 1.8024333730592565,
"max": 3.8627371399038766,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 1837.3869508504868,
"min": 212.68713802099228,
"max": 2136.50817579031,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.01702958438092739,
"min": 0.013229516060528112,
"max": 0.019577306915824818,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.051088753142782176,
"min": 0.026459032121056224,
"max": 0.05873192074747445,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.052959732214609784,
"min": 0.023544111972053847,
"max": 0.06145080729491181,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.15887919664382935,
"min": 0.04708822394410769,
"max": 0.18435242188473544,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.198848933750006e-06,
"min": 3.198848933750006e-06,
"max": 0.00029536830154390005,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 9.596546801250018e-06,
"min": 9.596546801250018e-06,
"max": 0.0008442454685848499,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10106625000000001,
"min": 0.10106625000000001,
"max": 0.19845610000000002,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.30319875,
"min": 0.20727210000000007,
"max": 0.58141515,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 6.320587500000011e-05,
"min": 6.320587500000011e-05,
"max": 0.004922959390000002,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00018961762500000032,
"min": 0.00018961762500000032,
"max": 0.014072615985000002,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1671402480",
"python_version": "3.8.16 (default, Dec 7 2022, 01:12:13) \n[GCC 7.5.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.21.6",
"end_time_seconds": "1671404712"
},
"total": 2232.062187505,
"count": 1,
"self": 0.39568528300014805,
"children": {
"run_training.setup": {
"total": 0.10420680400000037,
"count": 1,
"self": 0.10420680400000037
},
"TrainerController.start_learning": {
"total": 2231.562295418,
"count": 1,
"self": 3.8880631450515466,
"children": {
"TrainerController._reset_env": {
"total": 7.667768556999988,
"count": 1,
"self": 7.667768556999988
},
"TrainerController.advance": {
"total": 2219.8912739879484,
"count": 231454,
"self": 4.288404419078233,
"children": {
"env_step": {
"total": 1758.8313431089316,
"count": 231454,
"self": 1479.7118371128806,
"children": {
"SubprocessEnvManager._take_step": {
"total": 276.50134769399483,
"count": 231454,
"self": 14.56048641192433,
"children": {
"TorchPolicy.evaluate": {
"total": 261.9408612820705,
"count": 223019,
"self": 64.62168202613628,
"children": {
"TorchPolicy.sample_actions": {
"total": 197.31917925593422,
"count": 223019,
"self": 197.31917925593422
}
}
}
}
},
"workers": {
"total": 2.618158302056088,
"count": 231454,
"self": 0.0,
"children": {
"worker_root": {
"total": 2223.5987440729673,
"count": 231454,
"is_parallel": true,
"self": 1007.2494400479313,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.002212584999995215,
"count": 1,
"is_parallel": true,
"self": 0.00033678500000178246,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018757999999934327,
"count": 2,
"is_parallel": true,
"self": 0.0018757999999934327
}
}
},
"UnityEnvironment.step": {
"total": 0.027545636000013474,
"count": 1,
"is_parallel": true,
"self": 0.00028993400007948367,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00018909199997096948,
"count": 1,
"is_parallel": true,
"self": 0.00018909199997096948
},
"communicator.exchange": {
"total": 0.02635741199998165,
"count": 1,
"is_parallel": true,
"self": 0.02635741199998165
},
"steps_from_proto": {
"total": 0.0007091979999813702,
"count": 1,
"is_parallel": true,
"self": 0.0002427570000236301,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.00046644099995774013,
"count": 2,
"is_parallel": true,
"self": 0.00046644099995774013
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1216.349304025036,
"count": 231453,
"is_parallel": true,
"self": 34.66733568118798,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 80.02373798796339,
"count": 231453,
"is_parallel": true,
"self": 80.02373798796339
},
"communicator.exchange": {
"total": 1006.9203259629376,
"count": 231453,
"is_parallel": true,
"self": 1006.9203259629376
},
"steps_from_proto": {
"total": 94.73790439294703,
"count": 231453,
"is_parallel": true,
"self": 41.28127723794984,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.45662715499719,
"count": 462906,
"is_parallel": true,
"self": 53.45662715499719
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 456.7715264599387,
"count": 231454,
"self": 6.0082554900058085,
"children": {
"process_trajectory": {
"total": 145.4992006439332,
"count": 231454,
"self": 144.32953474493326,
"children": {
"RLTrainer._checkpoint": {
"total": 1.1696658989999378,
"count": 10,
"self": 1.1696658989999378
}
}
},
"_update_policy": {
"total": 305.2640703259997,
"count": 97,
"self": 253.38502445000745,
"children": {
"TorchPPOOptimizer.update": {
"total": 51.87904587599223,
"count": 2910,
"self": 51.87904587599223
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.289998731925152e-07,
"count": 1,
"self": 8.289998731925152e-07
},
"TrainerController._save_models": {
"total": 0.11518889900025897,
"count": 1,
"self": 0.0019509270005073631,
"children": {
"RLTrainer._checkpoint": {
"total": 0.11323797199975161,
"count": 1,
"self": 0.11323797199975161
}
}
}
}
}
}
}