{
"name": "root",
"gauges": {
"Huggy.Policy.Entropy.mean": {
"value": 1.4081265926361084,
"min": 1.4081265926361084,
"max": 1.4280298948287964,
"count": 40
},
"Huggy.Policy.Entropy.sum": {
"value": 70611.9140625,
"min": 69024.0,
"max": 76856.78125,
"count": 40
},
"Huggy.Environment.EpisodeLength.mean": {
"value": 85.69618055555556,
"min": 79.24077046548956,
"max": 399.16,
"count": 40
},
"Huggy.Environment.EpisodeLength.sum": {
"value": 49361.0,
"min": 48782.0,
"max": 50099.0,
"count": 40
},
"Huggy.Step.mean": {
"value": 1999964.0,
"min": 49792.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Step.sum": {
"value": 1999964.0,
"min": 49792.0,
"max": 1999964.0,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.mean": {
"value": 2.4141650199890137,
"min": 0.15193036198616028,
"max": 2.465233087539673,
"count": 40
},
"Huggy.Policy.ExtrinsicValueEstimate.sum": {
"value": 1390.55908203125,
"min": 18.839365005493164,
"max": 1480.2779541015625,
"count": 40
},
"Huggy.Environment.CumulativeReward.mean": {
"value": 3.67760917565061,
"min": 1.8638189033635202,
"max": 3.9284404501882206,
"count": 40
},
"Huggy.Environment.CumulativeReward.sum": {
"value": 2118.3028851747513,
"min": 231.1135440170765,
"max": 2315.2461104393005,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.mean": {
"value": 3.67760917565061,
"min": 1.8638189033635202,
"max": 3.9284404501882206,
"count": 40
},
"Huggy.Policy.ExtrinsicReward.sum": {
"value": 2118.3028851747513,
"min": 231.1135440170765,
"max": 2315.2461104393005,
"count": 40
},
"Huggy.Losses.PolicyLoss.mean": {
"value": 0.016371189924797767,
"min": 0.013947833617567084,
"max": 0.02079147932987932,
"count": 40
},
"Huggy.Losses.PolicyLoss.sum": {
"value": 0.0491135697743933,
"min": 0.027895667235134168,
"max": 0.05983873316242049,
"count": 40
},
"Huggy.Losses.ValueLoss.mean": {
"value": 0.05423077042731974,
"min": 0.01980876460050543,
"max": 0.060259520821273324,
"count": 40
},
"Huggy.Losses.ValueLoss.sum": {
"value": 0.1626923112819592,
"min": 0.03961752920101086,
"max": 0.17318796453376611,
"count": 40
},
"Huggy.Policy.LearningRate.mean": {
"value": 3.6945987685000005e-06,
"min": 3.6945987685000005e-06,
"max": 0.0002953146015617999,
"count": 40
},
"Huggy.Policy.LearningRate.sum": {
"value": 1.1083796305500001e-05,
"min": 1.1083796305500001e-05,
"max": 0.0008440497186501,
"count": 40
},
"Huggy.Policy.Epsilon.mean": {
"value": 0.10123149999999999,
"min": 0.10123149999999999,
"max": 0.1984382,
"count": 40
},
"Huggy.Policy.Epsilon.sum": {
"value": 0.3036945,
"min": 0.20762535000000007,
"max": 0.5813499000000001,
"count": 40
},
"Huggy.Policy.Beta.mean": {
"value": 7.145185000000001e-05,
"min": 7.145185000000001e-05,
"max": 0.00492206618,
"count": 40
},
"Huggy.Policy.Beta.sum": {
"value": 0.00021435555000000003,
"min": 0.00021435555000000003,
"max": 0.014069360010000004,
"count": 40
},
"Huggy.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
},
"Huggy.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 40
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677999106",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/Huggy.yaml --env=./trained-envs-executables/linux/Huggy/Huggy --run-id=Huggy --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1678001467"
},
"total": 2361.503765561,
"count": 1,
"self": 0.44730999600005816,
"children": {
"run_training.setup": {
"total": 0.11073548800004573,
"count": 1,
"self": 0.11073548800004573
},
"TrainerController.start_learning": {
"total": 2360.945720077,
"count": 1,
"self": 4.091890744986813,
"children": {
"TrainerController._reset_env": {
"total": 10.101833893999924,
"count": 1,
"self": 10.101833893999924
},
"TrainerController.advance": {
"total": 2346.6077393450128,
"count": 232799,
"self": 4.33808647299611,
"children": {
"env_step": {
"total": 1813.2855425989576,
"count": 232799,
"self": 1516.4104268371195,
"children": {
"SubprocessEnvManager._take_step": {
"total": 294.1058814239949,
"count": 232799,
"self": 15.29578289704557,
"children": {
"TorchPolicy.evaluate": {
"total": 278.81009852694933,
"count": 223034,
"self": 69.69439410000325,
"children": {
"TorchPolicy.sample_actions": {
"total": 209.11570442694608,
"count": 223034,
"self": 209.11570442694608
}
}
}
}
},
"workers": {
"total": 2.7692343378431588,
"count": 232799,
"self": 0.0,
"children": {
"worker_root": {
"total": 2353.048092424082,
"count": 232799,
"is_parallel": true,
"self": 1121.3570967331075,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0008487229999900592,
"count": 1,
"is_parallel": true,
"self": 0.0002994889999854422,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.000549234000004617,
"count": 2,
"is_parallel": true,
"self": 0.000549234000004617
}
}
},
"UnityEnvironment.step": {
"total": 0.05089444099996854,
"count": 1,
"is_parallel": true,
"self": 0.0003100499999391104,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0002074110000194196,
"count": 1,
"is_parallel": true,
"self": 0.0002074110000194196
},
"communicator.exchange": {
"total": 0.04791155099997013,
"count": 1,
"is_parallel": true,
"self": 0.04791155099997013
},
"steps_from_proto": {
"total": 0.002465429000039876,
"count": 1,
"is_parallel": true,
"self": 0.00026046200002838304,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.002204967000011493,
"count": 2,
"is_parallel": true,
"self": 0.002204967000011493
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1231.6909956909744,
"count": 232798,
"is_parallel": true,
"self": 37.59044481288902,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 77.31957086805346,
"count": 232798,
"is_parallel": true,
"self": 77.31957086805346
},
"communicator.exchange": {
"total": 1027.3664539559811,
"count": 232798,
"is_parallel": true,
"self": 1027.3664539559811
},
"steps_from_proto": {
"total": 89.41452605405084,
"count": 232798,
"is_parallel": true,
"self": 35.92515238796102,
"children": {
"_process_rank_one_or_two_observation": {
"total": 53.48937366608982,
"count": 465596,
"is_parallel": true,
"self": 53.48937366608982
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 528.984110273059,
"count": 232799,
"self": 6.525908199046739,
"children": {
"process_trajectory": {
"total": 168.42436956001006,
"count": 232799,
"self": 167.07385322901018,
"children": {
"RLTrainer._checkpoint": {
"total": 1.3505163309998807,
"count": 10,
"self": 1.3505163309998807
}
}
},
"_update_policy": {
"total": 354.03383251400214,
"count": 97,
"self": 296.45758306699224,
"children": {
"TorchPPOOptimizer.update": {
"total": 57.5762494470099,
"count": 2910,
"self": 57.5762494470099
}
}
}
}
}
}
},
"trainer_threads": {
"total": 8.730003173695877e-07,
"count": 1,
"self": 8.730003173695877e-07
},
"TrainerController._save_models": {
"total": 0.1442552200001046,
"count": 1,
"self": 0.0025357370000165247,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14171948300008808,
"count": 1,
"self": 0.14171948300008808
}
}
}
}
}
}
}