{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.1234664916992188,
"min": 2.0680198669433594,
"max": 3.295696258544922,
"count": 401
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 44575.80859375,
"min": 23121.9296875,
"max": 131835.265625,
"count": 401
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 43.89090909090909,
"min": 39.62809917355372,
"max": 999.0,
"count": 401
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19312.0,
"min": 16504.0,
"max": 23828.0,
"count": 401
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1458.819924772066,
"min": 1197.381414094449,
"max": 1458.819924772066,
"count": 361
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 320940.38344985456,
"min": 2394.762828188898,
"max": 349825.9969054016,
"count": 361
},
"SoccerTwos.Step.mean": {
"value": 4009980.0,
"min": 9580.0,
"max": 4009980.0,
"count": 401
},
"SoccerTwos.Step.sum": {
"value": 4009980.0,
"min": 9580.0,
"max": 4009980.0,
"count": 401
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": -0.017577948048710823,
"min": -0.0689413845539093,
"max": 0.12349371612071991,
"count": 401
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": -3.8671483993530273,
"min": -11.857917785644531,
"max": 23.282855987548828,
"count": 401
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.011574816890060902,
"min": -0.07775025814771652,
"max": 0.12607432901859283,
"count": 401
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": -2.546459674835205,
"min": -13.373044967651367,
"max": 22.963909149169922,
"count": 401
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 401
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 401
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.04317272684790872,
"min": -0.5714285714285714,
"max": 0.4855199992656708,
"count": 401
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -9.497999906539917,
"min": -38.985799729824066,
"max": 44.62460023164749,
"count": 401
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.04317272684790872,
"min": -0.5714285714285714,
"max": 0.4855199992656708,
"count": 401
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -9.497999906539917,
"min": -38.985799729824066,
"max": 44.62460023164749,
"count": 401
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 401
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 401
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014393620810005813,
"min": 0.0090754490942345,
"max": 0.023623464931733906,
"count": 189
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014393620810005813,
"min": 0.0090754490942345,
"max": 0.023623464931733906,
"count": 189
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.10187095900376637,
"min": 6.334792648961715e-07,
"max": 0.11014831562836964,
"count": 189
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.10187095900376637,
"min": 6.334792648961715e-07,
"max": 0.11014831562836964,
"count": 189
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.10531567086776097,
"min": 5.973257297379557e-07,
"max": 0.11348382458090782,
"count": 189
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.10531567086776097,
"min": 5.973257297379557e-07,
"max": 0.11348382458090782,
"count": 189
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 189
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 189
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 189
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 189
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 189
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 189
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679248253",
"python_version": "3.9.16 (main, Dec 7 2022, 01:11:51) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/linux/SoccerTwos/SoccerTwos --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1679256460"
},
"total": 8207.504130348,
"count": 1,
"self": 0.1238337219983805,
"children": {
"run_training.setup": {
"total": 0.10375207200013392,
"count": 1,
"self": 0.10375207200013392
},
"TrainerController.start_learning": {
"total": 8207.276544554,
"count": 1,
"self": 5.5967586282786215,
"children": {
"TrainerController._reset_env": {
"total": 10.842579830998147,
"count": 21,
"self": 10.842579830998147
},
"TrainerController.advance": {
"total": 8190.264911134723,
"count": 266800,
"self": 6.755552592790991,
"children": {
"env_step": {
"total": 6383.723980721797,
"count": 266800,
"self": 5051.430777319019,
"children": {
"SubprocessEnvManager._take_step": {
"total": 1328.8135201068344,
"count": 266800,
"self": 43.78259800200749,
"children": {
"TorchPolicy.evaluate": {
"total": 1285.030922104827,
"count": 510964,
"self": 1285.030922104827
}
}
},
"workers": {
"total": 3.4796832959432322,
"count": 266799,
"self": 0.0,
"children": {
"worker_root": {
"total": 8190.199200947313,
"count": 266799,
"is_parallel": true,
"self": 3925.901021403367,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.006427587999951356,
"count": 2,
"is_parallel": true,
"self": 0.004141516000345291,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0022860719996060652,
"count": 8,
"is_parallel": true,
"self": 0.0022860719996060652
}
}
},
"UnityEnvironment.step": {
"total": 0.03766557400012971,
"count": 1,
"is_parallel": true,
"self": 0.001046170000336133,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0008184700000128942,
"count": 1,
"is_parallel": true,
"self": 0.0008184700000128942
},
"communicator.exchange": {
"total": 0.03265630499981853,
"count": 1,
"is_parallel": true,
"self": 0.03265630499981853
},
"steps_from_proto": {
"total": 0.0031446289999621513,
"count": 2,
"is_parallel": true,
"self": 0.0006178869994073466,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0025267420005548047,
"count": 8,
"is_parallel": true,
"self": 0.0025267420005548047
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 4264.255875027952,
"count": 266798,
"is_parallel": true,
"self": 251.12442209167466,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 174.21723865785134,
"count": 266798,
"is_parallel": true,
"self": 174.21723865785134
},
"communicator.exchange": {
"total": 3060.944262470271,
"count": 266798,
"is_parallel": true,
"self": 3060.944262470271
},
"steps_from_proto": {
"total": 777.9699518081552,
"count": 533596,
"is_parallel": true,
"self": 143.55081448279225,
"children": {
"_process_rank_one_or_two_observation": {
"total": 634.419137325363,
"count": 2134384,
"is_parallel": true,
"self": 634.419137325363
}
}
}
}
},
"steps_from_proto": {
"total": 0.04230451599460139,
"count": 40,
"is_parallel": true,
"self": 0.008221047996812558,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.03408346799778883,
"count": 160,
"is_parallel": true,
"self": 0.03408346799778883
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 1799.7853778201352,
"count": 266799,
"self": 50.939613701308645,
"children": {
"process_trajectory": {
"total": 566.7395315698086,
"count": 266799,
"self": 564.4333904428108,
"children": {
"RLTrainer._checkpoint": {
"total": 2.3061411269977725,
"count": 8,
"self": 2.3061411269977725
}
}
},
"_update_policy": {
"total": 1182.106232549018,
"count": 190,
"self": 738.5792792499833,
"children": {
"TorchPOCAOptimizer.update": {
"total": 443.52695329903463,
"count": 5700,
"self": 443.52695329903463
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.631999111850746e-06,
"count": 1,
"self": 1.631999111850746e-06
},
"TrainerController._save_models": {
"total": 0.5722933280012512,
"count": 1,
"self": 0.0023873210011515766,
"children": {
"RLTrainer._checkpoint": {
"total": 0.5699060070000996,
"count": 1,
"self": 0.5699060070000996
}
}
}
}
}
}
}