{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 1.7251516580581665,
"min": 1.707503318786621,
"max": 3.174571990966797,
"count": 576
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 35938.359375,
"min": 31786.740234375,
"max": 105980.0078125,
"count": 576
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 56.95348837209303,
"min": 39.17741935483871,
"max": 999.0,
"count": 576
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 19592.0,
"min": 3712.0,
"max": 24624.0,
"count": 576
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1578.3574627670168,
"min": 1199.2677790086584,
"max": 1606.0342035563074,
"count": 575
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 271477.4835959269,
"min": 2402.7180783841522,
"max": 374456.13451850973,
"count": 575
},
"SoccerTwos.Step.mean": {
"value": 6249989.0,
"min": 499642.0,
"max": 6249989.0,
"count": 576
},
"SoccerTwos.Step.sum": {
"value": 6249989.0,
"min": 499642.0,
"max": 6249989.0,
"count": 576
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.012768158689141273,
"min": -0.11785878241062164,
"max": 0.19906549155712128,
"count": 576
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 2.2088913917541504,
"min": -25.575355529785156,
"max": 29.151256561279297,
"count": 576
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.008878721855580807,
"min": -0.11966969072818756,
"max": 0.1968848556280136,
"count": 576
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 1.5360188484191895,
"min": -25.96832275390625,
"max": 28.592832565307617,
"count": 576
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 576
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 576
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": -0.1276531805192804,
"min": -1.0,
"max": 0.4130636372349479,
"count": 576
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": -22.08400022983551,
"min": -61.4134002327919,
"max": 55.57920044660568,
"count": 576
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": -0.1276531805192804,
"min": -1.0,
"max": 0.4130636372349479,
"count": 576
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": -22.08400022983551,
"min": -61.4134002327919,
"max": 55.57920044660568,
"count": 576
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 576
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 576
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.014371902578568551,
"min": 0.010854670327777665,
"max": 0.02512110493456324,
"count": 278
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.014371902578568551,
"min": 0.010854670327777665,
"max": 0.02512110493456324,
"count": 278
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.1116157442331314,
"min": 0.0019259569118730724,
"max": 0.12376162310441335,
"count": 278
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.1116157442331314,
"min": 0.0019259569118730724,
"max": 0.12376162310441335,
"count": 278
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.11334097410241763,
"min": 0.0019231226838504274,
"max": 0.12612716878453892,
"count": 278
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.11334097410241763,
"min": 0.0019231226838504274,
"max": 0.12612716878453892,
"count": 278
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 278
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 278
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 278
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 278
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 278
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 278
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1679767885",
"python_version": "3.9.16 (main, Mar 8 2023, 10:39:24) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "C:\\Users\\jzaselsky\\Anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos --no-graphics --resume",
"mlagents_version": "0.30.0",
"mlagents_envs_version": "0.30.0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.0.0+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1679797425"
},
"total": 29542.645427699998,
"count": 1,
"self": 0.27287649999561836,
"children": {
"run_training.setup": {
"total": 0.08973530000000007,
"count": 1,
"self": 0.08973530000000007
},
"TrainerController.start_learning": {
"total": 29542.282815900002,
"count": 1,
"self": 15.447214899966639,
"children": {
"TrainerController._reset_env": {
"total": 4.7684795000045765,
"count": 30,
"self": 4.7684795000045765
},
"TrainerController.advance": {
"total": 29521.837226500033,
"count": 399929,
"self": 15.625370101075532,
"children": {
"env_step": {
"total": 11538.266188899039,
"count": 399929,
"self": 9032.36400050034,
"children": {
"SubprocessEnvManager._take_step": {
"total": 2496.314132599687,
"count": 399929,
"self": 91.53346130116415,
"children": {
"TorchPolicy.evaluate": {
"total": 2404.780671298523,
"count": 723952,
"self": 2404.780671298523
}
}
},
"workers": {
"total": 9.588055799012594,
"count": 399929,
"self": 0.0,
"children": {
"worker_root": {
"total": 29517.646469899875,
"count": 399929,
"is_parallel": true,
"self": 22200.7477018988,
"children": {
"steps_from_proto": {
"total": 0.10001439999791817,
"count": 60,
"is_parallel": true,
"self": 0.01949169997639366,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.08052270002152451,
"count": 240,
"is_parallel": true,
"self": 0.08052270002152451
}
}
},
"UnityEnvironment.step": {
"total": 7316.798753601077,
"count": 399929,
"is_parallel": true,
"self": 432.7451484020312,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 342.53851350016805,
"count": 399929,
"is_parallel": true,
"self": 342.53851350016805
},
"communicator.exchange": {
"total": 5166.394223900375,
"count": 399929,
"is_parallel": true,
"self": 5166.394223900375
},
"steps_from_proto": {
"total": 1375.1208677985019,
"count": 799858,
"is_parallel": true,
"self": 264.94331849852915,
"children": {
"_process_rank_one_or_two_observation": {
"total": 1110.1775492999727,
"count": 3199432,
"is_parallel": true,
"self": 1110.1775492999727
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 17967.945667499916,
"count": 399929,
"self": 95.84763130049396,
"children": {
"process_trajectory": {
"total": 2960.893217999399,
"count": 399929,
"self": 2957.7614840994015,
"children": {
"RLTrainer._checkpoint": {
"total": 3.131733899997478,
"count": 12,
"self": 3.131733899997478
}
}
},
"_update_policy": {
"total": 14911.204818200022,
"count": 278,
"self": 1375.758417000041,
"children": {
"TorchPOCAOptimizer.update": {
"total": 13535.44640119998,
"count": 8340,
"self": 13535.44640119998
}
}
}
}
}
}
},
"trainer_threads": {
"total": 2.2999993234407157e-06,
"count": 1,
"self": 2.2999993234407157e-06
},
"TrainerController._save_models": {
"total": 0.22989269999743556,
"count": 1,
"self": 0.0034610999973665457,
"children": {
"RLTrainer._checkpoint": {
"total": 0.226431600000069,
"count": 1,
"self": 0.226431600000069
}
}
}
}
}
}
}