{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.895189046859741,
"min": 2.882762908935547,
"max": 3.2957255840301514,
"count": 100
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 70225.703125,
"min": 12840.181640625,
"max": 113489.03125,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 70.05479452054794,
"min": 67.02857142857142,
"max": 999.0,
"count": 100
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 20456.0,
"min": 7992.0,
"max": 27684.0,
"count": 100
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1371.2024934525527,
"min": 1198.7478938846245,
"max": 1371.2024934525527,
"count": 98
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 200195.5640440727,
"min": 2398.071096847074,
"max": 200195.5640440727,
"count": 98
},
"SoccerTwos.Step.mean": {
"value": 999796.0,
"min": 9354.0,
"max": 999796.0,
"count": 100
},
"SoccerTwos.Step.sum": {
"value": 999796.0,
"min": 9354.0,
"max": 999796.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.12615734338760376,
"min": -0.034751515835523605,
"max": 0.1727389246225357,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 18.41897201538086,
"min": -0.5118509531021118,
"max": 19.83926773071289,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.12158912420272827,
"min": -0.03479311615228653,
"max": 0.16984131932258606,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 17.752012252807617,
"min": -0.506144642829895,
"max": 18.756845474243164,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.1343589033166023,
"min": -0.7058823529411765,
"max": 0.4778153827557197,
"count": 100
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 19.616399884223938,
"min": -19.895799815654755,
"max": 29.935599863529205,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.1343589033166023,
"min": -0.7058823529411765,
"max": 0.4778153827557197,
"count": 100
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 19.616399884223938,
"min": -19.895799815654755,
"max": 29.935599863529205,
"count": 100
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 100
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.019362225211807527,
"min": 0.012232778915010083,
"max": 0.020660320390015842,
"count": 47
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.019362225211807527,
"min": 0.012232778915010083,
"max": 0.020660320390015842,
"count": 47
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.06422364997367064,
"min": 0.0007572673028334975,
"max": 0.06422364997367064,
"count": 47
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.06422364997367064,
"min": 0.0007572673028334975,
"max": 0.06422364997367064,
"count": 47
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.06598331580559412,
"min": 0.0007670198295575877,
"max": 0.06598331580559412,
"count": 47
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.06598331580559412,
"min": 0.0007670198295575877,
"max": 0.06598331580559412,
"count": 47
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 47
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 47
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 47
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000004,
"max": 0.20000000000000007,
"count": 47
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 47
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 47
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678028558",
"python_version": "3.9.16 (main, Mar 1 2023, 18:30:21) [MSC v.1916 64 bit (AMD64)]",
"command_line_arguments": "D:\\Users\\tara0\\anaconda3\\envs\\rl\\Scripts\\mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos.exe --run-id=SoccerTwos-v1 --no-graphics --force",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.13.1+cpu",
"numpy_version": "1.21.2",
"end_time_seconds": "1678032305"
},
"total": 3747.2477577,
"count": 1,
"self": 6.996451900000011,
"children": {
"run_training.setup": {
"total": 0.21207019999999988,
"count": 1,
"self": 0.21207019999999988
},
"TrainerController.start_learning": {
"total": 3740.0392355999998,
"count": 1,
"self": 1.6216672999885304,
"children": {
"TrainerController._reset_env": {
"total": 5.618658799999587,
"count": 5,
"self": 5.618658799999587
},
"TrainerController.advance": {
"total": 3732.5257065000114,
"count": 65469,
"self": 1.8485426001170708,
"children": {
"env_step": {
"total": 1295.1675282999415,
"count": 65469,
"self": 997.296369299966,
"children": {
"SubprocessEnvManager._take_step": {
"total": 296.8057279000061,
"count": 65469,
"self": 10.310303100041835,
"children": {
"TorchPolicy.evaluate": {
"total": 286.49542479996427,
"count": 127760,
"self": 286.49542479996427
}
}
},
"workers": {
"total": 1.0654310999694125,
"count": 65469,
"self": 0.0,
"children": {
"worker_root": {
"total": 3731.3927511999477,
"count": 65469,
"is_parallel": true,
"self": 2935.654930099925,
"children": {
"steps_from_proto": {
"total": 0.009749700000887707,
"count": 10,
"is_parallel": true,
"self": 0.0021172000019040738,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.007632499998983633,
"count": 40,
"is_parallel": true,
"self": 0.007632499998983633
}
}
},
"UnityEnvironment.step": {
"total": 795.7280714000217,
"count": 65469,
"is_parallel": true,
"self": 38.773284400056355,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 31.08622479999121,
"count": 65469,
"is_parallel": true,
"self": 31.08622479999121
},
"communicator.exchange": {
"total": 602.8142191999585,
"count": 65469,
"is_parallel": true,
"self": 602.8142191999585
},
"steps_from_proto": {
"total": 123.05434300001554,
"count": 130938,
"is_parallel": true,
"self": 26.732309099967253,
"children": {
"_process_rank_one_or_two_observation": {
"total": 96.32203390004828,
"count": 523752,
"is_parallel": true,
"self": 96.32203390004828
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 2435.5096355999526,
"count": 65469,
"self": 12.60086309991766,
"children": {
"process_trajectory": {
"total": 311.2266540000354,
"count": 65469,
"self": 307.76587500003563,
"children": {
"RLTrainer._checkpoint": {
"total": 3.460778999999775,
"count": 2,
"self": 3.460778999999775
}
}
},
"_update_policy": {
"total": 2111.6821184999994,
"count": 47,
"self": 190.00795910000193,
"children": {
"TorchPOCAOptimizer.update": {
"total": 1921.6741593999975,
"count": 1416,
"self": 1921.6741593999975
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.100000190490391e-06,
"count": 1,
"self": 1.100000190490391e-06
},
"TrainerController._save_models": {
"total": 0.27320190000000366,
"count": 1,
"self": 0.051475399999617366,
"children": {
"RLTrainer._checkpoint": {
"total": 0.2217265000003863,
"count": 1,
"self": 0.2217265000003863
}
}
}
}
}
}
}