{
"name": "root",
"gauges": {
"SnowballTarget.Policy.Entropy.mean": {
"value": 0.8873598575592041,
"min": 0.8873598575592041,
"max": 2.857192039489746,
"count": 20
},
"SnowballTarget.Policy.Entropy.sum": {
"value": 8472.51171875,
"min": 8472.51171875,
"max": 29260.50390625,
"count": 20
},
"SnowballTarget.Step.mean": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Step.sum": {
"value": 199984.0,
"min": 9952.0,
"max": 199984.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
"value": 12.900227546691895,
"min": 0.3158401548862457,
"max": 12.900227546691895,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
"value": 2515.54443359375,
"min": 61.27299118041992,
"max": 2592.0751953125,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.mean": {
"value": 199.0,
"min": 199.0,
"max": 199.0,
"count": 20
},
"SnowballTarget.Environment.EpisodeLength.sum": {
"value": 8756.0,
"min": 8756.0,
"max": 10945.0,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.mean": {
"value": 0.0668118380374782,
"min": 0.0651139115990886,
"max": 0.07173888658579408,
"count": 20
},
"SnowballTarget.Losses.PolicyLoss.sum": {
"value": 0.2672473521499128,
"min": 0.2604556463963544,
"max": 0.35467591713520463,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.mean": {
"value": 0.17821636704691485,
"min": 0.11747463420375852,
"max": 0.28628270339732076,
"count": 20
},
"SnowballTarget.Losses.ValueLoss.sum": {
"value": 0.7128654681876594,
"min": 0.4698985368150341,
"max": 1.4314135169866038,
"count": 20
},
"SnowballTarget.Policy.LearningRate.mean": {
"value": 8.082097306000005e-06,
"min": 8.082097306000005e-06,
"max": 0.000291882002706,
"count": 20
},
"SnowballTarget.Policy.LearningRate.sum": {
"value": 3.232838922400002e-05,
"min": 3.232838922400002e-05,
"max": 0.00138516003828,
"count": 20
},
"SnowballTarget.Policy.Epsilon.mean": {
"value": 0.10269400000000001,
"min": 0.10269400000000001,
"max": 0.19729400000000002,
"count": 20
},
"SnowballTarget.Policy.Epsilon.sum": {
"value": 0.41077600000000003,
"min": 0.41077600000000003,
"max": 0.96172,
"count": 20
},
"SnowballTarget.Policy.Beta.mean": {
"value": 0.0001444306000000001,
"min": 0.0001444306000000001,
"max": 0.0048649706,
"count": 20
},
"SnowballTarget.Policy.Beta.sum": {
"value": 0.0005777224000000004,
"min": 0.0005777224000000004,
"max": 0.023089828,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.mean": {
"value": 25.454545454545453,
"min": 3.409090909090909,
"max": 25.454545454545453,
"count": 20
},
"SnowballTarget.Environment.CumulativeReward.sum": {
"value": 1120.0,
"min": 150.0,
"max": 1392.0,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.mean": {
"value": 25.454545454545453,
"min": 3.409090909090909,
"max": 25.454545454545453,
"count": 20
},
"SnowballTarget.Policy.ExtrinsicReward.sum": {
"value": 1120.0,
"min": 150.0,
"max": 1392.0,
"count": 20
},
"SnowballTarget.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
},
"SnowballTarget.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 20
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1677167571",
"python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
"mlagents_version": "0.29.0.dev0",
"mlagents_envs_version": "0.29.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.8.1+cu102",
"numpy_version": "1.22.4",
"end_time_seconds": "1677168185"
},
"total": 614.07469524,
"count": 1,
"self": 0.5927766849999898,
"children": {
"run_training.setup": {
"total": 0.1476218600000152,
"count": 1,
"self": 0.1476218600000152
},
"TrainerController.start_learning": {
"total": 613.3342966949999,
"count": 1,
"self": 0.8809309849995088,
"children": {
"TrainerController._reset_env": {
"total": 6.9334763790000125,
"count": 1,
"self": 6.9334763790000125
},
"TrainerController.advance": {
"total": 605.3699830320003,
"count": 18201,
"self": 0.42970611800114966,
"children": {
"env_step": {
"total": 604.9402769139991,
"count": 18201,
"self": 475.44441866798604,
"children": {
"SubprocessEnvManager._take_step": {
"total": 129.06946824300945,
"count": 18201,
"self": 2.625341695010718,
"children": {
"TorchPolicy.evaluate": {
"total": 126.44412654799874,
"count": 18201,
"self": 21.03455597799143,
"children": {
"TorchPolicy.sample_actions": {
"total": 105.4095705700073,
"count": 18201,
"self": 105.4095705700073
}
}
}
}
},
"workers": {
"total": 0.4263900030036325,
"count": 18201,
"self": 0.0,
"children": {
"worker_root": {
"total": 610.9053553090034,
"count": 18201,
"is_parallel": true,
"self": 272.9443326890107,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.004917073999990862,
"count": 1,
"is_parallel": true,
"self": 0.0034180729999775394,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0014990010000133225,
"count": 10,
"is_parallel": true,
"self": 0.0014990010000133225
}
}
},
"UnityEnvironment.step": {
"total": 0.05105843000001187,
"count": 1,
"is_parallel": true,
"self": 0.0007266360000244276,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00044021399997973276,
"count": 1,
"is_parallel": true,
"self": 0.00044021399997973276
},
"communicator.exchange": {
"total": 0.04753440500002171,
"count": 1,
"is_parallel": true,
"self": 0.04753440500002171
},
"steps_from_proto": {
"total": 0.0023571749999860003,
"count": 1,
"is_parallel": true,
"self": 0.0005163760000073125,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018407989999786878,
"count": 10,
"is_parallel": true,
"self": 0.0018407989999786878
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 337.9610226199927,
"count": 18200,
"is_parallel": true,
"self": 14.023425474980286,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 7.64241397400292,
"count": 18200,
"is_parallel": true,
"self": 7.64241397400292
},
"communicator.exchange": {
"total": 271.30734948500594,
"count": 18200,
"is_parallel": true,
"self": 271.30734948500594
},
"steps_from_proto": {
"total": 44.98783368600357,
"count": 18200,
"is_parallel": true,
"self": 10.447215401011931,
"children": {
"_process_rank_one_or_two_observation": {
"total": 34.54061828499164,
"count": 182000,
"is_parallel": true,
"self": 34.54061828499164
}
}
}
}
}
}
}
}
}
}
}
}
},
"trainer_threads": {
"total": 0.0001838880000377685,
"count": 1,
"self": 0.0001838880000377685,
"children": {
"thread_root": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"trainer_advance": {
"total": 600.0377256159707,
"count": 600020,
"is_parallel": true,
"self": 15.893219618960757,
"children": {
"process_trajectory": {
"total": 330.3091263330091,
"count": 600020,
"is_parallel": true,
"self": 329.465156544009,
"children": {
"RLTrainer._checkpoint": {
"total": 0.8439697890000843,
"count": 4,
"is_parallel": true,
"self": 0.8439697890000843
}
}
},
"_update_policy": {
"total": 253.83537966400075,
"count": 90,
"is_parallel": true,
"self": 80.97694738499678,
"children": {
"TorchPPOOptimizer.update": {
"total": 172.85843227900398,
"count": 4587,
"is_parallel": true,
"self": 172.85843227900398
}
}
}
}
}
}
}
}
},
"TrainerController._save_models": {
"total": 0.14972241100008432,
"count": 1,
"self": 0.001316302000077485,
"children": {
"RLTrainer._checkpoint": {
"total": 0.14840610900000684,
"count": 1,
"self": 0.14840610900000684
}
}
}
}
}
}
}