{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.5889737606048584,
"min": 0.5854454636573792,
"max": 1.4055655002593994,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 17697.484375,
"min": 17647.66796875,
"max": 42639.234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989900.0,
"min": 29952.0,
"max": 989900.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989900.0,
"min": 29952.0,
"max": 989900.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.18668167293071747,
"min": -0.106877401471138,
"max": 0.1946895718574524,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": 47.97718811035156,
"min": -25.75745391845703,
"max": 49.45115280151367,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.44912707805633545,
"min": -0.11661994457244873,
"max": 0.7304412126541138,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 115.4256591796875,
"min": -29.6214656829834,
"max": 173.11456298828125,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06829019276511983,
"min": 0.06567004802456636,
"max": 0.07319228416644637,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9560626987116776,
"min": 0.5009770855302895,
"max": 1.0604173963074572,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0505443668182267,
"min": 0.00020144575900598848,
"max": 0.0505443668182267,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.7076211354551738,
"min": 0.0028202406260838388,
"max": 0.7076211354551738,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.422654668671433e-06,
"min": 7.422654668671433e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010391716536140007,
"min": 0.00010391716536140007,
"max": 0.0035076614307796,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10247418571428572,
"min": 0.10247418571428572,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.4346386000000002,
"min": 1.3886848,
"max": 2.5692204,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025717115285714306,
"min": 0.00025717115285714306,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0036003961400000025,
"min": 0.0036003961400000025,
"max": 0.11694511796,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.016612287610769272,
"min": 0.016435718163847923,
"max": 0.6745191812515259,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 0.2325720191001892,
"min": 0.23024119436740875,
"max": 4.721634387969971,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 584.6153846153846,
"min": 584.6153846153846,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 30400.0,
"min": 15984.0,
"max": 33097.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": 0.8767692021165903,
"min": -1.0000000521540642,
"max": 0.8767692021165903,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": 45.591998510062695,
"min": -30.745801769196987,
"max": 45.591998510062695,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": 0.8767692021165903,
"min": -1.0000000521540642,
"max": 0.8767692021165903,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": 45.591998510062695,
"min": -30.745801769196987,
"max": 45.591998510062695,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 0.09984455910531463,
"min": 0.09984455910531463,
"max": 14.22121711447835,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 5.191917073476361,
"min": 4.786510685284156,
"max": 227.5394738316536,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1689436279",
"python_version": "3.10.12 (main, Jun 7 2023, 12:45:35) [GCC 9.4.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0+cu102",
"numpy_version": "1.21.2",
"end_time_seconds": "1689438506"
},
"total": 2226.8685961970004,
"count": 1,
"self": 0.4826569270003347,
"children": {
"run_training.setup": {
"total": 0.03862324399995032,
"count": 1,
"self": 0.03862324399995032
},
"TrainerController.start_learning": {
"total": 2226.347316026,
"count": 1,
"self": 1.482592812067196,
"children": {
"TrainerController._reset_env": {
"total": 4.151113783000028,
"count": 1,
"self": 4.151113783000028
},
"TrainerController.advance": {
"total": 2220.6222812849333,
"count": 63393,
"self": 1.5100999339656482,
"children": {
"env_step": {
"total": 1550.4962126359728,
"count": 63393,
"self": 1434.4200713579048,
"children": {
"SubprocessEnvManager._take_step": {
"total": 115.2016280210388,
"count": 63393,
"self": 4.853687228083459,
"children": {
"TorchPolicy.evaluate": {
"total": 110.34794079295534,
"count": 62572,
"self": 110.34794079295534
}
}
},
"workers": {
"total": 0.8745132570293208,
"count": 63393,
"self": 0.0,
"children": {
"worker_root": {
"total": 2221.195639222984,
"count": 63393,
"is_parallel": true,
"self": 903.4703006380473,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.0018957739999905243,
"count": 1,
"is_parallel": true,
"self": 0.00057146099993588,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0013243130000546444,
"count": 8,
"is_parallel": true,
"self": 0.0013243130000546444
}
}
},
"UnityEnvironment.step": {
"total": 0.054198573000007855,
"count": 1,
"is_parallel": true,
"self": 0.0006718600001249797,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.0005106369999339222,
"count": 1,
"is_parallel": true,
"self": 0.0005106369999339222
},
"communicator.exchange": {
"total": 0.05075729999998657,
"count": 1,
"is_parallel": true,
"self": 0.05075729999998657
},
"steps_from_proto": {
"total": 0.0022587759999623813,
"count": 1,
"is_parallel": true,
"self": 0.0004113810000490048,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0018473949999133765,
"count": 8,
"is_parallel": true,
"self": 0.0018473949999133765
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 1317.7253385849367,
"count": 63392,
"is_parallel": true,
"self": 34.60739540290888,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 23.06231559100445,
"count": 63392,
"is_parallel": true,
"self": 23.06231559100445
},
"communicator.exchange": {
"total": 1153.3920841130057,
"count": 63392,
"is_parallel": true,
"self": 1153.3920841130057
},
"steps_from_proto": {
"total": 106.66354347801769,
"count": 63392,
"is_parallel": true,
"self": 20.731451548112318,
"children": {
"_process_rank_one_or_two_observation": {
"total": 85.93209192990537,
"count": 507136,
"is_parallel": true,
"self": 85.93209192990537
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 668.6159687149947,
"count": 63393,
"self": 2.819989432982993,
"children": {
"process_trajectory": {
"total": 110.31504068801155,
"count": 63393,
"self": 110.10727649301157,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20776419499998156,
"count": 2,
"self": 0.20776419499998156
}
}
},
"_update_policy": {
"total": 555.4809385940001,
"count": 451,
"self": 360.854777838994,
"children": {
"TorchPPOOptimizer.update": {
"total": 194.62616075500614,
"count": 22797,
"self": 194.62616075500614
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.008999788609799e-06,
"count": 1,
"self": 1.008999788609799e-06
},
"TrainerController._save_models": {
"total": 0.0913271370000075,
"count": 1,
"self": 0.001367839000067761,
"children": {
"RLTrainer._checkpoint": {
"total": 0.08995929799993974,
"count": 1,
"self": 0.08995929799993974
}
}
}
}
}
}
}