Upload initial model
- PPO-LunarLander-v2.zip +2 -2
- PPO-LunarLander-v2/data +22 -22
- PPO-LunarLander-v2/policy.optimizer.pth +1 -1
- PPO-LunarLander-v2/policy.pth +1 -1
- README.md +1 -1
- config.json +1 -1
- replay.mp4 +0 -0
- results.json +1 -1
PPO-LunarLander-v2.zip
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2398a94bf7281eefbd22f793c2cb9dac244862d7f6cc4b8c05293961e8b08325
+size 142181
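The .zip (like the .pth files below) is stored through Git LFS: the repository tracks only a pointer file with `version`, `oid sha256:`, and `size`, while the actual bytes live in LFS storage. A minimal sketch of checking a locally downloaded artifact against the pointer's oid (the local filename is an assumption):

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream a file through SHA-256, which is how Git LFS derives the pointer oid."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        while block := f.read(chunk_size):
            digest.update(block)
    return digest.hexdigest()

# Expected oid taken from the new pointer file in the diff above.
expected = "2398a94bf7281eefbd22f793c2cb9dac244862d7f6cc4b8c05293961e8b08325"
assert sha256_of("PPO-LunarLander-v2.zip") == expected  # hypothetical local path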
PPO-LunarLander-v2/data
CHANGED
@@ -4,20 +4,20 @@
 ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==",
 "__module__": "stable_baselines3.common.policies",
 "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ",
-"__init__": "<function ActorCriticPolicy.__init__ at
-"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at
-"reset_noise": "<function ActorCriticPolicy.reset_noise at
-"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at
-"_build": "<function ActorCriticPolicy._build at
-"forward": "<function ActorCriticPolicy.forward at
-"extract_features": "<function ActorCriticPolicy.extract_features at
-"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at
-"_predict": "<function ActorCriticPolicy._predict at
-"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at
-"get_distribution": "<function ActorCriticPolicy.get_distribution at
-"predict_values": "<function ActorCriticPolicy.predict_values at
+"__init__": "<function ActorCriticPolicy.__init__ at 0x7f5d21fe6af0>",
+"_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7f5d21fe6b80>",
+"reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7f5d21fe6c10>",
+"_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7f5d21fe6ca0>",
+"_build": "<function ActorCriticPolicy._build at 0x7f5d21fe6d30>",
+"forward": "<function ActorCriticPolicy.forward at 0x7f5d21fe6dc0>",
+"extract_features": "<function ActorCriticPolicy.extract_features at 0x7f5d21fe6e50>",
+"_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7f5d21fe6ee0>",
+"_predict": "<function ActorCriticPolicy._predict at 0x7f5d21fe6f70>",
+"evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7f5d21fea040>",
+"get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7f5d21fea0d0>",
+"predict_values": "<function ActorCriticPolicy.predict_values at 0x7f5d21fea160>",
 "__abstractmethods__": "frozenset()",
-"_abc_impl": "<_abc_data object at
+"_abc_impl": "<_abc_data object at 0x7f5d21fe0990>"
 },
 "verbose": 1,
 "policy_kwargs": {},
@@ -43,20 +43,20 @@
 "_np_random": null
 },
 "n_envs": 1,
-"num_timesteps":
-"_total_timesteps":
+"num_timesteps": 1700000,
+"_total_timesteps": 2000000,
 "_num_timesteps_at_start": 0,
 "seed": null,
 "action_noise": null,
-"start_time":
+"start_time": 1675259277903740900,
 "learning_rate": {
 ":type:": "<class 'function'>",
-":serialized:": "
+":serialized:": "gAWVHQMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAksTQwh8AIgAFABTAJSMjAogICAgICAgIFByb2dyZXNzIHdpbGwgZGVjcmVhc2UgZnJvbSAxIChiZWdpbm5pbmcpIHRvIDAuCgogICAgICAgIDpwYXJhbSBwcm9ncmVzc19yZW1haW5pbmc6CiAgICAgICAgOnJldHVybjogY3VycmVudCBsZWFybmluZyByYXRlCiAgICAgICAglIWUKYwScHJvZ3Jlc3NfcmVtYWluaW5nlIWUjD9DOlxVc2Vyc1xqb3JnZVxBcHBEYXRhXExvY2FsXFRlbXBcaXB5a2VybmVsXzEzODI4XDMzMjU0MjM3MTAucHmUjARmdW5jlEsOQwIIB5SMDWluaXRpYWxfdmFsdWWUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UTowIX19uYW1lX1+UjAhfX21haW5fX5R1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgdfZR9lChoFmgOjAxfX3F1YWxuYW1lX1+UjB1saW5lYXJfc2NoZWR1bGUuPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lCiMEnByb2dyZXNzX3JlbWFpbmluZ5SMCGJ1aWx0aW5zlIwFZmxvYXSUk5SMBnJldHVybpRoKnWMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flGgJjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/UGJN0vGp/IWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="
 },
 "tensorboard_log": null,
 "lr_schedule": {
 ":type:": "<class 'function'>",
-":serialized:": "
+":serialized:": "gAWVHQMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAksTQwh8AIgAFABTAJSMjAogICAgICAgIFByb2dyZXNzIHdpbGwgZGVjcmVhc2UgZnJvbSAxIChiZWdpbm5pbmcpIHRvIDAuCgogICAgICAgIDpwYXJhbSBwcm9ncmVzc19yZW1haW5pbmc6CiAgICAgICAgOnJldHVybjogY3VycmVudCBsZWFybmluZyByYXRlCiAgICAgICAglIWUKYwScHJvZ3Jlc3NfcmVtYWluaW5nlIWUjD9DOlxVc2Vyc1xqb3JnZVxBcHBEYXRhXExvY2FsXFRlbXBcaXB5a2VybmVsXzEzODI4XDMzMjU0MjM3MTAucHmUjARmdW5jlEsOQwIIB5SMDWluaXRpYWxfdmFsdWWUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UTowIX19uYW1lX1+UjAhfX21haW5fX5R1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgdfZR9lChoFmgOjAxfX3F1YWxuYW1lX1+UjB1saW5lYXJfc2NoZWR1bGUuPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lCiMEnByb2dyZXNzX3JlbWFpbmluZ5SMCGJ1aWx0aW5zlIwFZmxvYXSUk5SMBnJldHVybpRoKnWMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flGgJjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/UGJN0vGp/IWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="
 },
 "_last_obs": null,
 "_last_episode_starts": {
@@ -67,7 +67,7 @@
 "_episode_num": 0,
 "use_sde": false,
 "sde_sample_freq": -1,
-"_current_progress_remaining": 0.
+"_current_progress_remaining": 0.15008,
 "ep_info_buffer": {
 ":type:": "<class 'collections.deque'>",
 ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
@@ -76,14 +76,14 @@
 ":type:": "<class 'collections.deque'>",
 ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="
 },
-"_n_updates":
+"_n_updates": 3320,
 "n_steps": 4096,
 "gamma": 0.999,
-"gae_lambda": 0.
+"gae_lambda": 0.9,
 "ent_coef": 0.0,
 "vf_coef": 0.5,
 "max_grad_norm": 0.5,
-"batch_size":
+"batch_size": 32,
 "n_epochs": 8,
 "clip_range": {
 ":type:": "<class 'function'>",
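The updated `data` fields pin down the run: 1,700,000 of 2,000,000 planned timesteps (hence `_current_progress_remaining` of 0.15008), 3,320 gradient updates, and the PPO hyperparameters shown above. The cloudpickled `learning_rate`/`lr_schedule` payload names `linear_schedule.<locals>.func`, and the pickled floats decode to an initial learning rate of 0.001 and a constant `clip_range` of 0.2. Below is a sketch of a training script consistent with these values; the script itself is not part of the commit, so the entry point and env construction are assumptions, and the decoded constants should be treated as reconstructions:

```python
import gym
from stable_baselines3 import PPO

def linear_schedule(initial_value: float):
    """Return a schedule that decays linearly as progress_remaining goes from 1 to 0."""
    def func(progress_remaining: float) -> float:
        return progress_remaining * initial_value
    return func

env = gym.make("LunarLander-v2")
model = PPO(
    "MlpPolicy",
    env,
    learning_rate=linear_schedule(0.001),  # initial value decoded from the pickle; an assumption
    n_steps=4096,
    batch_size=32,
    n_epochs=8,
    gamma=0.999,
    gae_lambda=0.9,
    ent_coef=0.0,
    vf_coef=0.5,
    max_grad_norm=0.5,
    clip_range=0.2,  # decoded from the pickled constant_fn; an assumption
    verbose=1,
)
model.learn(total_timesteps=2_000_000)  # this checkpoint was saved at 1,700,000 steps
model.save("PPO-LunarLander-v2")
```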
PPO-LunarLander-v2/policy.optimizer.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:4e59b58f617d4ae00c5fe02b2a295fad3dfb5c34870c70ece177ecb234a08c51
 size 88057
PPO-LunarLander-v2/policy.pth
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
+oid sha256:31014ca5d6feaf98b3c2468edc38eb8d07fec16c793cedb602cf13c2a2e43951
 size 43393
README.md
CHANGED
@@ -16,7 +16,7 @@ model-index:
       type: LunarLander-v2
     metrics:
     - type: mean_reward
-      value:
+      value: 267.05 +/- 19.58
       name: mean_reward
       verified: false
 ---
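The `mean_reward` value written into the model card matches results.json below: 267.05 +/- 19.58 over 10 deterministic episodes. A hedged sketch of how such a metric is typically produced with stable-baselines3's `evaluate_policy`; the actual evaluation script is not part of this commit:

```python
import gym
from stable_baselines3 import PPO
from stable_baselines3.common.evaluation import evaluate_policy

env = gym.make("LunarLander-v2")
model = PPO.load("PPO-LunarLander-v2")  # reads PPO-LunarLander-v2.zip

# n_eval_episodes and deterministic mirror the fields recorded in results.json.
mean_reward, std_reward = evaluate_policy(
    model, env, n_eval_episodes=10, deterministic=True
)
print(f"mean_reward={mean_reward:.2f} +/- {std_reward:.2f}")
```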
config.json
CHANGED
@@ -1 +1 @@
-{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at
+{"policy_class": {":type:": "<class 'abc.ABCMeta'>", ":serialized:": "gAWVOwAAAAAAAACMIXN0YWJsZV9iYXNlbGluZXMzLmNvbW1vbi5wb2xpY2llc5SMEUFjdG9yQ3JpdGljUG9saWN5lJOULg==", "__module__": "stable_baselines3.common.policies", "__doc__": "\n Policy class for actor-critic algorithms (has both policy and value prediction).\n Used by A2C, PPO and the likes.\n\n :param observation_space: Observation space\n :param action_space: Action space\n :param lr_schedule: Learning rate schedule (could be constant)\n :param net_arch: The specification of the policy and value networks.\n :param activation_fn: Activation function\n :param ortho_init: Whether to use or not orthogonal initialization\n :param use_sde: Whether to use State Dependent Exploration or not\n :param log_std_init: Initial value for the log standard deviation\n :param full_std: Whether to use (n_features x n_actions) parameters\n for the std instead of only (n_features,) when using gSDE\n :param use_expln: Use ``expln()`` function instead of ``exp()`` to ensure\n a positive standard deviation (cf paper). It allows to keep variance\n above zero and prevent it from growing too fast. In practice, ``exp()`` is usually enough.\n :param squash_output: Whether to squash the output using a tanh function,\n this allows to ensure boundaries when using gSDE.\n :param features_extractor_class: Features extractor to use.\n :param features_extractor_kwargs: Keyword arguments\n to pass to the features extractor.\n :param share_features_extractor: If True, the features extractor is shared between the policy and value networks.\n :param normalize_images: Whether to normalize images or not,\n dividing by 255.0 (True by default)\n :param optimizer_class: The optimizer to use,\n ``th.optim.Adam`` by default\n :param optimizer_kwargs: Additional keyword arguments,\n excluding the learning rate, to pass to the optimizer\n ", "__init__": "<function ActorCriticPolicy.__init__ at 0x7f5d21fe6af0>", "_get_constructor_parameters": "<function ActorCriticPolicy._get_constructor_parameters at 0x7f5d21fe6b80>", "reset_noise": "<function ActorCriticPolicy.reset_noise at 0x7f5d21fe6c10>", "_build_mlp_extractor": "<function ActorCriticPolicy._build_mlp_extractor at 0x7f5d21fe6ca0>", "_build": "<function ActorCriticPolicy._build at 0x7f5d21fe6d30>", "forward": "<function ActorCriticPolicy.forward at 0x7f5d21fe6dc0>", "extract_features": "<function ActorCriticPolicy.extract_features at 0x7f5d21fe6e50>", "_get_action_dist_from_latent": "<function ActorCriticPolicy._get_action_dist_from_latent at 0x7f5d21fe6ee0>", "_predict": "<function ActorCriticPolicy._predict at 0x7f5d21fe6f70>", "evaluate_actions": "<function ActorCriticPolicy.evaluate_actions at 0x7f5d21fea040>", "get_distribution": "<function ActorCriticPolicy.get_distribution at 0x7f5d21fea0d0>", "predict_values": "<function ActorCriticPolicy.predict_values at 0x7f5d21fea160>", "__abstractmethods__": "frozenset()", "_abc_impl": "<_abc_data object at 0x7f5d21fe0990>"}, "verbose": 1, "policy_kwargs": {}, "observation_space": {":type:": "<class 'gym.spaces.box.Box'>", ":serialized:": "gAWVnwEAAAAAAACMDmd5bS5zcGFjZXMuYm94lIwDQm94lJOUKYGUfZQojAVkdHlwZZSMBW51bXB5lGgFk5SMAmY0lImIh5RSlChLA4wBPJROTk5K/////0r/////SwB0lGKMBl9zaGFwZZRLCIWUjANsb3eUjBJudW1weS5jb3JlLm51bWVyaWOUjAtfZnJvbWJ1ZmZlcpSTlCiWIAAAAAAAAAAAAID/AACA/wAAgP8AAID/AACA/wAAgP8AAID/AACA/5RoCksIhZSMAUOUdJRSlIwEaGlnaJRoEiiWIAAAAAAAAAAAAIB/AACAfwAAgH8AAIB/AACAfwAAgH8AAIB/AACAf5RoCksIhZRoFXSUUpSMDWJvdW5kZWRfYmVsb3eUaBIolggAAAAAAAAAAAAAAAAAAACUaAeMAmIxlImIh5RSlChLA4wBfJROTk5K/////0r/////SwB0lGJLCIWUaBV0lFKUjA1ib3VuZGVkX2Fib3ZllGgSKJYIAAAAAAAAAAAAAAAAAAAAlGghSwiFlGgVdJRSlIwKX25wX3JhbmRvbZROdWIu", "dtype": "float32", "_shape": [8], "low": "[-inf -inf -inf -inf -inf -inf -inf -inf]", "high": "[inf inf inf inf inf inf inf inf]", "bounded_below": "[False False False False False False False False]", "bounded_above": "[False False False False False False False False]", "_np_random": null}, "action_space": {":type:": "<class 'gym.spaces.discrete.Discrete'>", ":serialized:": "gAWVggAAAAAAAACME2d5bS5zcGFjZXMuZGlzY3JldGWUjAhEaXNjcmV0ZZSTlCmBlH2UKIwBbpRLBIwGX3NoYXBllCmMBWR0eXBllIwFbnVtcHmUaAeTlIwCaTiUiYiHlFKUKEsDjAE8lE5OTkr/////Sv////9LAHSUYowKX25wX3JhbmRvbZROdWIu", "n": 4, "_shape": [], "dtype": "int64", "_np_random": null}, "n_envs": 1, "num_timesteps": 1700000, "_total_timesteps": 2000000, "_num_timesteps_at_start": 0, "seed": null, "action_noise": null, "start_time": 1675259277903740900, "learning_rate": {":type:": "<class 'function'>", ":serialized:": "gAWVHQMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAksTQwh8AIgAFABTAJSMjAogICAgICAgIFByb2dyZXNzIHdpbGwgZGVjcmVhc2UgZnJvbSAxIChiZWdpbm5pbmcpIHRvIDAuCgogICAgICAgIDpwYXJhbSBwcm9ncmVzc19yZW1haW5pbmc6CiAgICAgICAgOnJldHVybjogY3VycmVudCBsZWFybmluZyByYXRlCiAgICAgICAglIWUKYwScHJvZ3Jlc3NfcmVtYWluaW5nlIWUjD9DOlxVc2Vyc1xqb3JnZVxBcHBEYXRhXExvY2FsXFRlbXBcaXB5a2VybmVsXzEzODI4XDMzMjU0MjM3MTAucHmUjARmdW5jlEsOQwIIB5SMDWluaXRpYWxfdmFsdWWUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UTowIX19uYW1lX1+UjAhfX21haW5fX5R1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgdfZR9lChoFmgOjAxfX3F1YWxuYW1lX1+UjB1saW5lYXJfc2NoZWR1bGUuPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lCiMEnByb2dyZXNzX3JlbWFpbmluZ5SMCGJ1aWx0aW5zlIwFZmxvYXSUk5SMBnJldHVybpRoKnWMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flGgJjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/UGJN0vGp/IWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "tensorboard_log": null, "lr_schedule": {":type:": "<class 'function'>", ":serialized:": "gAWVHQMAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAksTQwh8AIgAFABTAJSMjAogICAgICAgIFByb2dyZXNzIHdpbGwgZGVjcmVhc2UgZnJvbSAxIChiZWdpbm5pbmcpIHRvIDAuCgogICAgICAgIDpwYXJhbSBwcm9ncmVzc19yZW1haW5pbmc6CiAgICAgICAgOnJldHVybjogY3VycmVudCBsZWFybmluZyByYXRlCiAgICAgICAglIWUKYwScHJvZ3Jlc3NfcmVtYWluaW5nlIWUjD9DOlxVc2Vyc1xqb3JnZVxBcHBEYXRhXExvY2FsXFRlbXBcaXB5a2VybmVsXzEzODI4XDMzMjU0MjM3MTAucHmUjARmdW5jlEsOQwIIB5SMDWluaXRpYWxfdmFsdWWUhZQpdJRSlH2UKIwLX19wYWNrYWdlX1+UTowIX19uYW1lX1+UjAhfX21haW5fX5R1Tk5oAIwQX21ha2VfZW1wdHlfY2VsbJSTlClSlIWUdJRSlIwcY2xvdWRwaWNrbGUuY2xvdWRwaWNrbGVfZmFzdJSMEl9mdW5jdGlvbl9zZXRzdGF0ZZSTlGgdfZR9lChoFmgOjAxfX3F1YWxuYW1lX1+UjB1saW5lYXJfc2NoZWR1bGUuPGxvY2Fscz4uZnVuY5SMD19fYW5ub3RhdGlvbnNfX5R9lCiMEnByb2dyZXNzX3JlbWFpbmluZ5SMCGJ1aWx0aW5zlIwFZmxvYXSUk5SMBnJldHVybpRoKnWMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flGgJjAtfX2Nsb3N1cmVfX5RoAIwKX21ha2VfY2VsbJSTlEc/UGJN0vGp/IWUUpSFlIwXX2Nsb3VkcGlja2xlX3N1Ym1vZHVsZXOUXZSMC19fZ2xvYmFsc19flH2UdYaUhlIwLg=="}, "_last_obs": null, "_last_episode_starts": {":type:": "<class 'numpy.ndarray'>", ":serialized:": "gAWVdAAAAAAAAACMEm51bXB5LmNvcmUubnVtZXJpY5SMC19mcm9tYnVmZmVylJOUKJYBAAAAAAAAAACUjAVudW1weZSMBWR0eXBllJOUjAJiMZSJiIeUUpQoSwOMAXyUTk5OSv////9K/////0sAdJRiSwGFlIwBQ5R0lFKULg=="}, "_last_original_obs": null, "_episode_num": 0, "use_sde": false, "sde_sample_freq": -1, "_current_progress_remaining": 0.15008, "ep_info_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "ep_success_buffer": {":type:": "<class 'collections.deque'>", ":serialized:": "gAWVIAAAAAAAAACMC2NvbGxlY3Rpb25zlIwFZGVxdWWUk5QpS2SGlFKULg=="}, "_n_updates": 3320, "n_steps": 4096, "gamma": 0.999, "gae_lambda": 0.9, "ent_coef": 0.0, "vf_coef": 0.5, "max_grad_norm": 0.5, "batch_size": 32, "n_epochs": 8, "clip_range": {":type:": "<class 'function'>", ":serialized:": "gAWVhgIAAAAAAACMF2Nsb3VkcGlja2xlLmNsb3VkcGlja2xllIwOX21ha2VfZnVuY3Rpb26Uk5QoaACMDV9idWlsdGluX3R5cGWUk5SMCENvZGVUeXBllIWUUpQoSwFLAEsASwFLAUsTQwSIAFMAlE6FlCmMAV+UhZSMVEM6XFVzZXJzXGpvcmdlXGFuYWNvbmRhM1xlbnZzXHJsXGxpYlxzaXRlLXBhY2thZ2VzXHN0YWJsZV9iYXNlbGluZXMzXGNvbW1vblx1dGlscy5weZSMBGZ1bmOUS4JDAgQBlIwDdmFslIWUKXSUUpR9lCiMC19fcGFja2FnZV9flIwYc3RhYmxlX2Jhc2VsaW5lczMuY29tbW9ulIwIX19uYW1lX1+UjB5zdGFibGVfYmFzZWxpbmVzMy5jb21tb24udXRpbHOUjAhfX2ZpbGVfX5RoDHVOTmgAjBBfbWFrZV9lbXB0eV9jZWxslJOUKVKUhZR0lFKUjBxjbG91ZHBpY2tsZS5jbG91ZHBpY2tsZV9mYXN0lIwSX2Z1bmN0aW9uX3NldHN0YXRllJOUaB59lH2UKGgWaA2MDF9fcXVhbG5hbWVfX5SMGWNvbnN0YW50X2ZuLjxsb2NhbHM+LmZ1bmOUjA9fX2Fubm90YXRpb25zX1+UfZSMDl9fa3dkZWZhdWx0c19flE6MDF9fZGVmYXVsdHNfX5ROjApfX21vZHVsZV9flGgXjAdfX2RvY19flE6MC19fY2xvc3VyZV9flGgAjApfbWFrZV9jZWxslJOURz/JmZmZmZmahZRSlIWUjBdfY2xvdWRwaWNrbGVfc3VibW9kdWxlc5RdlIwLX19nbG9iYWxzX1+UfZR1hpSGUjAu"}, "clip_range_vf": null, "normalize_advantage": true, "target_kl": null, "system_info": {"OS": "Linux-5.10.147+-x86_64-with-glibc2.29 # 1 SMP Sat Dec 10 16:00:40 UTC 2022", "Python": "3.8.10", "Stable-Baselines3": "1.7.0", "PyTorch": "1.13.1+cu116", "GPU Enabled": "True", "Numpy": "1.21.6", "Gym": "0.21.0"}}
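The trailing `system_info` block records the environment the checkpoint was saved from (Linux, Python 3.8.10, Stable-Baselines3 1.7.0, PyTorch 1.13.1+cu116, Gym 0.21.0). Stable-baselines3 exposes a helper for collecting these fields; a small sketch, assuming sb3 1.7.0:

```python
from stable_baselines3.common.utils import get_system_info

# Prints and returns the same fields recorded under "system_info" in config.json
# (OS, Python, Stable-Baselines3, PyTorch, GPU Enabled, Numpy, Gym).
env_info_dict, env_info_str = get_system_info(print_info=True)
```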
replay.mp4
CHANGED
Binary files a/replay.mp4 and b/replay.mp4 differ
results.json
CHANGED
@@ -1 +1 @@
-{"mean_reward":
+{"mean_reward": 267.0464136317765, "std_reward": 19.578165949833608, "is_deterministic": true, "n_eval_episodes": 10, "eval_datetime": "2023-02-01T17:41:20.024332"}
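Once the commit is pushed, the checkpoint can be pulled back down with the `huggingface_sb3` helper; the `repo_id` below is a placeholder, since the diff does not show the repository owner:

```python
from huggingface_sb3 import load_from_hub
from stable_baselines3 import PPO

# Downloads the LFS-backed zip from the Hub and returns its local path.
checkpoint = load_from_hub(
    repo_id="<user>/PPO-LunarLander-v2",  # hypothetical repo id
    filename="PPO-LunarLander-v2.zip",
)
model = PPO.load(checkpoint)
```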