Upload README.md with huggingface_hub
README.md CHANGED
@@ -21,7 +21,7 @@ model-index:
       type: OpenAI/Gym/ClassicControl-Pendulum-v1
     metrics:
     - type: mean_reward
-      value: -
+      value: -231.49 +/- 235.68
       name: mean_reward
 ---
 
@@ -67,10 +67,10 @@ import torch
 
 # Pull model from files which are git cloned from huggingface
 policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
-cfg = EasyDict(Config.file_to_dict("policy_config.py"))
+cfg = EasyDict(Config.file_to_dict("policy_config.py").cfg_dict)
 # Instantiate the agent
 agent = SACAgent(
-
+    env_id="Pendulum-v1", exp_name="Pendulum-v1-SAC", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
 )
 # Continue training
 agent.train(step=5000)
@@ -98,7 +98,7 @@ from huggingface_ding import pull_model_from_hub
 policy_state_dict, cfg = pull_model_from_hub(repo_id="OpenDILabCommunity/Pendulum-v1-SAC")
 # Instantiate the agent
 agent = SACAgent(
-
+    env_id="Pendulum-v1",
     exp_name="Pendulum-v1-SAC",
     cfg=cfg.exp_config,
     policy_state_dict=policy_state_dict
@@ -128,7 +128,7 @@ from ding.bonus import SACAgent
 from huggingface_ding import push_model_to_hub
 
 # Instantiate the agent
-agent = SACAgent("
+agent = SACAgent(env_id="Pendulum-v1", exp_name="Pendulum-v1-SAC")
 # Train the agent
 return_ = agent.train(step=int(4000000), collector_env_num=8, evaluator_env_num=8)
 # Push model to huggingface hub
@@ -145,7 +145,8 @@ push_model_to_hub(
     usage_file_by_git_clone="./sac/pendulum_sac_deploy.py",
     usage_file_by_huggingface_ding="./sac/pendulum_sac_download.py",
     train_file="./sac/pendulum_sac.py",
-    repo_id="OpenDILabCommunity/Pendulum-v1-SAC"
+    repo_id="OpenDILabCommunity/Pendulum-v1-SAC",
+    create_repo=False
 )
 
 ```
@@ -170,10 +171,11 @@ exp_config = {
             'cfg_type': 'BaseEnvManagerDict'
         },
         'stop_value': -250,
+        'n_evaluator_episode': 8,
+        'env_id': 'Pendulum-v1',
         'collector_env_num': 10,
         'evaluator_env_num': 8,
-        'act_scale': True
-        'n_evaluator_episode': 8
+        'act_scale': True
     },
     'policy': {
         'model': {
@@ -226,9 +228,10 @@ exp_config = {
                     'render_freq': -1,
                     'mode': 'train_iter'
                 },
+                'figure_path': None,
                 'cfg_type': 'InteractionSerialEvaluatorDict',
-                '
-                '
+                'stop_value': -250,
+                'n_episode': 8
             }
         },
         'other': {
@@ -237,7 +240,7 @@ exp_config = {
            }
        },
        'on_policy': False,
-       'cuda':
+       'cuda': True,
        'multi_gpu': False,
        'bp_update_sync': True,
        'traj_len_inf': False,
@@ -274,14 +277,14 @@ exp_config = {
 - **Configuration:** [config link](https://huggingface.co/OpenDILabCommunity/Pendulum-v1-SAC/blob/main/policy_config.py)
 - **Demo:** [video](https://huggingface.co/OpenDILabCommunity/Pendulum-v1-SAC/blob/main/replay.mp4)
 <!-- Provide the size information for the model. -->
-- **Parameters total size:**
-- **Last Update Date:** 2023-
+- **Parameters total size:** 405.03 KB
+- **Last Update Date:** 2023-09-23
 
 ## Environments
 <!-- Address questions around what environment the model is intended to be trained and deployed at, including the necessary information needed to be provided for future users. -->
 - **Benchmark:** OpenAI/Gym/ClassicControl
 - **Task:** Pendulum-v1
 - **Gym version:** 0.25.1
-- **DI-engine version:** v0.4.
-- **PyTorch version:**
+- **DI-engine version:** v0.4.9
+- **PyTorch version:** 2.0.1+cu117
 - **Doc**: [DI-engine-docs Environments link](https://di-engine-docs.readthedocs.io/en/latest/13_envs/pendulum.html)
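Read together, the changes in the 67–76 hunk leave the model card's "git clone" deployment snippet as sketched below. Only `import torch` and `from ding.bonus import SACAgent` are visible in the hunk headers; the `EasyDict` and `Config` import lines are assumptions inferred from the identifiers the snippet uses.

```python
# Sketch of the deployment snippet as it reads after this commit, assembled from the
# hunks above. The EasyDict/Config imports are assumptions; only `import torch` and
# `from ding.bonus import SACAgent` appear in the diff context.
import torch
from easydict import EasyDict
from ding.bonus import SACAgent
from ding.config import Config

# Pull model from files which are git cloned from huggingface
policy_state_dict = torch.load("pytorch_model.bin", map_location=torch.device("cpu"))
# Load the exported policy config and wrap it for attribute-style access
cfg = EasyDict(Config.file_to_dict("policy_config.py").cfg_dict)
# Instantiate the agent; env_id and exp_name are now passed explicitly as keywords
agent = SACAgent(
    env_id="Pendulum-v1", exp_name="Pendulum-v1-SAC", cfg=cfg.exp_config, policy_state_dict=policy_state_dict
)
# Continue training from the downloaded checkpoint
agent.train(step=5000)
```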
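Likewise, the training script section now instantiates the agent with explicit `env_id` and `exp_name` keywords; the sketch below is assembled from the 128–134 hunk. The `push_model_to_hub(...)` call itself is not reproduced because most of its arguments (README lines 135–144) fall outside the shown context; per the 145–152 hunk it now ends with `repo_id="OpenDILabCommunity/Pendulum-v1-SAC",` followed by `create_repo=False`.

```python
# Sketch of the training section after this commit; the push_model_to_hub call is
# omitted because its full argument list is not visible in this diff.
from ding.bonus import SACAgent

# Instantiate the agent
agent = SACAgent(env_id="Pendulum-v1", exp_name="Pendulum-v1-SAC")
# Train the agent
return_ = agent.train(step=int(4000000), collector_env_num=8, evaluator_env_num=8)
```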