Commit
·
2ef0e15
1
Parent(s):
8ded26a
Upload . with huggingface_hub
Browse files- .gitattributes +1 -0
- .summary/0/events.out.tfevents.1677097368.VDI-H-LIN-P-003.vdi.pix4d.com +3 -0
- README.md +56 -0
- checkpoint_p0/best_000000978_4005888_reward_28.129.pth +3 -0
- checkpoint_p0/checkpoint_000000605_2478080.pth +3 -0
- checkpoint_p0/checkpoint_000000978_4005888.pth +3 -0
- config.json +142 -0
- replay.mp4 +3 -0
- sf_log.txt +679 -0
.gitattributes
CHANGED
@@ -32,3 +32,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
|
|
|
32 |
*.zip filter=lfs diff=lfs merge=lfs -text
|
33 |
*.zst filter=lfs diff=lfs merge=lfs -text
|
34 |
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
35 |
+
replay.mp4 filter=lfs diff=lfs merge=lfs -text
|
.summary/0/events.out.tfevents.1677097368.VDI-H-LIN-P-003.vdi.pix4d.com
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:cee58dc980336acb6755301252348cc8733f064224cc002959c43b2384cefee6
|
3 |
+
size 169248
|
README.md
ADDED
@@ -0,0 +1,56 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
---
|
2 |
+
library_name: sample-factory
|
3 |
+
tags:
|
4 |
+
- deep-reinforcement-learning
|
5 |
+
- reinforcement-learning
|
6 |
+
- sample-factory
|
7 |
+
model-index:
|
8 |
+
- name: APPO
|
9 |
+
results:
|
10 |
+
- task:
|
11 |
+
type: reinforcement-learning
|
12 |
+
name: reinforcement-learning
|
13 |
+
dataset:
|
14 |
+
name: doom_health_gathering_supreme
|
15 |
+
type: doom_health_gathering_supreme
|
16 |
+
metrics:
|
17 |
+
- type: mean_reward
|
18 |
+
value: 8.78 +/- 4.49
|
19 |
+
name: mean_reward
|
20 |
+
verified: false
|
21 |
+
---
|
22 |
+
|
23 |
+
A(n) **APPO** model trained on the **doom_health_gathering_supreme** environment.
|
24 |
+
|
25 |
+
This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
|
26 |
+
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/
|
27 |
+
|
28 |
+
|
29 |
+
## Downloading the model
|
30 |
+
|
31 |
+
After installing Sample-Factory, download the model with:
|
32 |
+
```
|
33 |
+
python -m sample_factory.huggingface.load_from_hub -r GrimReaperSam/rl_course_vizdoom_health_gathering_supreme
|
34 |
+
```
|
35 |
+
|
36 |
+
|
37 |
+
## Using the model
|
38 |
+
|
39 |
+
To run the model after download, use the `enjoy` script corresponding to this environment:
|
40 |
+
```
|
41 |
+
python -m <path.to.enjoy.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme
|
42 |
+
```
|
43 |
+
|
44 |
+
|
45 |
+
You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
|
46 |
+
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details
|
47 |
+
|
48 |
+
## Training with this model
|
49 |
+
|
50 |
+
To continue training with this model, use the `train` script corresponding to this environment:
|
51 |
+
```
|
52 |
+
python -m <path.to.train.module> --algo=APPO --env=doom_health_gathering_supreme --train_dir=./train_dir --experiment=rl_course_vizdoom_health_gathering_supreme --restart_behavior=resume --train_for_env_steps=10000000000
|
53 |
+
```
|
54 |
+
|
55 |
+
Note, you may have to adjust `--train_for_env_steps` to a suitably high number as the experiment will resume at the number of steps it concluded at.
|
56 |
+
|
checkpoint_p0/best_000000978_4005888_reward_28.129.pth
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:7ec0957818ed6776821ebdc0f2f6f0737085ad0fe7f34094242fce5f5337966f
|
3 |
+
size 34928614
|
checkpoint_p0/checkpoint_000000605_2478080.pth
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:d7af59cf1d899591c69ab7417df7b8a66619f104f75fef1f0a31a5419d71c3ee
|
3 |
+
size 34929028
|
checkpoint_p0/checkpoint_000000978_4005888.pth
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:b27857647b3368f84c7b3f78a4cc114d1d0da3759f78f083e32bc1d6ff4ee593
|
3 |
+
size 34929028
|
config.json
ADDED
@@ -0,0 +1,142 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"help": false,
|
3 |
+
"algo": "APPO",
|
4 |
+
"env": "doom_health_gathering_supreme",
|
5 |
+
"experiment": "default_experiment",
|
6 |
+
"train_dir": "/home/flahoud/studies/collab/train_dir",
|
7 |
+
"restart_behavior": "resume",
|
8 |
+
"device": "gpu",
|
9 |
+
"seed": null,
|
10 |
+
"num_policies": 1,
|
11 |
+
"async_rl": true,
|
12 |
+
"serial_mode": false,
|
13 |
+
"batched_sampling": false,
|
14 |
+
"num_batches_to_accumulate": 2,
|
15 |
+
"worker_num_splits": 2,
|
16 |
+
"policy_workers_per_policy": 1,
|
17 |
+
"max_policy_lag": 1000,
|
18 |
+
"num_workers": 8,
|
19 |
+
"num_envs_per_worker": 4,
|
20 |
+
"batch_size": 1024,
|
21 |
+
"num_batches_per_epoch": 1,
|
22 |
+
"num_epochs": 1,
|
23 |
+
"rollout": 32,
|
24 |
+
"recurrence": 32,
|
25 |
+
"shuffle_minibatches": false,
|
26 |
+
"gamma": 0.99,
|
27 |
+
"reward_scale": 1.0,
|
28 |
+
"reward_clip": 1000.0,
|
29 |
+
"value_bootstrap": false,
|
30 |
+
"normalize_returns": true,
|
31 |
+
"exploration_loss_coeff": 0.001,
|
32 |
+
"value_loss_coeff": 0.5,
|
33 |
+
"kl_loss_coeff": 0.0,
|
34 |
+
"exploration_loss": "symmetric_kl",
|
35 |
+
"gae_lambda": 0.95,
|
36 |
+
"ppo_clip_ratio": 0.1,
|
37 |
+
"ppo_clip_value": 0.2,
|
38 |
+
"with_vtrace": false,
|
39 |
+
"vtrace_rho": 1.0,
|
40 |
+
"vtrace_c": 1.0,
|
41 |
+
"optimizer": "adam",
|
42 |
+
"adam_eps": 1e-06,
|
43 |
+
"adam_beta1": 0.9,
|
44 |
+
"adam_beta2": 0.999,
|
45 |
+
"max_grad_norm": 4.0,
|
46 |
+
"learning_rate": 0.0001,
|
47 |
+
"lr_schedule": "constant",
|
48 |
+
"lr_schedule_kl_threshold": 0.008,
|
49 |
+
"lr_adaptive_min": 1e-06,
|
50 |
+
"lr_adaptive_max": 0.01,
|
51 |
+
"obs_subtract_mean": 0.0,
|
52 |
+
"obs_scale": 255.0,
|
53 |
+
"normalize_input": true,
|
54 |
+
"normalize_input_keys": null,
|
55 |
+
"decorrelate_experience_max_seconds": 0,
|
56 |
+
"decorrelate_envs_on_one_worker": true,
|
57 |
+
"actor_worker_gpus": [],
|
58 |
+
"set_workers_cpu_affinity": true,
|
59 |
+
"force_envs_single_thread": false,
|
60 |
+
"default_niceness": 0,
|
61 |
+
"log_to_file": true,
|
62 |
+
"experiment_summaries_interval": 10,
|
63 |
+
"flush_summaries_interval": 30,
|
64 |
+
"stats_avg": 100,
|
65 |
+
"summaries_use_frameskip": true,
|
66 |
+
"heartbeat_interval": 20,
|
67 |
+
"heartbeat_reporting_interval": 600,
|
68 |
+
"train_for_env_steps": 4000000,
|
69 |
+
"train_for_seconds": 10000000000,
|
70 |
+
"save_every_sec": 120,
|
71 |
+
"keep_checkpoints": 2,
|
72 |
+
"load_checkpoint_kind": "latest",
|
73 |
+
"save_milestones_sec": -1,
|
74 |
+
"save_best_every_sec": 5,
|
75 |
+
"save_best_metric": "reward",
|
76 |
+
"save_best_after": 100000,
|
77 |
+
"benchmark": false,
|
78 |
+
"encoder_mlp_layers": [
|
79 |
+
512,
|
80 |
+
512
|
81 |
+
],
|
82 |
+
"encoder_conv_architecture": "convnet_simple",
|
83 |
+
"encoder_conv_mlp_layers": [
|
84 |
+
512
|
85 |
+
],
|
86 |
+
"use_rnn": true,
|
87 |
+
"rnn_size": 512,
|
88 |
+
"rnn_type": "gru",
|
89 |
+
"rnn_num_layers": 1,
|
90 |
+
"decoder_mlp_layers": [],
|
91 |
+
"nonlinearity": "elu",
|
92 |
+
"policy_initialization": "orthogonal",
|
93 |
+
"policy_init_gain": 1.0,
|
94 |
+
"actor_critic_share_weights": true,
|
95 |
+
"adaptive_stddev": true,
|
96 |
+
"continuous_tanh_scale": 0.0,
|
97 |
+
"initial_stddev": 1.0,
|
98 |
+
"use_env_info_cache": false,
|
99 |
+
"env_gpu_actions": false,
|
100 |
+
"env_gpu_observations": true,
|
101 |
+
"env_frameskip": 4,
|
102 |
+
"env_framestack": 1,
|
103 |
+
"pixel_format": "CHW",
|
104 |
+
"use_record_episode_statistics": false,
|
105 |
+
"with_wandb": false,
|
106 |
+
"wandb_user": null,
|
107 |
+
"wandb_project": "sample_factory",
|
108 |
+
"wandb_group": null,
|
109 |
+
"wandb_job_type": "SF",
|
110 |
+
"wandb_tags": [],
|
111 |
+
"with_pbt": false,
|
112 |
+
"pbt_mix_policies_in_one_env": true,
|
113 |
+
"pbt_period_env_steps": 5000000,
|
114 |
+
"pbt_start_mutation": 20000000,
|
115 |
+
"pbt_replace_fraction": 0.3,
|
116 |
+
"pbt_mutation_rate": 0.15,
|
117 |
+
"pbt_replace_reward_gap": 0.1,
|
118 |
+
"pbt_replace_reward_gap_absolute": 1e-06,
|
119 |
+
"pbt_optimize_gamma": false,
|
120 |
+
"pbt_target_objective": "true_objective",
|
121 |
+
"pbt_perturb_min": 1.1,
|
122 |
+
"pbt_perturb_max": 1.5,
|
123 |
+
"num_agents": -1,
|
124 |
+
"num_humans": 0,
|
125 |
+
"num_bots": -1,
|
126 |
+
"start_bot_difficulty": null,
|
127 |
+
"timelimit": null,
|
128 |
+
"res_w": 128,
|
129 |
+
"res_h": 72,
|
130 |
+
"wide_aspect_ratio": false,
|
131 |
+
"eval_env_frameskip": 1,
|
132 |
+
"fps": 35,
|
133 |
+
"command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
|
134 |
+
"cli_args": {
|
135 |
+
"env": "doom_health_gathering_supreme",
|
136 |
+
"num_workers": 8,
|
137 |
+
"num_envs_per_worker": 4,
|
138 |
+
"train_for_env_steps": 4000000
|
139 |
+
},
|
140 |
+
"git_hash": "unknown",
|
141 |
+
"git_repo_name": "not a git repository"
|
142 |
+
}
|
replay.mp4
ADDED
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
1 |
+
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:105886dddb3b8178b9711d80f6b769188a241967a34dcb263c92b9fe52682358
|
3 |
+
size 16569762
|
sf_log.txt
ADDED
@@ -0,0 +1,679 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
[2023-02-22 21:22:50,811][24717] Saving configuration to /home/flahoud/studies/collab/train_dir/default_experiment/config.json...
|
2 |
+
[2023-02-22 21:22:50,812][24717] Rollout worker 0 uses device cpu
|
3 |
+
[2023-02-22 21:22:50,813][24717] Rollout worker 1 uses device cpu
|
4 |
+
[2023-02-22 21:22:50,813][24717] Rollout worker 2 uses device cpu
|
5 |
+
[2023-02-22 21:22:50,814][24717] Rollout worker 3 uses device cpu
|
6 |
+
[2023-02-22 21:22:50,815][24717] Rollout worker 4 uses device cpu
|
7 |
+
[2023-02-22 21:22:50,815][24717] Rollout worker 5 uses device cpu
|
8 |
+
[2023-02-22 21:22:50,816][24717] Rollout worker 6 uses device cpu
|
9 |
+
[2023-02-22 21:22:50,817][24717] Rollout worker 7 uses device cpu
|
10 |
+
[2023-02-22 21:22:50,874][24717] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
11 |
+
[2023-02-22 21:22:50,875][24717] InferenceWorker_p0-w0: min num requests: 2
|
12 |
+
[2023-02-22 21:22:50,907][24717] Starting all processes...
|
13 |
+
[2023-02-22 21:22:50,908][24717] Starting process learner_proc0
|
14 |
+
[2023-02-22 21:22:50,957][24717] Starting all processes...
|
15 |
+
[2023-02-22 21:22:50,964][24717] Starting process inference_proc0-0
|
16 |
+
[2023-02-22 21:22:50,965][24717] Starting process rollout_proc0
|
17 |
+
[2023-02-22 21:22:50,965][24717] Starting process rollout_proc1
|
18 |
+
[2023-02-22 21:22:50,966][24717] Starting process rollout_proc2
|
19 |
+
[2023-02-22 21:22:50,967][24717] Starting process rollout_proc3
|
20 |
+
[2023-02-22 21:22:50,967][24717] Starting process rollout_proc4
|
21 |
+
[2023-02-22 21:22:50,968][24717] Starting process rollout_proc5
|
22 |
+
[2023-02-22 21:22:50,968][24717] Starting process rollout_proc6
|
23 |
+
[2023-02-22 21:22:50,968][24717] Starting process rollout_proc7
|
24 |
+
[2023-02-22 21:22:52,699][32247] Worker 1 uses CPU cores [1]
|
25 |
+
[2023-02-22 21:22:52,745][32230] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
26 |
+
[2023-02-22 21:22:52,745][32230] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for learning process 0
|
27 |
+
[2023-02-22 21:22:52,758][32230] Num visible devices: 1
|
28 |
+
[2023-02-22 21:22:52,803][32230] Starting seed is not provided
|
29 |
+
[2023-02-22 21:22:52,803][32230] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
30 |
+
[2023-02-22 21:22:52,803][32230] Initializing actor-critic model on device cuda:0
|
31 |
+
[2023-02-22 21:22:52,803][32230] RunningMeanStd input shape: (3, 72, 128)
|
32 |
+
[2023-02-22 21:22:52,804][32230] RunningMeanStd input shape: (1,)
|
33 |
+
[2023-02-22 21:22:52,815][32230] ConvEncoder: input_channels=3
|
34 |
+
[2023-02-22 21:22:52,855][32246] Worker 0 uses CPU cores [0]
|
35 |
+
[2023-02-22 21:22:52,864][32245] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
36 |
+
[2023-02-22 21:22:52,864][32245] Set environment var CUDA_VISIBLE_DEVICES to '0' (GPU indices [0]) for inference process 0
|
37 |
+
[2023-02-22 21:22:52,878][32245] Num visible devices: 1
|
38 |
+
[2023-02-22 21:22:52,943][32249] Worker 3 uses CPU cores [3]
|
39 |
+
[2023-02-22 21:22:52,955][32253] Worker 4 uses CPU cores [4]
|
40 |
+
[2023-02-22 21:22:52,969][32230] Conv encoder output size: 512
|
41 |
+
[2023-02-22 21:22:52,970][32230] Policy head output size: 512
|
42 |
+
[2023-02-22 21:22:52,983][32230] Created Actor Critic model with architecture:
|
43 |
+
[2023-02-22 21:22:52,984][32230] ActorCriticSharedWeights(
|
44 |
+
(obs_normalizer): ObservationNormalizer(
|
45 |
+
(running_mean_std): RunningMeanStdDictInPlace(
|
46 |
+
(running_mean_std): ModuleDict(
|
47 |
+
(obs): RunningMeanStdInPlace()
|
48 |
+
)
|
49 |
+
)
|
50 |
+
)
|
51 |
+
(returns_normalizer): RecursiveScriptModule(original_name=RunningMeanStdInPlace)
|
52 |
+
(encoder): VizdoomEncoder(
|
53 |
+
(basic_encoder): ConvEncoder(
|
54 |
+
(enc): RecursiveScriptModule(
|
55 |
+
original_name=ConvEncoderImpl
|
56 |
+
(conv_head): RecursiveScriptModule(
|
57 |
+
original_name=Sequential
|
58 |
+
(0): RecursiveScriptModule(original_name=Conv2d)
|
59 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
60 |
+
(2): RecursiveScriptModule(original_name=Conv2d)
|
61 |
+
(3): RecursiveScriptModule(original_name=ELU)
|
62 |
+
(4): RecursiveScriptModule(original_name=Conv2d)
|
63 |
+
(5): RecursiveScriptModule(original_name=ELU)
|
64 |
+
)
|
65 |
+
(mlp_layers): RecursiveScriptModule(
|
66 |
+
original_name=Sequential
|
67 |
+
(0): RecursiveScriptModule(original_name=Linear)
|
68 |
+
(1): RecursiveScriptModule(original_name=ELU)
|
69 |
+
)
|
70 |
+
)
|
71 |
+
)
|
72 |
+
)
|
73 |
+
(core): ModelCoreRNN(
|
74 |
+
(core): GRU(512, 512)
|
75 |
+
)
|
76 |
+
(decoder): MlpDecoder(
|
77 |
+
(mlp): Identity()
|
78 |
+
)
|
79 |
+
(critic_linear): Linear(in_features=512, out_features=1, bias=True)
|
80 |
+
(action_parameterization): ActionParameterizationDefault(
|
81 |
+
(distribution_linear): Linear(in_features=512, out_features=5, bias=True)
|
82 |
+
)
|
83 |
+
)
|
84 |
+
[2023-02-22 21:22:52,989][32262] Worker 6 uses CPU cores [6]
|
85 |
+
[2023-02-22 21:22:53,089][32248] Worker 2 uses CPU cores [2]
|
86 |
+
[2023-02-22 21:22:53,090][32252] Worker 5 uses CPU cores [5]
|
87 |
+
[2023-02-22 21:22:53,203][32263] Worker 7 uses CPU cores [7]
|
88 |
+
[2023-02-22 21:22:55,724][32230] Using optimizer <class 'torch.optim.adam.Adam'>
|
89 |
+
[2023-02-22 21:22:55,725][32230] No checkpoints found
|
90 |
+
[2023-02-22 21:22:55,725][32230] Did not load from checkpoint, starting from scratch!
|
91 |
+
[2023-02-22 21:22:55,725][32230] Initialized policy 0 weights for model version 0
|
92 |
+
[2023-02-22 21:22:55,727][32230] LearnerWorker_p0 finished initialization!
|
93 |
+
[2023-02-22 21:22:55,728][32230] Using GPUs [0] for process 0 (actually maps to GPUs [0])
|
94 |
+
[2023-02-22 21:22:55,918][32245] RunningMeanStd input shape: (3, 72, 128)
|
95 |
+
[2023-02-22 21:22:55,919][32245] RunningMeanStd input shape: (1,)
|
96 |
+
[2023-02-22 21:22:55,929][32245] ConvEncoder: input_channels=3
|
97 |
+
[2023-02-22 21:22:56,020][32245] Conv encoder output size: 512
|
98 |
+
[2023-02-22 21:22:56,021][32245] Policy head output size: 512
|
99 |
+
[2023-02-22 21:22:58,156][24717] Fps is (10 sec: nan, 60 sec: nan, 300 sec: nan). Total num frames: 0. Throughput: 0: nan. Samples: 0. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
100 |
+
[2023-02-22 21:22:58,538][24717] Inference worker 0-0 is ready!
|
101 |
+
[2023-02-22 21:22:58,539][24717] All inference workers are ready! Signal rollout workers to start!
|
102 |
+
[2023-02-22 21:22:58,557][32247] Doom resolution: 160x120, resize resolution: (128, 72)
|
103 |
+
[2023-02-22 21:22:58,558][32262] Doom resolution: 160x120, resize resolution: (128, 72)
|
104 |
+
[2023-02-22 21:22:58,558][32263] Doom resolution: 160x120, resize resolution: (128, 72)
|
105 |
+
[2023-02-22 21:22:58,558][32248] Doom resolution: 160x120, resize resolution: (128, 72)
|
106 |
+
[2023-02-22 21:22:58,558][32246] Doom resolution: 160x120, resize resolution: (128, 72)
|
107 |
+
[2023-02-22 21:22:58,559][32253] Doom resolution: 160x120, resize resolution: (128, 72)
|
108 |
+
[2023-02-22 21:22:58,560][32249] Doom resolution: 160x120, resize resolution: (128, 72)
|
109 |
+
[2023-02-22 21:22:58,579][32252] Doom resolution: 160x120, resize resolution: (128, 72)
|
110 |
+
[2023-02-22 21:22:59,188][32247] Decorrelating experience for 0 frames...
|
111 |
+
[2023-02-22 21:22:59,191][32246] Decorrelating experience for 0 frames...
|
112 |
+
[2023-02-22 21:22:59,192][32249] Decorrelating experience for 0 frames...
|
113 |
+
[2023-02-22 21:22:59,193][32252] Decorrelating experience for 0 frames...
|
114 |
+
[2023-02-22 21:22:59,194][32262] Decorrelating experience for 0 frames...
|
115 |
+
[2023-02-22 21:22:59,195][32263] Decorrelating experience for 0 frames...
|
116 |
+
[2023-02-22 21:22:59,738][32249] Decorrelating experience for 32 frames...
|
117 |
+
[2023-02-22 21:22:59,739][32263] Decorrelating experience for 32 frames...
|
118 |
+
[2023-02-22 21:22:59,739][32246] Decorrelating experience for 32 frames...
|
119 |
+
[2023-02-22 21:22:59,740][32253] Decorrelating experience for 0 frames...
|
120 |
+
[2023-02-22 21:22:59,744][32248] Decorrelating experience for 0 frames...
|
121 |
+
[2023-02-22 21:22:59,745][32247] Decorrelating experience for 32 frames...
|
122 |
+
[2023-02-22 21:23:00,186][32253] Decorrelating experience for 32 frames...
|
123 |
+
[2023-02-22 21:23:00,320][32252] Decorrelating experience for 32 frames...
|
124 |
+
[2023-02-22 21:23:00,322][32248] Decorrelating experience for 32 frames...
|
125 |
+
[2023-02-22 21:23:00,323][32263] Decorrelating experience for 64 frames...
|
126 |
+
[2023-02-22 21:23:00,324][32246] Decorrelating experience for 64 frames...
|
127 |
+
[2023-02-22 21:23:00,324][32262] Decorrelating experience for 32 frames...
|
128 |
+
[2023-02-22 21:23:00,527][32249] Decorrelating experience for 64 frames...
|
129 |
+
[2023-02-22 21:23:00,770][32246] Decorrelating experience for 96 frames...
|
130 |
+
[2023-02-22 21:23:00,863][32253] Decorrelating experience for 64 frames...
|
131 |
+
[2023-02-22 21:23:00,863][32263] Decorrelating experience for 96 frames...
|
132 |
+
[2023-02-22 21:23:00,863][32252] Decorrelating experience for 64 frames...
|
133 |
+
[2023-02-22 21:23:00,865][32247] Decorrelating experience for 64 frames...
|
134 |
+
[2023-02-22 21:23:01,383][32252] Decorrelating experience for 96 frames...
|
135 |
+
[2023-02-22 21:23:01,384][32253] Decorrelating experience for 96 frames...
|
136 |
+
[2023-02-22 21:23:01,384][32247] Decorrelating experience for 96 frames...
|
137 |
+
[2023-02-22 21:23:01,386][32249] Decorrelating experience for 96 frames...
|
138 |
+
[2023-02-22 21:23:01,386][32248] Decorrelating experience for 64 frames...
|
139 |
+
[2023-02-22 21:23:01,776][32262] Decorrelating experience for 64 frames...
|
140 |
+
[2023-02-22 21:23:01,777][32248] Decorrelating experience for 96 frames...
|
141 |
+
[2023-02-22 21:23:02,120][32262] Decorrelating experience for 96 frames...
|
142 |
+
[2023-02-22 21:23:02,338][32230] Signal inference workers to stop experience collection...
|
143 |
+
[2023-02-22 21:23:02,342][32245] InferenceWorker_p0-w0: stopping experience collection
|
144 |
+
[2023-02-22 21:23:03,156][24717] Fps is (10 sec: 0.0, 60 sec: 0.0, 300 sec: 0.0). Total num frames: 0. Throughput: 0: 7.2. Samples: 36. Policy #0 lag: (min: -1.0, avg: -1.0, max: -1.0)
|
145 |
+
[2023-02-22 21:23:03,158][24717] Avg episode reward: [(0, '2.456')]
|
146 |
+
[2023-02-22 21:23:04,256][32230] Signal inference workers to resume experience collection...
|
147 |
+
[2023-02-22 21:23:04,256][32245] InferenceWorker_p0-w0: resuming experience collection
|
148 |
+
[2023-02-22 21:23:06,289][32245] Updated weights for policy 0, policy_version 10 (0.0008)
|
149 |
+
[2023-02-22 21:23:07,913][32245] Updated weights for policy 0, policy_version 20 (0.0010)
|
150 |
+
[2023-02-22 21:23:08,156][24717] Fps is (10 sec: 8601.6, 60 sec: 8601.6, 300 sec: 8601.6). Total num frames: 86016. Throughput: 0: 1899.2. Samples: 18992. Policy #0 lag: (min: 0.0, avg: 0.9, max: 2.0)
|
151 |
+
[2023-02-22 21:23:08,157][24717] Avg episode reward: [(0, '4.545')]
|
152 |
+
[2023-02-22 21:23:09,552][32245] Updated weights for policy 0, policy_version 30 (0.0007)
|
153 |
+
[2023-02-22 21:23:10,866][24717] Heartbeat connected on Batcher_0
|
154 |
+
[2023-02-22 21:23:10,869][24717] Heartbeat connected on LearnerWorker_p0
|
155 |
+
[2023-02-22 21:23:10,881][24717] Heartbeat connected on InferenceWorker_p0-w0
|
156 |
+
[2023-02-22 21:23:10,887][24717] Heartbeat connected on RolloutWorker_w2
|
157 |
+
[2023-02-22 21:23:10,890][24717] Heartbeat connected on RolloutWorker_w0
|
158 |
+
[2023-02-22 21:23:10,892][24717] Heartbeat connected on RolloutWorker_w3
|
159 |
+
[2023-02-22 21:23:10,893][24717] Heartbeat connected on RolloutWorker_w1
|
160 |
+
[2023-02-22 21:23:10,898][24717] Heartbeat connected on RolloutWorker_w4
|
161 |
+
[2023-02-22 21:23:10,900][24717] Heartbeat connected on RolloutWorker_w5
|
162 |
+
[2023-02-22 21:23:10,905][24717] Heartbeat connected on RolloutWorker_w6
|
163 |
+
[2023-02-22 21:23:10,907][24717] Heartbeat connected on RolloutWorker_w7
|
164 |
+
[2023-02-22 21:23:11,206][32245] Updated weights for policy 0, policy_version 40 (0.0007)
|
165 |
+
[2023-02-22 21:23:12,868][32245] Updated weights for policy 0, policy_version 50 (0.0008)
|
166 |
+
[2023-02-22 21:23:13,156][24717] Fps is (10 sec: 20889.3, 60 sec: 13926.3, 300 sec: 13926.3). Total num frames: 208896. Throughput: 0: 2502.7. Samples: 37540. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
167 |
+
[2023-02-22 21:23:13,158][24717] Avg episode reward: [(0, '4.397')]
|
168 |
+
[2023-02-22 21:23:13,159][32230] Saving new best policy, reward=4.397!
|
169 |
+
[2023-02-22 21:23:14,551][32245] Updated weights for policy 0, policy_version 60 (0.0007)
|
170 |
+
[2023-02-22 21:23:16,232][32245] Updated weights for policy 0, policy_version 70 (0.0006)
|
171 |
+
[2023-02-22 21:23:17,952][32245] Updated weights for policy 0, policy_version 80 (0.0007)
|
172 |
+
[2023-02-22 21:23:18,156][24717] Fps is (10 sec: 24575.9, 60 sec: 16588.8, 300 sec: 16588.8). Total num frames: 331776. Throughput: 0: 3717.4. Samples: 74348. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
173 |
+
[2023-02-22 21:23:18,157][24717] Avg episode reward: [(0, '4.329')]
|
174 |
+
[2023-02-22 21:23:19,740][32245] Updated weights for policy 0, policy_version 90 (0.0010)
|
175 |
+
[2023-02-22 21:23:21,483][32245] Updated weights for policy 0, policy_version 100 (0.0007)
|
176 |
+
[2023-02-22 21:23:23,156][24717] Fps is (10 sec: 23757.1, 60 sec: 17858.6, 300 sec: 17858.6). Total num frames: 446464. Throughput: 0: 4375.5. Samples: 109386. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
177 |
+
[2023-02-22 21:23:23,157][24717] Avg episode reward: [(0, '4.485')]
|
178 |
+
[2023-02-22 21:23:23,159][32230] Saving new best policy, reward=4.485!
|
179 |
+
[2023-02-22 21:23:23,248][32245] Updated weights for policy 0, policy_version 110 (0.0009)
|
180 |
+
[2023-02-22 21:23:25,020][32245] Updated weights for policy 0, policy_version 120 (0.0008)
|
181 |
+
[2023-02-22 21:23:26,756][32245] Updated weights for policy 0, policy_version 130 (0.0007)
|
182 |
+
[2023-02-22 21:23:28,156][24717] Fps is (10 sec: 23347.1, 60 sec: 18841.6, 300 sec: 18841.6). Total num frames: 565248. Throughput: 0: 4225.1. Samples: 126752. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
183 |
+
[2023-02-22 21:23:28,158][24717] Avg episode reward: [(0, '4.570')]
|
184 |
+
[2023-02-22 21:23:28,162][32230] Saving new best policy, reward=4.570!
|
185 |
+
[2023-02-22 21:23:28,520][32245] Updated weights for policy 0, policy_version 140 (0.0008)
|
186 |
+
[2023-02-22 21:23:30,204][32245] Updated weights for policy 0, policy_version 150 (0.0007)
|
187 |
+
[2023-02-22 21:23:31,950][32245] Updated weights for policy 0, policy_version 160 (0.0008)
|
188 |
+
[2023-02-22 21:23:33,156][24717] Fps is (10 sec: 23756.7, 60 sec: 19543.8, 300 sec: 19543.8). Total num frames: 684032. Throughput: 0: 4632.2. Samples: 162128. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
189 |
+
[2023-02-22 21:23:33,157][24717] Avg episode reward: [(0, '4.758')]
|
190 |
+
[2023-02-22 21:23:33,159][32230] Saving new best policy, reward=4.758!
|
191 |
+
[2023-02-22 21:23:33,637][32245] Updated weights for policy 0, policy_version 170 (0.0006)
|
192 |
+
[2023-02-22 21:23:35,345][32245] Updated weights for policy 0, policy_version 180 (0.0008)
|
193 |
+
[2023-02-22 21:23:37,053][32245] Updated weights for policy 0, policy_version 190 (0.0007)
|
194 |
+
[2023-02-22 21:23:38,156][24717] Fps is (10 sec: 23757.0, 60 sec: 20070.4, 300 sec: 20070.4). Total num frames: 802816. Throughput: 0: 4956.4. Samples: 198256. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
195 |
+
[2023-02-22 21:23:38,158][24717] Avg episode reward: [(0, '5.231')]
|
196 |
+
[2023-02-22 21:23:38,163][32230] Saving new best policy, reward=5.231!
|
197 |
+
[2023-02-22 21:23:38,776][32245] Updated weights for policy 0, policy_version 200 (0.0006)
|
198 |
+
[2023-02-22 21:23:40,492][32245] Updated weights for policy 0, policy_version 210 (0.0008)
|
199 |
+
[2023-02-22 21:23:42,206][32245] Updated weights for policy 0, policy_version 220 (0.0007)
|
200 |
+
[2023-02-22 21:23:43,156][24717] Fps is (10 sec: 23756.8, 60 sec: 20480.0, 300 sec: 20480.0). Total num frames: 921600. Throughput: 0: 4802.7. Samples: 216120. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
201 |
+
[2023-02-22 21:23:43,159][24717] Avg episode reward: [(0, '6.078')]
|
202 |
+
[2023-02-22 21:23:43,160][32230] Saving new best policy, reward=6.078!
|
203 |
+
[2023-02-22 21:23:43,956][32245] Updated weights for policy 0, policy_version 230 (0.0010)
|
204 |
+
[2023-02-22 21:23:45,660][32245] Updated weights for policy 0, policy_version 240 (0.0008)
|
205 |
+
[2023-02-22 21:23:47,448][32245] Updated weights for policy 0, policy_version 250 (0.0006)
|
206 |
+
[2023-02-22 21:23:48,156][24717] Fps is (10 sec: 23346.9, 60 sec: 20725.7, 300 sec: 20725.7). Total num frames: 1036288. Throughput: 0: 5590.7. Samples: 251620. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
207 |
+
[2023-02-22 21:23:48,161][24717] Avg episode reward: [(0, '6.450')]
|
208 |
+
[2023-02-22 21:23:48,167][32230] Saving new best policy, reward=6.450!
|
209 |
+
[2023-02-22 21:23:49,218][32245] Updated weights for policy 0, policy_version 260 (0.0008)
|
210 |
+
[2023-02-22 21:23:50,966][32245] Updated weights for policy 0, policy_version 270 (0.0012)
|
211 |
+
[2023-02-22 21:23:52,686][32245] Updated weights for policy 0, policy_version 280 (0.0008)
|
212 |
+
[2023-02-22 21:23:53,156][24717] Fps is (10 sec: 23347.1, 60 sec: 21001.3, 300 sec: 21001.3). Total num frames: 1155072. Throughput: 0: 5946.3. Samples: 286574. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
213 |
+
[2023-02-22 21:23:53,159][24717] Avg episode reward: [(0, '7.235')]
|
214 |
+
[2023-02-22 21:23:53,160][32230] Saving new best policy, reward=7.235!
|
215 |
+
[2023-02-22 21:23:54,405][32245] Updated weights for policy 0, policy_version 290 (0.0008)
|
216 |
+
[2023-02-22 21:23:56,162][32245] Updated weights for policy 0, policy_version 300 (0.0008)
|
217 |
+
[2023-02-22 21:23:57,856][32245] Updated weights for policy 0, policy_version 310 (0.0007)
|
218 |
+
[2023-02-22 21:23:58,156][24717] Fps is (10 sec: 23757.1, 60 sec: 21230.9, 300 sec: 21230.9). Total num frames: 1273856. Throughput: 0: 5927.5. Samples: 304276. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
219 |
+
[2023-02-22 21:23:58,160][24717] Avg episode reward: [(0, '7.943')]
|
220 |
+
[2023-02-22 21:23:58,164][32230] Saving new best policy, reward=7.943!
|
221 |
+
[2023-02-22 21:23:59,584][32245] Updated weights for policy 0, policy_version 320 (0.0006)
|
222 |
+
[2023-02-22 21:24:01,399][32245] Updated weights for policy 0, policy_version 330 (0.0007)
|
223 |
+
[2023-02-22 21:24:03,081][32245] Updated weights for policy 0, policy_version 340 (0.0008)
|
224 |
+
[2023-02-22 21:24:03,156][24717] Fps is (10 sec: 23756.8, 60 sec: 23210.7, 300 sec: 21425.2). Total num frames: 1392640. Throughput: 0: 5894.8. Samples: 339612. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
225 |
+
[2023-02-22 21:24:03,158][24717] Avg episode reward: [(0, '9.597')]
|
226 |
+
[2023-02-22 21:24:03,160][32230] Saving new best policy, reward=9.597!
|
227 |
+
[2023-02-22 21:24:04,774][32245] Updated weights for policy 0, policy_version 350 (0.0006)
|
228 |
+
[2023-02-22 21:24:06,472][32245] Updated weights for policy 0, policy_version 360 (0.0007)
|
229 |
+
[2023-02-22 21:24:08,079][32245] Updated weights for policy 0, policy_version 370 (0.0008)
|
230 |
+
[2023-02-22 21:24:08,156][24717] Fps is (10 sec: 24166.4, 60 sec: 23825.1, 300 sec: 21650.3). Total num frames: 1515520. Throughput: 0: 5926.3. Samples: 376070. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
231 |
+
[2023-02-22 21:24:08,159][24717] Avg episode reward: [(0, '11.741')]
|
232 |
+
[2023-02-22 21:24:08,164][32230] Saving new best policy, reward=11.741!
|
233 |
+
[2023-02-22 21:24:09,745][32245] Updated weights for policy 0, policy_version 380 (0.0006)
|
234 |
+
[2023-02-22 21:24:11,395][32245] Updated weights for policy 0, policy_version 390 (0.0006)
|
235 |
+
[2023-02-22 21:24:13,047][32245] Updated weights for policy 0, policy_version 400 (0.0007)
|
236 |
+
[2023-02-22 21:24:13,156][24717] Fps is (10 sec: 24576.1, 60 sec: 23825.1, 300 sec: 21845.4). Total num frames: 1638400. Throughput: 0: 5958.9. Samples: 394900. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
237 |
+
[2023-02-22 21:24:13,157][24717] Avg episode reward: [(0, '17.552')]
|
238 |
+
[2023-02-22 21:24:13,158][32230] Saving new best policy, reward=17.552!
|
239 |
+
[2023-02-22 21:24:14,722][32245] Updated weights for policy 0, policy_version 410 (0.0006)
|
240 |
+
[2023-02-22 21:24:16,491][32245] Updated weights for policy 0, policy_version 420 (0.0007)
|
241 |
+
[2023-02-22 21:24:18,156][24717] Fps is (10 sec: 24166.4, 60 sec: 23756.8, 300 sec: 21964.8). Total num frames: 1757184. Throughput: 0: 5979.0. Samples: 431182. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
242 |
+
[2023-02-22 21:24:18,158][24717] Avg episode reward: [(0, '17.099')]
|
243 |
+
[2023-02-22 21:24:18,175][32245] Updated weights for policy 0, policy_version 430 (0.0008)
|
244 |
+
[2023-02-22 21:24:19,921][32245] Updated weights for policy 0, policy_version 440 (0.0006)
|
245 |
+
[2023-02-22 21:24:21,535][32245] Updated weights for policy 0, policy_version 450 (0.0008)
|
246 |
+
[2023-02-22 21:24:23,156][24717] Fps is (10 sec: 24166.4, 60 sec: 23893.3, 300 sec: 22118.4). Total num frames: 1880064. Throughput: 0: 5985.9. Samples: 467622. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
247 |
+
[2023-02-22 21:24:23,157][24717] Avg episode reward: [(0, '16.734')]
|
248 |
+
[2023-02-22 21:24:23,244][32245] Updated weights for policy 0, policy_version 460 (0.0007)
|
249 |
+
[2023-02-22 21:24:24,887][32245] Updated weights for policy 0, policy_version 470 (0.0006)
|
250 |
+
[2023-02-22 21:24:26,622][32245] Updated weights for policy 0, policy_version 480 (0.0006)
|
251 |
+
[2023-02-22 21:24:28,157][24717] Fps is (10 sec: 24575.5, 60 sec: 23961.5, 300 sec: 22254.9). Total num frames: 2002944. Throughput: 0: 5995.7. Samples: 485926. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
252 |
+
[2023-02-22 21:24:28,158][24717] Avg episode reward: [(0, '15.807')]
|
253 |
+
[2023-02-22 21:24:28,351][32245] Updated weights for policy 0, policy_version 490 (0.0007)
|
254 |
+
[2023-02-22 21:24:30,011][32245] Updated weights for policy 0, policy_version 500 (0.0008)
|
255 |
+
[2023-02-22 21:24:31,794][32245] Updated weights for policy 0, policy_version 510 (0.0008)
|
256 |
+
[2023-02-22 21:24:33,156][24717] Fps is (10 sec: 24166.3, 60 sec: 23961.6, 300 sec: 22334.0). Total num frames: 2121728. Throughput: 0: 5997.7. Samples: 521516. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
257 |
+
[2023-02-22 21:24:33,157][24717] Avg episode reward: [(0, '20.357')]
|
258 |
+
[2023-02-22 21:24:33,159][32230] Saving new best policy, reward=20.357!
|
259 |
+
[2023-02-22 21:24:33,468][32245] Updated weights for policy 0, policy_version 520 (0.0006)
|
260 |
+
[2023-02-22 21:24:35,160][32245] Updated weights for policy 0, policy_version 530 (0.0006)
|
261 |
+
[2023-02-22 21:24:36,867][32245] Updated weights for policy 0, policy_version 540 (0.0007)
|
262 |
+
[2023-02-22 21:24:38,156][24717] Fps is (10 sec: 23757.3, 60 sec: 23961.6, 300 sec: 22405.1). Total num frames: 2240512. Throughput: 0: 6027.9. Samples: 557828. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
263 |
+
[2023-02-22 21:24:38,157][24717] Avg episode reward: [(0, '18.501')]
|
264 |
+
[2023-02-22 21:24:38,559][32245] Updated weights for policy 0, policy_version 550 (0.0008)
|
265 |
+
[2023-02-22 21:24:40,271][32245] Updated weights for policy 0, policy_version 560 (0.0008)
|
266 |
+
[2023-02-22 21:24:41,996][32245] Updated weights for policy 0, policy_version 570 (0.0007)
|
267 |
+
[2023-02-22 21:24:43,156][24717] Fps is (10 sec: 24166.5, 60 sec: 24029.9, 300 sec: 22508.5). Total num frames: 2363392. Throughput: 0: 6032.4. Samples: 575736. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
268 |
+
[2023-02-22 21:24:43,157][24717] Avg episode reward: [(0, '20.253')]
|
269 |
+
[2023-02-22 21:24:43,651][32245] Updated weights for policy 0, policy_version 580 (0.0006)
|
270 |
+
[2023-02-22 21:24:45,447][32245] Updated weights for policy 0, policy_version 590 (0.0008)
|
271 |
+
[2023-02-22 21:24:47,173][32245] Updated weights for policy 0, policy_version 600 (0.0007)
|
272 |
+
[2023-02-22 21:24:48,156][24717] Fps is (10 sec: 23756.8, 60 sec: 24029.9, 300 sec: 22528.0). Total num frames: 2478080. Throughput: 0: 6041.4. Samples: 611476. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
273 |
+
[2023-02-22 21:24:48,160][24717] Avg episode reward: [(0, '19.851')]
|
274 |
+
[2023-02-22 21:24:48,164][32230] Saving /home/flahoud/studies/collab/train_dir/default_experiment/checkpoint_p0/checkpoint_000000605_2478080.pth...
|
275 |
+
[2023-02-22 21:24:48,870][32245] Updated weights for policy 0, policy_version 610 (0.0008)
|
276 |
+
[2023-02-22 21:24:50,606][32245] Updated weights for policy 0, policy_version 620 (0.0009)
|
277 |
+
[2023-02-22 21:24:52,395][32245] Updated weights for policy 0, policy_version 630 (0.0009)
|
278 |
+
[2023-02-22 21:24:53,156][24717] Fps is (10 sec: 23347.2, 60 sec: 24029.9, 300 sec: 22581.4). Total num frames: 2596864. Throughput: 0: 6013.9. Samples: 646694. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
279 |
+
[2023-02-22 21:24:53,157][24717] Avg episode reward: [(0, '22.850')]
|
280 |
+
[2023-02-22 21:24:53,160][32230] Saving new best policy, reward=22.850!
|
281 |
+
[2023-02-22 21:24:54,156][32245] Updated weights for policy 0, policy_version 640 (0.0008)
|
282 |
+
[2023-02-22 21:24:55,879][32245] Updated weights for policy 0, policy_version 650 (0.0007)
|
283 |
+
[2023-02-22 21:24:57,621][32245] Updated weights for policy 0, policy_version 660 (0.0007)
|
284 |
+
[2023-02-22 21:24:58,156][24717] Fps is (10 sec: 23756.8, 60 sec: 24029.9, 300 sec: 22630.4). Total num frames: 2715648. Throughput: 0: 5991.9. Samples: 664536. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
285 |
+
[2023-02-22 21:24:58,157][24717] Avg episode reward: [(0, '23.369')]
|
286 |
+
[2023-02-22 21:24:58,161][32230] Saving new best policy, reward=23.369!
|
287 |
+
[2023-02-22 21:24:59,373][32245] Updated weights for policy 0, policy_version 670 (0.0008)
|
288 |
+
[2023-02-22 21:25:01,221][32245] Updated weights for policy 0, policy_version 680 (0.0009)
|
289 |
+
[2023-02-22 21:25:02,970][32245] Updated weights for policy 0, policy_version 690 (0.0007)
|
290 |
+
[2023-02-22 21:25:03,156][24717] Fps is (10 sec: 23347.2, 60 sec: 23961.6, 300 sec: 22642.7). Total num frames: 2830336. Throughput: 0: 5951.6. Samples: 699006. Policy #0 lag: (min: 0.0, avg: 0.5, max: 2.0)
|
291 |
+
[2023-02-22 21:25:03,157][24717] Avg episode reward: [(0, '21.691')]
|
292 |
+
[2023-02-22 21:25:04,706][32245] Updated weights for policy 0, policy_version 700 (0.0007)
|
293 |
+
[2023-02-22 21:25:06,440][32245] Updated weights for policy 0, policy_version 710 (0.0007)
|
294 |
+
[2023-02-22 21:25:08,156][24717] Fps is (10 sec: 22937.6, 60 sec: 23825.1, 300 sec: 22654.0). Total num frames: 2945024. Throughput: 0: 5924.6. Samples: 734228. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
295 |
+
[2023-02-22 21:25:08,159][24717] Avg episode reward: [(0, '20.888')]
|
296 |
+
[2023-02-22 21:25:08,168][32245] Updated weights for policy 0, policy_version 720 (0.0008)
|
297 |
+
[2023-02-22 21:25:09,866][32245] Updated weights for policy 0, policy_version 730 (0.0009)
|
298 |
+
[2023-02-22 21:25:11,496][32245] Updated weights for policy 0, policy_version 740 (0.0008)
|
299 |
+
[2023-02-22 21:25:13,140][32245] Updated weights for policy 0, policy_version 750 (0.0007)
|
300 |
+
[2023-02-22 21:25:13,156][24717] Fps is (10 sec: 24166.4, 60 sec: 23893.3, 300 sec: 22755.6). Total num frames: 3072000. Throughput: 0: 5921.3. Samples: 752384. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
301 |
+
[2023-02-22 21:25:13,157][24717] Avg episode reward: [(0, '22.374')]
|
302 |
+
[2023-02-22 21:25:14,875][32245] Updated weights for policy 0, policy_version 760 (0.0008)
|
303 |
+
[2023-02-22 21:25:16,543][32245] Updated weights for policy 0, policy_version 770 (0.0007)
|
304 |
+
[2023-02-22 21:25:18,156][24717] Fps is (10 sec: 24576.0, 60 sec: 23893.3, 300 sec: 22791.3). Total num frames: 3190784. Throughput: 0: 5945.4. Samples: 789058. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
305 |
+
[2023-02-22 21:25:18,157][24717] Avg episode reward: [(0, '22.226')]
|
306 |
+
[2023-02-22 21:25:18,258][32245] Updated weights for policy 0, policy_version 780 (0.0006)
|
307 |
+
[2023-02-22 21:25:19,995][32245] Updated weights for policy 0, policy_version 790 (0.0007)
|
308 |
+
[2023-02-22 21:25:21,683][32245] Updated weights for policy 0, policy_version 800 (0.0006)
|
309 |
+
[2023-02-22 21:25:23,156][24717] Fps is (10 sec: 23756.7, 60 sec: 23825.0, 300 sec: 22824.6). Total num frames: 3309568. Throughput: 0: 5944.3. Samples: 825322. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
310 |
+
[2023-02-22 21:25:23,158][24717] Avg episode reward: [(0, '23.568')]
|
311 |
+
[2023-02-22 21:25:23,159][32230] Saving new best policy, reward=23.568!
|
312 |
+
[2023-02-22 21:25:23,385][32245] Updated weights for policy 0, policy_version 810 (0.0007)
|
313 |
+
[2023-02-22 21:25:25,018][32245] Updated weights for policy 0, policy_version 820 (0.0006)
|
314 |
+
[2023-02-22 21:25:26,705][32245] Updated weights for policy 0, policy_version 830 (0.0006)
|
315 |
+
[2023-02-22 21:25:28,156][24717] Fps is (10 sec: 24166.4, 60 sec: 23825.2, 300 sec: 22883.0). Total num frames: 3432448. Throughput: 0: 5950.8. Samples: 843522. Policy #0 lag: (min: 0.0, avg: 0.8, max: 2.0)
|
316 |
+
[2023-02-22 21:25:28,157][24717] Avg episode reward: [(0, '21.152')]
|
317 |
+
[2023-02-22 21:25:28,392][32245] Updated weights for policy 0, policy_version 840 (0.0008)
|
318 |
+
[2023-02-22 21:25:30,067][32245] Updated weights for policy 0, policy_version 850 (0.0006)
|
319 |
+
[2023-02-22 21:25:31,809][32245] Updated weights for policy 0, policy_version 860 (0.0009)
|
320 |
+
[2023-02-22 21:25:33,156][24717] Fps is (10 sec: 24576.2, 60 sec: 23893.4, 300 sec: 22937.6). Total num frames: 3555328. Throughput: 0: 5961.2. Samples: 879730. Policy #0 lag: (min: 0.0, avg: 0.6, max: 1.0)
|
321 |
+
[2023-02-22 21:25:33,157][24717] Avg episode reward: [(0, '25.351')]
|
322 |
+
[2023-02-22 21:25:33,159][32230] Saving new best policy, reward=25.351!
|
323 |
+
[2023-02-22 21:25:33,498][32245] Updated weights for policy 0, policy_version 870 (0.0006)
|
324 |
+
[2023-02-22 21:25:35,212][32245] Updated weights for policy 0, policy_version 880 (0.0007)
|
325 |
+
[2023-02-22 21:25:37,001][32245] Updated weights for policy 0, policy_version 890 (0.0012)
|
326 |
+
[2023-02-22 21:25:38,156][24717] Fps is (10 sec: 23756.8, 60 sec: 23825.1, 300 sec: 22937.6). Total num frames: 3670016. Throughput: 0: 5960.4. Samples: 914910. Policy #0 lag: (min: 0.0, avg: 0.7, max: 2.0)
|
327 |
+
[2023-02-22 21:25:38,158][24717] Avg episode reward: [(0, '24.882')]
|
328 |
+
[2023-02-22 21:25:38,807][32245] Updated weights for policy 0, policy_version 900 (0.0010)
|
329 |
+
[2023-02-22 21:25:40,496][32245] Updated weights for policy 0, policy_version 910 (0.0006)
|
330 |
+
[2023-02-22 21:25:42,288][32245] Updated weights for policy 0, policy_version 920 (0.0008)
|
331 |
+
[2023-02-22 21:25:43,157][24717] Fps is (10 sec: 22935.2, 60 sec: 23688.1, 300 sec: 22937.5). Total num frames: 3784704. Throughput: 0: 5959.6. Samples: 932726. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
332 |
+
[2023-02-22 21:25:43,158][24717] Avg episode reward: [(0, '23.635')]
|
333 |
+
[2023-02-22 21:25:44,045][32245] Updated weights for policy 0, policy_version 930 (0.0008)
|
334 |
+
[2023-02-22 21:25:45,787][32245] Updated weights for policy 0, policy_version 940 (0.0008)
|
335 |
+
[2023-02-22 21:25:47,486][32245] Updated weights for policy 0, policy_version 950 (0.0007)
|
336 |
+
[2023-02-22 21:25:48,156][24717] Fps is (10 sec: 23347.0, 60 sec: 23756.8, 300 sec: 22961.7). Total num frames: 3903488. Throughput: 0: 5975.9. Samples: 967920. Policy #0 lag: (min: 0.0, avg: 0.6, max: 2.0)
|
337 |
+
[2023-02-22 21:25:48,158][24717] Avg episode reward: [(0, '25.987')]
|
338 |
+
[2023-02-22 21:25:48,180][32230] Saving new best policy, reward=25.987!
|
339 |
+
[2023-02-22 21:25:49,224][32245] Updated weights for policy 0, policy_version 960 (0.0008)
|
340 |
+
[2023-02-22 21:25:50,941][32245] Updated weights for policy 0, policy_version 970 (0.0008)
|
341 |
+
[2023-02-22 21:25:52,319][32230] Stopping Batcher_0...
|
342 |
+
[2023-02-22 21:25:52,319][32230] Saving /home/flahoud/studies/collab/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
343 |
+
[2023-02-22 21:25:52,319][24717] Component Batcher_0 stopped!
|
344 |
+
[2023-02-22 21:25:52,319][32230] Loop batcher_evt_loop terminating...
|
345 |
+
[2023-02-22 21:25:52,333][24717] Component RolloutWorker_w5 stopped!
|
346 |
+
[2023-02-22 21:25:52,334][32253] Stopping RolloutWorker_w4...
|
347 |
+
[2023-02-22 21:25:52,335][32253] Loop rollout_proc4_evt_loop terminating...
|
348 |
+
[2023-02-22 21:25:52,335][24717] Component RolloutWorker_w4 stopped!
|
349 |
+
[2023-02-22 21:25:52,335][32252] Stopping RolloutWorker_w5...
|
350 |
+
[2023-02-22 21:25:52,336][32252] Loop rollout_proc5_evt_loop terminating...
|
351 |
+
[2023-02-22 21:25:52,337][32248] Stopping RolloutWorker_w2...
|
352 |
+
[2023-02-22 21:25:52,337][32248] Loop rollout_proc2_evt_loop terminating...
|
353 |
+
[2023-02-22 21:25:52,337][24717] Component RolloutWorker_w2 stopped!
|
354 |
+
[2023-02-22 21:25:52,338][32247] Stopping RolloutWorker_w1...
|
355 |
+
[2023-02-22 21:25:52,338][24717] Component RolloutWorker_w1 stopped!
|
356 |
+
[2023-02-22 21:25:52,341][32262] Stopping RolloutWorker_w6...
|
357 |
+
[2023-02-22 21:25:52,341][24717] Component RolloutWorker_w6 stopped!
|
358 |
+
[2023-02-22 21:25:52,342][32262] Loop rollout_proc6_evt_loop terminating...
|
359 |
+
[2023-02-22 21:25:52,339][32247] Loop rollout_proc1_evt_loop terminating...
|
360 |
+
[2023-02-22 21:25:52,344][32245] Weights refcount: 2 0
|
361 |
+
[2023-02-22 21:25:52,345][32245] Stopping InferenceWorker_p0-w0...
|
362 |
+
[2023-02-22 21:25:52,345][32245] Loop inference_proc0-0_evt_loop terminating...
|
363 |
+
[2023-02-22 21:25:52,345][24717] Component InferenceWorker_p0-w0 stopped!
|
364 |
+
[2023-02-22 21:25:52,348][24717] Component RolloutWorker_w0 stopped!
|
365 |
+
[2023-02-22 21:25:52,348][32246] Stopping RolloutWorker_w0...
|
366 |
+
[2023-02-22 21:25:52,349][32246] Loop rollout_proc0_evt_loop terminating...
|
367 |
+
[2023-02-22 21:25:52,353][32263] Stopping RolloutWorker_w7...
|
368 |
+
[2023-02-22 21:25:52,353][32263] Loop rollout_proc7_evt_loop terminating...
|
369 |
+
[2023-02-22 21:25:52,353][24717] Component RolloutWorker_w7 stopped!
|
370 |
+
[2023-02-22 21:25:52,380][32230] Saving new best policy, reward=28.129!
|
371 |
+
[2023-02-22 21:25:52,411][32249] Stopping RolloutWorker_w3...
|
372 |
+
[2023-02-22 21:25:52,411][32249] Loop rollout_proc3_evt_loop terminating...
|
373 |
+
[2023-02-22 21:25:52,411][24717] Component RolloutWorker_w3 stopped!
|
374 |
+
[2023-02-22 21:25:52,447][32230] Saving /home/flahoud/studies/collab/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
375 |
+
[2023-02-22 21:25:52,520][32230] Stopping LearnerWorker_p0...
|
376 |
+
[2023-02-22 21:25:52,521][32230] Loop learner_proc0_evt_loop terminating...
|
377 |
+
[2023-02-22 21:25:52,520][24717] Component LearnerWorker_p0 stopped!
|
378 |
+
[2023-02-22 21:25:52,522][24717] Waiting for process learner_proc0 to stop...
|
379 |
+
[2023-02-22 21:25:53,288][24717] Waiting for process inference_proc0-0 to join...
|
380 |
+
[2023-02-22 21:25:53,289][24717] Waiting for process rollout_proc0 to join...
|
381 |
+
[2023-02-22 21:25:53,290][24717] Waiting for process rollout_proc1 to join...
|
382 |
+
[2023-02-22 21:25:53,290][24717] Waiting for process rollout_proc2 to join...
|
383 |
+
[2023-02-22 21:25:53,291][24717] Waiting for process rollout_proc3 to join...
|
384 |
+
[2023-02-22 21:25:53,292][24717] Waiting for process rollout_proc4 to join...
|
385 |
+
[2023-02-22 21:25:53,293][24717] Waiting for process rollout_proc5 to join...
|
386 |
+
[2023-02-22 21:25:53,293][24717] Waiting for process rollout_proc6 to join...
|
387 |
+
[2023-02-22 21:25:53,294][24717] Waiting for process rollout_proc7 to join...
|
388 |
+
[2023-02-22 21:25:53,295][24717] Batcher 0 profile tree view:
|
389 |
+
batching: 13.1577, releasing_batches: 0.0181
|
390 |
+
[2023-02-22 21:25:53,295][24717] InferenceWorker_p0-w0 profile tree view:
|
391 |
+
wait_policy: 0.0000
|
392 |
+
wait_policy_total: 4.3703
|
393 |
+
update_model: 2.3692
|
394 |
+
weight_update: 0.0008
|
395 |
+
one_step: 0.0019
|
396 |
+
handle_policy_step: 155.6174
|
397 |
+
deserialize: 7.8731, stack: 0.8396, obs_to_device_normalize: 39.5270, forward: 65.6351, send_messages: 13.4852
|
398 |
+
prepare_outputs: 21.2746
|
399 |
+
to_cpu: 12.7880
|
400 |
+
[2023-02-22 21:25:53,296][24717] Learner 0 profile tree view:
|
401 |
+
misc: 0.0051, prepare_batch: 6.8617
|
402 |
+
train: 17.8979
|
403 |
+
epoch_init: 0.0044, minibatch_init: 0.0044, losses_postprocess: 0.2348, kl_divergence: 0.2579, after_optimizer: 3.0676
|
404 |
+
calculate_losses: 7.3319
|
405 |
+
losses_init: 0.0024, forward_head: 0.7714, bptt_initial: 4.3619, tail: 0.4261, advantages_returns: 0.1152, losses: 0.7021
|
406 |
+
bptt: 0.8259
|
407 |
+
bptt_forward_core: 0.7938
|
408 |
+
update: 6.7190
|
409 |
+
clip: 0.8093
|
410 |
+
[2023-02-22 21:25:53,297][24717] RolloutWorker_w0 profile tree view:
|
411 |
+
wait_for_trajectories: 0.0989, enqueue_policy_requests: 5.3262, env_step: 71.2475, overhead: 6.5361, complete_rollouts: 0.5137
|
412 |
+
save_policy_outputs: 5.6181
|
413 |
+
split_output_tensors: 2.7734
|
414 |
+
[2023-02-22 21:25:53,297][24717] RolloutWorker_w7 profile tree view:
|
415 |
+
wait_for_trajectories: 0.1168, enqueue_policy_requests: 5.4305, env_step: 72.9827, overhead: 6.5238, complete_rollouts: 0.4607
|
416 |
+
save_policy_outputs: 5.7456
|
417 |
+
split_output_tensors: 2.8446
|
418 |
+
[2023-02-22 21:25:53,299][24717] Loop Runner_EvtLoop terminating...
|
419 |
+
[2023-02-22 21:25:53,300][24717] Runner profile tree view:
|
420 |
+
main_loop: 182.3930
|
421 |
+
[2023-02-22 21:25:53,301][24717] Collected {0: 4005888}, FPS: 21962.9
|
422 |
+
[2023-02-22 21:26:34,224][24717] Loading existing experiment configuration from /home/flahoud/studies/collab/train_dir/default_experiment/config.json
|
423 |
+
[2023-02-22 21:26:34,225][24717] Overriding arg 'num_workers' with value 1 passed from command line
|
424 |
+
[2023-02-22 21:26:34,226][24717] Adding new argument 'no_render'=True that is not in the saved config file!
|
425 |
+
[2023-02-22 21:26:34,226][24717] Adding new argument 'save_video'=True that is not in the saved config file!
|
426 |
+
[2023-02-22 21:26:34,227][24717] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
|
427 |
+
[2023-02-22 21:26:34,228][24717] Adding new argument 'video_name'=None that is not in the saved config file!
|
428 |
+
[2023-02-22 21:26:34,228][24717] Adding new argument 'max_num_frames'=1000000000.0 that is not in the saved config file!
|
429 |
+
[2023-02-22 21:26:34,229][24717] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
|
430 |
+
[2023-02-22 21:26:34,229][24717] Adding new argument 'push_to_hub'=False that is not in the saved config file!
|
431 |
+
[2023-02-22 21:26:34,230][24717] Adding new argument 'hf_repository'=None that is not in the saved config file!
|
432 |
+
[2023-02-22 21:26:34,230][24717] Adding new argument 'policy_index'=0 that is not in the saved config file!
|
433 |
+
[2023-02-22 21:26:34,231][24717] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
|
434 |
+
[2023-02-22 21:26:34,231][24717] Adding new argument 'train_script'=None that is not in the saved config file!
|
435 |
+
[2023-02-22 21:26:34,232][24717] Adding new argument 'enjoy_script'=None that is not in the saved config file!
|
436 |
+
[2023-02-22 21:26:34,233][24717] Using frameskip 1 and render_action_repeat=4 for evaluation
|
437 |
+
[2023-02-22 21:26:34,249][24717] Doom resolution: 160x120, resize resolution: (128, 72)
|
438 |
+
[2023-02-22 21:26:34,251][24717] RunningMeanStd input shape: (3, 72, 128)
|
439 |
+
[2023-02-22 21:26:34,252][24717] RunningMeanStd input shape: (1,)
|
440 |
+
[2023-02-22 21:26:34,264][24717] ConvEncoder: input_channels=3
|
441 |
+
[2023-02-22 21:26:34,367][24717] Conv encoder output size: 512
|
442 |
+
[2023-02-22 21:26:34,369][24717] Policy head output size: 512
|
443 |
+
[2023-02-22 21:26:37,138][24717] Loading state from checkpoint /home/flahoud/studies/collab/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
|
444 |
+
[2023-02-22 21:26:38,361][24717] Num frames 100...
|
445 |
+
[2023-02-22 21:26:38,459][24717] Num frames 200...
|
446 |
+
[2023-02-22 21:26:38,556][24717] Num frames 300...
|
447 |
+
[2023-02-22 21:26:38,658][24717] Num frames 400...
|
448 |
+
[2023-02-22 21:26:38,753][24717] Num frames 500...
|
449 |
+
[2023-02-22 21:26:38,849][24717] Num frames 600...
|
450 |
+
[2023-02-22 21:26:38,945][24717] Num frames 700...
|
451 |
+
[2023-02-22 21:26:39,045][24717] Num frames 800...
|
452 |
+
[2023-02-22 21:26:39,097][24717] Avg episode rewards: #0: 16.000, true rewards: #0: 8.000
|
453 |
+
[2023-02-22 21:26:39,098][24717] Avg episode reward: 16.000, avg true_objective: 8.000
|
454 |
+
[2023-02-22 21:26:39,202][24717] Num frames 900...
|
455 |
+
[2023-02-22 21:26:39,299][24717] Num frames 1000...
|
456 |
+
[2023-02-22 21:26:39,395][24717] Num frames 1100...
|
457 |
+
[2023-02-22 21:26:39,491][24717] Num frames 1200...
|
458 |
+
[2023-02-22 21:26:39,588][24717] Num frames 1300...
|
459 |
+
[2023-02-22 21:26:39,690][24717] Num frames 1400...
|
460 |
+
[2023-02-22 21:26:39,803][24717] Avg episode rewards: #0: 14.805, true rewards: #0: 7.305
|
461 |
+
[2023-02-22 21:26:39,804][24717] Avg episode reward: 14.805, avg true_objective: 7.305
|
462 |
+
[2023-02-22 21:26:39,843][24717] Num frames 1500...
|
463 |
+
[2023-02-22 21:26:39,946][24717] Num frames 1600...
[2023-02-22 21:26:40,046][24717] Num frames 1700...
[2023-02-22 21:26:40,148][24717] Num frames 1800...
[2023-02-22 21:26:40,247][24717] Num frames 1900...
[2023-02-22 21:26:40,346][24717] Num frames 2000...
[2023-02-22 21:26:40,445][24717] Num frames 2100...
[2023-02-22 21:26:40,542][24717] Num frames 2200...
[2023-02-22 21:26:40,689][24717] Avg episode rewards: #0: 14.977, true rewards: #0: 7.643
[2023-02-22 21:26:40,690][24717] Avg episode reward: 14.977, avg true_objective: 7.643
[2023-02-22 21:26:40,698][24717] Num frames 2300...
[2023-02-22 21:26:40,798][24717] Num frames 2400...
[2023-02-22 21:26:40,901][24717] Num frames 2500...
[2023-02-22 21:26:40,999][24717] Num frames 2600...
[2023-02-22 21:26:41,095][24717] Num frames 2700...
[2023-02-22 21:26:41,199][24717] Num frames 2800...
[2023-02-22 21:26:41,293][24717] Num frames 2900...
[2023-02-22 21:26:41,384][24717] Num frames 3000...
[2023-02-22 21:26:41,475][24717] Num frames 3100...
[2023-02-22 21:26:41,570][24717] Num frames 3200...
[2023-02-22 21:26:41,668][24717] Num frames 3300...
[2023-02-22 21:26:41,772][24717] Num frames 3400...
[2023-02-22 21:26:41,875][24717] Num frames 3500...
[2023-02-22 21:26:42,000][24717] Avg episode rewards: #0: 18.183, true rewards: #0: 8.932
[2023-02-22 21:26:42,001][24717] Avg episode reward: 18.183, avg true_objective: 8.932
[2023-02-22 21:26:42,034][24717] Num frames 3600...
[2023-02-22 21:26:42,140][24717] Num frames 3700...
[2023-02-22 21:26:42,243][24717] Num frames 3800...
[2023-02-22 21:26:42,342][24717] Num frames 3900...
[2023-02-22 21:26:42,446][24717] Num frames 4000...
[2023-02-22 21:26:42,548][24717] Num frames 4100...
[2023-02-22 21:26:42,648][24717] Num frames 4200...
[2023-02-22 21:26:42,776][24717] Avg episode rewards: #0: 16.954, true rewards: #0: 8.554
[2023-02-22 21:26:42,777][24717] Avg episode reward: 16.954, avg true_objective: 8.554
[2023-02-22 21:26:42,800][24717] Num frames 4300...
[2023-02-22 21:26:42,898][24717] Num frames 4400...
[2023-02-22 21:26:42,995][24717] Num frames 4500...
[2023-02-22 21:26:43,090][24717] Num frames 4600...
[2023-02-22 21:26:43,194][24717] Num frames 4700...
[2023-02-22 21:26:43,294][24717] Num frames 4800...
[2023-02-22 21:26:43,391][24717] Num frames 4900...
[2023-02-22 21:26:43,491][24717] Num frames 5000...
[2023-02-22 21:26:43,590][24717] Num frames 5100...
[2023-02-22 21:26:43,687][24717] Num frames 5200...
[2023-02-22 21:26:43,789][24717] Num frames 5300...
[2023-02-22 21:26:43,889][24717] Num frames 5400...
[2023-02-22 21:26:43,992][24717] Num frames 5500...
[2023-02-22 21:26:44,093][24717] Num frames 5600...
[2023-02-22 21:26:44,171][24717] Avg episode rewards: #0: 19.535, true rewards: #0: 9.368
[2023-02-22 21:26:44,172][24717] Avg episode reward: 19.535, avg true_objective: 9.368
[2023-02-22 21:26:44,254][24717] Num frames 5700...
[2023-02-22 21:26:44,359][24717] Num frames 5800...
[2023-02-22 21:26:44,461][24717] Num frames 5900...
[2023-02-22 21:26:44,563][24717] Num frames 6000...
[2023-02-22 21:26:44,663][24717] Num frames 6100...
[2023-02-22 21:26:44,815][24717] Avg episode rewards: #0: 18.139, true rewards: #0: 8.853
[2023-02-22 21:26:44,817][24717] Avg episode reward: 18.139, avg true_objective: 8.853
[2023-02-22 21:26:44,820][24717] Num frames 6200...
[2023-02-22 21:26:44,921][24717] Num frames 6300...
[2023-02-22 21:26:45,024][24717] Num frames 6400...
[2023-02-22 21:26:45,122][24717] Num frames 6500...
[2023-02-22 21:26:45,222][24717] Num frames 6600...
[2023-02-22 21:26:45,317][24717] Num frames 6700...
[2023-02-22 21:26:45,413][24717] Num frames 6800...
[2023-02-22 21:26:45,510][24717] Num frames 6900...
[2023-02-22 21:26:45,604][24717] Num frames 7000...
[2023-02-22 21:26:45,703][24717] Num frames 7100...
[2023-02-22 21:26:45,813][24717] Num frames 7200...
[2023-02-22 21:26:45,923][24717] Avg episode rewards: #0: 18.571, true rewards: #0: 9.071
[2023-02-22 21:26:45,924][24717] Avg episode reward: 18.571, avg true_objective: 9.071
[2023-02-22 21:26:45,967][24717] Num frames 7300...
[2023-02-22 21:26:46,066][24717] Num frames 7400...
[2023-02-22 21:26:46,160][24717] Num frames 7500...
[2023-02-22 21:26:46,259][24717] Num frames 7600...
[2023-02-22 21:26:46,352][24717] Num frames 7700...
[2023-02-22 21:26:46,451][24717] Num frames 7800...
[2023-02-22 21:26:46,548][24717] Num frames 7900...
[2023-02-22 21:26:46,646][24717] Num frames 8000...
[2023-02-22 21:26:46,726][24717] Avg episode rewards: #0: 18.250, true rewards: #0: 8.917
[2023-02-22 21:26:46,727][24717] Avg episode reward: 18.250, avg true_objective: 8.917
[2023-02-22 21:26:46,803][24717] Num frames 8100...
[2023-02-22 21:26:46,897][24717] Num frames 8200...
[2023-02-22 21:26:46,991][24717] Num frames 8300...
[2023-02-22 21:26:47,084][24717] Num frames 8400...
[2023-02-22 21:26:47,177][24717] Num frames 8500...
[2023-02-22 21:26:47,270][24717] Num frames 8600...
[2023-02-22 21:26:47,383][24717] Avg episode rewards: #0: 17.462, true rewards: #0: 8.662
[2023-02-22 21:26:47,384][24717] Avg episode reward: 17.462, avg true_objective: 8.662
[2023-02-22 21:27:02,989][24717] Replay video saved to /home/flahoud/studies/collab/train_dir/default_experiment/replay.mp4!
[2023-02-22 21:32:31,282][24717] Loading existing experiment configuration from /home/flahoud/studies/collab/train_dir/default_experiment/config.json
[2023-02-22 21:32:31,283][24717] Overriding arg 'num_workers' with value 1 passed from command line
[2023-02-22 21:32:31,284][24717] Adding new argument 'no_render'=True that is not in the saved config file!
[2023-02-22 21:32:31,285][24717] Adding new argument 'save_video'=True that is not in the saved config file!
[2023-02-22 21:32:31,286][24717] Adding new argument 'video_frames'=1000000000.0 that is not in the saved config file!
[2023-02-22 21:32:31,286][24717] Adding new argument 'video_name'=None that is not in the saved config file!
[2023-02-22 21:32:31,286][24717] Adding new argument 'max_num_frames'=100000 that is not in the saved config file!
[2023-02-22 21:32:31,287][24717] Adding new argument 'max_num_episodes'=10 that is not in the saved config file!
[2023-02-22 21:32:31,287][24717] Adding new argument 'push_to_hub'=True that is not in the saved config file!
[2023-02-22 21:32:31,288][24717] Adding new argument 'hf_repository'='GrimReaperSam/rl_course_vizdoom_health_gathering_supreme' that is not in the saved config file!
[2023-02-22 21:32:31,288][24717] Adding new argument 'policy_index'=0 that is not in the saved config file!
[2023-02-22 21:32:31,289][24717] Adding new argument 'eval_deterministic'=False that is not in the saved config file!
[2023-02-22 21:32:31,289][24717] Adding new argument 'train_script'=None that is not in the saved config file!
[2023-02-22 21:32:31,291][24717] Adding new argument 'enjoy_script'=None that is not in the saved config file!
[2023-02-22 21:32:31,292][24717] Using frameskip 1 and render_action_repeat=4 for evaluation
[2023-02-22 21:32:31,303][24717] RunningMeanStd input shape: (3, 72, 128)
[2023-02-22 21:32:31,304][24717] RunningMeanStd input shape: (1,)
[2023-02-22 21:32:31,315][24717] ConvEncoder: input_channels=3
[2023-02-22 21:32:31,371][24717] Conv encoder output size: 512
[2023-02-22 21:32:31,372][24717] Policy head output size: 512
[2023-02-22 21:32:31,403][24717] Loading state from checkpoint /home/flahoud/studies/collab/train_dir/default_experiment/checkpoint_p0/checkpoint_000000978_4005888.pth...
[2023-02-22 21:32:31,890][24717] Num frames 100...
[2023-02-22 21:32:31,981][24717] Num frames 200...
[2023-02-22 21:32:32,072][24717] Num frames 300...
[2023-02-22 21:32:32,172][24717] Num frames 400...
[2023-02-22 21:32:32,320][24717] Avg episode rewards: #0: 7.980, true rewards: #0: 4.980
[2023-02-22 21:32:32,321][24717] Avg episode reward: 7.980, avg true_objective: 4.980
[2023-02-22 21:32:32,323][24717] Num frames 500...
[2023-02-22 21:32:32,420][24717] Num frames 600...
[2023-02-22 21:32:32,518][24717] Num frames 700...
[2023-02-22 21:32:32,623][24717] Num frames 800...
[2023-02-22 21:32:32,718][24717] Num frames 900...
[2023-02-22 21:32:32,811][24717] Num frames 1000...
[2023-02-22 21:32:32,928][24717] Num frames 1100...
[2023-02-22 21:32:33,028][24717] Num frames 1200...
[2023-02-22 21:32:33,125][24717] Num frames 1300...
[2023-02-22 21:32:33,222][24717] Num frames 1400...
[2023-02-22 21:32:33,324][24717] Num frames 1500...
[2023-02-22 21:32:33,427][24717] Num frames 1600...
[2023-02-22 21:32:33,531][24717] Num frames 1700...
[2023-02-22 21:32:33,631][24717] Num frames 1800...
[2023-02-22 21:32:33,730][24717] Num frames 1900...
[2023-02-22 21:32:33,835][24717] Num frames 2000...
[2023-02-22 21:32:33,937][24717] Num frames 2100...
[2023-02-22 21:32:34,048][24717] Num frames 2200...
[2023-02-22 21:32:34,193][24717] Avg episode rewards: #0: 26.950, true rewards: #0: 11.450
[2023-02-22 21:32:34,194][24717] Avg episode reward: 26.950, avg true_objective: 11.450
[2023-02-22 21:32:34,206][24717] Num frames 2300...
[2023-02-22 21:32:34,315][24717] Num frames 2400...
[2023-02-22 21:32:34,424][24717] Num frames 2500...
[2023-02-22 21:32:34,528][24717] Num frames 2600...
[2023-02-22 21:32:34,632][24717] Num frames 2700...
[2023-02-22 21:32:34,736][24717] Num frames 2800...
[2023-02-22 21:32:34,843][24717] Num frames 2900...
[2023-02-22 21:32:34,943][24717] Num frames 3000...
[2023-02-22 21:32:35,049][24717] Num frames 3100...
[2023-02-22 21:32:35,153][24717] Num frames 3200...
[2023-02-22 21:32:35,254][24717] Num frames 3300...
[2023-02-22 21:32:35,356][24717] Num frames 3400...
[2023-02-22 21:32:35,457][24717] Num frames 3500...
[2023-02-22 21:32:35,558][24717] Num frames 3600...
[2023-02-22 21:32:35,655][24717] Num frames 3700...
[2023-02-22 21:32:35,741][24717] Avg episode rewards: #0: 30.766, true rewards: #0: 12.433
[2023-02-22 21:32:35,742][24717] Avg episode reward: 30.766, avg true_objective: 12.433
[2023-02-22 21:32:35,816][24717] Num frames 3800...
[2023-02-22 21:32:35,916][24717] Num frames 3900...
[2023-02-22 21:32:36,017][24717] Num frames 4000...
[2023-02-22 21:32:36,125][24717] Num frames 4100...
[2023-02-22 21:32:36,232][24717] Num frames 4200...
[2023-02-22 21:32:36,342][24717] Num frames 4300...
[2023-02-22 21:32:36,445][24717] Num frames 4400...
[2023-02-22 21:32:36,553][24717] Num frames 4500...
[2023-02-22 21:32:36,664][24717] Num frames 4600...
[2023-02-22 21:32:36,734][24717] Avg episode rewards: #0: 28.032, true rewards: #0: 11.532
[2023-02-22 21:32:36,735][24717] Avg episode reward: 28.032, avg true_objective: 11.532
[2023-02-22 21:32:36,823][24717] Num frames 4700...
[2023-02-22 21:32:36,929][24717] Num frames 4800...
[2023-02-22 21:32:37,027][24717] Num frames 4900...
[2023-02-22 21:32:37,131][24717] Num frames 5000...
[2023-02-22 21:32:37,231][24717] Num frames 5100...
[2023-02-22 21:32:37,331][24717] Num frames 5200...
[2023-02-22 21:32:37,428][24717] Num frames 5300...
[2023-02-22 21:32:37,530][24717] Num frames 5400...
[2023-02-22 21:32:37,643][24717] Num frames 5500...
[2023-02-22 21:32:37,747][24717] Num frames 5600...
[2023-02-22 21:32:37,817][24717] Avg episode rewards: #0: 27.428, true rewards: #0: 11.228
[2023-02-22 21:32:37,818][24717] Avg episode reward: 27.428, avg true_objective: 11.228
[2023-02-22 21:32:37,907][24717] Num frames 5700...
[2023-02-22 21:32:38,007][24717] Num frames 5800...
[2023-02-22 21:32:38,106][24717] Num frames 5900...
[2023-02-22 21:32:38,202][24717] Num frames 6000...
[2023-02-22 21:32:38,296][24717] Num frames 6100...
[2023-02-22 21:32:38,398][24717] Num frames 6200...
[2023-02-22 21:32:38,494][24717] Num frames 6300...
[2023-02-22 21:32:38,593][24717] Num frames 6400...
[2023-02-22 21:32:38,689][24717] Num frames 6500...
[2023-02-22 21:32:38,755][24717] Avg episode rewards: #0: 25.850, true rewards: #0: 10.850
[2023-02-22 21:32:38,757][24717] Avg episode reward: 25.850, avg true_objective: 10.850
[2023-02-22 21:32:38,845][24717] Num frames 6600...
[2023-02-22 21:32:38,943][24717] Num frames 6700...
[2023-02-22 21:32:39,043][24717] Num frames 6800...
[2023-02-22 21:32:39,187][24717] Avg episode rewards: #0: 23.134, true rewards: #0: 9.849
[2023-02-22 21:32:39,188][24717] Avg episode reward: 23.134, avg true_objective: 9.849
[2023-02-22 21:32:39,195][24717] Num frames 6900...
[2023-02-22 21:32:39,302][24717] Num frames 7000...
[2023-02-22 21:32:39,399][24717] Num frames 7100...
[2023-02-22 21:32:39,501][24717] Num frames 7200...
[2023-02-22 21:32:39,600][24717] Num frames 7300...
[2023-02-22 21:32:39,696][24717] Num frames 7400...
[2023-02-22 21:32:39,790][24717] Num frames 7500...
[2023-02-22 21:32:39,884][24717] Num frames 7600...
[2023-02-22 21:32:39,985][24717] Num frames 7700...
[2023-02-22 21:32:40,084][24717] Num frames 7800...
[2023-02-22 21:32:40,187][24717] Num frames 7900...
[2023-02-22 21:32:40,292][24717] Avg episode rewards: #0: 22.937, true rewards: #0: 9.937
[2023-02-22 21:32:40,294][24717] Avg episode reward: 22.937, avg true_objective: 9.937
[2023-02-22 21:32:40,349][24717] Num frames 8000...
[2023-02-22 21:32:40,445][24717] Num frames 8100...
[2023-02-22 21:32:40,550][24717] Num frames 8200...
[2023-02-22 21:32:40,657][24717] Num frames 8300...
[2023-02-22 21:32:40,805][24717] Avg episode rewards: #0: 20.998, true rewards: #0: 9.331
[2023-02-22 21:32:40,806][24717] Avg episode reward: 20.998, avg true_objective: 9.331
[2023-02-22 21:32:40,809][24717] Num frames 8400...
[2023-02-22 21:32:40,907][24717] Num frames 8500...
[2023-02-22 21:32:41,012][24717] Num frames 8600...
[2023-02-22 21:32:41,114][24717] Num frames 8700...
[2023-02-22 21:32:41,255][24717] Avg episode rewards: #0: 19.282, true rewards: #0: 8.782
[2023-02-22 21:32:41,256][24717] Avg episode reward: 19.282, avg true_objective: 8.782
[2023-02-22 21:32:57,417][24717] Replay video saved to /home/flahoud/studies/collab/train_dir/default_experiment/replay.mp4!