dimasik87 committed · Commit 5685b59 · verified · 1 Parent(s): baf177f

Training in progress, step 15, checkpoint

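For reference, the files at this exact commit can be fetched with the huggingface_hub client. This is a minimal sketch only; the repo id below is a placeholder, since the actual repository name is not shown on this page.

# Sketch: download just the checkpoint files pinned to this commit.
# "dimasik87/<repo-name>" is a placeholder repo id, not taken from this page.
from huggingface_hub import snapshot_download

local_dir = snapshot_download(
    repo_id="dimasik87/<repo-name>",        # placeholder; substitute the real repo id
    revision="5685b59",                     # commit shown above; use the full hash if a short one is rejected
    allow_patterns=["last-checkpoint/*"],   # only the checkpoint directory
)
print(local_dir)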
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e1b9cc10e59eee0a482ef058cdd892f5ee95113383053a597b42bfdb154b7124
+oid sha256:63a5b66bb5cdf13092a0931b868dc49722c0c159ebf5360a226abe1d4473218f
 size 45118424
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:4317a6ae5f87fb4a3a2c25cb4308b3c896713ee28ff43548cdab62d58ba48bc0
+oid sha256:3a2359906f8660facbbf435cdea237820c31f9e940a763a301c53421776fdd41
 size 90310010
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8194e1b8c74d7216b4daf200bbfe7e0ecb859e151ee00b0f9ea8e824e6edee25
+oid sha256:cab4d6c2ce43e24d19e16d85f2f7af6e830104f5a5c4b29ee3dac335d1d10f59
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bb578e75c11a81e85dda67a691f96ba4793a02960f1409fd3e1511aac873491a
+oid sha256:55cc6a3635d19619caf820a77458fa3cfe7756f7bb9d10678c62733cb46f36c0
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.018674136321195144,
+  "epoch": 0.028011204481792718,
   "eval_steps": 5,
-  "global_step": 10,
+  "global_step": 15,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -101,6 +101,49 @@
       "eval_samples_per_second": 29.718,
       "eval_steps_per_second": 14.859,
       "step": 10
+    },
+    {
+      "epoch": 0.02054154995331466,
+      "grad_norm": 14.252686500549316,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 5.4462,
+      "step": 11
+    },
+    {
+      "epoch": 0.022408963585434174,
+      "grad_norm": 13.769002914428711,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 5.9482,
+      "step": 12
+    },
+    {
+      "epoch": 0.02427637721755369,
+      "grad_norm": 14.226924896240234,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 5.444,
+      "step": 13
+    },
+    {
+      "epoch": 0.026143790849673203,
+      "grad_norm": 12.833412170410156,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 4.164,
+      "step": 14
+    },
+    {
+      "epoch": 0.028011204481792718,
+      "grad_norm": 29.60838508605957,
+      "learning_rate": 0.0001,
+      "loss": 5.389,
+      "step": 15
+    },
+    {
+      "epoch": 0.028011204481792718,
+      "eval_loss": 5.058675289154053,
+      "eval_runtime": 7.6248,
+      "eval_samples_per_second": 29.64,
+      "eval_steps_per_second": 14.82,
+      "step": 15
     }
   ],
   "logging_steps": 1,
@@ -120,7 +163,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 967722569564160.0,
+  "total_flos": 1451583854346240.0,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null