kooff11 committed
Commit 9f606c5 · verified · 1 Parent(s): 9d8dac8

Training in progress, step 26, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48d0f5c06b15d2287468433011bb8d33fb6fa8615006cb2aef854693bcd667dd
+oid sha256:443e4e4f937d73e0119310938caeeaeb381794e87ba4c05deb852213a5fa4f98
 size 45118424
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9335b7988d854204003dfd2d754bb20a6a1235830f7cb713558251e3291f93d8
+oid sha256:f40a11bcf1f0c7118ef3e1efd4e6c078e8c6b74b94cd2e415a2af1297f7ed79f
 size 23159290
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:29f1d72b6c239612cdd5f3fa99a08419a8c573c441fb60cde5f8c820aea0be58
+oid sha256:cb90d719e937dd8276373ed7d70fad2369f124f7cf828fbeecbc84855fde6133
 size 14512
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9cc97b0c454fb2242f4c07e8b154a05c758918a1383f9c98000cd4ccc34580b5
+oid sha256:1977bb9f4ad57391ef2df027cb00edafbd5be714333f455abd93cefc987f397c
 size 14512
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5adea6def46a60dd5782726d51a89bdf0f30226cd791e511d8af09a5644f99e2
+oid sha256:6e65bdb10468d12c8b6afa89fe8730e38a94a4f704431de04b0c1bf27440afce
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.0030924999442457943,
+  "epoch": 0.0061849998884915885,
   "eval_steps": 13,
-  "global_step": 13,
+  "global_step": 26,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -114,6 +114,105 @@
       "eval_samples_per_second": 39.826,
       "eval_steps_per_second": 9.957,
       "step": 13
+    },
+    {
+      "epoch": 0.0033303845553416247,
+      "grad_norm": 5.04780387878418,
+      "learning_rate": 8.535533905932738e-05,
+      "loss": 0.131,
+      "step": 14
+    },
+    {
+      "epoch": 0.0035682691664374547,
+      "grad_norm": 1.841731309890747,
+      "learning_rate": 8.296729075500344e-05,
+      "loss": 0.0827,
+      "step": 15
+    },
+    {
+      "epoch": 0.003806153777533285,
+      "grad_norm": 1.6053825616836548,
+      "learning_rate": 8.043807145043604e-05,
+      "loss": 0.0752,
+      "step": 16
+    },
+    {
+      "epoch": 0.004044038388629116,
+      "grad_norm": 1.511683702468872,
+      "learning_rate": 7.777851165098012e-05,
+      "loss": 0.0741,
+      "step": 17
+    },
+    {
+      "epoch": 0.004281922999724946,
+      "grad_norm": 5.841466903686523,
+      "learning_rate": 7.500000000000001e-05,
+      "loss": 0.0867,
+      "step": 18
+    },
+    {
+      "epoch": 0.004519807610820777,
+      "grad_norm": 5.441026210784912,
+      "learning_rate": 7.211443451095007e-05,
+      "loss": 0.0663,
+      "step": 19
+    },
+    {
+      "epoch": 0.004757692221916607,
+      "grad_norm": 1.9340760707855225,
+      "learning_rate": 6.91341716182545e-05,
+      "loss": 0.0498,
+      "step": 20
+    },
+    {
+      "epoch": 0.004995576833012437,
+      "grad_norm": 1.7755597829818726,
+      "learning_rate": 6.607197326515808e-05,
+      "loss": 0.0431,
+      "step": 21
+    },
+    {
+      "epoch": 0.0052334614441082675,
+      "grad_norm": 0.8990055918693542,
+      "learning_rate": 6.294095225512603e-05,
+      "loss": 0.0285,
+      "step": 22
+    },
+    {
+      "epoch": 0.0054713460552040976,
+      "grad_norm": 1.0627079010009766,
+      "learning_rate": 5.9754516100806423e-05,
+      "loss": 0.0427,
+      "step": 23
+    },
+    {
+      "epoch": 0.005709230666299928,
+      "grad_norm": 0.7508691549301147,
+      "learning_rate": 5.6526309611002594e-05,
+      "loss": 0.0114,
+      "step": 24
+    },
+    {
+      "epoch": 0.0059471152773957585,
+      "grad_norm": 0.787846028804779,
+      "learning_rate": 5.327015646150716e-05,
+      "loss": 0.0305,
+      "step": 25
+    },
+    {
+      "epoch": 0.0061849998884915885,
+      "grad_norm": 1.2530877590179443,
+      "learning_rate": 5e-05,
+      "loss": 0.0366,
+      "step": 26
+    },
+    {
+      "epoch": 0.0061849998884915885,
+      "eval_loss": 0.02799982577562332,
+      "eval_runtime": 711.2213,
+      "eval_samples_per_second": 39.819,
+      "eval_steps_per_second": 9.955,
+      "step": 26
     }
   ],
   "logging_steps": 1,
@@ -133,7 +232,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 4.025725889386906e+16,
+  "total_flos": 8.051451778773811e+16,
   "train_batch_size": 2,
   "trial_name": null,
   "trial_params": null