masatochi committed · Commit c565d94 · verified · 1 parent: 510fe1a

Training in progress, step 25, checkpoint

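This is the commit message the Hugging Face Trainer writes when it pushes an in-progress checkpoint to the Hub. As a minimal sketch, assuming `trainer` is an already-configured transformers.Trainer for the same run and the repository has been cloned locally, training could be resumed from the checkpoint files changed below:

    # Hypothetical: `trainer` is a transformers.Trainer already built with the same
    # model, dataset, and TrainingArguments used for this run.
    trainer.train(resume_from_checkpoint="last-checkpoint")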
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:67c0165d8664009d9959430ef2716c7d859c773d81e2222e8c47f493731a599e
+oid sha256:5992a86747cef14f63762668b2ac2fbf9e23ce5e8bb74133abf9d11c202cc4f4
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48c53e063b7068b148b3052d916b8604a4c885f59794b0c40d1ee43211c5a564
+oid sha256:2c1e8f5f88992d2a779e848ba59169c2a7ac5ded2dac15a4d2e66d46ef348edf
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9c11e5d26ba3ac2866e0013c6e62c861a528a14c5443a0c17aeedbcc0d6c649
+oid sha256:cc695d50fa9c7a7b39e7508c0827b13b4400b02508b8aa483545d12dba8b4308
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e3d7496cd5cad5fcd343f51f06f864ca525a833da3ba71e9c4d90915510423ac
+oid sha256:7cbe95d5ecd33771846042e20aabf210775f4e6a78ced16f0764898d40abeba5
 size 1064
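
Each of the four files above is stored as a Git LFS pointer; between the two revisions only the sha256 object ID changes while the recorded byte size stays the same. As a minimal sketch (the local path is hypothetical and assumes the file has been fetched with `git lfs pull` so the pointer is replaced by the real content), the new oid can be checked against the downloaded file like this:

    import hashlib

    def sha256_of(path, chunk_size=1 << 20):
        # Stream the file so large checkpoint blobs never need to fit in memory.
        digest = hashlib.sha256()
        with open(path, "rb") as f:
            for chunk in iter(lambda: f.read(chunk_size), b""):
                digest.update(chunk)
        return digest.hexdigest()

    # oid taken from the adapter_model.safetensors pointer above.
    expected = "5992a86747cef14f63762668b2ac2fbf9e23ce5e8bb74133abf9d11c202cc4f4"
    actual = sha256_of("last-checkpoint/adapter_model.safetensors")
    print("ok" if actual == expected else f"hash mismatch: {actual}")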
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.009780548933308882,
+  "epoch": 0.012225686166636102,
   "eval_steps": 34,
-  "global_step": 20,
+  "global_step": 25,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -155,6 +155,41 @@
       "learning_rate": 0.00013333333333333334,
       "loss": 1.1023,
       "step": 20
+    },
+    {
+      "epoch": 0.010269576379974325,
+      "grad_norm": 0.4289817214012146,
+      "learning_rate": 0.00014,
+      "loss": 1.0195,
+      "step": 21
+    },
+    {
+      "epoch": 0.01075860382663977,
+      "grad_norm": 1.290226936340332,
+      "learning_rate": 0.00014666666666666666,
+      "loss": 0.9534,
+      "step": 22
+    },
+    {
+      "epoch": 0.011247631273305215,
+      "grad_norm": 0.5022335648536682,
+      "learning_rate": 0.00015333333333333334,
+      "loss": 0.8952,
+      "step": 23
+    },
+    {
+      "epoch": 0.011736658719970658,
+      "grad_norm": 0.5421992540359497,
+      "learning_rate": 0.00016,
+      "loss": 0.8553,
+      "step": 24
+    },
+    {
+      "epoch": 0.012225686166636102,
+      "grad_norm": 1.568458080291748,
+      "learning_rate": 0.0001666666666666667,
+      "loss": 0.9764,
+      "step": 25
     }
   ],
   "logging_steps": 1,
@@ -174,7 +209,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 8.877908391100416e+16,
+  "total_flos": 1.109738548887552e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null