masatochi committed · verified
Commit cbc761b · 1 Parent(s): e5b5625

Training in progress, step 165, checkpoint

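This commit captures a full 🤗 Trainer checkpoint at global step 165 (adapter weights, optimizer, RNG and scheduler state, plus the trainer log). A minimal sketch of how such a checkpoint could be resumed; the helper name, model, and dataset below are stand-ins, not part of this repo:

from transformers import Trainer, TrainingArguments

def resume_from_this_commit(model, train_dataset):
    # Hypothetical helper: `model` and `train_dataset` stand in for the run's
    # real (unknown) PEFT model and data; batch size 3 mirrors trainer_state.json.
    args = TrainingArguments(output_dir=".", per_device_train_batch_size=3)
    trainer = Trainer(model=model, args=args, train_dataset=train_dataset)
    # Picks training back up at global_step 165 from this commit's files.
    return trainer.train(resume_from_checkpoint="last-checkpoint")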
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:53bad8d26f88728dcad49d9d67620340a524e6147bc3971fc55bb8b1402df5b4
+oid sha256:f9354ab4a7ce8547671b14d84f50c16c066ccb67d905ae21bb089417cb3367e5
 size 59827904
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ecd579c529539ff37f6b6ff96f02721f0d3685a0b8a5cfa862e349f56ed45cd0
+oid sha256:fb39ebd7a334944e5a271162b8f2bd07cc32d5062d51c937404ee4aad36c5bb8
 size 30875540
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1a492bc11190965aaac7d5575038263ec57f90649266f6d672b0255bafe0953f
+oid sha256:7ae36b88f579a5ac24bd63c58055344d0b6ee1ce3c4b9a88d6e3a11fff784ffe
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fda70a7e2255bb2a6bed4064da315669732f8ee1496404c42573741f484eb69b
+oid sha256:b9d255475980828d00dffcac4a69741da74c7e5ef4f645ab21d69e57306e3317
 size 1064
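Each binary file above is tracked with Git LFS, so the diff only swaps the pointer's sha256 oid; the actual payload of the stated size lives in LFS storage. A small sketch of checking a downloaded file against its pointer (the path is just one of the files above):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large checkpoints don't need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for block in iter(lambda: f.read(chunk_size), b""):
            h.update(block)
    return h.hexdigest()

# The result should equal the "oid sha256:..." value in the pointer above.
print(sha256_of("last-checkpoint/adapter_model.safetensors"))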
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.07824439146647105,
+  "epoch": 0.08068952869979827,
   "eval_steps": 34,
-  "global_step": 160,
+  "global_step": 165,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1167,6 +1167,41 @@
       "learning_rate": 2.6099108277934103e-05,
       "loss": 10.5182,
       "step": 160
+    },
+    {
+      "epoch": 0.0787334189131365,
+      "grad_norm": 1.8735052790064742e+18,
+      "learning_rate": 2.4866811044312665e-05,
+      "loss": 9.7353,
+      "step": 161
+    },
+    {
+      "epoch": 0.07922244635980194,
+      "grad_norm": 1.1486916833747927e+19,
+      "learning_rate": 2.36601717258897e-05,
+      "loss": 8.3726,
+      "step": 162
+    },
+    {
+      "epoch": 0.07971147380646738,
+      "grad_norm": Infinity,
+      "learning_rate": 2.2479602388887012e-05,
+      "loss": 9.6089,
+      "step": 163
+    },
+    {
+      "epoch": 0.08020050125313283,
+      "grad_norm": Infinity,
+      "learning_rate": 2.132550619665168e-05,
+      "loss": 8.7235,
+      "step": 164
+    },
+    {
+      "epoch": 0.08068952869979827,
+      "grad_norm": Infinity,
+      "learning_rate": 2.0198277271976052e-05,
+      "loss": 8.9486,
+      "step": 165
     }
   ],
   "logging_steps": 1,
@@ -1186,7 +1221,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.527167481531597e+17,
+  "total_flos": 3.637391465329459e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null