masatochi committed
Commit 1ee9b8b · verified · 1 Parent(s): c8ffc36

Training in progress, step 175, checkpoint
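(Note: a last-checkpoint/ folder pushed with commit messages of this form is what the Hugging Face Trainer produces when intermediate checkpoints are uploaded to the Hub during training. The sketch below is an assumed, minimal reconstruction of the relevant settings only; the model, dataset, and most hyperparameters are not recorded in this commit.)

from transformers import TrainingArguments

# Values marked "assumed" are inferred for illustration, not taken from the repository.
args = TrainingArguments(
    output_dir="outputs",               # assumed local checkpoint directory
    per_device_train_batch_size=3,      # matches "train_batch_size": 3 in trainer_state.json
    logging_steps=1,                    # matches "logging_steps": 1 in trainer_state.json
    save_steps=5,                       # assumed: pushed checkpoints advance 170 -> 175
    push_to_hub=True,                   # upload to the Hub while training
    hub_strategy="checkpoint",          # push the latest checkpoint to a last-checkpoint/ folder
)
# A Trainer built with these arguments (plus the model and datasets) emits commits
# titled "Training in progress, step N, checkpoint" each time a checkpoint is pushed.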

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0336951ec883d6b5de72b925d727ecd4ca0ac6440aa667a10a1cb852d4a63fae
+oid sha256:1d9ef8895aecefde27b9815938ee8b70b7cb60258c13c343c6f4409007de4b8f
 size 83945296
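(Note: the three-line blocks in this diff are Git LFS pointers, which record a sha256 oid and a byte size instead of the binary contents. As an illustration only, a minimal Python sketch for checking a locally downloaded copy of the adapter against the updated pointer above could look like this; the local path is an assumption about where the file was fetched to.)

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large checkpoint binaries need not fit in memory.
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid copied from the new adapter_model.safetensors pointer above.
expected = "1d9ef8895aecefde27b9815938ee8b70b7cb60258c13c343c6f4409007de4b8f"
actual = sha256_of("last-checkpoint/adapter_model.safetensors")  # assumed local path
print("match" if actual == expected else "mismatch: " + actual)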
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7d23e380a2d27ccc3f0d359f7f0c38883537fc47cda2a0ef980473b32d1f6a5a
+oid sha256:19e5799f46aa4ac27c728572da7de1ddb0568e884989194df0340fe82104010e
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:82e3dacf2b05874132bf6fb4325d351a4ab2ba13cd877561594093394c8a7acc
+oid sha256:350629c8726cd8603e94d6658b9108dc8785e8448290744abeabee7c4b3b48d6
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:963abeacc51d2fd967b0d854376103780952d47675257465be60a3612cecb103
+oid sha256:416d32fdf638b555a0fc031fb149fd18abaec9c234026f168a4a4bd45704a2a9
 size 1064
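(Note: the trainer_state.json diff below appends log entries for steps 171-175. A minimal sketch for inspecting that file once the checkpoint is downloaded, assuming the standard Hugging Face Trainer state layout in which the per-step records sit under the log_history key:)

import json

# Path mirrors the repository layout; adjust to wherever the checkpoint was downloaded.
with open("last-checkpoint/trainer_state.json") as f:
    state = json.load(f)

print(state["global_step"], state["epoch"])   # 175 and ~0.0856 for this commit
last = state["log_history"][-1]               # most recent per-step record
print(last.get("step"), last.get("loss"), last.get("learning_rate"))

Resuming from this checkpoint would typically go through Trainer.train(resume_from_checkpoint=...), which restores the optimizer.pt, scheduler.pt, and rng_state.pth files updated above.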
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
 "best_metric": null,
 "best_model_checkpoint": null,
-"epoch": 0.08313466593312549,
+"epoch": 0.08557980316645272,
 "eval_steps": 34,
-"global_step": 170,
+"global_step": 175,
 "is_hyper_param_search": false,
 "is_local_process_zero": true,
 "is_world_process_zero": true,
@@ -1245,6 +1245,41 @@
 "eval_samples_per_second": 1.898,
 "eval_steps_per_second": 0.633,
 "step": 170
+},
+{
+"epoch": 0.08362369337979095,
+"grad_norm": 0.4719301164150238,
+"learning_rate": 1.4020014855162755e-05,
+"loss": 1.0868,
+"step": 171
+},
+{
+"epoch": 0.08411272082645639,
+"grad_norm": 0.3227783143520355,
+"learning_rate": 1.3091105369447165e-05,
+"loss": 0.9113,
+"step": 172
+},
+{
+"epoch": 0.08460174827312184,
+"grad_norm": 0.37850451469421387,
+"learning_rate": 1.2191875191630209e-05,
+"loss": 1.0702,
+"step": 173
+},
+{
+"epoch": 0.08509077571978728,
+"grad_norm": 0.29281488060951233,
+"learning_rate": 1.1322631407993811e-05,
+"loss": 0.978,
+"step": 174
+},
+{
+"epoch": 0.08557980316645272,
+"grad_norm": 0.2620236277580261,
+"learning_rate": 1.0483670864493778e-05,
+"loss": 0.9053,
+"step": 175
 }
 ],
 "logging_steps": 1,
@@ -1264,7 +1299,7 @@
 "attributes": {}
 }
 },
-"total_flos": 7.546222132435354e+17,
+"total_flos": 7.768169842212864e+17,
 "train_batch_size": 3,
 "trial_name": null,
 "trial_params": null