masatochi committed (verified)
Commit 2dcfd6a · Parent(s): ca2cc64

Training in progress, step 75, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93e02cdd1adcfe71a506fa5bc7635a71a137ea736debdec16ce3088c2f4288b7
+oid sha256:d5f46cdbcddbd110909abba05976b088ca0a215268860607fa465f10ca9912a1
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e6bcd7ac9e515130617391943b3d9f5a46923b1b1cd7abb4951103e1e52bf91e
+oid sha256:666644d5d3a68687287a0c66a70b9e5eaf88924d5e93ea735cf48152d89870b9
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c53416f50af32d04b9099560de94b7eda2ac7de98df5f4387b71785667ea642a
+oid sha256:05e8b11cd8d83539c6a1db5f797527713c44262128b9b6a0754da46492005fea
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ff360a84775339d0d3e752ae059cb60f5332814e8f938b32d8d690f5d4087fc5
+oid sha256:52408658f8afed2d0ac64c41a3c3f93b4c8a8478fa6362941012d17f634f7dad
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03423192126658109,
+  "epoch": 0.03667705849990831,
   "eval_steps": 34,
-  "global_step": 70,
+  "global_step": 75,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -521,6 +521,41 @@
       "learning_rate": 0.00017390089172206592,
       "loss": 0.9441,
       "step": 70
+    },
+    {
+      "epoch": 0.03472094871324653,
+      "grad_norm": 0.3507513999938965,
+      "learning_rate": 0.00017264335740162242,
+      "loss": 0.9604,
+      "step": 71
+    },
+    {
+      "epoch": 0.035209976159911976,
+      "grad_norm": 0.39182308316230774,
+      "learning_rate": 0.00017136101544117525,
+      "loss": 1.0598,
+      "step": 72
+    },
+    {
+      "epoch": 0.03569900360657742,
+      "grad_norm": 0.3877043128013611,
+      "learning_rate": 0.0001700543037593291,
+      "loss": 0.8719,
+      "step": 73
+    },
+    {
+      "epoch": 0.036188031053242864,
+      "grad_norm": 0.6628935933113098,
+      "learning_rate": 0.00016872366859692627,
+      "loss": 0.9241,
+      "step": 74
+    },
+    {
+      "epoch": 0.03667705849990831,
+      "grad_norm": 0.45674100518226624,
+      "learning_rate": 0.00016736956436465573,
+      "loss": 1.2833,
+      "step": 75
     }
   ],
   "logging_steps": 1,
@@ -540,7 +575,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.1072679368851456e+17,
+  "total_flos": 3.329215646662656e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null
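
Once the repository has been cloned, the updated state can be read back to confirm the values recorded in this diff. A minimal sketch in Python, assuming only the relative path last-checkpoint/ shown above (trainer_state.json is plain JSON, so the Git LFS objects are not needed for this check):

# Minimal sketch (not part of the commit): read back trainer_state.json
# and confirm the fields updated by this step-75 checkpoint.
import json
from pathlib import Path

state = json.loads(Path("last-checkpoint/trainer_state.json").read_text())

print(state["global_step"])      # expected: 75
print(state["epoch"])            # expected: 0.03667705849990831
print(state["log_history"][-1])  # last logged entry: step 75, loss 1.2833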