masatochi committed (verified)
Commit: ee2b542
1 Parent(s): 360dc07

Training in progress, step 80, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d5f46cdbcddbd110909abba05976b088ca0a215268860607fa465f10ca9912a1
+oid sha256:7fac2a188ca757124ed37df4c4e3cd760af896f06cb1852013bed461e07edd15
 size 83945296
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:666644d5d3a68687287a0c66a70b9e5eaf88924d5e93ea735cf48152d89870b9
+oid sha256:94d7315c5918a3d91c3ff30c4f7cbbc0252e41f75551ad8b77f74d0ebc7a4e14
 size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:05e8b11cd8d83539c6a1db5f797527713c44262128b9b6a0754da46492005fea
+oid sha256:01c1ee15529a526a9fe4c19508572c414c6ef23f4c5c37a9bdfac25ac8b91945
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:52408658f8afed2d0ac64c41a3c3f93b4c8a8478fa6362941012d17f634f7dad
+oid sha256:4bc75fc1c14b28d29d31fa9d4252536c919fc25a390fac3a1e8c09d6575b4029
 size 1064
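
Each file above is stored through Git LFS, so the diff only swaps the pointer's sha256 oid while the recorded byte size stays the same. Below is a minimal sketch (not part of this repository; the helper name is made up) of checking a locally downloaded blob against the pointer fields shown in these diffs:

```python
import hashlib
import os

def matches_lfs_pointer(path: str, expected_oid: str, expected_size: int) -> bool:
    """Check a local file against the 'oid sha256:...' and 'size ...' pointer fields."""
    if os.path.getsize(path) != expected_size:
        return False
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):  # hash in 1 MiB chunks
            digest.update(chunk)
    return digest.hexdigest() == expected_oid

# New adapter weights from this commit (oid and size copied from the diff above):
print(matches_lfs_pointer(
    "last-checkpoint/adapter_model.safetensors",
    "7fac2a188ca757124ed37df4c4e3cd760af896f06cb1852013bed461e07edd15",
    83945296,
))
```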
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.03667705849990831,
+  "epoch": 0.039122195733235526,
   "eval_steps": 34,
-  "global_step": 75,
+  "global_step": 80,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -556,6 +556,41 @@
       "learning_rate": 0.00016736956436465573,
       "loss": 1.2833,
       "step": 75
+    },
+    {
+      "epoch": 0.03716608594657375,
+      "grad_norm": 0.5053292512893677,
+      "learning_rate": 0.0001659924534878723,
+      "loss": 0.9678,
+      "step": 76
+    },
+    {
+      "epoch": 0.037655113393239195,
+      "grad_norm": 0.31924471259117126,
+      "learning_rate": 0.00016459280624867874,
+      "loss": 0.9702,
+      "step": 77
+    },
+    {
+      "epoch": 0.03814414083990464,
+      "grad_norm": 0.37664365768432617,
+      "learning_rate": 0.0001631711006253251,
+      "loss": 0.8382,
+      "step": 78
+    },
+    {
+      "epoch": 0.03863316828657008,
+      "grad_norm": 0.3583241403102875,
+      "learning_rate": 0.0001617278221289793,
+      "loss": 1.0149,
+      "step": 79
+    },
+    {
+      "epoch": 0.039122195733235526,
+      "grad_norm": 0.3891322910785675,
+      "learning_rate": 0.00016026346363792567,
+      "loss": 1.2247,
+      "step": 80
     }
   ],
   "logging_steps": 1,
@@ -575,7 +610,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 3.329215646662656e+17,
+  "total_flos": 3.5511633564401664e+17,
   "train_batch_size": 3,
   "trial_name": null,
   "trial_params": null