masatochi committed · commit 0969863 (verified) · 1 parent: e213029

Training in progress, step 150, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:2295db7c3787f041a0d921ea38ba92a0c96ad0c119ea49905329ab11b00ee672
+ oid sha256:6297606052a4277aa0a1e4619bd299b3833e32268284b14aa730349c00e330a7
  size 83945296
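
The file above holds the PEFT adapter weights for this checkpoint. As a minimal sketch (not confirmed by this commit), the adapter can be attached to its base model with PEFT; the base-model name below is a placeholder, since the diff does not show which model the adapter was trained on.

from transformers import AutoModelForCausalLM
from peft import PeftModel

BASE_MODEL = "your-org/your-base-model"   # assumption: the base model is not named in this commit
CHECKPOINT_DIR = "last-checkpoint"        # folder containing adapter_config.json + adapter_model.safetensors

base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, CHECKPOINT_DIR)  # loads the adapter weights shown above
model.eval()
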
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:a20ad2af010c8f7b9e90d434aec4381b6abebb4e0ef496dc53a2987bcfd3c4fa
+ oid sha256:2366e26bc61fe9363daef1823fa6a4707c5d0477f89a365a7728b80458335ff1
  size 43122580
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8b70a97ff18a63ef26fe23f248a99f7e6c8585ce19a9713e037357e85de43714
+ oid sha256:2d95327014bf0e992837899fdc63d1e3049ea8baca978228506724df9c716834
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:be04ea4bc9f159499b4a7b296b15e0c0e5c54743663ee8550a26340683f89e32
+ oid sha256:5a946cb282348d3fba8c242cd51f3b90b3dccbd24720ee6b6397a7e493e7b92c
  size 1064
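
Each of the four files above is a Git LFS pointer: the commit swaps the sha256 oid while the byte size stays identical, as expected when a checkpoint file is overwritten in place with new tensors of the same shape. A minimal sketch for verifying a downloaded file against the oid recorded in its pointer (the path and expected hash are taken from the adapter pointer above):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large checkpoint files need not fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "6297606052a4277aa0a1e4619bd299b3833e32268284b14aa730349c00e330a7"  # new adapter oid above
actual = sha256_of("last-checkpoint/adapter_model.safetensors")
print("ok" if actual == expected else f"mismatch: {actual}")
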
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.0709089797664894,
+ "epoch": 0.07335411699981662,
  "eval_steps": 34,
- "global_step": 145,
+ "global_step": 150,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1062,6 +1062,41 @@
  "learning_rate": 4.735678371226441e-05,
  "loss": 0.8951,
  "step": 145
+ },
+ {
+ "epoch": 0.07139800721315484,
+ "grad_norm": 0.4603347182273865,
+ "learning_rate": 4.5794664352755055e-05,
+ "loss": 1.0377,
+ "step": 146
+ },
+ {
+ "epoch": 0.07188703465982028,
+ "grad_norm": 0.28372839093208313,
+ "learning_rate": 4.425105606571145e-05,
+ "loss": 1.008,
+ "step": 147
+ },
+ {
+ "epoch": 0.07237606210648573,
+ "grad_norm": 0.29044821858406067,
+ "learning_rate": 4.272648599194948e-05,
+ "loss": 1.0118,
+ "step": 148
+ },
+ {
+ "epoch": 0.07286508955315117,
+ "grad_norm": 0.3254542350769043,
+ "learning_rate": 4.12214747707527e-05,
+ "loss": 1.0088,
+ "step": 149
+ },
+ {
+ "epoch": 0.07335411699981662,
+ "grad_norm": 0.3154352605342865,
+ "learning_rate": 3.973653636207437e-05,
+ "loss": 1.0273,
+ "step": 150
  }
  ],
  "logging_steps": 1,
@@ -1081,7 +1116,7 @@
  "attributes": {}
  }
  },
- "total_flos": 6.436483583547802e+17,
+ "total_flos": 6.658431293325312e+17,
  "train_batch_size": 3,
  "trial_name": null,
  "trial_params": null