leixa committed
Commit bc2b341 · verified · 1 Parent(s): 7e3cfff

Training in progress, step 462, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a94171cecd33e76d6d8459a3fb63548959eb0685692b6a15cc318d297f2bd4ed
+oid sha256:2f8d0c376cd6589d5b11a8f79cee03f674e63829a2290155bc1ede6fbec8d765
 size 93608
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0ebc1a322d1d003f0fa9b55767eb496276a9ff94588981aca8850a402da80a00
+oid sha256:ed8b70b8a342812192d5c3b7c9978254f5ffabf514448203b80e1686d5fdc13b
 size 197158
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:301cc041eab7eb674260bf30ff0c9951da0d11395a56034569eaa0cb29a55e7b
+oid sha256:d31defcd29d743f2c19de79a505fa9f79f745600b1ccfe976120a3783f910551
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:b9507fdefdfac1d5dbc7a23a3aeb675b6dd3cc22a3762b7e85ff02a1c9c43105
+oid sha256:7b58b44a2d5024ddc12e64ead45d5d25c7fc985d9aaeb44c7bc3de9b8cf56f23
 size 1064
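
The four binary checkpoint files above are tracked with Git LFS, so the repository stores only a small pointer (version, oid, size); this commit swaps each pointer's sha256 oid while the byte size stays the same. A minimal sketch of checking that a locally downloaded blob matches its pointer, assuming the LFS object has already been pulled to the path shown (the path and expected oid below are taken from this commit, the helper name is illustrative):

import hashlib

def lfs_oid(path: str, chunk_size: int = 1 << 20) -> str:
    """Stream the file and return its sha256 hex digest (the LFS 'oid')."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

# Expected oid taken from the new adapter_model.safetensors pointer in this commit.
expected = "2f8d0c376cd6589d5b11a8f79cee03f674e63829a2290155bc1ede6fbec8d765"
actual = lfs_oid("last-checkpoint/adapter_model.safetensors")  # assumes the file was pulled locally
print("match" if actual == expected else f"mismatch: {actual}")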
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 2.0817843866171004,
+  "epoch": 2.2899628252788102,
   "eval_steps": 42,
-  "global_step": 420,
+  "global_step": 462,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -1075,6 +1075,112 @@
       "eval_samples_per_second": 190.476,
       "eval_steps_per_second": 24.09,
       "step": 420
+    },
+    {
+      "epoch": 2.096654275092937,
+      "grad_norm": 0.044985052198171616,
+      "learning_rate": 5.9702234071631e-06,
+      "loss": 12.1785,
+      "step": 423
+    },
+    {
+      "epoch": 2.1115241635687734,
+      "grad_norm": 0.04410139098763466,
+      "learning_rate": 5.5226705990794155e-06,
+      "loss": 12.1236,
+      "step": 426
+    },
+    {
+      "epoch": 2.12639405204461,
+      "grad_norm": 0.05194435641169548,
+      "learning_rate": 5.091571939329048e-06,
+      "loss": 11.6732,
+      "step": 429
+    },
+    {
+      "epoch": 2.141263940520446,
+      "grad_norm": 0.05501653999090195,
+      "learning_rate": 4.677086910538092e-06,
+      "loss": 12.3777,
+      "step": 432
+    },
+    {
+      "epoch": 2.1561338289962824,
+      "grad_norm": 0.04983314871788025,
+      "learning_rate": 4.279368849209381e-06,
+      "loss": 11.8892,
+      "step": 435
+    },
+    {
+      "epoch": 2.171003717472119,
+      "grad_norm": 0.03725075721740723,
+      "learning_rate": 3.898564888996476e-06,
+      "loss": 11.2909,
+      "step": 438
+    },
+    {
+      "epoch": 2.1858736059479553,
+      "grad_norm": 0.05022915452718735,
+      "learning_rate": 3.534815906272404e-06,
+      "loss": 12.1708,
+      "step": 441
+    },
+    {
+      "epoch": 2.200743494423792,
+      "grad_norm": 0.04059191420674324,
+      "learning_rate": 3.18825646801314e-06,
+      "loss": 12.0311,
+      "step": 444
+    },
+    {
+      "epoch": 2.2156133828996283,
+      "grad_norm": 0.056071698665618896,
+      "learning_rate": 2.8590147820153513e-06,
+      "loss": 11.767,
+      "step": 447
+    },
+    {
+      "epoch": 2.2304832713754648,
+      "grad_norm": 0.0420910008251667,
+      "learning_rate": 2.547212649466568e-06,
+      "loss": 11.9581,
+      "step": 450
+    },
+    {
+      "epoch": 2.2453531598513012,
+      "grad_norm": 0.05278888717293739,
+      "learning_rate": 2.2529654198854835e-06,
+      "loss": 12.2002,
+      "step": 453
+    },
+    {
+      "epoch": 2.2602230483271377,
+      "grad_norm": 0.05625374615192413,
+      "learning_rate": 1.9763819484490355e-06,
+      "loss": 11.4997,
+      "step": 456
+    },
+    {
+      "epoch": 2.2750929368029738,
+      "grad_norm": 0.03911924734711647,
+      "learning_rate": 1.7175645557220566e-06,
+      "loss": 11.6504,
+      "step": 459
+    },
+    {
+      "epoch": 2.2899628252788102,
+      "grad_norm": 0.054204318672418594,
+      "learning_rate": 1.4766089898042678e-06,
+      "loss": 12.3732,
+      "step": 462
+    },
+    {
+      "epoch": 2.2899628252788102,
+      "eval_loss": 11.911750793457031,
+      "eval_runtime": 1.7787,
+      "eval_samples_per_second": 191.152,
+      "eval_steps_per_second": 24.175,
+      "step": 462
     }
   ],
   "logging_steps": 3,
@@ -1094,7 +1200,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 2024747827200.0,
+  "total_flos": 2227222609920.0,
   "train_batch_size": 8,
   "trial_name": null,
   "trial_params": null