dimasik87 committed (verified)
Commit: a6e2dec
Parent(s): 3b4a35f

Training in progress, step 28, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -10,22 +10,22 @@
   "layers_pattern": null,
   "layers_to_transform": null,
   "loftq_config": {},
-  "lora_alpha": 16,
-  "lora_dropout": 0.1,
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
   "megatron_config": null,
   "megatron_core": "megatron.core",
   "modules_to_save": null,
   "peft_type": "LORA",
-  "r": 8,
+  "r": 16,
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "k_proj",
+    "gate_proj",
     "up_proj",
-    "v_proj",
     "down_proj",
     "q_proj",
-    "gate_proj",
+    "v_proj",
+    "k_proj",
     "o_proj"
   ],
   "task_type": "CAUSAL_LM",
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:a3a447218ceeb22f88ac580f500013f1db5538e234b5917a523ce92149a4d624
-size 83945296
+oid sha256:3f187161c8d5d5c5260a2ea9a0301cd2bb88cf7dd2f2ae59676461f82342a76e
+size 167832240
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:2a873c8c05eb4b47015ce316e6dc76b2e828d0d13239508858822eaccd6c0ff2
-size 168149074
+oid sha256:ffa960204e88cd73596a42c02a93cf199a9c9d598db8a7b78d5fe67937aafa62
+size 335922386
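
The adapter file roughly doubles (83,945,296 → 167,832,240 bytes) and the optimizer state doubles with it (168,149,074 → 335,922,386 bytes). That is what raising the LoRA rank from r=8 to r=16 predicts: each adapted weight of shape (d_out, d_in) carries r·(d_in + d_out) trainable parameters, so the adapter grows linearly with r, and an Adam-style optimizer keeps moment tensors per trainable parameter, so its state grows the same way. A back-of-the-envelope check follows; the projection shapes are hypothetical, since the base model is not named anywhere in this commit.

# Rough LoRA size check. The module shapes are assumptions (not recorded in
# this commit); only the rank values and byte counts come from the diff.
def lora_params(shapes, r):
    # r * (d_in + d_out) trainable parameters per adapted weight matrix
    return sum(r * (d_in + d_out) for d_out, d_in in shapes)

example_shapes = [   # hypothetical (d_out, d_in) per transformer block
    (4096, 4096),    # q_proj
    (1024, 4096),    # k_proj
    (1024, 4096),    # v_proj
    (4096, 4096),    # o_proj
    (14336, 4096),   # gate_proj
    (14336, 4096),   # up_proj
    (4096, 14336),   # down_proj
]

print(lora_params(example_shapes, 16) / lora_params(example_shapes, 8))  # -> 2.0

The optimizer file being about twice the adapter file is likewise consistent with two moment tensors per trainable parameter at the same precision, though the precision itself is not recorded here.
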
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:fbde7a92d89dda9328b1682c532cac5c90350a756076648a9e5038556576ba90
+oid sha256:03e3e3226f817a68f9ab1f9dea4ed436137a86d4339f33c925f2cae23f100ef7
 size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:cfc868d168373f518aa3d81b0bcf5eaf44208e53fb25d46f5a39136146621a60
+oid sha256:ff994fffd2fb6fe21545e6fbc55baa2a1474438a89b2d40605678f7de701427c
 size 1064
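
scheduler.pt itself is an opaque serialized scheduler state, but the learning_rate values logged in trainer_state.json below are consistent with linear warmup over the first 10 steps to a peak of 2e-4, followed by cosine decay to max_steps (25 in the old run, 50 in the new one). A sketch of that schedule; this is an inference from the logged values, not something stated anywhere in the commit.

import math

# Inferred schedule: linear warmup, then cosine decay. peak_lr, warmup_steps
# and max_steps are read off trainer_state.json, not out of scheduler.pt.
def lr_at(step, peak_lr=2e-4, warmup_steps=10, max_steps=50):
    if step <= warmup_steps:
        return peak_lr * step / warmup_steps
    progress = (step - warmup_steps) / (max_steps - warmup_steps)
    return 0.5 * peak_lr * (1.0 + math.cos(math.pi * progress))

print(lr_at(11))  # ~1.99692e-04, matching step 11 in the new log below
print(lr_at(12))  # ~1.98769e-04, matching step 12
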
last-checkpoint/trainer_state.json CHANGED
@@ -1,16 +1,16 @@
 {
   "best_metric": null,
   "best_model_checkpoint": null,
-  "epoch": 0.001904027925742911,
-  "eval_steps": 3,
-  "global_step": 21,
+  "epoch": 0.002538703900990548,
+  "eval_steps": 4,
+  "global_step": 28,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
   "log_history": [
     {
       "epoch": 9.066799646394814e-05,
-      "grad_norm": 0.9196107983589172,
+      "grad_norm": 1.305655837059021,
       "learning_rate": 2e-05,
       "loss": 0.9466,
       "step": 1
@@ -18,213 +18,262 @@
     {
       "epoch": 9.066799646394814e-05,
       "eval_loss": 1.1514371633529663,
-      "eval_runtime": 280.9406,
-      "eval_samples_per_second": 8.265,
-      "eval_steps_per_second": 8.265,
+      "eval_runtime": 281.757,
+      "eval_samples_per_second": 8.241,
+      "eval_steps_per_second": 8.241,
       "step": 1
     },
     {
       "epoch": 0.0001813359929278963,
-      "grad_norm": 1.2303106784820557,
+      "grad_norm": 1.7540494203567505,
       "learning_rate": 4e-05,
       "loss": 1.2689,
       "step": 2
     },
     {
       "epoch": 0.00027200398939184443,
-      "grad_norm": 0.953157901763916,
+      "grad_norm": 1.3806679248809814,
       "learning_rate": 6e-05,
-      "loss": 0.9876,
+      "loss": 0.9866,
       "step": 3
     },
     {
-      "epoch": 0.00027200398939184443,
-      "eval_loss": 1.1419087648391724,
-      "eval_runtime": 281.2312,
-      "eval_samples_per_second": 8.257,
-      "eval_steps_per_second": 8.257,
-      "step": 3
+      "epoch": 0.0003626719858557926,
+      "grad_norm": 1.6540546417236328,
+      "learning_rate": 8e-05,
+      "loss": 0.886,
+      "step": 4
     },
     {
       "epoch": 0.0003626719858557926,
-      "grad_norm": 1.2283321619033813,
-      "learning_rate": 8e-05,
-      "loss": 0.8963,
+      "eval_loss": 1.063585877418518,
+      "eval_runtime": 281.615,
+      "eval_samples_per_second": 8.245,
+      "eval_steps_per_second": 8.245,
       "step": 4
     },
     {
       "epoch": 0.00045333998231974066,
-      "grad_norm": 1.0397660732269287,
+      "grad_norm": 1.351820945739746,
       "learning_rate": 0.0001,
-      "loss": 1.064,
+      "loss": 1.0148,
       "step": 5
     },
     {
       "epoch": 0.0005440079787836889,
-      "grad_norm": 1.3788177967071533,
+      "grad_norm": 1.6633617877960205,
       "learning_rate": 0.00012,
-      "loss": 1.311,
-      "step": 6
-    },
-    {
-      "epoch": 0.0005440079787836889,
-      "eval_loss": 0.9817929267883301,
-      "eval_runtime": 281.7359,
-      "eval_samples_per_second": 8.242,
-      "eval_steps_per_second": 8.242,
+      "loss": 1.1922,
       "step": 6
     },
     {
       "epoch": 0.000634675975247637,
-      "grad_norm": 1.0661911964416504,
+      "grad_norm": 1.216033697128296,
       "learning_rate": 0.00014,
-      "loss": 0.9209,
+      "loss": 0.7838,
       "step": 7
     },
     {
       "epoch": 0.0007253439717115851,
-      "grad_norm": 1.0511986017227173,
+      "grad_norm": 1.0886664390563965,
       "learning_rate": 0.00016,
-      "loss": 0.8409,
+      "loss": 0.6905,
       "step": 8
     },
     {
-      "epoch": 0.0008160119681755333,
-      "grad_norm": 1.0703239440917969,
-      "learning_rate": 0.00018,
-      "loss": 0.8303,
-      "step": 9
+      "epoch": 0.0007253439717115851,
+      "eval_loss": 0.6156781911849976,
+      "eval_runtime": 282.2824,
+      "eval_samples_per_second": 8.226,
+      "eval_steps_per_second": 8.226,
+      "step": 8
     },
     {
       "epoch": 0.0008160119681755333,
-      "eval_loss": 0.6860486268997192,
-      "eval_runtime": 285.0723,
-      "eval_samples_per_second": 8.145,
-      "eval_steps_per_second": 8.145,
+      "grad_norm": 1.2936314344406128,
+      "learning_rate": 0.00018,
+      "loss": 0.6603,
       "step": 9
     },
     {
       "epoch": 0.0009066799646394813,
-      "grad_norm": 1.1654818058013916,
+      "grad_norm": 1.429738163948059,
       "learning_rate": 0.0002,
-      "loss": 0.7193,
+      "loss": 0.518,
       "step": 10
     },
     {
       "epoch": 0.0009973479611034295,
-      "grad_norm": 1.2375497817993164,
-      "learning_rate": 0.00019781476007338058,
-      "loss": 0.6006,
+      "grad_norm": 1.501217007637024,
+      "learning_rate": 0.0001996917333733128,
+      "loss": 0.475,
       "step": 11
     },
     {
       "epoch": 0.0010880159575673777,
-      "grad_norm": 0.930781900882721,
-      "learning_rate": 0.0001913545457642601,
-      "loss": 0.4477,
+      "grad_norm": 1.0113286972045898,
+      "learning_rate": 0.00019876883405951377,
+      "loss": 0.3233,
       "step": 12
     },
     {
       "epoch": 0.0010880159575673777,
-      "eval_loss": 0.43009814620018005,
-      "eval_runtime": 283.6546,
-      "eval_samples_per_second": 8.186,
-      "eval_steps_per_second": 8.186,
+      "eval_loss": 0.29599475860595703,
+      "eval_runtime": 282.4723,
+      "eval_samples_per_second": 8.22,
+      "eval_steps_per_second": 8.22,
       "step": 12
     },
     {
       "epoch": 0.0011786839540313258,
-      "grad_norm": 1.0492234230041504,
-      "learning_rate": 0.00018090169943749476,
-      "loss": 0.4208,
+      "grad_norm": 1.045129656791687,
+      "learning_rate": 0.00019723699203976766,
+      "loss": 0.305,
       "step": 13
     },
     {
       "epoch": 0.001269351950495274,
-      "grad_norm": 0.8570551872253418,
-      "learning_rate": 0.00016691306063588583,
-      "loss": 0.3245,
+      "grad_norm": 0.7782332897186279,
+      "learning_rate": 0.00019510565162951537,
+      "loss": 0.2367,
       "step": 14
     },
     {
       "epoch": 0.001360019946959222,
-      "grad_norm": 1.2632145881652832,
-      "learning_rate": 0.00015000000000000001,
-      "loss": 0.2958,
+      "grad_norm": 1.1300699710845947,
+      "learning_rate": 0.0001923879532511287,
+      "loss": 0.1923,
       "step": 15
     },
     {
-      "epoch": 0.001360019946959222,
-      "eval_loss": 0.2870440185070038,
-      "eval_runtime": 282.0334,
-      "eval_samples_per_second": 8.233,
-      "eval_steps_per_second": 8.233,
-      "step": 15
+      "epoch": 0.0014506879434231703,
+      "grad_norm": 0.6594939231872559,
+      "learning_rate": 0.0001891006524188368,
+      "loss": 0.1823,
+      "step": 16
     },
     {
       "epoch": 0.0014506879434231703,
-      "grad_norm": 0.8736838698387146,
-      "learning_rate": 0.00013090169943749476,
-      "loss": 0.2332,
+      "eval_loss": 0.2610986828804016,
+      "eval_runtime": 282.2669,
+      "eval_samples_per_second": 8.226,
+      "eval_steps_per_second": 8.226,
       "step": 16
     },
     {
       "epoch": 0.0015413559398871183,
-      "grad_norm": 0.8168073892593384,
-      "learning_rate": 0.00011045284632676536,
-      "loss": 0.2588,
+      "grad_norm": 1.0713460445404053,
+      "learning_rate": 0.00018526401643540922,
+      "loss": 0.2457,
       "step": 17
     },
     {
       "epoch": 0.0016320239363510666,
-      "grad_norm": 0.9538082480430603,
-      "learning_rate": 8.954715367323468e-05,
-      "loss": 0.2894,
-      "step": 18
-    },
-    {
-      "epoch": 0.0016320239363510666,
-      "eval_loss": 0.2599656581878662,
-      "eval_runtime": 281.2105,
-      "eval_samples_per_second": 8.257,
-      "eval_steps_per_second": 8.257,
+      "grad_norm": 1.2344204187393188,
+      "learning_rate": 0.00018090169943749476,
+      "loss": 0.272,
       "step": 18
     },
     {
       "epoch": 0.0017226919328150146,
-      "grad_norm": 0.7329400777816772,
-      "learning_rate": 6.909830056250527e-05,
-      "loss": 0.2436,
+      "grad_norm": 0.7098881006240845,
+      "learning_rate": 0.0001760405965600031,
+      "loss": 0.225,
       "step": 19
     },
     {
       "epoch": 0.0018133599292789627,
-      "grad_norm": 0.8232644200325012,
-      "learning_rate": 5.000000000000002e-05,
-      "loss": 0.1621,
+      "grad_norm": 0.8920848965644836,
+      "learning_rate": 0.00017071067811865476,
+      "loss": 0.1479,
       "step": 20
     },
     {
-      "epoch": 0.001904027925742911,
-      "grad_norm": 0.6404268741607666,
-      "learning_rate": 3.308693936411421e-05,
-      "loss": 0.1838,
-      "step": 21
+      "epoch": 0.0018133599292789627,
+      "eval_loss": 0.23455704748630524,
+      "eval_runtime": 282.1688,
+      "eval_samples_per_second": 8.229,
+      "eval_steps_per_second": 8.229,
+      "step": 20
     },
     {
       "epoch": 0.001904027925742911,
-      "eval_loss": 0.24966681003570557,
-      "eval_runtime": 281.5685,
-      "eval_samples_per_second": 8.247,
-      "eval_steps_per_second": 8.247,
+      "grad_norm": 0.5211899876594543,
+      "learning_rate": 0.00016494480483301836,
+      "loss": 0.1664,
       "step": 21
+    },
+    {
+      "epoch": 0.001994695922206859,
+      "grad_norm": 0.6904815435409546,
+      "learning_rate": 0.00015877852522924732,
+      "loss": 0.2361,
+      "step": 22
+    },
+    {
+      "epoch": 0.002085363918670807,
+      "grad_norm": 0.6779294013977051,
+      "learning_rate": 0.0001522498564715949,
+      "loss": 0.1939,
+      "step": 23
+    },
+    {
+      "epoch": 0.0021760319151347554,
+      "grad_norm": 0.8565766215324402,
+      "learning_rate": 0.00014539904997395468,
+      "loss": 0.2695,
+      "step": 24
+    },
+    {
+      "epoch": 0.0021760319151347554,
+      "eval_loss": 0.22548052668571472,
+      "eval_runtime": 282.2744,
+      "eval_samples_per_second": 8.226,
+      "eval_steps_per_second": 8.226,
+      "step": 24
+    },
+    {
+      "epoch": 0.0022666999115987033,
+      "grad_norm": 0.7228334546089172,
+      "learning_rate": 0.000138268343236509,
+      "loss": 0.2259,
+      "step": 25
+    },
+    {
+      "epoch": 0.0023573679080626515,
+      "grad_norm": 0.9707162976264954,
+      "learning_rate": 0.00013090169943749476,
+      "loss": 0.2173,
+      "step": 26
+    },
+    {
+      "epoch": 0.0024480359045265998,
+      "grad_norm": 0.5939818024635315,
+      "learning_rate": 0.00012334453638559057,
+      "loss": 0.2012,
+      "step": 27
+    },
+    {
+      "epoch": 0.002538703900990548,
+      "grad_norm": 0.3913775682449341,
+      "learning_rate": 0.0001156434465040231,
+      "loss": 0.1645,
+      "step": 28
+    },
+    {
+      "epoch": 0.002538703900990548,
+      "eval_loss": 0.21409040689468384,
+      "eval_runtime": 282.2725,
+      "eval_samples_per_second": 8.226,
+      "eval_steps_per_second": 8.226,
+      "step": 28
     }
   ],
   "logging_steps": 1,
-  "max_steps": 25,
+  "max_steps": 50,
   "num_input_tokens_seen": 0,
   "num_train_epochs": 1,
-  "save_steps": 3,
+  "save_steps": 4,
   "stateful_callbacks": {
     "TrainerControl": {
       "args": {
@@ -237,7 +286,7 @@
         "attributes": {}
       }
     },
-  "total_flos": 7953264779722752.0,
+  "total_flos": 1.0572077400588288e+16,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:bf5f0cf90aa3deddbb881f441c0237bff1fd72ac4c2cbf0e67c55438798ba345
+oid sha256:586972196f15dd32a3d87fb4e0aad2c5279b9a281041e5bf0b54d207daa83d8a
 size 6776
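
training_args.bin keeps its size (6776 bytes) but its contents change, as the new hash shows. Taken together, these files form a standard transformers Trainer checkpoint, so the run can be resumed from this directory. A minimal sketch, assuming a trainer built with the same model, data and TrainingArguments as the original run; none of that setup is recorded in this commit.

# `trainer` is a hypothetical transformers.Trainer configured like the original run.
trainer.train(resume_from_checkpoint="last-checkpoint")
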