dimasik87 committed
Commit 422bc53 · verified · 1 Parent(s): 2071033

Training in progress, step 20, checkpoint

last-checkpoint/adapter_config.json CHANGED
@@ -10,22 +10,22 @@
  "layers_pattern": null,
  "layers_to_transform": null,
  "loftq_config": {},
- "lora_alpha": 16,
- "lora_dropout": 0.1,
+ "lora_alpha": 32,
+ "lora_dropout": 0.05,
  "megatron_config": null,
  "megatron_core": "megatron.core",
  "modules_to_save": null,
  "peft_type": "LORA",
- "r": 8,
+ "r": 16,
  "rank_pattern": {},
  "revision": null,
  "target_modules": [
- "k_proj",
+ "gate_proj",
  "up_proj",
- "v_proj",
  "down_proj",
  "q_proj",
- "gate_proj",
+ "v_proj",
+ "k_proj",
  "o_proj"
  ],
  "task_type": "CAUSAL_LM",
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:348195e700ee1bfc87c018227e4d2f3a0bb511d54afbfd2975d7946c4613726f
- size 83945296
+ oid sha256:d4d9d7f6f5a49802255561a63ff98d98ecae604938870de542fd695783388eaf
+ size 167832240
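
The adapter weights roughly double in size (83,945,296 → 167,832,240 bytes), which is consistent with the LoRA rank doubling from 8 to 16. A minimal sketch for loading this checkpoint's adapter on top of its base model; the base-model identifier is a placeholder, since the base model is not named anywhere in this diff.

```python
from peft import PeftModel
from transformers import AutoModelForCausalLM

BASE_MODEL = "<base-model-id>"  # placeholder: the base model is not part of this commit

# Load the base model, then attach the LoRA adapter stored in this checkpoint directory.
base = AutoModelForCausalLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, "last-checkpoint")
```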
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:e6d4d7b6b265001a28d87e37db3ba41199ad289d0b76d77febb0024d0beeb3b6
- size 168149074
+ oid sha256:ed0c546cccbaca811653df4de7f6dedde0b93c5dec7385a6c50e63a4b1a76414
+ size 335922386
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:145104a4b80a300d0bbedab2706c4b63661da76b1ba2a984f6b38fc79f23f355
+ oid sha256:e94220cd9d4b14ce68864f3b3bce78920776716d21b826776f287a8e2f967b29
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8bb840dbc65c27ef8840855786e35bfea033749e5d7571cf601f6645aa29cc1e
+ oid sha256:5d605401690d7669ff16aeaca6820cbd8d0d605afe748c51045ce90888810a22
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,16 +1,16 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.001360019946959222,
- "eval_steps": 3,
- "global_step": 15,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 9.066799646394814e-05,
- "grad_norm": 0.9196107983589172,
  "learning_rate": 2e-05,
  "loss": 0.9466,
  "step": 1
@@ -18,155 +18,190 @@
  {
  "epoch": 9.066799646394814e-05,
  "eval_loss": 1.1514371633529663,
- "eval_runtime": 280.9406,
- "eval_samples_per_second": 8.265,
- "eval_steps_per_second": 8.265,
  "step": 1
  },
  {
  "epoch": 0.0001813359929278963,
- "grad_norm": 1.2303106784820557,
  "learning_rate": 4e-05,
  "loss": 1.2689,
  "step": 2
  },
  {
  "epoch": 0.00027200398939184443,
- "grad_norm": 0.953157901763916,
  "learning_rate": 6e-05,
- "loss": 0.9876,
  "step": 3
  },
  {
- "epoch": 0.00027200398939184443,
- "eval_loss": 1.1419087648391724,
- "eval_runtime": 281.2312,
- "eval_samples_per_second": 8.257,
- "eval_steps_per_second": 8.257,
- "step": 3
  },
  {
  "epoch": 0.0003626719858557926,
- "grad_norm": 1.2283321619033813,
- "learning_rate": 8e-05,
- "loss": 0.8963,
  "step": 4
  },
  {
  "epoch": 0.00045333998231974066,
- "grad_norm": 1.0397660732269287,
  "learning_rate": 0.0001,
- "loss": 1.064,
  "step": 5
  },
  {
  "epoch": 0.0005440079787836889,
- "grad_norm": 1.3788177967071533,
  "learning_rate": 0.00012,
- "loss": 1.311,
- "step": 6
- },
- {
- "epoch": 0.0005440079787836889,
- "eval_loss": 0.9817929267883301,
- "eval_runtime": 281.7359,
- "eval_samples_per_second": 8.242,
- "eval_steps_per_second": 8.242,
  "step": 6
  },
  {
  "epoch": 0.000634675975247637,
- "grad_norm": 1.0661911964416504,
  "learning_rate": 0.00014,
- "loss": 0.9209,
  "step": 7
  },
  {
  "epoch": 0.0007253439717115851,
- "grad_norm": 1.0511986017227173,
  "learning_rate": 0.00016,
- "loss": 0.8409,
  "step": 8
  },
  {
- "epoch": 0.0008160119681755333,
- "grad_norm": 1.0703239440917969,
- "learning_rate": 0.00018,
- "loss": 0.8303,
- "step": 9
  },
  {
  "epoch": 0.0008160119681755333,
- "eval_loss": 0.6860486268997192,
- "eval_runtime": 285.0723,
- "eval_samples_per_second": 8.145,
- "eval_steps_per_second": 8.145,
  "step": 9
  },
  {
  "epoch": 0.0009066799646394813,
- "grad_norm": 1.1654818058013916,
  "learning_rate": 0.0002,
- "loss": 0.7193,
  "step": 10
  },
  {
  "epoch": 0.0009973479611034295,
- "grad_norm": 1.2375497817993164,
- "learning_rate": 0.00019781476007338058,
- "loss": 0.6006,
  "step": 11
  },
  {
  "epoch": 0.0010880159575673777,
- "grad_norm": 0.930781900882721,
- "learning_rate": 0.0001913545457642601,
- "loss": 0.4477,
  "step": 12
  },
  {
  "epoch": 0.0010880159575673777,
- "eval_loss": 0.43009814620018005,
- "eval_runtime": 283.6546,
- "eval_samples_per_second": 8.186,
- "eval_steps_per_second": 8.186,
  "step": 12
  },
  {
  "epoch": 0.0011786839540313258,
- "grad_norm": 1.0492234230041504,
- "learning_rate": 0.00018090169943749476,
- "loss": 0.4208,
  "step": 13
  },
  {
  "epoch": 0.001269351950495274,
- "grad_norm": 0.8570551872253418,
- "learning_rate": 0.00016691306063588583,
- "loss": 0.3245,
  "step": 14
  },
  {
  "epoch": 0.001360019946959222,
- "grad_norm": 1.2632145881652832,
- "learning_rate": 0.00015000000000000001,
- "loss": 0.2958,
  "step": 15
  },
  {
- "epoch": 0.001360019946959222,
- "eval_loss": 0.2870440185070038,
- "eval_runtime": 282.0334,
- "eval_samples_per_second": 8.233,
- "eval_steps_per_second": 8.233,
- "step": 15
  }
  ],
  "logging_steps": 1,
- "max_steps": 25,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
- "save_steps": 3,
  "stateful_callbacks": {
  "TrainerControl": {
  "args": {
@@ -179,7 +214,7 @@
  "attributes": {}
  }
  },
- "total_flos": 5733749027241984.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
 
  {
  "best_metric": null,
  "best_model_checkpoint": null,
+ "epoch": 0.0018133599292789627,
+ "eval_steps": 4,
+ "global_step": 20,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
  "log_history": [
  {
  "epoch": 9.066799646394814e-05,
+ "grad_norm": 1.305655837059021,
  "learning_rate": 2e-05,
  "loss": 0.9466,
  "step": 1
 
  {
  "epoch": 9.066799646394814e-05,
  "eval_loss": 1.1514371633529663,
+ "eval_runtime": 281.757,
+ "eval_samples_per_second": 8.241,
+ "eval_steps_per_second": 8.241,
  "step": 1
  },
  {
  "epoch": 0.0001813359929278963,
+ "grad_norm": 1.7540494203567505,
  "learning_rate": 4e-05,
  "loss": 1.2689,
  "step": 2
  },
  {
  "epoch": 0.00027200398939184443,
+ "grad_norm": 1.3806679248809814,
  "learning_rate": 6e-05,
+ "loss": 0.9866,
  "step": 3
  },
  {
+ "epoch": 0.0003626719858557926,
+ "grad_norm": 1.6540546417236328,
+ "learning_rate": 8e-05,
+ "loss": 0.886,
+ "step": 4
  },
  {
  "epoch": 0.0003626719858557926,
+ "eval_loss": 1.063585877418518,
+ "eval_runtime": 281.615,
+ "eval_samples_per_second": 8.245,
+ "eval_steps_per_second": 8.245,
  "step": 4
  },
  {
  "epoch": 0.00045333998231974066,
+ "grad_norm": 1.351820945739746,
  "learning_rate": 0.0001,
+ "loss": 1.0148,
  "step": 5
  },
  {
  "epoch": 0.0005440079787836889,
+ "grad_norm": 1.6633617877960205,
  "learning_rate": 0.00012,
+ "loss": 1.1922,
  "step": 6
  },
  {
  "epoch": 0.000634675975247637,
+ "grad_norm": 1.216033697128296,
  "learning_rate": 0.00014,
+ "loss": 0.7838,
  "step": 7
  },
  {
  "epoch": 0.0007253439717115851,
+ "grad_norm": 1.0886664390563965,
  "learning_rate": 0.00016,
+ "loss": 0.6905,
  "step": 8
  },
  {
+ "epoch": 0.0007253439717115851,
+ "eval_loss": 0.6156781911849976,
+ "eval_runtime": 282.2824,
+ "eval_samples_per_second": 8.226,
+ "eval_steps_per_second": 8.226,
+ "step": 8
  },
  {
  "epoch": 0.0008160119681755333,
+ "grad_norm": 1.2936314344406128,
+ "learning_rate": 0.00018,
+ "loss": 0.6603,
  "step": 9
  },
  {
  "epoch": 0.0009066799646394813,
+ "grad_norm": 1.429738163948059,
  "learning_rate": 0.0002,
+ "loss": 0.518,
  "step": 10
  },
  {
  "epoch": 0.0009973479611034295,
+ "grad_norm": 1.501217007637024,
+ "learning_rate": 0.0001996917333733128,
+ "loss": 0.475,
  "step": 11
  },
  {
  "epoch": 0.0010880159575673777,
+ "grad_norm": 1.0113286972045898,
+ "learning_rate": 0.00019876883405951377,
+ "loss": 0.3233,
  "step": 12
  },
  {
  "epoch": 0.0010880159575673777,
+ "eval_loss": 0.29599475860595703,
+ "eval_runtime": 282.4723,
+ "eval_samples_per_second": 8.22,
+ "eval_steps_per_second": 8.22,
  "step": 12
  },
  {
  "epoch": 0.0011786839540313258,
+ "grad_norm": 1.045129656791687,
+ "learning_rate": 0.00019723699203976766,
+ "loss": 0.305,
  "step": 13
  },
  {
  "epoch": 0.001269351950495274,
+ "grad_norm": 0.7782332897186279,
+ "learning_rate": 0.00019510565162951537,
+ "loss": 0.2367,
  "step": 14
  },
  {
  "epoch": 0.001360019946959222,
+ "grad_norm": 1.1300699710845947,
+ "learning_rate": 0.0001923879532511287,
+ "loss": 0.1923,
  "step": 15
  },
  {
+ "epoch": 0.0014506879434231703,
+ "grad_norm": 0.6594939231872559,
+ "learning_rate": 0.0001891006524188368,
+ "loss": 0.1823,
+ "step": 16
+ },
+ {
+ "epoch": 0.0014506879434231703,
+ "eval_loss": 0.2610986828804016,
+ "eval_runtime": 282.2669,
+ "eval_samples_per_second": 8.226,
+ "eval_steps_per_second": 8.226,
+ "step": 16
+ },
+ {
+ "epoch": 0.0015413559398871183,
+ "grad_norm": 1.0713460445404053,
+ "learning_rate": 0.00018526401643540922,
+ "loss": 0.2457,
+ "step": 17
+ },
+ {
+ "epoch": 0.0016320239363510666,
+ "grad_norm": 1.2344204187393188,
+ "learning_rate": 0.00018090169943749476,
+ "loss": 0.272,
+ "step": 18
+ },
+ {
+ "epoch": 0.0017226919328150146,
+ "grad_norm": 0.7098881006240845,
+ "learning_rate": 0.0001760405965600031,
+ "loss": 0.225,
+ "step": 19
+ },
+ {
+ "epoch": 0.0018133599292789627,
+ "grad_norm": 0.8920848965644836,
+ "learning_rate": 0.00017071067811865476,
+ "loss": 0.1479,
+ "step": 20
+ },
+ {
+ "epoch": 0.0018133599292789627,
+ "eval_loss": 0.23455704748630524,
+ "eval_runtime": 282.1688,
+ "eval_samples_per_second": 8.229,
+ "eval_steps_per_second": 8.229,
+ "step": 20
  }
  ],
  "logging_steps": 1,
+ "max_steps": 50,
  "num_input_tokens_seen": 0,
  "num_train_epochs": 1,
+ "save_steps": 4,
  "stateful_callbacks": {
  "TrainerControl": {
  "args": {

  "attributes": {}
  }
  },
+ "total_flos": 7604476726738944.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null
last-checkpoint/training_args.bin CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:bf5f0cf90aa3deddbb881f441c0237bff1fd72ac4c2cbf0e67c55438798ba345
+ oid sha256:586972196f15dd32a3d87fb4e0aad2c5279b9a281041e5bf0b54d207daa83d8a
  size 6776
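
training_args.bin holds the serialized TrainingArguments, which together with optimizer.pt, scheduler.pt, rng_state.pth, and trainer_state.json is what lets a run continue from this checkpoint. A minimal resume sketch, assuming the model, datasets, and arguments are recreated exactly as in the original run (none of which are included in this commit):

```python
from transformers import Trainer

# `model`, `train_dataset`, `eval_dataset`, and `args` are assumed to be set up
# the same way as in the original run; they are not part of this diff.
trainer = Trainer(
    model=model,
    args=args,
    train_dataset=train_dataset,
    eval_dataset=eval_dataset,
)

# Restores optimizer, LR scheduler, RNG state, and trainer_state from the checkpoint.
trainer.train(resume_from_checkpoint="last-checkpoint")
```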