preetam7 committed · verified
Commit f1388b5 · 1 Parent(s): f33baf3

Upload 11 files
adapter_config.json ADDED
@@ -0,0 +1,31 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "microsoft/Phi-3-mini-4k-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 16,
+  "lora_dropout": 0,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 8,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "qkv_proj",
+    "down_proj",
+    "o_proj",
+    "gate_up_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
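
This adapter_config.json describes a rank-8 LoRA (alpha 16, no dropout) over all four of Phi-3-mini's linear projection modules. A minimal loading sketch with 🤗 PEFT, where `path/to/adapter` is a placeholder for this repo (local checkout or Hub id):

```python
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

# Base model named in adapter_config.json.
base = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    torch_dtype="auto",
    trust_remote_code=True,
)
# "path/to/adapter" is a placeholder for this repo (local path or Hub id).
model = PeftModel.from_pretrained(base, "path/to/adapter")
tokenizer = AutoTokenizer.from_pretrained("path/to/adapter")
```

Because `inference_mode` is true, the LoRA weights load frozen; passing `is_trainable=True` to `PeftModel.from_pretrained` re-enables them for further training.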
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d9c54b81b7de53f7d5e8be09b01ddf4778b4f0cf1e55e9f086ccb5e569647b1d
+size 50365768
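
The weights themselves are stored via git-lfs; the three lines above are only the pointer. A quick integrity check after downloading the real file, using just the standard library (the local filename is an assumption):

```python
import hashlib

# Recompute the git-lfs oid for a downloaded adapter_model.safetensors
# and compare it to the sha256 in the pointer above.
h = hashlib.sha256()
with open("adapter_model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
print(h.hexdigest() == "d9c54b81b7de53f7d5e8be09b01ddf4778b4f0cf1e55e9f086ccb5e569647b1d")
```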
added_tokens.json ADDED
@@ -0,0 +1,13 @@
+{
+  "<|assistant|>": 32001,
+  "<|endoftext|>": 32000,
+  "<|end|>": 32007,
+  "<|placeholder1|>": 32002,
+  "<|placeholder2|>": 32003,
+  "<|placeholder3|>": 32004,
+  "<|placeholder4|>": 32005,
+  "<|placeholder5|>": 32008,
+  "<|placeholder6|>": 32009,
+  "<|system|>": 32006,
+  "<|user|>": 32010
+}
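
These ids extend the base SentencePiece vocabulary (ids 0–31999) with Phi-3's chat-control tokens. A quick sanity check that a loaded tokenizer agrees with this mapping, again using a placeholder path:

```python
from transformers import AutoTokenizer

# Placeholder path; point this at the repo's tokenizer files.
tok = AutoTokenizer.from_pretrained("path/to/adapter")

# Each printed id should match the mapping in added_tokens.json.
for token in ("<|endoftext|>", "<|system|>", "<|user|>", "<|assistant|>", "<|end|>"):
    print(token, tok.convert_tokens_to_ids(token))
```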
llamaboard_config.yaml ADDED
@@ -0,0 +1,63 @@
+top.booster: none
+top.checkpoint_path: null
+top.finetuning_type: lora
+top.model_name: Phi3-4B-4k-Chat
+top.quantization_bit: '4'
+top.rope_scaling: none
+top.template: phi
+top.visual_inputs: false
+train.additional_target: ''
+train.badam_mode: layer
+train.badam_switch_interval: 50
+train.badam_switch_mode: ascending
+train.badam_update_ratio: 0.05
+train.batch_size: 2
+train.compute_type: fp16
+train.create_new_adapter: false
+train.cutoff_len: 4096
+train.dataset:
+- claimver_kgllm
+train.dataset_dir: data
+train.ds_offload: false
+train.ds_stage: none
+train.freeze_extra_modules: ''
+train.freeze_trainable_layers: 2
+train.freeze_trainable_modules: all
+train.galore_rank: 16
+train.galore_scale: 0.25
+train.galore_target: all
+train.galore_update_interval: 200
+train.gradient_accumulation_steps: 8
+train.learning_rate: 5e-5
+train.logging_steps: 5
+train.lora_alpha: 16
+train.lora_dropout: 0
+train.lora_rank: 8
+train.lora_target: ''
+train.loraplus_lr_ratio: 0
+train.lr_scheduler_type: cosine
+train.max_grad_norm: '1.0'
+train.max_samples: '100000'
+train.neftune_alpha: 0
+train.num_train_epochs: '2'
+train.optim: adamw_torch
+train.packing: false
+train.ppo_score_norm: false
+train.ppo_whiten_rewards: false
+train.pref_beta: 0.1
+train.pref_ftx: 0
+train.pref_loss: sigmoid
+train.report_to: false
+train.resize_vocab: false
+train.reward_model: null
+train.save_steps: 100
+train.shift_attn: false
+train.training_stage: Supervised Fine-Tuning
+train.upcast_layernorm: false
+train.use_badam: false
+train.use_dora: false
+train.use_galore: false
+train.use_llama_pro: false
+train.use_rslora: false
+train.val_size: 0
+train.warmup_steps: 0
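
This is the LLaMA Board (LLaMA-Factory web UI) snapshot of the run: a QLoRA-style setup, i.e. a 4-bit quantized base model (`top.quantization_bit: '4'`) with fp16 compute and LoRA rank 8. A sketch of the corresponding 4-bit base-model load; the specific bitsandbytes options are an assumption, since LLaMA-Factory configures them internally:

```python
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

# 4-bit load mirroring top.quantization_bit: '4' with fp16 compute
# (train.compute_type: fp16). Exact bnb settings here are assumptions.
bnb = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_compute_dtype=torch.float16,
)
model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3-mini-4k-instruct",
    quantization_config=bnb,
    trust_remote_code=True,
)
```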
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+{
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|end|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9e556afd44213b6bd1be2b850ebbbd98f5481437a8021afaf58ee7fb1818d347
+size 499723
tokenizer_config.json ADDED
@@ -0,0 +1,131 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": false
+    },
+    "32000": {
+      "content": "<|endoftext|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32001": {
+      "content": "<|assistant|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32002": {
+      "content": "<|placeholder1|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32003": {
+      "content": "<|placeholder2|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32004": {
+      "content": "<|placeholder3|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32005": {
+      "content": "<|placeholder4|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32006": {
+      "content": "<|system|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32007": {
+      "content": "<|end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "32008": {
+      "content": "<|placeholder5|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32009": {
+      "content": "<|placeholder6|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    },
+    "32010": {
+      "content": "<|user|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": true,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "bos_token": "<s>",
+  "chat_template": "{% set system_message = 'You are a helpful AI assistant.' %}{% if messages[0]['role'] == 'system' %}{% set system_message = messages[0]['content'] %}{% endif %}{% if system_message is defined %}{{ '<s>' + '<|system|>\n' + system_message + '<|end|>\n' }}{% endif %}{% for message in messages %}{% set content = message['content'] %}{% if message['role'] == 'user' %}{{ '<|user|>\n' + content + '<|end|>\n<|assistant|>\n' }}{% elif message['role'] == 'assistant' %}{{ content + '<|end|>' + '\n' }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "<|end|>",
+  "legacy": false,
+  "model_max_length": 4096,
+  "pad_token": "<|endoftext|>",
+  "padding_side": "right",
+  "sp_model_kwargs": {},
+  "split_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
+}
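
The `chat_template` wraps conversations in Phi-3's `<|system|>`/`<|user|>`/`<|assistant|>` tags. A short render check with a placeholder path; note that this particular template already emits the trailing `<|assistant|>\n` after every user turn, so no separate generation prompt needs to be added:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("path/to/adapter")  # placeholder path

messages = [
    {"role": "system", "content": "You are a helpful AI assistant."},
    {"role": "user", "content": "Verify this claim."},
]
# Renders: <s><|system|>\n...<|end|>\n<|user|>\n...<|end|>\n<|assistant|>\n
prompt = tok.apply_chat_template(messages, tokenize=False)
print(prompt)
```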
trainer_state.json ADDED
@@ -0,0 +1,379 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.9952941176470587,
+  "eval_steps": 500,
+  "global_step": 212,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.047058823529411764,
+      "grad_norm": 0.5000221729278564,
+      "learning_rate": 4.9931407070965254e-05,
+      "loss": 0.841,
+      "num_input_tokens_seen": 114256,
+      "step": 5
+    },
+    {
+      "epoch": 0.09411764705882353,
+      "grad_norm": 0.38539719581604004,
+      "learning_rate": 4.97260046830541e-05,
+      "loss": 0.692,
+      "num_input_tokens_seen": 225200,
+      "step": 10
+    },
+    {
+      "epoch": 0.1411764705882353,
+      "grad_norm": 0.275894433259964,
+      "learning_rate": 4.9384919968379945e-05,
+      "loss": 0.5684,
+      "num_input_tokens_seen": 338592,
+      "step": 15
+    },
+    {
+      "epoch": 0.18823529411764706,
+      "grad_norm": 0.22247478365898132,
+      "learning_rate": 4.891002460691306e-05,
+      "loss": 0.5256,
+      "num_input_tokens_seen": 455856,
+      "step": 20
+    },
+    {
+      "epoch": 0.23529411764705882,
+      "grad_norm": 0.20748302340507507,
+      "learning_rate": 4.83039245557597e-05,
+      "loss": 0.483,
+      "num_input_tokens_seen": 569056,
+      "step": 25
+    },
+    {
+      "epoch": 0.2823529411764706,
+      "grad_norm": 0.207512766122818,
+      "learning_rate": 4.756994574914359e-05,
+      "loss": 0.4401,
+      "num_input_tokens_seen": 683408,
+      "step": 30
+    },
+    {
+      "epoch": 0.32941176470588235,
+      "grad_norm": 0.18322080373764038,
+      "learning_rate": 4.6712115847560355e-05,
+      "loss": 0.4162,
+      "num_input_tokens_seen": 802032,
+      "step": 35
+    },
+    {
+      "epoch": 0.3764705882352941,
+      "grad_norm": 0.16893012821674347,
+      "learning_rate": 4.573514213625505e-05,
+      "loss": 0.3951,
+      "num_input_tokens_seen": 917808,
+      "step": 40
+    },
+    {
+      "epoch": 0.4235294117647059,
+      "grad_norm": 0.17897149920463562,
+      "learning_rate": 4.464438569430354e-05,
+      "loss": 0.4007,
+      "num_input_tokens_seen": 1037840,
+      "step": 45
+    },
+    {
+      "epoch": 0.47058823529411764,
+      "grad_norm": 0.14339417219161987,
+      "learning_rate": 4.344583197604318e-05,
+      "loss": 0.3672,
+      "num_input_tokens_seen": 1154864,
+      "step": 50
+    },
+    {
+      "epoch": 0.5176470588235295,
+      "grad_norm": 0.1722479611635208,
+      "learning_rate": 4.214605796628527e-05,
+      "loss": 0.3764,
+      "num_input_tokens_seen": 1273984,
+      "step": 55
+    },
+    {
+      "epoch": 0.5647058823529412,
+      "grad_norm": 0.15474933385849,
+      "learning_rate": 4.075219608954278e-05,
+      "loss": 0.361,
+      "num_input_tokens_seen": 1391904,
+      "step": 60
+    },
+    {
+      "epoch": 0.611764705882353,
+      "grad_norm": 0.14795532822608948,
+      "learning_rate": 3.927189507131938e-05,
+      "loss": 0.3472,
+      "num_input_tokens_seen": 1506640,
+      "step": 65
+    },
+    {
+      "epoch": 0.6588235294117647,
+      "grad_norm": 0.1496661901473999,
+      "learning_rate": 3.7713277966230514e-05,
+      "loss": 0.3497,
+      "num_input_tokens_seen": 1619120,
+      "step": 70
+    },
+    {
+      "epoch": 0.7058823529411765,
+      "grad_norm": 0.15912510454654694,
+      "learning_rate": 3.608489758327472e-05,
+      "loss": 0.3552,
+      "num_input_tokens_seen": 1739248,
+      "step": 75
+    },
+    {
+      "epoch": 0.7529411764705882,
+      "grad_norm": 0.17292849719524384,
+      "learning_rate": 3.4395689552855955e-05,
+      "loss": 0.3369,
+      "num_input_tokens_seen": 1854336,
+      "step": 80
+    },
+    {
+      "epoch": 0.8,
+      "grad_norm": 0.1803598403930664,
+      "learning_rate": 3.265492329309867e-05,
+      "loss": 0.3302,
+      "num_input_tokens_seen": 1971904,
+      "step": 85
+    },
+    {
+      "epoch": 0.8470588235294118,
+      "grad_norm": 0.171905979514122,
+      "learning_rate": 3.0872151144524595e-05,
+      "loss": 0.3305,
+      "num_input_tokens_seen": 2080496,
+      "step": 90
+    },
+    {
+      "epoch": 0.8941176470588236,
+      "grad_norm": 0.17595848441123962,
+      "learning_rate": 2.9057155952211502e-05,
+      "loss": 0.3239,
+      "num_input_tokens_seen": 2191968,
+      "step": 95
+    },
+    {
+      "epoch": 0.9411764705882353,
+      "grad_norm": 0.16902042925357819,
+      "learning_rate": 2.7219897383073373e-05,
+      "loss": 0.3111,
+      "num_input_tokens_seen": 2306544,
+      "step": 100
+    },
+    {
+      "epoch": 0.9882352941176471,
+      "grad_norm": 0.17742857336997986,
+      "learning_rate": 2.537045727284232e-05,
+      "loss": 0.2959,
+      "num_input_tokens_seen": 2418400,
+      "step": 105
+    },
+    {
+      "epoch": 1.035294117647059,
+      "grad_norm": 0.16856059432029724,
+      "learning_rate": 2.3518984302657146e-05,
+      "loss": 0.306,
+      "num_input_tokens_seen": 2537984,
+      "step": 110
+    },
+    {
+      "epoch": 1.0823529411764705,
+      "grad_norm": 0.19464781880378723,
+      "learning_rate": 2.1675638308842145e-05,
+      "loss": 0.3206,
+      "num_input_tokens_seen": 2656416,
+      "step": 115
+    },
+    {
+      "epoch": 1.1294117647058823,
+      "grad_norm": 0.17075157165527344,
+      "learning_rate": 1.9850534531472546e-05,
+      "loss": 0.3053,
+      "num_input_tokens_seen": 2773280,
+      "step": 120
+    },
+    {
+      "epoch": 1.1764705882352942,
+      "grad_norm": 0.173744335770607,
+      "learning_rate": 1.8053688107658908e-05,
+      "loss": 0.3176,
+      "num_input_tokens_seen": 2889024,
+      "step": 125
+    },
+    {
+      "epoch": 1.223529411764706,
+      "grad_norm": 0.18248489499092102,
+      "learning_rate": 1.6294959114140034e-05,
+      "loss": 0.2997,
+      "num_input_tokens_seen": 3004880,
+      "step": 130
+    },
+    {
+      "epoch": 1.2705882352941176,
+      "grad_norm": 0.1806834191083908,
+      "learning_rate": 1.4583998460759424e-05,
+      "loss": 0.2954,
+      "num_input_tokens_seen": 3117936,
+      "step": 135
+    },
+    {
+      "epoch": 1.3176470588235294,
+      "grad_norm": 0.1907871961593628,
+      "learning_rate": 1.2930194931731382e-05,
+      "loss": 0.3048,
+      "num_input_tokens_seen": 3231712,
+      "step": 140
+    },
+    {
+      "epoch": 1.3647058823529412,
+      "grad_norm": 0.20322641730308533,
+      "learning_rate": 1.1342623665304209e-05,
+      "loss": 0.3208,
+      "num_input_tokens_seen": 3350528,
+      "step": 145
+    },
+    {
+      "epoch": 1.4117647058823528,
+      "grad_norm": 0.16647660732269287,
+      "learning_rate": 9.829996354535172e-06,
+      "loss": 0.3015,
+      "num_input_tokens_seen": 3464144,
+      "step": 150
+    },
+    {
+      "epoch": 1.4588235294117646,
+      "grad_norm": 0.18004418909549713,
+      "learning_rate": 8.400613442446948e-06,
+      "loss": 0.3106,
+      "num_input_tokens_seen": 3580464,
+      "step": 155
+    },
+    {
+      "epoch": 1.5058823529411764,
+      "grad_norm": 0.1924523115158081,
+      "learning_rate": 7.062318573891716e-06,
+      "loss": 0.3055,
+      "num_input_tokens_seen": 3695040,
+      "step": 160
+    },
+    {
+      "epoch": 1.5529411764705883,
+      "grad_norm": 0.19149142503738403,
+      "learning_rate": 5.822455554065217e-06,
+      "loss": 0.3069,
+      "num_input_tokens_seen": 3812512,
+      "step": 165
+    },
+    {
+      "epoch": 1.6,
+      "grad_norm": 0.19474638998508453,
+      "learning_rate": 4.687828049857967e-06,
+      "loss": 0.2806,
+      "num_input_tokens_seen": 3922976,
+      "step": 170
+    },
+    {
+      "epoch": 1.6470588235294117,
+      "grad_norm": 0.19301049411296844,
+      "learning_rate": 3.6646622551801345e-06,
+      "loss": 0.2782,
+      "num_input_tokens_seen": 4038656,
+      "step": 175
+    },
+    {
+      "epoch": 1.6941176470588235,
+      "grad_norm": 0.20220619440078735,
+      "learning_rate": 2.75857272513132e-06,
+      "loss": 0.3066,
+      "num_input_tokens_seen": 4154032,
+      "step": 180
+    },
+    {
+      "epoch": 1.7411764705882353,
+      "grad_norm": 0.19466643035411835,
+      "learning_rate": 1.9745315664982276e-06,
+      "loss": 0.2908,
+      "num_input_tokens_seen": 4267600,
+      "step": 185
+    },
+    {
+      "epoch": 1.788235294117647,
+      "grad_norm": 0.17281493544578552,
+      "learning_rate": 1.3168411536452152e-06,
+      "loss": 0.2829,
+      "num_input_tokens_seen": 4382640,
+      "step": 190
+    },
+    {
+      "epoch": 1.835294117647059,
+      "grad_norm": 0.19972845911979675,
+      "learning_rate": 7.891105195175358e-07,
+      "loss": 0.2968,
+      "num_input_tokens_seen": 4503040,
+      "step": 195
+    },
+    {
+      "epoch": 1.8823529411764706,
+      "grad_norm": 0.17375272512435913,
+      "learning_rate": 3.9423555131007925e-07,
+      "loss": 0.2968,
+      "num_input_tokens_seen": 4618144,
+      "step": 200
+    },
+    {
+      "epoch": 1.9294117647058824,
+      "grad_norm": 0.17855483293533325,
+      "learning_rate": 1.343830994765982e-07,
+      "loss": 0.2879,
+      "num_input_tokens_seen": 4731744,
+      "step": 205
+    },
+    {
+      "epoch": 1.9764705882352942,
+      "grad_norm": 0.20227883756160736,
+      "learning_rate": 1.0979087280141298e-08,
+      "loss": 0.3026,
+      "num_input_tokens_seen": 4844416,
+      "step": 210
+    },
+    {
+      "epoch": 1.9952941176470587,
+      "num_input_tokens_seen": 4890624,
+      "step": 212,
+      "total_flos": 1.0960363294870733e+17,
+      "train_loss": 0.36075482390961555,
+      "train_runtime": 1824.8202,
+      "train_samples_per_second": 3.726,
+      "train_steps_per_second": 0.116
+    }
+  ],
+  "logging_steps": 5,
+  "max_steps": 212,
+  "num_input_tokens_seen": 4890624,
+  "num_train_epochs": 2,
+  "save_steps": 100,
+  "stateful_callbacks": {
+    "TrainerControl": {
+      "args": {
+        "should_epoch_stop": false,
+        "should_evaluate": false,
+        "should_log": false,
+        "should_save": true,
+        "should_training_stop": false
+      },
+      "attributes": {}
+    }
+  },
+  "total_flos": 1.0960363294870733e+17,
+  "train_batch_size": 2,
+  "trial_name": null,
+  "trial_params": null
+}
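
The log shows training loss falling from 0.84 to roughly 0.30 over 212 steps (2 epochs, ~4.9M input tokens). A small sketch that re-plots the curve from this file; the local filename is an assumption:

```python
import json
import matplotlib.pyplot as plt

# Re-plot the training curve recorded in trainer_state.json.
with open("trainer_state.json") as f:
    state = json.load(f)

# Only the periodic log entries carry a "loss" key; the final summary
# entry reports "train_loss" instead and is skipped here.
steps = [e["step"] for e in state["log_history"] if "loss" in e]
losses = [e["loss"] for e in state["log_history"] if "loss" in e]

plt.plot(steps, losses)
plt.xlabel("step")
plt.ylabel("training loss")
plt.title("Phi-3 LoRA SFT, 212 steps")
plt.show()
```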
training_args.bin ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6044ecd5cce8a7c0f82ecb44a89fed522dd530bc42bf831f6f045c1fbb64ccaa
+size 5304
training_args.yaml ADDED
@@ -0,0 +1,33 @@
+cutoff_len: 4096
+dataset: claimver_kgllm
+dataset_dir: data
+ddp_timeout: 180000000
+do_train: true
+finetuning_type: lora
+flash_attn: auto
+fp16: true
+gradient_accumulation_steps: 8
+include_num_input_tokens_seen: true
+learning_rate: 5.0e-05
+logging_steps: 5
+lora_alpha: 16
+lora_dropout: 0
+lora_rank: 8
+lora_target: all
+lr_scheduler_type: cosine
+max_grad_norm: 1.0
+max_samples: 100000
+model_name_or_path: microsoft/Phi-3-mini-4k-instruct
+num_train_epochs: 2.0
+optim: adamw_torch
+output_dir: saves/Phi3-4B-4k-Chat/lora/phi3-4B4K-chat_claimver_run2
+packing: false
+per_device_train_batch_size: 2
+plot_loss: true
+preprocessing_num_workers: 16
+quantization_bit: 4
+report_to: none
+save_steps: 100
+stage: sft
+template: phi
+warmup_steps: 0
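
This is the resolved training config: SFT on the claimver_kgllm dataset with an effective batch size of 16 (2 per device × 8 gradient-accumulation steps). A sketch of relaunching the run, assuming a LLaMA-Factory install that provides the `llamafactory-cli` entry point and that the dataset is registered locally:

```python
import subprocess

# Relaunch with the same resolved arguments. Assumes llamafactory-cli is
# on PATH and claimver_kgllm is registered in data/dataset_info.json.
subprocess.run(["llamafactory-cli", "train", "training_args.yaml"], check=True)
```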