Kat380 committed on
Commit 6fe4dca · verified · 1 Parent(s): fbb2e0a

upload model checkpoints

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ tokenizer.json filter=lfs diff=lfs merge=lfs -text
README.md CHANGED
@@ -1,3 +1,76 @@
- ---
- license: gemma
- ---
+ ---
+ library_name: peft
+ tags:
+ - alignment-handbook
+ - generated_from_trainer
+ datasets:
+ - llama-duo/synth_summarize_dataset_dedup
+ base_model: google/gemma-7b
+ model-index:
+ - name: gemma7b-summarize-gemini1_5flash-4k
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # gemma7b-summarize-gemini1_5flash-4k
+
+ This model is a fine-tuned version of [google/gemma-7b](https://huggingface.co/google/gemma-7b) on the llama-duo/synth_summarize_dataset_dedup dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 6.0056
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 4
+ - eval_batch_size: 2
+ - seed: 42
+ - distributed_type: multi-GPU
+ - num_devices: 8
+ - gradient_accumulation_steps: 2
+ - total_train_batch_size: 64
+ - total_eval_batch_size: 16
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: cosine
+ - lr_scheduler_warmup_ratio: 0.1
+ - num_epochs: 10
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss |
+ |:-------------:|:-----:|:----:|:---------------:|
+ | 45.0263       | 1.0   | 7    | 13.7531         |
+ | 26.2686       | 2.0   | 14   | 8.7768          |
+ | 19.4522       | 3.0   | 21   | 7.7116          |
+ | 17.9837       | 4.0   | 28   | 7.1805          |
+ | 14.7187       | 5.0   | 35   | 7.0784          |
+ | 12.1795       | 6.0   | 42   | 7.0367          |
+ | 9.1407        | 7.0   | 49   | 6.5706          |
+ | 5.133         | 8.0   | 56   | 6.1854          |
+ | 4.2385        | 9.0   | 63   | 6.0276          |
+ | 3.8163        | 10.0  | 70   | 6.0056          |
+
+ ### Framework versions
+
+ - PEFT 0.10.0
+ - Transformers 4.40.0
+ - Pytorch 2.1.2+cu121
+ - Datasets 2.18.0
+ - Tokenizers 0.19.1
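This commit ships a LoRA adapter for google/gemma-7b rather than full model weights, so the checkpoint is used by attaching the adapter to the base model with PEFT. A minimal loading sketch, assuming the adapter is published under a repo id matching the model-index name (an assumption, not stated in the commit):

```python
# Minimal sketch: attach this LoRA adapter to the google/gemma-7b base model with PEFT.
# The adapter repo id below is an assumption inferred from the model-index name; adjust to the actual repo.
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base_id = "google/gemma-7b"
adapter_id = "llama-duo/gemma7b-summarize-gemini1_5flash-4k"  # assumed repo id

tokenizer = AutoTokenizer.from_pretrained(adapter_id)          # tokenizer files are part of this commit
base = AutoModelForCausalLM.from_pretrained(base_id, torch_dtype=torch.bfloat16, device_map="auto")
model = PeftModel.from_pretrained(base, adapter_id)            # load adapter_model.safetensors on top
model.eval()
```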
adapter_config.json ADDED
@@ -0,0 +1,29 @@
+ {
+ "alpha_pattern": {},
+ "auto_mapping": null,
+ "base_model_name_or_path": "google/gemma-7b",
+ "bias": "none",
+ "fan_in_fan_out": false,
+ "inference_mode": true,
+ "init_lora_weights": true,
+ "layer_replication": null,
+ "layers_pattern": null,
+ "layers_to_transform": null,
+ "loftq_config": {},
+ "lora_alpha": 16,
+ "lora_dropout": 0.05,
+ "megatron_config": null,
+ "megatron_core": "megatron.core",
+ "modules_to_save": null,
+ "peft_type": "LORA",
+ "r": 8,
+ "rank_pattern": {},
+ "revision": null,
+ "target_modules": [
+ "q_proj",
+ "v_proj"
+ ],
+ "task_type": "CAUSAL_LM",
+ "use_dora": false,
+ "use_rslora": false
+ }
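The adapter targets only the q_proj and v_proj attention projections with rank 8. For reference, the same settings expressed as a `peft.LoraConfig`; this is a reconstruction from the JSON above, not a training script included in the commit:

```python
# Reconstruction of adapter_config.json above as a peft.LoraConfig (PEFT 0.10.0-style API).
from peft import LoraConfig

lora_config = LoraConfig(
    r=8,                                   # LoRA rank
    lora_alpha=16,                         # scaling factor
    lora_dropout=0.05,
    bias="none",
    target_modules=["q_proj", "v_proj"],   # attention query/value projections only
    task_type="CAUSAL_LM",
)
```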
adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c46e2d06e5ebe26fb7ed111074d57feffd676b6edaef0e629b879a193b9c7b4c
+ size 6437384
all_results.json ADDED
@@ -0,0 +1,14 @@
+ {
+ "epoch": 10.0,
+ "eval_loss": 6.005556583404541,
+ "eval_runtime": 0.2485,
+ "eval_samples": 25,
+ "eval_samples_per_second": 40.245,
+ "eval_steps_per_second": 4.025,
+ "total_flos": 2.1344245100262195e+17,
+ "train_loss": 14.839824295043945,
+ "train_runtime": 171.5695,
+ "train_samples": 3959,
+ "train_samples_per_second": 24.655,
+ "train_steps_per_second": 0.408
+ }
config.json ADDED
@@ -0,0 +1,44 @@
+ {
+ "_name_or_path": "google/gemma-7b",
+ "architectures": [
+ "GemmaForCausalLM"
+ ],
+ "attention_bias": false,
+ "attention_dropout": 0.0,
+ "bos_token_id": 2,
+ "eos_token_id": 1,
+ "head_dim": 256,
+ "hidden_act": "gelu",
+ "hidden_activation": null,
+ "hidden_size": 3072,
+ "initializer_range": 0.02,
+ "intermediate_size": 24576,
+ "max_position_embeddings": 8192,
+ "model_type": "gemma",
+ "num_attention_heads": 16,
+ "num_hidden_layers": 28,
+ "num_key_value_heads": 16,
+ "pad_token_id": 0,
+ "quantization_config": {
+ "_load_in_4bit": true,
+ "_load_in_8bit": false,
+ "bnb_4bit_compute_dtype": "bfloat16",
+ "bnb_4bit_quant_storage": "uint8",
+ "bnb_4bit_quant_type": "nf4",
+ "bnb_4bit_use_double_quant": false,
+ "llm_int8_enable_fp32_cpu_offload": false,
+ "llm_int8_has_fp16_weight": false,
+ "llm_int8_skip_modules": null,
+ "llm_int8_threshold": 6.0,
+ "load_in_4bit": true,
+ "load_in_8bit": false,
+ "quant_method": "bitsandbytes"
+ },
+ "rms_norm_eps": 1e-06,
+ "rope_scaling": null,
+ "rope_theta": 10000.0,
+ "torch_dtype": "bfloat16",
+ "transformers_version": "4.40.0",
+ "use_cache": true,
+ "vocab_size": 256000
+ }
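The quantization_config block records that the base model was loaded with bitsandbytes 4-bit NF4 quantization. A short sketch of the equivalent loading call, assuming the same settings are wanted at inference time:

```python
# Sketch: recreate the bitsandbytes settings from quantization_config above
# as a transformers BitsAndBytesConfig for 4-bit NF4 loading of the base model.
import torch
from transformers import AutoModelForCausalLM, BitsAndBytesConfig

bnb_config = BitsAndBytesConfig(
    load_in_4bit=True,
    bnb_4bit_quant_type="nf4",
    bnb_4bit_use_double_quant=False,
    bnb_4bit_compute_dtype=torch.bfloat16,
)

quantized_base = AutoModelForCausalLM.from_pretrained(
    "google/gemma-7b",
    quantization_config=bnb_config,
    device_map="auto",
)
```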
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+ "epoch": 10.0,
+ "eval_loss": 6.005556583404541,
+ "eval_runtime": 0.2485,
+ "eval_samples": 25,
+ "eval_samples_per_second": 40.245,
+ "eval_steps_per_second": 4.025
+ }
runs/Jun13_09-53-41_gpu1-1/events.out.tfevents.1718243958.gpu1-1.503140.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:bec456fd01d1b6f05d7ce98d55e4f1a78d9a5c084d7d7915de30768b7bbde33b
+ size 11596
runs/Jun13_09-53-41_gpu1-1/events.out.tfevents.1718244130.gpu1-1.503140.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b0d217555f80c57c81fb0ed3f5e66fc00f9b912baca47c9b289ef1e174bebeaf
+ size 354
special_tokens_map.json ADDED
@@ -0,0 +1,34 @@
+ {
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": {
+ "content": "<bos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "eos_token": {
+ "content": "<eos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "pad_token": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ },
+ "unk_token": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false
+ }
+ }
tokenizer.json ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:322a5f52ab5cab196761ab397a022d6fa3a2e1418585e532bb6efb2fedd2ae94
+ size 17477501
tokenizer_config.json ADDED
@@ -0,0 +1,70 @@
+ {
+ "add_bos_token": false,
+ "add_eos_token": false,
+ "added_tokens_decoder": {
+ "0": {
+ "content": "<pad>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "1": {
+ "content": "<eos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "2": {
+ "content": "<bos>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "3": {
+ "content": "<unk>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "106": {
+ "content": "<|im_start|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ },
+ "107": {
+ "content": "<|im_end|>",
+ "lstrip": false,
+ "normalized": false,
+ "rstrip": false,
+ "single_word": false,
+ "special": true
+ }
+ },
+ "additional_special_tokens": [
+ "<|im_start|>",
+ "<|im_end|>"
+ ],
+ "bos_token": "<bos>",
+ "chat_template": "{% if messages[0]['role'] == 'user' or messages[0]['role'] == 'system' %}{{ bos_token }}{% endif %}{% for message in messages %}{{ '<|im_start|>' + message['role'] + '\n' + message['content'] + '<|im_end|>' + '\n' }}{% endfor %}{% if add_generation_prompt %}{{ '<|im_start|>assistant\n' }}{% elif messages[-1]['role'] == 'assistant' %}{{ eos_token }}{% endif %}",
+ "clean_up_tokenization_spaces": false,
+ "eos_token": "<eos>",
+ "legacy": null,
+ "model_max_length": 2048,
+ "pad_token": "<pad>",
+ "sp_model_kwargs": {},
+ "spaces_between_special_tokens": false,
+ "tokenizer_class": "GemmaTokenizer",
+ "unk_token": "<unk>",
+ "use_default_system_prompt": false
+ }
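The chat_template above wraps each turn in ChatML-style <|im_start|>/<|im_end|> markers and prepends <bos> to the first user or system turn. A short sketch of how that template renders a prompt, assuming the tokenizer is loaded from this repository's files (the path/repo id below is a placeholder):

```python
# Sketch: render a prompt with the ChatML-style chat_template defined above.
# "path/to/this/repo" stands in for wherever these tokenizer files live (local clone or Hub repo id).
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("path/to/this/repo")
messages = [{"role": "user", "content": "Summarize the article in two sentences."}]
prompt = tokenizer.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(prompt)
# Expected shape of the rendered prompt:
# <bos><|im_start|>user
# Summarize the article in two sentences.<|im_end|>
# <|im_start|>assistant
```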
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 10.0,
+ "total_flos": 2.1344245100262195e+17,
+ "train_loss": 14.839824295043945,
+ "train_runtime": 171.5695,
+ "train_samples": 3959,
+ "train_samples_per_second": 24.655,
+ "train_steps_per_second": 0.408
+ }
trainer_state.json ADDED
@@ -0,0 +1,215 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 10.0,
+ "eval_steps": 500,
+ "global_step": 70,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.14285714285714285,
+ "grad_norm": 197.0,
+ "learning_rate": 2.857142857142857e-05,
+ "loss": 51.1707,
+ "step": 1
+ },
+ {
+ "epoch": 0.7142857142857143,
+ "grad_norm": 104.0,
+ "learning_rate": 0.00014285714285714287,
+ "loss": 45.0263,
+ "step": 5
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 13.753079414367676,
+ "eval_runtime": 0.2534,
+ "eval_samples_per_second": 39.465,
+ "eval_steps_per_second": 3.946,
+ "step": 7
+ },
+ {
+ "epoch": 1.4285714285714286,
+ "grad_norm": 20.25,
+ "learning_rate": 0.00019888308262251285,
+ "loss": 26.2686,
+ "step": 10
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 8.776823997497559,
+ "eval_runtime": 0.2365,
+ "eval_samples_per_second": 42.288,
+ "eval_steps_per_second": 4.229,
+ "step": 14
+ },
+ {
+ "epoch": 2.142857142857143,
+ "grad_norm": 4.875,
+ "learning_rate": 0.00019214762118704076,
+ "loss": 20.9714,
+ "step": 15
+ },
+ {
+ "epoch": 2.857142857142857,
+ "grad_norm": 5.0,
+ "learning_rate": 0.00017971325072229226,
+ "loss": 19.4522,
+ "step": 20
+ },
+ {
+ "epoch": 3.0,
+ "eval_loss": 7.711602210998535,
+ "eval_runtime": 0.2355,
+ "eval_samples_per_second": 42.47,
+ "eval_steps_per_second": 4.247,
+ "step": 21
+ },
+ {
+ "epoch": 3.571428571428571,
+ "grad_norm": 8.5625,
+ "learning_rate": 0.00016234898018587337,
+ "loss": 17.9837,
+ "step": 25
+ },
+ {
+ "epoch": 4.0,
+ "eval_loss": 7.1805419921875,
+ "eval_runtime": 0.2358,
+ "eval_samples_per_second": 42.415,
+ "eval_steps_per_second": 4.242,
+ "step": 28
+ },
+ {
+ "epoch": 4.285714285714286,
+ "grad_norm": 13.75,
+ "learning_rate": 0.00014112871031306119,
+ "loss": 16.9838,
+ "step": 30
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 18.375,
+ "learning_rate": 0.00011736481776669306,
+ "loss": 14.7187,
+ "step": 35
+ },
+ {
+ "epoch": 5.0,
+ "eval_loss": 7.078407287597656,
+ "eval_runtime": 0.2345,
+ "eval_samples_per_second": 42.639,
+ "eval_steps_per_second": 4.264,
+ "step": 35
+ },
+ {
+ "epoch": 5.714285714285714,
+ "grad_norm": 23.25,
+ "learning_rate": 9.252699064135758e-05,
+ "loss": 12.1795,
+ "step": 40
+ },
+ {
+ "epoch": 6.0,
+ "eval_loss": 7.036722660064697,
+ "eval_runtime": 0.2361,
+ "eval_samples_per_second": 42.352,
+ "eval_steps_per_second": 4.235,
+ "step": 42
+ },
+ {
+ "epoch": 6.428571428571429,
+ "grad_norm": 27.125,
+ "learning_rate": 6.815133497483157e-05,
+ "loss": 9.1407,
+ "step": 45
+ },
+ {
+ "epoch": 7.0,
+ "eval_loss": 6.5706281661987305,
+ "eval_runtime": 0.236,
+ "eval_samples_per_second": 42.371,
+ "eval_steps_per_second": 4.237,
+ "step": 49
+ },
+ {
+ "epoch": 7.142857142857143,
+ "grad_norm": 26.25,
+ "learning_rate": 4.574537361342407e-05,
+ "loss": 6.7417,
+ "step": 50
+ },
+ {
+ "epoch": 7.857142857142857,
+ "grad_norm": 22.25,
+ "learning_rate": 2.669481281701739e-05,
+ "loss": 5.133,
+ "step": 55
+ },
+ {
+ "epoch": 8.0,
+ "eval_loss": 6.185364723205566,
+ "eval_runtime": 0.2416,
+ "eval_samples_per_second": 41.387,
+ "eval_steps_per_second": 4.139,
+ "step": 56
+ },
+ {
+ "epoch": 8.571428571428571,
+ "grad_norm": 22.125,
+ "learning_rate": 1.2177842662977135e-05,
+ "loss": 4.2385,
+ "step": 60
+ },
+ {
+ "epoch": 9.0,
+ "eval_loss": 6.027551174163818,
+ "eval_runtime": 0.2379,
+ "eval_samples_per_second": 42.042,
+ "eval_steps_per_second": 4.204,
+ "step": 63
+ },
+ {
+ "epoch": 9.285714285714286,
+ "grad_norm": 20.5,
+ "learning_rate": 3.092271377092215e-06,
+ "loss": 3.8744,
+ "step": 65
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 20.25,
+ "learning_rate": 0.0,
+ "loss": 3.8163,
+ "step": 70
+ },
+ {
+ "epoch": 10.0,
+ "eval_loss": 6.005556583404541,
+ "eval_runtime": 0.2357,
+ "eval_samples_per_second": 42.424,
+ "eval_steps_per_second": 4.242,
+ "step": 70
+ },
+ {
+ "epoch": 10.0,
+ "step": 70,
+ "total_flos": 2.1344245100262195e+17,
+ "train_loss": 14.839824295043945,
+ "train_runtime": 171.5695,
+ "train_samples_per_second": 24.655,
+ "train_steps_per_second": 0.408
+ }
+ ],
+ "logging_steps": 5,
+ "max_steps": 70,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 10,
+ "save_steps": 100,
+ "total_flos": 2.1344245100262195e+17,
+ "train_batch_size": 4,
+ "trial_name": null,
+ "trial_params": null
+ }
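trainer_state.json keeps the full training log under log_history, with training entries (loss, grad_norm, learning_rate) interleaved with per-epoch evaluation entries. A small sketch for pulling the evaluation-loss curve out of the file as uploaded here:

```python
# Sketch: read the per-epoch eval_loss curve from trainer_state.json (structure shown above).
import json

with open("trainer_state.json") as f:
    state = json.load(f)

# Evaluation entries are the log_history items that carry an "eval_loss" key.
eval_points = [(entry["epoch"], entry["eval_loss"])
               for entry in state["log_history"] if "eval_loss" in entry]
for epoch, loss in eval_points:
    print(f"epoch {epoch:>4.0f}: eval_loss {loss:.4f}")
```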
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8262b4c9a9a83928ab76d05376562941a57a2f4de1fb8fb1c8fc7b8fa52d4040
+ size 5176