Upload folder using huggingface_hub
This view is limited to 50 files because it contains too many changes. See raw diff.
- adapter_config.json +34 -0
- adapter_model.safetensors +3 -0
- checkpoint-100/adapter_config.json +34 -0
- checkpoint-100/adapter_model.safetensors +3 -0
- checkpoint-100/generation_config.json +6 -0
- checkpoint-100/optimizer.pt +3 -0
- checkpoint-100/rng_state.pth +3 -0
- checkpoint-100/scheduler.pt +3 -0
- checkpoint-100/special_tokens_map.json +35 -0
- checkpoint-100/tokenizer.json +0 -0
- checkpoint-100/tokenizer.model +3 -0
- checkpoint-100/tokenizer_config.json +46 -0
- checkpoint-100/trainer_state.json +161 -0
- checkpoint-100/training_args.bin +3 -0
- checkpoint-20/adapter_config.json +34 -0
- checkpoint-20/adapter_model.safetensors +3 -0
- checkpoint-20/generation_config.json +6 -0
- checkpoint-20/optimizer.pt +3 -0
- checkpoint-20/rng_state.pth +3 -0
- checkpoint-20/scheduler.pt +3 -0
- checkpoint-20/special_tokens_map.json +35 -0
- checkpoint-20/tokenizer.json +0 -0
- checkpoint-20/tokenizer.model +3 -0
- checkpoint-20/tokenizer_config.json +46 -0
- checkpoint-20/trainer_state.json +49 -0
- checkpoint-20/training_args.bin +3 -0
- checkpoint-40/adapter_config.json +34 -0
- checkpoint-40/adapter_model.safetensors +3 -0
- checkpoint-40/generation_config.json +6 -0
- checkpoint-40/optimizer.pt +3 -0
- checkpoint-40/rng_state.pth +3 -0
- checkpoint-40/scheduler.pt +3 -0
- checkpoint-40/special_tokens_map.json +35 -0
- checkpoint-40/tokenizer.json +0 -0
- checkpoint-40/tokenizer.model +3 -0
- checkpoint-40/tokenizer_config.json +46 -0
- checkpoint-40/trainer_state.json +77 -0
- checkpoint-40/training_args.bin +3 -0
- checkpoint-60/adapter_config.json +34 -0
- checkpoint-60/adapter_model.safetensors +3 -0
- checkpoint-60/generation_config.json +6 -0
- checkpoint-60/optimizer.pt +3 -0
- checkpoint-60/rng_state.pth +3 -0
- checkpoint-60/scheduler.pt +3 -0
- checkpoint-60/special_tokens_map.json +35 -0
- checkpoint-60/tokenizer.json +0 -0
- checkpoint-60/tokenizer.model +3 -0
- checkpoint-60/tokenizer_config.json +46 -0
- checkpoint-60/trainer_state.json +105 -0
- checkpoint-60/training_args.bin +3 -0
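
The commit title says the folder was pushed with the huggingface_hub client. As a minimal sketch of how such an upload is typically done (the local path and repo id below are placeholders, assuming an existing model repo and an authenticated token):

from huggingface_hub import HfApi

api = HfApi()  # assumes a prior `huggingface-cli login` or HF_TOKEN in the environment

# Upload the local training output directory (adapter + checkpoints) as one commit.
api.upload_folder(
    folder_path="./output",                     # placeholder local folder
    repo_id="your-username/rakutenai-7b-lora",  # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)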
adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Rakuten/RakutenAI-7B-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj",
+    "gate_proj",
+    "down_proj",
+    "o_proj",
+    "k_proj",
+    "up_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
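
The adapter config above describes a rank-16 LoRA (alpha 32, dropout 0.05) applied to every attention and MLP projection of Rakuten/RakutenAI-7B-instruct. A minimal sketch of loading these weights for inference with peft and transformers (the adapter repo id is a placeholder for wherever this commit lives):

from transformers import AutoModelForCausalLM, AutoTokenizer
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("Rakuten/RakutenAI-7B-instruct")
tokenizer = AutoTokenizer.from_pretrained("Rakuten/RakutenAI-7B-instruct")

# Attach the LoRA adapter from this repo (placeholder id) on top of the frozen base.
model = PeftModel.from_pretrained(base, "your-username/rakutenai-7b-lora")
model.eval()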
adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a53bcbc68dec82e2ce8cf2d2d439a8dbf38126c02cc88a567925a6781f6a782f
+size 167832240
checkpoint-100/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Rakuten/RakutenAI-7B-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj",
+    "gate_proj",
+    "down_proj",
+    "o_proj",
+    "k_proj",
+    "up_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
checkpoint-100/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a53bcbc68dec82e2ce8cf2d2d439a8dbf38126c02cc88a567925a6781f6a782f
+size 167832240
checkpoint-100/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.39.1"
+}
checkpoint-100/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9c62536abee8b78adb6147453bd71c877d7d2b4d5a8f8d034d44a921275e11ab
+size 335810482
checkpoint-100/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:808521b8055ae579535313bc1b5f324216971386eaeef0a87693a238c17a92b3
+size 14168
checkpoint-100/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76b4fa9c11af5ffcd754861813fd859eeac70d14b38222bd674da5b3ddfefcfe
+size 1056
checkpoint-100/special_tokens_map.json
ADDED
@@ -0,0 +1,35 @@
+{
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-100/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
checkpoint-100/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7978987401ef447724ded0544d048831954b7517a96555ce1593149e3678b6dc
+size 755169
checkpoint-100/tokenizer_config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
+}
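
Per this tokenizer config, encoding prepends <s> (add_bos_token is true) but never appends </s> (add_eos_token is false), and padding reuses </s> because no dedicated pad token exists. A small sketch of checking that behavior on the tokenizer saved in this commit (placeholder repo id; the tokenizer files shown live under the checkpoint folders):

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("your-username/rakutenai-7b-lora")

ids = tok("Hello")["input_ids"]
assert ids[0] == tok.bos_token_id            # <s> (id 1) is prepended
assert ids[-1] != tok.eos_token_id           # no </s> appended at encode time
assert tok.pad_token_id == tok.eos_token_id  # padding falls back to </s> (id 2)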
checkpoint-100/trainer_state.json
ADDED
@@ -0,0 +1,161 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 4.081632653061225,
+  "eval_steps": 500,
+  "global_step": 100,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "grad_norm": 5.041278839111328,
+      "learning_rate": 2.9968542393565676e-06,
+      "loss": 1.9371,
+      "step": 5
+    },
+    {
+      "epoch": 0.41,
+      "grad_norm": NaN,
+      "learning_rate": 2.9717672653473587e-06,
+      "loss": 1.806,
+      "step": 10
+    },
+    {
+      "epoch": 0.61,
+      "grad_norm": 3.5015058517456055,
+      "learning_rate": 2.905810057509516e-06,
+      "loss": 1.7373,
+      "step": 15
+    },
+    {
+      "epoch": 0.82,
+      "grad_norm": 3.583218574523926,
+      "learning_rate": 2.803067604777227e-06,
+      "loss": 1.7471,
+      "step": 20
+    },
+    {
+      "epoch": 1.02,
+      "grad_norm": 4.409696102142334,
+      "learning_rate": 2.666228326019474e-06,
+      "loss": 1.6622,
+      "step": 25
+    },
+    {
+      "epoch": 1.22,
+      "grad_norm": 3.5655429363250732,
+      "learning_rate": 2.498872837517522e-06,
+      "loss": 1.6483,
+      "step": 30
+    },
+    {
+      "epoch": 1.43,
+      "grad_norm": 4.161169528961182,
+      "learning_rate": 2.305380260473476e-06,
+      "loss": 1.674,
+      "step": 35
+    },
+    {
+      "epoch": 1.63,
+      "grad_norm": 3.5801784992218018,
+      "learning_rate": 2.090813634373931e-06,
+      "loss": 1.6635,
+      "step": 40
+    },
+    {
+      "epoch": 1.84,
+      "grad_norm": 3.7053236961364746,
+      "learning_rate": 1.8607874345493807e-06,
+      "loss": 1.6857,
+      "step": 45
+    },
+    {
+      "epoch": 2.04,
+      "grad_norm": 3.145810127258301,
+      "learning_rate": 1.6213206605421064e-06,
+      "loss": 1.6165,
+      "step": 50
+    },
+    {
+      "epoch": 2.24,
+      "grad_norm": 3.514664649963379,
+      "learning_rate": 1.3786793394578939e-06,
+      "loss": 1.6025,
+      "step": 55
+    },
+    {
+      "epoch": 2.45,
+      "grad_norm": 3.9232802391052246,
+      "learning_rate": 1.13921256545062e-06,
+      "loss": 1.5615,
+      "step": 60
+    },
+    {
+      "epoch": 2.65,
+      "grad_norm": 3.0508198738098145,
+      "learning_rate": 9.091863656260696e-07,
+      "loss": 1.6076,
+      "step": 65
+    },
+    {
+      "epoch": 2.86,
+      "grad_norm": 3.7754147052764893,
+      "learning_rate": 6.946197395265243e-07,
+      "loss": 1.664,
+      "step": 70
+    },
+    {
+      "epoch": 3.06,
+      "grad_norm": 3.5631840229034424,
+      "learning_rate": 5.011271624824787e-07,
+      "loss": 1.5976,
+      "step": 75
+    },
+    {
+      "epoch": 3.27,
+      "grad_norm": 3.4716637134552,
+      "learning_rate": 3.337716739805264e-07,
+      "loss": 1.5761,
+      "step": 80
+    },
+    {
+      "epoch": 3.47,
+      "grad_norm": 3.624776601791382,
+      "learning_rate": 1.9693239522277327e-07,
+      "loss": 1.5924,
+      "step": 85
+    },
+    {
+      "epoch": 3.67,
+      "grad_norm": 3.4213175773620605,
+      "learning_rate": 9.418994249048474e-08,
+      "loss": 1.6305,
+      "step": 90
+    },
+    {
+      "epoch": 3.88,
+      "grad_norm": 3.8403515815734863,
+      "learning_rate": 2.8232734652641424e-08,
+      "loss": 1.6219,
+      "step": 95
+    },
+    {
+      "epoch": 4.08,
+      "grad_norm": 3.393942356109619,
+      "learning_rate": 7.866464317276001e-10,
+      "loss": 1.5561,
+      "step": 100
+    }
+  ],
+  "logging_steps": 5,
+  "max_steps": 100,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 20,
+  "total_flos": 8673113877921792.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
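
This trainer state records a run capped at max_steps=100 (reaching epoch ≈ 4.08 of the configured 5), logging every 5 steps and saving every 20, with the learning rate decaying from ~3.0e-06 toward zero and a single NaN grad_norm at step 10. A sketch of continuing from an intermediate save with transformers' Trainer (construction of `trainer` is elided; assumes the same args, model, and data as the original run):

# `trainer` is a transformers.Trainer configured as in the original run,
# with `max_steps` raised beyond 100 so there is something left to train.
trainer.train(resume_from_checkpoint="checkpoint-60")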
checkpoint-100/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcd55fa32c983512f289bcc506b75cd6687379a244a95f246ddb3cda8a97ea11
+size 4960
checkpoint-20/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Rakuten/RakutenAI-7B-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj",
+    "gate_proj",
+    "down_proj",
+    "o_proj",
+    "k_proj",
+    "up_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
checkpoint-20/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:81169049628740146f07ac7e1f2c59f6475467aa39b48ea79092281b1cf4f31f
+size 167832240
checkpoint-20/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.39.1"
+}
checkpoint-20/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b5c5a5f5f27d9ba6aebc06e9fb489f05042a6881eeeddbd130172ad2723e6c5
+size 335810482
checkpoint-20/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c38429496778665cedc2e268e56dc0476144498310916d1f0cfff08c093b6b5c
+size 14168
checkpoint-20/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ce642ef0996d1e3d6618dd62493c8bbb08bd03811c25a654671f6952e68b2cd2
+size 1056
checkpoint-20/special_tokens_map.json
ADDED
@@ -0,0 +1,35 @@
+{
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-20/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
checkpoint-20/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7978987401ef447724ded0544d048831954b7517a96555ce1593149e3678b6dc
+size 755169
checkpoint-20/tokenizer_config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
+}
checkpoint-20/trainer_state.json
ADDED
@@ -0,0 +1,49 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 0.8163265306122449,
+  "eval_steps": 500,
+  "global_step": 20,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "grad_norm": 5.041278839111328,
+      "learning_rate": 2.9968542393565676e-06,
+      "loss": 1.9371,
+      "step": 5
+    },
+    {
+      "epoch": 0.41,
+      "grad_norm": NaN,
+      "learning_rate": 2.9717672653473587e-06,
+      "loss": 1.806,
+      "step": 10
+    },
+    {
+      "epoch": 0.61,
+      "grad_norm": 3.5015058517456055,
+      "learning_rate": 2.905810057509516e-06,
+      "loss": 1.7373,
+      "step": 15
+    },
+    {
+      "epoch": 0.82,
+      "grad_norm": 3.583218574523926,
+      "learning_rate": 2.803067604777227e-06,
+      "loss": 1.7471,
+      "step": 20
+    }
+  ],
+  "logging_steps": 5,
+  "max_steps": 100,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 20,
+  "total_flos": 1729235156533248.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-20/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcd55fa32c983512f289bcc506b75cd6687379a244a95f246ddb3cda8a97ea11
+size 4960
checkpoint-40/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Rakuten/RakutenAI-7B-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj",
+    "gate_proj",
+    "down_proj",
+    "o_proj",
+    "k_proj",
+    "up_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
checkpoint-40/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:201e57d1be1961bebbcd1ff8eda8c6677e4a69421937df43b308c971c3108aea
+size 167832240
checkpoint-40/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.39.1"
+}
checkpoint-40/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:079e8a89a0226983e6e8ae22966abf6aed4e5ebf6d41cb8dab776e531a5362f7
+size 335810482
checkpoint-40/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e0230bfed5d2d4ba948095836587d23305d972aac689f5cdcf2fefea079cce46
+size 14168
checkpoint-40/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:701d4e651a2909e8f790218a3f92bf4c4a7e6e82039f18b093a26a63dfa65d6a
+size 1056
checkpoint-40/special_tokens_map.json
ADDED
@@ -0,0 +1,35 @@
+{
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-40/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
checkpoint-40/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7978987401ef447724ded0544d048831954b7517a96555ce1593149e3678b6dc
+size 755169
checkpoint-40/tokenizer_config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
+}
checkpoint-40/trainer_state.json
ADDED
@@ -0,0 +1,77 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 1.6326530612244898,
+  "eval_steps": 500,
+  "global_step": 40,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "grad_norm": 5.041278839111328,
+      "learning_rate": 2.9968542393565676e-06,
+      "loss": 1.9371,
+      "step": 5
+    },
+    {
+      "epoch": 0.41,
+      "grad_norm": NaN,
+      "learning_rate": 2.9717672653473587e-06,
+      "loss": 1.806,
+      "step": 10
+    },
+    {
+      "epoch": 0.61,
+      "grad_norm": 3.5015058517456055,
+      "learning_rate": 2.905810057509516e-06,
+      "loss": 1.7373,
+      "step": 15
+    },
+    {
+      "epoch": 0.82,
+      "grad_norm": 3.583218574523926,
+      "learning_rate": 2.803067604777227e-06,
+      "loss": 1.7471,
+      "step": 20
+    },
+    {
+      "epoch": 1.02,
+      "grad_norm": 4.409696102142334,
+      "learning_rate": 2.666228326019474e-06,
+      "loss": 1.6622,
+      "step": 25
+    },
+    {
+      "epoch": 1.22,
+      "grad_norm": 3.5655429363250732,
+      "learning_rate": 2.498872837517522e-06,
+      "loss": 1.6483,
+      "step": 30
+    },
+    {
+      "epoch": 1.43,
+      "grad_norm": 4.161169528961182,
+      "learning_rate": 2.305380260473476e-06,
+      "loss": 1.674,
+      "step": 35
+    },
+    {
+      "epoch": 1.63,
+      "grad_norm": 3.5801784992218018,
+      "learning_rate": 2.090813634373931e-06,
+      "loss": 1.6635,
+      "step": 40
+    }
+  ],
+  "logging_steps": 5,
+  "max_steps": 100,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 20,
+  "total_flos": 3471636198850560.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-40/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcd55fa32c983512f289bcc506b75cd6687379a244a95f246ddb3cda8a97ea11
+size 4960
checkpoint-60/adapter_config.json
ADDED
@@ -0,0 +1,34 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "Rakuten/RakutenAI-7B-instruct",
+  "bias": "none",
+  "fan_in_fan_out": false,
+  "inference_mode": false,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj",
+    "gate_proj",
+    "down_proj",
+    "o_proj",
+    "k_proj",
+    "up_proj"
+  ],
+  "task_type": "CAUSAL_LM",
+  "use_dora": false,
+  "use_rslora": false
+}
checkpoint-60/adapter_model.safetensors
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ffdddd99b7e2dac2fbe6f1527394f3bd35c8f4cec7f824d6a680b150eab29752
+size 167832240
checkpoint-60/generation_config.json
ADDED
@@ -0,0 +1,6 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 1,
+  "eos_token_id": 2,
+  "transformers_version": "4.39.1"
+}
checkpoint-60/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a2ca205f6859d421a936cd43599ebb3a9e6b4746a4617aec3eb1e4451973a6ab
+size 335810482
checkpoint-60/rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e6a26fff3a3d4ce3ae0cf2b12423f9b87839511db578ab2145a0af9abb15ae98
+size 14168
checkpoint-60/scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:66ff6bbcef17af5c104327b1fe8909f5fc08bf691eb358ca5fd99210a287b128
+size 1056
checkpoint-60/special_tokens_map.json
ADDED
@@ -0,0 +1,35 @@
+{
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": {
+    "content": "<s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "</s>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
+}
checkpoint-60/tokenizer.json
ADDED
The diff for this file is too large to render.
See raw diff
checkpoint-60/tokenizer.model
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7978987401ef447724ded0544d048831954b7517a96555ce1593149e3678b6dc
+size 755169
checkpoint-60/tokenizer_config.json
ADDED
@@ -0,0 +1,46 @@
+{
+  "add_bos_token": true,
+  "add_eos_token": false,
+  "added_tokens_decoder": {
+    "0": {
+      "content": "<unk>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "1": {
+      "content": "<s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "2": {
+      "content": "</s>",
+      "lstrip": false,
+      "normalized": true,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    }
+  },
+  "additional_special_tokens": [
+    "<unk>",
+    "<s>",
+    "</s>"
+  ],
+  "bos_token": "<s>",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "legacy": true,
+  "model_max_length": 1000000000000000019884624838656,
+  "pad_token": "</s>",
+  "sp_model_kwargs": {},
+  "spaces_between_special_tokens": false,
+  "tokenizer_class": "LlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": true
+}
checkpoint-60/trainer_state.json
ADDED
@@ -0,0 +1,105 @@
+{
+  "best_metric": null,
+  "best_model_checkpoint": null,
+  "epoch": 2.4489795918367347,
+  "eval_steps": 500,
+  "global_step": 60,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.2,
+      "grad_norm": 5.041278839111328,
+      "learning_rate": 2.9968542393565676e-06,
+      "loss": 1.9371,
+      "step": 5
+    },
+    {
+      "epoch": 0.41,
+      "grad_norm": NaN,
+      "learning_rate": 2.9717672653473587e-06,
+      "loss": 1.806,
+      "step": 10
+    },
+    {
+      "epoch": 0.61,
+      "grad_norm": 3.5015058517456055,
+      "learning_rate": 2.905810057509516e-06,
+      "loss": 1.7373,
+      "step": 15
+    },
+    {
+      "epoch": 0.82,
+      "grad_norm": 3.583218574523926,
+      "learning_rate": 2.803067604777227e-06,
+      "loss": 1.7471,
+      "step": 20
+    },
+    {
+      "epoch": 1.02,
+      "grad_norm": 4.409696102142334,
+      "learning_rate": 2.666228326019474e-06,
+      "loss": 1.6622,
+      "step": 25
+    },
+    {
+      "epoch": 1.22,
+      "grad_norm": 3.5655429363250732,
+      "learning_rate": 2.498872837517522e-06,
+      "loss": 1.6483,
+      "step": 30
+    },
+    {
+      "epoch": 1.43,
+      "grad_norm": 4.161169528961182,
+      "learning_rate": 2.305380260473476e-06,
+      "loss": 1.674,
+      "step": 35
+    },
+    {
+      "epoch": 1.63,
+      "grad_norm": 3.5801784992218018,
+      "learning_rate": 2.090813634373931e-06,
+      "loss": 1.6635,
+      "step": 40
+    },
+    {
+      "epoch": 1.84,
+      "grad_norm": 3.7053236961364746,
+      "learning_rate": 1.8607874345493807e-06,
+      "loss": 1.6857,
+      "step": 45
+    },
+    {
+      "epoch": 2.04,
+      "grad_norm": 3.145810127258301,
+      "learning_rate": 1.6213206605421064e-06,
+      "loss": 1.6165,
+      "step": 50
+    },
+    {
+      "epoch": 2.24,
+      "grad_norm": 3.514664649963379,
+      "learning_rate": 1.3786793394578939e-06,
+      "loss": 1.6025,
+      "step": 55
+    },
+    {
+      "epoch": 2.45,
+      "grad_norm": 3.9232802391052246,
+      "learning_rate": 1.13921256545062e-06,
+      "loss": 1.5615,
+      "step": 60
+    }
+  ],
+  "logging_steps": 5,
+  "max_steps": 100,
+  "num_input_tokens_seen": 0,
+  "num_train_epochs": 5,
+  "save_steps": 20,
+  "total_flos": 5264188871884800.0,
+  "train_batch_size": 8,
+  "trial_name": null,
+  "trial_params": null
+}
checkpoint-60/training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dcd55fa32c983512f289bcc506b75cd6687379a244a95f246ddb3cda8a97ea11
+size 4960