QuantiPhy committed on
Commit ef9e5aa · verified · 1 Parent(s): c357e7f

Training in progress, step 1

adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "bigcode/starcoderbase-1b",
+  "base_model_name_or_path": "codellama/CodeLlama-7b-Instruct-hf",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -20,10 +20,8 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "c_attn",
-    "q_attn",
-    "c_proj",
-    "c_fc"
+    "v_proj",
+    "q_proj"
   ],
   "task_type": "CAUSAL_LM",
   "use_dora": false,
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:269086357d92a6a6e0944beb4132fd6a8ae7e5ca1fb627f21c04ec09bdabce55
-size 22241240
+oid sha256:07edbb98873942528576faf05e04385ec47a2aa010b8f0b739a0fbcbe11a7287
+size 16794200
runs/Aug09_17-53-33_d6f7ca875779/events.out.tfevents.1723226042.d6f7ca875779.614.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:74b44f32b5e51b6bb9f20e16386881f104b7551703fca0e1caf53b1a04b443d3
+size 5922
special_tokens_map.json CHANGED
@@ -1,41 +1,27 @@
 {
   "additional_special_tokens": [
-    "<|endoftext|>",
-    "<fim_prefix>",
-    "<fim_middle>",
-    "<fim_suffix>",
-    "<fim_pad>",
-    "<filename>",
-    "<gh_stars>",
-    "<issue_start>",
-    "<issue_comment>",
-    "<issue_closed>",
-    "<jupyter_start>",
-    "<jupyter_text>",
-    "<jupyter_code>",
-    "<jupyter_output>",
-    "<empty_output>",
-    "<commit_before>",
-    "<commit_msg>",
-    "<commit_after>",
-    "<reponame>"
+    "▁<PRE>",
+    "▁<MID>",
+    "▁<SUF>",
+    "▁<EOT>"
   ],
   "bos_token": {
-    "content": "<|endoftext|>",
+    "content": "<s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
   "eos_token": {
-    "content": "<|endoftext|>",
+    "content": "</s>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
     "single_word": false
   },
+  "pad_token": "</s>",
   "unk_token": {
-    "content": "<|endoftext|>",
+    "content": "<unk>",
     "lstrip": false,
     "normalized": false,
     "rstrip": false,
tokenizer.json CHANGED
The diff for this file is too large to render. See raw diff
 
tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:45ccb9c8b6b561889acea59191d66986d314e7cbd6a78abc6e49b139ca91c1e6
+size 500058
tokenizer_config.json CHANGED
@@ -1,8 +1,9 @@
 {
-  "add_prefix_space": false,
+  "add_bos_token": true,
+  "add_eos_token": false,
   "added_tokens_decoder": {
     "0": {
-      "content": "<|endoftext|>",
+      "content": "<unk>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -10,7 +11,7 @@
       "special": true
     },
     "1": {
-      "content": "<fim_prefix>",
+      "content": "<s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -18,135 +19,39 @@
       "special": true
     },
     "2": {
-      "content": "<fim_middle>",
+      "content": "</s>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "3": {
-      "content": "<fim_suffix>",
+    "32007": {
+      "content": "▁<PRE>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "4": {
-      "content": "<fim_pad>",
+    "32008": {
+      "content": "▁<SUF>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "5": {
-      "content": "<filename>",
+    "32009": {
+      "content": "▁<MID>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
       "single_word": false,
       "special": true
     },
-    "6": {
-      "content": "<gh_stars>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "7": {
-      "content": "<issue_start>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "8": {
-      "content": "<issue_comment>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "9": {
-      "content": "<issue_closed>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "10": {
-      "content": "<jupyter_start>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "11": {
-      "content": "<jupyter_text>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "12": {
-      "content": "<jupyter_code>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "13": {
-      "content": "<jupyter_output>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "14": {
-      "content": "<empty_output>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "15": {
-      "content": "<commit_before>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "16": {
-      "content": "<commit_msg>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "17": {
-      "content": "<commit_after>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "18": {
-      "content": "<reponame>",
+    "32010": {
+      "content": "▁<EOT>",
       "lstrip": false,
       "normalized": false,
       "rstrip": false,
@@ -155,31 +60,25 @@
     }
   },
   "additional_special_tokens": [
-    "<|endoftext|>",
-    "<fim_prefix>",
-    "<fim_middle>",
-    "<fim_suffix>",
-    "<fim_pad>",
-    "<filename>",
-    "<gh_stars>",
-    "<issue_start>",
-    "<issue_comment>",
-    "<issue_closed>",
-    "<jupyter_start>",
-    "<jupyter_text>",
-    "<jupyter_code>",
-    "<jupyter_output>",
-    "<empty_output>",
-    "<commit_before>",
-    "<commit_msg>",
-    "<commit_after>",
-    "<reponame>"
+    "▁<PRE>",
+    "▁<MID>",
+    "▁<SUF>",
+    "▁<EOT>"
   ],
-  "bos_token": "<|endoftext|>",
-  "clean_up_tokenization_spaces": true,
-  "eos_token": "<|endoftext|>",
+  "bos_token": "<s>",
+  "chat_template": "{% if messages[0]['role'] == 'system' %}{% set loop_messages = messages[1:] %}{% set system_message = messages[0]['content'] %}{% else %}{% set loop_messages = messages %}{% set system_message = false %}{% endif %}{% for message in loop_messages %}{% if (message['role'] == 'user') != (loop.index0 % 2 == 0) %}{{ raise_exception('Conversation roles must alternate user/assistant/user/assistant/...') }}{% endif %}{% if loop.index0 == 0 and system_message != false %}{% set content = '<<SYS>>\n' + system_message + '\n<</SYS>>\n\n' + message['content'] %}{% else %}{% set content = message['content'] %}{% endif %}{% if message['role'] == 'user' %}{{ bos_token + '[INST] ' + content | trim + ' [/INST]' }}{% elif message['role'] == 'assistant' %}{{ ' ' + content | trim + ' ' + eos_token }}{% endif %}{% endfor %}",
+  "clean_up_tokenization_spaces": false,
+  "eos_token": "</s>",
+  "eot_token": "▁<EOT>",
+  "fill_token": "<FILL_ME>",
+  "legacy": null,
+  "middle_token": "▁<MID>",
   "model_max_length": 1000000000000000019884624838656,
-  "tokenizer_class": "GPT2Tokenizer",
-  "unk_token": "<|endoftext|>",
-  "vocab_size": 49152
+  "pad_token": "</s>",
+  "prefix_token": "▁<PRE>",
+  "sp_model_kwargs": {},
+  "suffix_token": "▁<SUF>",
+  "tokenizer_class": "CodeLlamaTokenizer",
+  "unk_token": "<unk>",
+  "use_default_system_prompt": false
 }
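
The new tokenizer_config.json switches to CodeLlamaTokenizer and adds a Llama-2-style chat template along with the infilling tokens (prefix/middle/suffix/EOT and the <FILL_ME> sentinel). The sketch below shows how that template renders a conversation via apply_chat_template; the model id and messages are illustrative only, and loading this repo's own tokenizer would behave the same way.

from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("codellama/CodeLlama-7b-Instruct-hf")
messages = [
    {"role": "system", "content": "You are a careful coding assistant."},
    {"role": "user", "content": "Reverse a string in Python."},
]
prompt = tok.apply_chat_template(messages, tokenize=False)
# Roughly: "<s>[INST] <<SYS>>\nYou are a careful coding assistant.\n<</SYS>>\n\nReverse a string in Python. [/INST]"
print(prompt)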
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7958ab3e98cf0ec1941577ad8b565eba1c1e2260b3501f56784860288df23b04
+oid sha256:800e431b889f77ef6fbad1daa037a96ed1251b558378fb684be897a0b2c377fb
 size 5176