abgoswam committed
Commit fe9d2e7 · verified · 1 Parent(s): 68ab699

Training in progress, step 300

adapter_config.json CHANGED
@@ -1,7 +1,7 @@
 {
   "alpha_pattern": {},
   "auto_mapping": null,
-  "base_model_name_or_path": "phi7b_websynthv2_phirecipe_sft_hf",
+  "base_model_name_or_path": "tlg7b_output_dir",
   "bias": "none",
   "fan_in_fan_out": false,
   "inference_mode": true,
@@ -19,13 +19,7 @@
   "rank_pattern": {},
   "revision": null,
   "target_modules": [
-    "o_proj",
-    "up_proj",
-    "k_proj",
-    "down_proj",
-    "v_proj",
-    "q_proj",
-    "gate_proj"
+    "down_proj"
   ],
   "task_type": "CAUSAL_LM"
 }
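
The updated adapter_config.json points at a different base model ("tlg7b_output_dir") and narrows target_modules from seven projection matrices down to "down_proj" alone. As a minimal sketch (not part of this commit; the base-model path and adapter directory below are placeholders), such a LoRA adapter would typically be loaded with PEFT like this:

# Sketch only: load the LoRA adapter described by adapter_config.json.
# "tlg7b_output_dir" and "path/to/adapter" are placeholder paths.
from transformers import AutoModelForCausalLM
from peft import PeftModel

base = AutoModelForCausalLM.from_pretrained("tlg7b_output_dir", trust_remote_code=True)
model = PeftModel.from_pretrained(base, "path/to/adapter")  # dir with adapter_config.json + adapter_model.safetensors
model = model.merge_and_unload()  # optional: fold the LoRA deltas back into the base weights
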
adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d3bbc0152ddcc8fb347851d846b2f06588c8a0c515612eb9fd9406691d669e4
-size 639692768
+oid sha256:15785311cfbe3a1284c54bacb24eb40e9c3aafade600bbfaabf7c72991179a5d
+size 123740608
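
The adapter checkpoint shrinks from roughly 640 MB to 124 MB, consistent with training far fewer LoRA target modules. A quick way to see which tensors a safetensors checkpoint contains (sketch; the local file path is a placeholder):

# Sketch: list the LoRA tensors stored in the downloaded checkpoint.
from safetensors import safe_open

with safe_open("adapter_model.safetensors", framework="pt") as f:  # placeholder local path
    for name in f.keys():
        print(name, tuple(f.get_tensor(name).shape))
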
cl100k_base.tiktoken ADDED
The diff for this file is too large to render. See raw diff
 
runs/Mar12_21-54-29_node-0/events.out.tfevents.1710305786.node-0.885765.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:61b6482b94f274bed9a565e06faf68bf9dc8f161461cd334abd9d6cb33bfb64a
+size 5409
runs/Mar12_22-12-16_node-0/events.out.tfevents.1710307015.node-0.895354.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e77484b858b4795c4854abb8f64acbd1c606d22e8b921cfab58594e456eacf8f
+size 88
runs/Mar12_23-01-07_node-0/events.out.tfevents.1710309917.node-0.993580.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:48a565af9c17cfdfe8937be5cccbae4022cf62992bd8e1d277abca75c14556c6
+size 5409
runs/Mar12_23-36-44_node-0/events.out.tfevents.1710311994.node-0.1035009.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:50a177705cb996daf02041ba8bdc7394f3d6fd8a7d692cb6767e973c525ad252
+size 72393
runs/Mar13_00-37-44_node-0/events.out.tfevents.1710315813.node-0.1124704.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6fc58248d9cf7ba8646838521dd819edb828f4ff503181c3fe683b0bf42909fc
+size 212492
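
The runs/ directory collects TensorBoard event files written during the training restarts on node-0. A sketch for reading the logged scalars locally (the run path and the tag name are assumptions, not taken from the files themselves):

# Sketch: read scalar curves from one of the TensorBoard event files.
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Mar13_00-37-44_node-0")  # local copy of the run directory
acc.Reload()
print(acc.Tags()["scalars"])          # discover which tags were logged
for ev in acc.Scalars("train/loss"):  # "train/loss" is an assumed tag name
    print(ev.step, ev.value)
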
special_tokens_map.json CHANGED
@@ -1,33 +1,5 @@
 {
-  "additional_special_tokens": [
-    "<|/inst|>"
-  ],
-  "bos_token": {
-    "content": "<s>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "eos_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "pad_token": {
-    "content": "<|endoftext|>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  },
-  "unk_token": {
-    "content": "<unk>",
-    "lstrip": false,
-    "normalized": false,
-    "rstrip": false,
-    "single_word": false
-  }
+  "bos_token": "<|endoftext|>",
+  "eos_token": "<|endoftext|>",
+  "pad_token": "<|endoftext|>"
 }
tokenization_tlg.py ADDED
@@ -0,0 +1,230 @@
+# Adapted from https://huggingface.co/Qwen/Qwen-7B-Chat/blob/main/tokenization_qwen.py
+import os
+from typing import Collection, List, Optional, Dict, Set, Tuple, Union
+
+import base64
+
+from transformers import PreTrainedTokenizer, AddedToken
+import tiktoken
+
+
+"""
+This tokenizer is almost identical to tiktoken.get_encoding("cl100k_base"),
+with a few additional special tokens to support the ChatML format.
+
+TODO(bapatra): Right now, I do not save the special tokens to the vocab file.
+Maybe in the future, that would be useful? Can add that support later.
+
+"""
+
+def _load_tiktoken_bpe(tiktoken_bpe_file: str) -> Dict[bytes, int]:
+    with open(tiktoken_bpe_file, "rb") as f:
+        contents = f.read()
+    return {
+        base64.b64decode(token): int(rank)
+        for token, rank in (line.split() for line in contents.splitlines() if line)
+    }
+
+
+SPECIAL_TOKENS = {
+    # tiktoken.get_encoding("cl100k_base")._special_tokens
+    '<|endoftext|>': 100257,
+    '<|fim_prefix|>': 100258,
+    '<|fim_middle|>': 100259,
+    '<|fim_suffix|>': 100260,
+    '<|endofprompt|>': 100276,
+    # ChatML-related special tokens
+    "<|im_start|>": 100264,
+    "<|im_end|>": 100265,
+}
+
+class TLGv4Tokenizer(PreTrainedTokenizer):
+    vocab_files_names = {
+        "vocab_file": "cl100k_base.tiktoken"
+    }
+
+    model_input_names: List[str] = ["input_ids", "attention_mask"]
+
+    def __init__(
+        self,
+        vocab_file: Optional[str] = None,
+        errors: str = "replace",
+        **kwargs
+    ) -> None:
+        # PreTrainedTokenizer's init calls _add_tokens, which in turn checks
+        # whether the token is present in `self.special_tokens`. Hence we instantiate it here.
+        # The way Qwen gets around this is by checking against SPECIAL_TOKENS,
+        # but it is better to check against the object's own `special_tokens`
+        # in case we eventually want to allow the tokenizer to have additional special tokens.
+        self.special_tokens = SPECIAL_TOKENS
+
+        super().__init__(**kwargs)
+        self.errors = errors
+
+        base = tiktoken.get_encoding("cl100k_base")
+        if vocab_file is None:
+            self.mergeable_ranks: Dict[bytes, int] = base._mergeable_ranks
+        else:
+            self.mergeable_ranks = _load_tiktoken_bpe(vocab_file)
+
+        self.pat_str = base._pat_str
+
+        enc = tiktoken.Encoding(
+            name="cl100k_im",
+            pat_str=self.pat_str,
+            mergeable_ranks=self.mergeable_ranks,
+            special_tokens=self.special_tokens,
+        )
+        self.tokenizer = enc
+
+        self.decoder: Dict[int, bytes] = {
+            v: k for k, v in self.mergeable_ranks.items()
+        }
+        self.decoder.update({v: k for k, v in self.special_tokens.items()})
+
+        self.eod_id = self.tokenizer.eot_token
+        self._eos_token = self._convert_id_to_token(self.eod_id)
+
+        self.im_start_id = self.special_tokens["<|im_start|>"]
+        self.im_end_id = self.special_tokens["<|im_end|>"]
+
+    def __getstate__(self):
+        state = self.__dict__.copy()
+        del state["tokenizer"]
+        return state
+
+    def __setstate__(self, state):
+        self.__dict__ = state
+        enc = tiktoken.Encoding(
+            name="cl100k_im",
+            pat_str=self.pat_str,
+            mergeable_ranks=self.mergeable_ranks,
+            special_tokens=self.special_tokens,
+        )
+        self.tokenizer = enc
+
+    def __len__(self):
+        return self.tokenizer.n_vocab
+
+    def get_vocab(self) -> Dict[Union[str, bytes], int]:
+        return {**self.mergeable_ranks, **self.special_tokens}
+
+    def convert_tokens_to_ids(
+        self,
+        tokens: Union[bytes, str, List[Union[bytes, str]]]
+    ) -> Union[int, List[int]]:
+        ids = []
+        if isinstance(tokens, (str, bytes)):
+            if tokens in self.special_tokens:
+                return self.special_tokens[tokens]
+            else:
+                return self.mergeable_ranks.get(tokens)
+        ids: List[int] = []
+        for token in tokens:
+            ids.append(self.convert_tokens_to_ids(token))
+        return ids
+
+    def _add_tokens(
+        self,
+        new_tokens: Union[List[str], List[AddedToken]],
+        special_tokens: bool = False,
+    ) -> int:
+        if not special_tokens and new_tokens:
+            raise ValueError("Only special tokens can be added to this tokenizer")
+        for token in new_tokens:
+            surface_form = token.content if isinstance(token, AddedToken) else token
+            if surface_form not in self.special_tokens:
+                raise ValueError(
+                    "For now, we do not support unknown special tokens\n"
+                    "In the future, if there is a need for this, we can add special tokens to the tokenizer\n"
+                    "starting from rank 100261 - 100263 and then 100266 - 100275.\n"
+                    "And finally, we can re-construct the enc object back\n"
+                )
+        return 0
+
+    def save_vocabulary(self, save_directory: str, **kwargs) -> Tuple[str]:
+        file_path = os.path.join(save_directory, "cl100k_base.tiktoken")
+        with open(file_path, "w") as f:
+            for token, rank in self.mergeable_ranks.items():
+                line = base64.b64encode(token).decode("utf-8") + " " + str(rank) + "\n"
+                f.write(line)
+        return (file_path,)
+
+    def tokenize(
+        self,
+        text: str,
+        allowed_special: Union[Set, str] = "all",
+        disallowed_special: Union[Collection, str] = (),
+        **kwargs
+    ) -> List[Union[bytes, str]]:
+        tokens: List[Union[bytes, str]] = []
+        for token_id in self.tokenizer.encode(
+            text, allowed_special=allowed_special, disallowed_special=disallowed_special
+        ):
+            tokens.append(self.decoder[token_id])
+        return tokens
+
+    def convert_tokens_to_string(self, tokens: List[Union[bytes, str]]) -> str:
+        """
+        Converts a sequence of tokens into a single string.
+        """
+        text = ""
+        temp = b""
+        for t in tokens:
+            if isinstance(t, str):
+                if temp:
+                    text += temp.decode("utf-8", errors=self.errors)
+                    temp = b""
+                text += t
+            elif isinstance(t, bytes):
+                temp += t
+            else:
+                raise TypeError("token should only be of type bytes or str")
+        if temp:
+            text += temp.decode("utf-8", errors=self.errors)
+        return text
+
+    @property
+    def vocab_size(self):
+        return self.tokenizer.n_vocab
+
+    @property
+    def eos_token_id(self) -> int:
+        return self.eod_id
+
+    def _convert_id_to_token(self, index: int) -> Union[bytes, str]:
+        """Converts an id to a token, special tokens included"""
+        if index in self.decoder:
+            return self.decoder[index]
+        raise ValueError("unknown ids")
+
+    def _convert_token_to_id(self, token: Union[bytes, str]) -> int:
+        """Converts a token to an id using the vocab, special tokens included"""
+        if token in self.special_tokens:
+            return self.special_tokens[token]
+        if token in self.mergeable_ranks:
+            return self.mergeable_ranks[token]
+        raise ValueError("unknown token")
+
+    def _tokenize(self, text: str, **kwargs):
+        """
+        Converts a string into a sequence of tokens (string), using the tokenizer. Splits into words for word-based
+        vocabulary or sub-words for sub-word-based vocabularies (BPE/SentencePiece/WordPiece).
+        Does NOT take care of added tokens.
+        """
+        raise NotImplementedError
+
+    def _decode(
+        self,
+        token_ids: Union[int, List[int]],
+        skip_special_tokens: bool = False,
+        errors: str = None,
+        **kwargs,
+    ) -> str:
+        if isinstance(token_ids, int):
+            token_ids = [token_ids]
+        if skip_special_tokens:
+            token_ids = [i for i in token_ids if i < self.eod_id]
+        return self.tokenizer.decode(token_ids, errors=errors or self.errors)
+
+
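
Because tokenizer_config.json (below) maps AutoTokenizer to tokenization_tlg.TLGv4Tokenizer, the class above is loaded through the remote-code hook. A minimal usage sketch (the repository id is a placeholder):

# Sketch: round-trip text through the custom tiktoken-backed tokenizer.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/repo", trust_remote_code=True)  # placeholder repo id
ids = tok("Hello, world!")["input_ids"]
print(ids)
print(tok.decode(ids))  # per _decode above, skip_special_tokens drops ids >= the EOS id
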
tokenizer_config.json CHANGED
@@ -1,349 +1,16 @@
 {
-  "add_bos_token": false,
-  "add_eos_token": false,
-  "added_tokens_decoder": {
-    "0": {
-      "content": "<unk>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "1": {
-      "content": "<s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "2": {
-      "content": "</s>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": false
-    },
-    "32000": {
-      "content": "<|endoftext|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": false,
-      "single_word": false,
-      "special": true
-    },
-    "32001": {
-      "content": "<|assistant|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32002": {
-      "content": "<|step|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32003": {
-      "content": "<|function_output|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32004": {
-      "content": "<|tag|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32005": {
-      "content": "<|function_call|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32006": {
-      "content": "<|system|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32007": {
-      "content": "<|end|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32008": {
-      "content": "<|raw|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32009": {
-      "content": "<|continue|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32010": {
-      "content": "<|user|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32011": {
-      "content": "<|function_list|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32012": {
-      "content": "<|calc|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32013": {
-      "content": "<|code|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32014": {
-      "content": "<|/code|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32015": {
-      "content": "<|summary|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32016": {
-      "content": "<|resource|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32017": {
-      "content": "<|assistant_mask|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32018": {
-      "content": "<|start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32019": {
-      "content": "<|message|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32020": {
-      "content": "<|fim_prefix|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32021": {
-      "content": "<|fim_middle|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32022": {
-      "content": "<|fim_suffix|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32023": {
-      "content": "<|meta_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32024": {
-      "content": "<|ipynb_marker|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32025": {
-      "content": "<|diff_marker|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32026": {
-      "content": "<|ghissue|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32027": {
-      "content": "<|ghreview|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32028": {
-      "content": "<|disc_start|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32029": {
-      "content": "<|disc_sep|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32030": {
-      "content": "<|disc_thread|><|query|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32031": {
-      "content": "<|/query|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32032": {
-      "content": "<|data|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32033": {
-      "content": "<|/data|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32034": {
-      "content": "<|sys|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32035": {
-      "content": "<|/sys|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32036": {
-      "content": "<|inst|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    },
-    "32037": {
-      "content": "<|/inst|>",
-      "lstrip": false,
-      "normalized": false,
-      "rstrip": true,
-      "single_word": false,
-      "special": true
-    }
+  "added_tokens_decoder": {},
+  "auto_map": {
+    "AutoTokenizer": [
+      "tokenization_tlg.TLGv4Tokenizer",
+      null
+    ]
   },
-  "additional_special_tokens": [
-    "<|/inst|>"
-  ],
-  "bos_token": "<s>",
+  "bos_token": "<|endoftext|>",
   "chat_template": "{% for message in messages %}{{'<|' + message['role'] + '|>' + '\n' + message['content'] + '<|end|>\n' }}{% endfor %}{{ eos_token }}",
-  "clean_up_tokenization_spaces": false,
+  "clean_up_tokenization_spaces": true,
   "eos_token": "<|endoftext|>",
-  "legacy": false,
   "model_max_length": 2048,
   "pad_token": "<|endoftext|>",
-  "padding_side": "right",
-  "sp_model_kwargs": {},
-  "tokenizer_class": "LlamaTokenizer",
-  "unk_token": "<unk>",
-  "use_default_system_prompt": false
+  "tokenizer_class": "TLGv4Tokenizer"
 }
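
The chat_template is unchanged, so prompts are still wrapped as <|role|>\n...<|end|> blocks terminated by the EOS token. A sketch of how it renders (repository id and messages are placeholders):

# Sketch: render a conversation with the chat_template from tokenizer_config.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("user/repo", trust_remote_code=True)  # placeholder repo id
messages = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Summarize LoRA in one sentence."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# -> "<|system|>\nYou are a helpful assistant.<|end|>\n<|user|>\n...<|end|>\n<|endoftext|>"
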
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:113ff30fd80802d3b07e9de066629dcd77a72bd8a6216ea4dd2e1b11dd350f72
+oid sha256:ad51a567828c83ac699881fb08eaa30f691eb81219b1e12abbd0e0b9f0026212
 size 5048
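
training_args.bin is the Trainer's pickled TrainingArguments object; it can be inspected locally with torch (sketch; unpickling requires a compatible transformers version, and the path is a placeholder):

# Sketch: inspect the serialized TrainingArguments from training_args.bin.
import torch

args = torch.load("training_args.bin", weights_only=False)  # pickled object, so weights_only must be off
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)
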