suriya7 committed on
Commit ecce1fb · verified · 1 Parent(s): c63f016

Upload tokenizer

Files changed (3)
  1. special_tokens_map.json +32 -3
  2. tokenizer_config.json +34 -1
  3. vocab.json +0 -0
special_tokens_map.json CHANGED
@@ -1,5 +1,34 @@
 {
-  "bos_token": "<|endoftext|>",
-  "eos_token": "<|endoftext|>",
-  "unk_token": "<|endoftext|>"
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
+  "bos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eos_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "pad_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<|endoftext|>",
+    "lstrip": false,
+    "normalized": true,
+    "rstrip": false,
+    "single_word": false
+  }
 }
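The updated map promotes the special tokens from bare strings to full token specs and registers <|im_start|> / <|im_end|> as additional special tokens. As a rough illustration (not part of the commit), this is roughly how those fields surface after loading the tokenizer with transformers; the repo id below is a placeholder, since the model repository isn't named on this page:

```python
from transformers import AutoTokenizer

# Placeholder repo id -- substitute the repository this commit belongs to.
tok = AutoTokenizer.from_pretrained("suriya7/<model-repo>")

# Fields from special_tokens_map.json are exposed as tokenizer attributes.
print(tok.bos_token, tok.eos_token, tok.unk_token, tok.pad_token)  # all "<|endoftext|>"
print(tok.additional_special_tokens)                               # ["<|im_start|>", "<|im_end|>"]

# Registered special tokens are never split and can be dropped on decode.
ids = tok("<|im_start|>user\nhello<|im_end|>")["input_ids"]
print(tok.decode(ids, skip_special_tokens=True))                   # "user\nhello"
```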
tokenizer_config.json CHANGED
@@ -1,4 +1,6 @@
 {
+  "add_bos_token": false,
+  "add_eos_token": true,
   "add_prefix_space": false,
   "added_tokens_decoder": {
     "50256": {
@@ -8,12 +10,43 @@
       "rstrip": false,
       "single_word": false,
       "special": true
+    },
+    "50257": {
+      "content": "[PAD]",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50258": {
+      "content": "<|im_start|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
+    },
+    "50259": {
+      "content": "<|im_end|>",
+      "lstrip": false,
+      "normalized": false,
+      "rstrip": false,
+      "single_word": false,
+      "special": true
     }
   },
+  "additional_special_tokens": [
+    "<|im_start|>",
+    "<|im_end|>"
+  ],
   "bos_token": "<|endoftext|>",
   "clean_up_tokenization_spaces": false,
   "eos_token": "<|endoftext|>",
+  "errors": "replace",
+  "model_max_length": 512,
+  "pad_token": "<|endoftext|>",
+  "padding_side": "right",
   "tokenizer_class": "GPT2Tokenizer",
   "unk_token": "<|endoftext|>"
 }
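The config change pins the ChatML-style markers to fixed ids (50258 / 50259), adds a [PAD] slot at 50257, lowers model_max_length to 512, and pads on the right with <|endoftext|>. A hedged sketch of what that implies at encode time, again with a placeholder repo id:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("suriya7/<model-repo>")  # placeholder repo id

# added_tokens_decoder maps the new chat markers to the ids reserved above.
print(tok.convert_tokens_to_ids("<|im_start|>"))  # expected: 50258
print(tok.convert_tokens_to_ids("<|im_end|>"))    # expected: 50259

# With pad_token="<|endoftext|>", padding_side="right", and model_max_length=512,
# batches can be padded and truncated to the model limit directly.
batch = tok(
    ["<|im_start|>user\nHi<|im_end|>", "<|im_start|>assistant\nHello!<|im_end|>"],
    padding="max_length",
    truncation=True,
    max_length=tok.model_max_length,
)
print(len(batch["input_ids"][0]), len(batch["input_ids"][1]))  # 512 512
```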
vocab.json CHANGED
The diff for this file is too large to render. See raw diff