tgool committed
Commit de93dd8 · verified
1 Parent(s): 2f0f3d0

koelectra-base-discriminator

Files changed (6)
  1. README.md +55 -0
  2. config.json +20 -0
  3. gitattributes +8 -0
  4. pytorch_model.bin +3 -0
  5. tokenizer_config.json +4 -0
  6. vocab.txt +0 -0
README.md ADDED
@@ -0,0 +1,55 @@
+ ---
+ language: ko
+ license: apache-2.0
+ tags:
+ - korean
+ ---
+
+ # KoELECTRA (Base Discriminator)
+
+ Pretrained ELECTRA Language Model for Korean (`koelectra-base-discriminator`)
+
+ For more details, please see the [original repository](https://github.com/monologg/KoELECTRA/blob/master/README_EN.md).
+
+ ## Usage
+
+ ### Load model and tokenizer
+
+ ```python
+ >>> from transformers import ElectraModel, ElectraTokenizer
+
+ >>> model = ElectraModel.from_pretrained("monologg/koelectra-base-discriminator")
+ >>> tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator")
+ ```
+
+ ### Tokenizer example
+
+ ```python
+ >>> from transformers import ElectraTokenizer
+ >>> tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator")
+ >>> tokenizer.tokenize("[CLS] 한국어 ELECTRA를 공유합니다. [SEP]")
+ ['[CLS]', '한국어', 'E', '##L', '##EC', '##T', '##RA', '##를', '공유', '##합니다', '.', '[SEP]']
+ >>> tokenizer.convert_tokens_to_ids(['[CLS]', '한국어', 'E', '##L', '##EC', '##T', '##RA', '##를', '공유', '##합니다', '.', '[SEP]'])
+ [2, 18429, 41, 6240, 15229, 6204, 20894, 5689, 12622, 10690, 18, 3]
+ ```
+
+ ## Example using ElectraForPreTraining
+
+ ```python
+ import torch
+ from transformers import ElectraForPreTraining, ElectraTokenizer
+
+ discriminator = ElectraForPreTraining.from_pretrained("monologg/koelectra-base-discriminator")
+ tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator")
+
+ sentence = "나는 방금 밥을 먹었다."  # "I just ate."
+ fake_sentence = "나는 내일 밥을 먹었다."  # "I ate tomorrow." -- "내일" (tomorrow) replaces "방금" (just now)
+
+ fake_tokens = tokenizer.tokenize(fake_sentence)
+ fake_inputs = tokenizer.encode(fake_sentence, return_tensors="pt")
+
+ # A positive logit means the discriminator flags the token as "replaced"
+ discriminator_outputs = discriminator(fake_inputs)
+ predictions = torch.round((torch.sign(discriminator_outputs[0]) + 1) / 2)
+
+ # Squeeze the batch dimension and drop [CLS]/[SEP] so predictions align with fake_tokens
+ print(list(zip(fake_tokens, predictions.squeeze().tolist()[1:-1])))
+ ```
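The base `ElectraModel` loaded in the usage section returns contextual embeddings rather than discriminator logits. A minimal sanity-check sketch (the 768-dimensional hidden size comes from the `config.json` below; the sentence and its 12-token encoding are taken from the tokenizer example above):

```python
import torch
from transformers import ElectraModel, ElectraTokenizer

# Load the encoder and tokenizer exactly as in the README usage section
model = ElectraModel.from_pretrained("monologg/koelectra-base-discriminator")
tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator")

# The tokenizer call adds [CLS]/[SEP] and returns PyTorch tensors
inputs = tokenizer("한국어 ELECTRA를 공유합니다.", return_tensors="pt")

with torch.no_grad():
    outputs = model(**inputs)

# Shape: [batch_size, sequence_length, hidden_size]
# torch.Size([1, 12, 768]) given the 10 tokens shown in the tokenizer example plus [CLS]/[SEP]
print(outputs.last_hidden_state.shape)
```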
config.json ADDED
@@ -0,0 +1,20 @@
+ {
+   "architectures": [
+     "ElectraForPreTraining"
+   ],
+   "attention_probs_dropout_prob": 0.1,
+   "embedding_size": 768,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.1,
+   "hidden_size": 768,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "max_position_embeddings": 512,
+   "model_type": "electra",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 0,
+   "type_vocab_size": 2,
+   "vocab_size": 32200
+ }
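The same values can be read programmatically instead of from the raw JSON — a short sketch using `ElectraConfig` from transformers (the printed values mirror the file above):

```python
from transformers import ElectraConfig

# Downloads and parses the config.json shown above
config = ElectraConfig.from_pretrained("monologg/koelectra-base-discriminator")

print(config.hidden_size)          # 768
print(config.num_hidden_layers)    # 12
print(config.num_attention_heads)  # 12
print(config.vocab_size)           # 32200
```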
gitattributes ADDED
@@ -0,0 +1,8 @@
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tar.gz filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
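These rules route large binaries through Git LFS, so only pointer files live in the git history. As a rough illustration of which files in this commit the patterns catch — a sketch only, since Python's `fnmatch` merely approximates gitattributes glob semantics:

```python
from fnmatch import fnmatch

# LFS patterns copied from the gitattributes above
LFS_PATTERNS = ["*.bin.*", "*.lfs.*", "*.bin", "*.h5",
                "*.tflite", "*.tar.gz", "*.ot", "*.onnx"]
FILES = ["README.md", "config.json", "pytorch_model.bin",
         "tokenizer_config.json", "vocab.txt"]

for name in FILES:
    if any(fnmatch(name, pattern) for pattern in LFS_PATTERNS):
        print(name, "-> stored as an LFS pointer")  # only pytorch_model.bin matches
```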
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:374366ceb13a13d6b0754ae3f5bb9851f1a255af7463249355b6d1d6a37fd3ff
+ size 443135628
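These three lines are a Git LFS pointer, not the weights themselves: the pointer spec version, the SHA-256 of the real blob, and its size in bytes (~443 MB). A hypothetical sketch of parsing such a pointer and verifying a separately downloaded blob against it (`parse_lfs_pointer` and both file paths are illustrative names, not part of git-lfs):

```python
import hashlib
import os

def parse_lfs_pointer(text: str) -> dict:
    # Every pointer line has the form "key value"
    return dict(line.split(" ", 1) for line in text.strip().splitlines())

pointer = parse_lfs_pointer(open("pytorch_model.bin").read())  # the pointer file
expected_sha256 = pointer["oid"].split(":", 1)[1]

# Hash the downloaded blob in 1 MiB chunks to avoid holding 443 MB in memory
digest = hashlib.sha256()
with open("downloaded_weights.bin", "rb") as blob:  # hypothetical download path
    for chunk in iter(lambda: blob.read(1 << 20), b""):
        digest.update(chunk)

assert digest.hexdigest() == expected_sha256, "checksum mismatch"
assert os.path.getsize("downloaded_weights.bin") == int(pointer["size"])
```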
tokenizer_config.json ADDED
@@ -0,0 +1,4 @@
+ {
+   "do_lower_case": false,
+   "max_len": 512
+ }
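`do_lower_case: false` is what keeps Latin-script casing intact in the README's tokenizer example ('E', '##L', '##EC', ...). A quick check (the expected output is taken from that example, minus the special tokens):

```python
from transformers import ElectraTokenizer

tokenizer = ElectraTokenizer.from_pretrained("monologg/koelectra-base-discriminator")

# Casing survives because do_lower_case is false in tokenizer_config.json
print(tokenizer.tokenize("한국어 ELECTRA를 공유합니다."))
# ['한국어', 'E', '##L', '##EC', '##T', '##RA', '##를', '공유', '##합니다', '.']
```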
vocab.txt ADDED
The diff for this file is too large to render.