update
- data/train.txt +0 -0
- eval.py +8 -0
- generate_dataset.py +33 -0
- requirements.txt +1 -0
data/train.txt
ADDED
The diff for this file is too large to render. See raw diff.
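(data/train.txt is the training file produced by generate_dataset.py below: one syllable per line with a B-W or I-W tag, and a blank line between sentences.)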
eval.py
ADDED
@@ -0,0 +1,8 @@
+from datasets import load_dataset
+from underthesea import word_tokenize
+from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
+from os.path import dirname, join
+
+dataset = load_dataset("undertheseanlp/UTS_Text_v1")
+
+sentences = dataset["train"]["text"]
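As committed, eval.py only loads the UTS_Text_v1 corpus and extracts its sentences; the word_tokenize, tokenize, and os.path imports are not used yet. For context, a rough sketch of how the two tokenizers relate, assuming word_tokenize returns words as space-joined syllable strings and the regex tokenize splits a string into syllable tokens (consistent with how generate_dataset.py below uses them); this continuation is hypothetical, not part of the commit:

    # Hypothetical continuation: inspect how the two tokenizers compose
    # on the first corpus sentence.
    sentence = sentences[0]
    words = word_tokenize(sentence)        # e.g. ["Chàng trai", "9X", ...]
    for word in words:
        print(tokenize(word), "<-", word)  # syllables within each word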
generate_dataset.py
ADDED
@@ -0,0 +1,33 @@
+from datasets import load_dataset
+from underthesea import word_tokenize
+from underthesea.pipeline.word_tokenize.regex_tokenize import tokenize
+from os.path import dirname, join
+from underthesea.utils import logger
+
+dataset = load_dataset("undertheseanlp/UTS_Text_v1")
+
+sentences = dataset["train"]["text"]
+pwd = dirname(__file__)
+data_file = join(pwd, "data/train.txt")
+with open(data_file, "w") as f:
+    f.write("")
+
+
+f = open(data_file, "a")
+content = ""
+for j, s in enumerate(sentences):
+    if j % 100 == 0 and j > 0:
+        f.write(content)
+        content = ""
+        logger.info(j)
+    words = word_tokenize(s)
+    for word in words:
+        tokens = tokenize(word)
+        for i, token in enumerate(tokens):
+            if i == 0:
+                tag = "B-W"
+            else:
+                tag = "I-W"
+            content += token + "\t" + tag + "\n"
+    content += "\n"
+f.close()
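generate_dataset.py writes data/train.txt in CoNLL-style BIO format: one syllable per line with a tab-separated tag (B-W for the first syllable of a word, I-W for the rest) and a blank line after each sentence. One caveat: content is only flushed inside the j % 100 == 0 branch, so sentences accumulated after the last multiple of 100 are never written; a final f.write(content) before f.close() would preserve the tail of the corpus.

A minimal sketch for reading the resulting file back into per-sentence (token, tag) lists, assuming the format above (read_bio is illustrative, not part of the commit):

    def read_bio(path):
        # Parse "token\ttag" lines; a blank line closes a sentence.
        sentences, current = [], []
        with open(path, encoding="utf-8") as f:
            for line in f:
                line = line.rstrip("\n")
                if not line:
                    if current:
                        sentences.append(current)
                        current = []
                    continue
                token, tag = line.split("\t")
                current.append((token, tag))
        if current:  # in case the file lacks a trailing blank line
            sentences.append(current)
        return sentences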
requirements.txt
ADDED
@@ -0,0 +1 @@
+datasets
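Note that both scripts also import underthesea (generate_dataset.py additionally uses underthesea.utils.logger), which is not listed here; presumably it is expected to be installed separately or available from the surrounding repository.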