Kosuke-Yamada committed
Commit · 53588e8
1 Parent(s): 18bdb72
add llm-jp-eval datasets
- base.py +117 -0
- chabsa.py +66 -0
- coreference.py +62 -0
- dependency.py +40 -0
- jamp.py +61 -0
- janli.py +52 -0
- jcommonsenseqa.py +70 -0
- jemhopqa.py +68 -0
- jnli.py +70 -0
- jsem.py +93 -0
- jsick.py +61 -0
- jsquad.py +74 -0
- jsts.py +70 -0
- llm-jp-eval.py +159 -0
- mawps.py +75 -0
- ner.py +71 -0
- niilc.py +77 -0
- pas.py +110 -0
- reading.py +83 -0
- requirements.txt +1 -0
- wiki_base.py +112 -0
base.py
ADDED
@@ -0,0 +1,117 @@
import copy
import json
from dataclasses import asdict, dataclass
from logging import DEBUG, StreamHandler, getLogger
from pathlib import Path

logger = getLogger(__name__)
handler = StreamHandler()
handler.setLevel(DEBUG)
logger.setLevel(DEBUG)
logger.addHandler(handler)
logger.propagate = False


@dataclass(frozen=True)
class Sample:
    input: str
    output: str


@dataclass
class OutputInfo:
    instruction: str
    output_length: int
    metrics: list[str]
    few_shots: list[Sample]
    samples: list[Sample]


class BaseDatasetProcessor:
    data_name: str

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        self.dataset_dir: Path = dataset_dir
        self.dataset_dir.mkdir(parents=True, exist_ok=True)
        self.raw_dir: Path = dataset_dir / "raw_files"
        self.raw_dir.mkdir(parents=True, exist_ok=True)
        self.evaluation_dir: Path = dataset_dir / version_name / "evaluation"
        self.evaluation_dir.mkdir(parents=True, exist_ok=True)
        self.tuning_dir: Path = dataset_dir / version_name / "tuning"
        self.tuning_dir.mkdir(parents=True, exist_ok=True)
        self.output_info: OutputInfo = OutputInfo(
            instruction="",
            output_length=0,
            metrics=[],
            few_shots=[],
            samples=[],
        )

    def _save_evaluation_data(self, samples: list[Sample], output_path: Path) -> None:
        output_path.parent.mkdir(parents=True, exist_ok=True)
        saved_output_info: OutputInfo = copy.deepcopy(self.output_info)
        saved_output_info.samples = samples
        assert saved_output_info.instruction != ""
        assert saved_output_info.output_length != 0
        assert saved_output_info.metrics != []
        assert saved_output_info.samples != []
        with output_path.open("w", encoding="utf-8") as f:
            json.dump(asdict(saved_output_info), f, ensure_ascii=False, indent=4)

    def pipeline(self) -> None:
        logger.info(f"Start processing {self.data_name}")
        self.download()
        logger.info(f"Finish downloading {self.data_name}")
        self.preprocess_evaluation_data()
        logger.info(f"Finish preprocessing evaluation data of {self.data_name}")
        self.preprocess_tuning_data()
        logger.info(f"Finish preprocessing tuning data of {self.data_name}")

    def download(self) -> None:
        raise NotImplementedError()

    def preprocess_evaluation_data(self) -> None:
        raise NotImplementedError()

    def preprocess_tuning_data(self) -> None:
        for data_split in ["train", "dev"]:
            path: Path = self.evaluation_dir / data_split / f"{self.data_name}.json"
            if not path.exists():
                print(f"{data_split} data is not found: {path}")
                return

            with path.open(encoding="utf-8") as f:
                loaded_samples = json.load(f)

            saved_samples: list[dict[str, str]] = []
            for sample_idx, sample in enumerate(loaded_samples["samples"]):
                text: str = (
                    f"以下は、タスクを説明する指示と、文脈のある入力の組み合わせです。要求を適切に満たす応答を書きなさい。\n\n### 指示:\n{loaded_samples['instruction']}"
                )
                if sample["input"]:
                    text += f"\n\n### 入力:\n{sample['input']}"
                text += f"\n\n### 応答:\n{sample['output']}"
                saved_samples.append(
                    {
                        "ID": f"{self.data_name}-{sample_idx}",
                        "instruction": loaded_samples["instruction"],
                        "input": sample["input"],
                        "output": sample["output"],
                        "text": text,
                    }
                )
            tuning_split_dir: Path = self.tuning_dir / data_split
            if not tuning_split_dir.exists():
                tuning_split_dir.mkdir(parents=True)
            with (tuning_split_dir / f"{self.data_name}.json").open(
                "w", encoding="utf-8"
            ) as f:
                json.dump(saved_samples, f, ensure_ascii=False, indent=4)
            if data_split == "dev":
                tuning_dev_first100_dir: Path = self.tuning_dir / "dev_first100"
                if not tuning_dev_first100_dir.exists():
                    tuning_dev_first100_dir.mkdir(parents=True)
                with (tuning_dev_first100_dir / f"{self.data_name}.json").open(
                    "w", encoding="utf-8"
                ) as f:
                    json.dump(saved_samples[:100], f, ensure_ascii=False, indent=4)
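All dataset-specific processors added in this commit follow the same pattern on top of BaseDatasetProcessor: set `output_info` in `__init__`, implement `download()` and `preprocess_evaluation_data()`, and inherit `pipeline()` and `preprocess_tuning_data()`. As a reading aid, here is a minimal sketch of a hypothetical subclass; the dataset name, URL, and record fields are invented for illustration and are not part of this commit.

import json
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample  # assumes this sketch lives next to base.py


class ExampleDatasetProcessor(BaseDatasetProcessor):
    data_name = "example"  # hypothetical dataset name

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        # instruction, output_length, and metrics must all be set, otherwise the
        # assertions in _save_evaluation_data will fail.
        self.output_info.instruction = "質問に対する答えを出力してください。"
        self.output_info.output_length = 32
        self.output_info.metrics = ["exact_match"]

    def download(self) -> None:
        raw_path: Path = self.raw_dir / f"{self.data_name}.jsonl"
        if not raw_path.exists():
            # hypothetical URL; real processors point at the upstream corpus
            urlretrieve("https://example.com/example.jsonl", str(raw_path))

    def preprocess_evaluation_data(self) -> None:
        samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}.jsonl").open(encoding="utf-8") as f:
            for line in f:
                record = json.loads(line)  # hypothetical fields "question" / "answer"
                samples.append(Sample(input=record["question"], output=record["answer"]))
        random.seed(42)
        random.shuffle(samples)
        # 80/10/10 split, mirroring the chabsa and jsem processors in this commit
        n = len(samples)
        self._save_evaluation_data(
            samples[: int(n * 0.8)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            samples[int(n * 0.8) : int(n * 0.9)],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            samples[int(n * 0.9) :],
            self.evaluation_dir / "test" / f"{self.data_name}.json",
        )

Calling ExampleDatasetProcessor(Path("./data"), "1.3.0").pipeline() would then download, preprocess, and also emit the tuning-format files via the inherited preprocess_tuning_data().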
chabsa.py
ADDED
@@ -0,0 +1,66 @@
import json
import random
from pathlib import Path
from urllib.request import urlretrieve
from zipfile import ZipFile

from .base import BaseDatasetProcessor, Sample


class ChabsaDatasetProcessor(BaseDatasetProcessor):
    data_name = "chabsa"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = (
            "与えられた文章から固有表現で書かれたターゲットの名前を抽出し、それに対する極性をpositive、neutral、negativeの中から選択して下さい。"
            "固有表現で書かれたターゲットの名前と、それに対する極性(positive、neutral、negativeのいずれか)のペアをスペース( )で区切って出力し、それ以外には何も含めないことを厳守してください。"
            "答えが複数の場合、改行で繋げてください。"
            "ただし、ターゲットは固有表現である市場、市況、会社/法人、グループ、会社内の部門、事業部、事業領域、製品、サービスの名称などを指すこととします。"
        )
        self.output_info.output_length = 191
        self.output_info.metrics = ["set_f1"]

    def download(self):
        raw_path: Path = self.raw_dir / f"{self.data_name}"
        if not raw_path.exists():
            urlretrieve(
                "https://s3-ap-northeast-1.amazonaws.com/dev.tech-sketch.jp/chakki/public/chABSA-dataset.zip",
                str(self.raw_dir / f"{self.data_name}.zip"),
            )
            with ZipFile(self.raw_dir / f"{self.data_name}.zip") as fw:
                fw.extractall(raw_path)

    def preprocess_evaluation_data(self):
        samples: list[Sample] = []
        for path in sorted(
            (self.raw_dir / self.data_name / "chABSA-dataset").glob("*.json")
        ):
            with path.open(encoding="utf-8") as f:
                loaded_sample: dict = json.load(f)
                for sentence in loaded_sample["sentences"]:
                    outputs: list[str] = []
                    for opinion in sentence["opinions"]:
                        outputs.append(f"{opinion['target']} {opinion['polarity']}")
                    if not outputs:
                        continue
                    sample: Sample = Sample(
                        input=f"文章:{sentence['sentence']}", output="\n".join(outputs)
                    )
                    samples.append(sample)

        random.seed(42)
        random.shuffle(samples)

        self._save_evaluation_data(
            samples[: int(len(samples) * 0.8)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            samples[int(len(samples) * 0.8) : int(len(samples) * 0.9)],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            samples[int(len(samples) * 0.9) :],
            self.evaluation_dir / "test" / f"{self.data_name}.json",
        )
coreference.py
ADDED
@@ -0,0 +1,62 @@
import textwrap
from collections import defaultdict
from pathlib import Path

from rhoknp import BasePhrase, Document

from .base import OutputInfo, Sample
from .wiki_base import WikipediaBaseDatasetProcessor, convert_base_phrase_to_text


class WikipediaCoreferenceDatasetProcessor(WikipediaBaseDatasetProcessor):
    data_name = "wiki_coreference"
    INNER_DELIMITER = " "
    OUTER_DELIMITER = "\n"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        output_info = OutputInfo(
            instruction=textwrap.dedent(
                """\
                与えられたテキストから同一の対象を指し示すフレーズを全て抽出してください。回答の他には何も含めないことを厳守してください。回答は以下のような形式で答えてください。
                フレーズ1 フレーズ2 フレーズ3
                フレーズ4 フレーズ5
                """
            ).rstrip(),
            output_length=256,  # Max number of tokens tokenized by `llm-jp/llm-jp-1.3b-v1.0` is 131.
            metrics=["set_f1"],
            few_shots=[],
            samples=[],
        )
        super().__init__(dataset_dir, version_name, output_info)

    @staticmethod
    def convert_document_to_sample(document: Document) -> Sample:
        filtered_sentences = [
            sent for sent in document.sentences if "括弧始" not in sent.misc_comment
        ]
        filtered_document = Document.from_sentences(filtered_sentences)
        coreference_texts = []
        eids_to_base_phrases: dict[tuple[int, ...], list[BasePhrase]] = defaultdict(
            list
        )
        for base_phrase in filtered_document.base_phrases:
            if not base_phrase.entities:
                continue
            eids = sorted(entity.eid for entity in base_phrase.entities)
            if base_phrase not in eids_to_base_phrases[tuple(eids)]:
                eids_to_base_phrases[tuple(eids)].append(base_phrase)

        for base_phrases in eids_to_base_phrases.values():
            if len(base_phrases) < 2:
                continue
            sorted_phrases = sorted(base_phrases, key=lambda p: p.global_index)
            mention_texts = [convert_base_phrase_to_text(bp) for bp in sorted_phrases]
            coreference_texts.append(
                WikipediaCoreferenceDatasetProcessor.INNER_DELIMITER.join(mention_texts)
            )
        return Sample(
            input=filtered_document.text,
            output=WikipediaCoreferenceDatasetProcessor.OUTER_DELIMITER.join(
                coreference_texts
            ),
        )
dependency.py
ADDED
@@ -0,0 +1,40 @@
import textwrap
from pathlib import Path

from rhoknp import Document

from .base import OutputInfo, Sample
from .wiki_base import WikipediaBaseDatasetProcessor


class WikipediaDependencyDatasetProcessor(WikipediaBaseDatasetProcessor):
    data_name = "wiki_dependency"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        output_info = OutputInfo(
            instruction=textwrap.dedent(
                """\
                与えられたテキストについて文節間の係り受け関係を列挙してください。回答の他には何も含めないことを厳守してください。回答は以下のような形式で答えてください。
                係り元文節1 -> 係り先文節1
                係り元文節2 -> 係り先文節2
                """
            ).rstrip(),
            output_length=512,  # Max number of tokens tokenized by `llm-jp/llm-jp-1.3b-v1.0` is 844.
            metrics=["set_f1"],
            few_shots=[],
            samples=[],
        )
        super().__init__(dataset_dir, version_name, output_info)

    @staticmethod
    def convert_document_to_sample(document: Document) -> Sample:
        text = ""
        dependencies: list[str] = []
        for sentence in document.sentences:
            if "括弧始" in sentence.misc_comment:
                continue
            text += sentence.text
            for phrase in sentence.phrases:
                if parent_phrase := phrase.parent:
                    dependencies.append(f"{phrase.text} -> {parent_phrase.text}")
        return Sample(input=text, output="\n".join(dependencies))
jamp.py
ADDED
@@ -0,0 +1,61 @@
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JAMPDatasetProcessor(BaseDatasetProcessor):
    data_name = "jamp"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "前提と仮説の関係をentailment、contradiction、neutralの中から回答してください。それ以外には何も含めないことを厳守してください。\n\n制約:\n- 前提から仮説が、時間関係上導出可能である場合はentailmentと出力\n- 前提と仮説が両立しえない場合はcontradictionと出力\n- そのいずれでもない場合はneutralと出力"
        self.output_info.output_length = 3
        self.output_info.metrics = ["exact_match"]

    def download(self):
        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.tsv"
        if not raw_train_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/tomo-ut/temporalNLI_dataset/main/dataset/train_all.tsv",
                str(raw_train_path),
            )
        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.tsv"
        if not raw_test_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/tomo-ut/temporalNLI_dataset/main/dataset/test_2.tsv",
                str(raw_test_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_train.tsv").open() as f_train:
            next(f_train)
            for line in f_train:
                row: list[str] = line.split("\t")
                train_dev_samples.append(
                    Sample(input=f"前提:{row[1]}\n仮説:{row[2]}", output=row[3])
                )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )

        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_test.tsv").open() as f_test:
            next(f_test)
            for line in f_test:
                row: list[str] = line.split("\t")
                test_samples.append(
                    Sample(input=f"前提:{row[1]}\n仮説:{row[2]}", output=row[3])
                )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
janli.py
ADDED
@@ -0,0 +1,52 @@
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JaNLIDatasetProcessor(BaseDatasetProcessor):
    data_name = "janli"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "前提と仮説の関係をentailment、non-entailmentの中から回答してください。それ以外には何も含めないことを厳守してください。\n\n制約:\n- 前提に対して仮説が同じ意味を含む場合は、entailmentと出力\n- 前提に対して仮説が異なる意味を含む場合は、non-entailmentと出力"
        self.output_info.output_length = 7
        self.output_info.metrics = ["exact_match"]

    def download(self):
        raw_path: Path = self.raw_dir / f"{self.data_name}.tsv"
        if not raw_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/verypluming/JaNLI/main/janli.tsv",
                str(raw_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}.tsv").open() as f:
            next(f)
            for line in f:
                row: list[str] = line.split("\t")
                if row[-1].strip() == "test":
                    test_samples.append(
                        Sample(input=f"前提:{row[1]}\n仮説:{row[2]}", output=row[3])
                    )
                else:
                    train_dev_samples.append(
                        Sample(input=f"前提:{row[1]}\n仮説:{row[2]}", output=row[3])
                    )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
jcommonsenseqa.py
ADDED
@@ -0,0 +1,70 @@
import json
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JCommonsenseQADatasetProcessor(BaseDatasetProcessor):
    data_name = "jcommonsenseqa"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "質問と回答の選択肢を入力として受け取り、選択肢から回答を選択してください。なお、回答は選択肢の番号(例:0)でするものとします。 回答となる数値をint型で返し、他には何も含めないことを厳守してください。"
        self.output_info.output_length = 1
        self.output_info.metrics = ["exact_match"]

    def download(self):
        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.json"
        if not raw_train_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jcommonsenseqa-v1.1/train-v1.1.json",
                str(raw_train_path),
            )
        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.json"
        if not raw_test_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jcommonsenseqa-v1.1/valid-v1.1.json",
                str(raw_test_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_train.json").open(
            encoding="utf-8"
        ) as f_train:
            for line in f_train:
                sample_dict: dict = json.loads(line.strip())
                train_dev_samples.append(
                    Sample(
                        input=f"質問:{sample_dict['question']}\n選択肢:0.{sample_dict['choice0']},1.{sample_dict['choice1']},2.{sample_dict['choice2']},3.{sample_dict['choice3']},4.{sample_dict['choice4']}",
                        output=str(sample_dict["label"]),
                    )
                )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )

        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_test.json").open(
            encoding="utf-8"
        ) as f_test:
            for line in f_test:
                sample_dict: dict = json.loads(line.strip())
                test_samples.append(
                    Sample(
                        input=f"質問:{sample_dict['question']}\n選択肢:0.{sample_dict['choice0']},1.{sample_dict['choice1']},2.{sample_dict['choice2']},3.{sample_dict['choice3']},4.{sample_dict['choice4']}",
                        output=str(sample_dict["label"]),
                    )
                )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
jemhopqa.py
ADDED
@@ -0,0 +1,68 @@
import json
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JEMHopQADatasetProcessor(BaseDatasetProcessor):
    data_name = "jemhopqa"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "質問を入力とし、回答を出力してください。回答の他には何も含めないことを厳守してください。回答が'はい'と'いいえ'で答えることができる場合、'YES'と'NO'で答えてください。"
        self.output_info.output_length = 15
        self.output_info.metrics = ["char_f1"]

    def download(self):
        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.json"
        if not raw_train_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/aiishii/JEMHopQA/main/corpus/train.json",
                str(raw_train_path),
            )
        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.json"
        if not raw_test_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/aiishii/JEMHopQA/main/corpus/dev.json",
                str(raw_test_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_train.json").open(
            encoding="utf-8"
        ) as f_train:
            for loaded_sample in json.load(f_train):
                train_dev_samples.append(
                    Sample(
                        input=f"質問:{loaded_sample['question']}",
                        output=loaded_sample["answer"],
                    )
                )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )

        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_test.json").open(
            encoding="utf-8"
        ) as f_test:
            for loaded_sample in json.load(f_test):
                test_samples.append(
                    Sample(
                        input=f"質問:{loaded_sample['question']}",
                        output=loaded_sample["answer"],
                    )
                )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
jnli.py
ADDED
@@ -0,0 +1,70 @@
import json
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JNLIDatasetProcessor(BaseDatasetProcessor):
    data_name = "jnli"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "前提と仮説の関係をentailment、contradiction、neutralの中から回答してください。それ以外には何も含めないことを厳守してください。\n\n制約:\n- 前提から仮説が、論理的知識や常識的知識を用いて導出可能である場合はentailmentと出力\n- 前提と仮説が両立しえない場合はcontradictionと出力\n- そのいずれでもない場合はneutralと出力"
        self.output_info.output_length = 3
        self.output_info.metrics = ["exact_match"]

    def download(self):
        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.json"
        if not raw_train_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jnli-v1.1/train-v1.1.json",
                str(raw_train_path),
            )
        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.json"
        if not raw_test_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jnli-v1.1/valid-v1.1.json",
                str(raw_test_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_train.json").open(
            encoding="utf-8"
        ) as f_train:
            for line in f_train:
                sample_dict: dict = json.loads(line.strip())
                train_dev_samples.append(
                    Sample(
                        input=f"前提:{sample_dict['sentence1']}\n仮説:{sample_dict['sentence2']}",
                        output=sample_dict["label"],
                    )
                )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )

        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_test.json").open(
            encoding="utf-8"
        ) as f_test:
            for line in f_test:
                sample_dict: dict = json.loads(line.strip())
                test_samples.append(
                    Sample(
                        input=f"前提:{sample_dict['sentence1']}\n仮説:{sample_dict['sentence2']}",
                        output=sample_dict["label"],
                    )
                )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
jsem.py
ADDED
@@ -0,0 +1,93 @@
import random
from pathlib import Path
from urllib.request import urlretrieve

import xmltodict

from .base import BaseDatasetProcessor, Sample

FILENAMES: list[str] = [
    "Adjectives",
    "Adverb",
    "Attitudes",
    "AuxiliaryVerb",
    "CaseParticle",
    "Comparatives",
    "CompoundAdjective",
    "CompoundVerb",
    "Conditional",
    "Conjunction",
    "Coordination",
    "Ellipsis",
    "GeneralizedQuantifier",
    "Modality",
    "NP",
    "NewAdjective",
    "NominalAnaphora",
    "Plurals",
    "Question",
    "TemporalReference",
    "Toritate",
    "Verbs",
]


class JSeMDatasetProcessor(BaseDatasetProcessor):
    data_name = "jsem"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "前提と仮説の関係をyes、no、unknown、undefの中から回答してください。それ以外には何も含めないことを厳守してください。\n\n制約:\n- 前提が仮説を含意する場合はyesと出力\n- 前提が仮説の否定を含意する場合はnoと出力\n- 前提が仮説を含意せず、その否定も含意しない場合はunknownと出力\n- 与えられた情報のみからは判断ができない場合はundefと出力"
        self.output_info.output_length = 3
        self.output_info.metrics = ["exact_match"]

    def download(self):
        for filename in FILENAMES:
            raw_path: Path = self.raw_dir / f"{self.data_name}_{filename}.xml"
            if not raw_path.exists():
                urlretrieve(
                    f"https://raw.githubusercontent.com/DaisukeBekki/JSeM/master/data/v1.0/{filename}.xml",
                    str(raw_path),
                )

    def preprocess_evaluation_data(self):
        samples: list[Sample] = []
        for filename in FILENAMES:
            with (self.raw_dir / f"{self.data_name}_{filename}.xml").open() as f:
                dict_data: dict = xmltodict.parse(f.read())
                for problem in dict_data["jsem-dataset"]["problem"]:
                    if problem["@answer"] in [
                        "",
                        "infelicitous",
                        "unacceptable",
                        "weakacceptable",
                    ]:
                        # we do not use 'infelicitous', 'unacceptable', 'weakacceptable' labels
                        continue
                    premise: str = (
                        "".join(p["script"] for p in problem["p"])
                        if isinstance(problem["p"], list)
                        else problem["p"]["script"]
                    )
                    hypothesis: str = problem["h"]["script"]
                    samples.append(
                        Sample(
                            input=f"前提:{premise}\n仮説:{hypothesis}",
                            output=problem["@answer"],
                        )
                    )

        random.seed(42)
        random.shuffle(samples)
        self._save_evaluation_data(
            samples[: int(len(samples) * 0.8)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            samples[int(len(samples) * 0.8) : int(len(samples) * 0.9)],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            samples[int(len(samples) * 0.9) :],
            self.evaluation_dir / "test" / f"{self.data_name}.json",
        )
jsick.py
ADDED
@@ -0,0 +1,61 @@
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JSICKDatasetProcessor(BaseDatasetProcessor):
    data_name = "jsick"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "前提と仮説の関係をentailment、contradiction、neutralの中から回答してください。それ以外には何も含めないことを厳守してください。\n\n制約:\n- 前提が真であるとき仮説が必ず真になる場合はentailmentと出力\n- 前提が真であるとき仮説が必ず偽になる場合はcontradictionと出力\n- そのいずれでもない場合はneutralと出力"
        self.output_info.output_length = 3
        self.output_info.metrics = ["exact_match"]

    def download(self):
        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.tsv"
        if not raw_train_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/verypluming/JSICK/main/jsick/train.tsv",
                str(raw_train_path),
            )
        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.tsv"
        if not raw_test_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/verypluming/JSICK/main/jsick/test.tsv",
                str(raw_test_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_train.tsv").open() as f_train:
            next(f_train)
            for line in f_train:
                row: list[str] = line.split("\t")
                train_dev_samples.append(
                    Sample(input=f"前提:{row[8]}\n仮説:{row[9]}", output=row[10])
                )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )

        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_test.tsv").open() as f_test:
            next(f_test)
            for line in f_test:
                row: list[str] = line.split("\t")
                test_samples.append(
                    Sample(input=f"前提:{row[8]}\n仮説:{row[9]}", output=row[10])
                )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
jsquad.py
ADDED
@@ -0,0 +1,74 @@
import json
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JSQuADDatasetProcessor(BaseDatasetProcessor):
    data_name = "jsquad"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "質問に対する回答を文章から一言で抽出してください。回答は名詞で答えてください。 それ以外には何も含めないことを厳守してください。"
        self.output_info.output_length = 25
        self.output_info.metrics = ["char_f1"]

    def download(self):
        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.json"
        if not raw_train_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsquad-v1.1/train-v1.1.json",
                str(raw_train_path),
            )
        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.json"
        if not raw_test_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsquad-v1.1/valid-v1.1.json",
                str(raw_test_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_train.json").open(
            encoding="utf-8"
        ) as f_train:
            sample_dict: dict = json.load(f_train)
            for problem in sample_dict["data"]:
                for p in problem["paragraphs"]:
                    for q in p["qas"]:
                        train_dev_samples.append(
                            Sample(
                                input=f"文章:{p['context']}\n質問:{q['question']}",
                                output=q["answers"][0]["text"],
                            )
                        )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )

        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_test.json").open(
            encoding="utf-8"
        ) as f_test:
            sample_dict: dict = json.load(f_test)
            for problem in sample_dict["data"]:
                for p in problem["paragraphs"]:
                    for q in p["qas"]:
                        test_samples.append(
                            Sample(
                                input=f"文章:{p['context']}\n質問:{q['question']}",
                                output=q["answers"][0]["text"],
                            )
                        )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
jsts.py
ADDED
@@ -0,0 +1,70 @@
import json
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class JSTSDatasetProcessor(BaseDatasetProcessor):
    data_name = "jsts"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "日本語の文ペアの意味がどのくらい近いかを判定し、類似度を0.0〜5.0までの間の値で付与してください。0.0に近いほど文ペアの意味が異なり、5.0に近いほど文ペアの意味が似ていることを表しています。整数値のみを返し、それ以外には何も含めないことを厳守してください。"
        self.output_info.output_length = 3
        self.output_info.metrics = ["pearson", "spearman"]

    def download(self):
        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.json"
        if not raw_train_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/train-v1.1.json",
                str(raw_train_path),
            )
        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.json"
        if not raw_test_path.exists():
            urlretrieve(
                "https://raw.githubusercontent.com/yahoojapan/JGLUE/main/datasets/jsts-v1.1/valid-v1.1.json",
                str(raw_test_path),
            )

    def preprocess_evaluation_data(self):
        train_dev_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_train.json").open(
            encoding="utf-8"
        ) as f_train:
            for line in f_train:
                sample_dict: dict = json.loads(line.strip())
                train_dev_samples.append(
                    Sample(
                        input=f"文1:{sample_dict['sentence1']}\n文2:{sample_dict['sentence2']}",
                        output=str(sample_dict["label"]),
                    )
                )
        random.seed(42)
        random.shuffle(train_dev_samples)
        self._save_evaluation_data(
            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )

        test_samples: list[Sample] = []
        with (self.raw_dir / f"{self.data_name}_test.json").open(
            encoding="utf-8"
        ) as f_test:
            for line in f_test:
                sample_dict: dict = json.loads(line.strip())
                test_samples.append(
                    Sample(
                        input=f"文1:{sample_dict['sentence1']}\n文2:{sample_dict['sentence2']}",
                        output=str(sample_dict["label"]),
                    )
                )
        self._save_evaluation_data(
            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
        )
llm-jp-eval.py
ADDED
@@ -0,0 +1,159 @@
from __future__ import annotations

import json
from pathlib import Path
from typing import Generator

import datasets

from .base import BaseDatasetProcessor
from .chabsa import ChabsaDatasetProcessor
from .coreference import WikipediaCoreferenceDatasetProcessor
from .dependency import WikipediaDependencyDatasetProcessor
from .jamp import JAMPDatasetProcessor
from .janli import JaNLIDatasetProcessor
from .jcommonsenseqa import JCommonsenseQADatasetProcessor
from .jemhopqa import JEMHopQADatasetProcessor
from .jnli import JNLIDatasetProcessor
from .jsem import JSeMDatasetProcessor
from .jsick import JSICKDatasetProcessor
from .jsquad import JSQuADDatasetProcessor
from .jsts import JSTSDatasetProcessor
from .mawps import MawpsDatasetProcessor
from .ner import WikipediaNERDatasetProcessor
from .niilc import NIILCDatasetProcessor
from .pas import WikipediaPASDatasetProcessor
from .reading import WikipediaReadingDatasetProcessor
from .wiki_base import WikipediaBaseDatasetProcessor

_CITATION = ""
_DESCRIPTION = "These are the datasets included in the benchmark 'llm-jp-eval'."
_HOMEPAGE = "https://github.com/llm-jp/llm-jp-eval/tree/v1.3.0"
_LICENSE = "This work is licensed under the Apache-2.0 license."
_VERSION = datasets.Version("1.3.0")


class LlmJpEvalConfig(datasets.BuilderConfig):
    def __init__(
        self,
        name: str = "default",
        version: datasets.Version | str | None = _VERSION,
        data_dir: str | None = None,
        data_files: datasets.data_files.DataFilesDict | None = None,
        description: str | None = _DESCRIPTION,
        output_dir: str = "./data_llm-jp-eval",
    ) -> None:
        super().__init__(
            name=name,
            version=version,
            data_dir=data_dir,
            data_files=data_files,
            description=description,
        )
        self.output_dir = output_dir


class LlmJpEval(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        LlmJpEvalConfig(name="jamp"),
        LlmJpEvalConfig(name="janli"),
        LlmJpEvalConfig(name="jnli"),
        LlmJpEvalConfig(name="jsem"),
        LlmJpEvalConfig(name="jsick"),
        LlmJpEvalConfig(name="jemhopqa"),
        LlmJpEvalConfig(name="niilc"),
        LlmJpEvalConfig(name="jsquad"),
        LlmJpEvalConfig(name="jcommonsenseqa"),
        LlmJpEvalConfig(name="chabsa"),
        LlmJpEvalConfig(name="wiki_reading"),
        LlmJpEvalConfig(name="wiki_ner"),
        LlmJpEvalConfig(name="wiki_dependency"),
        LlmJpEvalConfig(name="wiki_pas"),
        LlmJpEvalConfig(name="wiki_coreference"),
        LlmJpEvalConfig(name="mawps"),
        LlmJpEvalConfig(name="jsts"),
    ]

    def _info(self) -> datasets.DatasetInfo:
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage=_HOMEPAGE,
            license=_LICENSE,
            features=datasets.Features(
                {
                    "input": datasets.Value("string"),
                    "output": datasets.Value("string"),
                }
            ),
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> list[datasets.SplitGenerator]:
        output_dir = Path(self.config.output_dir)
        version_name = self.config.version.version_str
        dataset_name = self.config.name

        if dataset_name == "jamp":
            processor = JAMPDatasetProcessor
        elif dataset_name == "janli":
            processor = JaNLIDatasetProcessor
        elif dataset_name == "jnli":
            processor = JNLIDatasetProcessor
        elif dataset_name == "jsem":
            processor = JSeMDatasetProcessor
        elif dataset_name == "jsick":
            processor = JSICKDatasetProcessor
        elif dataset_name == "jemhopqa":
            processor = JEMHopQADatasetProcessor
        elif dataset_name == "niilc":
            processor = NIILCDatasetProcessor
        elif dataset_name == "jsquad":
            processor = JSQuADDatasetProcessor
        elif dataset_name == "jcommonsenseqa":
            processor = JCommonsenseQADatasetProcessor
        elif dataset_name == "chabsa":
            processor = ChabsaDatasetProcessor
        elif dataset_name == "wiki_reading":
            processor = WikipediaReadingDatasetProcessor
        elif dataset_name == "wiki_ner":
            processor = WikipediaNERDatasetProcessor
        elif dataset_name == "wiki_dependency":
            processor = WikipediaDependencyDatasetProcessor
        elif dataset_name == "wiki_pas":
            processor = WikipediaPASDatasetProcessor
        elif dataset_name == "wiki_coreference":
            processor = WikipediaCoreferenceDatasetProcessor
        elif dataset_name == "mawps":
            processor = MawpsDatasetProcessor
        elif dataset_name == "jsts":
            processor = JSTSDatasetProcessor

        processor(output_dir, version_name).pipeline()

        eval_dir = output_dir / version_name / "evaluation"
        with (eval_dir / "train" / f"{dataset_name}.json").open(encoding="utf-8") as f:
            train_data = json.load(f)
        with (eval_dir / "dev" / f"{dataset_name}.json").open(encoding="utf-8") as f:
            validation_data = json.load(f)
        with (eval_dir / "test" / f"{dataset_name}.json").open(encoding="utf-8") as f:
            test_data = json.load(f)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"data": train_data["samples"]}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data": validation_data["samples"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"data": test_data["samples"]}
            ),
        ]

    def _generate_examples(self, data: list[dict[str, str]]) -> Generator:
        for i, d in enumerate(data):
            yield i, d
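For reference, a minimal sketch of how the builder above is meant to be driven with the `datasets` library. The local script path is an assumption, and depending on the installed `datasets` version, `trust_remote_code=True` may be required for script-based datasets; the first call runs the corresponding processor's `pipeline()` and writes its output under `./data_llm-jp-eval`.

import datasets

# Assumes the loading script has been saved locally as "llm-jp-eval.py".
dataset = datasets.load_dataset("llm-jp-eval.py", name="jnli", trust_remote_code=True)

print(dataset)             # DatasetDict with train / validation / test splits
print(dataset["test"][0])  # e.g. {"input": "前提:...\n仮説:...", "output": "entailment"}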
mawps.py
ADDED
@@ -0,0 +1,75 @@
import json
import random
from pathlib import Path
from urllib.request import urlretrieve

from .base import BaseDatasetProcessor, Sample


class MawpsDatasetProcessor(BaseDatasetProcessor):
    data_name = "mawps"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        super().__init__(dataset_dir, version_name)
        self.output_info.instruction = "与えられた計算問題に対する答えを整数または小数で出力してください。数値のみを出力し、それ以外には何も含めないことを厳守してください。"
        self.output_info.output_length = 10
        self.output_info.metrics = ["exact_match"]

    def download(self):
        dataset_url_base = "https://raw.githubusercontent.com/nlp-waseda/chain-of-thought-ja-dataset/2ad9fcbc597e70424f0b40ee2749570ba3f581bf/dataset/mawps/"
        raw_path: Path = self.raw_dir / self.data_name

        if not raw_path.exists():
            raw_path.mkdir()
            for file_name in [
                "zero_shot_example.json",
                "shot_example.json",
                "test.json",
            ]:
                urlretrieve(
                    dataset_url_base + file_name,
                    str(raw_path / file_name),
                )

    def preprocess_evaluation_data(self):
        qa_delimiter = "\n解答:"
        train_samples: list[Sample] = []

        # Extract training samples from
        # zero_shot_example.json and shot_example.json
        raw_path = self.raw_dir / self.data_name
        with open(raw_path / "zero_shot_example.json") as f:
            zero_shot_example: list[dict[str, str]] = json.load(f)
        with open(raw_path / "shot_example.json") as f:
            shot_example: list[dict[str, str]] = json.load(f)
        for example in zero_shot_example + shot_example:
            [question, answer] = example["shot_example"].split(qa_delimiter)
            answer_num_string = answer.split("答えは")[1].split("です。")[0]
            train_samples.append(Sample(input=question, output=answer_num_string))

        test_samples: list[Sample] = []
        # Split test.json into dev & test sets
        with open(raw_path / "test.json") as f:
            test_example: list[dict] = json.load(f)

        for example in test_example:
            question = example["question"].split(qa_delimiter)[0]
            answer = example["answer"]
            test_samples.append(Sample(input=question, output=answer))

        # Shuffle the test samples
        random.seed(42)
        random.shuffle(test_samples)

        self._save_evaluation_data(
            train_samples,
            self.evaluation_dir / "train" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            test_samples[: int(len(test_samples) / 2)],
            self.evaluation_dir / "dev" / f"{self.data_name}.json",
        )
        self._save_evaluation_data(
            test_samples[int(len(test_samples) / 2) :],
            self.evaluation_dir / "test" / f"{self.data_name}.json",
        )
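To make the answer extraction in preprocess_evaluation_data above concrete, here is a small self-contained illustration using a made-up shot_example record in the same format the code expects.

qa_delimiter = "\n解答:"
example = {
    "shot_example": "問題:りんごが3個、みかんが1個あります。果物は全部で何個ありますか?\n解答:3個と1個を合わせるので、答えは4です。"
}
[question, answer] = example["shot_example"].split(qa_delimiter)
answer_num_string = answer.split("答えは")[1].split("です。")[0]
print(question)           # 問題:りんごが3個、みかんが1個あります。果物は全部で何個ありますか?
print(answer_num_string)  # 4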
ner.py
ADDED
@@ -0,0 +1,71 @@
import re
import textwrap
from pathlib import Path

from rhoknp import Document
from rhoknp.props.named_entity import NamedEntityCategory

from .base import OutputInfo, Sample
from .wiki_base import WikipediaBaseDatasetProcessor


class WikipediaNERDatasetProcessor(WikipediaBaseDatasetProcessor):
    data_name = "wiki_ner"
    NE_CATEGORY_TO_TEXT = {
        NamedEntityCategory.ORGANIZATION: "組織名",
        NamedEntityCategory.PERSON: "人名",
        NamedEntityCategory.LOCATION: "地名",
        NamedEntityCategory.ARTIFACT: "固有物名",
        NamedEntityCategory.DATE: "日付表現",
        NamedEntityCategory.TIME: "時刻表現",
        NamedEntityCategory.MONEY: "金額表現",
        NamedEntityCategory.PERCENT: "割合表現",
    }
    DELIMITER = " "

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        output_info = OutputInfo(
            instruction=textwrap.dedent(
                f"""\
                与えられたテキストから固有表現({"、".join(self.NE_CATEGORY_TO_TEXT.values())})を全て抽出してください。回答の他には何も含めないことを厳守してください。回答には「固有表現1(種類1){self.DELIMITER}固有表現2(種類2)」のように固有表現の種類も含めてください。
                """
            ).rstrip(),
            output_length=256,  # Max number of tokens tokenized by `llm-jp/llm-jp-1.3b-v1.0` is 221.
            metrics=["set_f1"],
            few_shots=[],
            samples=[],
        )
        super().__init__(dataset_dir, version_name, output_info)

    @staticmethod
    def convert_document_to_sample(document: Document) -> Sample:
        parenthesis_pat = re.compile(r"括弧始:(\S+) 括弧終:(\S+) 括弧位置:(\d+)")
        doc_text = ""
        sentence_iter = iter(document.sentences)
        while sentence := next(sentence_iter, None):
            text: str = sentence.text
            if "括弧削除" in sentence.misc_comment:
                sentence = next(sentence_iter)
                match = parenthesis_pat.search(sentence.misc_comment)
                assert match is not None
                par_start: str = match.group(1)
                par_end: str = match.group(2)
                par_pos = int(match.group(3))
                text = (
                    text[:par_pos]
                    + par_start
                    + sentence.text
                    + par_end
                    + text[par_pos:]
                )  # resurrect parentheses
            doc_text += text
        ne_texts: list[str] = []
        for named_entity in document.named_entities:
            if named_entity.category == NamedEntityCategory.OPTIONAL:
                continue
            ne_texts.append(
                f"{named_entity.text}({WikipediaNERDatasetProcessor.NE_CATEGORY_TO_TEXT[named_entity.category]})"
            )
        return Sample(
            input=doc_text, output=WikipediaNERDatasetProcessor.DELIMITER.join(ne_texts)
        )
niilc.py
ADDED
@@ -0,0 +1,77 @@
+import random
+from pathlib import Path
+from urllib.request import urlretrieve
+
+import xmltodict
+
+from .base import BaseDatasetProcessor, Sample
+
+
+class NIILCDatasetProcessor(BaseDatasetProcessor):
+    data_name = "niilc"
+
+    def __init__(self, dataset_dir: Path, version_name: str) -> None:
+        super().__init__(dataset_dir, version_name)
+        self.output_info.instruction = "質問に対する答えを出力してください。回答の他には何も含めないことを厳守してください。答えが複数の場合、コンマ(,)で繋げてください。"
+        self.output_info.output_length = 180
+        self.output_info.metrics = ["char_f1"]
+
+    def download(self):
+        raw_train_path: Path = self.raw_dir / f"{self.data_name}_train.xml"
+        if not raw_train_path.exists():
+            urlretrieve(
+                "https://raw.githubusercontent.com/mynlp/niilc-qa/master/data/NIILC-ECQA2015_dev.xml",
+                str(raw_train_path),
+            )
+        raw_test_path: Path = self.raw_dir / f"{self.data_name}_test.xml"
+        if not raw_test_path.exists():
+            urlretrieve(
+                "https://raw.githubusercontent.com/mynlp/niilc-qa/master/data/NIILC-ECQA2015_test.xml",
+                str(raw_test_path),
+            )
+
+    def preprocess_evaluation_data(self):
+        train_dev_samples: list[Sample] = []
+        with (self.raw_dir / f"{self.data_name}_train.xml").open() as f:
+            dict_data: dict = xmltodict.parse(f.read())
+        for problem in dict_data["questions"]["question"]:
+            if isinstance(problem["answers"]["answer"], list):
+                answer: str = ",".join(
+                    [ans for ans in problem["answers"]["answer"] if ans]
+                )
+            else:
+                answer = problem["answers"]["answer"]
+            if answer == "-":
+                continue
+            train_dev_samples.append(
+                Sample(input=f"質問:{problem['text']}", output=answer)
+            )
+        random.seed(42)
+        random.shuffle(train_dev_samples)
+        self._save_evaluation_data(
+            train_dev_samples[: int(len(train_dev_samples) * 0.9)],
+            self.evaluation_dir / "train" / f"{self.data_name}.json",
+        )
+        self._save_evaluation_data(
+            train_dev_samples[int(len(train_dev_samples) * 0.9) :],
+            self.evaluation_dir / "dev" / f"{self.data_name}.json",
+        )
+
+        test_samples: list[Sample] = []
+        with (self.raw_dir / f"{self.data_name}_test.xml").open() as f:
+            dict_data: dict = xmltodict.parse(f.read())
+        for problem in dict_data["questions"]["question"]:
+            if isinstance(problem["answers"]["answer"], list):
+                answer: str = ",".join(
+                    [ans for ans in problem["answers"]["answer"] if ans]
+                )
+            else:
+                answer = problem["answers"]["answer"]
+            if answer == "-":
+                continue
+            test_samples.append(
+                Sample(input=f"質問:{problem['text']}", output=answer)
+            )
+        self._save_evaluation_data(
+            test_samples, self.evaluation_dir / "test" / f"{self.data_name}.json"
+        )
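
For orientation, a minimal driver sketch (not part of the added files) of how NIILCDatasetProcessor might be run; the import path, the "datasets" directory, and the "1.0.0" version string are placeholders, since the modules above use package-relative imports.

# Hypothetical driver; import path and arguments are placeholders.
from pathlib import Path

from datasets_pkg.niilc import NIILCDatasetProcessor  # illustrative package name

processor = NIILCDatasetProcessor(Path("datasets"), "1.0.0")
processor.download()                    # fetch the NIILC-ECQA2015 dev/test XML files
processor.preprocess_evaluation_data()  # write train/dev/test JSON under evaluation/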
pas.py
ADDED
@@ -0,0 +1,110 @@
+import re
+import textwrap
+from pathlib import Path
+
+from rhoknp import Document
+from rhoknp.cohesion import EndophoraArgument, Predicate
+
+from .base import OutputInfo, Sample
+from .wiki_base import WikipediaBaseDatasetProcessor, convert_base_phrase_to_text
+
+
+class WikipediaPASDatasetProcessor(WikipediaBaseDatasetProcessor):
+    data_name = "wiki_pas"
+    PAS_CASES = [
+        "ガ",
+        "デ",
+        "ト",
+        "ニ",
+        "ヘ",
+        "ヲ",
+        "カラ",
+        "ガ2",
+        "マデ",
+        "ヨリ",
+    ]
+    INNER_DELIMITER = " "
+    OUTER_DELIMITER = "\n"
+
+    def __init__(self, dataset_dir: Path, version_name: str) -> None:
+        output_info = OutputInfo(
+            instruction=textwrap.dedent(
+                """\
+                与えられたテキストから述語項構造を全て抽出してください。回答の他には何も含めないことを厳守してください。回答は以下のような形式で答えてください。
+                述語1 ガ:項1 ヲ:項2 ニ:項3
+                述語2 ガ:項4 ヲ:項5
+                """
+            ).rstrip(),
+            output_length=512,  # Max number of tokens tokenized by `llm-jp/llm-jp-1.3b-v1.0` is 412.
+            metrics=["set_f1"],
+            few_shots=[],
+            samples=[],
+        )
+        super().__init__(dataset_dir, version_name, output_info)
+
+    @staticmethod
+    def convert_document_to_sample(document: Document) -> Sample:
+        parenthesis_pat = re.compile(r"括弧始:(\S+) 括弧終:(\S+) 括弧位置:(\d+)")
+        doc_text = ""
+        sentence_iter = iter(document.sentences)
+        while sentence := next(sentence_iter, None):
+            text: str = sentence.text
+            if "括弧削除" in sentence.misc_comment:
+                sentence = next(sentence_iter)
+                match = parenthesis_pat.search(sentence.misc_comment)
+                assert match is not None
+                par_start: str = match.group(1)
+                par_end: str = match.group(2)
+                par_pos = int(match.group(3))
+                text = (
+                    text[:par_pos]
+                    + par_start
+                    + sentence.text
+                    + par_end
+                    + text[par_pos:]
+                )  # resurrect parentheses
+            doc_text += text
+        pas_texts: list[str] = []
+        for pas in document.pas_list:
+            argument_text = ""
+            for case in WikipediaPASDatasetProcessor.PAS_CASES:
+                for argument in pas.get_arguments(case, relax=False):
+                    if not isinstance(argument, EndophoraArgument):
+                        continue
+                    if argument.phrase == pas.predicate.phrase:
+                        continue
+                    if isinstance(argument, EndophoraArgument):
+                        case_text = case if case != "ガ2" else "ガ"
+                        arg_text = convert_base_phrase_to_text(argument.base_phrase)
+                        argument_text += f"{WikipediaPASDatasetProcessor.INNER_DELIMITER}{case_text}:{arg_text}"
+            predicate_text: str = (
+                WikipediaPASDatasetProcessor._convert_predicate_to_text(pas.predicate)
+            )
+            if predicate_text and argument_text:
+                pas_texts.append(f"{predicate_text}{argument_text}")
+        return Sample(
+            input=doc_text,
+            output=WikipediaPASDatasetProcessor.OUTER_DELIMITER.join(pas_texts),
+        )
+
+    @staticmethod
+    def _convert_predicate_to_text(predicate: Predicate) -> str:
+        """Get the core text without ancillary words for a predicate."""
+        morphemes = predicate.phrase.morphemes
+        start_index = 0
+        for morpheme in morphemes:
+            if morpheme.pos in ("助詞", "特殊", "判定詞"):
+                start_index += 1
+            else:
+                break
+        end_index = len(morphemes)
+        for morpheme in reversed(morphemes):
+            if morpheme.pos in ("助詞", "特殊"):
+                end_index -= 1
+            else:
+                break
+        ret = "".join(m.text for m in morphemes[start_index:end_index])
+        if not ret:
+            start_index = 0
+            end_index = len(morphemes)
+        return "".join(m.text for m in morphemes[start_index:end_index])
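
For readers skimming the diff, a minimal sketch (not part of the commit) of what this converter consumes and produces; "example.knp" is a placeholder file, while Document.from_knp is the same rhoknp entry point used in wiki_base.py below.

# Hypothetical illustration; "example.knp" stands in for a KNP-annotated document.
from pathlib import Path

from rhoknp import Document

document = Document.from_knp(Path("example.knp").read_text(encoding="utf-8"))
sample = WikipediaPASDatasetProcessor.convert_document_to_sample(document)
print(sample.input)   # document text with deleted parentheses restored
print(sample.output)  # one "述語 ガ:項 ヲ:項 ..." line per predicate-argument structure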
reading.py
ADDED
@@ -0,0 +1,83 @@
+import textwrap
+from pathlib import Path
+
+from rhoknp import Document, Morpheme
+
+from .base import OutputInfo, Sample
+from .wiki_base import WikipediaBaseDatasetProcessor
+
+_ALPHABETS = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+_ALPH_READINGS = [
+    "えー",
+    "びー",
+    "しー",
+    "でぃー",
+    "いー",
+    "えふ",
+    "じー",
+    "えいち",
+    "あい",
+    "じぇー",
+    "けー",
+    "える",
+    "えむ",
+    "えぬ",
+    "おー",
+    "ぴー",
+    "きゅー",
+    "あーる",
+    "えす",
+    "てぃー",
+    "ゆー",
+    "ぶい",
+    "だぶりゅー",
+    "えっくす",
+    "わい",
+    "ぜっと",
+]
+
+
+class WikipediaReadingDatasetProcessor(WikipediaBaseDatasetProcessor):
+    data_name = "wiki_reading"
+    ALPH_TO_HIRA: dict[str, str] = {a: r for a, r in zip(_ALPHABETS, _ALPH_READINGS)}
+
+    def __init__(self, dataset_dir: Path, version_name: str) -> None:
+        output_info = OutputInfo(
+            instruction=textwrap.dedent(
+                """\
+                与えられたテキストを全てひらがなに変換してください。回答の他には何も含めないことを厳守してください。
+                """
+            ).rstrip(),
+            output_length=512,  # Max number of tokens tokenized by `llm-jp/llm-jp-1.3b-v1.0` is 363.
+            metrics=["char_f1"],
+            few_shots=[],
+            samples=[],
+        )
+        super().__init__(dataset_dir, version_name, output_info)
+
+    @staticmethod
+    def convert_document_to_sample(document: Document) -> Sample:
+        text = ""
+        reading = ""
+        for sentence in document.sentences:
+            if "括弧始" in sentence.misc_comment:
+                continue
+            text += sentence.text
+            reading += "".join(
+                WikipediaReadingDatasetProcessor._get_reading(morpheme)
+                for morpheme in sentence.morphemes
+            )
+        return Sample(input=text, output=reading)
+
+    @staticmethod
+    def _get_reading(morpheme: Morpheme) -> str:
+        # 熟字訓 (jukujikun: readings assigned to a whole word rather than per character)
+        if morpheme.reading in (" ", "　") and morpheme.text not in (" ", "　"):
+            return ""
+        # some morphemes have multiple readings (e.g., にほん/にっぽん), so we just use the first one
+        reading = morpheme.reading.split("/")[0]
+        if all(char in _ALPHABETS for char in reading):
+            return "".join(
+                WikipediaReadingDatasetProcessor.ALPH_TO_HIRA[char] for char in reading
+            )
+        return reading
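
A small illustrative check (not part of the commit): when a morpheme's reading consists only of ASCII capital letters, _get_reading spells it out in hiragana via the ALPH_TO_HIRA table above, so for example:

# "A" -> "えー", "I" -> "あい", per the table defined in reading.py.
assert "".join(WikipediaReadingDatasetProcessor.ALPH_TO_HIRA[c] for c in "AI") == "えーあい"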
requirements.txt
ADDED
@@ -0,0 +1 @@
+xmltodict
wiki_base.py
ADDED
@@ -0,0 +1,112 @@
+import logging
+from pathlib import Path
+from typing import Literal
+
+import requests
+from rhoknp import BasePhrase, Document
+from typing_extensions import TypeAlias
+
+from .base import BaseDatasetProcessor, OutputInfo, Sample
+
+WAC_VERSION = "v1.1.2"
+
+Split: TypeAlias = Literal["train", "dev", "test"]
+
+logging.getLogger("rhoknp").setLevel(logging.ERROR)
+
+
+class WikipediaBaseDatasetProcessor(BaseDatasetProcessor):
+    def __init__(
+        self, dataset_dir: Path, version_name: str, output_info: OutputInfo
+    ) -> None:
+        super().__init__(dataset_dir, version_name)
+        self.output_info = output_info
+
+    def download(self) -> None:
+        self._download_split("train")
+        self._download_split("dev")
+        self._download_split("test")
+
+    def _download_split(self, split: Split) -> None:
+        wac_base_url = f"https://raw.githubusercontent.com/ku-nlp/WikipediaAnnotatedCorpus/{WAC_VERSION}"
+        response = requests.get(f"{wac_base_url}/id/{split}.id", timeout=10)
+        response.raise_for_status()
+        doc_ids: list[str] = response.text.splitlines()
+        save_dir = self.raw_dir / split
+        save_dir.mkdir(exist_ok=True)
+        for doc_id in doc_ids:
+            if (raw_file := save_dir / f"{doc_id}.knp").exists():
+                continue
+            response = requests.get(
+                f"{wac_base_url}/knp/{doc_id[:8]}/{doc_id}.knp", timeout=10
+            )
+            response.raise_for_status()
+            raw_file.write_text(response.text, encoding="utf-8")
+
+    def preprocess_evaluation_data(self) -> None:
+        self._preprocess_evaluation_split("train")
+        self._preprocess_evaluation_split("dev")
+        self._preprocess_evaluation_split("test")
+
+    def _preprocess_evaluation_split(self, split: Split) -> None:
+        samples: list[Sample] = []
+        for knp_file in sorted(self.raw_dir.joinpath(split).glob("*.knp")):
+            document = Document.from_knp(knp_file.read_text(encoding="utf-8"))
+            sample = self.convert_document_to_sample(document)
+            if sample.output.strip() != "":
+                samples.append(sample)
+        self._save_evaluation_data(
+            samples, self.evaluation_dir / split / f"{self.data_name}.json"
+        )
+
+    @staticmethod
+    def convert_document_to_sample(document: Document) -> Sample:
+        raise NotImplementedError
+
+
+def convert_base_phrase_to_text(base_phrase: BasePhrase) -> str:
+    base_phrases_within_phrase = base_phrase.phrase.base_phrases
+    anchor_index = base_phrases_within_phrase.index(base_phrase)
+    all_children = _find_children(base_phrase)
+    expanded_base_phrases = [base_phrase]
+    # forward search
+    for base_phrase_index in range(anchor_index + 1, len(base_phrases_within_phrase)):
+        if base_phrases_within_phrase[base_phrase_index] in all_children:
+            expanded_base_phrases.append(base_phrases_within_phrase[base_phrase_index])
+        else:
+            break
+    # backward search
+    for base_phrase_index in reversed(range(anchor_index)):
+        if base_phrases_within_phrase[base_phrase_index] in all_children:
+            expanded_base_phrases.append(base_phrases_within_phrase[base_phrase_index])
+        else:
+            break
+    expanded_base_phrases.sort(key=lambda bp: bp.index)
+    morphemes = [m for bp in expanded_base_phrases for m in bp.morphemes]
+    start_index = 0
+    for morpheme in morphemes:
+        if morpheme.pos in ("助詞", "特殊", "判定詞"):
+            start_index += 1
+        else:
+            break
+    end_index = len(morphemes)
+    for morpheme in reversed(morphemes):
+        if morpheme.pos in ("助詞", "特殊", "判定詞"):
+            end_index -= 1
+        else:
+            break
+    ret = "".join(m.text for m in morphemes[start_index:end_index])
+    if not ret:
+        start_index = 0
+        end_index = len(morphemes)
+    return "".join(m.text for m in morphemes[start_index:end_index])
+
+
+def _find_children(node: BasePhrase) -> list[BasePhrase]:
+    """Find all dependents with DFS."""
+    if not node.children:
+        return [node]
+    results = [node]
+    for child in node.children:
+        results.extend(_find_children(child))
+    return results
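
To close, a minimal sketch (not part of the commit) of how a new Wikipedia-based task would hook into this base class, following the same pattern as pas.py and reading.py: subclass it, pass an OutputInfo, and implement convert_document_to_sample. The class name, data_name, instruction text, and output_length below are placeholders, and the sketch reuses the names already imported in wiki_base.py.

# Hypothetical subclass skeleton; names and values are placeholders.
class WikipediaToyDatasetProcessor(WikipediaBaseDatasetProcessor):
    data_name = "wiki_toy"

    def __init__(self, dataset_dir: Path, version_name: str) -> None:
        output_info = OutputInfo(
            instruction="Describe the task here.",
            output_length=128,
            metrics=["char_f1"],
            few_shots=[],
            samples=[],
        )
        super().__init__(dataset_dir, version_name, output_info)

    @staticmethod
    def convert_document_to_sample(document: Document) -> Sample:
        # Echo the document text as both input and output, just to show the hook.
        return Sample(input=document.text, output=document.text)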