from __future__ import annotations

from typing import NamedTuple

import MeCab
from transformers import PreTrainedTokenizer


class MeCabResult(NamedTuple):
    """A single MeCab analysis result."""

    hyosokei: str            # surface form
    hinshi: str              # part of speech
    hinshi_saibunrui_1: str  # part-of-speech subcategory 1
    hinshi_saibunrui_2: str  # part-of-speech subcategory 2
    hinshi_saibunrui_3: str  # part-of-speech subcategory 3
    katsuyokei_1: str        # conjugation type
    katsuyokei_2: str        # conjugation form
    genkei: str              # base (dictionary) form
    yomi: str                # reading
    hatsuon: str             # pronunciation
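
# Illustrative note (an assumption for reference, not from the original
# module): with the IPADIC dictionary, MeCab emits a feature string such as
# "名詞,一般,*,*,*,*,猫,ネコ,ネコ" for the surface form "猫", which maps to
# MeCabResult("猫", "名詞", "一般", "*", "*", "*", "*", "猫", "ネコ", "ネコ").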


class MeCabTokenizer(PreTrainedTokenizer):

    def __init__(self,
                 hinshi: list[str] | None = None,
                 mecab_dicdir: str | None = None,
                 **kwargs):
        """Initialize the tokenizer.

        Args:
            hinshi (list[str] | None): Parts of speech to extract.
                If None, every word is kept.
            mecab_dicdir (str | None, optional): Directory containing dicrc.
        """
        self.target_hinshi = hinshi
        if mecab_dicdir is not None:
            self.mecab = MeCab.Tagger(f"-d {mecab_dicdir}")
        else:
            self.mecab = MeCab.Tagger()

        super().__init__(**kwargs)

    def _tokenize(self, text: str) -> list[str]:
        """Return the words of the target parts of speech in a text.

        Args:
            text (str): Input text.

        Returns:
            list[str]: Words whose part of speech is in ``target_hinshi``
                (every word if ``target_hinshi`` is None).
        """
        out = []
        for result_word in self.mecab_analyze(text):
            # BOS/EOS nodes have an empty surface form; skip them.
            if result_word.hyosokei == "":
                continue
            if (self.target_hinshi is None
                    or result_word.hinshi in self.target_hinshi):
                out.append(result_word.hyosokei)
        return out

    def mecab_analyze(self, text: str) -> list[MeCabResult]:
        """Analyze a text with MeCab.

        Args:
            text (str): Input text.

        Returns:
            list[MeCabResult]: MeCab analysis results.
        """
        node = self.mecab.parseToNode(text)

        out = []
        while node:
            args = [node.surface]
            args.extend(node.feature.split(","))
            # Unknown words (and, with some dictionaries, BOS/EOS nodes)
            # carry fewer than nine feature fields; pad with "*" so the
            # tuple construction below never raises an IndexError.
            args.extend(["*"] * (10 - len(args)))
            out.append(MeCabResult(*args[:10]))
            node = node.next
        return out
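

# Minimal usage sketch (an illustrative addition, not part of the original
# module). It assumes mecab-python3 and a dictionary such as ipadic are
# installed. Note that recent transformers releases expect a complete
# PreTrainedTokenizer subclass to also implement vocab methods such as
# get_vocab() and _convert_token_to_id(); without them, construction or
# encoding may fail depending on the installed version.
if __name__ == "__main__":
    # Keep only nouns ("名詞") and verbs ("動詞").
    tokenizer = MeCabTokenizer(hinshi=["名詞", "動詞"])
    # Prints the extracted surface forms; the exact result depends on the
    # dictionary in use.
    print(tokenizer._tokenize("吾輩は猫である。"))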