Tasks: Token Classification
Modalities: Text
Formats: parquet
Sub-tasks: named-entity-recognition
Languages: Tagalog
Size: 1K - 10K
File size: 1,735 Bytes
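
The fields above describe a parquet-backed Tagalog token-classification dataset. Assuming it is published on the Hugging Face Hub, it can be pulled down with the datasets library; the repository id below is a hypothetical placeholder, not the actual dataset name.

from datasets import load_dataset

# "username/tl-ner" is a hypothetical placeholder for the real repository id.
dataset = load_dataset("username/tl-ner")
print(dataset)  # shows the available splits and their columns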
from pathlib import Path
import spacy
import typer
from spacy.tokens import DocBin
from wasabi import msg

DELIMITER = "-DOCSTART- -X- O O"


def spacy_to_iob(
    # fmt: off
    spacy_indir: Path = typer.Argument(..., help="Path to the directory containing the spaCy files."),
    iob_outdir: Path = typer.Argument(..., help="Path to the directory to save the IOB files."),
    lang: str = typer.Option("tl", "-l", "--lang", help="Language code for the spaCy vocab."),
    verbose: bool = typer.Option(False, "-v", "--verbose", help="Print additional information."),
    delimiter: str = typer.Option(DELIMITER, "-d", "--delimiter", help="Delimiter between examples."),
    # fmt: on
):
    """Convert spaCy files into IOB-formatted files."""
    nlp = spacy.blank(lang)  # a blank pipeline; only its vocab is needed for deserialization
    iob_outdir.mkdir(parents=True, exist_ok=True)  # make sure the output directory exists
    for spacy_file in spacy_indir.glob("*.spacy"):
        msg.text(f"Converting {spacy_file}", show=verbose)
        doc_bin = DocBin().from_disk(spacy_file)
        docs = doc_bin.get_docs(nlp.vocab)
        lines = []  # container for the IOB lines later on
        for doc in docs:
            lines.append(delimiter)
            lines.append("\n\n")
            for token in doc:
                # Tokens inside an entity get an IOB prefix plus the entity
                # type (e.g. B-PER); all other tokens are tagged O.
                label = (
                    f"{token.ent_iob_}-{token.ent_type_}"
                    if token.ent_iob_ != "O"
                    else "O"
                )
                line = f"{token.text}\t{label}"
                lines.append(line)
                lines.append("\n")
            lines.append("\n")  # blank line between documents
        iob_file = iob_outdir / f"{spacy_file.stem}.iob"
        with open(iob_file, "w", encoding="utf-8") as f:
            f.writelines(lines)
        msg.good(f"Saved to {iob_file}")


if __name__ == "__main__":
    typer.run(spacy_to_iob)
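
The sketch below runs the converter end to end: it writes one annotated document to a .spacy file using a blank Tagalog pipeline, then calls spacy_to_iob directly rather than through the typer CLI, so every option is passed explicitly. The directory names, sample sentence, and entity offsets are illustrative assumptions; from the shell, the equivalent invocation would be something like: python spacy_to_iob.py corpus/spacy corpus/iob --lang tl

from pathlib import Path

import spacy
from spacy.tokens import DocBin

nlp = spacy.blank("tl")
doc = nlp("Si Juan ay nasa Manila .")
# Mark "Juan" (chars 3-7) as PER and "Manila" (chars 16-22) as LOC.
doc.ents = [
    doc.char_span(3, 7, label="PER"),
    doc.char_span(16, 22, label="LOC"),
]

indir = Path("corpus/spacy")  # hypothetical input directory
indir.mkdir(parents=True, exist_ok=True)
DocBin(docs=[doc]).to_disk(indir / "sample.spacy")

# Calling the function directly bypasses typer, so all parameters are explicit.
spacy_to_iob(indir, Path("corpus/iob"), lang="tl", verbose=True, delimiter=DELIMITER)

# corpus/iob/sample.iob should now contain (tab-separated):
#   -DOCSTART- -X- O O
#
#   Si      O
#   Juan    B-PER
#   ay      O
#   nasa    O
#   Manila  B-LOC
#   .       O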