SpacyModelCreator / utils /json_to_spacy.py
WebashalarForML's picture
Upload 4 files
2f2758d verified
raw
history blame
2.42 kB
import json
import spacy
from spacy.tokens import DocBin
def read_in_chunks(file_path, chunk_size=1024):
with open(file_path, 'r', encoding='utf-8') as file:
while True:
data = file.read(chunk_size)
if not data:
break
yield data
def convert_json_to_spacy(json_file_path, spacy_file_path):
    """Convert NER training data from JSON to spaCy's binary .spacy format.

    The JSON file is expected to contain a list of items shaped like
    ``[text, {"entities": [[start, end, label], ...]}]``.  Spans that are
    out of range, empty, overlapping, or not token-aligned are skipped with
    a diagnostic message rather than aborting the conversion.

    Args:
        json_file_path: Path to the input JSON annotation file.
        spacy_file_path: Destination path for the serialized DocBin
            (conventionally ending in ``.spacy``).
    """
    # Read the file in chunks; join once instead of repeated string
    # concatenation (which is quadratic in the worst case).
    file_content = "".join(read_in_chunks(json_file_path))

    # Parse the JSON data
    data = json.loads(file_content)

    # Prepare the data for spaCy: normalize each item to a dict with the
    # raw text and its (start, end, label) entity triples.
    spacy_format = []
    for item in data:
        text = item[0]  # The first element in the list is the text
        entities = item[1]['entities']  # Second element holds the 'entities' dict
        # Unpacking also validates that each entity is a 3-tuple.
        spacy_entities = [(start, end, label) for start, end, label in entities]
        spacy_format.append({"text": text, "entities": spacy_entities})

    # A blank English pipeline is sufficient: we only need its tokenizer
    # to build Doc objects and char spans.
    nlp = spacy.blank("en")

    # DocBin efficiently serializes a collection of Doc objects to disk.
    doc_bin = DocBin()

    # Convert the data to spaCy Doc objects and add to DocBin
    for entry in spacy_format:
        doc = nlp.make_doc(entry["text"])
        entities = []
        seen_positions = set()  # Accepted (start, end, label) triples, used for overlap checks
        for start, end, label in entry["entities"]:
            # Ensure span is within the document's length and non-empty.
            if start < 0 or end > len(doc.text) or start >= end:
                print(f"Invalid span: start={start}, end={end}, label={label}")
                continue
            # Reject spans that overlap any already-accepted span
            # (first-come-first-served priority).
            if not any(start < e_end and end > e_start for e_start, e_end, _ in seen_positions):
                # char_span returns None if the range doesn't align with
                # token boundaries; such spans are silently dropped.
                span = doc.char_span(start, end, label=label)
                if span is not None:
                    entities.append(span)
                    seen_positions.add((start, end, label))
            else:
                print(f"Overlapping span: start={start}, end={end}, label={label}")

        # Attach the validated, non-overlapping entity spans.
        doc.ents = entities
        doc_bin.add(doc)

    # Save the DocBin to a .spacy file
    doc_bin.to_disk(spacy_file_path)
    print(f"Data has been successfully saved to {spacy_file_path}!")