KaraKaraWitch committed on
Commit 6ac1b59 · 1 Parent(s): 1ca7291
Files changed (1)
  1. script/extract-text.py +0 -147
script/extract-text.py DELETED
@@ -1,147 +0,0 @@
- import os
- import json
- from bs4 import BeautifulSoup
- import ebooklib
- from ebooklib import epub
- import re
- import xml.etree.ElementTree as ET
-
- folder_path = './books'  # Replace with your folder path
- output_file_pattern = './output/output_part_{}.jsonl'  # Pattern for output files
- part_size = 1  # Number of files per part
- part_counter = 0
- file_counter = 0
-
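For orientation, a quick illustration of how the shard pattern expands (the part number here is made up):

```python
# With part_size = 1, every EPUB gets its own shard; the pattern
# fills in the 1-based part counter:
print('./output/output_part_{}.jsonl'.format(3))  # ./output/output_part_3.jsonl
```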
- def correct_french_punctuation(text):
-     # Normalise spacing around French punctuation marks
-     text = re.sub(r'\s+([?!:;])', r'\1', text)  # Drop the (no-break) space French places before ?!:;
-     text = re.sub(r'([?!:;])\s*', r'\1 ', text)  # Collapse to exactly one space after punctuation
-     text = re.sub(r'\s*-\s*', '-', text)  # Tighten spaces around hyphens
-     text = re.sub(r'\s*–\s*', '-', text)  # Normalise en dashes to unspaced hyphens
-     return text
-
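A minimal sketch of what this normalisation produces, using the function defined above on made-up sample strings:

```python
# Hypothetical inputs; expected outputs in the trailing comments
print(correct_french_punctuation("Qu'en pensez-vous ?"))      # "Qu'en pensez-vous? " (note trailing space)
print(correct_french_punctuation("Attention : ceci – cela"))  # "Attention: ceci-cela"
```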
- def find_navpoint_2_in_toc(book):
-     # Return the label of the navPoint with id 'navpoint-2', if present (currently unused)
-     toc_item = book.get_item_with_id('ncx')
-     if toc_item is None:
-         return None
-     toc_content = toc_item.get_content()
-     namespaces = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
-     toc_root = ET.fromstring(toc_content)
-
-     nav_points = toc_root.findall('.//ncx:navPoint', namespaces)
-     for nav_point in nav_points:
-         if nav_point.attrib.get('id') == 'navpoint-2':
-             # The navPoint element itself has no text; the label lives in navLabel/text
-             label = nav_point.find('.//ncx:navLabel/ncx:text', namespaces)
-             return label.text if label is not None else None
-     return None
-
- def find_section_href_in_toc(book, section_title):
-     # Return the content src of the TOC entry whose label matches section_title
-     toc_item = book.get_item_with_id('ncx')
-     if toc_item is None:
-         return None
-     toc_content = toc_item.get_content()
-     namespaces = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
-     toc_root = ET.fromstring(toc_content)
-     nav_points = toc_root.findall('.//ncx:navPoint', namespaces)
-     for nav_point in nav_points:
-         text_elements = nav_point.findall('.//ncx:navLabel/ncx:text', namespaces)
-         for text_element in text_elements:
-             if text_element.text == section_title:
-                 content_element = nav_point.find('.//ncx:content', namespaces)
-                 if content_element is not None:
-                     return content_element.attrib['src']
-     return None
-
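For context, a self-contained sketch of the NCX shape this lookup walks; the fragment below is invented to show the matching logic, not taken from any book in the dataset:

```python
# Made-up NCX fragment illustrating navLabel/text and content/src
import xml.etree.ElementTree as ET

ncx = b"""<ncx xmlns="http://www.daisy.org/z3986/2005/ncx/">
  <navMap>
    <navPoint id="navpoint-2">
      <navLabel><text>Avant propos</text></navLabel>
      <content src="chapitre-1.xhtml"/>
    </navPoint>
  </navMap>
</ncx>"""

ns = {'ncx': 'http://www.daisy.org/z3986/2005/ncx/'}
root = ET.fromstring(ncx)
for nav_point in root.findall('.//ncx:navPoint', ns):
    label = nav_point.find('.//ncx:navLabel/ncx:text', ns)
    if label is not None and label.text == 'Avant propos':
        print(nav_point.find('.//ncx:content', ns).attrib['src'])  # chapitre-1.xhtml
```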
- def extract_content_from_epub(book):
-     text = ''
-     # Locate the start and end of the body text via the NCX table of contents
-     start_section = find_section_href_in_toc(book, "Avant propos") or find_section_href_in_toc(book, "Premier Chapitre")
-     end_section_1 = find_section_href_in_toc(book, "À propos de cette édition électronique")
-     end_section_2 = find_section_href_in_toc(book, "Bibliographie – Œuvres complètes")
-
-     # Determine the final end section (hrefs are compared lexicographically)
-     if end_section_1 is not None and end_section_2 is not None:
-         end_section = end_section_1 if end_section_1 < end_section_2 else end_section_2
-     elif end_section_1 is not None:
-         end_section = end_section_1
-     else:
-         end_section = end_section_2
-
-     # Hrefs may carry a fragment (e.g. 'chap.xhtml#anchor'); drop it so the
-     # substring test against item names below can match
-     if start_section:
-         start_section = start_section.split('#')[0]
-     if end_section:
-         end_section = end_section.split('#')[0]
-
-     extracting = start_section is None  # Start extracting immediately if no start section was found
-
-     for item in book.get_items():
-         if item.get_type() == ebooklib.ITEM_DOCUMENT:
-             item_id = item.get_name()
-             if start_section and start_section in item_id:
-                 extracting = True
-             if end_section and end_section in item_id:
-                 break
-             if extracting:
-                 try:
-                     soup = BeautifulSoup(item.get_content(), 'html.parser')
-                     for p in soup.find_all('p'):  # Process paragraph by paragraph
-                         paragraph = p.get_text(separator='\n')
-                         paragraph = paragraph.replace('\xa0', ' ')
-                         paragraph = correct_french_punctuation(paragraph)
-                         text += paragraph + '\n'
-                         # Stop at known end-of-book phrases and trim everything after them
-                         if "FIN" in paragraph:
-                             text = text.split("FIN", 1)[0]
-                             print("End of book reached")
-                             return text
-                         elif "la Bibliothèque électronique du Québec" in paragraph:
-                             text = text.split("la Bibliothèque électronique du Québec", 1)[0]
-                             print("End of book reached")
-                             return text
-                         elif "ouvrage est le" in paragraph:
-                             text = text.split("ouvrage est le", 1)[0]
-                             print("End of book reached")
-                             return text
-                 except Exception as e:
-                     print(f"Error processing content: {e}")
-
-     if not text:
-         print("Fallback: Adding all text as no specific sections were found.")
-         for item in book.get_items():
-             if item.get_type() == ebooklib.ITEM_DOCUMENT:
-                 try:
-                     soup = BeautifulSoup(item.get_content(), 'html.parser')
-                     text += soup.get_text(separator='\n').replace('\xa0', ' ') + '\n'
-                 except Exception as e:
-                     print(f"Error in fallback processing: {e}")
-
-     return text
-
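A quick smoke test of the extraction path, assuming a local EPUB at a made-up path:

```python
# Hypothetical single-file check (the file name is an assumption)
book = epub.read_epub('./books/example.epub')
body = extract_content_from_epub(book)
print(body[:300])  # first few hundred characters of the cleaned text
```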
- def extract_metadata_from_epub(book):
-     metadata = {}
-     try:
-         # get_metadata returns a list of (value, attributes) tuples
-         metadata['title'] = book.get_metadata('DC', 'title')
-         metadata['author'] = book.get_metadata('DC', 'creator')
-         metadata['publisher'] = book.get_metadata('DC', 'publisher')
-         # Add more metadata fields if needed
-     except Exception as e:
-         print(f"Error extracting metadata: {e}")
-     return metadata
-
- jsonl_file = None  # Current output shard; opened lazily below
- for file in os.listdir(folder_path):
-     if file.endswith('.epub'):
-         # Rotate to a new output part every part_size files
-         if file_counter % part_size == 0:
-             if jsonl_file is not None:
-                 jsonl_file.close()
-             part_counter += 1
-             jsonl_file = open(output_file_pattern.format(part_counter), 'w', encoding='utf-8')
-
-         full_path = os.path.join(folder_path, file)
-         try:
-             book = epub.read_epub(full_path)
-             text = extract_content_from_epub(book)
-             meta = extract_metadata_from_epub(book)
-             jsonl_file.write(json.dumps({"text": text, "meta": meta}, ensure_ascii=False) + '\n')
-             file_counter += 1
-             print(f"Processed file {file}")
-         except Exception as e:
-             print(f"Error reading file {file}: {e}")
-
- if jsonl_file is not None:
-     jsonl_file.close()
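To sanity-check the output, a minimal read-back of the first shard (the path assumes the defaults above):

```python
import json

# Print each record's title metadata and text length
with open('./output/output_part_1.jsonl', encoding='utf-8') as f:
    for line in f:
        record = json.loads(line)
        print(record['meta'].get('title'), len(record['text']), 'chars')
```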