import concurrent.futures as conc
import faulthandler
import pathlib
import traceback
import typing

import orjson
import tqdm
import typer
from bs4 import BeautifulSoup, Tag
from markdownify import MarkdownConverter

# html2markdown is assumed to be a project-local module providing WikiConverter,
# a MarkdownConverter subclass tuned for Wikipedia markup.
from html2markdown import WikiConverter

app = typer.Typer()
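
# This script extracts figure URLs and Markdown-rendered captions from
# Wikipedia NDJSON dumps (one JSON object per line carrying an
# ``article_body.html`` field; the Wikimedia Enterprise HTML dump layout
# is an assumption, inferred from the keys used below).
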
def wikipedia_figures(soup: BeautifulSoup, converter: MarkdownConverter):
    """Extract figures from a parsed Wikipedia article.

    Args:
        soup (BeautifulSoup): The parsed article HTML.
        converter (MarkdownConverter): Converter used to render figure
            captions as Markdown.

    Returns:
        list: ``[original_source_url, caption]`` pairs, deduplicated by URL.
    """
    figure_url = set()
    figures = []
    title = soup.find("title")
    title = None if not title else title.get_text()
    for figure_element in soup.select('figure[typeof*="mw:File"]'):
        figcaption = figure_element.find("figcaption")
        img = figure_element.select_one("a > img")
        if not figcaption or not img:
            continue
        if not figcaption.get_text(strip=True):
            # Empty <figcaption>: infobox images sometimes carry their caption
            # in a sibling <div class="infobox-caption"> instead.
            sibling = figure_element.next_sibling
            if (
                sibling
                and isinstance(sibling, Tag)
                and sibling.name == "div"
                and "infobox-caption" in sibling.attrs.get("class", [])
            ):
                figcaption = sibling
            else:
                figcaption = None
if "commons/" not in img["src"]:
# print("NotCommons", title, figcaption)
continue
orig_src = "/".join(
img.get("src","").replace("commons/thumb", "commons").split("/")[:-1]
)
if orig_src.endswith((".svg",)):
continue
if orig_src.endswith("/"):
print(title, figure_element)
continue
        caption = None
        if isinstance(figcaption, Tag):
            # Remove inline <style> tags so they do not leak into the caption.
            for style_tag in figcaption.find_all("style"):
                style_tag.decompose()
            caption = (
                converter.convert_soup(figcaption)
                .replace("\r", " ")
                .replace("\n", " ")
                .replace("  ", " ")
                .replace("  ", " ")
            )
        if orig_src in figure_url:
            continue
        figure_url.add(orig_src)
        orig_src = f"https:{orig_src}"
        figures.append([orig_src, caption])
    return figures
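

# process_root walks a two-level dump layout: each subfolder of ``folder``
# holds *.ndjson shards, which are fanned out to a process pool and written
# to a mirrored directory tree under ``output_folder``.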
@app.command()
def process_root(folder: pathlib.Path, output_folder: pathlib.Path):
    futures: list[conc.Future] = []
    # Dump Python tracebacks to crash_dump.txt on a fatal error (e.g. a segfault).
    faulthandler.enable(file=open("crash_dump.txt", "a"))
    with conc.ProcessPoolExecutor(max_workers=90) as executor:
        for root_folder in folder.iterdir():
            if root_folder.is_dir():
                processed_root = (output_folder / root_folder.name).resolve()
                print("Processing Root", root_folder, processed_root)
                if not processed_root.is_dir():
                    processed_root.mkdir(exist_ok=True, parents=True)
                for root_file in root_folder.glob("*.ndjson"):
                    futures.append(
                        executor.submit(
                            process_file,
                            root_file,
                            processed_root / root_file.name,
                            progress=False,
                        )
                    )
        for future in conc.as_completed(futures):
            try:
                future_response = future.result()
                print("future processed", future_response)
            except Exception as e:
                traceback.print_exception(e)
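

# process_folder is the flat variant: it processes every *.ndjson file found
# directly in a single directory.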
@app.command()
def process_folder(folder: pathlib.Path, output_folder: pathlib.Path):
    if output_folder is not None and not output_folder.is_dir():
        output_folder.mkdir(exist_ok=True, parents=True)
    with conc.ProcessPoolExecutor(max_workers=180) as executor:
        futures = []
        for file in folder.glob("*.ndjson"):
            futures.append(
                executor.submit(
                    process_file, file, output_folder / file.name, progress=False
                )
            )
        for future in conc.as_completed(futures):
            future.result()


@app.command()
def process_file(
    file: pathlib.Path,
    output_file: typing.Optional[pathlib.Path] = None,
    progress: bool = True,
):
    fout = None
    if output_file:
        fout = open(output_file, "wb")
    pbar = None
    if progress:
        pbar = tqdm.tqdm()
    converter = WikiConverter()
    with open(file, "rb") as f:
        for line in f:
            try:
                wiki_data = orjson.loads(line)
            except orjson.JSONDecodeError:
                continue  # skip malformed JSON lines
if not wiki_data["article_body"].get("wikitext"):
continue
figures = wikipedia_figures(
BeautifulSoup(wiki_data["article_body"]["html"], "lxml"), converter
)
if figures and fout:
fout.write(orjson.dumps({"figure_media": figures}))
fout.write(b"\n")
fout.flush()
if pbar is not None:
pbar.update(1)
if fout:
fout.close()
if pbar is not None:
pbar.close()
return output_file if output_file else None
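

# Example invocation (the script filename is hypothetical; Typer exposes the
# commands above as ``process-root``, ``process-folder``, and ``process-file``):
#
#   python wiki_figures.py process-file enwiki_0.ndjson figures_0.ndjson
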
if __name__ == "__main__":
app()