import os
from pathlib import Path
from typing import List

import datasets
import pdf2image
from tqdm import tqdm

datasets.logging.set_verbosity_info()
logger = datasets.logging.get_logger(__name__)

MODE = "binary"

_CITATION = """\
@inproceedings{jordyvlRVLmulti,
  title = {},
  author = {Jordy Van Landeghem},
  booktitle = {},
  year = {2023}
}
"""

_DESCRIPTION = """\
Multi-page (PDF) version of the RVL-CDIP document classification dataset, covering 16 document-type classes.
"""

_HOMEPAGE = "TBD"

_LICENSE = "https://www.industrydocuments.ucsf.edu/help/copyright/"

_BINARY_URL = (
    "https://huggingface.co/datasets/jordyvl/unit-test_PDFfolder/resolve/main/data/data.tar.gz"
)

_BACKOFF_folder = "/mnt/lerna/data/RVL-CDIP_pdf"  # "/mnt/lerna/data/RVL_CDIP_multi.tar.gz"

_CLASSES = [
    "letter",
    "form",
    "email",
    "handwritten",
    "advertisement",
    "scientific_report",
    "scientific_publication",
    "specification",
    "file_folder",
    "news_article",
    "budget",
    "invoice",
    "presentation",
    "questionnaire",
    "resume",
    "memo",
]


def batched_conversion(pdf_file):
    """Convert a PDF to a list of page images, 10 pages at a time."""
    info = pdf2image.pdfinfo_from_path(pdf_file, userpw=None, poppler_path=None)
    maxPages = info["Pages"]
    logger.info(f"{pdf_file} has {maxPages} pages")

    images = []
    for page in range(1, maxPages + 1, 10):
        images.extend(
            pdf2image.convert_from_path(
                pdf_file, dpi=200, first_page=page, last_page=min(page + 10 - 1, maxPages)
            )
        )
    return images


def open_pdf_binary(pdf_file):
    """Read a PDF file as raw bytes."""
    with open(pdf_file, "rb") as f:
        return f.read()


class RvlCdipMulti(datasets.GeneratorBasedBuilder):

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name="default",
            version=datasets.Version("0.0.1", ""),
            description="",
        )
    ]

    def __init__(self, *args, examples_per_class=None, **kwargs):
        super().__init__(*args, **kwargs)
        # `writer_batch_size` is the batch size used by the ArrowWriter:
        # the number of samples kept in memory before they are written out,
        # which also determines the length of the Arrow chunks.
        # None means the ArrowWriter uses its default value.
        # self._writer_batch_size = writer_batch_size
        # self.writer_batch_size = writer_batch_size

        # Number of examples per class after which generation stops (currently unused).
        self.examples_per_class = examples_per_class

    @property
    def manual_download_instructions(self):
        return (
            "To use RVL-CDIP_multi you have to download it manually. "
            "Please extract all files in one folder and load the dataset with: "
            "`datasets.load_dataset('jordyvl/rvl_cdip_multi', data_dir='path/to/folder/folder_name')`"
        )

    def _info(self):
        if isinstance(self.config.data_files, str):
            folder = self.config.data_files  # needs to be extracted because it is a zip/tar archive
        elif isinstance(self.config.data_dir, str):
            folder = self.config.data_dir  # folder structure on a local disk
        else:
            folder = _BACKOFF_folder  # my local path; others should set data_dir or data_files
        self.config.data_dir = folder

        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("binary"),  # datasets.Sequence(datasets.Image()),
                    "labels": datasets.features.ClassLabel(names=_CLASSES),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
            task_templates=None,
        )

    def _split_generators(
        self, dl_manager: datasets.DownloadManager
    ) -> List[datasets.SplitGenerator]:
        if os.path.isdir(self.config.data_dir):
            data_files = {
                labelset: os.path.join(self.config.data_dir, labelset)
                for labelset in sorted(os.listdir(self.config.data_dir), reverse=True)
                if "csv" not in labelset
            }
            # self.config.data_dir = None
        elif self.config.data_dir.endswith(".tar.gz"):
            archive_path = dl_manager.download(self.config.data_dir)
            data_files = dl_manager.iter_archive(archive_path)
            raise NotImplementedError()
        elif self.config.data_dir.endswith(".zip"):
            archive_path = dl_manager.download_and_extract(self.config.data_dir)
            data_files = dl_manager.iter_archive(archive_path)
            raise NotImplementedError()
        else:
            raise ValueError(f"Cannot load data from {self.config.data_dir}")

        splits = []
        for split_name, folder in data_files.items():
            logger.info(folder)
            splits.append(
                datasets.SplitGenerator(name=split_name, gen_kwargs={"archive_path": folder})
            )
        return splits

    def _generate_examples(self, archive_path):
        labels = self.info.features["labels"]
        extensions = {".pdf", ".PDF"}

        for i, path in tqdm(enumerate(Path(archive_path).glob("**/*/*")), desc=f"{archive_path}"):
            if path.suffix in extensions:
                try:
                    images = open_pdf_binary(path)
                    # images = batched_conversion(path)
                    yield path.name, {
                        "file": images,
                        "labels": labels.encode_example(path.parent.name.lower()),
                    }
                except Exception as e:
                    logger.warning(f"Failed to parse example {i} ({path}): {e}")
                    # can do a map afterwards
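

# Minimal usage sketch (not part of the builder): assuming the data has been
# downloaded manually and extracted into a local folder laid out as
# <split>/<label>/<document>.pdf, which is what `_split_generators` and
# `_generate_examples` above expect. The `data_dir` value below is the
# placeholder from `manual_download_instructions`, not a real path.
if __name__ == "__main__":
    dataset = datasets.load_dataset(
        "jordyvl/rvl_cdip_multi",
        data_dir="path/to/folder/folder_name",
    )
    print(dataset)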