arxyzan committed on
Commit
454e962
·
1 Parent(s): 546483a

Create parsynth-ocr-200k.py

Browse files
Files changed (1) hide show
  1. parsynth-ocr-200k.py +79 -0
parsynth-ocr-200k.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import csv
2
+ import os
3
+
4
+ import datasets
5
+
6
+ logger = datasets.logging.get_logger(__name__)
7
+
8
+ _CITATION = """"""
9
+
10
+ _DESCRIPTION = """ParsynthOCR-200k: A synthetic dataset for OCR. (A 200k samples preview)"""
11
+
12
+ _DOWNLOAD_URLS = {
13
+ "train": "https://huggingface.co/datasets/hezarai/parsynth-ocr-200k/resolve/main/annotations_train.csv",
14
+ "test": "https://huggingface.co/datasets/hezarai/parsynth-ocr-200k/resolve/main/annotations_test.csv",
15
+ "data": "https://huggingface.co/datasets/hezarai/parsynth-ocr-200k/resolve/main/images.zip",
16
+ }
17
+
18
+ ZIP_IMAGES_DIR = "parsynth-ocr-200k"
19
+
20
+
21
class ParsynthOCR200KConfig(datasets.BuilderConfig):
    """Builder config for ParsynthOCR-200K; adds no options beyond the base config."""

    def __init__(self, **kwargs):
        # Forward everything (name, version, description, ...) to BuilderConfig.
        super().__init__(**kwargs)
24
+
25
+
26
class ParsynthOCR200K(datasets.GeneratorBasedBuilder):
    """Dataset builder for ParsynthOCR-200K, a synthetic OCR dataset.

    Each example pairs an image file path with its ground-truth text,
    read from per-split `filename,text` annotation CSVs.
    """

    BUILDER_CONFIGS = [
        ParsynthOCR200KConfig(
            name="Parsynth200K",
            version=datasets.Version("1.0.0"),
            description=_DESCRIPTION,
        ),
    ]

    def _info(self):
        """Return dataset metadata: features are the image path and its text label."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "image_path": datasets.Value("string"),
                    "text": datasets.Value("string"),
                }
            ),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """
        Return SplitGenerators for the train and test annotation files.
        """
        train_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["train"])
        test_path = dl_manager.download_and_extract(_DOWNLOAD_URLS["test"])
        archive_path = dl_manager.download(_DOWNLOAD_URLS["data"])
        # In streaming mode the archive is not extracted to disk, so image
        # paths stay relative to the archive root (just the zip's inner dir).
        images_dir = dl_manager.extract(archive_path) if not dl_manager.is_streaming else ""
        images_dir = os.path.join(images_dir, ZIP_IMAGES_DIR)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN, gen_kwargs={"annotations_file": train_path, "images_dir": images_dir}
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST, gen_kwargs={"annotations_file": test_path, "images_dir": images_dir}
            ),
        ]

    def _generate_examples(self, annotations_file, images_dir):
        """Yield (id, example) pairs from a `filename,text` annotations CSV.

        Args:
            annotations_file: Path to the CSV file; its first row is a header.
            images_dir: Directory joined onto each CSV filename to form the
                absolute (or archive-relative, when streaming) image path.
        """
        logger.info("⏳ Generating examples from = %s", annotations_file)

        with open(annotations_file, encoding="utf-8") as csv_file:
            csv_reader = csv.reader(csv_file, quotechar='"', skipinitialspace=True)

            # Skip header
            next(csv_reader, None)

            for id_, row in enumerate(csv_reader):
                # Fix: csv.reader yields an empty list for blank lines in the
                # file; skip those instead of crashing on tuple unpacking.
                if not row:
                    continue
                filename, text = row
                image_path = os.path.join(images_dir, filename)
                yield id_, {"image_path": image_path, "text": text}