import gradio as gr
from PIL import Image, ImageDraw, ImageFont
import random
import pandas as pd
import numpy as np
from datasets import concatenate_datasets
from operator import itemgetter
import collections
# download datasets
from datasets import load_dataset
dataset_small = load_dataset("pierreguillou/DocLayNet-small")
dataset_base = load_dataset("pierreguillou/DocLayNet-base")
id2label = {idx:label for idx,label in enumerate(dataset_small["train"].features["categories"].feature.names)}
label2id = {label:idx for idx,label in id2label.items()}
labels = [label for idx, label in id2label.items()]
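# the 11 DocLayNet categories: Caption, Footnote, Formula, List-item, Page-footer, Page-header, Picture, Section-header, Table, Text, Title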
# need to change the coordinates format
def convert_box(box):
x, y, w, h = tuple(box) # the row comes in (left, top, width, height) format
actual_box = [x, y, x+w, y+h] # we turn it into (left, top, left+width, top+height) to get the actual box
return actual_box
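# example (illustrative values): convert_box([10, 20, 100, 50]) -> [10, 20, 110, 70]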
# get back original size
def original_box(box, original_width, original_height, coco_width, coco_height):
return [
int(original_width * (box[0] / coco_width)),
int(original_height * (box[1] / coco_height)),
int(original_width * (box[2] / coco_width)),
int(original_height* (box[3] / coco_height)),
]
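# example (illustrative values): with a 1025x1025 COCO page and a 2050x2050 original page,
# original_box([102, 205, 410, 512], 2050, 2050, 1025, 1025) -> [204, 410, 820, 1024]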
# function to sort bounding boxes
def get_sorted_boxes(bboxes):
# sort by y from page top to bottom
bboxes = sorted(bboxes, key=itemgetter(1), reverse=False)
y_list = [bbox[1] for bbox in bboxes]
# sort by x from page left to right when boxes with same y
if len(list(set(y_list))) != len(y_list):
y_list_duplicates_indexes = dict()
y_list_duplicates = [item for item, count in collections.Counter(y_list).items() if count > 1]
for item in y_list_duplicates:
y_list_duplicates_indexes[item] = [i for i, e in enumerate(y_list) if e == item]
bbox_list_y_duplicates = sorted(np.array(bboxes)[y_list_duplicates_indexes[item]].tolist(), key=itemgetter(0), reverse=False)
np_array_bboxes = np.array(bboxes)
np_array_bboxes[y_list_duplicates_indexes[item]] = np.array(bbox_list_y_duplicates)
bboxes = np_array_bboxes.tolist()
return bboxes
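# example (illustrative values): get_sorted_boxes([[50, 10, 80, 20], [10, 10, 40, 20], [5, 40, 60, 55]])
# -> [[10, 10, 40, 20], [50, 10, 80, 20], [5, 40, 60, 55]] (top to bottom, then left to right for boxes sharing the same y)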
# categories colors
label2color = {
'Caption': 'brown',
'Footnote': 'orange',
'Formula': 'gray',
'List-item': 'yellow',
'Page-footer': 'red',
'Page-header': 'red',
'Picture': 'violet',
'Section-header': 'orange',
'Table': 'green',
'Text': 'blue',
'Title': 'pink'
}
# image without content (placeholder returned when no page matches the settings)
examples_dir = 'samples/'
images_wo_content = examples_dir + "wo_content.png"
df_paragraphs_wo_content, df_lines_wo_content = pd.DataFrame(), pd.DataFrame()
df_paragraphs_wo_content["paragraphs"] = [0]
df_paragraphs_wo_content["categories"] = ["no content"]
df_paragraphs_wo_content["texts"] = ["no content"]
df_paragraphs_wo_content["bounding boxes"] = ["no content"]
df_lines_wo_content["lines"] = [0]
df_lines_wo_content["categories"] = ["no content"]
df_lines_wo_content["texts"] = ["no content"]
df_lines_wo_content["bounding boxes"] = ["no content"]
# lists
font = ImageFont.load_default()
dataset_names = ["small", "base"]
splits = ["all", "train", "validation", "test"]
domains = ["all", "Financial Reports", "Manuals", "Scientific Articles", "Laws & Regulations", "Patents", "Government Tenders"]
domains_names = [domain_name.lower().replace(" ", "_").replace("&", "and") for domain_name in domains]
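# e.g., "Laws & Regulations" -> "laws_and_regulations" (the format expected by the "doc_category" column filter below)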
categories = labels + ["all"]
# function to get a random image and all its data from DocLayNet
def generate_annotated_image(dataset_name, split, domain, category):
# error message
msg_error = ""
# get dataset
if dataset_name == "small": example = dataset_small
else: example = dataset_base
# get split
if split == "all":
example = concatenate_datasets([example["train"], example["validation"], example["test"]])
else:
example = example[split]
# get domain
domain_name = domains_names[domains.index(domain)]
if domain_name != "all":
example = example.filter(lambda example: example["doc_category"] == domain_name)
if len(example) == 0:
msg_error = f'There is no image with at least one labeled bounding box that matches your settings (dataset: "DocLayNet {dataset_name}" / domain: "{domain}" / split: "{split}").'
example = dict()
# get category
idx_list = list()
if category != "all":
for idx, categories_list in enumerate(example["categories"]):
if int(label2id[category]) in categories_list:
idx_list.append(idx)
if len(idx_list) > 0:
example = example.select(idx_list)
else:
msg_error = f'There is no image with at least one labeled bounding box that matches your settings (dataset: "DocLayNet {dataset_name}" / split: "{split}" / domain: "{domain}" / category: "{category}").'
example = dict()
if len(msg_error) > 0:
# save image files
Image.open(images_wo_content).save("wo_content.png")
# save csv files
df_paragraphs_wo_content.to_csv("paragraphs_wo_content.csv", encoding="utf-8", index=False)
df_lines_wo_content.to_csv("lines_wo_content.csv", encoding="utf-8", index=False)
return msg_error, "wo_content.png", images_wo_content, images_wo_content, "wo_content.png", "wo_content.png", df_paragraphs_wo_content, df_lines_wo_content, gr.File.update(value="paragraphs_wo_content.csv", visible=False), gr.File.update(value="lines_wo_content.csv", visible=False)
else:
# get random image & PDF data
index = random.randint(0, len(example) - 1) # randint is inclusive on both ends
image = example[index]["image"] # original image
coco_width, coco_height = example[index]["coco_width"], example[index]["coco_height"]
original_width, original_height = example[index]["original_width"], example[index]["original_height"]
original_filename = example[index]["original_filename"]
page_no = example[index]["page_no"]
num_pages = example[index]["num_pages"]
# resize image to original
image = image.resize((original_width, original_height))
# get image of PDF without bounding boxes
img_file = original_filename.replace(".pdf", ".png")
image.save(img_file)
# get corresponding annotations
texts = example[index]["texts"]
bboxes_block = example[index]["bboxes_block"]
bboxes_line = example[index]["bboxes_line"]
categories = example[index]["categories"]
domain = example[index]["doc_category"]
# convert boxes to original
original_bboxes_block = [original_box(convert_box(box), original_width, original_height, coco_width, coco_height) for box in bboxes_block]
original_bboxes_line = [original_box(convert_box(box), original_width, original_height, coco_width, coco_height) for box in bboxes_line]
original_bboxes = [original_bboxes_block, original_bboxes_line]
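# original_bboxes[0] holds the block (paragraph) boxes and original_bboxes[1] the line boxes; both lists have one entry per text line and use original page coordinates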
##### block boxes #####
# get list of unique block boxes
original_blocks = dict()
original_bboxes_block_list = list()
original_bbox_block_prec = list()
for count_block, original_bbox_block in enumerate(original_bboxes_block):
if original_bbox_block != original_bbox_block_prec:
original_bbox_block_indexes = [i for i, original_bbox in enumerate(original_bboxes_block) if original_bbox == original_bbox_block]
original_blocks[count_block] = original_bbox_block_indexes
original_bboxes_block_list.append(original_bbox_block)
original_bbox_block_prec = original_bbox_block
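# note: the dataset repeats a paragraph's block box for every line it contains, so consecutive duplicates are collapsed here to get one box per paragraph plus the indexes of its lines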
# get list of categories and texts by unique block boxes
category_block_list, text_block_list = list(), list()
for original_bbox_block in original_bboxes_block_list:
count_block = original_bboxes_block.index(original_bbox_block)
original_bbox_block_indexes = original_blocks[count_block]
category_block = categories[original_bbox_block_indexes[0]]
category_block_list.append(category_block)
if id2label[category_block] == "Text" or id2label[category_block] == "Caption" or id2label[category_block] == "Footnote":
text_block = ' '.join(np.array(texts)[original_bbox_block_indexes].tolist())
elif id2label[category_block] == "Section-header" or id2label[category_block] == "Title" or id2label[category_block] == "Picture" or id2label[category_block] == "Formula" or id2label[category_block] == "List-item" or id2label[category_block] == "Table" or id2label[category_block] == "Page-header" or id2label[category_block] == "Page-footer":
text_block = '\n'.join(np.array(texts)[original_bbox_block_indexes].tolist())
text_block_list.append(text_block)
# sort data from the top of the page to the bottom (y), then from left to right (x) for boxes sharing the same y
sorted_original_bboxes_block_list = get_sorted_boxes(original_bboxes_block_list)
sorted_original_bboxes_block_list_indexes = [original_bboxes_block_list.index(item) for item in sorted_original_bboxes_block_list]
sorted_category_block_list = np.array(category_block_list)[sorted_original_bboxes_block_list_indexes].tolist()
sorted_text_block_list = np.array(text_block_list)[sorted_original_bboxes_block_list_indexes].tolist()
##### line boxes #####
# sort data from the top of the page to the bottom (y), then from left to right (x) for boxes sharing the same y
original_bboxes_line_list = original_bboxes_line
category_line_list = categories
text_line_list = texts
sorted_original_bboxes_line_list = get_sorted_boxes(original_bboxes_line_list)
sorted_original_bboxes_line_list_indexes = [original_bboxes_line_list.index(item) for item in sorted_original_bboxes_line_list]
sorted_category_line_list = np.array(category_line_list)[sorted_original_bboxes_line_list_indexes].tolist()
sorted_text_line_list = np.array(text_line_list)[sorted_original_bboxes_line_list_indexes].tolist()
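# paragraphs and lines are now in reading order: top to bottom, then left to right for boxes sharing the same y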
# setup images & PDF data
columns = 2
images = [image.copy(), image.copy()]
num_imgs = len(images)
imgs, df_paragraphs, df_lines = dict(), pd.DataFrame(), pd.DataFrame()
for i, img in enumerate(images):
draw = ImageDraw.Draw(img)
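# i == 0 draws the block (paragraph) boxes, i == 1 the line boxes; each box is outlined and tagged with its category label in the matching color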
for box, label_idx, text in zip(original_bboxes[i], categories, texts):
label = id2label[label_idx]
color = label2color[label]
draw.rectangle(box, outline=color)
text = text.encode('latin-1', 'replace').decode('latin-1') # https://stackoverflow.com/questions/56761449/unicodeencodeerror-latin-1-codec-cant-encode-character-u2013-writing-to
draw.text((box[0] + 10, box[1] - 10), text=label, fill=color, font=font)
if i == 0:
imgs["paragraphs"] = img
# save
img_paragraphs = "img_paragraphs_" + original_filename.replace(".pdf", ".png")
img.save(img_paragraphs)
df_paragraphs["paragraphs"] = list(range(len(sorted_original_bboxes_block_list)))
df_paragraphs["categories"] = [id2label[label_idx] for label_idx in sorted_category_block_list]
df_paragraphs["texts"] = sorted_text_block_list
df_paragraphs["bounding boxes"] = [str(bbox) for bbox in sorted_original_bboxes_block_list]
# save
csv_paragraphs = "csv_paragraphs_" + original_filename.replace(".pdf", ".csv")
df_paragraphs.to_csv(csv_paragraphs, encoding="utf-8", index=False)
else:
imgs["lines"] = img
# save
img_lines = "img_lines_" + original_filename.replace(".pdf", ".png")
img.save(img_lines)
df_lines["lines"] = list(range(len(sorted_original_bboxes_line_list)))
df_lines["categories"] = [id2label[label_idx] for label_idx in sorted_category_line_list]
df_lines["texts"] = sorted_text_line_list
df_lines["bounding boxes"] = [str(bbox) for bbox in sorted_original_bboxes_line_list]
# save
csv_lines = "csv_lines_" + original_filename.replace(".pdf", ".csv")
df_lines.to_csv(csv_lines, encoding="utf-8", index=False)
msg = f'Page {page_no} of the PDF "{original_filename}" (domain: "{domain}") matches your settings.'
return msg, img_file, imgs["paragraphs"], imgs["lines"], img_paragraphs, img_lines, df_paragraphs, df_lines, gr.File.update(value=csv_paragraphs, visible=True), gr.File.update(value=csv_lines, visible=True)
# gradio APP
with gr.Blocks(title="DocLayNet image viewer", css=".gradio-container") as demo:
gr.HTML("""
<div style="font-family:'Times New Roman', 'Serif'; font-size:26pt; font-weight:bold; text-align:center;"><h1>DocLayNet image viewer</h1></div>
<div style="margin-top: 40px"><p>(01/29/2023) This APP is an image viewer of the DocLayNet dataset and a data extraction tool.</p></div>
<div><p>It uses the datasets <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/datasets/pierreguillou/DocLayNet-small" target="_blank">DocLayNet small</a> and <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/datasets/pierreguillou/DocLayNet-base" target="_blank">DocLayNet base</a> (you can also run this APP in Google Colab with this <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://github.com/piegu/language-models/blob/master/DocLayNet_image_viewer_APP.ipynb" target="_blank">notebook</a>).</p></div>
<div><p>Make your settings and the output will show 2 images of a randomly selected PDF with labeled bounding boxes, one of paragraphs and the other of lines, and their corresponding tables of texts with their labels.</p></div>
<div><p>For example, if you select the domain "Laws & Regulations" and the category "Caption", you will get a random PDF page that matches these settings (i.e., it has at least one bounding box labeled "Caption").</p></div>
<div><p><b>WARNING</b>: if the app crashes or runs without providing a result, refresh the page (<a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://huggingface.co/spaces/pierreguillou/DocLayNet-image-viewer">DocLayNet image viewer</a>) and run a search again. If the same problem occurs, prefer the DocLayNet small dataset. Thanks.</p></div>
<div style="margin-top: 20px"><p>More information about the DocLayNet datasets and this APP in the following blog post: <a style="text-decoration: none; border-bottom: #64b5f6 0.125em solid; color: #64b5f6" href="https://medium.com/@pierre_guillou/document-ai-processing-of-doclaynet-dataset-to-be-used-by-layout-models-of-the-hugging-face-hub-308d8bd81cdb" target="_blank">(01/27/2023) Document AI | Processing of DocLayNet dataset to be used by layout models of the Hugging Face hub (finetuning, inference)</a></div>
""")
with gr.Row():
with gr.Column():
dataset_name_gr = gr.Radio(dataset_names, value="small", label="DocLayNet dataset")
with gr.Column():
split_gr = gr.Dropdown(splits, value="all", label="Split")
with gr.Column():
domain_gr = gr.Dropdown(domains, value="all", label="Domain")
with gr.Column():
category_gr = gr.Dropdown(categories, value="all", label="Category")
btn = gr.Button("Display labeled PDF image & data")
with gr.Row():
with gr.Column():
output_msg = gr.Textbox(label="Output message")
with gr.Column():
img_file = gr.File(visible=True, label="Image file of the PDF")
with gr.Row():
with gr.Column():
img_paragraphs_file = gr.File(visible=True, label="Image file (labeled paragraphs)")
img_paragraphs = gr.Image(type="pil", label="Bounding boxes of labeled paragraphs", visible=True)
with gr.Column():
img_lines_file = gr.File(visible=True, label="Image file (labeled lines)")
img_lines = gr.Image(type="pil", label="Bounding boxes of labeled lines", visible=True)
with gr.Row():
with gr.Column():
with gr.Row():
csv_paragraphs = gr.File(visible=False, label="CSV file (paragraphs)")
with gr.Row():
df_paragraphs = gr.Dataframe(
headers=["paragraphs", "categories", "texts", "bounding boxes"],
datatype=["number", "str", "str", "str"],
col_count=(4, "fixed"),
visible=True,
label="Paragraphs data",
type="pandas",
wrap=True
)
with gr.Column():
with gr.Row():
csv_lines = gr.File(visible=False, label="CSV file (lines)")
with gr.Row():
df_lines = gr.Dataframe(
headers=["lines", "categories", "texts", "bounding boxes"],
datatype=["number", "str", "str", "str"],
col_count=(4, "fixed"),
visible=True,
label="Lines data",
type="pandas",
wrap=True
)
btn.click(generate_annotated_image, inputs=[dataset_name_gr, split_gr, domain_gr, category_gr], outputs=[output_msg, img_file, img_paragraphs, img_lines, img_paragraphs_file, img_lines_file, df_paragraphs, df_lines, csv_paragraphs, csv_lines])
gr.Markdown("## Example")
gr.Examples(
[["small", "all", "all", "all"]],
[dataset_name_gr, split_gr, domain_gr, category_gr],
[output_msg, img_file, img_paragraphs, img_lines, img_paragraphs_file, img_lines_file, df_paragraphs, df_lines, csv_paragraphs, csv_lines],
fn=generate_annotated_image,
cache_examples=True,
)
demo.launch()