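"""Asynchronous Gelbooru image scraper.

Downloads each post's image and tag list into IMAGE_DIR, with optional
resizing, AVIF conversion, minimum tag count filtering, and continuous
scraping past the search depth cap.
"""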
import os
import re
import sys
import utils
import random
import urllib
import asyncio
import aiohttp
import aiofiles
import argparse
import concurrent.futures
import html as libhtml
from constants import *
from bs4 import BeautifulSoup

async def process_link(scrape_args, scrape_state):
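    """Scrape a single post page: download the image and write its tag file, retrying on errors."""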
    image_id = re.search(r"id=(\d+)", scrape_args.target).group(1)
    image_id_int = int(image_id)
    scrape_state.last_reached_image_id = image_id_int
    image_id_already_exists = image_id in scrape_state.existing_image_ids
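    # Still process roughly 1 in 100 already-downloaded posts so last_reached_image_score stays fresh for continuous scraping.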
    if image_id_already_exists and image_id_int % 100 < 99:
        # print(f"Image {image_id} already exists, skipped.")
        return
    scrape_state.existing_image_ids.add(image_id)
    error = None
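    # Retry loop: one initial attempt plus up to MAX_RETRY retries.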
    for i in range(1, MAX_RETRY + 2): # 1 indexed.
        try:
            if utils.get_sigint_count() >= 1 or (isinstance(scrape_args.max_scrape_count, int) and scrape_state.scraped_image_count >= scrape_args.max_scrape_count):
                break
            # print(f"Processing image {image_id}...")
            async with scrape_state.session.get(scrape_args.target) as response:
                html = await response.text()
            soup = BeautifulSoup(html, "html.parser")
            score_span = soup.find("span", id="psc" + image_id)
            if score_span:
                scrape_state.last_reached_image_score = int(score_span.contents[0])
            if image_id_already_exists:
                # print(f"Image {image_id} already exists, skipped.")
                return
            video_container = soup.find("video", id="gelcomVideoPlayer")
            if video_container:
                print(f"Image {image_id} is a video, skipped.")
                return
            image_container = soup.find("section", class_=["image-container", "note-container"])
            if not image_container:
                raise RuntimeError(f"No image container found for {image_id}.")
            if not scrape_args.use_low_quality:
                image_download_url = soup.find("a", string="Original image")["href"]
            else:
                image_download_url = image_container.find("img", id="image")["src"]
            image_ext = os.path.splitext(image_download_url)[1].lower()
            if image_ext not in IMAGE_EXT:
                print(f"Image {image_id} is not an image, skipped.")
                return
            tags = image_container["data-tags"].strip().split()
            tag_count = len(tags)
            if tag_count < scrape_args.min_tags:
                # print(f"Image {image_id} doesn't have enough tags({tag_count} < {scrape_args.min_tags}), skipped.")
                return
            rating = image_container["data-rating"]
            if rating == "explicit": tags.append("nsfw")
            elif rating == "questionable": tags.append("qfw")
            else: tags.append("sfw")
            random.shuffle(tags)
            image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
            tags_path = os.path.join(IMAGE_DIR, image_id + ".txt")
            tags_text = ", ".join(libhtml.unescape(tag).replace("_", " ") for tag in tags)
            async with scrape_state.session.get(image_download_url) as img_response:
                img_data = await img_response.read()
            os.makedirs(IMAGE_DIR, exist_ok=True)
            async with aiofiles.open(image_path, "wb") as f:
                await f.write(img_data)
            async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
                await f.write(tags_text)
            if not await utils.submit_validation(scrape_state.thread_pool, image_path, tags_path, scrape_args.width, scrape_args.height, scrape_args.convert_to_avif):
                scrape_state.existing_image_ids.remove(image_id)
            else:
                scrape_state.scraped_image_count += 1
                if scrape_state.scraped_image_count % 1000 == 0:
                    print(f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images.")
            return
        except Exception as e:
            error = e
            if i > MAX_RETRY:
                break
            # print(f"A {e.__class__.__name__} occurred with image {image_id}: {e}\nPausing for 0.1 second before retrying attempt {i}/{MAX_RETRY}...")
            await asyncio.sleep(0.1)
    if not image_id_already_exists:
        scrape_state.existing_image_ids.remove(image_id)
    if error is not None:
        print(f"All retry attempts failed, image {image_id} skipped. Final error {error.__class__.__name__}: {error}")

def parse_args():
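    """Parse and validate command line arguments."""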
    parser = argparse.ArgumentParser(description="Scrape images from Gelbooru.")
    parser.add_argument("-s", "--site", default="https://gelbooru.com", help="Domain to scrape from, default to https://gelbooru.com")
    parser.add_argument("-W", "--width", type=int, help="Scale the width of the image to the specified value, must either provide both width and height or not provide both")
    parser.add_argument("-H", "--height", type=int, help="Scale the height of the image to the specified value, must either provide both width and height or not provide both")
    parser.add_argument("-a", "--avif", action="store_true", help="If set, will convert the image into avif, need to have pillow-avif-plugin installed")
    parser.add_argument("-l", "--low-quality", action="store_true", help="If set, will download the sample instead of the original image")
    parser.add_argument("-t", "--min-tags", type=int, default=0, help="Filter out images with less than the specified amount of tags, default to 0")
    parser.add_argument("-m", "--max-scrape-count", type=int, help="Stop after scraping the set amount of images, may not be exact because of the asynchronous nature of this script, default to infinite")
    parser.add_argument("-c", "--continuous-scraping", action="store_true", help="If set, will scraping continuously even when reaching the 20000 images Gelbooru search depth cap by adjusting search tags")
    parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, default to all")
    args = parser.parse_args()
    if args.width is None or args.height is None:
        if args.width is not None or args.height is not None:
            print("You must either provide both width and height or not provide both at the same time!")
            sys.exit(1)
    else:
        if args.width < 1:
            print("Width must be greater than or equal to 1!")
            sys.exit(1)
        if args.height < 1:
            print("Height must be greater than or equal to 1!")
            sys.exit(1)
    if args.avif:
        try:
            import pillow_avif
        except ImportError:
            print("You need to pip install pillow-avif-plugin to use avif conversion!")
            sys.exit(1)
    if args.min_tags < 0:
        print("Minimum tags must be greater than or equal to 0!")
        sys.exit(1)
    if isinstance(args.max_scrape_count, int) and args.max_scrape_count <= 0:
        print("Maximum scrape count must be greater than 0!")
        sys.exit(1)
    if not args.tags_to_search:
        args.tags_to_search = ["all"]
    return args

async def main():
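    """Crawl Gelbooru listing pages and schedule a download task for each post."""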
    args = parse_args()
    print("Starting...")
    page_number = 0
    search_tags = utils.SearchTags(args.tags_to_search)

    os.makedirs(IMAGE_DIR, exist_ok=True)
    existing_image_ids = utils.get_existing_image_id_set(IMAGE_DIR)
    utils.register_sigint_callback()

    async with aiohttp.ClientSession(cookies={"fringeBenefits": "yup"}, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:
        scrape_state = utils.ScrapeState(concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()), session, existing_image_ids)
        tasks = []
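        # Paginate through search result pages, queueing one task per post link.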
        while True:
            try:
                if utils.get_sigint_count() >= 1 or (isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count):
                    break
                request_url = f"{args.site}/index.php?page=post&s=list&tags={search_tags.to_search_string()}&pid={page_number}"
                print(f"Going to {request_url}")
                async with scrape_state.session.get(request_url) as response:
                    html = await response.text()
                soup = BeautifulSoup(html, "html.parser")
                thumbnails_div = soup.find("div", class_="thumbnail-container")
                if not thumbnails_div:
                    raise RuntimeError("Thumbnails division not found.")
                notice_error = thumbnails_div.find("div", class_="notice error")
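                # Gelbooru caps search pagination depth; with --continuous-scraping, tighten the search bound and restart from page 0.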
                if notice_error and args.continuous_scraping:
                    print("Reached restricted depth, adjusting search tags to continue scraping...")
                    search_tags.update_bound(scrape_state)
                    page_number = 0
                    continue
                image_urls = [a["href"] for a in thumbnails_div.find_all("a")]
                image_url_count = len(image_urls)
                if image_url_count == 0:
                    print("Website returned 0 image urls.")
                    break
                print(f"Got {image_url_count} posts.")
                page_number += image_url_count
                for image_url in image_urls:
                    if utils.get_sigint_count() >= 1 or (isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count):
                        break
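                    # Cap the number of in-flight tasks at MAX_TASKS, reaping finished ones while waiting.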
                    while len(tasks) >= MAX_TASKS:
                        if utils.get_sigint_count() >= 1 or (isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count):
                            break
                        await asyncio.sleep(0.1)
                        for i in range(len(tasks) - 1, -1, -1):
                            task = tasks[i]
                            if task.done():
                                await task
                                del tasks[i]
                    tasks.append(asyncio.create_task(process_link(utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.max_scrape_count), scrape_state)))
            except Exception as e:
                print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
                await asyncio.sleep(0.1)
        if utils.get_sigint_count() >= 1:
            print("Script interrupted by user, gracefully exiting...\nYou can interrupt again to exit semi-forcefully, but it will break image checks!")
        else:
            print("No more images to download, waiting already submitted tasks to finish...")
        while tasks and utils.get_sigint_count() <= 1:
            await asyncio.sleep(0.1)
            for i in range(len(tasks) - 1, -1, -1):
                task = tasks[i]
                if task.done():
                    await task
                    del tasks[i]
    if utils.get_sigint_count() >= 1:
        if utils.get_sigint_count() >= 2:
            print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
        sys.exit(1)

if __name__ == "__main__":
    try:
        asyncio.run(main())
    except KeyboardInterrupt:
        print("\nScript interrupted by user, exiting...")
        sys.exit(1)