v2ray committed on
Commit 20ea893 · 1 Parent(s): fa4dd8f

Finished the scraping script.

Files changed (3)
  1. .gitignore +3 -0
  2. README.md +2 -0
  3. scrape.py +192 -0
.gitignore ADDED
@@ -0,0 +1,3 @@
+ images/
+ __pycache__/
+ .ipynb_checkpoints/
README.md CHANGED
@@ -1,3 +1,5 @@
  ---
  license: mit
  ---
+ # Anime Collection
+ A repo containing scripts to scrape Gelbooru and the images scraped from it.
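
Example usage (a sketch based on the argument parser in scrape.py below; the tags shown are only placeholders):

    python scrape.py                             # no tags searches the default tag "all"
    python scrape.py 1girl solo                  # download posts matching every listed tag
    python scrape.py -s https://gelbooru.com blue_sky

Each downloaded post is saved under images/ as <post id>.<extension> alongside a <post id>.txt tag file.
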
scrape.py ADDED
@@ -0,0 +1,192 @@
+ import os
+ import re
+ import sys
+ import random
+ import urllib.parse
+ import signal
+ import asyncio
+ import aiohttp
+ import aiofiles
+ import argparse
+ import concurrent.futures
+ from PIL import Image
+ from bs4 import BeautifulSoup
+
+ MAX_TASKS = 50   # Maximum number of in-flight download tasks.
+ MAX_RETRY = 3    # Retries per post before it is skipped.
+ IMAGE_DIR = "images"
+ IMAGE_EXT = {
+     ".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".tif",
+     ".webp", ".heic", ".heif", ".avif", ".jxl",
+ }
+
+ SIGINT_COUNTER = 0
+
+ def sigint_handler(signum, frame):
+     # Count Ctrl+C presses; the third one force quits the script.
+     global SIGINT_COUNTER
+     SIGINT_COUNTER += 1
+     print()
+     if SIGINT_COUNTER >= 3:
+         print("Script force quit by user, exiting...")
+         sys.exit(1)
+
+ def validate_image(image_path, tag_path):
+     # PIL's verify() checks file integrity without decoding the whole image.
+     try:
+         with Image.open(image_path) as img:
+             img.verify()
+         return True
+     except Exception as e:
+         print(f"Error validating image {image_path}: {e}")
+         return False
+
+ def handle_validation_result(future, image_path, tag_path):
+     if future.result():
+         return
+     try:
+         os.remove(image_path)
+     except Exception as e:
+         print(f"Error deleting image file: {e}")
+     try:
+         os.remove(tag_path)
+         print(f"Deleted invalid image and tag files: {image_path}, {tag_path}")
+     except Exception as e:
+         print(f"Error deleting tags file: {e}")
+
+ async def process_link(image_url, image_ids_to_ignore, session, thread_pool):
+     image_id = re.search(r"id=(\d+)", image_url).group(1)
+     if image_id in image_ids_to_ignore:
+         # print(f"Image {image_id} already exists, skipped.")
+         return
+     for i in range(1, MAX_RETRY + 2):  # 1 indexed.
+         try:
+             if SIGINT_COUNTER >= 1:
+                 break
+             # print(f"Processing image {image_id}...")
+             async with session.get(image_url) as response:
+                 html = await response.text()
+             soup = BeautifulSoup(html, "html.parser")
+             image_container = soup.find("section", class_=["image-container", "note-container"])
+             if not image_container:
+                 raise RuntimeError(f"No image container found for {image_id}.")
+             original_link = soup.find("a", string="Original image")["href"]
+             image_ext = os.path.splitext(original_link)[1].lower()
+             if not image_ext:
+                 print(f"Image {image_id} has no file extension, skipped.")
+                 return
+             if image_ext not in IMAGE_EXT:
+                 print(f"Image {image_id} is not an image, skipped.")
+                 return
+             tags = image_container["data-tags"].strip().split()
+             rating = image_container["data-rating"]
+             tags.append("nsfw" if rating in {"explicit", "questionable"} else "sfw")
+             random.shuffle(tags)
+             async with session.get(original_link) as img_response:
+                 img_data = await img_response.read()
+             image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
+             os.makedirs(IMAGE_DIR, exist_ok=True)
+             async with aiofiles.open(image_path, "wb") as f:
+                 await f.write(img_data)
+             tag_path = os.path.join(IMAGE_DIR, image_id + ".txt")
+             async with aiofiles.open(tag_path, "w", encoding="utf8") as f:
+                 await f.write(", ".join(tag.replace("_", " ") for tag in tags))
+             future = thread_pool.submit(validate_image, image_path, tag_path)
+             future.add_done_callback(lambda x: handle_validation_result(x, image_path, tag_path))
+             return
+         except Exception as e:
+             if i > MAX_RETRY:
+                 break
+             # print(f"A {e.__class__.__name__} occurred with image {image_id}: {e}\nPausing for 0.1 second before retrying attempt {i}/{MAX_RETRY}...")
+             await asyncio.sleep(0.1)
+     print(f"All retry attempts failed, image {image_id} skipped.")
+
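+ # For reference, a successfully processed post ends up on disk as an image plus
+ # a tag sidecar; the id and tags below are made up:
+ #     images/7654321.jpg
+ #     images/7654321.txt  ->  "long hair, 1girl, sfw, looking at viewer"
+ # Tags are shuffled, underscores become spaces, and an sfw/nsfw marker is appended.
+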
+ def parse_args():
+     parser = argparse.ArgumentParser(description="Scrape images from Gelbooru.")
+     parser.add_argument("-s", "--site", type=str, default="https://gelbooru.com", help="Domain to scrape from, defaults to https://gelbooru.com")
+     parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, defaults to all")
+     args = parser.parse_args()
+     if not args.tags_to_search:
+         args.tags_to_search = ["all"]
+     return args
+
+ async def main():
+     args = parse_args()
+     print("Starting...")
+     page_number = 0
+     search_tags = "+".join(urllib.parse.quote(tag, safe="") for tag in args.tags_to_search)
+
+     # Posts that already have a tag file are skipped, so reruns resume where they left off.
+     image_ids_to_ignore = set()
+     if os.path.isdir(IMAGE_DIR):
+         for path in os.listdir(IMAGE_DIR):
+             image_id, ext = os.path.splitext(path)
+             if ext != ".txt":
+                 continue
+             image_ids_to_ignore.add(image_id)
+
+     signal.signal(signal.SIGINT, sigint_handler)
+
+     async with aiohttp.ClientSession(cookies={"fringeBenefits": "yup"}, timeout=aiohttp.ClientTimeout(total=10)) as session:
+         thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count())
+         tasks = []
+         while True:
+             try:
+                 if SIGINT_COUNTER >= 1:
+                     break
+                 request_url = f"{args.site}/index.php?page=post&s=list&tags={search_tags}&pid={page_number}"
+                 print(f"Going to {request_url}")
+                 async with session.get(request_url) as response:
+                     html = await response.text()
+                 soup = BeautifulSoup(html, "html.parser")
+                 thumbnails_div = soup.find("div", class_="thumbnail-container")
+                 if not thumbnails_div:
+                     raise RuntimeError("Thumbnails division not found.")
+                 image_urls = [a["href"] for a in thumbnails_div.find_all("a")]
+                 image_url_count = len(image_urls)
+                 if image_url_count == 0:
+                     print("Website returned 0 image urls.")
+                     break
+                 print(f"Got {image_url_count} posts.")
+                 page_number += image_url_count
+                 for image_url in image_urls:
+                     if SIGINT_COUNTER >= 1:
+                         break
+                     # Throttle: wait until fewer than MAX_TASKS downloads are in flight.
+                     while len(tasks) >= MAX_TASKS:
+                         if SIGINT_COUNTER >= 1:
+                             break
+                         await asyncio.sleep(0.1)
+                         for i in range(len(tasks) - 1, -1, -1):
+                             task = tasks[i]
+                             if task.done():
+                                 await task
+                                 del tasks[i]
+                     tasks.append(asyncio.create_task(process_link(image_url, image_ids_to_ignore, session, thread_pool)))
+             except Exception as e:
+                 print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
+                 await asyncio.sleep(0.1)
+         if SIGINT_COUNTER >= 1:
+             print("Script interrupted by user, gracefully exiting...\nYou can interrupt again to exit semi-forcefully, but it will break image checks!")
+         else:
+             print("No more images to download, waiting for already submitted tasks to finish...")
+         while tasks and SIGINT_COUNTER <= 1:
+             await asyncio.sleep(0.1)
+             for i in range(len(tasks) - 1, -1, -1):
+                 task = tasks[i]
+                 if task.done():
+                     await task
+                     del tasks[i]
+         # Wait for the image validation thread pool to drain before exiting.
+         while True:
+             if SIGINT_COUNTER >= 2:
+                 print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
+                 thread_pool.shutdown(cancel_futures=True)
+                 break
+             await asyncio.sleep(0.1)
+             if not thread_pool._work_queue.qsize():
+                 break
+         if SIGINT_COUNTER >= 2:
+             sys.exit(1)
+
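+ # Example of the list URL built above for a (made-up) search "blue_sky rating:safe":
+ #     https://gelbooru.com/index.php?page=post&s=list&tags=blue_sky+rating%3Asafe&pid=0
+ # pid acts as a post offset, so it is advanced by the number of posts seen on each page.
+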
+ if __name__ == "__main__":
+     try:
+         asyncio.run(main())
+     except KeyboardInterrupt:
+         print("\nScript interrupted by user, exiting...")
+         sys.exit(1)
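
In short: the script pages through search results, downloads each post's original image and tags concurrently (at most MAX_TASKS at a time), verifies every file with PIL on a thread pool, and deletes anything that fails verification along with its tag file. One interrupt exits gracefully; a second and third exit progressively more forcefully.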