v2ray committed on
Commit 13b2261 · 1 Parent(s): 94f47b4

Added more features to the scraping scripts.

Files changed (6)
  1. compress.py +15 -22
  2. constants.py +12 -0
  3. decompress.py +3 -5
  4. scrape_gel.py +55 -77
  5. scrape_yan.py +53 -72
  6. utils.py +71 -0
compress.py CHANGED
@@ -2,13 +2,11 @@ import os
 import sys
 import argparse
 import tarfile
+from constants import *
 import concurrent.futures
 
-IMAGE_DIR = "images"
-COMPRESSED_DIR = "compressed"
-
-def compress_chunk(chunk, chunk_index):
-    with tarfile.open(os.path.join(COMPRESSED_DIR, f"chunk_{chunk_index}.tar"), "w") as tar:
+def compress_chunk(chunk, chunk_index, output_dir):
+    with tarfile.open(os.path.join(output_dir, f"chunk_{chunk_index}.tar"), "w") as tar:
         for image_path in chunk:
             tags_path = os.path.splitext(image_path)[0] + ".txt"
             if not os.path.isfile(tags_path):
@@ -18,34 +16,29 @@ def compress_chunk(chunk, chunk_index):
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Group images into uncompressed tar files.")
-    parser.add_argument("num_images_per_chunk", nargs=argparse.REMAINDER, help="Number of images per chunk, default to 1024")
+    parser.add_argument("-i", "--input-dir", default=IMAGE_DIR, help="Input directory for the images to chunk into tars")
+    parser.add_argument("-o", "--output-dir", default=COMPRESSED_DIR, help="Output directory for chunked tars")
+    parser.add_argument("-n", "--num-images-per-chunk", type=int, default=1024, help="Number of images per chunk, default to 1024")
     args = parser.parse_args()
-    if not args.num_images_per_chunk:
-        args.num_images_per_chunk = 1024
-    else:
-        if len(args.num_images_per_chunk) > 1:
-            print("Too many arguments passed, you should only pass 1.")
-            sys.exit(1)
-        try:
-            args.num_images_per_chunk = int(args.num_images_per_chunk[0])
-            if args.num_images_per_chunk < 1:
-                raise ValueError("Number of images per chunk needs to be a positive integer!")
-        except ValueError as e:
-            print("Invalid number of images per chunk set:", e)
-            sys.exit(1)
+    if args.num_images_per_chunk < 1:
+        print("Number of images per chunk needs to be a positive integer!")
+        sys.exit(1)
     return args
 
 def main():
     args = parse_args()
-    image_files = [os.path.join(IMAGE_DIR, f) for f in os.listdir(IMAGE_DIR) if not f.endswith(".txt")]
+    if not os.path.isdir(args.input_dir):
+        print(f"Your input dir \"{args.input_dir}\" doesn't exist or isn't a directory!")
+        sys.exit(1)
+    image_files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if not f.endswith(".txt")]
     image_files.sort()
-    os.makedirs(COMPRESSED_DIR, exist_ok=True)
+    os.makedirs(args.output_dir, exist_ok=True)
     with concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()) as executor:
         futures = []
         for i in range(0, len(image_files), args.num_images_per_chunk):
             chunk = image_files[i:i + args.num_images_per_chunk]
             chunk_index = i // args.num_images_per_chunk
-            future = executor.submit(compress_chunk, chunk, chunk_index)
+            future = executor.submit(compress_chunk, chunk, chunk_index, args.output_dir)
             futures.append(future)
         concurrent.futures.wait(futures)
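With the reworked argument parsing, a typical run might look like the following (the directory names and chunk size shown are just the defaults pulled from constants.py):

    python compress.py -i images -o compressed -n 1024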
constants.py ADDED
@@ -0,0 +1,12 @@
+
+MAX_TASKS = 50
+MAX_RETRY = 3
+TIMEOUT = 10
+
+IMAGE_DIR = "images"
+COMPRESSED_DIR = "compressed"
+
+IMAGE_EXT = {
+    ".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".tif",
+    ".webp", ".heic", ".heif", ".avif", ".jxl",
+}
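The other scripts pull these names in with a star import, and a module can still shadow one locally, which is how scrape_yan.py keeps its longer timeout below. A minimal sketch of that pattern (assuming constants.py above is on the import path):

    from constants import *  # brings in MAX_TASKS, MAX_RETRY, TIMEOUT, IMAGE_DIR, COMPRESSED_DIR, IMAGE_EXT

    TIMEOUT = 30  # Rebinding only shadows the name in this module; constants.TIMEOUT stays 10.
    print(MAX_TASKS, TIMEOUT)  # 50 30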
decompress.py CHANGED
@@ -2,11 +2,9 @@ import os
 import sys
 import argparse
 import tarfile
+from constants import *
 import concurrent.futures
 
-IMAGE_DIR = "images"
-COMPRESSED_DIR = "compressed"
-
 def decompress_chunk(chunk_file, output_dir):
     with tarfile.open(chunk_file, "r") as tar:
         tar.extractall(path=output_dir)
@@ -20,8 +18,8 @@ def parse_args():
 
 def main():
     args = parse_args()
-    if not os.path.exists(args.input_dir):
-        print(f"Input directory \"{args.input_dir}\" does not exist.")
+    if not os.path.isdir(args.input_dir):
+        print(f"Your input dir \"{args.input_dir}\" doesn't exist or isn't a directory!")
         sys.exit(1)
     chunk_files = [os.path.join(args.input_dir, f) for f in os.listdir(args.input_dir) if f.endswith(".tar")]
     os.makedirs(args.output_dir, exist_ok=True)
scrape_gel.py CHANGED
@@ -1,68 +1,28 @@
 import os
 import re
 import sys
+import utils
 import random
 import urllib
-import signal
 import asyncio
 import aiohttp
 import aiofiles
 import argparse
 import concurrent
-from PIL import Image
 import html as libhtml
+from constants import *
 from bs4 import BeautifulSoup
 
-MAX_TASKS = 50
-MAX_RETRY = 3
-TIMEOUT = 10
-IMAGE_DIR = "images"
-IMAGE_EXT = {
-    ".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".tif",
-    ".webp", ".heic", ".heif", ".avif", ".jxl",
-}
-
-SIGINT_COUNTER = 0
-
-def sigint_handler(signum, frame):
-    global SIGINT_COUNTER
-    SIGINT_COUNTER += 1
-    print()
-    if SIGINT_COUNTER >= 3:
-        print("Script force quit by user, exiting...")
-        sys.exit(1)
-
-def validate_image(image_path, tags_path):
-    try:
-        with Image.open(image_path) as img:
-            img.verify()
-            return True
-    except Exception as e:
-        print(f"Error validating image {image_path}: {e}")
-        return False
-
-def handle_validation_result(future, image_path, tags_path):
-    if future.result():
-        return
-    try:
-        os.remove(image_path)
-    except Exception as e:
-        print(f"Error deleting image file: {e}")
-    try:
-        os.remove(tags_path)
-        print(f"Deleted invalid image and tags files: {image_path}, {tags_path}")
-    except Exception as e:
-        print(f"Error deleting tags file: {e}")
-
-async def process_link(image_url, image_ids_to_ignore, session, thread_pool):
+async def process_link(thread_pool, session, image_url, image_ids_to_ignore, width, height, convert_to_avif, use_low_quality, min_tags):
     image_id = re.search("id=(\d+)", image_url).group(1)
     if image_id in image_ids_to_ignore:
         # print(f"Image {image_id} already exists, skipped.")
         return
+    image_ids_to_ignore.add(image_id)
     error = None
     for i in range(1, MAX_RETRY + 2): # 1 indexed.
         try:
-            if SIGINT_COUNTER >= 1:
+            if utils.SIGINT_COUNTER >= 1:
                 break
             # print(f"Processing image {image_id}...")
             async with session.get(image_url) as response:
@@ -71,8 +31,11 @@ async def process_link(image_url, image_ids_to_ignore, session, thread_pool):
                 image_container = soup.find("section", class_=["image-container", "note-container"])
                 if not image_container:
                     raise RuntimeError(f"No image container found for {image_id}.")
-                original_link = soup.find("a", string="Original image")["href"]
-                image_ext = os.path.splitext(original_link)[1].lower()
+                if not use_low_quality:
+                    image_download_url = soup.find("a", string="Original image")["href"]
+                else:
+                    image_download_url = image_container.find("img", id="image")["src"]
+                image_ext = os.path.splitext(image_download_url)[1].lower()
                 if not image_ext:
                     print(f"Image {image_id} has no file extension, skipped.")
                     return
@@ -80,21 +43,24 @@ async def process_link(image_url, image_ids_to_ignore, session, thread_pool):
                     print(f"Image {image_id} is not an image, skipped.")
                     return
                 tags = image_container["data-tags"].strip().split()
-                rating = image_container["data-rating"]
-                tags.append("nsfw" if rating in {"explicit", "questionable"} else "sfw")
+                tag_count = len(tags)
+                if tag_count < min_tags:
+                    # print(f"Image {image_id} doesn't have enough tags({tag_count} < {min_tags}), skipped.")
+                    return
+                tags.append("nsfw" if image_container["data-rating"] in {"explicit", "questionable"} else "sfw")
                 random.shuffle(tags)
                 image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
                 tags_path = os.path.join(IMAGE_DIR, image_id + ".txt")
                 tags_text = ", ".join(libhtml.unescape(tag).replace("_", " ") for tag in tags)
-                async with session.get(original_link) as img_response:
+                async with session.get(image_download_url) as img_response:
                     img_data = await img_response.read()
                 os.makedirs(IMAGE_DIR, exist_ok=True)
                 async with aiofiles.open(image_path, "wb") as f:
                     await f.write(img_data)
                 async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
                     await f.write(tags_text)
-                future = thread_pool.submit(validate_image, image_path, tags_path)
-                future.add_done_callback(lambda x: handle_validation_result(x, image_path, tags_path))
+                if not await utils.submit_validation(thread_pool, image_path, tags_path, width, height, convert_to_avif):
+                    image_ids_to_ignore.remove(image_id)
                 return
         except Exception as e:
             error = e
@@ -102,13 +68,39 @@ async def process_link(image_url, image_ids_to_ignore, session, thread_pool):
                 break
             # print(f"A {e.__class__.__name__} occurred with image {image_id}: {e}\nPausing for 0.1 second before retrying attempt {i}/{MAX_RETRY}...")
             await asyncio.sleep(0.1)
+    image_ids_to_ignore.remove(image_id)
     print(f"All retry attempts failed, image {image_id} skipped. Final error {error.__class__.__name__}: {error}")
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Scrape images from Gelbooru.")
-    parser.add_argument("-s", "--site", type=str, default="https://gelbooru.com", help="Domain to scrape from, defaults to https://gelbooru.com")
+    parser.add_argument("-s", "--site", default="https://gelbooru.com", help="Domain to scrape from, defaults to https://gelbooru.com")
+    parser.add_argument("-W", "--width", type=int, help="Scale the width of the image to the specified value, must either provide both width and height or not provide both")
+    parser.add_argument("-H", "--height", type=int, help="Scale the height of the image to the specified value, must either provide both width and height or not provide both")
+    parser.add_argument("-a", "--avif", action="store_true", help="If set, will convert the image into avif, need to have pillow-avif-plugin installed")
+    parser.add_argument("-l", "--low-quality", action="store_true", help="If set, will download the sample instead of the original image")
+    parser.add_argument("-t", "--min-tags", type=int, default=0, help="Filter out images with less than the specified amount of tags, default to 0")
     parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, defaults to all")
     args = parser.parse_args()
+    if args.width is None or args.height is None:
+        if args.width is not None or args.height is not None:
+            print("You must either provide both width and height or not provide both at the same time!")
+            sys.exit(1)
+    else:
+        if args.width < 1:
+            print("Width must be greater than or equal to 1!")
+            sys.exit(1)
+        if args.height < 1:
+            print("Height must be greater than or equal to 1!")
+            sys.exit(1)
+    if args.avif:
+        try:
+            import pillow_avif
+        except ImportError:
+            print("You need to pip install pillow-avif-plugin to use avif conversion!")
+            sys.exit(1)
+    if args.min_tags < 0:
+        print("Min tags must be greater than or equal to 0!")
+        sys.exit(1)
     if not args.tags_to_search:
         args.tags_to_search = ["all"]
     return args
@@ -119,22 +111,15 @@ async def main():
     page_number = 0
     search_tags = "+".join(urllib.parse.quote(tag, safe="") for tag in args.tags_to_search)
 
-    image_ids_to_ignore = set()
-    if os.path.isdir(IMAGE_DIR):
-        for path in os.listdir(IMAGE_DIR):
-            image_id, ext = os.path.splitext(path)
-            if ext != ".txt":
-                continue
-            image_ids_to_ignore.add(image_id)
-
-    signal.signal(signal.SIGINT, sigint_handler)
+    image_ids_to_ignore = utils.get_existing_image_tags_set(IMAGE_DIR)
+    utils.register_sigint_callback()
 
     async with aiohttp.ClientSession(cookies={"fringeBenefits": "yup"}, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:
         thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count())
         tasks = []
         while True:
             try:
-                if SIGINT_COUNTER >= 1:
+                if utils.SIGINT_COUNTER >= 1:
                     break
                 request_url = f"{args.site}/index.php?page=post&s=list&tags={search_tags}&pid={page_number}"
                 print(f"Going to {request_url}")
@@ -152,10 +137,10 @@
                 print(f"Got {image_url_count} posts.")
                 page_number += image_url_count
                 for image_url in image_urls:
-                    if SIGINT_COUNTER >= 1:
+                    if utils.SIGINT_COUNTER >= 1:
                         break
                     while len(tasks) >= MAX_TASKS:
-                        if SIGINT_COUNTER >= 1:
+                        if utils.SIGINT_COUNTER >= 1:
                             break
                         await asyncio.sleep(0.1)
                         for i in range(len(tasks) - 1, -1, -1):
@@ -163,30 +148,23 @@
                             if task.done():
                                 await task
                                 del tasks[i]
-                    tasks.append(asyncio.create_task(process_link(image_url, image_ids_to_ignore, session, thread_pool)))
+                    tasks.append(asyncio.create_task(process_link(thread_pool, session, image_url, image_ids_to_ignore, args.width, args.height, args.avif, args.low_quality, args.min_tags)))
             except Exception as e:
                 print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
                 await asyncio.sleep(0.1)
-        if SIGINT_COUNTER >= 1:
+        if utils.SIGINT_COUNTER >= 1:
             print("Script interrupted by user, gracefully exiting...\nYou can interrupt again to exit semi-forcefully, but it will break image checks!")
         else:
            print("No more images to download, waiting already submitted tasks to finish...")
-            while tasks and SIGINT_COUNTER <= 1:
+            while tasks and utils.SIGINT_COUNTER <= 1:
                await asyncio.sleep(0.1)
                for i in range(len(tasks) - 1, -1, -1):
                    task = tasks[i]
                    if task.done():
                        await task
                        del tasks[i]
-        while True:
-            if SIGINT_COUNTER >= 2:
-                print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
-                thread_pool.shutdown(cancel_futures=True)
-                break
-            await asyncio.sleep(0.1)
-            if not thread_pool._work_queue.qsize():
-                break
-        if SIGINT_COUNTER >= 2:
+        if utils.SIGINT_COUNTER >= 2:
+            print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
             sys.exit(1)
 
 if __name__ == "__main__":
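Put together, a run that resizes every download to 512x512, converts it to AVIF, and skips posts with fewer than 10 tags might look like this (the search tag is only an example; -a additionally requires pillow-avif-plugin to be installed):

    python scrape_gel.py -W 512 -H 512 -a -t 10 hatsune_miku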
scrape_yan.py CHANGED
@@ -1,68 +1,33 @@
 import os
 import sys
+import utils
 import random
 import urllib
-import signal
 import asyncio
 import aiohttp
 import aiofiles
 import argparse
 import concurrent
-from PIL import Image
+from constants import *
 
-MAX_TASKS = 50
-MAX_RETRY = 3
-TIMEOUT = 30
-IMAGE_DIR = "images"
-IMAGE_EXT = {
-    ".png", ".jpg", ".jpeg", ".bmp", ".tiff", ".tif",
-    ".webp", ".heic", ".heif", ".avif", ".jxl",
-}
+TIMEOUT = 30 # Local override.
 
-SIGINT_COUNTER = 0
-
-def sigint_handler(signum, frame):
-    global SIGINT_COUNTER
-    SIGINT_COUNTER += 1
-    print()
-    if SIGINT_COUNTER >= 3:
-        print("Script force quit by user, exiting...")
-        sys.exit(1)
-
-def validate_image(image_path, tags_path):
-    try:
-        with Image.open(image_path) as img:
-            img.verify()
-            return True
-    except Exception as e:
-        print(f"Error validating image {image_path}: {e}")
-        return False
-
-def handle_validation_result(future, image_path, tags_path):
-    if future.result():
-        return
-    try:
-        os.remove(image_path)
-    except Exception as e:
-        print(f"Error deleting image file: {e}")
-    try:
-        os.remove(tags_path)
-        print(f"Deleted invalid image and tags files: {image_path}, {tags_path}")
-    except Exception as e:
-        print(f"Error deleting tags file: {e}")
-
-async def process_link(image_object, image_ids_to_ignore, session, thread_pool):
+async def process_link(thread_pool, session, image_object, image_ids_to_ignore, width, height, convert_to_avif, use_low_quality, min_tags):
     image_id = str(image_object["id"])
     if image_id in image_ids_to_ignore:
         # print(f"Image {image_id} already exists, skipped.")
         return
+    image_ids_to_ignore.add(image_id)
     error = None
     for i in range(1, MAX_RETRY + 2): # 1 indexed.
         try:
-            if SIGINT_COUNTER >= 1:
+            if utils.SIGINT_COUNTER >= 1:
                 break
             # print(f"Processing image {image_id}...")
-            image_url = image_object["file_url"] # sample_url
+            if not use_low_quality:
+                image_url = image_object["file_url"]
+            else:
+                image_url = image_object["sample_url"]
             image_ext = os.path.splitext(image_url)[1].lower()
             if not image_ext:
                 print(f"Image {image_id} has no file extension, skipped.")
@@ -71,6 +36,10 @@ async def process_link(image_object, image_ids_to_ignore, session, thread_pool):
                 print(f"Image {image_id} is not an image, skipped.")
                 return
             tags = image_object["tags"].split()
+            tag_count = len(tags)
+            if tag_count < min_tags:
+                # print(f"Image {image_id} doesn't have enough tags({tag_count} < {min_tags}), skipped.")
+                return
             tags.append("nsfw" if image_object["rating"] in {"e", "q"} else "sfw")
             random.shuffle(tags)
             image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
@@ -83,8 +52,8 @@ async def process_link(image_object, image_ids_to_ignore, session, thread_pool):
                 await f.write(img_data)
             async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
                 await f.write(tags_text)
-            future = thread_pool.submit(validate_image, image_path, tags_path)
-            future.add_done_callback(lambda x: handle_validation_result(x, image_path, tags_path))
+            if not await utils.submit_validation(thread_pool, image_path, tags_path, width, height, convert_to_avif):
+                image_ids_to_ignore.remove(image_id)
             return
         except Exception as e:
             error = e
@@ -92,13 +61,39 @@ async def process_link(image_object, image_ids_to_ignore, session, thread_pool):
                 break
             # print(f"A {e.__class__.__name__} occurred with image {image_id}: {e}\nPausing for 0.1 second before retrying attempt {i}/{MAX_RETRY}...")
             await asyncio.sleep(0.1)
+    image_ids_to_ignore.remove(image_id)
     print(f"All retry attempts failed, image {image_id} skipped. Final error {error.__class__.__name__}: {error}")
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Scrape images from yande.re.")
-    parser.add_argument("-s", "--site", type=str, default="https://yande.re", help="Domain to scrape from, defaults to https://yande.re")
+    parser.add_argument("-s", "--site", default="https://yande.re", help="Domain to scrape from, defaults to https://yande.re")
+    parser.add_argument("-W", "--width", type=int, help="Scale the width of the image to the specified value, must either provide both width and height or not provide both")
+    parser.add_argument("-H", "--height", type=int, help="Scale the height of the image to the specified value, must either provide both width and height or not provide both")
+    parser.add_argument("-a", "--avif", action="store_true", help="If set, will convert the image into avif, need to have pillow-avif-plugin installed")
+    parser.add_argument("-l", "--low-quality", action="store_true", help="If set, will download the sample instead of the original image")
+    parser.add_argument("-t", "--min-tags", type=int, default=0, help="Filter out images with less than the specified amount of tags, default to 0")
    parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, when not specified, matches every image")
     args = parser.parse_args()
+    if args.width is None or args.height is None:
+        if args.width is not None or args.height is not None:
+            print("You must either provide both width and height or not provide both at the same time!")
+            sys.exit(1)
+    else:
+        if args.width < 1:
+            print("Width must be greater than or equal to 1!")
+            sys.exit(1)
+        if args.height < 1:
+            print("Height must be greater than or equal to 1!")
+            sys.exit(1)
+    if args.avif:
+        try:
+            import pillow_avif
+        except ImportError:
+            print("You need to pip install pillow-avif-plugin to use avif conversion!")
+            sys.exit(1)
+    if args.min_tags < 0:
+        print("Min tags must be greater than or equal to 0!")
+        sys.exit(1)
     if not args.tags_to_search:
         args.tags_to_search = [""]
     return args
@@ -109,22 +104,15 @@ async def main():
     page_number = 1
     search_tags = "+".join(urllib.parse.quote(tag, safe="") for tag in args.tags_to_search)
 
-    image_ids_to_ignore = set()
-    if os.path.isdir(IMAGE_DIR):
-        for path in os.listdir(IMAGE_DIR):
-            image_id, ext = os.path.splitext(path)
-            if ext != ".txt":
-                continue
-            image_ids_to_ignore.add(image_id)
-
-    signal.signal(signal.SIGINT, sigint_handler)
+    image_ids_to_ignore = utils.get_existing_image_tags_set(IMAGE_DIR)
+    utils.register_sigint_callback()
 
     async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:
         thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count())
         tasks = []
         while True:
             try:
-                if SIGINT_COUNTER >= 1:
+                if utils.SIGINT_COUNTER >= 1:
                     break
                 request_url = f"{args.site}/post.json?limit=1000&tags={search_tags}&page={page_number}"
                 print(f"Going to {request_url}")
@@ -137,10 +125,10 @@
                 print(f"Got {image_count} posts.")
                 page_number += 1
                 for image_object in image_objects:
-                    if SIGINT_COUNTER >= 1:
+                    if utils.SIGINT_COUNTER >= 1:
                         break
                     while len(tasks) >= MAX_TASKS:
-                        if SIGINT_COUNTER >= 1:
+                        if utils.SIGINT_COUNTER >= 1:
                             break
                         await asyncio.sleep(0.1)
                         for i in range(len(tasks) - 1, -1, -1):
@@ -148,30 +136,23 @@
                             if task.done():
                                 await task
                                 del tasks[i]
-                    tasks.append(asyncio.create_task(process_link(image_object, image_ids_to_ignore, session, thread_pool)))
+                    tasks.append(asyncio.create_task(process_link(thread_pool, session, image_object, image_ids_to_ignore, args.width, args.height, args.avif, args.low_quality, args.min_tags)))
             except Exception as e:
                 print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
                 await asyncio.sleep(0.1)
-        if SIGINT_COUNTER >= 1:
+        if utils.SIGINT_COUNTER >= 1:
            print("Script interrupted by user, gracefully exiting...\nYou can interrupt again to exit semi-forcefully, but it will break image checks!")
        else:
            print("No more images to download, waiting already submitted tasks to finish...")
-            while tasks and SIGINT_COUNTER <= 1:
+            while tasks and utils.SIGINT_COUNTER <= 1:
                await asyncio.sleep(0.1)
                for i in range(len(tasks) - 1, -1, -1):
                    task = tasks[i]
                    if task.done():
                        await task
                        del tasks[i]
-        while True:
-            if SIGINT_COUNTER >= 2:
-                print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
-                thread_pool.shutdown(cancel_futures=True)
-                break
-            await asyncio.sleep(0.1)
-            if not thread_pool._work_queue.qsize():
-                break
-        if SIGINT_COUNTER >= 2:
+        if utils.SIGINT_COUNTER >= 2:
+            print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
             sys.exit(1)
 
 if __name__ == "__main__":
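The yande.re scraper takes the same new flags; for instance, downloading the samples instead of the originals while requiring at least 5 tags per post might look like:

    python scrape_yan.py -l -t 5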
utils.py ADDED
@@ -0,0 +1,71 @@
+import os
+import sys
+import signal
+import asyncio
+from PIL import Image
+
+SIGINT_COUNTER = 0
+
+def sigint_handler(signum, frame):
+    global SIGINT_COUNTER
+    SIGINT_COUNTER += 1
+    print()
+    if SIGINT_COUNTER >= 3:
+        print("Script force quit by user, exiting...")
+        sys.exit(1)
+
+def register_sigint_callback():
+    signal.signal(signal.SIGINT, sigint_handler)
+
+def validate_image(image_path, tags_path, width=None, height=None, convert_to_avif=False):
+    new_path = None
+    try:
+        with Image.open(image_path) as img:
+            save_kwargs = {}
+            if isinstance(width, int) and width > 0 and isinstance(height, int) and height > 0:
+                img = img.resize((width, height))
+                new_path = image_path
+            if convert_to_avif:
+                import pillow_avif
+                save_kwargs["quality"] = 50
+                new_path = os.path.splitext(image_path)[0] + ".avif"
+            if new_path is not None:
+                img.load()
+                img.save(new_path, **save_kwargs)
+            else:
+                img.verify()
+        if new_path is not None and os.path.isfile(new_path) and os.path.realpath(new_path) != os.path.realpath(image_path):
+            os.remove(image_path)
+        return True
+    except Exception as e:
+        print(f"Error validating image {image_path}: {e}")
+        try:
+            os.remove(image_path)
+        except Exception as e:
+            print("Error deleting image file:", e)
+        try:
+            os.remove(tags_path)
+            print(f"Deleted invalid image and tags files: {image_path}, {tags_path}")
+        except Exception as e:
+            print("Error deleting tags file:", e)
+        try:
+            if new_path is not None and os.path.isfile(new_path):
+                os.remove(new_path)
+                print("Deleted invalid new image file:", new_path)
+        except Exception as e:
+            print("Error deleting new image file:", e)
+        return False
+
+async def submit_validation(thread_pool, image_path, tags_path, width=None, height=None, convert_to_avif=False):
+    return await asyncio.wrap_future(thread_pool.submit(validate_image, image_path, tags_path, width, height, convert_to_avif))
+
+def get_existing_image_tags_set(image_dir):
+    if not os.path.isdir(image_dir):
+        return set()
+    existing_image_tags = set()
+    for path in os.listdir(image_dir):
+        image_id, ext = os.path.splitext(path)
+        if ext != ".txt":
+            continue
+        existing_image_tags.add(image_id)
+    return existing_image_tags
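submit_validation bridges the thread pool and asyncio via asyncio.wrap_future, which is how the scrapers above await the validation result inline. A minimal standalone sketch of calling it outside the scrapers (the file paths are hypothetical; for a missing or broken file, validate_image prints the error and returns False):

    import asyncio
    import concurrent.futures
    import utils

    async def demo():
        # Runs the Pillow validation (and optional resize/AVIF conversion) in a worker thread,
        # then awaits the wrapped future without blocking the event loop.
        with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
            kept = await utils.submit_validation(pool, "images/12345.jpg", "images/12345.txt", width=512, height=512)
            print("kept" if kept else "deleted")

    asyncio.run(demo())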