v2ray committed
Commit 1d99bd3 · 1 Parent(s): 1546b82

Added continuous scraping for Gelbooru.

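The new -m/--max-scrape-count and -c/--continuous-scraping flags added here combine with the existing options of scrape_gel.py, and sort/compare filter tags are passed as ordinary positional search tags. A hypothetical invocation (the tag, resolution, and count values are illustrative, not from the commit):

    python scrape_gel.py -c -m 50000 -W 1024 -H 1024 sort:id:desc 1girl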
scrape_gel.py CHANGED
@@ -13,39 +13,49 @@ import html as libhtml
 from constants import *
 from bs4 import BeautifulSoup
 
-async def process_link(thread_pool, session, image_url, image_ids_to_ignore, width, height, convert_to_avif, use_low_quality, min_tags):
-    image_id = re.search("id=(\d+)", image_url).group(1)
-    if image_id in image_ids_to_ignore:
+async def process_link(scrape_args, scrape_state):
+    image_id = re.search("id=(\d+)", scrape_args.target).group(1)
+    image_id_int = int(image_id)
+    scrape_state.last_reached_image_id = image_id_int
+    image_id_already_exists = image_id in scrape_state.existing_image_ids
+    if image_id_already_exists and image_id_int % 100 < 99:
         # print(f"Image {image_id} already exists, skipped.")
         return
-    image_ids_to_ignore.add(image_id)
+    scrape_state.existing_image_ids.add(image_id)
     error = None
     for i in range(1, MAX_RETRY + 2): # 1 indexed.
         try:
-            if utils.get_sigint_count() >= 1:
+            if utils.get_sigint_count() >= 1 or isinstance(scrape_args.max_scrape_count, int) and scrape_state.scraped_image_count >= scrape_args.max_scrape_count:
                 break
             # print(f"Processing image {image_id}...")
-            async with session.get(image_url) as response:
+            async with scrape_state.session.get(scrape_args.target) as response:
                 html = await response.text()
             soup = BeautifulSoup(html, "html.parser")
+            score_span = soup.find("span", id="psc" + image_id)
+            if score_span:
+                scrape_state.last_reached_image_score = int(score_span.contents[0])
+            if image_id_already_exists:
+                # print(f"Image {image_id} already exists, skipped.")
+                return
+            video_container = soup.find("video", id="gelcomVideoPlayer")
+            if video_container:
+                print(f"Image {image_id} is a video, skipped.")
+                return
             image_container = soup.find("section", class_=["image-container", "note-container"])
             if not image_container:
                 raise RuntimeError(f"No image container found for {image_id}.")
-            if not use_low_quality:
+            if not scrape_args.use_low_quality:
                 image_download_url = soup.find("a", string="Original image")["href"]
             else:
                 image_download_url = image_container.find("img", id="image")["src"]
             image_ext = os.path.splitext(image_download_url)[1].lower()
-            if not image_ext:
-                print(f"Image {image_id} has no file extension, skipped.")
-                return
             if image_ext not in IMAGE_EXT:
                 print(f"Image {image_id} is not an image, skipped.")
                 return
             tags = image_container["data-tags"].strip().split()
             tag_count = len(tags)
-            if tag_count < min_tags:
-                # print(f"Image {image_id} doesn't have enough tags({tag_count} < {min_tags}), skipped.")
+            if tag_count < scrape_args.min_tags:
+                # print(f"Image {image_id} doesn't have enough tags({tag_count} < {scrape_args.min_tags}), skipped.")
                 return
             rating = image_container["data-rating"]
             if rating == "explicit": tags.append("nsfw")
@@ -55,15 +65,19 @@ async def process_link(thread_pool, session, image_url, image_ids_to_ignore, wid
             image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
             tags_path = os.path.join(IMAGE_DIR, image_id + ".txt")
             tags_text = ", ".join(libhtml.unescape(tag).replace("_", " ") for tag in tags)
-            async with session.get(image_download_url) as img_response:
+            async with scrape_state.session.get(image_download_url) as img_response:
                 img_data = await img_response.read()
             os.makedirs(IMAGE_DIR, exist_ok=True)
             async with aiofiles.open(image_path, "wb") as f:
                 await f.write(img_data)
             async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
                 await f.write(tags_text)
-            if not await utils.submit_validation(thread_pool, image_path, tags_path, width, height, convert_to_avif):
-                image_ids_to_ignore.remove(image_id)
+            if not await utils.submit_validation(scrape_state.thread_pool, image_path, tags_path, scrape_args.width, scrape_args.height, scrape_args.convert_to_avif):
+                scrape_state.existing_image_ids.remove(image_id)
+            else:
+                scrape_state.scraped_image_count += 1
+                if scrape_state.scraped_image_count % 1000 == 0:
+                    print(f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images.")
             return
         except Exception as e:
             error = e
@@ -71,18 +85,21 @@ async def process_link(thread_pool, session, image_url, image_ids_to_ignore, wid
                 break
             # print(f"A {e.__class__.__name__} occurred with image {image_id}: {e}\nPausing for 0.1 second before retrying attempt {i}/{MAX_RETRY}...")
             await asyncio.sleep(0.1)
-    image_ids_to_ignore.remove(image_id)
+    if not image_id_already_exists:
+        scrape_state.existing_image_ids.remove(image_id)
     print(f"All retry attempts failed, image {image_id} skipped. Final error {error.__class__.__name__}: {error}")
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Scrape images from Gelbooru.")
-    parser.add_argument("-s", "--site", default="https://gelbooru.com", help="Domain to scrape from, defaults to https://gelbooru.com")
+    parser.add_argument("-s", "--site", default="https://gelbooru.com", help="Domain to scrape from, default to https://gelbooru.com")
     parser.add_argument("-W", "--width", type=int, help="Scale the width of the image to the specified value, must either provide both width and height or not provide both")
     parser.add_argument("-H", "--height", type=int, help="Scale the height of the image to the specified value, must either provide both width and height or not provide both")
     parser.add_argument("-a", "--avif", action="store_true", help="If set, will convert the image into avif, need to have pillow-avif-plugin installed")
     parser.add_argument("-l", "--low-quality", action="store_true", help="If set, will download the sample instead of the original image")
     parser.add_argument("-t", "--min-tags", type=int, default=0, help="Filter out images with less than the specified amount of tags, default to 0")
-    parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, defaults to all")
+    parser.add_argument("-m", "--max-scrape-count", type=int, help="Stop after scraping the set amount of images, may not be exact because of the asynchronous nature of this script, default to infinite")
+    parser.add_argument("-c", "--continuous-scraping", action="store_true", help="If set, will scrape continuously even when reaching the 20000 images Gelbooru search depth cap by adjusting search tags")
+    parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, default to all")
     args = parser.parse_args()
     if args.width is None or args.height is None:
         if args.width is not None or args.height is not None:
@@ -102,7 +119,10 @@ def parse_args():
        print("You need to pip install pillow-avif-plugin to use avif conversion!")
        sys.exit(1)
     if args.min_tags < 0:
-        print("Min tags must be greater than or equal to 0!")
+        print("Minimum tags must be greater than or equal to 0!")
+        sys.exit(1)
+    if isinstance(args.max_scrape_count, int) and args.max_scrape_count <= 0:
+        print("Maximum scrape count must be greater than 0!")
         sys.exit(1)
     if not args.tags_to_search:
         args.tags_to_search = ["all"]
@@ -112,27 +132,33 @@ async def main():
     args = parse_args()
     print("Starting...")
     page_number = 0
-    search_tags = "+".join(urllib.parse.quote(tag, safe="") for tag in args.tags_to_search)
+    search_tags = utils.SearchTags(args.tags_to_search)
 
     os.makedirs(IMAGE_DIR, exist_ok=True)
-    image_ids_to_ignore = utils.get_existing_image_id_set(IMAGE_DIR)
+    existing_image_ids = utils.get_existing_image_id_set(IMAGE_DIR)
     utils.register_sigint_callback()
 
     async with aiohttp.ClientSession(cookies={"fringeBenefits": "yup"}, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:
-        thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count())
+        scrape_state = utils.ScrapeState(concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()), session, existing_image_ids)
        tasks = []
        while True:
            try:
-                if utils.get_sigint_count() >= 1:
+                if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
                    break
-                request_url = f"{args.site}/index.php?page=post&s=list&tags={search_tags}&pid={page_number}"
+                request_url = f"{args.site}/index.php?page=post&s=list&tags={search_tags.to_search_string()}&pid={page_number}"
                print(f"Going to {request_url}")
-                async with session.get(request_url) as response:
+                async with scrape_state.session.get(request_url) as response:
                    html = await response.text()
                soup = BeautifulSoup(html, "html.parser")
                thumbnails_div = soup.find("div", class_="thumbnail-container")
                if not thumbnails_div:
                    raise RuntimeError("Thumbnails division not found.")
+                notice_error = thumbnails_div.find("div", class_="notice error")
+                if notice_error and args.continuous_scraping:
+                    print("Reached restricted depth, adjusting search tags to continue scraping...")
+                    search_tags.update_bound(scrape_state)
+                    page_number = 0
+                    continue
                image_urls = [a["href"] for a in thumbnails_div.find_all("a")]
                image_url_count = len(image_urls)
                if image_url_count == 0:
@@ -141,10 +167,10 @@ async def main():
                print(f"Got {image_url_count} posts.")
                page_number += image_url_count
                for image_url in image_urls:
-                    if utils.get_sigint_count() >= 1:
+                    if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
                        break
                    while len(tasks) >= MAX_TASKS:
-                        if utils.get_sigint_count() >= 1:
+                        if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
                            break
                        await asyncio.sleep(0.1)
                        for i in range(len(tasks) - 1, -1, -1):
@@ -152,7 +178,7 @@ async def main():
                            if task.done():
                                await task
                                del tasks[i]
-                    tasks.append(asyncio.create_task(process_link(thread_pool, session, image_url, image_ids_to_ignore, args.width, args.height, args.avif, args.low_quality, args.min_tags)))
+                    tasks.append(asyncio.create_task(process_link(utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.max_scrape_count), scrape_state)))
            except Exception as e:
                print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
                await asyncio.sleep(0.1)
@@ -167,8 +193,9 @@ async def main():
            if task.done():
                await task
                del tasks[i]
-    if utils.get_sigint_count() >= 2:
-        print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
+    if utils.get_sigint_count() >= 1:
+        if utils.get_sigint_count() >= 2:
+            print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
        sys.exit(1)
 
 if __name__ == "__main__":
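In short: when the thumbnail listing contains Gelbooru's "notice error" div (the 20000-image search depth cap mentioned in the -c help text) and --continuous-scraping is set, main() calls search_tags.update_bound() and resets pid to 0, so the next listing request narrows the search instead of stalling. With the default sort:id:desc ordering, consecutive listing requests would carry query strings roughly like the following (the post ID and offset are illustrative, not from the commit):

    tags=sort%3Aid%3Adesc&pid=19958
    tags=sort%3Aid%3Adesc+id%3A%3C%3D7654321&pid=0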
scrape_yan.py CHANGED
@@ -12,12 +12,12 @@ from constants import *
 
 TIMEOUT = 30 # Local override.
 
-async def process_link(thread_pool, session, image_object, image_ids_to_ignore, width, height, convert_to_avif, use_low_quality, min_tags):
+async def process_link(thread_pool, session, image_object, existing_image_ids, width, height, convert_to_avif, use_low_quality, min_tags):
     image_id = str(image_object["id"])
-    if image_id in image_ids_to_ignore:
+    if image_id in existing_image_ids:
         # print(f"Image {image_id} already exists, skipped.")
         return
-    image_ids_to_ignore.add(image_id)
+    existing_image_ids.add(image_id)
     error = None
     for i in range(1, MAX_RETRY + 2): # 1 indexed.
         try:
@@ -29,9 +29,6 @@ async def process_link(thread_pool, session, image_object, image_ids_to_ignore,
             else:
                 image_url = image_object["sample_url"]
             image_ext = os.path.splitext(image_url)[1].lower()
-            if not image_ext:
-                print(f"Image {image_id} has no file extension, skipped.")
-                return
             if image_ext not in IMAGE_EXT:
                 print(f"Image {image_id} is not an image, skipped.")
                 return
@@ -56,7 +53,7 @@ async def process_link(thread_pool, session, image_object, image_ids_to_ignore,
             async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
                 await f.write(tags_text)
             if not await utils.submit_validation(thread_pool, image_path, tags_path, width, height, convert_to_avif):
-                image_ids_to_ignore.remove(image_id)
+                existing_image_ids.remove(image_id)
             return
         except Exception as e:
             error = e
@@ -64,12 +61,12 @@ async def process_link(thread_pool, session, image_object, image_ids_to_ignore,
                 break
             # print(f"A {e.__class__.__name__} occurred with image {image_id}: {e}\nPausing for 0.1 second before retrying attempt {i}/{MAX_RETRY}...")
             await asyncio.sleep(0.1)
-    image_ids_to_ignore.remove(image_id)
+    existing_image_ids.remove(image_id)
     print(f"All retry attempts failed, image {image_id} skipped. Final error {error.__class__.__name__}: {error}")
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Scrape images from yande.re.")
-    parser.add_argument("-s", "--site", default="https://yande.re", help="Domain to scrape from, defaults to https://yande.re")
+    parser.add_argument("-s", "--site", default="https://yande.re", help="Domain to scrape from, default to https://yande.re")
     parser.add_argument("-W", "--width", type=int, help="Scale the width of the image to the specified value, must either provide both width and height or not provide both")
     parser.add_argument("-H", "--height", type=int, help="Scale the height of the image to the specified value, must either provide both width and height or not provide both")
     parser.add_argument("-a", "--avif", action="store_true", help="If set, will convert the image into avif, need to have pillow-avif-plugin installed")
@@ -108,7 +105,7 @@ async def main():
     search_tags = "+".join(urllib.parse.quote(tag, safe="") for tag in args.tags_to_search)
 
     os.makedirs(IMAGE_DIR, exist_ok=True)
-    image_ids_to_ignore = utils.get_existing_image_id_set(IMAGE_DIR)
+    existing_image_ids = utils.get_existing_image_id_set(IMAGE_DIR)
     utils.register_sigint_callback()
 
     async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:
@@ -140,7 +137,7 @@ async def main():
                            if task.done():
                                await task
                                del tasks[i]
-                    tasks.append(asyncio.create_task(process_link(thread_pool, session, image_object, image_ids_to_ignore, args.width, args.height, args.avif, args.low_quality, args.min_tags)))
+                    tasks.append(asyncio.create_task(process_link(thread_pool, session, image_object, existing_image_ids, args.width, args.height, args.avif, args.low_quality, args.min_tags)))
            except Exception as e:
                print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
                await asyncio.sleep(0.1)
@@ -155,8 +152,9 @@ async def main():
            if task.done():
                await task
                del tasks[i]
-    if utils.get_sigint_count() >= 2:
-        print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
+    if utils.get_sigint_count() >= 1:
+        if utils.get_sigint_count() >= 2:
+            print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
        sys.exit(1)
 
 if __name__ == "__main__":
utils/__init__.py CHANGED
@@ -1,3 +1,5 @@
 from .utils import *
+from .search_tags import *
+from .scrape_args import *
 from .scrape_state import *
 from .sigint_handler import *
utils/scrape_args.py ADDED
@@ -0,0 +1,12 @@
+from typing import Optional, Any
+from dataclasses import dataclass
+
+@dataclass
+class ScrapeArgs:
+    target: Any
+    width: Optional[int] = None
+    height: Optional[int] = None
+    convert_to_avif: bool = False
+    use_low_quality: bool = False
+    min_tags: int = 0
+    max_scrape_count: Optional[int] = None
utils/scrape_state.py CHANGED
@@ -0,0 +1,13 @@
+from typing import Optional
+from aiohttp import ClientSession
+from dataclasses import dataclass, field
+from concurrent.futures import ThreadPoolExecutor
+
+@dataclass
+class ScrapeState:
+    thread_pool: ThreadPoolExecutor
+    session: ClientSession
+    existing_image_ids: set[str] = field(default_factory=set)
+    scraped_image_count: int = 0
+    last_reached_image_id: Optional[int] = None
+    last_reached_image_score: Optional[int] = None
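ScrapeState is the shared mutable state that every process_link task receives alongside its per-post ScrapeArgs. A minimal sketch of how the two dataclasses are wired together, mirroring main() in scrape_gel.py (the post URL and min_tags value are illustrative):

    import os
    import concurrent.futures
    import aiohttp
    import utils

    async def demo():
        async with aiohttp.ClientSession() as session:
            pool = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count())
            state = utils.ScrapeState(pool, session)  # existing_image_ids defaults to an empty set
            post = utils.ScrapeArgs("https://gelbooru.com/index.php?page=post&s=view&id=1234567", min_tags=5)
            # await process_link(post, state)  # from scrape_gel; downloads the post and bumps state.scraped_image_count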
utils/search_tags.py ADDED
@@ -0,0 +1,137 @@
+import re
+import urllib
+from typing import Optional
+from dataclasses import dataclass
+
+COMPARE_FILTER_TAG_PATTERN = re.compile(r"^([a-z]+):([<>]?)(=?)(\S*)$", re.ASCII)
+WHITE_SPACE_PATTERN = re.compile(r"\s")
+
+@dataclass
+class SortTag:
+    sort_type: str = "id"
+    descending: bool = True
+
+    def __str__(self):
+        return "sort:" + self.sort_type + ":" + ("desc" if self.descending else "asc")
+
+    @classmethod
+    def validate_sort_type(cls, sort_type):
+        match sort_type:
+            case "id":
+                pass
+            case "score":
+                pass
+            case _:
+                raise NotImplementedError(f"Sort type \"{sort_type}\" is not implemented!")
+
+    @classmethod
+    def from_tag(cls, tag):
+        if not tag.startswith("sort:"):
+            return None
+        sort_type = None
+        descending = True
+        for i, sort_tag_part in enumerate(tag.split(":")):
+            match i:
+                case 0:
+                    pass
+                case 1:
+                    cls.validate_sort_type(sort_tag_part)
+                    sort_type = sort_tag_part
+                case 2:
+                    if sort_tag_part == "asc":
+                        descending = False
+                case _:
+                    raise ValueError(f"The sort tag \"{tag}\" you provided isn't valid!")
+        if i < 1:
+            raise ValueError(f"The sort tag \"{tag}\" you provided isn't valid!")
+        return cls(sort_type, descending)
+
+@dataclass
+class CompareFilterTag:
+    compare_type: str
+    less_than: bool
+    with_equal: bool
+    target: int
+
+    def __str__(self):
+        return self.compare_type + ":" + ("<" if self.less_than else ">") + ("=" if self.with_equal else "") + str(self.target)
+
+    @classmethod
+    def from_tag(cls, tag):
+        re_match = COMPARE_FILTER_TAG_PATTERN.search(tag)
+        if re_match is None:
+            return None
+        target = re_match.group(4)
+        if not target:
+            raise ValueError(f"The compare filter tag \"{tag}\" you provided isn't valid!")
+        less_than = re_match.group(2)
+        with_equal = re_match.group(3)
+        if not less_than:
+            if not with_equal:
+                return None
+            raise ValueError(f"The compare filter tag \"{tag}\" you provided isn't valid!")
+        try:
+            target = int(target)
+        except ValueError as e:
+            raise ValueError(f"The compare filter tag \"{tag}\" you provided isn't valid!") from e
+        if less_than == "<":
+            less_than = True
+        else:
+            less_than = False
+        with_equal = bool(with_equal)
+        compare_type = re_match.group(1)
+        return cls(compare_type, less_than, with_equal, target)
+
+class SearchTags:
+
+    def __init__(self, tags: list[str]):
+        self.general_tags: list[str] = []
+        self.sort_tag: SortTag = None
+        self.compare_filter_tags: list[CompareFilterTag] = []
+        self.sort_associated_compare_filter_tag: Optional[CompareFilterTag] = None
+        for tag in tags:
+            tag = tag.strip().lower()
+            if not tag:
+                continue
+            if WHITE_SPACE_PATTERN.search(tag):
+                raise ValueError(f"The tag \"{tag}\" contains white space(s), booru tags should use \"_\" instead of spaces!")
+            sort_tag = SortTag.from_tag(tag)
+            if sort_tag is not None:
+                if self.sort_tag is not None:
+                    raise ValueError("You can't provide more than 1 sort tag!")
+                self.sort_tag = sort_tag
+                continue
+            compare_filter_tag = CompareFilterTag.from_tag(tag)
+            if compare_filter_tag is not None:
+                self.compare_filter_tags.append(compare_filter_tag)
+                continue
+            self.general_tags.append(tag)
+        if self.sort_tag is None:
+            self.sort_tag = SortTag()
+        for i in range(len(self.compare_filter_tags) - 1, -1, -1):
+            compare_filter_tag = self.compare_filter_tags[i]
+            if compare_filter_tag.compare_type == self.sort_tag.sort_type and compare_filter_tag.less_than == self.sort_tag.descending:
+                if self.sort_associated_compare_filter_tag is not None:
+                    raise ValueError("You can't provide more than 1 sort associated compare filter tag!")
+                self.sort_associated_compare_filter_tag = compare_filter_tag
+                del self.compare_filter_tags[i]
+
+    def update_bound(self, scrape_state):
+        match self.sort_tag.sort_type:
+            case "id":
+                if scrape_state.last_reached_image_id is None:
+                    raise ValueError("Last reached image ID isn't set!")
+                self.sort_associated_compare_filter_tag = CompareFilterTag("id", self.sort_tag.descending, True, scrape_state.last_reached_image_id)
+            case "score":
+                if scrape_state.last_reached_image_score is None:
+                    raise ValueError("Last reached image score isn't set!")
+                self.sort_associated_compare_filter_tag = CompareFilterTag("score", self.sort_tag.descending, True, scrape_state.last_reached_image_score)
+
+    def to_search_string(self):
+        tag_texts = [str(self.sort_tag)]
+        for compare_filter_tag in self.compare_filter_tags:
+            tag_texts.append(str(compare_filter_tag))
+        if self.sort_associated_compare_filter_tag is not None:
+            tag_texts.append(str(self.sort_associated_compare_filter_tag))
+        tag_texts += self.general_tags
+        return "+".join(urllib.parse.quote(tag_text, safe="") for tag_text in tag_texts)
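A quick sketch of how SearchTags composes and advances a Gelbooru query (the tag list and post ID are illustrative; the thread pool and session are left out because update_bound() only reads the last_reached_* fields):

    from utils import SearchTags, ScrapeState

    tags = SearchTags(["1girl", "score:>=10", "sort:id:desc"])
    print(tags.to_search_string())
    # sort%3Aid%3Adesc+score%3A%3E%3D10+1girl

    state = ScrapeState(None, None)  # no thread pool or session needed for this demo
    state.last_reached_image_id = 7654321  # normally recorded by process_link
    tags.update_bound(state)  # called by scrape_gel.py when the depth cap notice appears
    print(tags.to_search_string())
    # sort%3Aid%3Adesc+score%3A%3E%3D10+id%3A%3C%3D7654321+1girl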