v2ray committed
Commit 749f292 · 1 Parent(s): 8ae5408

Added meta tags filtering.

Files changed (2)
  1. scrape_gel.py +28 -4
  2. utils/scrape_args.py +1 -0
scrape_gel.py CHANGED
@@ -13,6 +13,27 @@ import concurrent
 import html as libhtml
 from constants import *
 from bs4 import BeautifulSoup
+from collections import defaultdict
+
+def get_tags(soup, preserve_meta_tags):
+    tag_ul = soup.find("ul", id="tag-list")
+    if not tag_ul:
+        raise RuntimeError("No tag list found in this web page!")
+    type_tag_list_dict = defaultdict(list)
+    for element in tag_ul.find_all("li"):
+        class_name = element.get("class")
+        if not class_name or len(class_name) != 1:
+            continue
+        class_name = class_name[0]
+        if not class_name.startswith("tag-type-"):
+            continue
+        type_tag_list_dict[class_name[9:]].append(element.find("a", recursive=False).contents[0].replace("_", " "))
+    tags = []
+    for type, tag_list in type_tag_list_dict.items():
+        if not preserve_meta_tags and type in {"artist", "copyright", "metadata"}:
+            continue
+        tags += tag_list
+    return tags
 
 async def process_link(scrape_args, scrape_state):
     image_id = re.search(r"id=(\d+)", scrape_args.target).group(1)
@@ -46,7 +67,7 @@ async def process_link(scrape_args, scrape_state):
         return
     image_container = soup.find("section", class_=["image-container", "note-container"])
     if not image_container:
-        raise RuntimeError(f"No image container found for {image_id}.")
+        raise RuntimeError("No image container found.")
     if not scrape_args.use_low_quality:
         image_download_url = soup.find("a", string="Original image")["href"]
     else:
@@ -55,7 +76,7 @@ async def process_link(scrape_args, scrape_state):
     if image_ext not in IMAGE_EXT:
         print(f"Image {image_id} is not an image, skipped.")
         return
-    tags = image_container["data-tags"].strip().split()
+    tags = get_tags(soup, scrape_args.preserve_meta_tags)
     tag_count = len(tags)
     if tag_count < scrape_args.min_tags:
         # print(f"Image {image_id} doesn't have enough tags({tag_count} < {scrape_args.min_tags}), skipped.")
@@ -67,7 +88,7 @@ async def process_link(scrape_args, scrape_state):
     random.shuffle(tags)
     image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
     tags_path = os.path.join(IMAGE_DIR, image_id + ".txt")
-    tags_text = ", ".join(libhtml.unescape(tag).replace("_", " ") for tag in tags)
+    tags_text = ", ".join(tags)
     download_start_time = time.time()
     async with scrape_state.session.get(image_download_url) as img_response:
         img_data = await img_response.read()
@@ -113,6 +134,7 @@ def parse_args():
     parser.add_argument("-a", "--avif", action="store_true", help="If set, will convert the image into avif, need to have pillow-avif-plugin installed")
     parser.add_argument("-l", "--low-quality", action="store_true", help="If set, will download the sample instead of the original image")
     parser.add_argument("-t", "--min-tags", type=int, default=0, help="Filter out images with less than the specified amount of tags, default to 0")
+    parser.add_argument("-p", "--preserve-meta-tags", action="store_true", help="Preserve artist, copyright, and metadata tags")
     parser.add_argument("-m", "--max-scrape-count", type=int, help="Stop after scraping the set amount of images, may not be exact because of the asynchronous nature of this script, default to infinite")
     parser.add_argument("-c", "--continuous-scraping", action="store_true", help="If set, will scraping continuously even when reaching the 20000 images Gelbooru search depth cap by adjusting search tags")
     parser.add_argument("tags_to_search", nargs=argparse.REMAINDER, help="List of tags to search for, default to all")
@@ -195,7 +217,9 @@ async def main():
            if task.done():
                await task
                del tasks[i]
-        tasks.append(asyncio.create_task(process_link(utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.max_scrape_count), scrape_state)))
+        tasks.append(asyncio.create_task(process_link(
+            utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.preserve_meta_tags, args.max_scrape_count), scrape_state
+        )))
         if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
             break
         session_refresh_counter += 1
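For reference, the new tag extraction can be exercised on its own. The sketch below restates the get_tags logic introduced in this commit and runs it against a made-up tag-list fragment; real Gelbooru pages carry more markup inside each li, but the ul id="tag-list" / li class="tag-type-<category>" structure is what the function keys on.

# Standalone sketch of the tag filtering added in this commit.
# SAMPLE_HTML is an illustrative fragment, not real Gelbooru markup.
from collections import defaultdict
from bs4 import BeautifulSoup

SAMPLE_HTML = """
<ul id="tag-list">
  <li class="tag-type-artist"><a>some_artist</a></li>
  <li class="tag-type-copyright"><a>some_series</a></li>
  <li class="tag-type-metadata"><a>highres</a></li>
  <li class="tag-type-general"><a>long_hair</a></li>
  <li class="tag-type-general"><a>smile</a></li>
</ul>
"""

def get_tags(soup, preserve_meta_tags):
    tag_ul = soup.find("ul", id="tag-list")
    if not tag_ul:
        raise RuntimeError("No tag list found in this web page!")
    type_tag_list_dict = defaultdict(list)
    for element in tag_ul.find_all("li"):
        class_name = element.get("class")
        if not class_name or len(class_name) != 1:
            continue
        class_name = class_name[0]
        if not class_name.startswith("tag-type-"):
            continue
        # Bucket each tag under the part after "tag-type-", e.g. "artist", "general".
        type_tag_list_dict[class_name[9:]].append(element.find("a", recursive=False).contents[0].replace("_", " "))
    tags = []
    for type, tag_list in type_tag_list_dict.items():
        # Drop meta-ish categories unless -p/--preserve-meta-tags was given.
        if not preserve_meta_tags and type in {"artist", "copyright", "metadata"}:
            continue
        tags += tag_list
    return tags

soup = BeautifulSoup(SAMPLE_HTML, "html.parser")
print(get_tags(soup, preserve_meta_tags=False))  # ['long hair', 'smile']
print(get_tags(soup, preserve_meta_tags=True))   # all five tags, grouped by category

Run standalone, the first call prints only the general tags while the second keeps all five categories; in process_link the same soup object that already yielded the image container is passed straight through, so the -p/--preserve-meta-tags switch decides whether artist, copyright, and metadata tags end up in the per-image tag .txt file.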
utils/scrape_args.py CHANGED
@@ -9,4 +9,5 @@ class ScrapeArgs:
     convert_to_avif: bool = False
     use_low_quality: bool = False
     min_tags: int = 0
+    preserve_meta_tags: bool = False
     max_scrape_count: Optional[int] = None
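Because main() constructs ScrapeArgs positionally (see the last scrape_gel.py hunk above), the new field has to be declared before max_scrape_count for that call to keep lining up. A plausible reconstruction of the dataclass after this commit follows; only the trailing fields appear in the hunk, so the leading field names and types are assumptions inferred from the ScrapeArgs(...) call in main() and from scrape_args.target in process_link.

# Hypothetical reconstruction of utils/scrape_args.py after this commit.
# Only the last five fields are visible in the diff; the leading fields'
# names and types are guesses based on the positional call
# ScrapeArgs(image_url, args.width, args.height, args.avif, ...) in main().
from dataclasses import dataclass
from typing import Optional

@dataclass
class ScrapeArgs:
    target: str                            # post URL; read as scrape_args.target in process_link
    width: Optional[int] = None            # assumed; filled from args.width
    height: Optional[int] = None           # assumed; filled from args.height
    convert_to_avif: bool = False
    use_low_quality: bool = False
    min_tags: int = 0
    preserve_meta_tags: bool = False       # new in this commit
    max_scrape_count: Optional[int] = None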