Adapted balance tags script to the new JSON format.
- balance_tags.py +18 -18
- scrape_gel.py +30 -18
- utils/scrape_state.py +1 -1
- utils/search_tags.py +6 -14
balance_tags.py
CHANGED
@@ -33,20 +33,20 @@ def main():
     print("Starting...\nGetting model tags...")
     model_tags = utils.get_model_tags(MODEL_TAGS_PATH)
     print("Getting paths...")
-    …
-    print("Got", len(…
+    image_id_image_metadata_path_tuple_tuple_list = sorted(utils.get_image_id_image_metadata_path_tuple_dict(IMAGE_DIR).items(), key=lambda x: x[0])
+    print("Got", len(image_id_image_metadata_path_tuple_tuple_list), "images.\nShuffling paths...")
     random.seed(42)
-    random.shuffle(…
+    random.shuffle(image_id_image_metadata_path_tuple_tuple_list)
     print("Making buckets...")
     in_bucket_image_count = 0
     buckets = {tag: [] for tag in model_tags}
-    for …
+    for image_id_image_metadata_path_tuple_tuple in tqdm.tqdm(image_id_image_metadata_path_tuple_tuple_list, desc="Making buckets"):
         did_append = False
-        for tag in utils.…
+        for tag in utils.get_tags(image_id_image_metadata_path_tuple_tuple[1][1]):
             bucket = buckets.get(tag)
             if bucket is None:
                 continue
-            bucket.append(…
+            bucket.append(image_id_image_metadata_path_tuple_tuple)
             did_append = True
         if did_append:
             in_bucket_image_count += 1
@@ -59,33 +59,33 @@ def main():
         return
     print("Selecting...")
     total = min(args.count, in_bucket_image_count)
-    selected = {} # Key: Image ID, Value: …
+    selected = {} # Key: Image ID, Value: (Image path, Metadata path).
     with tqdm.tqdm(total=total, desc="Selecting") as progress_bar:
         while len(selected) < total:
-            for tag, …
+            for tag, image_id_image_metadata_path_tuple_tuple_list in buckets.items():
                 if len(selected) >= total:
                     break
-                if len(…
+                if len(image_id_image_metadata_path_tuple_tuple_list) <= 0:
                     continue
-                for i in range(len(…
-                    if …
-                        del …
+                for i in range(len(image_id_image_metadata_path_tuple_tuple_list) - 1, -1, -1):
+                    if image_id_image_metadata_path_tuple_tuple_list[i][0] in selected:
+                        del image_id_image_metadata_path_tuple_tuple_list[i]
                         break
                 else:
-                    last_item = …
+                    last_item = image_id_image_metadata_path_tuple_tuple_list[-1]
                     selected[last_item[0]] = last_item[1]
-                    del …
+                    del image_id_image_metadata_path_tuple_tuple_list[-1]
                     progress_bar.update(1)
     print("Selected", len(selected), "images.\nDeleting unselected images...")
     temp_dir = "__tag_bal_trans_tmp__"
     if os.path.exists(temp_dir):
         shutil.rmtree(temp_dir)
     os.makedirs(temp_dir)
-    for …
-        image_path = …
-        …
+    for image_metadata_path_tuple in tqdm.tqdm(selected.values(), desc="Moving"):
+        image_path = image_metadata_path_tuple[0]
+        metadata_path = image_metadata_path_tuple[1]
         os.rename(image_path, os.path.join(temp_dir, os.path.basename(image_path)))
-        os.rename(…
+        os.rename(metadata_path, os.path.join(temp_dir, os.path.basename(metadata_path)))
     shutil.rmtree(IMAGE_DIR)
     shutil.move(temp_dir, IMAGE_DIR)
     print("Finished.")
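The selection loop itself keeps its shape: it round-robins over the buckets so every model tag contributes images at a similar rate, popping the last (post-shuffle, effectively random) entry of each non-empty bucket per pass, after a reverse scan evicts at most one entry already selected through another tag. A condensed sketch with short names; like the script, it relies on the caller clamping `count` to `in_bucket_image_count` so the loop terminates.

```python
def select_balanced(buckets, count):
    selected = {}  # Image ID -> (image path, metadata path).
    while len(selected) < count:
        for items in buckets.values():
            if len(selected) >= count:
                break
            if not items:
                continue
            # Evict one entry already selected via another tag, if present.
            for i in range(len(items) - 1, -1, -1):
                if items[i][0] in selected:
                    del items[i]
                    break
            else:  # No duplicate found: take this bucket's last item.
                last_item = items[-1]
                selected[last_item[0]] = last_item[1]
                del items[-1]
    return selected
```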
scrape_gel.py
CHANGED
@@ -4,16 +4,15 @@ import sys
 import time
 import json
 import utils
-import urllib
 import asyncio
-import aiohttp
 import aiofiles
 import argparse
 import concurrent
-import html as libhtml
 from constants import *
 from bs4 import BeautifulSoup
 
+IMAGE_ID_PATTERN = re.compile(r"id=(\d+)")
+
 def get_type_tags_dict(soup):
     tag_ul = soup.find("ul", id="tag-list")
     if not tag_ul:
@@ -40,11 +39,10 @@ def get_type_tags_dict(soup):
     return type_tags_dict, len(tags_in_dict)
 
 async def process_link(scrape_args, scrape_state):
-    image_id = …
-    …
-    scrape_state.last_reached_image_id = image_id_int
+    image_id = IMAGE_ID_PATTERN.search(scrape_args.target).group(1)
+    scrape_state.last_reached_image_id = image_id
     image_id_already_exists = image_id in scrape_state.existing_image_ids
-    if image_id_already_exists and …
+    if image_id_already_exists and not image_id.endswith("99"):
         # print(f"Image {image_id} already exists, skipped.")
         return
     scrape_state.existing_image_ids.add(image_id)
@@ -59,50 +57,61 @@ async def process_link(scrape_args, scrape_state):
             html = await response.text()
         query_used_time = time.time() - query_start_time
         soup = BeautifulSoup(html, "html.parser")
+
+        video_container = soup.find("video", id="gelcomVideoPlayer")
+        if video_container:
+            print(f"Image {image_id} is a video, skipped.")
+            return
+        image_container = soup.find("section", class_=["image-container", "note-container"])
+        if not image_container:
+            raise RuntimeError("No image container found.")
+
         score_span = soup.find("span", id="psc" + image_id)
         try:
             image_score = int(score_span.contents[0])
         except (AttributeError, IndexError, ValueError) as e:
-            raise RuntimeError("Error while getting the image score: " + str(e))
+            raise RuntimeError("Error while getting the image score: " + str(e)) from e
         scrape_state.last_reached_image_score = image_score
         if image_id_already_exists:
             # print(f"Image {image_id} already exists, skipped.")
             return
-        …
-        if video_container:
-            print(f"Image {image_id} is a video, skipped.")
-            return
-        image_container = soup.find("section", class_=["image-container", "note-container"])
-        if not image_container:
-            raise RuntimeError("No image container found.")
+
         if not scrape_args.use_low_quality:
             image_download_url = soup.find("a", string="Original image")["href"]
         else:
             image_download_url = image_container.find("img", id="image")["src"]
+
         image_ext = os.path.splitext(image_download_url)[1].lower()
         if image_ext not in IMAGE_EXT:
             print(f"Image {image_id} is not an image, skipped.")
             return
+
         type_tags_dict, tag_count = get_type_tags_dict(soup)
         if tag_count < scrape_args.min_tags:
             # print(f"Image {image_id} doesn't have enough tags({tag_count} < {scrape_args.min_tags}), skipped.")
             return
+
         rating = image_container.get("data-rating")
         if not rating:
             raise RuntimeError("No rating found.")
         if rating == "safe":
             rating = "general"
+
         metadata = json.dumps({"image_id": image_id, "score": image_score, "rating": rating, "tags": type_tags_dict}, ensure_ascii=False, separators=(",", ":"))
+
         image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
         metadata_path = os.path.join(IMAGE_DIR, image_id + ".json")
+
         download_start_time = time.time()
         async with scrape_state.session.get(image_download_url) as img_response:
             img_data = await img_response.read()
         download_used_time = time.time() - download_start_time
+
         async with aiofiles.open(image_path, "wb") as f:
             await f.write(img_data)
         async with aiofiles.open(metadata_path, "w", encoding="utf8") as f:
             await f.write(metadata)
+
         if not await utils.submit_validation(scrape_state.thread_pool, image_path, metadata_path, scrape_args.width, scrape_args.height, scrape_args.convert_to_avif):
             return
         scrape_state.scraped_image_count += 1
@@ -116,8 +125,8 @@ async def process_link(scrape_args, scrape_state):
         if scrape_state.scraped_image_count % interval != 0:
             return
         print(
-            f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images,…
-            f"stats for the last {interval} images: [Average query time: {scrape_state.avg_query_time[0]:.3f}s | Average download time: {scrape_state.avg_download_time[0]:.3f}s]"
+            f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images,",
+            f"stats for the last {interval} images: [Average query time: {scrape_state.avg_query_time[0]:.3f}s | Average download time: {scrape_state.avg_download_time[0]:.3f}s]",
         )
         scrape_state.avg_query_time = [0.0, 0]
         scrape_state.avg_download_time = [0.0, 0]
@@ -130,7 +139,10 @@ async def process_link(scrape_args, scrape_state):
         await asyncio.sleep(0.1)
     if not image_id_already_exists:
         scrape_state.existing_image_ids.remove(image_id)
-    …
+    if error is not None:
+        print(f"All retry attempts failed, image {image_id} skipped. Final error {error.__class__.__name__}: {error}")
+    else:
+        print(f"Task for image {image_id} cancelled.")
 
 def parse_args():
     parser = argparse.ArgumentParser(description="Scrape images from Gelbooru.")
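The urllib- and libhtml-based parsing is gone; the image ID now comes from a single precompiled, module-level pattern applied to the post link. The first hunk starts below the file's first three imports, so `import re` is presumably already present up there. A standalone illustration with a hypothetical URL:

```python
import re

IMAGE_ID_PATTERN = re.compile(r"id=(\d+)")

# Hypothetical post link; real targets come from the scraper's page queries.
target = "https://gelbooru.com/index.php?page=post&s=view&id=123456"

match = IMAGE_ID_PATTERN.search(target)
if match is None:
    raise RuntimeError(f"No image ID in link: {target}")
image_id = match.group(1)  # "123456" -- kept as a str, matching ScrapeState.
```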
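The new tail reports on an `error` variable set by the retry machinery around the scraping body, which sits outside the hunks shown here. The wrapper below is only an assumed shape (names and retry count are illustrative) that yields the two outcomes the prints distinguish: an exception instance once retries are exhausted, or None when the task was cancelled.

```python
import asyncio

async def run_with_retries(coro_factory, image_id, max_tries=3):
    # Assumed retry wrapper; not shown in the diff.
    error = None
    for _ in range(max_tries):
        try:
            return await coro_factory()
        except asyncio.CancelledError:
            error = None  # Cancellation is reported separately below.
            break
        except Exception as e:
            error = e
            await asyncio.sleep(0.1)  # Brief pause before the next attempt.
    if error is not None:
        print(f"All retry attempts failed, image {image_id} skipped. Final error {error.__class__.__name__}: {error}")
    else:
        print(f"Task for image {image_id} cancelled.")
```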
utils/scrape_state.py
CHANGED
@@ -9,7 +9,7 @@ class ScrapeState:
     session: ThreadPoolExecutor
     existing_image_ids: set[str] = field(default_factory=set)
     scraped_image_count: int = 0
-    last_reached_image_id: Optional[int] = None
+    last_reached_image_id: Optional[str] = None
     last_reached_image_score: Optional[int] = None
     avg_query_time: list[float, int] = field(default_factory=lambda: [0.0, 0])
     avg_download_time: list[float, int] = field(default_factory=lambda: [0.0, 0])
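`last_reached_image_id` switches from int to str because process_link now stores the regex capture directly instead of an `image_id_int`. For context, a condensed view of the state container; the mutable defaults must go through `field(default_factory=...)` so each instance gets its own set and accumulators.

```python
from concurrent.futures import ThreadPoolExecutor
from dataclasses import dataclass, field
from typing import Any, Optional

@dataclass
class ScrapeState:
    # scrape_gel.py uses both scrape_state.session (HTTP) and
    # scrape_state.thread_pool; only one field line is visible in the hunk,
    # so this split is an assumption.
    session: Any
    thread_pool: ThreadPoolExecutor
    existing_image_ids: set[str] = field(default_factory=set)
    scraped_image_count: int = 0
    last_reached_image_id: Optional[str] = None  # Raw ID string captured from the URL.
    last_reached_image_score: Optional[int] = None
    avg_query_time: list = field(default_factory=lambda: [0.0, 0])  # [total seconds, sample count]
    avg_download_time: list = field(default_factory=lambda: [0.0, 0])
```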
utils/search_tags.py
CHANGED
@@ -51,10 +51,10 @@ class CompareFilterTag:
     compare_type: str
     less_than: bool
     with_equal: bool
-    target: int
+    target: str
 
     def __str__(self):
-        return self.compare_type + ":" + ("<" if self.less_than else ">") + ("=" if self.with_equal else "") + str(self.target)
+        return self.compare_type + ":" + ("<" if self.less_than else ">") + ("=" if self.with_equal else "") + self.target
 
     @classmethod
     def from_tag(cls, tag):
@@ -70,17 +70,7 @@ class CompareFilterTag:
             if not with_equal:
                 return None
             raise ValueError(f"The compare filter tag \"{tag}\" you provided isn't valid!")
-        try:
-            target = int(target)
-        except ValueError as e:
-            raise ValueError(f"The compare filter tag \"{tag}\" you provided isn't valid!") from e
-        if less_than == "<":
-            less_than = True
-        else:
-            less_than = False
-        with_equal = bool(with_equal)
-        compare_type = re_match.group(1)
-        return cls(compare_type, less_than, with_equal, target)
+        return cls(re_match.group(1), less_than == "<", bool(with_equal), target)
 
 class SearchTags:
 
@@ -125,7 +115,9 @@ class SearchTags:
             case "score":
                 if scrape_state.last_reached_image_score is None:
                     raise ValueError("Last reached image score isn't set!")
-                self.sort_associated_compare_filter_tag = CompareFilterTag("score", self.sort_tag.descending, True, scrape_state.last_reached_image_score)
+                self.sort_associated_compare_filter_tag = CompareFilterTag("score", self.sort_tag.descending, True, str(scrape_state.last_reached_image_score))
+            case _:
+                raise NotImplementedError(f"Bound update for sort type \"{self.sort_tag.sort_type}\" is not implemented!")
 
     def to_search_string(self):
         tag_texts = [str(self.sort_tag)]
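With `target` stored as a string, `from_tag` collapses to one constructor call and `__str__` concatenates directly; callers that previously passed ints, like the score bound above, now stringify the value themselves. A minimal round trip:

```python
from dataclasses import dataclass

@dataclass
class CompareFilterTag:
    compare_type: str
    less_than: bool
    with_equal: bool
    target: str  # Stored verbatim; numeric bounds arrive pre-stringified.

    def __str__(self):
        return self.compare_type + ":" + ("<" if self.less_than else ">") + ("=" if self.with_equal else "") + self.target

# The bound a descending score sort produces after reaching score 100:
print(CompareFilterTag("score", True, True, "100"))  # -> score:<=100
```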