Uploaded filtered 500k tagging train set.
- README.md +1 -1
- balance_tags.py +2 -35
- make_model_tags.py +61 -0
- scrape_gel.py +1 -1
- tagging_train/{chunk_0.tar → balanced_500k.tar} +2 -2
- tagging_train/full_1m.tar +3 -0
- utils/utils.py +33 -0
README.md
CHANGED
@@ -5,7 +5,7 @@ task_categories:
 tags:
 - not-for-all-audiences
 size_categories:
--
+- 100K<n<1M
 ---
 # Anime Collection
 A repo containing scripts to scrape booru sites and images scraped from it.
balance_tags.py
CHANGED
@@ -7,39 +7,6 @@ import random
 import argparse
 from constants import *
 
-def get_model_tags(model_tags_path):
-    if not os.path.isfile(model_tags_path):
-        raise FileNotFoundError(f"\"{model_tags_path}\" is not a file, please place one there!")
-    index_tag_dict = {}
-    with open(model_tags_path, "r", encoding="utf8") as model_tags_file:
-        for line in model_tags_file:
-            line = line.split()
-            if len(line) != 2:
-                continue
-            index_tag_dict[int(line[0])] = line[1]
-    if len(index_tag_dict) <= 0:
-        return []
-    sorted_index_tag_tuple_list = sorted(index_tag_dict.items(), key=lambda x: x[0])
-    if len(sorted_index_tag_tuple_list) != sorted_index_tag_tuple_list[-1][0] + 1:
-        raise ValueError(f"The index specified in \"{model_tags_path}\" is not continuous!")
-    return [tag for _, tag in sorted_index_tag_tuple_list]
-
-def get_tags_set(tags_path):
-    if not os.path.isfile(tags_path):
-        raise FileNotFoundError(f"\"{tags_path}\" is not a file!")
-    with open(tags_path, "r", encoding="utf8") as tags_file:
-        tags_text = tags_file.read()
-    tags_set = set()
-    for tag in tags_text.split(","):
-        tag = tag.strip()
-        if tag:
-            tag = tag.replace(" ", "_")
-            if tag == "nsfw": tag = "rating:explicit"
-            elif tag == "qfw": tag = "rating:questionable"
-            elif tag == "sfw": tag = "rating:safe"
-            tags_set.add(tag)
-    return tags_set
-
 def parse_args():
     parser = argparse.ArgumentParser(description="Balance the dataset based on tag frequency.")
     parser.add_argument("-c", "--count", type=int, help="The target selection count, must be an integer greater than 0")
@@ -64,7 +31,7 @@ def parse_args():
 def main():
     args = parse_args()
     print("Starting...\nGetting model tags...")
-    model_tags = get_model_tags(MODEL_TAGS_PATH)
+    model_tags = utils.get_model_tags(MODEL_TAGS_PATH)
     print("Getting paths...")
     image_id_image_tags_path_tuple_tuple_list = sorted(utils.get_image_id_image_tags_path_tuple_dict(IMAGE_DIR).items(), key=lambda x: x[0])
     print("Got", len(image_id_image_tags_path_tuple_tuple_list), "images.\nShuffling paths...")
@@ -75,7 +42,7 @@ def main():
     buckets = {tag: [] for tag in model_tags}
     for image_id_image_tags_path_tuple_tuple in tqdm.tqdm(image_id_image_tags_path_tuple_tuple_list, desc="Making buckets"):
         did_append = False
-        for tag in get_tags_set(image_id_image_tags_path_tuple_tuple[1][1]):
+        for tag in utils.get_tags_set(image_id_image_tags_path_tuple_tuple[1][1]):
             bucket = buckets.get(tag)
             if bucket is None:
                 continue
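The visible hunks show the shape of the balancing pass: the duplicated tag helpers are dropped in favor of the shared copies in utils, and images are distributed into one bucket per model tag before selection. What happens after a bucket is retrieved (the did_append flag and the later selection step) is outside the hunk, so the following is only a minimal, self-contained sketch of the bucketing pattern, with a made-up in-memory image-id-to-tags mapping standing in for the repo's per-image tag files:

```python
import random

# Hypothetical stand-ins for the repo's data: a fixed model tag list and an
# in-memory mapping of image id -> tag set (the real script reads these from
# MODEL_TAGS_PATH and per-image tag files via utils.get_tags_set).
model_tags = ["1girl", "solo", "rating:safe"]
image_tags = {
    1: {"1girl", "solo", "rating:safe"},
    2: {"1girl", "rating:safe"},
    3: {"solo"},
}

def make_buckets(model_tags, image_tags, seed=42):
    """Group image ids into one bucket per model tag, mirroring the visible loop."""
    buckets = {tag: [] for tag in model_tags}
    image_ids = sorted(image_tags)
    random.seed(seed)
    random.shuffle(image_ids)      # the real script also shuffles before bucketing
    for image_id in image_ids:
        for tag in image_tags[image_id]:
            bucket = buckets.get(tag)
            if bucket is None:     # tag not in the model tag list, skip it
                continue
            bucket.append(image_id)  # assumption: append to every matching bucket
    return buckets

if __name__ == "__main__":
    for tag, ids in make_buckets(model_tags, image_tags).items():
        print(tag, ids)
```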
make_model_tags.py
ADDED
@@ -0,0 +1,61 @@
+import sys
+import tqdm
+import utils
+import argparse
+from constants import *
+from collections import defaultdict
+
+def is_tag_unneeded(tag):
+    if tag.endswith("_request"):
+        return True
+    if tag.startswith("bad_") and tag.endswith("id"):
+        return True
+    if tag.endswith("_mismatch"):
+        return True
+    match tag:
+        case "tagme":
+            return True
+        case "bad_link":
+            return True
+        case "protected_link":
+            return True
+        case "inactive_account":
+            return True
+        case "bad_source":
+            return True
+        case "resized":
+            return True
+    return False
+
+def parse_args():
+    parser = argparse.ArgumentParser(description="Create model tags based on tag frequency.")
+    parser.add_argument("-i", "--min-images", type=int, default=0, help="Filter out tags with less than the specified amount of images, default to 0")
+    args = parser.parse_args()
+    if args.min_images < 0:
+        print("Minimum images must be greater than or equal to 0!")
+        sys.exit(1)
+    return args
+
+def main():
+    args = parse_args()
+    print("Starting...\nGetting paths...")
+    image_id_image_tags_path_tuple_dict = utils.get_image_id_image_tags_path_tuple_dict(IMAGE_DIR)
+    print("Got", len(image_id_image_tags_path_tuple_dict), "images.\nMaking buckets...")
+    buckets = defaultdict(int)
+    for _, tags_path in tqdm.tqdm(image_id_image_tags_path_tuple_dict.values(), desc="Making buckets"):
+        for tag in utils.get_tags_set(tags_path):
+            buckets[tag] += 1
+    print("Filtering out tags with less than", args.min_images, "images and sorting...")
+    buckets = sorted(bucket for bucket in buckets.items() if not is_tag_unneeded(bucket[0]) and bucket[1] >= args.min_images)
+    print("The new model tags list contains", len(buckets), "tags.\nSaving the result...")
+    with open(MODEL_TAGS_PATH, "w", encoding="utf8") as file:
+        for i, bucket in enumerate(buckets):
+            file.write(f"{i} {bucket[0]}\n")
+    print("Finished.")
+
+if __name__ == "__main__":
+    try:
+        main()
+    except KeyboardInterrupt:
+        print("\nScript interrupted by user, exiting...")
+        sys.exit(1)
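The new script counts how many images carry each tag, drops maintenance tags via is_tag_unneeded, and writes MODEL_TAGS_PATH as one "<index> <tag>" pair per line, which is the format utils.get_model_tags expects back. A small round-trip sketch of that file layout, using made-up tag counts and a temporary file instead of the repo's constants:

```python
import os
import tempfile

# Hypothetical tag counts, as make_model_tags.py would accumulate them; the
# threshold below stands in for the --min-images argument.
tag_counts = {"1girl": 5, "tagme": 3, "solo": 2, "rare_tag": 1}
min_images = 2

def is_tag_unneeded(tag):
    # Reduced version of the filter in make_model_tags.py (only the "tagme" case).
    return tag == "tagme"

kept = sorted(item for item in tag_counts.items()
              if not is_tag_unneeded(item[0]) and item[1] >= min_images)

with tempfile.NamedTemporaryFile("w", suffix=".txt", delete=False, encoding="utf8") as f:
    for i, (tag, _) in enumerate(kept):
        f.write(f"{i} {tag}\n")        # same "<index> <tag>" layout as MODEL_TAGS_PATH
    model_tags_path = f.name

# Reading it back the way utils.get_model_tags does (two fields per line,
# indices contiguous from 0).
with open(model_tags_path, encoding="utf8") as f:
    model_tags = [line.split()[1] for line in f if len(line.split()) == 2]

print(model_tags)   # ['1girl', 'solo']
os.remove(model_tags_path)
```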
scrape_gel.py
CHANGED
@@ -15,7 +15,7 @@ from constants import *
 from bs4 import BeautifulSoup
 
 async def process_link(scrape_args, scrape_state):
-    image_id = re.search("id=(\d+)", scrape_args.target).group(1)
+    image_id = re.search(r"id=(\d+)", scrape_args.target).group(1)
     image_id_int = int(image_id)
     scrape_state.last_reached_image_id = image_id_int
     image_id_already_exists = image_id in scrape_state.existing_image_ids
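The only change here is wrapping the pattern in a raw string: "\d" in a plain string literal is an invalid escape sequence that CPython flags (a DeprecationWarning that became a SyntaxWarning in 3.12), while r"id=(\d+)" passes the backslash through to the regex engine untouched. A quick illustration with a made-up gelbooru-style URL of the shape scrape_args.target would hold:

```python
import re

# Hypothetical URL of the kind the scraper processes.
target = "https://gelbooru.com/index.php?page=post&s=view&id=1234567"

# Raw string: the backslash reaches re unchanged, so \d is a digit class.
image_id = re.search(r"id=(\d+)", target).group(1)
print(image_id)   # "1234567"

# The old non-raw form still matches today, but "\d" is an invalid string
# escape and newer Python versions warn about it at compile time.
```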
tagging_train/{chunk_0.tar → balanced_500k.tar}
RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:6cdcc52833adbebe1c482e424a8336ce383adb1d5f3abdaf92b3710dc0cd17a6
+size 11393341440
tagging_train/full_1m.tar
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:454f382118a06d2ee22814cc1e64fccbe7f86fecca7bbd3a8013368052ff9aa9
+size 22291906560
utils/utils.py
CHANGED
@@ -70,3 +70,36 @@ def get_session(timeout=None, cookies=None):
     if timeout is not None:
         kwargs["timeout"] = aiohttp.ClientTimeout(total=timeout)
     return aiohttp.ClientSession(**kwargs)
+
+def get_model_tags(model_tags_path):
+    if not os.path.isfile(model_tags_path):
+        raise FileNotFoundError(f"\"{model_tags_path}\" is not a file, please place one there!")
+    index_tag_dict = {}
+    with open(model_tags_path, "r", encoding="utf8") as model_tags_file:
+        for line in model_tags_file:
+            line = line.split()
+            if len(line) != 2:
+                continue
+            index_tag_dict[int(line[0])] = line[1]
+    if len(index_tag_dict) <= 0:
+        return []
+    sorted_index_tag_tuple_list = sorted(index_tag_dict.items(), key=lambda x: x[0])
+    if len(sorted_index_tag_tuple_list) != sorted_index_tag_tuple_list[-1][0] + 1:
+        raise ValueError(f"The index specified in \"{model_tags_path}\" is not continuous!")
+    return [tag for _, tag in sorted_index_tag_tuple_list]
+
+def get_tags_set(tags_path):
+    if not os.path.isfile(tags_path):
+        raise FileNotFoundError(f"\"{tags_path}\" is not a file!")
+    with open(tags_path, "r", encoding="utf8") as tags_file:
+        tags_text = tags_file.read()
+    tags_set = set()
+    for tag in tags_text.split(","):
+        tag = tag.strip()
+        if tag:
+            tag = tag.replace(" ", "_")
+            if tag == "nsfw": tag = "rating:explicit"
+            elif tag == "qfw": tag = "rating:questionable"
+            elif tag == "sfw": tag = "rating:safe"
+            tags_set.add(tag)
+    return tags_set