Commit ca5b6e4 · v2ray committed · 1 Parent(s): 1d99bd3

Fixed requests becoming slower over time by refreshing the session.

Files changed (4):
  1. scrape_gel.py +86 -53
  2. scrape_yan.py +1 -2
  3. utils/scrape_state.py +2 -0
  4. utils/utils.py +7 -0
scrape_gel.py CHANGED
@@ -1,6 +1,7 @@
 import os
 import re
 import sys
+import time
 import utils
 import random
 import urllib
@@ -28,8 +29,10 @@ async def process_link(scrape_args, scrape_state):
             if utils.get_sigint_count() >= 1 or isinstance(scrape_args.max_scrape_count, int) and scrape_state.scraped_image_count >= scrape_args.max_scrape_count:
                 break
             # print(f"Processing image {image_id}...")
+            query_start_time = time.time()
             async with scrape_state.session.get(scrape_args.target) as response:
                 html = await response.text()
+            query_used_time = time.time() - query_start_time
             soup = BeautifulSoup(html, "html.parser")
             score_span = soup.find("span", id="psc" + image_id)
             if score_span:
@@ -65,9 +68,10 @@ async def process_link(scrape_args, scrape_state):
             image_path = os.path.join(IMAGE_DIR, image_id + image_ext)
             tags_path = os.path.join(IMAGE_DIR, image_id + ".txt")
             tags_text = ", ".join(libhtml.unescape(tag).replace("_", " ") for tag in tags)
+            download_start_time = time.time()
             async with scrape_state.session.get(image_download_url) as img_response:
                 img_data = await img_response.read()
-            os.makedirs(IMAGE_DIR, exist_ok=True)
+            download_used_time = time.time() - download_start_time
             async with aiofiles.open(image_path, "wb") as f:
                 await f.write(img_data)
             async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
@@ -76,8 +80,20 @@ async def process_link(scrape_args, scrape_state):
                 scrape_state.existing_image_ids.remove(image_id)
             else:
                 scrape_state.scraped_image_count += 1
-                if scrape_state.scraped_image_count % 1000 == 0:
-                    print(f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images.")
+                total_query_time = scrape_state.avg_query_time[0] * scrape_state.avg_query_time[1] + query_used_time
+                total_download_time = scrape_state.avg_download_time[0] * scrape_state.avg_download_time[1] + download_used_time
+                scrape_state.avg_query_time[1] += 1
+                scrape_state.avg_download_time[1] += 1
+                scrape_state.avg_query_time[0] = total_query_time / scrape_state.avg_query_time[1]
+                scrape_state.avg_download_time[0] = total_download_time / scrape_state.avg_download_time[1]
+                interval = 1000
+                if scrape_state.scraped_image_count % interval == 0:
+                    print(
+                        f"Scraped {scrape_state.scraped_image_count}/{scrape_args.max_scrape_count} images, "
+                        f"stats for the last {interval} images: [Average query time: {scrape_state.avg_query_time[0]:.3f}s | Average download time: {scrape_state.avg_download_time[0]:.3f}s]"
+                    )
+                    scrape_state.avg_query_time = [0.0, 0]
+                    scrape_state.avg_download_time = [0.0, 0]
             return
         except Exception as e:
             error = e
@@ -138,61 +154,78 @@ async def main():
     existing_image_ids = utils.get_existing_image_id_set(IMAGE_DIR)
     utils.register_sigint_callback()

-    async with aiohttp.ClientSession(cookies={"fringeBenefits": "yup"}, timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:
-        scrape_state = utils.ScrapeState(concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()), session, existing_image_ids)
-        tasks = []
-        while True:
-            try:
+    session_args = [TIMEOUT, {"fringeBenefits": "yup"}]
+    scrape_state = utils.ScrapeState(concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count()), utils.get_session(*session_args), existing_image_ids)
+    session_refresh_counter = 0
+    tasks = []
+    while True:
+        try:
+            if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
+                break
+            request_url = f"{args.site}/index.php?page=post&s=list&tags={search_tags.to_search_string()}&pid={page_number}"
+            print(f"Going to {request_url}")
+            async with scrape_state.session.get(request_url) as response:
+                html = await response.text()
+            soup = BeautifulSoup(html, "html.parser")
+            thumbnails_div = soup.find("div", class_="thumbnail-container")
+            if not thumbnails_div:
+                raise RuntimeError("Thumbnails division not found.")
+            notice_error = thumbnails_div.find("div", class_="notice error")
+            if notice_error and args.continuous_scraping:
+                print("Reached restricted depth, adjusting search tags to continue scraping...")
+                search_tags.update_bound(scrape_state)
+                page_number = 0
+                continue
+            image_urls = [a["href"] for a in thumbnails_div.find_all("a")]
+            image_url_count = len(image_urls)
+            if image_url_count == 0:
+                print("Website returned 0 image urls.")
+                break
+            print(f"Got {image_url_count} posts.")
+            page_number += image_url_count
+            for image_url in image_urls:
                 if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
                     break
-                request_url = f"{args.site}/index.php?page=post&s=list&tags={search_tags.to_search_string()}&pid={page_number}"
-                print(f"Going to {request_url}")
-                async with scrape_state.session.get(request_url) as response:
-                    html = await response.text()
-                soup = BeautifulSoup(html, "html.parser")
-                thumbnails_div = soup.find("div", class_="thumbnail-container")
-                if not thumbnails_div:
-                    raise RuntimeError("Thumbnails division not found.")
-                notice_error = thumbnails_div.find("div", class_="notice error")
-                if notice_error and args.continuous_scraping:
-                    print("Reached restricted depth, adjusting search tags to continue scraping...")
-                    search_tags.update_bound(scrape_state)
-                    page_number = 0
-                    continue
-                image_urls = [a["href"] for a in thumbnails_div.find_all("a")]
-                image_url_count = len(image_urls)
-                if image_url_count == 0:
-                    print("Website returned 0 image urls.")
-                    break
-                print(f"Got {image_url_count} posts.")
-                page_number += image_url_count
-                for image_url in image_urls:
+                while len(tasks) >= MAX_TASKS:
                     if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
                         break
-                    while len(tasks) >= MAX_TASKS:
-                        if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
-                            break
-                        await asyncio.sleep(0.1)
-                        for i in range(len(tasks) - 1, -1, -1):
-                            task = tasks[i]
-                            if task.done():
-                                await task
-                                del tasks[i]
-                    tasks.append(asyncio.create_task(process_link(utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.max_scrape_count), scrape_state)))
-            except Exception as e:
-                print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
-                await asyncio.sleep(0.1)
-        if utils.get_sigint_count() >= 1:
-            print("Script interrupted by user, gracefully exiting...\nYou can interrupt again to exit semi-forcefully, but it will break image checks!")
-        else:
-            print("No more images to download, waiting already submitted tasks to finish...")
-        while tasks and utils.get_sigint_count() <= 1:
+                    await asyncio.sleep(0.1)
+                    for i in range(len(tasks) - 1, -1, -1):
+                        task = tasks[i]
+                        if task.done():
+                            await task
+                            del tasks[i]
+                tasks.append(asyncio.create_task(process_link(utils.ScrapeArgs(image_url, args.width, args.height, args.avif, args.low_quality, args.min_tags, args.max_scrape_count), scrape_state)))
+            if utils.get_sigint_count() >= 1 or isinstance(args.max_scrape_count, int) and scrape_state.scraped_image_count >= args.max_scrape_count:
+                break
+            session_refresh_counter += 1
+            if session_refresh_counter % 50 == 0:
+                print("Refreshing session...")
+                while tasks and utils.get_sigint_count() < 1:
+                    await asyncio.sleep(0.1)
+                    for i in range(len(tasks) - 1, -1, -1):
+                        task = tasks[i]
+                        if task.done():
+                            await task
+                            del tasks[i]
+                if utils.get_sigint_count() < 1:
+                    await scrape_state.session.close()
+                    scrape_state.session = utils.get_session(*session_args)
+        except Exception as e:
+            print(f"An error occurred: {e}\nPausing for 0.1 second before retrying...")
            await asyncio.sleep(0.1)
-            for i in range(len(tasks) - 1, -1, -1):
-                task = tasks[i]
-                if task.done():
-                    await task
-                    del tasks[i]
+    if utils.get_sigint_count() >= 1:
+        print("Script interrupted by user, gracefully exiting...\nYou can interrupt again to exit semi-forcefully, but it will break image checks!")
+    else:
+        print("No more images to download, waiting already submitted tasks to finish...")
+    while tasks and utils.get_sigint_count() <= 1:
+        await asyncio.sleep(0.1)
+        for i in range(len(tasks) - 1, -1, -1):
+            task = tasks[i]
+            if task.done():
+                await task
+                del tasks[i]
+    await scrape_state.session.close()
     if utils.get_sigint_count() >= 1:
         if utils.get_sigint_count() >= 2:
             print("Another interrupt received, exiting semi-forcefully...\nYou can interrupt again for truly forceful exit, but it most likely will break a lot of things!")
scrape_yan.py CHANGED
@@ -47,7 +47,6 @@ async def process_link(thread_pool, session, image_object, existing_image_ids, w
         tags_text = ", ".join(tag.replace("_", " ").replace("nekomimi", "cat girl") for tag in tags)
         async with session.get(image_url) as img_response:
             img_data = await img_response.read()
-        os.makedirs(IMAGE_DIR, exist_ok=True)
         async with aiofiles.open(image_path, "wb") as f:
             await f.write(img_data)
         async with aiofiles.open(tags_path, "w", encoding="utf8") as f:
@@ -108,7 +107,7 @@ async def main():
     existing_image_ids = utils.get_existing_image_id_set(IMAGE_DIR)
     utils.register_sigint_callback()

-    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=TIMEOUT)) as session:
+    async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=TIMEOUT), connector=aiohttp.TCPConnector(limit=0, ttl_dns_cache=600)) as session:
         thread_pool = concurrent.futures.ThreadPoolExecutor(max_workers=os.cpu_count())
         tasks = []
         while True:
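scrape_yan.py keeps its single long-lived session but now builds it with an explicit TCPConnector: limit=0 lifts aiohttp's default cap of 100 concurrent connections, and ttl_dns_cache=600 keeps resolved DNS entries for 10 minutes instead of the default 10 seconds. A small sketch of the same construction, with TIMEOUT standing in for the script's own constant:

import aiohttp

TIMEOUT = 30  # stand-in for the script's TIMEOUT constant

async def open_session() -> aiohttp.ClientSession:
    connector = aiohttp.TCPConnector(limit=0, ttl_dns_cache=600)  # no connection cap, 600 s DNS cache
    return aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(total=TIMEOUT), connector=connector)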
utils/scrape_state.py CHANGED
@@ -11,3 +11,5 @@ class ScrapeState:
     scraped_image_count: int = 0
     last_reached_image_id: Optional[int] = None
     last_reached_image_score: Optional[int] = None
+    avg_query_time: list[float, int] = field(default_factory=lambda: [0.0, 0])
+    avg_download_time: list[float, int] = field(default_factory=lambda: [0.0, 0])
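Each new ScrapeState field is a two-element [running_average, sample_count] list (the list[float, int] annotation is informal shorthand rather than a valid typing form); scrape_gel.py folds every query and download timing into the pair and resets it after each progress report. A small sketch of that update rule, with update_running_average being an illustrative name:

def update_running_average(pair: list, sample: float) -> None:
    # pair holds [average, count]; fold one new sample into the average in place.
    total = pair[0] * pair[1] + sample
    pair[1] += 1
    pair[0] = total / pair[1]

avg_query_time = [0.0, 0]
for sample in (0.21, 0.35, 0.28):
    update_running_average(avg_query_time, sample)
print(f"{avg_query_time[1]} samples, average {avg_query_time[0]:.3f}s")  # 3 samples, average 0.280s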
utils/utils.py CHANGED
@@ -1,5 +1,6 @@
 import os
 import asyncio
+import aiohttp
 from PIL import Image

 def validate_image(image_path, tags_path, width=None, height=None, convert_to_avif=False):
@@ -63,3 +64,9 @@ def get_image_id_image_tags_path_tuple_dict(image_dir):

 def get_existing_image_id_set(image_dir):
     return set(get_image_id_image_tags_path_tuple_dict(image_dir))
+
+def get_session(timeout=None, cookies=None):
+    kwargs = {"connector": aiohttp.TCPConnector(limit=0, ttl_dns_cache=600), "cookies": cookies}
+    if timeout is not None:
+        kwargs["timeout"] = aiohttp.ClientTimeout(total=timeout)
+    return aiohttp.ClientSession(**kwargs)
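get_session centralizes session construction so the initial session and every refreshed one share the same connector, cookie, and timeout settings. Because scrape_gel.py no longer wraps the session in async with, the caller owns its lifetime and must close it explicitly; a minimal usage sketch, assuming the repository's utils package is importable (the timeout value and URL are placeholders):

import asyncio
import utils  # this repository's utils package

async def demo():
    session_args = [30, {"fringeBenefits": "yup"}]  # [timeout in seconds, cookies]
    session = utils.get_session(*session_args)
    try:
        async with session.get("https://example.com") as response:
            print(response.status)
    finally:
        await session.close()  # no async-with wrapper any more, so close explicitly

asyncio.run(demo())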