# Kemono-Downloader/downloader_utils.py

import os
import time
import requests
import re
import threading
import queue
import hashlib
import http.client
import traceback
from concurrent.futures import ThreadPoolExecutor, Future, CancelledError, as_completed
import html # Import the html module for unescaping
from PyQt5.QtCore import QObject, pyqtSignal, QThread, QMutex, QMutexLocker
from urllib.parse import urlparse
try:
from PIL import Image
except ImportError:
print("ERROR: Pillow library not found. Please install it: pip install Pillow")
Image = None
from io import BytesIO
fastapi_app = None # Placeholder, not used in this script
KNOWN_NAMES = [] # Global list, populated by main.py
IMAGE_EXTENSIONS = {
'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.tif', '.webp',
'.heic', '.heif', '.svg', '.ico', '.jfif', '.pjpeg', '.pjp', '.avif'
}
VIDEO_EXTENSIONS = {
'.mp4', '.mov', '.mkv', '.webm', '.avi', '.wmv', '.flv', '.mpeg',
'.mpg', '.m4v', '.3gp', '.ogv', '.ts', '.vob'
}
def is_title_match_for_character(post_title, character_name_filter):
"""Checks if a post title contains a specific character name (case-insensitive, whole word)."""
if not post_title or not character_name_filter:
return False
# Ensure character_name_filter is treated as a whole word, avoid partial matches within larger words.
# Regex: \b matches word boundary. re.escape handles special characters in filter.
pattern = r"(?i)\b" + re.escape(character_name_filter) + r"\b"
return bool(re.search(pattern, post_title))
def is_filename_match_for_character(filename, character_name_filter):
"""Checks if a filename contains a specific character name (case-insensitive, substring)."""
if not filename or not character_name_filter:
return False
# For filenames, substring matching is often more practical.
return character_name_filter.lower() in filename.lower()
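
# Illustrative behaviour of the two matchers above (a sketch, not executed; inputs are made up):
#   is_title_match_for_character("Power and Makima sketch", "Makima")  -> True   (whole-word match)
#   is_title_match_for_character("Spiderman WIP", "Spider")            -> False  (no word boundary after "Spider")
#   is_filename_match_for_character("spider_final_v2.png", "Spider")   -> True   (plain substring match)
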
def clean_folder_name(name):
"""Cleans a string to be suitable for a folder name."""
if not isinstance(name, str): name = str(name)
# Remove characters that are generally problematic in folder names across OS
cleaned = re.sub(r'[^\w\s\-\_\.\(\)]', '', name) # Allow letters, numbers, whitespace, hyphens, underscores, periods, parentheses
cleaned = cleaned.strip() # Remove leading/trailing whitespace
# Replace sequences of whitespace with a single underscore
cleaned = re.sub(r'\s+', '_', cleaned)
return cleaned if cleaned else "untitled_folder"
def clean_filename(name):
"""Cleans a string to be suitable for a file name."""
if not isinstance(name, str): name = str(name)
# Remove characters that are generally problematic in file names across OS
cleaned = re.sub(r'[^\w\s\-\_\.\(\)]', '', name) # Allow letters, numbers, whitespace, hyphens, underscores, periods, parentheses
cleaned = cleaned.strip() # Remove leading/trailing whitespace
# Replace sequences of whitespace with a single underscore
cleaned = re.sub(r'\s+', '_', cleaned)
return cleaned if cleaned else "untitled_file"
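
# Example of the cleaning rules above (illustrative only; the input string is hypothetical):
#   clean_filename('My "Post": Chapter 1/2?')  -> 'My_Post_Chapter_12'
#   (disallowed characters are stripped first, then runs of whitespace become single underscores)
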
def extract_folder_name_from_title(title, unwanted_keywords):
"""Extracts a potential folder name from a title, avoiding unwanted keywords."""
if not title: return 'Uncategorized'
title_lower = title.lower()
# Try to find a meaningful token not in unwanted_keywords
tokens = re.findall(r'\b[\w\-]+\b', title_lower) # Find words
for token in tokens:
clean_token = clean_folder_name(token) # Clean the token itself
if clean_token and clean_token.lower() not in unwanted_keywords: # Check against lowercased unwanted keywords
return clean_token
# Fallback to cleaned full title if no suitable token found
cleaned_full_title = clean_folder_name(title)
return cleaned_full_title if cleaned_full_title else 'Uncategorized'
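
# Illustrative call (not executed), assuming unwanted_keywords holds lowercased words:
#   extract_folder_name_from_title("Sketch WIP Makima", {"sketch", "wip"})  -> 'makima'
#   (tokens come from the lowercased title, so the returned token is lowercase)
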
def match_folders_from_title(title, names_to_match, unwanted_keywords):
"""
Matches names from a list against a title to determine potential folder names.
Prioritizes longer matches.
"""
if not title or not names_to_match: return []
title_lower = title.lower()
matched_cleaned_names = set()
# Sort names by length (descending) to match longer names first (e.g., "Spider-Man" before "Spider")
sorted_names_to_match = sorted(names_to_match, key=len, reverse=True)
for name in sorted_names_to_match:
name_lower = name.lower()
if not name_lower: continue # Skip empty names
# Use word boundary regex to ensure whole word matching
pattern = r'\b' + re.escape(name_lower) + r'\b'
if re.search(pattern, title_lower):
# Clean the original casing 'name' for folder creation, then lowercase for unwanted keyword check
cleaned_name_for_folder = clean_folder_name(name)
if cleaned_name_for_folder.lower() not in unwanted_keywords: # Check against lowercased unwanted keywords
matched_cleaned_names.add(cleaned_name_for_folder) # Add the cleaned name with original casing preserved as much as possible
return sorted(list(matched_cleaned_names))
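
# Illustrative call (not executed); the title and name list are made up:
#   match_folders_from_title("Makima and Power", ["Power", "Makima"], set())  -> ['Makima', 'Power']
#   Longer names are tested first, so e.g. "Spider-Man" is checked before "Spider".
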
def is_image(filename):
"""Checks if the filename has a common image extension."""
if not filename: return False
_, ext = os.path.splitext(filename)
return ext.lower() in IMAGE_EXTENSIONS
def is_video(filename):
"""Checks if the filename has a common video extension."""
if not filename: return False
_, ext = os.path.splitext(filename)
return ext.lower() in VIDEO_EXTENSIONS
def is_zip(filename):
"""Checks if the filename ends with .zip (case-insensitive)."""
if not filename: return False
return filename.lower().endswith('.zip')
def is_rar(filename):
"""Checks if the filename ends with .rar (case-insensitive)."""
if not filename: return False
return filename.lower().endswith('.rar')
def is_post_url(url):
"""Checks if the URL likely points to a specific post."""
if not isinstance(url, str): return False
return '/post/' in urlparse(url).path
def extract_post_info(url_string):
"""Extracts service, user ID, and post ID from a Kemono/Coomer URL."""
service, user_id, post_id = None, None, None
if not isinstance(url_string, str) or not url_string.strip(): return None, None, None
try:
parsed_url = urlparse(url_string.strip())
domain = parsed_url.netloc.lower()
# Check if the domain is one of the known Kemono or Coomer domains
is_kemono = any(d in domain for d in ['kemono.su', 'kemono.party'])
is_coomer = any(d in domain for d in ['coomer.su', 'coomer.party'])
if not (is_kemono or is_coomer): return None, None, None # Not a recognized service
path_parts = [part for part in parsed_url.path.strip('/').split('/') if part]
# Standard URL structure: /{service}/user/{user_id}/post/{post_id}
# Or creator page: /{service}/user/{user_id}
if len(path_parts) >= 3 and path_parts[1].lower() == 'user':
service = path_parts[0]
user_id = path_parts[2]
if len(path_parts) >= 5 and path_parts[3].lower() == 'post':
post_id = path_parts[4]
return service, user_id, post_id
# API URL structure: /api/v1/{service}/user/{user_id}/post/{post_id}
# Or API creator page: /api/v1/{service}/user/{user_id}
if len(path_parts) >= 5 and path_parts[0].lower() == 'api' and \
path_parts[1].lower() == 'v1' and path_parts[3].lower() == 'user':
service = path_parts[2]
user_id = path_parts[4]
if len(path_parts) >= 7 and path_parts[5].lower() == 'post':
post_id = path_parts[6]
return service, user_id, post_id
2025-05-07 07:20:40 +05:30
except Exception as e:
# Log or handle unexpected errors during URL parsing if necessary
print(f"Debug: Exception during extract_post_info for URL '{url_string}': {e}")
return None, None, None # Return None for all if parsing fails or structure is not matched
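
# Illustrative results (a sketch, not executed; the URLs are hypothetical):
#   extract_post_info("https://kemono.su/patreon/user/12345/post/98765")  -> ('patreon', '12345', '98765')
#   extract_post_info("https://coomer.su/onlyfans/user/somecreator")      -> ('onlyfans', 'somecreator', None)
#   extract_post_info("https://example.com/whatever")                     -> (None, None, None)
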
def fetch_posts_paginated(api_url_base, headers, offset, logger, cancellation_event=None):
"""Fetches a single page of posts from the API."""
if cancellation_event and cancellation_event.is_set():
logger(" Fetch cancelled before request.")
raise RuntimeError("Fetch operation cancelled by user.") # Raise error to stop pagination
paginated_url = f'{api_url_base}?o={offset}'
logger(f" Fetching: {paginated_url}")
try:
response = requests.get(paginated_url, headers=headers, timeout=(10, 60)) # connect_timeout, read_timeout
response.raise_for_status() # Raises HTTPError for bad responses (4XX or 5XX)
# It's good practice to check content type before parsing JSON
if 'application/json' not in response.headers.get('Content-Type', '').lower():
logger(f"⚠️ Unexpected content type from API: {response.headers.get('Content-Type')}. Body: {response.text[:200]}")
return [] # Return empty list or raise error if JSON is strictly expected
return response.json()
except requests.exceptions.Timeout:
# Log specific timeout and re-raise or handle as a specific error
raise RuntimeError(f"Timeout fetching offset {offset} from {paginated_url}")
except requests.exceptions.RequestException as e:
# General request exception (includes HTTPError, ConnectionError, etc.)
err_msg = f"Error fetching offset {offset} from {paginated_url}: {e}"
if e.response is not None:
err_msg += f" (Status: {e.response.status_code}, Body: {e.response.text[:200]})"
raise RuntimeError(err_msg)
except ValueError as e: # JSONDecodeError inherits from ValueError
# Handle cases where response is not valid JSON
raise RuntimeError(f"Error decoding JSON from offset {offset} ({paginated_url}): {e}. Response text: {response.text[:200]}")
except Exception as e:
# Catch any other unexpected errors
raise RuntimeError(f"Unexpected error fetching offset {offset} ({paginated_url}): {e}")
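
# Illustrative usage (a sketch, not executed). The API offset counts posts, not pages,
# so with the 50-post page size assumed elsewhere in this module, page N starts at offset (N - 1) * 50:
#   first_page  = fetch_posts_paginated(api_base_url, headers, 0, print)
#   second_page = fetch_posts_paginated(api_base_url, headers, 50, print)
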
def download_from_api(api_url_input, logger=print, start_page=None, end_page=None, manga_mode=False, cancellation_event=None):
"""
Generator function to fetch post data from Kemono/Coomer API.
Handles pagination and yields batches of posts.
In Manga Mode, fetches all posts first, then yields them in reverse order (oldest first).
"""
headers = {'User-Agent': 'Mozilla/5.0', 'Accept': 'application/json'} # Standard headers
service, user_id, target_post_id = extract_post_info(api_url_input)
if cancellation_event and cancellation_event.is_set():
logger(" Download_from_api cancelled at start.")
return
if not service or not user_id:
logger(f"❌ Invalid URL or could not extract service/user: {api_url_input}")
return
# Page range is ignored for single post URLs
if target_post_id and (start_page or end_page):
logger("⚠️ Page range (start/end page) is ignored when a specific post URL is provided.")
start_page = end_page = None
# Manga mode is only applicable for creator feeds (not single posts)
is_creator_feed_for_manga = manga_mode and not target_post_id
parsed_input = urlparse(api_url_input)
api_domain = parsed_input.netloc
# Ensure we use a valid API domain, default to kemono.su if unrecognized
if not any(d in api_domain.lower() for d in ['kemono.su', 'kemono.party', 'coomer.su', 'coomer.party']):
logger(f"⚠️ Unrecognized domain '{api_domain}'. Defaulting to kemono.su for API calls.")
api_domain = "kemono.su" # Or "coomer.party" if that's preferred default
api_base_url = f"https://{api_domain}/api/v1/{service}/user/{user_id}"
page_size = 50 # Kemono API typically returns 50 posts per page
# --- Manga Mode: Fetch all posts first, then reverse ---
if is_creator_feed_for_manga:
logger(" Manga Mode: Fetching all posts to reverse order (oldest posts processed first)...")
all_posts_for_manga_mode = []
current_offset_manga = 0
while True:
if cancellation_event and cancellation_event.is_set():
logger(" Manga mode post fetching cancelled.")
break
try:
posts_batch_manga = fetch_posts_paginated(api_base_url, headers, current_offset_manga, logger, cancellation_event)
if not isinstance(posts_batch_manga, list): # API should always return a list
logger(f"❌ API Error (Manga Mode): Expected list of posts, got {type(posts_batch_manga)}.")
break
if not posts_batch_manga: # Empty list means no more posts
logger("✅ Reached end of posts (Manga Mode fetch all).")
break
all_posts_for_manga_mode.extend(posts_batch_manga)
current_offset_manga += len(posts_batch_manga) # API doesn't use page_size in offset, but number of posts
time.sleep(0.6) # Be respectful to the API
except RuntimeError as e: # Catch errors from fetch_posts_paginated
if "cancelled by user" in str(e).lower():
logger(f" Manga mode pagination stopped due to cancellation: {e}")
else:
logger(f"{e}\n Aborting manga mode pagination.")
break # Stop fetching on error
except Exception as e: # Catch any other unexpected errors
logger(f"❌ Unexpected error during manga mode fetch: {e}")
traceback.print_exc()
break
if cancellation_event and cancellation_event.is_set(): return # Early exit if cancelled
if all_posts_for_manga_mode:
logger(f" Manga Mode: Fetched {len(all_posts_for_manga_mode)} total posts. Reversing order...")
all_posts_for_manga_mode.reverse() # Oldest posts first
# Yield in batches of page_size
for i in range(0, len(all_posts_for_manga_mode), page_size):
if cancellation_event and cancellation_event.is_set():
logger(" Manga mode post yielding cancelled.")
break
yield all_posts_for_manga_mode[i:i + page_size]
else:
logger(" Manga Mode: No posts found to process.")
return # End of Manga Mode logic
# --- Normal Mode or Single Post Mode ---
current_page_num = 1
current_offset = 0
processed_target_post_flag = False # For single post URLs
if start_page and start_page > 1:
current_offset = (start_page - 1) * page_size # Calculate offset for starting page
current_page_num = start_page
logger(f" Starting from page {current_page_num} (calculated offset {current_offset}).")
while True: # Pagination loop
if cancellation_event and cancellation_event.is_set():
logger(" Post fetching loop cancelled.")
break
if end_page and current_page_num > end_page:
logger(f"✅ Reached specified end page ({end_page}). Stopping.")
break
if target_post_id and processed_target_post_flag: # If single post was found and processed
logger(f"✅ Target post {target_post_id} has been processed.")
break
try:
posts_batch = fetch_posts_paginated(api_base_url, headers, current_offset, logger, cancellation_event)
if not isinstance(posts_batch, list):
logger(f"❌ API Error: Expected list of posts, got {type(posts_batch)} at page {current_page_num}.")
break
except RuntimeError as e: # Catch errors from fetch_posts_paginated
if "cancelled by user" in str(e).lower():
logger(f" Pagination stopped due to cancellation: {e}")
else:
logger(f"{e}\n Aborting pagination at page {current_page_num}.")
break
except Exception as e: # Catch any other unexpected errors
logger(f"❌ Unexpected error fetching page {current_page_num}: {e}")
traceback.print_exc()
break
if not posts_batch: # No more posts
if current_page_num == (start_page or 1) and not target_post_id : # No posts on first page of a creator feed
logger("😕 No posts found on the first page checked.")
elif not target_post_id: # End of creator feed
logger("✅ Reached end of posts (no more content).")
break # Exit pagination loop
if target_post_id: # Processing a single post URL
matching_post = next((p for p in posts_batch if str(p.get('id')) == str(target_post_id)), None)
if matching_post:
logger(f"🎯 Found target post {target_post_id}.")
yield [matching_post] # Yield as a list containing one item
processed_target_post_flag = True # Mark as processed
else:
# This case should ideally not happen if the post ID is valid and API is consistent.
# If the API returns posts in pages, a specific post ID might not be on the first page if offset isn't 0.
# However, for a direct post URL, we expect it or an error.
logger(f"❌ Target post {target_post_id} not found in the batch from offset {current_offset}. This may indicate the post URL is incorrect or the API behavior is unexpected.")
break # Stop if target post not found where expected
else: # Processing a creator feed (not a single post)
yield posts_batch # Yield the batch of posts
if not (target_post_id and processed_target_post_flag): # If not a single post that was just processed
if not posts_batch : break # Should be redundant due to check above, but safe
current_offset += len(posts_batch) # Kemono API uses item offset, not page offset
current_page_num += 1
time.sleep(0.6) # Be respectful to the API
else: # Single post was processed, exit loop
break
# Final check if a specific target post was requested but not found
if target_post_id and not processed_target_post_flag and not (cancellation_event and cancellation_event.is_set()):
logger(f"❌ Target post {target_post_id} could not be found after checking relevant pages.")
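
# Illustrative consumption of the generator above (a sketch, not executed; the URL is hypothetical):
#   for post_batch in download_from_api("https://kemono.su/patreon/user/12345", logger=print):
#       for post in post_batch:
#           print(post.get("id"), post.get("title"))
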
def get_link_platform(url):
"""Attempts to identify the platform of an external link from its domain."""
try:
domain = urlparse(url).netloc.lower()
# Specific known platforms (add more as needed)
if 'drive.google.com' in domain: return 'google drive'
if 'mega.nz' in domain or 'mega.io' in domain: return 'mega'
if 'dropbox.com' in domain: return 'dropbox'
if 'patreon.com' in domain: return 'patreon'
if 'instagram.com' in domain: return 'instagram'
if 'twitter.com' in domain or 'x.com' in domain: return 'twitter/x'
if 'discord.gg' in domain or 'discord.com/invite' in domain: return 'discord invite'
if 'pixiv.net' in domain: return 'pixiv'
if 'kemono.su' in domain or 'kemono.party' in domain: return 'kemono' # Explicitly identify kemono
if 'coomer.su' in domain or 'coomer.party' in domain: return 'coomer' # Explicitly identify coomer
# Generic extraction for other domains (e.g., 'example' from 'www.example.com')
parts = domain.split('.')
if len(parts) >= 2:
# Return the second-to-last part for common structures (e.g., 'google' from google.com)
# Avoid returning generic TLDs like 'com', 'org', 'net' as the platform
# Handle cases like 'google.co.uk' -> 'google'
if parts[-2] not in ['com', 'org', 'net', 'gov', 'edu', 'co'] or len(parts) == 2:
return parts[-2]
elif len(parts) >= 3 and parts[-3] not in ['com', 'org', 'net', 'gov', 'edu', 'co']:
return parts[-3]
else: # Fallback to full domain if unsure or very short domain
return domain
return 'external' # Default if domain parsing fails or is too simple (e.g., 'localhost')
except Exception: return 'unknown' # Error case
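
# Illustrative mappings (not executed; URLs are made up):
#   get_link_platform("https://drive.google.com/file/d/abc")  -> 'google drive'
#   get_link_platform("https://mega.nz/folder/xyz")           -> 'mega'
#   get_link_platform("https://www.example.com/page")         -> 'example'
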
class PostProcessorSignals(QObject):
"""Defines signals used by PostProcessorWorker to communicate with the GUI thread."""
progress_signal = pyqtSignal(str) # Generic log messages
file_download_status_signal = pyqtSignal(bool) # True if a file download starts, False if ends/fails
# Signal carries post_title, link_text, link_url, platform
external_link_signal = pyqtSignal(str, str, str, str)
# Signal carries filename, downloaded_bytes, total_bytes for progress bar
file_progress_signal = pyqtSignal(str, int, int)
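    # Rough wiring sketch from the GUI side (illustrative, not executed; the slot names are hypothetical):
    #   signals = PostProcessorSignals()
    #   signals.progress_signal.connect(log_widget.append)
    #   signals.file_progress_signal.connect(lambda name, done, total: update_progress_bar(name, done, total))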
class PostProcessorWorker:
"""Processes a single post: determines save paths, downloads files, handles compression."""
def __init__(self, post_data, download_root, known_names,
filter_character_list,
unwanted_keywords, filter_mode, skip_zip, skip_rar,
use_subfolders, use_post_subfolders, target_post_id_from_initial_url, custom_folder_name,
compress_images, download_thumbnails, service, user_id,
api_url_input, cancellation_event, signals,
downloaded_files, downloaded_file_hashes, downloaded_files_lock, downloaded_file_hashes_lock,
skip_words_list=None, show_external_links=False,
extract_links_only=False,
num_file_threads=4, skip_current_file_flag=None,
manga_mode_active=False
):
self.post = post_data
self.download_root = download_root
self.known_names = known_names
self.filter_character_list = filter_character_list if filter_character_list else []
self.unwanted_keywords = unwanted_keywords if unwanted_keywords is not None else set()
self.filter_mode = filter_mode # 'image', 'video', or 'all'
self.skip_zip = skip_zip
self.skip_rar = skip_rar
self.use_subfolders = use_subfolders
self.use_post_subfolders = use_post_subfolders
self.target_post_id_from_initial_url = target_post_id_from_initial_url # ID from initial URL if it was a post URL
self.custom_folder_name = custom_folder_name # For single post downloads
self.compress_images = compress_images
self.download_thumbnails = download_thumbnails
self.service = service
self.user_id = user_id
self.api_url_input = api_url_input # The original URL input by the user
self.cancellation_event = cancellation_event
self.signals = signals # For emitting progress, logs, etc.
self.skip_current_file_flag = skip_current_file_flag # Event to skip current file download
# Sets and locks for tracking downloaded files/hashes across threads/workers
self.downloaded_files = downloaded_files if downloaded_files is not None else set()
self.downloaded_file_hashes = downloaded_file_hashes if downloaded_file_hashes is not None else set()
self.downloaded_files_lock = downloaded_files_lock if downloaded_files_lock is not None else threading.Lock()
self.downloaded_file_hashes_lock = downloaded_file_hashes_lock if downloaded_file_hashes_lock is not None else threading.Lock()
self.skip_words_list = skip_words_list if skip_words_list is not None else []
self.show_external_links = show_external_links # Whether to extract and log external links
self.extract_links_only = extract_links_only # If true, only extracts links, no downloads
self.num_file_threads = num_file_threads # Threads for downloading files within this post
self.manga_mode_active = manga_mode_active # True if manga mode is on
# Disable compression if Pillow is not available
if self.compress_images and Image is None:
self.logger("⚠️ Image compression disabled: Pillow library not found.")
self.compress_images = False
def logger(self, message):
"""Emits a log message via the progress_signal if available."""
if self.signals and hasattr(self.signals, 'progress_signal'):
self.signals.progress_signal.emit(message)
else: # Fallback if signals are not connected (e.g., testing)
print(f"(Worker Log - No Signal): {message}")
def check_cancel(self):
"""Checks if cancellation has been requested."""
return self.cancellation_event.is_set()
def _download_single_file(self, file_info, target_folder_path, headers, original_post_id_for_log, skip_event,
post_title="", file_index_in_post=0): # Added post_title here
"""Downloads a single file, handles retries, compression, and hash checking."""
if self.check_cancel() or (skip_event and skip_event.is_set()): return 0, 1 # Downloaded, Skipped
file_url = file_info.get('url')
# Use '_original_name_for_log' if available (set in process()), otherwise 'name'
api_original_filename = file_info.get('_original_name_for_log', file_info.get('name'))
if not file_url or not api_original_filename:
self.logger(f"⚠️ Skipping file from post {original_post_id_for_log}: Missing URL or original filename. Info: {str(file_info)[:100]}")
return 0, 1
# --- Skip Check 1: Skip Words (Always based on Filename) ---
if self.skip_words_list:
content_to_check_for_skip_words = api_original_filename.lower() # ALWAYS use filename for skip words
log_source_for_skip_words = f"Filename '{api_original_filename}'"
for skip_word in self.skip_words_list:
if skip_word.lower() in content_to_check_for_skip_words:
self.logger(f" -> Skip File (Keyword Match): {log_source_for_skip_words} contains '{skip_word}'.")
return 0, 1
# --- Character Filter (Global Gate) ---
# If character filters are active, the item (post for manga, file for normal) must match.
if self.filter_character_list:
matches_any_character_filter = False
if self.manga_mode_active:
# Manga Mode: Character filter applies to POST TITLE
if any(is_title_match_for_character(post_title, char_filter) for char_filter in self.filter_character_list):
matches_any_character_filter = True
if not matches_any_character_filter:
# This log might be redundant if the post-level check in process() already skipped it,
# but it's a safeguard if a file somehow reaches here without its post title matching.
self.logger(f" -> Skip File (Manga Mode - Post Title No Char Match): Title '{post_title[:30]}' doesn't match active character filters for this file.")
return 0, 1
else: # Normal mode: Character filter applies to FILENAME
if any(is_filename_match_for_character(api_original_filename, char_filter) for char_filter in self.filter_character_list):
matches_any_character_filter = True
if not matches_any_character_filter:
self.logger(f" -> Skip File (Normal Mode - Filename No Char Match): '{api_original_filename}' doesn't match active character filters.")
return 0, 1
# --- Filename Generation (Manga Mode vs Normal Mode) ---
_, original_ext = os.path.splitext(api_original_filename)
if original_ext and not original_ext.startswith('.'): original_ext = '.' + original_ext
elif not original_ext: # Try to derive extension if missing
_, temp_ext = os.path.splitext(clean_filename(api_original_filename)) # Clean first
if temp_ext and not temp_ext.startswith('.'): original_ext = '.' + temp_ext
elif temp_ext: original_ext = temp_ext
else: original_ext = '' # No extension found
filename_to_save = ""
if self.manga_mode_active:
# Manga mode renaming logic (uses post_title and sequence)
if post_title and post_title.strip():
cleaned_post_title_full = clean_filename(post_title.strip()) # Clean the post title for filename use
original_filename_base, _ = os.path.splitext(api_original_filename) # Get base of original API filename
2025-05-08 19:49:50 +05:30
# Try to extract a sequence number from the original filename
extracted_sequence_from_original = ""
# Simple number at the end: e.g., "image_01", "pic123"
simple_end_match = re.search(r'(\d+)$', original_filename_base)
if simple_end_match:
extracted_sequence_from_original = simple_end_match.group(1).zfill(2) # Pad with zero if needed
else:
# More complex patterns like "page 01", "ch-2", "ep_003"
complex_match = re.search(r'(?:[ _.\-/]|^)(?:p|page|ch|chapter|ep|episode|v|vol|volume|no|num|number|pt|part)[ _.\-]*(\d+)', original_filename_base, re.IGNORECASE)
if complex_match:
extracted_sequence_from_original = complex_match.group(1).zfill(2) # Pad
# Base for new filename from post title, removing existing page/chapter numbers from title
cleaned_title_base = re.sub(
r'[|\[\]()]*[ _.\-]*(?:page|p|ch|chapter|ep|episode|v|vol|volume|no|num|number|pt|part)s?[ _.\-]*\d+([ _.\-]+\d+)?([ _.\-]*(?:END|FIN))?$',
'',
cleaned_post_title_full,
flags=re.IGNORECASE
).strip()
if not cleaned_title_base: # Fallback if regex strips everything
cleaned_title_base = cleaned_post_title_full
cleaned_title_base = cleaned_title_base.rstrip(' _.-') # Clean trailing separators
if extracted_sequence_from_original:
filename_to_save = f"{cleaned_title_base} - {extracted_sequence_from_original}{original_ext}"
else:
# Fallback to file index in post if no sequence found in original filename
fallback_sequence = str(file_index_in_post + 1).zfill(2) # Pad with zero
filename_to_save = f"{cleaned_title_base} - {fallback_sequence}{original_ext}"
# Handle potential filename collisions by appending a counter
counter = 1
base_name_coll, ext_coll = os.path.splitext(filename_to_save)
temp_filename_for_collision_check = filename_to_save
while os.path.exists(os.path.join(target_folder_path, temp_filename_for_collision_check)):
temp_filename_for_collision_check = f"{base_name_coll}_{counter}{ext_coll}"
counter += 1
if temp_filename_for_collision_check != filename_to_save:
# self.logger(f" Manga Mode: Collision detected. Adjusted filename to '{temp_filename_for_collision_check}'")
filename_to_save = temp_filename_for_collision_check
else: # Manga mode but post_title is missing (should be rare)
filename_to_save = clean_filename(api_original_filename) # Fallback to cleaned original
self.logger(f"⚠️ Manga mode: Post title missing for post {original_post_id_for_log}. Using cleaned original filename '{filename_to_save}'.")
else: # Normal mode
filename_to_save = clean_filename(api_original_filename)
final_filename_for_sets_and_saving = filename_to_save # This is the name used for saving and duplicate checks
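        # Worked example of the naming above (a sketch, assuming manga mode and no collision in the folder):
        #   post_title "My Comic - Chapter 3" + original file "raw_page_07.png"
        #   -> the title cleans to "My_Comic" (trailing chapter marker stripped),
        #      the sequence "07" is taken from the original filename,
        #      so the file is saved as "My_Comic - 07.png".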
# --- File Type Filtering (applies to both modes, based on original filename) ---
if not self.download_thumbnails: # Thumbnail mode bypasses these filters
is_img_type = is_image(api_original_filename) # Check original type
is_vid_type = is_video(api_original_filename)
is_zip_type = is_zip(api_original_filename)
is_rar_type = is_rar(api_original_filename)
if self.filter_mode == 'image' and not is_img_type:
self.logger(f" -> Filter Skip: '{api_original_filename}' (Not Image).")
return 0,1
if self.filter_mode == 'video' and not is_vid_type:
self.logger(f" -> Filter Skip: '{api_original_filename}' (Not Video).")
return 0,1
if self.skip_zip and is_zip_type:
self.logger(f" -> Pref Skip: '{api_original_filename}' (ZIP).")
return 0,1
if self.skip_rar and is_rar_type:
self.logger(f" -> Pref Skip: '{api_original_filename}' (RAR).")
return 0,1
target_folder_basename = os.path.basename(target_folder_path) # For logging
current_save_path = os.path.join(target_folder_path, final_filename_for_sets_and_saving)
# --- Duplicate Checks (Path, Global Filename, Hash) ---
if os.path.exists(current_save_path) and os.path.getsize(current_save_path) > 0:
self.logger(f" -> Exists (Path): '{final_filename_for_sets_and_saving}' in '{target_folder_basename}'.")
with self.downloaded_files_lock: self.downloaded_files.add(final_filename_for_sets_and_saving) # Add to global set
return 0, 1
with self.downloaded_files_lock:
if final_filename_for_sets_and_saving in self.downloaded_files:
self.logger(f" -> Global Skip (Filename): '{final_filename_for_sets_and_saving}' already recorded as downloaded this session.")
return 0, 1
# --- Download Loop with Retries ---
max_retries = 3
retry_delay = 5 # seconds
downloaded_size_bytes = 0
calculated_file_hash = None
file_content_bytes = None # BytesIO to hold downloaded content
total_size_bytes = 0 # From Content-Length header, set on first attempt
download_successful_flag = False
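        # Retry schedule implied by the constants above (assuming they stay at 3 retries / 5s base):
        # the first request happens immediately; retry 1 waits 5s, retry 2 waits 10s, retry 3 waits 20s.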
for attempt_num in range(max_retries + 1): # max_retries means max_retries + 1 attempts total
if self.check_cancel() or (skip_event and skip_event.is_set()):
break # Exit retry loop if cancelled
try:
if attempt_num > 0:
self.logger(f" Retrying '{api_original_filename}' (Attempt {attempt_num}/{max_retries})...")
time.sleep(retry_delay * (2**(attempt_num - 1))) # Exponential backoff
if self.signals and hasattr(self.signals, 'file_download_status_signal'):
self.signals.file_download_status_signal.emit(True) # Signal download start
response = requests.get(file_url, headers=headers, timeout=(15, 300), stream=True) # connect_timeout, read_timeout
response.raise_for_status() # Check for HTTP errors
current_total_size_bytes_from_headers = int(response.headers.get('Content-Length', 0))
if attempt_num == 0: # First attempt, log initial size
total_size_bytes = current_total_size_bytes_from_headers
size_str = f"{total_size_bytes / (1024 * 1024):.2f} MB" if total_size_bytes > 0 else "unknown size"
self.logger(f"⬇️ Downloading: '{api_original_filename}' (Size: {size_str}) [Saving as: '{final_filename_for_sets_and_saving}']")
# Use the size from the current attempt for progress reporting
current_attempt_total_size = current_total_size_bytes_from_headers
file_content_buffer = BytesIO() # Buffer for this attempt's content
current_attempt_downloaded_bytes = 0
md5_hasher = hashlib.md5()
last_progress_time = time.time()
for chunk in response.iter_content(chunk_size=1 * 1024 * 1024): # 1MB chunks
if self.check_cancel() or (skip_event and skip_event.is_set()):
break # Stop reading chunks if cancelled
if chunk:
file_content_buffer.write(chunk)
md5_hasher.update(chunk)
current_attempt_downloaded_bytes += len(chunk)
# Emit progress signal periodically
if time.time() - last_progress_time > 1 and current_attempt_total_size > 0 and \
self.signals and hasattr(self.signals, 'file_progress_signal'):
self.signals.file_progress_signal.emit(
api_original_filename, # Show original name in progress
current_attempt_downloaded_bytes,
current_attempt_total_size
)
last_progress_time = time.time()
if self.check_cancel() or (skip_event and skip_event.is_set()):
if file_content_buffer: file_content_buffer.close()
break # Break from retry loop if cancelled during chunk iteration
# Check if download was successful for this attempt
if current_attempt_downloaded_bytes > 0: # Successfully downloaded some data
calculated_file_hash = md5_hasher.hexdigest()
2025-05-09 19:03:01 +05:30
downloaded_size_bytes = current_attempt_downloaded_bytes
if file_content_bytes: file_content_bytes.close() # Close previous attempt's buffer
file_content_bytes = file_content_buffer # Keep this attempt's content
file_content_bytes.seek(0) # Reset pointer for reading
download_successful_flag = True
2025-05-09 19:03:01 +05:30
break # Exit retry loop on success
elif current_attempt_total_size == 0 and response.status_code == 200: # Handle 0-byte files
self.logger(f" Note: '{api_original_filename}' is a 0-byte file according to server.")
calculated_file_hash = md5_hasher.hexdigest() # Hash of empty content
downloaded_size_bytes = 0
if file_content_bytes: file_content_bytes.close()
file_content_bytes = file_content_buffer # Keep empty buffer
file_content_bytes.seek(0)
download_successful_flag = True
break # Exit retry loop
else: # No data or failed attempt (e.g. connection dropped before any data)
if file_content_buffer: file_content_buffer.close() # Discard this attempt's buffer
except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, http.client.IncompleteRead) as e:
self.logger(f" ❌ Download Error (Retryable): {api_original_filename}. Error: {e}")
if 'file_content_buffer' in locals() and file_content_buffer: file_content_buffer.close()
except requests.exceptions.RequestException as e: # Non-retryable (like 404)
self.logger(f" ❌ Download Error (Non-Retryable): {api_original_filename}. Error: {e}")
if 'file_content_buffer' in locals() and file_content_buffer: file_content_buffer.close()
break # Break from retry loop
except Exception as e: # Other unexpected errors
self.logger(f" ❌ Unexpected Download Error: {api_original_filename}: {e}\n{traceback.format_exc(limit=2)}")
if 'file_content_buffer' in locals() and file_content_buffer: file_content_buffer.close()
break # Break from retry loop
finally:
if self.signals and hasattr(self.signals, 'file_download_status_signal'):
self.signals.file_download_status_signal.emit(False) # Signal download end/attempt end
# End of retry loop
# Emit final progress update (e.g., 100% or 0/0 if failed)
if self.signals and hasattr(self.signals, 'file_progress_signal'):
# Use total_size_bytes from the first successful header read for consistency in total
final_total_for_progress = total_size_bytes if download_successful_flag and total_size_bytes > 0 else downloaded_size_bytes
self.signals.file_progress_signal.emit(api_original_filename, downloaded_size_bytes, final_total_for_progress)
if self.check_cancel() or (skip_event and skip_event.is_set()):
self.logger(f" ⚠️ Download interrupted for {api_original_filename}.")
if file_content_bytes: file_content_bytes.close()
return 0, 1 # Skipped due to interruption
if not download_successful_flag:
self.logger(f"❌ Download failed for '{api_original_filename}' after {max_retries + 1} attempts.")
if file_content_bytes: file_content_bytes.close()
return 0, 1 # Skipped due to download failure
# --- Hash Check (post-download), Compression, Saving ---
with self.downloaded_file_hashes_lock:
if calculated_file_hash in self.downloaded_file_hashes:
self.logger(f" -> Content Skip (Hash): '{api_original_filename}' (Hash: {calculated_file_hash[:8]}...) already downloaded this session.")
with self.downloaded_files_lock: self.downloaded_files.add(final_filename_for_sets_and_saving) # Still mark filename as "processed"
if file_content_bytes: file_content_bytes.close()
2025-05-09 19:03:01 +05:30
return 0, 1 # Skipped due to hash duplicate
bytes_to_write = file_content_bytes # This is the BytesIO from the successful download
final_filename_after_processing = final_filename_for_sets_and_saving # May change if compressed
current_save_path_final = current_save_path # May change if filename changes due to compression
is_img_for_compress_check = is_image(api_original_filename) # Check original type for compression eligibility
if is_img_for_compress_check and self.compress_images and Image and downloaded_size_bytes > (1.5 * 1024 * 1024): # Compress if > 1.5MB
self.logger(f" Compressing '{api_original_filename}' ({downloaded_size_bytes / (1024*1024):.2f} MB)...")
try:
# Ensure bytes_to_write is at the beginning for Pillow
bytes_to_write.seek(0)
with Image.open(bytes_to_write) as img_obj:
# Handle palette mode images and convert to RGB/RGBA for WebP
if img_obj.mode == 'P': img_obj = img_obj.convert('RGBA')
elif img_obj.mode not in ['RGB', 'RGBA', 'L']: img_obj = img_obj.convert('RGB')
compressed_bytes_io = BytesIO()
img_obj.save(compressed_bytes_io, format='WebP', quality=80, method=4) # method 4 is a good balance
compressed_size = compressed_bytes_io.getbuffer().nbytes
# Only use compressed if significantly smaller (e.g., >10% reduction)
if compressed_size < downloaded_size_bytes * 0.9:
self.logger(f" Compression success: {compressed_size / (1024*1024):.2f} MB.")
bytes_to_write.close() # Close original downloaded content stream
bytes_to_write = compressed_bytes_io # Use compressed content stream
bytes_to_write.seek(0) # Reset pointer for writing
base_name_orig, _ = os.path.splitext(final_filename_for_sets_and_saving)
final_filename_after_processing = base_name_orig + '.webp' # Change extension
current_save_path_final = os.path.join(target_folder_path, final_filename_after_processing)
self.logger(f" Updated filename (compressed): {final_filename_after_processing}")
else:
self.logger(f" Compression skipped: WebP not significantly smaller."); bytes_to_write.seek(0) # Reset pointer if not using compressed
except Exception as comp_e:
self.logger(f"❌ Compression failed for '{api_original_filename}': {comp_e}. Saving original."); bytes_to_write.seek(0) # Reset pointer
# Check for existence again if filename changed due to compression
if final_filename_after_processing != final_filename_for_sets_and_saving and \
os.path.exists(current_save_path_final) and os.path.getsize(current_save_path_final) > 0:
self.logger(f" -> Exists (Path - Post-Compress): '{final_filename_after_processing}' in '{target_folder_basename}'.")
with self.downloaded_files_lock: self.downloaded_files.add(final_filename_after_processing)
bytes_to_write.close()
return 0, 1
# --- Save the file ---
try:
os.makedirs(os.path.dirname(current_save_path_final), exist_ok=True) # Ensure directory exists
with open(current_save_path_final, 'wb') as f_out:
f_out.write(bytes_to_write.getvalue()) # Write content
2025-05-09 19:03:01 +05:30
# Add to downloaded sets upon successful save
with self.downloaded_file_hashes_lock: self.downloaded_file_hashes.add(calculated_file_hash)
with self.downloaded_files_lock: self.downloaded_files.add(final_filename_after_processing)
self.logger(f"✅ Saved: '{final_filename_after_processing}' (from '{api_original_filename}', {downloaded_size_bytes / (1024*1024):.2f} MB) in '{target_folder_basename}'")
time.sleep(0.05) # Small delay, can be removed if not needed
return 1, 0 # Downloaded, Skipped
except Exception as save_err:
self.logger(f"❌ Save Fail for '{final_filename_after_processing}': {save_err}")
if os.path.exists(current_save_path_final): # Attempt to remove partial file
try: os.remove(current_save_path_final);
except OSError: self.logger(f" -> Failed to remove partially saved file: {current_save_path_final}")
return 0, 1 # Skipped due to save error
finally:
if bytes_to_write: bytes_to_write.close() # Ensure stream is closed
def process(self):
"""Main processing logic for a single post."""
if self.check_cancel(): return 0, 0 # Downloaded, Skipped
total_downloaded_this_post = 0
total_skipped_this_post = 0
# Prepare headers for file downloads
parsed_api_url = urlparse(self.api_url_input) # Use the original input URL for referer base
referer_url = f"https://{parsed_api_url.netloc}/"
headers = {'User-Agent': 'Mozilla/5.0', 'Referer': referer_url, 'Accept': '*/*'}
# Regex for finding links in HTML content
link_pattern = re.compile(r"""<a\s+.*?href=["'](https?://[^"']+)["'][^>]*>(.*?)</a>""",
re.IGNORECASE | re.DOTALL)
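        # Illustrative match (not executed; the URL is made up): for '<a href="https://mega.nz/f/abc">Bonus files</a>'
        # group(1) captures the URL and group(2) the inner HTML ("Bonus files").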
# Extract post details
post_data = self.post
post_title = post_data.get('title', '') or 'untitled_post'
post_id = post_data.get('id', 'unknown_id')
post_main_file_info = post_data.get('file') # Main file object for the post
post_attachments = post_data.get('attachments', []) # List of attachment objects
post_content_html = post_data.get('content', '') # HTML content of the post
# Log post processing start
self.logger(f"\n--- Processing Post {post_id} ('{post_title[:50]}...') (Thread: {threading.current_thread().name}) ---")
num_potential_files = len(post_attachments or []) + (1 if post_main_file_info and post_main_file_info.get('path') else 0)
# --- Post-Level Skip Word Check (REMOVED for Manga Mode based on Title) ---
# Skip words are now ALWAYS checked at the file level based on FILENAME in _download_single_file.
# The old Manga Mode post-level skip based on title is removed.
# --- Post-Level Character Filter Check (Only for Manga Mode, based on Title) ---
# If Manga Mode is active and character filters are set, the post title MUST match one of them.
# This acts as a gate for processing files from this post in Manga Mode.
if not self.extract_links_only and self.manga_mode_active and self.filter_character_list:
if not any(is_title_match_for_character(post_title, char_name) for char_name in self.filter_character_list):
self.logger(f" -> Skip Post (Manga Mode - Title No Char Match): Title '{post_title[:50]}' doesn't match active character filters.")
return 0, num_potential_files # Skip all files in this post
# Validate attachments structure
if not isinstance(post_attachments, list):
self.logger(f"⚠️ Corrupt attachment data for post {post_id} (expected list, got {type(post_attachments)}). Skipping attachments.")
post_attachments = []
# --- Determine Base Save Folders ---
potential_base_save_folders = [] # List of base folder names (not full paths yet)
if not self.extract_links_only: # Folder logic only applies if not just extracting links
if self.use_subfolders:
if self.filter_character_list: # User specified character names for folders
if self.manga_mode_active:
# Manga Mode: Only consider character folders if post title matches that character
for char_filter_name in self.filter_character_list:
if is_title_match_for_character(post_title, char_filter_name):
cleaned_folder = clean_folder_name(char_filter_name)
if cleaned_folder: potential_base_save_folders.append(cleaned_folder)
# If in manga mode and title didn't match any char filter, this list will be empty.
# The post-level skip above should have already caught this.
else: # Normal Mode: Create folders for all specified character filters
for char_filter_name in self.filter_character_list:
cleaned_folder = clean_folder_name(char_filter_name)
if cleaned_folder: potential_base_save_folders.append(cleaned_folder)
if potential_base_save_folders:
self.logger(f" Folder Target(s) (from Character Filter list): {', '.join(potential_base_save_folders)}")
elif self.filter_character_list:
self.logger(f" Note: Post {post_id} title did not match character filters for folder assignment (Manga Mode) or no valid char folders.")
else: # No character filter list from UI, derive folders from title using known_names
derived_folders = match_folders_from_title(post_title, self.known_names, self.unwanted_keywords)
if derived_folders:
potential_base_save_folders.extend(derived_folders)
self.logger(f" Folder Target(s) (Derived from Title & Known Names): {', '.join(derived_folders)}")
else: # Fallback if no known_names match
fallback_folder = extract_folder_name_from_title(post_title, self.unwanted_keywords)
potential_base_save_folders.append(fallback_folder)
self.logger(f" Folder Target (Fallback from Title): {fallback_folder}")
if not potential_base_save_folders: # If still no folders, use a generic one based on post title or default
potential_base_save_folders.append(clean_folder_name(post_title if post_title else "untitled_creator_content"))
self.logger(f" Folder Target (Final Fallback): {potential_base_save_folders[0]}")
else: # Not using subfolders, all files go to download_root
potential_base_save_folders = [""] # Represents the root download directory
# --- Post-Level Skip Words in Folder Name ---
# This applies if subfolders are used and a folder name itself contains a skip word.
if not self.extract_links_only and self.use_subfolders and self.skip_words_list:
for folder_name_to_check in potential_base_save_folders:
if not folder_name_to_check: continue # Skip root ""
if any(skip_word.lower() in folder_name_to_check.lower() for skip_word in self.skip_words_list):
matched_skip = next((sw for sw in self.skip_words_list if sw.lower() in folder_name_to_check.lower()), "unknown_skip_word")
self.logger(f" -> Skip Post (Folder Keyword): Potential folder '{folder_name_to_check}' contains '{matched_skip}'.")
return 0, num_potential_files
# --- Extract and Log External Links ---
if (self.show_external_links or self.extract_links_only) and post_content_html:
try:
unique_links_data = {} # Store unique URLs and their text
for match in link_pattern.finditer(post_content_html):
link_url = match.group(1).strip()
link_inner_text = match.group(2) # Raw inner HTML of the <a> tag
if not any(ext in link_url.lower() for ext in ['.css', '.js', '.ico', '.xml', '.svg']) \
and not link_url.startswith('javascript:') \
and link_url not in unique_links_data:
clean_link_text = re.sub(r'<.*?>', '', link_inner_text)
clean_link_text = html.unescape(clean_link_text).strip()
display_text = clean_link_text if clean_link_text else "[Link]"
unique_links_data[link_url] = display_text
links_emitted_count = 0
scraped_platforms = {'kemono', 'coomer', 'patreon'}
for link_url, link_text in unique_links_data.items():
platform = get_link_platform(link_url)
if platform not in scraped_platforms:
if self.signals and hasattr(self.signals, 'external_link_signal'):
self.signals.external_link_signal.emit(post_title, link_text, link_url, platform)
links_emitted_count +=1
if links_emitted_count > 0: self.logger(f" 🔗 Found {links_emitted_count} potential external link(s) in post content.")
except Exception as e: self.logger(f"⚠️ Error parsing post content for links: {e}\n{traceback.format_exc(limit=2)}")
if self.extract_links_only:
self.logger(f" Extract Links Only mode: Finished processing post {post_id} for links.")
return 0, 0
# --- Prepare List of Files to Download ---
files_to_download_info_list = []
api_file_domain = urlparse(self.api_url_input).netloc
if not api_file_domain:
api_file_domain = "kemono.su" if "kemono" in (self.service or "").lower() else "coomer.party"  # guard against service being None
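# Note on the URL construction below: because 'path' values are lstrip('/')-ed first, the
# startswith('/') branch of the f-strings never triggers, so file URLs are effectively
# always built as https://<api_file_domain>/data/<path>.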
if self.download_thumbnails:
self.logger(f" Thumbnail-only mode for Post {post_id}.")
if post_main_file_info and isinstance(post_main_file_info, dict) and post_main_file_info.get('path'):
if is_image(post_main_file_info.get('name')):
file_path = post_main_file_info['path'].lstrip('/')
original_api_name = post_main_file_info.get('name') or os.path.basename(file_path)
if original_api_name:
files_to_download_info_list.append({
'url': f"https://{api_file_domain}{file_path}" if file_path.startswith('/') else f"https://{api_file_domain}/data/{file_path}",
'name': original_api_name,
'_original_name_for_log': original_api_name,
'_is_thumbnail': True
})
for att_info in post_attachments:
if isinstance(att_info, dict) and att_info.get('path') and is_image(att_info.get('name')):
att_path = att_info['path'].lstrip('/')
original_api_att_name = att_info.get('name') or os.path.basename(att_path)
if original_api_att_name:
files_to_download_info_list.append({
'url': f"https://{api_file_domain}{att_path}" if att_path.startswith('/') else f"https://{api_file_domain}/data/{att_path}",
'name': original_api_att_name,
'_original_name_for_log': original_api_att_name,
'_is_thumbnail': True
})
if not files_to_download_info_list:
self.logger(f" -> No image thumbnails found for post {post_id} in thumbnail-only mode.")
return 0, 0
else: # Normal download mode
if post_main_file_info and isinstance(post_main_file_info, dict) and post_main_file_info.get('path'):
file_path = post_main_file_info['path'].lstrip('/')
original_api_name = post_main_file_info.get('name') or os.path.basename(file_path)
if original_api_name:
files_to_download_info_list.append({
'url': f"https://{api_file_domain}{file_path}" if file_path.startswith('/') else f"https://{api_file_domain}/data/{file_path}",
'name': original_api_name,
'_original_name_for_log': original_api_name,
'_is_thumbnail': False
})
else: self.logger(f" ⚠️ Skipping main file for post {post_id}: Missing name (Path: {file_path})")
for idx, att_info in enumerate(post_attachments):
if isinstance(att_info, dict) and att_info.get('path'):
att_path = att_info['path'].lstrip('/')
original_api_att_name = att_info.get('name') or os.path.basename(att_path)
if original_api_att_name:
files_to_download_info_list.append({
'url': f"https://{api_file_domain}{att_path}" if att_path.startswith('/') else f"https://{api_file_domain}/data/{att_path}",
'name': original_api_att_name,
'_original_name_for_log': original_api_att_name,
'_is_thumbnail': False
})
else: self.logger(f" ⚠️ Skipping attachment {idx+1} for post {post_id}: Missing name (Path: {att_path})")
else: self.logger(f" ⚠️ Skipping invalid attachment {idx+1} for post {post_id}: {str(att_info)[:100]}")
if not files_to_download_info_list:
self.logger(f" No files found to download for post {post_id}.")
return 0, 0
self.logger(f" Identified {len(files_to_download_info_list)} file(s) for potential download from post {post_id}.")
# --- File Download Loop (using ThreadPoolExecutor for individual files) ---
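# Design note: each post gets its own small pool of self.num_file_threads workers, so the
# files belonging to one post can download in parallel while posts themselves are handled
# one at a time by this worker.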
with ThreadPoolExecutor(max_workers=self.num_file_threads, thread_name_prefix=f'P{post_id}File_') as file_pool:
futures_list = []
for file_idx, file_info_to_dl in enumerate(files_to_download_info_list):
if self.check_cancel(): break
actual_target_full_paths_for_this_file = []
if self.use_subfolders:
if self.filter_character_list:
for char_name_from_filter_list in self.filter_character_list:
assign_to_this_char_folder = False
if self.manga_mode_active:
# Manga Mode: Folder assignment is based on post_title matching char_name_from_filter_list
# This check is somewhat redundant if the post-level title check passed,
# but ensures files from this post go into the matched character's folder.
if is_title_match_for_character(post_title, char_name_from_filter_list):
assign_to_this_char_folder = True
else: # Normal mode
if is_filename_match_for_character(file_info_to_dl.get('_original_name_for_log'), char_name_from_filter_list):
assign_to_this_char_folder = True
if assign_to_this_char_folder:
base_char_folder_path = os.path.join(self.download_root, clean_folder_name(char_name_from_filter_list))
if self.use_post_subfolders:
cleaned_title_for_subfolder = clean_folder_name(post_title)
post_specific_subfolder_name = f"{post_id}_{cleaned_title_for_subfolder}" if cleaned_title_for_subfolder else f"{post_id}_untitled"
actual_target_full_paths_for_this_file.append(os.path.join(base_char_folder_path, post_specific_subfolder_name))
else:
actual_target_full_paths_for_this_file.append(base_char_folder_path)
else:
for base_folder_name in potential_base_save_folders:
base_folder_path = os.path.join(self.download_root, base_folder_name)
if self.use_post_subfolders:
cleaned_title_for_subfolder = clean_folder_name(post_title)
post_specific_subfolder_name = f"{post_id}_{cleaned_title_for_subfolder}" if cleaned_title_for_subfolder else f"{post_id}_untitled"
actual_target_full_paths_for_this_file.append(os.path.join(base_folder_path, post_specific_subfolder_name))
else:
actual_target_full_paths_for_this_file.append(base_folder_path)
else:
actual_target_full_paths_for_this_file = [self.download_root]
if self.target_post_id_from_initial_url and self.custom_folder_name:
custom_full_path = os.path.join(self.download_root, self.custom_folder_name)
actual_target_full_paths_for_this_file = [custom_full_path]
# self.logger(f" Using custom folder for single post: {custom_full_path}") # Logged once is enough
if not actual_target_full_paths_for_this_file:
self.logger(f" -> File Skip (No Target Folder): '{file_info_to_dl.get('_original_name_for_log')}' for post '{post_title[:30]}'. No character folder match or other path error.")
total_skipped_this_post +=1
continue
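# Illustrative target-path shapes produced above (placeholder values, not from a real run):
#   character-filter match "Alice" with post subfolders:  <download_root>/Alice/<post_id>_<cleaned_title>/
#   derived/known-name folder, no post subfolders:        <download_root>/<cleaned_folder_name>/
#   subfolders disabled / custom single-post folder:      <download_root>/  or  <download_root>/<custom_folder_name>/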
for target_path in set(actual_target_full_paths_for_this_file):
if self.check_cancel(): break
futures_list.append(file_pool.submit(
self._download_single_file,
file_info_to_dl,
target_path,
headers,
post_id,
self.skip_current_file_flag,
post_title,
file_idx
))
if self.check_cancel(): break
for future in as_completed(futures_list):
if self.check_cancel():
for f_to_cancel in futures_list:
if not f_to_cancel.done():
f_to_cancel.cancel()
break
try:
dl_count, skip_count = future.result()
total_downloaded_this_post += dl_count
total_skipped_this_post += skip_count
except CancelledError:
total_skipped_this_post += 1
except Exception as exc_f:
self.logger(f"❌ File download task for post {post_id} resulted in error: {exc_f}")
total_skipped_this_post += 1
if self.signals and hasattr(self.signals, 'file_progress_signal'):
self.signals.file_progress_signal.emit("", 0, 0)
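# The empty-filename / zero-byte emit above acts as a reset for the per-file progress
# display once this post's file tasks have finished.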
if self.check_cancel(): self.logger(f"   Post {post_id} processing interrupted/cancelled.")
else: self.logger(f" Post {post_id} Summary: Downloaded={total_downloaded_this_post}, Skipped Files={total_skipped_this_post}")
return total_downloaded_this_post, total_skipped_this_post
class DownloadThread(QThread):
"""
Manages the overall download process.
Fetches posts using download_from_api and then processes each post using PostProcessorWorker.
This class is typically used when the GUI needs a separate thread for the entire download operation
(e.g., when not using the multi-threaded PostFetcher model from the main app).
"""
progress_signal = pyqtSignal(str) # For general log messages
add_character_prompt_signal = pyqtSignal(str) # To ask user to add character to known list
file_download_status_signal = pyqtSignal(bool) # True when a file download starts, False when it ends
finished_signal = pyqtSignal(int, int, bool) # (total_downloaded, total_skipped, was_cancelled)
external_link_signal = pyqtSignal(str, str, str, str) # (post_title, link_text, link_url, platform)
file_progress_signal = pyqtSignal(str, int, int) # (filename, downloaded_bytes, total_bytes)
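# Illustrative GUI-side wiring (a minimal sketch; the URL and slot names are placeholders,
# not taken from main.py):
#
#   cancel_event = threading.Event()
#   thread = DownloadThread(
#       api_url_input="https://kemono.su/api/v1/patreon/user/12345",  # placeholder URL
#       output_dir="/path/to/downloads",
#       known_names_copy=list(KNOWN_NAMES),
#       cancellation_event=cancel_event,
#   )
#   thread.progress_signal.connect(log_widget.append)        # hypothetical slot
#   thread.external_link_signal.connect(on_external_link)    # hypothetical slot
#   thread.finished_signal.connect(on_download_finished)     # hypothetical slot
#   thread.start()
#   ...
#   cancel_event.set()  # request cancellation from the GUI side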
def __init__(self, api_url_input, output_dir, known_names_copy,
cancellation_event, # threading.Event()
filter_character_list=None,
filter_mode='all', skip_zip=True, skip_rar=True,
use_subfolders=True, use_post_subfolders=False, custom_folder_name=None, compress_images=False,
download_thumbnails=False, service=None, user_id=None,
downloaded_files=None, downloaded_file_hashes=None, downloaded_files_lock=None, downloaded_file_hashes_lock=None,
skip_words_list=None,
show_external_links=False,
extract_links_only=False,
num_file_threads_for_worker=1, # Threads per PostProcessorWorker instance
skip_current_file_flag=None, # threading.Event() to skip one file
start_page=None, end_page=None,
target_post_id_from_initial_url=None, # If the input URL was a specific post
manga_mode_active=False,
unwanted_keywords=None # Set of keywords to avoid in auto-generated folder names
):
super().__init__()
# --- Store all passed arguments as instance attributes ---
self.api_url_input = api_url_input
self.output_dir = output_dir
self.known_names = list(known_names_copy) # Use a copy
self.cancellation_event = cancellation_event
self.skip_current_file_flag = skip_current_file_flag
self.initial_target_post_id = target_post_id_from_initial_url
self.filter_character_list = filter_character_list if filter_character_list else []
self.filter_mode = filter_mode
self.skip_zip = skip_zip
self.skip_rar = skip_rar
self.use_subfolders = use_subfolders
self.use_post_subfolders = use_post_subfolders
self.custom_folder_name = custom_folder_name
self.compress_images = compress_images
self.download_thumbnails = download_thumbnails
self.service = service
self.user_id = user_id
self.skip_words_list = skip_words_list if skip_words_list is not None else []
# Shared sets and locks for tracking downloads across potential multiple workers (if this thread spawns them)
self.downloaded_files = downloaded_files if downloaded_files is not None else set()
self.downloaded_files_lock = downloaded_files_lock if downloaded_files_lock is not None else threading.Lock()
self.downloaded_file_hashes = downloaded_file_hashes if downloaded_file_hashes is not None else set()
self.downloaded_file_hashes_lock = downloaded_file_hashes_lock if downloaded_file_hashes_lock is not None else threading.Lock()
self._add_character_response = None # For handling synchronous prompt results
self.prompt_mutex = QMutex() # Mutex for _add_character_response
self.show_external_links = show_external_links
self.extract_links_only = extract_links_only
self.num_file_threads_for_worker = num_file_threads_for_worker
self.start_page = start_page
self.end_page = end_page
self.manga_mode_active = manga_mode_active
self.unwanted_keywords = unwanted_keywords if unwanted_keywords is not None else \
{'spicy', 'hd', 'nsfw', '4k', 'preview', 'teaser', 'clip'} # Default unwanted keywords
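# e.g. for a title like "Alice HD preview", the words 'hd' and 'preview' are presumably
# ignored when deriving folder names (an assumption about match_folders_from_title /
# extract_folder_name_from_title, which are defined elsewhere in this module).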
# Disable compression if Pillow is not available
if self.compress_images and Image is None:
self.logger("⚠️ Image compression disabled: Pillow library not found (DownloadThread).")
self.compress_images = False
def logger(self, message):
"""Emits a log message via the progress_signal."""
self.progress_signal.emit(str(message))
def isInterruptionRequested(self):
"""Checks if Qt interruption or manual cancellation event is set."""
return super().isInterruptionRequested() or self.cancellation_event.is_set()
def skip_file(self):
"""Sets the flag to skip the currently processing file (if any)."""
if self.isRunning() and self.skip_current_file_flag:
self.logger("⏭️ Skip requested for current file (single-thread mode).")
self.skip_current_file_flag.set() # Signal the PostProcessorWorker
else: self.logger(" Skip file: No download active or skip flag not available.")
def run(self):
"""Main execution logic for the download thread."""
grand_total_downloaded_files = 0
grand_total_skipped_files = 0
was_process_cancelled = False
# Create a signals object for PostProcessorWorker instances
# This allows PostProcessorWorker to emit signals that this DownloadThread can connect to.
worker_signals_obj = PostProcessorSignals()
try:
# Connect signals from the worker_signals_obj to this thread's signals
# This effectively forwards signals from PostProcessorWorker up to the GUI
worker_signals_obj.progress_signal.connect(self.progress_signal)
worker_signals_obj.file_download_status_signal.connect(self.file_download_status_signal)
worker_signals_obj.file_progress_signal.connect(self.file_progress_signal)
worker_signals_obj.external_link_signal.connect(self.external_link_signal)
self.logger(" Starting post fetch (single-threaded download process)...")
# Get the generator for fetching posts
post_generator = download_from_api(
self.api_url_input,
logger=self.logger, # Pass this thread's logger
start_page=self.start_page,
end_page=self.end_page,
manga_mode=self.manga_mode_active,
cancellation_event=self.cancellation_event # Pass cancellation event
)
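# download_from_api is assumed to be a generator yielding batches (lists) of post dicts;
# the nested loops below consume it batch by batch, post by post.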
for posts_batch_data in post_generator: # Iterate through batches of posts
if self.isInterruptionRequested(): was_process_cancelled = True; break
for individual_post_data in posts_batch_data: # Iterate through posts in a batch
if self.isInterruptionRequested(): was_process_cancelled = True; break
# Create a PostProcessorWorker for each post
post_processing_worker = PostProcessorWorker(
post_data=individual_post_data,
download_root=self.output_dir,
known_names=self.known_names, # Pass copy
filter_character_list=self.filter_character_list,
unwanted_keywords=self.unwanted_keywords,
filter_mode=self.filter_mode,
skip_zip=self.skip_zip, skip_rar=self.skip_rar,
use_subfolders=self.use_subfolders, use_post_subfolders=self.use_post_subfolders,
target_post_id_from_initial_url=self.initial_target_post_id,
custom_folder_name=self.custom_folder_name,
compress_images=self.compress_images, download_thumbnails=self.download_thumbnails,
service=self.service, user_id=self.user_id,
api_url_input=self.api_url_input,
cancellation_event=self.cancellation_event,
signals=worker_signals_obj, # Pass the shared signals object
downloaded_files=self.downloaded_files, # Pass shared sets and locks
downloaded_file_hashes=self.downloaded_file_hashes,
downloaded_files_lock=self.downloaded_files_lock,
downloaded_file_hashes_lock=self.downloaded_file_hashes_lock,
skip_words_list=self.skip_words_list,
show_external_links=self.show_external_links,
extract_links_only=self.extract_links_only,
num_file_threads=self.num_file_threads_for_worker,
skip_current_file_flag=self.skip_current_file_flag,
manga_mode_active=self.manga_mode_active
)
try:
# Process the post (this will block until the worker is done with this post)
dl_count, skip_count = post_processing_worker.process()
grand_total_downloaded_files += dl_count
grand_total_skipped_files += skip_count
except Exception as proc_err:
post_id_for_err = individual_post_data.get('id', 'N/A')
self.logger(f"❌ Error processing post {post_id_for_err} in DownloadThread: {proc_err}")
traceback.print_exc()
# Estimate skipped files for this post if worker failed catastrophically
num_potential_files_est = len(individual_post_data.get('attachments', [])) + \
(1 if individual_post_data.get('file') else 0)
grand_total_skipped_files += num_potential_files_est
# Clear the skip_current_file_flag if it was set and processed
if self.skip_current_file_flag and self.skip_current_file_flag.is_set():
self.skip_current_file_flag.clear()
self.logger(" Skip current file flag was processed and cleared by DownloadThread.")
self.msleep(10) # Small delay to allow GUI to update, if needed
if was_process_cancelled: break # Break from batch loop if cancelled
if not was_process_cancelled: self.logger("✅ All posts processed or end of content reached.")
except Exception as main_thread_err:
self.logger(f"\n❌ Critical error within DownloadThread run loop: {main_thread_err}")
traceback.print_exc()
# Ensure was_process_cancelled reflects the state if error wasn't due to user cancellation
if not self.isInterruptionRequested(): was_process_cancelled = False # Error, not user cancel
finally:
# Clean up: Disconnect signals to avoid issues if the thread is somehow reused or objects persist
try:
if worker_signals_obj: # Check if it was initialized
worker_signals_obj.progress_signal.disconnect(self.progress_signal)
worker_signals_obj.file_download_status_signal.disconnect(self.file_download_status_signal)
worker_signals_obj.external_link_signal.disconnect(self.external_link_signal)
worker_signals_obj.file_progress_signal.disconnect(self.file_progress_signal)
except (TypeError, RuntimeError) as e: # Catch if signals were already disconnected or other issues
self.logger(f" Note during DownloadThread signal disconnection: {e}")
# Emit the finished signal with totals and cancellation status
self.finished_signal.emit(grand_total_downloaded_files, grand_total_skipped_files, was_process_cancelled)
def receive_add_character_result(self, result):
"""Slot to receive the result from a character add prompt shown in the main thread."""
with QMutexLocker(self.prompt_mutex): # Ensure thread-safe access
self._add_character_response = result
self.logger(f" (DownloadThread) Received character prompt response: {'Yes (added/confirmed)' if result else 'No (declined/failed)'}")
# This response might be used by logic within the thread if it was waiting for it,
# though typically prompts are handled by the main GUI thread.
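# Illustrative round-trip for the character prompt (hypothetical GUI-side code; the slot
# and variable names are placeholders):
#
#   thread.add_character_prompt_signal.connect(show_add_character_dialog)
#   # ...inside the dialog handler, after the user answers:
#   thread.receive_add_character_result(user_said_yes)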