mirror of
https://github.com/Yuvi9587/Kemono-Downloader.git
synced 2025-12-29 16:14:44 +00:00
Compare commits
21 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
f774773b63 | ||
|
|
8036cb9835 | ||
|
|
13fc33d2c0 | ||
|
|
8663ef54a3 | ||
|
|
0316813792 | ||
|
|
d201a5396c | ||
|
|
86f9396b6c | ||
|
|
0fb4bb3cb0 | ||
|
|
1528d7ce25 | ||
|
|
4e7eeb7989 | ||
|
|
7f2976a4f4 | ||
|
|
8928cb92da | ||
|
|
a181b76124 | ||
|
|
8f085a8f63 | ||
|
|
93a997351b | ||
|
|
b3af6c1c15 | ||
|
|
4a65263f7d | ||
|
|
1091b5b9b4 | ||
|
|
f6b3ff2f5c | ||
|
|
b399bdf5cf | ||
|
|
9ace161bc8 |
BIN
Read.png
BIN
Read.png
Binary file not shown.
|
Before Width: | Height: | Size: 165 KiB After Width: | Height: | Size: 162 KiB |
@@ -31,6 +31,7 @@ from io import BytesIO
|
||||
STYLE_POST_TITLE = "post_title"
|
||||
STYLE_ORIGINAL_NAME = "original_name"
|
||||
STYLE_DATE_BASED = "date_based" # For manga date-based sequential naming
|
||||
MANGA_DATE_PREFIX_DEFAULT = "" # Default for the new prefix
|
||||
STYLE_POST_TITLE_GLOBAL_NUMBERING = "post_title_global_numbering" # For manga post title + global counter
|
||||
|
||||
SKIP_SCOPE_FILES = "files"
|
||||
@@ -51,6 +52,9 @@ KNOWN_NAMES = [] # This will now store dicts: {'name': str, 'is_group': bool, 'a
|
||||
|
||||
MIN_SIZE_FOR_MULTIPART_DOWNLOAD = 10 * 1024 * 1024 # 10 MB - Stays the same
|
||||
MAX_PARTS_FOR_MULTIPART_DOWNLOAD = 15 # Max concurrent connections for a single file
|
||||
# Max length for a single filename or folder name component to ensure cross-OS compatibility
|
||||
# Windows MAX_PATH is 260 for the full path. Individual components are usually shorter.
|
||||
MAX_FILENAME_COMPONENT_LENGTH = 150
|
||||
|
||||
IMAGE_EXTENSIONS = {
|
||||
'.jpg', '.jpeg', '.png', '.gif', '.bmp', '.tiff', '.tif', '.webp',
|
||||
@@ -63,6 +67,11 @@ VIDEO_EXTENSIONS = {
|
||||
ARCHIVE_EXTENSIONS = {
|
||||
'.zip', '.rar', '.7z', '.tar', '.gz', '.bz2'
|
||||
}
|
||||
AUDIO_EXTENSIONS = {
|
||||
'.mp3', '.wav', '.aac', '.flac', '.ogg', '.wma', '.m4a', '.opus',
|
||||
'.aiff', '.ape', '.mid', '.midi'
|
||||
}
|
||||
|
||||
def parse_cookie_string(cookie_string):
|
||||
"""Parses a 'name=value; name2=value2' cookie string into a dict."""
|
||||
cookies = {}
|
||||
@@ -131,18 +140,46 @@ def clean_folder_name(name):
|
||||
|
||||
if not cleaned: # If empty after initial cleaning
|
||||
return "untitled_folder"
|
||||
|
||||
# Truncate if too long
|
||||
if len(cleaned) > MAX_FILENAME_COMPONENT_LENGTH:
|
||||
cleaned = cleaned[:MAX_FILENAME_COMPONENT_LENGTH]
|
||||
# After truncation, it's possible a new trailing space/dot is at the end
|
||||
# or an existing one remains. So, strip them using the loop below.
|
||||
|
||||
# Strip trailing dots/spaces (original logic, now applied to potentially truncated name)
|
||||
temp_name = cleaned
|
||||
while len(temp_name) > 0 and (temp_name.endswith('.') or temp_name.endswith(' ')):
|
||||
temp_name = temp_name[:-1]
|
||||
|
||||
return temp_name if temp_name else "untitled_folder"
|
||||
|
||||
|
||||
def clean_filename(name):
|
||||
if not isinstance(name, str): name = str(name)
|
||||
cleaned = re.sub(r'[^\w\s\-\_\.\(\)]', '', name)
|
||||
cleaned = cleaned.strip()
|
||||
cleaned = re.sub(r'\s+', '_', cleaned)
|
||||
return cleaned if cleaned else "untitled_file"
|
||||
cleaned = cleaned.strip() # Remove leading/trailing spaces first
|
||||
cleaned = re.sub(r'\s+', ' ', cleaned) # Replace multiple internal spaces with a single space
|
||||
|
||||
if not cleaned: return "untitled_file"
|
||||
|
||||
base_name, ext = os.path.splitext(cleaned)
|
||||
|
||||
# Calculate max length for base_name, reserving space for the extension
|
||||
max_base_len = MAX_FILENAME_COMPONENT_LENGTH - len(ext)
|
||||
|
||||
if len(base_name) > max_base_len:
|
||||
if max_base_len > 0: # If there's space for at least some of the base name
|
||||
base_name = base_name[:max_base_len]
|
||||
else: # No space for base name (extension is too long or fills the entire allowed space)
|
||||
# In this case, we have to truncate the original 'cleaned' string,
|
||||
# which might cut into the extension, but it's necessary to meet the length.
|
||||
return cleaned[:MAX_FILENAME_COMPONENT_LENGTH] if cleaned else "untitled_file"
|
||||
|
||||
final_name = base_name + ext
|
||||
# Ensure the final reconstructed name isn't empty (e.g. if base_name became empty and ext was also empty)
|
||||
return final_name if final_name else "untitled_file"
|
||||
|
||||
|
||||
def strip_html_tags(html_text):
|
||||
if not html_text: return ""
|
||||
@@ -218,6 +255,12 @@ def is_archive(filename):
|
||||
_, ext = os.path.splitext(filename)
|
||||
return ext.lower() in ARCHIVE_EXTENSIONS
|
||||
|
||||
def is_audio(filename):
|
||||
if not filename: return False
|
||||
_, ext = os.path.splitext(filename)
|
||||
return ext.lower() in AUDIO_EXTENSIONS
|
||||
|
||||
|
||||
|
||||
def is_post_url(url):
|
||||
if not isinstance(url, str): return False
|
||||
@@ -262,19 +305,43 @@ def prepare_cookies_for_request(use_cookie_flag, cookie_text_input, selected_coo
|
||||
if not use_cookie_flag:
|
||||
return None
|
||||
|
||||
if cookie_text_input:
|
||||
logger_func(" 🍪 Using cookies from UI text input.")
|
||||
return parse_cookie_string(cookie_text_input)
|
||||
elif selected_cookie_file_path:
|
||||
# Attempt 1: Selected cookie file
|
||||
if selected_cookie_file_path:
|
||||
logger_func(f" 🍪 Attempting to load cookies from selected file: '{os.path.basename(selected_cookie_file_path)}'...")
|
||||
return load_cookies_from_netscape_file(selected_cookie_file_path, logger_func)
|
||||
elif app_base_dir:
|
||||
cookies_filepath = os.path.join(app_base_dir, "cookies.txt")
|
||||
logger_func(f" 🍪 No UI text or specific file selected. Attempting to load default '{os.path.basename(cookies_filepath)}' from app directory...")
|
||||
return load_cookies_from_netscape_file(cookies_filepath, logger_func)
|
||||
else:
|
||||
logger_func(" 🍪 Cookie usage enabled, but no text input, specific file, or app base directory provided for cookies.txt.")
|
||||
return None
|
||||
cookies = load_cookies_from_netscape_file(selected_cookie_file_path, logger_func)
|
||||
if cookies:
|
||||
return cookies
|
||||
else:
|
||||
logger_func(f" ⚠️ Failed to load cookies from selected file: '{os.path.basename(selected_cookie_file_path)}'. Trying other methods.")
|
||||
# Fall through if selected file is invalid or not found
|
||||
|
||||
# Attempt 2: Default cookies.txt in app directory
|
||||
# This is tried if no specific file was selected OR if the selected file was provided but failed to load.
|
||||
if app_base_dir: # Only proceed if app_base_dir is available
|
||||
# Avoid re-logging "not found" or "failed" if a selected_cookie_file_path was already attempted and failed.
|
||||
# Only log the attempt for default if no selected_cookie_file_path was given.
|
||||
default_cookies_path = os.path.join(app_base_dir, "cookies.txt")
|
||||
if os.path.exists(default_cookies_path): # Only attempt if it exists
|
||||
if not selected_cookie_file_path: # Log attempt only if we didn't just try a selected file
|
||||
logger_func(f" 🍪 No specific file selected. Attempting to load default '{os.path.basename(default_cookies_path)}' from app directory...")
|
||||
cookies = load_cookies_from_netscape_file(default_cookies_path, logger_func)
|
||||
if cookies:
|
||||
return cookies
|
||||
elif not selected_cookie_file_path: # Log failure only if we tried default as primary file method
|
||||
logger_func(f" ⚠️ Failed to load cookies from default file: '{os.path.basename(default_cookies_path)}'. Trying text input.")
|
||||
# Fall through if default file is invalid or not found
|
||||
|
||||
# Attempt 3: Cookies from UI text input
|
||||
if cookie_text_input:
|
||||
logger_func(" 🍪 Using cookies from UI text input (as file methods failed or were not applicable).")
|
||||
cookies = parse_cookie_string(cookie_text_input)
|
||||
if cookies:
|
||||
return cookies
|
||||
else:
|
||||
logger_func(" ⚠️ UI cookie text input was provided but was empty or invalid.")
|
||||
|
||||
logger_func(" 🍪 Cookie usage enabled, but no valid cookies found from any source (selected file, default file, or text input).")
|
||||
return None
|
||||
|
||||
def fetch_posts_paginated(api_url_base, headers, offset, logger, cancellation_event=None, pause_event=None, cookies_dict=None):
|
||||
if cancellation_event and cancellation_event.is_set(): # type: ignore
|
||||
@@ -602,12 +669,15 @@ class PostProcessorWorker:
|
||||
allow_multipart_download=True,
|
||||
cookie_text="", # Added missing parameter
|
||||
use_cookie=False, # Added missing parameter
|
||||
override_output_dir=None, # New parameter
|
||||
selected_cookie_file=None, # Added missing parameter
|
||||
app_base_dir=None, # New parameter for app's base directory
|
||||
manga_date_prefix=MANGA_DATE_PREFIX_DEFAULT, # New parameter for date-based prefix
|
||||
manga_date_file_counter_ref=None, # New parameter for date-based manga naming
|
||||
scan_content_for_images=False, # New flag for scanning HTML content
|
||||
manga_global_file_counter_ref=None, # New parameter for global numbering
|
||||
): # type: ignore
|
||||
self.post = post_data
|
||||
self.post = post_data # type: ignore
|
||||
self.download_root = download_root
|
||||
self.known_names = known_names
|
||||
self.filter_character_list_objects_initial = filter_character_list if filter_character_list else [] # Store initial
|
||||
@@ -652,10 +722,14 @@ class PostProcessorWorker:
|
||||
self.selected_cookie_file = selected_cookie_file # Store selected cookie file path
|
||||
self.app_base_dir = app_base_dir # Store app base dir
|
||||
self.cookie_text = cookie_text # Store cookie text
|
||||
self.manga_date_prefix = manga_date_prefix # Store the prefix
|
||||
self.manga_global_file_counter_ref = manga_global_file_counter_ref # Store global counter
|
||||
self.use_cookie = use_cookie # Store cookie setting
|
||||
self.override_output_dir = override_output_dir # Store the override directory
|
||||
self.scan_content_for_images = scan_content_for_images # Store new flag
|
||||
|
||||
if self.compress_images and Image is None:
|
||||
# type: ignore
|
||||
self.logger("⚠️ Image compression disabled: Pillow library not found.")
|
||||
self.compress_images = False
|
||||
|
||||
@@ -676,9 +750,9 @@ class PostProcessorWorker:
|
||||
return self.cancellation_event.is_set()
|
||||
|
||||
def _check_pause(self, context_message="Operation"):
|
||||
if self.pause_event and self.pause_event.is_set():
|
||||
if self.pause_event and self.pause_event.is_set(): # type: ignore
|
||||
self.logger(f" {context_message} paused...")
|
||||
while self.pause_event.is_set(): # Loop while pause_event is set
|
||||
while self.pause_event.is_set(): # type: ignore # Loop while pause_event is set
|
||||
if self.check_cancel():
|
||||
self.logger(f" {context_message} cancelled while paused.")
|
||||
return True # Indicates cancellation occurred
|
||||
@@ -734,6 +808,14 @@ class PostProcessorWorker:
|
||||
if self.manga_mode_active: # Note: duplicate_file_mode is overridden to "Delete" in main.py if manga_mode is on
|
||||
if self.manga_filename_style == STYLE_ORIGINAL_NAME:
|
||||
filename_to_save_in_main_path = clean_filename(api_original_filename)
|
||||
# Apply prefix if provided for Original Name style
|
||||
if self.manga_date_prefix and self.manga_date_prefix.strip():
|
||||
cleaned_prefix = clean_filename(self.manga_date_prefix.strip())
|
||||
if cleaned_prefix:
|
||||
filename_to_save_in_main_path = f"{cleaned_prefix} {filename_to_save_in_main_path}"
|
||||
else:
|
||||
self.logger(f"⚠️ Manga Original Name Mode: Provided prefix '{self.manga_date_prefix}' was empty after cleaning. Using original name only.")
|
||||
|
||||
was_original_name_kept_flag = True
|
||||
elif self.manga_filename_style == STYLE_POST_TITLE:
|
||||
if post_title and post_title.strip():
|
||||
@@ -742,8 +824,8 @@ class PostProcessorWorker:
|
||||
if file_index_in_post == 0:
|
||||
filename_to_save_in_main_path = f"{cleaned_post_title_base}{original_ext}"
|
||||
else:
|
||||
filename_to_save_in_main_path = clean_filename(api_original_filename)
|
||||
was_original_name_kept_flag = True
|
||||
filename_to_save_in_main_path = f"{cleaned_post_title_base}_{file_index_in_post}{original_ext}"
|
||||
was_original_name_kept_flag = False # Name is derived, not original
|
||||
else:
|
||||
filename_to_save_in_main_path = f"{cleaned_post_title_base}{original_ext}"
|
||||
else:
|
||||
@@ -759,7 +841,15 @@ class PostProcessorWorker:
|
||||
counter_val_for_filename = manga_date_file_counter_ref[0]
|
||||
manga_date_file_counter_ref[0] += 1
|
||||
|
||||
filename_to_save_in_main_path = f"{counter_val_for_filename:03d}{original_ext}"
|
||||
base_numbered_name = f"{counter_val_for_filename:03d}"
|
||||
if self.manga_date_prefix and self.manga_date_prefix.strip():
|
||||
cleaned_prefix = clean_filename(self.manga_date_prefix.strip())
|
||||
if cleaned_prefix: # Ensure prefix is not empty after cleaning
|
||||
filename_to_save_in_main_path = f"{cleaned_prefix} {base_numbered_name}{original_ext}"
|
||||
else: # Prefix became empty after cleaning
|
||||
filename_to_save_in_main_path = f"{base_numbered_name}{original_ext}"; self.logger(f"⚠️ Manga Date Mode: Provided prefix '{self.manga_date_prefix}' was empty after cleaning. Using number only.")
|
||||
else: # No prefix provided
|
||||
filename_to_save_in_main_path = f"{base_numbered_name}{original_ext}"
|
||||
else:
|
||||
self.logger(f"⚠️ Manga Date Mode: Counter ref not provided or malformed for '{api_original_filename}'. Using original. Ref: {manga_date_file_counter_ref}")
|
||||
filename_to_save_in_main_path = clean_filename(api_original_filename)
|
||||
@@ -796,8 +886,10 @@ class PostProcessorWorker:
|
||||
if not word_to_remove: continue
|
||||
pattern = re.compile(re.escape(word_to_remove), re.IGNORECASE)
|
||||
modified_base_name = pattern.sub("", modified_base_name)
|
||||
modified_base_name = re.sub(r'[_.\s-]+', '_', modified_base_name)
|
||||
modified_base_name = modified_base_name.strip('_')
|
||||
# After removals, normalize all seps (underscore, dot, multiple spaces, hyphen) to a single space, then strip.
|
||||
modified_base_name = re.sub(r'[_.\s-]+', ' ', modified_base_name) # Convert all separators to spaces
|
||||
modified_base_name = re.sub(r'\s+', ' ', modified_base_name) # Condense multiple spaces to one
|
||||
modified_base_name = modified_base_name.strip() # Remove leading/trailing spaces
|
||||
if modified_base_name and modified_base_name != ext_for_removal.lstrip('.'):
|
||||
filename_to_save_in_main_path = modified_base_name + ext_for_removal
|
||||
else:
|
||||
@@ -807,6 +899,7 @@ class PostProcessorWorker:
|
||||
is_img_type = is_image(api_original_filename)
|
||||
is_vid_type = is_video(api_original_filename)
|
||||
is_archive_type = is_archive(api_original_filename)
|
||||
is_audio_type = is_audio(api_original_filename)
|
||||
|
||||
if self.filter_mode == 'archive':
|
||||
if not is_archive_type:
|
||||
@@ -820,6 +913,10 @@ class PostProcessorWorker:
|
||||
if not is_vid_type:
|
||||
self.logger(f" -> Filter Skip: '{api_original_filename}' (Not Video).")
|
||||
return 0, 1, api_original_filename, False, FILE_DOWNLOAD_STATUS_SKIPPED, None
|
||||
elif self.filter_mode == 'audio': # New audio filter mode
|
||||
if not is_audio_type:
|
||||
self.logger(f" -> Filter Skip: '{api_original_filename}' (Not Audio).")
|
||||
return 0, 1, api_original_filename, False, FILE_DOWNLOAD_STATUS_SKIPPED, None
|
||||
|
||||
if self.skip_zip and is_zip(api_original_filename):
|
||||
self.logger(f" -> Pref Skip: '{api_original_filename}' (ZIP).")
|
||||
@@ -1271,7 +1368,7 @@ class PostProcessorWorker:
|
||||
self.logger(f" -> Skip Post (Folder Keyword): Potential folder '{folder_name_to_check}' contains '{matched_skip}'.")
|
||||
return 0, num_potential_files_in_post, [], []
|
||||
|
||||
if (self.show_external_links or self.extract_links_only) and post_content_html:
|
||||
if (self.show_external_links or self.extract_links_only) and post_content_html: # type: ignore
|
||||
if self._check_pause(f"External link extraction for post {post_id}"): return 0, num_potential_files_in_post, [], []
|
||||
try:
|
||||
unique_links_data = {}
|
||||
@@ -1318,14 +1415,14 @@ class PostProcessorWorker:
|
||||
if original_api_name:
|
||||
all_files_from_post_api.append({
|
||||
'url': f"https://{api_file_domain}{file_path}" if file_path.startswith('/') else f"https://{api_file_domain}/data/{file_path}",
|
||||
'name': original_api_name,
|
||||
'name': original_api_name, # This is the cleaned/API provided name
|
||||
'_original_name_for_log': original_api_name,
|
||||
'_is_thumbnail': self.download_thumbnails and is_image(original_api_name)
|
||||
'_is_thumbnail': is_image(original_api_name) # Mark if it's an image from API
|
||||
})
|
||||
else: self.logger(f" ⚠️ Skipping main file for post {post_id}: Missing name (Path: {file_path})")
|
||||
|
||||
for idx, att_info in enumerate(post_attachments):
|
||||
if isinstance(att_info, dict) and att_info.get('path'):
|
||||
if isinstance(att_info, dict) and att_info.get('path'): # Ensure att_info is a dict
|
||||
att_path = att_info['path'].lstrip('/')
|
||||
original_api_att_name = att_info.get('name') or os.path.basename(att_path)
|
||||
if original_api_att_name:
|
||||
@@ -1333,16 +1430,99 @@ class PostProcessorWorker:
|
||||
'url': f"https://{api_file_domain}{att_path}" if att_path.startswith('/') else f"https://{api_file_domain}/data/{att_path}",
|
||||
'name': original_api_att_name,
|
||||
'_original_name_for_log': original_api_att_name,
|
||||
'_is_thumbnail': self.download_thumbnails and is_image(original_api_att_name)
|
||||
'_is_thumbnail': is_image(original_api_att_name) # Mark if it's an image from API
|
||||
})
|
||||
else: self.logger(f" ⚠️ Skipping attachment {idx+1} for post {post_id}: Missing name (Path: {att_path})")
|
||||
else: self.logger(f" ⚠️ Skipping invalid attachment {idx+1} for post {post_id}: {str(att_info)[:100]}")
|
||||
|
||||
# --- New: Scan post content for additional image URLs if enabled ---
|
||||
if self.scan_content_for_images and post_content_html and not self.extract_links_only: # This block was duplicated, ensure only one exists
|
||||
self.logger(f" Scanning post content for additional image URLs (Post ID: {post_id})...")
|
||||
|
||||
parsed_input_url = urlparse(self.api_url_input)
|
||||
base_url_for_relative_paths = f"{parsed_input_url.scheme}://{parsed_input_url.netloc}"
|
||||
img_ext_pattern = "|".join(ext.lstrip('.') for ext in IMAGE_EXTENSIONS)
|
||||
|
||||
# 1. Regex for direct absolute image URLs in text
|
||||
direct_url_pattern_str = r"""(?i)\b(https?://[^\s"'<>\[\]\{\}\|\^\\^~\[\]`]+\.(?:""" + img_ext_pattern + r"""))\b"""
|
||||
# 2. Regex for <img> tags (captures src content)
|
||||
img_tag_src_pattern_str = r"""<img\s+[^>]*?src\s*=\s*["']([^"']+)["']"""
|
||||
|
||||
found_image_sources = set()
|
||||
|
||||
for direct_url_match in re.finditer(direct_url_pattern_str, post_content_html):
|
||||
found_image_sources.add(direct_url_match.group(1))
|
||||
|
||||
for img_tag_match in re.finditer(img_tag_src_pattern_str, post_content_html, re.IGNORECASE):
|
||||
src_attr = img_tag_match.group(1).strip()
|
||||
src_attr = html.unescape(src_attr)
|
||||
if not src_attr: continue
|
||||
|
||||
resolved_src_url = ""
|
||||
if src_attr.startswith(('http://', 'https://')):
|
||||
resolved_src_url = src_attr
|
||||
elif src_attr.startswith('//'):
|
||||
resolved_src_url = f"{parsed_input_url.scheme}:{src_attr}"
|
||||
elif src_attr.startswith('/'):
|
||||
resolved_src_url = f"{base_url_for_relative_paths}{src_attr}"
|
||||
|
||||
if resolved_src_url:
|
||||
parsed_resolved_url = urlparse(resolved_src_url)
|
||||
if any(parsed_resolved_url.path.lower().endswith(ext) for ext in IMAGE_EXTENSIONS):
|
||||
found_image_sources.add(resolved_src_url)
|
||||
|
||||
if found_image_sources:
|
||||
self.logger(f" Found {len(found_image_sources)} potential image URLs/sources in content.")
|
||||
existing_urls_in_api_list = {f_info['url'] for f_info in all_files_from_post_api}
|
||||
|
||||
for found_url in found_image_sources: # Iterate over the unique, resolved URLs
|
||||
if self.check_cancel(): break
|
||||
if found_url in existing_urls_in_api_list:
|
||||
self.logger(f" Skipping URL from content (already in API list or previously added from content): {found_url[:70]}...")
|
||||
continue
|
||||
try:
|
||||
parsed_found_url = urlparse(found_url)
|
||||
url_filename = os.path.basename(parsed_found_url.path)
|
||||
if not url_filename or not is_image(url_filename):
|
||||
self.logger(f" Skipping URL from content (no filename part or not an image extension): {found_url[:70]}...")
|
||||
continue
|
||||
|
||||
self.logger(f" Adding image from content: {url_filename} (URL: {found_url[:70]}...)")
|
||||
all_files_from_post_api.append({
|
||||
'url': found_url,
|
||||
'name': url_filename,
|
||||
'_original_name_for_log': url_filename,
|
||||
'_is_thumbnail': False, # Images from content are not API thumbnails
|
||||
'_from_content_scan': True
|
||||
})
|
||||
existing_urls_in_api_list.add(found_url)
|
||||
except Exception as e_url_parse:
|
||||
self.logger(f" Error processing URL from content '{found_url[:70]}...': {e_url_parse}")
|
||||
else:
|
||||
self.logger(f" No additional image URLs found in post content scan for post {post_id}.")
|
||||
# --- End of new content scanning logic ---
|
||||
|
||||
# --- Final filtering based on download_thumbnails and scan_content_for_images flags ---
|
||||
if self.download_thumbnails:
|
||||
all_files_from_post_api = [finfo for finfo in all_files_from_post_api if finfo['_is_thumbnail']]
|
||||
if not all_files_from_post_api:
|
||||
self.logger(f" -> No image thumbnails found for post {post_id} in thumbnail-only mode.")
|
||||
return 0, 0, [], []
|
||||
if self.scan_content_for_images:
|
||||
# Both "Download Thumbnails Only" AND "Scan Content for Images" are checked.
|
||||
# Prioritize images from content scan.
|
||||
self.logger(f" Mode: 'Download Thumbnails Only' + 'Scan Content for Images' active. Prioritizing images from content scan for post {post_id}.")
|
||||
all_files_from_post_api = [finfo for finfo in all_files_from_post_api if finfo.get('_from_content_scan')]
|
||||
if not all_files_from_post_api:
|
||||
self.logger(f" -> No images found via content scan for post {post_id} in this combined mode.")
|
||||
return 0, 0, [], [] # No files to download for this post
|
||||
else:
|
||||
# Only "Download Thumbnails Only" is checked. Filter for API thumbnails.
|
||||
self.logger(f" Mode: 'Download Thumbnails Only' active. Filtering for API thumbnails for post {post_id}.")
|
||||
all_files_from_post_api = [finfo for finfo in all_files_from_post_api if finfo.get('_is_thumbnail')]
|
||||
if not all_files_from_post_api:
|
||||
self.logger(f" -> No API image thumbnails found for post {post_id} in thumbnail-only mode.")
|
||||
return 0, 0, [], [] # No files to download for this post
|
||||
# If self.download_thumbnails is False, all_files_from_post_api remains as is.
|
||||
# It will contain all API files (images marked with _is_thumbnail: True, others False)
|
||||
# and potentially content-scanned images (marked with _from_content_scan: True).
|
||||
|
||||
if self.manga_mode_active and self.manga_filename_style == STYLE_DATE_BASED:
|
||||
def natural_sort_key_for_files(file_api_info):
|
||||
name = file_api_info.get('_original_name_for_log', '').lower()
|
||||
@@ -1444,7 +1624,7 @@ class PostProcessorWorker:
|
||||
total_skipped_this_post += 1
|
||||
continue
|
||||
|
||||
current_path_for_file = self.download_root
|
||||
current_path_for_file = self.override_output_dir if self.override_output_dir else self.download_root # Use override if provided
|
||||
|
||||
if self.use_subfolders:
|
||||
char_title_subfolder_name = None
|
||||
@@ -1548,18 +1728,21 @@ class DownloadThread(QThread):
|
||||
manga_filename_style=STYLE_POST_TITLE,
|
||||
char_filter_scope=CHAR_SCOPE_FILES, # manga_date_file_counter_ref removed from here
|
||||
remove_from_filename_words_list=None,
|
||||
manga_date_prefix=MANGA_DATE_PREFIX_DEFAULT, # New parameter
|
||||
allow_multipart_download=True,
|
||||
selected_cookie_file=None, # New parameter for selected cookie file
|
||||
override_output_dir=None, # New parameter
|
||||
app_base_dir=None, # New parameter
|
||||
manga_date_file_counter_ref=None, # New parameter
|
||||
manga_global_file_counter_ref=None, # New parameter for global numbering
|
||||
use_cookie=False, # Added: Expected by main.py
|
||||
scan_content_for_images=False, # Added new flag
|
||||
cookie_text="", # Added: Expected by main.py
|
||||
):
|
||||
super().__init__()
|
||||
self.api_url_input = api_url_input
|
||||
self.output_dir = output_dir
|
||||
self.known_names = list(known_names_copy)
|
||||
self.known_names = list(known_names_copy) # type: ignore
|
||||
self.cancellation_event = cancellation_event
|
||||
self.pause_event = pause_event # Store pause_event
|
||||
self.skip_current_file_flag = skip_current_file_flag
|
||||
@@ -1597,12 +1780,15 @@ class DownloadThread(QThread):
|
||||
self.manga_filename_style = manga_filename_style
|
||||
self.char_filter_scope = char_filter_scope
|
||||
self.remove_from_filename_words_list = remove_from_filename_words_list
|
||||
self.manga_date_prefix = manga_date_prefix # Store the prefix
|
||||
self.allow_multipart_download = allow_multipart_download
|
||||
self.selected_cookie_file = selected_cookie_file # Store selected cookie file
|
||||
self.app_base_dir = app_base_dir # Store app base dir
|
||||
self.cookie_text = cookie_text # Store cookie text
|
||||
self.use_cookie = use_cookie # Store cookie setting
|
||||
self.override_output_dir = override_output_dir # Store override dir
|
||||
self.manga_date_file_counter_ref = manga_date_file_counter_ref # Store for passing to worker by DownloadThread
|
||||
self.scan_content_for_images = scan_content_for_images # Store new flag
|
||||
self.manga_global_file_counter_ref = manga_global_file_counter_ref # Store for global numbering
|
||||
if self.compress_images and Image is None:
|
||||
self.logger("⚠️ Image compression disabled: Pillow library not found (DownloadThread).")
|
||||
@@ -1726,15 +1912,18 @@ class DownloadThread(QThread):
|
||||
skip_current_file_flag=self.skip_current_file_flag,
|
||||
manga_mode_active=self.manga_mode_active,
|
||||
manga_filename_style=self.manga_filename_style,
|
||||
manga_date_prefix=self.manga_date_prefix, # Pass the prefix
|
||||
char_filter_scope=self.char_filter_scope,
|
||||
remove_from_filename_words_list=self.remove_from_filename_words_list,
|
||||
allow_multipart_download=self.allow_multipart_download,
|
||||
selected_cookie_file=self.selected_cookie_file, # Pass selected cookie file
|
||||
app_base_dir=self.app_base_dir, # Pass app_base_dir
|
||||
cookie_text=self.cookie_text, # Pass cookie text
|
||||
override_output_dir=self.override_output_dir, # Pass override dir
|
||||
manga_global_file_counter_ref=self.manga_global_file_counter_ref, # Pass the ref
|
||||
use_cookie=self.use_cookie, # Pass cookie setting to worker
|
||||
manga_date_file_counter_ref=current_manga_date_file_counter_ref, # Pass the calculated or passed-in ref
|
||||
scan_content_for_images=self.scan_content_for_images, # Pass new flag
|
||||
)
|
||||
try:
|
||||
dl_count, skip_count, kept_originals_this_post, retryable_failures = post_processing_worker.process()
|
||||
|
||||
@@ -5,6 +5,7 @@ import hashlib
|
||||
import http.client
|
||||
import traceback
|
||||
import threading
|
||||
import queue # Import the missing 'queue' module
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed
|
||||
|
||||
CHUNK_DOWNLOAD_RETRY_DELAY = 2 # Slightly reduced for faster retries if needed
|
||||
|
||||
85
readme.md
85
readme.md
@@ -1,4 +1,4 @@
|
||||
<h1 align="center">Kemono Downloader v4.0.0</h1>
|
||||
<h1 align="center">Kemono Downloader v4.1.1</h1>
|
||||
|
||||
<div align="center">
|
||||
<img src="https://github.com/Yuvi9587/Kemono-Downloader/blob/main/Read.png" alt="Kemono Downloader"/>
|
||||
@@ -11,9 +11,49 @@ Built with **PyQt5**, this tool is ideal for users who want deep filtering, cust
|
||||
|
||||
---
|
||||
|
||||
## What's New in v4.0.0?
|
||||
## What's New in v4.1.1? - Enhanced Image Discovery & Audio Filtering
|
||||
|
||||
Version 3.5.0 focuses on enhancing access to content and providing even smarter organization:
|
||||
Version 4.1.1 brings significant enhancements, including smarter image capture from post content and a dedicated filter mode for audio files.
|
||||
|
||||
### "Scan Content for Images" Feature
|
||||
|
||||
- **Enhanced Image Discovery:** A new checkbox, "**Scan Content for Images**," has been added to the UI (grouped with "Download Thumbnails Only" and "Compress Large Images").
|
||||
- **How it Works:**
|
||||
- When enabled, the downloader meticulously scans the HTML content of each post's description or body.
|
||||
- It searches for images in two main ways:
|
||||
- **Directly linked absolute URLs** (e.g., `https://externalsite.com/image.png`) that end with a common image extension (jpg, png, gif, etc.).
|
||||
- **Images embedded using HTML `<img>` tags.** The downloader extracts the `src` attribute from these tags and can resolve various path types:
|
||||
- Absolute URLs (e.g., `http://...` or `https://...`)
|
||||
- Protocol-relative URLs (e.g., `//cdn.example.com/image.jpg`)
|
||||
- Root-relative paths (e.g., `/data/user_content/image.gif`), which are resolved against the site's base URL (like `https://kemono.su/data/user_content/image.gif`).
|
||||
- This is particularly useful for capturing images that are part of the post's narrative but not formally listed in the API's file or attachment sections.
|
||||
- **Default State:** This option is **unchecked by default**.
|
||||
- **Key Interaction with "Download Thumbnails Only":** This new feature works closely with the existing "Download Thumbnails Only" option:
|
||||
- If you enable "Download Thumbnails Only":
|
||||
- The "Scan Content for Images" checkbox will **automatically become checked and disabled** (locked).
|
||||
- The downloader then **exclusively downloads images discovered through the content scan**. Any API-listed thumbnails are bypassed, giving priority to images embedded directly in the post.
|
||||
- If you disable "Download Thumbnails Only":
|
||||
- The "Scan Content for Images" checkbox will become **enabled again and revert to being unchecked**. You can then manually enable it if you wish to scan content without being in thumbnail-only mode.
|
||||
|
||||
This feature ensures a more comprehensive download experience, especially for posts where images are integrated directly into the text.
|
||||
|
||||
### New "🎧 Only Audio" Filter Mode
|
||||
|
||||
Alongside image discovery, v4.1.1 also introduces/enhances a dedicated filter mode for audio enthusiasts:
|
||||
|
||||
- **Focused Audio Downloads:** The "🎧 Only Audio" option in the "Filter Files" radio button group allows you to download exclusively common audio file types. This includes formats like MP3, WAV, FLAC, M4A, OGG, and more.
|
||||
- **Streamlined UI:** When "🎧 Only Audio" mode is active:
|
||||
- Irrelevant UI options such as the "Skip Scope" button (for word-based post/file skipping) and the "Multi-part Download" toggle are hidden to simplify the interface.
|
||||
- The "Show External Links in Log" checkbox is automatically disabled, as link extraction is not the focus of this mode.
|
||||
- **Archive Handling:** Unlike the "📦 Only Archives" mode (which disables archive skipping), the "Skip .zip" and "Skip .rar" checkboxes remain enabled and configurable when "🎧 Only Audio" is selected. This gives you the flexibility to also exclude any archives encountered while in audio-only mode if desired.
|
||||
- **Purpose:** This mode is perfect for users who primarily want to collect audio tracks, podcasts, or sound effects from posts without downloading other media types.
|
||||
|
||||
|
||||
---
|
||||
|
||||
## Previous Update: What's New in v4.0.1?
|
||||
|
||||
Version 4.0.1 focuses on enhancing access to content and providing even smarter organization:
|
||||
|
||||
### Cookie Management
|
||||
|
||||
@@ -71,13 +111,30 @@ This field allows for dynamic filtering for the current download session and pro
|
||||
- **Adding New Names from Filters:** When you use the "Filter by Character(s)" input, if any names or groups are new (not already in `Known.txt`), a dialog will appear after you start the download. This dialog allows you to select which of these new names/groups should be added to `Known.txt`, formatted according to the rules described above.
|
||||
- **Intelligent Fallback:** If "Separate Folders by Name/Title" is active, and content doesn't match the "Filter by Character(s)" UI input, the downloader consults your `Known.txt` file for folder naming.
|
||||
- **Direct Management:** You can add simple entries directly to `Known.txt` using the list and "Add" button in the UI's `Known.txt` management section. For creating or modifying complex grouped alias entries directly in the file, or for bulk edits, click the "Open Known.txt" button. The application reloads `Known.txt` on startup or before a download process begins.
|
||||
- **Using Known Names to Populate Filters (via "Add to Filter" Button):**
|
||||
- Next to the "Add" button in the `Known.txt` management section, a "⤵️ Add to Filter" button provides a quick way to use your existing known names.
|
||||
- Clicking this opens a popup window displaying all entries from your `Known.txt` file, each with a checkbox.
|
||||
- The popup includes:
|
||||
- A search bar to quickly filter the list of names.
|
||||
- "Select All" and "Deselect All" buttons for convenience.
|
||||
- After selecting the desired names, click "Add Selected".
|
||||
- The chosen names will be inserted into the "Filter by Character(s)" input field.
|
||||
- **Important Formatting:** If a selected entry from `Known.txt` is a group (e.g., originally `(Boa Hancock)` in `Known.txt`, which implies aliases "Boa" and "Hancock"), it will be added to the filter field as `(Boa, Hancock)~`. Simple names are added as-is.
|
||||
|
||||
|
||||
---
|
||||
## What's in v3.5.0? (Previous Update)
|
||||
This version brought significant enhancements to manga/comic downloading, filtering capabilities, and user experience:
|
||||
|
||||
### Enhanced Manga/Comic Mode
|
||||
|
||||
- **Optional Filename Prefix:**
|
||||
- When using the "Date Based" or "Original File Name" manga styles, an optional prefix can be specified in the UI.
|
||||
- This prefix will be prepended to each filename generated by these styles.
|
||||
- **Example (Date Based):** If prefix is `MySeries_`, files become `MySeries_001.jpg`, `MySeries_002.png`, etc.
|
||||
- **Example (Original File Name):** If prefix is `Comic_Vol1_`, an original file `page_01.jpg` becomes `Comic_Vol1_page_01.jpg`.
|
||||
- This input field appears automatically when either of these two manga naming styles is selected.
|
||||
|
||||
- **New "Date Based" Filename Style:**
|
||||
|
||||
- Perfect for truly sequential content! Files are named numerically (e.g., `001.jpg`, `002.jpg`, `003.ext`...) across an *entire creator's feed*, strictly following post publication order.
|
||||
@@ -87,6 +144,13 @@ This version brings significant enhancements to manga/comic downloading, filteri
|
||||
- **Guaranteed Order:** Disables multi-threading for post processing to ensure sequential accuracy.
|
||||
|
||||
- Works alongside the existing "Post Title" and "Original File Name" styles.
|
||||
- **New "Title+G.Num (Post Title + Global Numbering)" Filename Style:**
|
||||
- Ideal for series where you want each file to be prefixed by its post title but still maintain a global sequential number across all posts from a single download session.
|
||||
- **Naming Convention:** Files are named using the cleaned post title as a prefix, followed by an underscore and a globally incrementing number (e.g., `Post Title_001.ext`, `Post Title_002.ext`).
|
||||
- **Example:**
|
||||
- Post "Chapter 1: The Adventure Begins" (contains 2 files: `imageA.jpg`, `imageB.png`) -> `Chapter 1 The Adventure Begins_001.jpg`, `Chapter 1 The Adventure Begins_002.png`
|
||||
- Next Post "Chapter 2: New Friends" (contains 1 file: `cover.jpg`) -> `Chapter 2 New Friends_003.jpg`
|
||||
- **Sequential Integrity:** Multithreading for post processing is automatically disabled when this style is selected to ensure the global numbering is strictly sequential.
|
||||
|
||||
---
|
||||
|
||||
@@ -169,6 +233,7 @@ This version brings significant enhancements to manga/comic downloading, filteri
|
||||
- `Nami` (simple character)
|
||||
- `(Boa Hancock)~` (aliases for one character, session folder "Boa Hancock", adds `(Boa Hancock)` to `Known.txt`)
|
||||
- `(Vivi, Uta)` (distinct characters, session folder "Vivi Uta", adds `Vivi` and `Uta` separately to `Known.txt`)
|
||||
- A "⤵️ Add to Filter" button (near the `Known.txt` management UI) allows you to quickly populate this field by selecting from your existing `Known.txt` entries via a popup with search and checkbox selection.
|
||||
- See "Advanced `Known.txt` and Character Filtering" for full details.
|
||||
- **Filter Scopes:**
|
||||
- `Files`
|
||||
@@ -200,6 +265,7 @@ This version brings significant enhancements to manga/comic downloading, filteri
|
||||
- `Name: Post Title (Default)`
|
||||
- `Name: Original File`
|
||||
- `Name: Date Based (New)`
|
||||
- `Name: Title+G.Num (Post Title + Global Numbering)`
|
||||
|
||||
- **Best With:** Character filters set to manga/series title
|
||||
|
||||
@@ -217,12 +283,17 @@ This version brings significant enhancements to manga/comic downloading, filteri
|
||||
---
|
||||
|
||||
### Thumbnail & Compression Tools
|
||||
|
||||
- **Download Thumbnails Only:**
|
||||
- Downloads small preview images from the API instead of full-sized files (if available).
|
||||
- **Interaction with "Scan Content for Images" (New in v4.1.1):** When "Download Thumbnails Only" is active, "Scan Content for Images" is auto-enabled, and only images found by the content scan are downloaded. See "What's New in v4.1.1" for details.
|
||||
- **Scan Content for Images (New in v4.1.1):**
|
||||
- A UI option to scan the HTML content of posts for embedded image URLs (from `<img>` tags or direct links).
|
||||
- Resolves relative paths and helps capture images not listed in the API's formal attachments.
|
||||
- See the "What's New in v4.1.1?" section for a comprehensive explanation.
|
||||
- **Compress to WebP** (via Pillow)
|
||||
- Converts large images to smaller WebP versions
|
||||
|
||||
|
||||
---
|
||||
|
||||
### Performance Features
|
||||
|
||||
Reference in New Issue
Block a user