7 Commits

Author SHA1 Message Date
Yuvi9587
191dbc8c62 Commit 2025-06-19 08:25:30 +01:00
Yuvi9587
3c1b361fc1 Update main.py 2025-06-16 11:28:28 +01:00
Yuvi9587
953dbaebf0 Commit 2025-06-16 10:46:23 +01:00
Yuvi9587
efd5458493 Update main.py 2025-06-16 08:13:01 +01:00
Yuvi9587
3473f6540d Commit 2025-06-15 09:49:09 +01:00
Yuvi9587
7fe5f4b83e Commit 2025-06-14 11:40:44 +01:00
Yuvi9587
072b582622 Update languages.py 2025-06-14 11:32:29 +01:00
5 changed files with 1774 additions and 781 deletions

View File

@@ -8,6 +8,7 @@ import hashlib
import http .client import http .client
import traceback import traceback
from concurrent .futures import ThreadPoolExecutor ,Future ,CancelledError ,as_completed from concurrent .futures import ThreadPoolExecutor ,Future ,CancelledError ,as_completed
from collections import deque
import html import html
from PyQt5 .QtCore import QObject ,pyqtSignal ,QThread ,QMutex ,QMutexLocker from PyQt5 .QtCore import QObject ,pyqtSignal ,QThread ,QMutex ,QMutexLocker
from urllib .parse import urlparse from urllib .parse import urlparse
@@ -41,6 +42,7 @@ from io import BytesIO
STYLE_POST_TITLE ="post_title" STYLE_POST_TITLE ="post_title"
STYLE_ORIGINAL_NAME ="original_name" STYLE_ORIGINAL_NAME ="original_name"
STYLE_DATE_BASED ="date_based" STYLE_DATE_BASED ="date_based"
STYLE_DATE_POST_TITLE ="date_post_title"
MANGA_DATE_PREFIX_DEFAULT ="" MANGA_DATE_PREFIX_DEFAULT =""
STYLE_POST_TITLE_GLOBAL_NUMBERING ="post_title_global_numbering" STYLE_POST_TITLE_GLOBAL_NUMBERING ="post_title_global_numbering"
SKIP_SCOPE_FILES ="files" SKIP_SCOPE_FILES ="files"
@@ -509,13 +511,31 @@ def fetch_post_comments (api_domain ,service ,user_id ,post_id ,headers ,logger
raise RuntimeError (f"Error decoding JSON from comments API for post {post_id } ({comments_api_url }): {e }. Response text: {response .text [:200 ]}") raise RuntimeError (f"Error decoding JSON from comments API for post {post_id } ({comments_api_url }): {e }. Response text: {response .text [:200 ]}")
except Exception as e : except Exception as e :
raise RuntimeError (f"Unexpected error fetching comments for post {post_id } ({comments_api_url }): {e }") raise RuntimeError (f"Unexpected error fetching comments for post {post_id } ({comments_api_url }): {e }")
def download_from_api (api_url_input ,logger =print ,start_page =None ,end_page =None ,manga_mode =False , def download_from_api (
cancellation_event =None ,pause_event =None ,use_cookie =False ,cookie_text ="",selected_cookie_file =None ,app_base_dir =None ): api_url_input ,
headers ={'User-Agent':'Mozilla/5.0','Accept':'application/json'} logger =print ,
start_page =None ,
end_page =None ,
manga_mode =False ,
cancellation_event =None ,
pause_event =None ,
use_cookie =False ,
cookie_text ="",
selected_cookie_file =None ,
app_base_dir =None ,
manga_filename_style_for_sort_check =None
):
headers ={
'User-Agent':'Mozilla/5.0',
'Accept':'application/json'
}
service ,user_id ,target_post_id =extract_post_info (api_url_input ) service ,user_id ,target_post_id =extract_post_info (api_url_input )
if cancellation_event and cancellation_event .is_set (): if cancellation_event and cancellation_event .is_set ():
logger (" Download_from_api cancelled at start.") logger (" Download_from_api cancelled at start.")
return return
parsed_input_url_for_domain =urlparse (api_url_input ) parsed_input_url_for_domain =urlparse (api_url_input )
api_domain =parsed_input_url_for_domain .netloc api_domain =parsed_input_url_for_domain .netloc
if not any (d in api_domain .lower ()for d in ['kemono.su','kemono.party','coomer.su','coomer.party']): if not any (d in api_domain .lower ()for d in ['kemono.su','kemono.party','coomer.su','coomer.party']):
@@ -552,11 +572,12 @@ cancellation_event =None ,pause_event =None ,use_cookie =False ,cookie_text ="",
return return
if target_post_id and (start_page or end_page ): if target_post_id and (start_page or end_page ):
logger ("⚠️ Page range (start/end page) is ignored when a specific post URL is provided (searching all pages for the post).") logger ("⚠️ Page range (start/end page) is ignored when a specific post URL is provided (searching all pages for the post).")
is_creator_feed_for_manga =manga_mode and not target_post_id
is_manga_mode_fetch_all_and_sort_oldest_first =manga_mode and (manga_filename_style_for_sort_check !=STYLE_DATE_POST_TITLE )and not target_post_id
api_base_url =f"https://{api_domain }/api/v1/{service }/user/{user_id }" api_base_url =f"https://{api_domain }/api/v1/{service }/user/{user_id }"
page_size =50 page_size =50
if is_creator_feed_for_manga : if is_manga_mode_fetch_all_and_sort_oldest_first :
logger (" Manga Mode: Fetching posts to sort by date (oldest processed first)...") logger (f" Manga Mode (Style: {manga_filename_style_for_sort_check if manga_filename_style_for_sort_check else 'Default'} - Oldest First Sort Active): Fetching all posts to sort by date...")
all_posts_for_manga_mode =[] all_posts_for_manga_mode =[]
current_offset_manga =0 current_offset_manga =0
if start_page and start_page >1 : if start_page and start_page >1 :
@@ -635,6 +656,12 @@ cancellation_event =None ,pause_event =None ,use_cookie =False ,cookie_text ="",
break break
yield all_posts_for_manga_mode [i :i +page_size ] yield all_posts_for_manga_mode [i :i +page_size ]
return return
if manga_mode and not target_post_id and (manga_filename_style_for_sort_check ==STYLE_DATE_POST_TITLE ):
logger (f" Manga Mode (Style: {STYLE_DATE_POST_TITLE }): Processing posts in default API order (newest first).")
current_page_num =1 current_page_num =1
current_offset =0 current_offset =0
processed_target_post_flag =False processed_target_post_flag =False
@@ -727,8 +754,10 @@ class PostProcessorSignals (QObject ):
file_download_status_signal =pyqtSignal (bool ) file_download_status_signal =pyqtSignal (bool )
external_link_signal =pyqtSignal (str ,str ,str ,str ,str ) external_link_signal =pyqtSignal (str ,str ,str ,str ,str )
file_progress_signal =pyqtSignal (str ,object ) file_progress_signal =pyqtSignal (str ,object )
file_successfully_downloaded_signal =pyqtSignal (dict )
missed_character_post_signal =pyqtSignal (str ,str ) missed_character_post_signal =pyqtSignal (str ,str )
class PostProcessorWorker : class PostProcessorWorker :
def __init__ (self ,post_data ,download_root ,known_names , def __init__ (self ,post_data ,download_root ,known_names ,
filter_character_list ,emitter , filter_character_list ,emitter ,
unwanted_keywords ,filter_mode ,skip_zip ,skip_rar , unwanted_keywords ,filter_mode ,skip_zip ,skip_rar ,
@@ -836,7 +865,7 @@ class PostProcessorWorker :
post_title ="",file_index_in_post =0 ,num_files_in_this_post =1 , post_title ="",file_index_in_post =0 ,num_files_in_this_post =1 ,
manga_date_file_counter_ref =None ): manga_date_file_counter_ref =None ):
was_original_name_kept_flag =False was_original_name_kept_flag =False
manga_global_file_counter_ref =None
final_filename_saved_for_return ="" final_filename_saved_for_return =""
def _get_current_character_filters (self ): def _get_current_character_filters (self ):
if self .dynamic_filter_holder : if self .dynamic_filter_holder :
@@ -846,7 +875,7 @@ class PostProcessorWorker :
post_title ="",file_index_in_post =0 ,num_files_in_this_post =1 , post_title ="",file_index_in_post =0 ,num_files_in_this_post =1 ,
manga_date_file_counter_ref =None , manga_date_file_counter_ref =None ,
forced_filename_override =None , forced_filename_override =None ,
manga_global_file_counter_ref =None ): manga_global_file_counter_ref =None ,folder_context_name_for_history =None ):
was_original_name_kept_flag =False was_original_name_kept_flag =False
final_filename_saved_for_return ="" final_filename_saved_for_return =""
retry_later_details =None retry_later_details =None
@@ -948,6 +977,48 @@ class PostProcessorWorker :
self .logger (f"⚠️ Manga Title+GlobalNum Mode: Counter ref not provided or malformed for '{api_original_filename }'. Using original. Ref: {manga_global_file_counter_ref }") self .logger (f"⚠️ Manga Title+GlobalNum Mode: Counter ref not provided or malformed for '{api_original_filename }'. Using original. Ref: {manga_global_file_counter_ref }")
filename_to_save_in_main_path =cleaned_original_api_filename filename_to_save_in_main_path =cleaned_original_api_filename
self .logger (f"⚠️ Manga mode (Title+GlobalNum Style Fallback): Using cleaned original filename '{filename_to_save_in_main_path }' for post {original_post_id_for_log }.") self .logger (f"⚠️ Manga mode (Title+GlobalNum Style Fallback): Using cleaned original filename '{filename_to_save_in_main_path }' for post {original_post_id_for_log }.")
elif self .manga_filename_style ==STYLE_DATE_POST_TITLE :
published_date_str =self .post .get ('published')
added_date_str =self .post .get ('added')
formatted_date_str ="nodate"
if published_date_str :
try :
formatted_date_str =published_date_str .split ('T')[0 ]
except Exception :
self .logger (f" ⚠️ Could not parse 'published' date '{published_date_str }' for STYLE_DATE_POST_TITLE. Using 'nodate'.")
elif added_date_str :
try :
formatted_date_str =added_date_str .split ('T')[0 ]
self .logger (f" ⚠️ Post ID {original_post_id_for_log } missing 'published' date, using 'added' date '{added_date_str }' for STYLE_DATE_POST_TITLE naming.")
except Exception :
self .logger (f" ⚠️ Could not parse 'added' date '{added_date_str }' for STYLE_DATE_POST_TITLE. Using 'nodate'.")
else :
self .logger (f" ⚠️ Post ID {original_post_id_for_log } missing both 'published' and 'added' dates for STYLE_DATE_POST_TITLE. Using 'nodate'.")
if post_title and post_title .strip ():
temp_cleaned_title =clean_filename (post_title .strip ())
if not temp_cleaned_title or temp_cleaned_title .startswith ("untitled_file"):
self .logger (f"⚠️ Manga mode (Date+PostTitle Style): Post title for post {original_post_id_for_log } ('{post_title }') was empty or generic after cleaning. Using 'post' as title part.")
cleaned_post_title_for_filename ="post"
else :
cleaned_post_title_for_filename =temp_cleaned_title
base_name_for_style =f"{formatted_date_str }_{cleaned_post_title_for_filename }"
if num_files_in_this_post >1 :
filename_to_save_in_main_path =f"{base_name_for_style }_{file_index_in_post }{original_ext }"if file_index_in_post >0 else f"{base_name_for_style }{original_ext }"
else :
filename_to_save_in_main_path =f"{base_name_for_style }{original_ext }"
else :
self .logger (f"⚠️ Manga mode (Date+PostTitle Style): Post title missing for post {original_post_id_for_log }. Using 'post' as title part with date prefix.")
cleaned_post_title_for_filename ="post"
base_name_for_style =f"{formatted_date_str }_{cleaned_post_title_for_filename }"
if num_files_in_this_post >1 :
filename_to_save_in_main_path =f"{base_name_for_style }_{file_index_in_post }{original_ext }"if file_index_in_post >0 else f"{base_name_for_style }{original_ext }"
else :
filename_to_save_in_main_path =f"{base_name_for_style }{original_ext }"
self .logger (f"⚠️ Manga mode (Title+GlobalNum Style Fallback): Using cleaned original filename '{filename_to_save_in_main_path }' for post {original_post_id_for_log }.")
else : else :
self .logger (f"⚠️ Manga mode: Unknown filename style '{self .manga_filename_style }'. Defaulting to original filename for '{api_original_filename }'.") self .logger (f"⚠️ Manga mode: Unknown filename style '{self .manga_filename_style }'. Defaulting to original filename for '{api_original_filename }'.")
filename_to_save_in_main_path =cleaned_original_api_filename filename_to_save_in_main_path =cleaned_original_api_filename
@@ -1320,7 +1391,23 @@ class PostProcessorWorker :
with self .downloaded_files_lock :self .downloaded_files .add (filename_to_save_in_main_path ) with self .downloaded_files_lock :self .downloaded_files .add (filename_to_save_in_main_path )
final_filename_saved_for_return =final_filename_on_disk final_filename_saved_for_return =final_filename_on_disk
self .logger (f"✅ Saved: '{final_filename_saved_for_return }' (from '{api_original_filename }', {downloaded_size_bytes /(1024 *1024 ):.2f} MB) in '{os .path .basename (effective_save_folder )}'") self .logger (f"✅ Saved: '{final_filename_saved_for_return }' (from '{api_original_filename }', {downloaded_size_bytes /(1024 *1024 ):.2f} MB) in '{os .path .basename (effective_save_folder )}'")
downloaded_file_details ={
'disk_filename':final_filename_saved_for_return ,
'post_title':post_title ,
'post_id':original_post_id_for_log ,
'upload_date_str':self .post .get ('published')or self .post .get ('added')or "N/A",
'download_timestamp':time .time (),
'download_path':effective_save_folder ,
'service':self .service ,
'user_id':self .user_id ,
'api_original_filename':api_original_filename ,
'folder_context_name':folder_context_name_for_history or os .path .basename (effective_save_folder )
}
self ._emit_signal ('file_successfully_downloaded',downloaded_file_details )
time .sleep (0.05 ) time .sleep (0.05 )
return 1 ,0 ,final_filename_saved_for_return ,was_original_name_kept_flag ,FILE_DOWNLOAD_STATUS_SUCCESS ,None return 1 ,0 ,final_filename_saved_for_return ,was_original_name_kept_flag ,FILE_DOWNLOAD_STATUS_SUCCESS ,None
except Exception as save_err : except Exception as save_err :
self .logger (f"->>Save Fail for '{final_filename_on_disk }': {save_err }") self .logger (f"->>Save Fail for '{final_filename_on_disk }': {save_err }")
@@ -1336,14 +1423,16 @@ class PostProcessorWorker :
def process (self ): def process (self ):
if self ._check_pause (f"Post processing for ID {self .post .get ('id','N/A')}"):return 0 ,0 ,[],[],[] if self ._check_pause (f"Post processing for ID {self .post .get ('id','N/A')}"):return 0 ,0 ,[],[],[],None
if self .check_cancel ():return 0 ,0 ,[],[],[] if self .check_cancel ():return 0 ,0 ,[],[],[],None
current_character_filters =self ._get_current_character_filters () current_character_filters =self ._get_current_character_filters ()
kept_original_filenames_for_log =[] kept_original_filenames_for_log =[]
retryable_failures_this_post =[] retryable_failures_this_post =[]
permanent_failures_this_post =[] permanent_failures_this_post =[]
total_downloaded_this_post =0 total_downloaded_this_post =0
total_skipped_this_post =0 total_skipped_this_post =0
history_data_for_this_post =None
parsed_api_url =urlparse (self .api_url_input ) parsed_api_url =urlparse (self .api_url_input )
referer_url =f"https://{parsed_api_url .netloc }/" referer_url =f"https://{parsed_api_url .netloc }/"
headers ={'User-Agent':'Mozilla/5.0','Referer':referer_url ,'Accept':'*/*'} headers ={'User-Agent':'Mozilla/5.0','Referer':referer_url ,'Accept':'*/*'}
@@ -1371,7 +1460,7 @@ class PostProcessorWorker :
char_filter_that_matched_file_in_comment_scope =None char_filter_that_matched_file_in_comment_scope =None
char_filter_that_matched_comment =None char_filter_that_matched_comment =None
if current_character_filters and (self .char_filter_scope ==CHAR_SCOPE_TITLE or self .char_filter_scope ==CHAR_SCOPE_BOTH ): if current_character_filters and (self .char_filter_scope ==CHAR_SCOPE_TITLE or self .char_filter_scope ==CHAR_SCOPE_BOTH ):
if self ._check_pause (f"Character title filter for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[] if self ._check_pause (f"Character title filter for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[],[],None
for idx ,filter_item_obj in enumerate (current_character_filters ): for idx ,filter_item_obj in enumerate (current_character_filters ):
if self .check_cancel ():break if self .check_cancel ():break
terms_to_check_for_title =list (filter_item_obj ["aliases"]) terms_to_check_for_title =list (filter_item_obj ["aliases"])
@@ -1402,7 +1491,7 @@ class PostProcessorWorker :
all_files_from_post_api_for_char_check .append ({'_original_name_for_log':original_api_att_name }) all_files_from_post_api_for_char_check .append ({'_original_name_for_log':original_api_att_name })
if current_character_filters and self .char_filter_scope ==CHAR_SCOPE_COMMENTS : if current_character_filters and self .char_filter_scope ==CHAR_SCOPE_COMMENTS :
self .logger (f" [Char Scope: Comments] Phase 1: Checking post files for matches before comments for post ID '{post_id }'.") self .logger (f" [Char Scope: Comments] Phase 1: Checking post files for matches before comments for post ID '{post_id }'.")
if self ._check_pause (f"File check (comments scope) for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[] if self ._check_pause (f"File check (comments scope) for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[],[],None
for file_info_item in all_files_from_post_api_for_char_check : for file_info_item in all_files_from_post_api_for_char_check :
if self .check_cancel ():break if self .check_cancel ():break
current_api_original_filename_for_check =file_info_item .get ('_original_name_for_log') current_api_original_filename_for_check =file_info_item .get ('_original_name_for_log')
@@ -1422,7 +1511,7 @@ class PostProcessorWorker :
self .logger (f" [Char Scope: Comments] Phase 1 Result: post_is_candidate_by_file_char_match_in_comment_scope = {post_is_candidate_by_file_char_match_in_comment_scope }") self .logger (f" [Char Scope: Comments] Phase 1 Result: post_is_candidate_by_file_char_match_in_comment_scope = {post_is_candidate_by_file_char_match_in_comment_scope }")
if current_character_filters and self .char_filter_scope ==CHAR_SCOPE_COMMENTS : if current_character_filters and self .char_filter_scope ==CHAR_SCOPE_COMMENTS :
if not post_is_candidate_by_file_char_match_in_comment_scope : if not post_is_candidate_by_file_char_match_in_comment_scope :
if self ._check_pause (f"Comment check for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[] if self ._check_pause (f"Comment check for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[],[],None
self .logger (f" [Char Scope: Comments] Phase 2: No file match found. Checking post comments for post ID '{post_id }'.") self .logger (f" [Char Scope: Comments] Phase 2: No file match found. Checking post comments for post ID '{post_id }'.")
try : try :
parsed_input_url_for_comments =urlparse (self .api_url_input ) parsed_input_url_for_comments =urlparse (self .api_url_input )
@@ -1471,29 +1560,30 @@ class PostProcessorWorker :
if self .char_filter_scope ==CHAR_SCOPE_TITLE and not post_is_candidate_by_title_char_match : if self .char_filter_scope ==CHAR_SCOPE_TITLE and not post_is_candidate_by_title_char_match :
self .logger (f" -> Skip Post (Scope: Title - No Char Match): Title '{post_title [:50 ]}' does not match character filters.") self .logger (f" -> Skip Post (Scope: Title - No Char Match): Title '{post_title [:50 ]}' does not match character filters.")
self ._emit_signal ('missed_character_post',post_title ,"No title match for character filter") self ._emit_signal ('missed_character_post',post_title ,"No title match for character filter")
return 0 ,num_potential_files_in_post ,[],[],[] return 0 ,num_potential_files_in_post ,[],[],[],None
if self .char_filter_scope ==CHAR_SCOPE_COMMENTS and not post_is_candidate_by_file_char_match_in_comment_scope and not post_is_candidate_by_comment_char_match : if self .char_filter_scope ==CHAR_SCOPE_COMMENTS and not post_is_candidate_by_file_char_match_in_comment_scope and not post_is_candidate_by_comment_char_match :
self .logger (f" -> Skip Post (Scope: Comments - No Char Match in Comments): Post ID '{post_id }', Title '{post_title [:50 ]}...'") self .logger (f" -> Skip Post (Scope: Comments - No Char Match in Comments): Post ID '{post_id }', Title '{post_title [:50 ]}...'")
if self .emitter and hasattr (self .emitter ,'missed_character_post_signal'): if self .emitter and hasattr (self .emitter ,'missed_character_post_signal'):
self ._emit_signal ('missed_character_post',post_title ,"No character match in files or comments (Comments scope)") self ._emit_signal ('missed_character_post',post_title ,"No character match in files or comments (Comments scope)")
return 0 ,num_potential_files_in_post ,[],[],[] return 0 ,num_potential_files_in_post ,[],[],[],None
if self .skip_words_list and (self .skip_words_scope ==SKIP_SCOPE_POSTS or self .skip_words_scope ==SKIP_SCOPE_BOTH ): if self .skip_words_list and (self .skip_words_scope ==SKIP_SCOPE_POSTS or self .skip_words_scope ==SKIP_SCOPE_BOTH ):
if self ._check_pause (f"Skip words (post title) for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[] if self ._check_pause (f"Skip words (post title) for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[],[],None
post_title_lower =post_title .lower () post_title_lower =post_title .lower ()
for skip_word in self .skip_words_list : for skip_word in self .skip_words_list :
if skip_word .lower ()in post_title_lower : if skip_word .lower ()in post_title_lower :
self .logger (f" -> Skip Post (Keyword in Title '{skip_word }'): '{post_title [:50 ]}...'. Scope: {self .skip_words_scope }") self .logger (f" -> Skip Post (Keyword in Title '{skip_word }'): '{post_title [:50 ]}...'. Scope: {self .skip_words_scope }")
return 0 ,num_potential_files_in_post ,[],[],[] return 0 ,num_potential_files_in_post ,[],[],[],None
if not self .extract_links_only and self .manga_mode_active and current_character_filters and (self .char_filter_scope ==CHAR_SCOPE_TITLE or self .char_filter_scope ==CHAR_SCOPE_BOTH )and not post_is_candidate_by_title_char_match : if not self .extract_links_only and self .manga_mode_active and current_character_filters and (self .char_filter_scope ==CHAR_SCOPE_TITLE or self .char_filter_scope ==CHAR_SCOPE_BOTH )and not post_is_candidate_by_title_char_match :
self .logger (f" -> Skip Post (Manga Mode with Title/Both Scope - No Title Char Match): Title '{post_title [:50 ]}' doesn't match filters.") self .logger (f" -> Skip Post (Manga Mode with Title/Both Scope - No Title Char Match): Title '{post_title [:50 ]}' doesn't match filters.")
self ._emit_signal ('missed_character_post',post_title ,"Manga Mode: No title match for character filter (Title/Both scope)") self ._emit_signal ('missed_character_post',post_title ,"Manga Mode: No title match for character filter (Title/Both scope)")
return 0 ,num_potential_files_in_post ,[],[],[] return 0 ,num_potential_files_in_post ,[],[],[],None
if not isinstance (post_attachments ,list ): if not isinstance (post_attachments ,list ):
self .logger (f"⚠️ Corrupt attachment data for post {post_id } (expected list, got {type (post_attachments )}). Skipping attachments.") self .logger (f"⚠️ Corrupt attachment data for post {post_id } (expected list, got {type (post_attachments )}). Skipping attachments.")
post_attachments =[] post_attachments =[]
base_folder_names_for_post_content =[] base_folder_names_for_post_content =[]
determined_post_save_path_for_history =self .override_output_dir if self .override_output_dir else self .download_root
if not self .extract_links_only and self .use_subfolders : if not self .extract_links_only and self .use_subfolders :
if self ._check_pause (f"Subfolder determination for post {post_id }"):return 0 ,num_potential_files_in_post ,[] if self ._check_pause (f"Subfolder determination for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[],[],None
primary_char_filter_for_folder =None primary_char_filter_for_folder =None
log_reason_for_folder ="" log_reason_for_folder =""
if self .char_filter_scope ==CHAR_SCOPE_COMMENTS and char_filter_that_matched_comment : if self .char_filter_scope ==CHAR_SCOPE_COMMENTS and char_filter_that_matched_comment :
@@ -1593,16 +1683,65 @@ class PostProcessorWorker :
final_fallback_name =clean_folder_name (post_title if post_title and post_title .strip ()else "Generic Post Content") final_fallback_name =clean_folder_name (post_title if post_title and post_title .strip ()else "Generic Post Content")
base_folder_names_for_post_content =[final_fallback_name ] base_folder_names_for_post_content =[final_fallback_name ]
self .logger (f" Ultimate fallback folder name: {final_fallback_name }") self .logger (f" Ultimate fallback folder name: {final_fallback_name }")
if base_folder_names_for_post_content :
determined_post_save_path_for_history =os .path .join (determined_post_save_path_for_history ,base_folder_names_for_post_content [0 ])
if not self .extract_links_only and self .use_post_subfolders :
cleaned_post_title_for_sub =clean_folder_name (post_title )
post_id_for_fallback = self.post.get('id', 'unknown_id') # Ensure post_id is available
# Fallback to a more unique name if the cleaned title is generic
if not cleaned_post_title_for_sub or cleaned_post_title_for_sub == "untitled_folder":
self.logger(f" ⚠️ Post title '{post_title}' resulted in a generic subfolder name. Using 'post_{post_id_for_fallback}' as base.")
original_cleaned_post_title_for_sub = f"post_{post_id_for_fallback}"
else:
original_cleaned_post_title_for_sub = cleaned_post_title_for_sub
# Path before adding the post-specific subfolder
base_path_for_post_subfolder = determined_post_save_path_for_history
suffix_counter = 0 # 0 for no suffix, 1 for _1, etc.
final_post_subfolder_name = ""
while True:
if suffix_counter == 0:
name_candidate = original_cleaned_post_title_for_sub
else:
name_candidate = f"{original_cleaned_post_title_for_sub}_{suffix_counter}"
potential_post_subfolder_path = os.path.join(base_path_for_post_subfolder, name_candidate)
try:
os.makedirs(potential_post_subfolder_path, exist_ok=False)
final_post_subfolder_name = name_candidate
if suffix_counter > 0: # Log only if a suffix was actually needed and used
self.logger(f" Post subfolder name conflict: Using '{final_post_subfolder_name}' instead of '{original_cleaned_post_title_for_sub}' to avoid mixing posts.")
break
except FileExistsError:
suffix_counter += 1
if suffix_counter > 100: # Safety break
self.logger(f" ⚠️ Exceeded 100 attempts to find unique subfolder name for '{original_cleaned_post_title_for_sub}'. Using UUID.")
final_post_subfolder_name = f"{original_cleaned_post_title_for_sub}_{uuid.uuid4().hex[:8]}"
os.makedirs(os.path.join(base_path_for_post_subfolder, final_post_subfolder_name), exist_ok=True) # Create with exist_ok=True as a last resort
break
except OSError as e_mkdir:
self.logger(f" ❌ Error creating directory '{potential_post_subfolder_path}': {e_mkdir}. Files for this post might be saved in parent or fail.")
final_post_subfolder_name = original_cleaned_post_title_for_sub # Fallback
break
determined_post_save_path_for_history = os.path.join(base_path_for_post_subfolder, final_post_subfolder_name)
if not self .extract_links_only and self .use_subfolders and self .skip_words_list : if not self .extract_links_only and self .use_subfolders and self .skip_words_list :
if self ._check_pause (f"Folder keyword skip check for post {post_id }"):return 0 ,num_potential_files_in_post ,[] if self ._check_pause (f"Folder keyword skip check for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[],[],None
for folder_name_to_check in base_folder_names_for_post_content : for folder_name_to_check in base_folder_names_for_post_content :
if not folder_name_to_check :continue if not folder_name_to_check :continue
if any (skip_word .lower ()in folder_name_to_check .lower ()for skip_word in self .skip_words_list ): if any (skip_word .lower ()in folder_name_to_check .lower ()for skip_word in self .skip_words_list ):
matched_skip =next ((sw for sw in self .skip_words_list if sw .lower ()in folder_name_to_check .lower ()),"unknown_skip_word") matched_skip =next ((sw for sw in self .skip_words_list if sw .lower ()in folder_name_to_check .lower ()),"unknown_skip_word")
self .logger (f" -> Skip Post (Folder Keyword): Potential folder '{folder_name_to_check }' contains '{matched_skip }'.") self .logger (f" -> Skip Post (Folder Keyword): Potential folder '{folder_name_to_check }' contains '{matched_skip }'.")
return 0 ,num_potential_files_in_post ,[],[],[] return 0 ,num_potential_files_in_post ,[],[],[],None
if (self .show_external_links or self .extract_links_only )and post_content_html : if (self .show_external_links or self .extract_links_only )and post_content_html :
if self ._check_pause (f"External link extraction for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[] if self ._check_pause (f"External link extraction for post {post_id }"):return 0 ,num_potential_files_in_post ,[],[],[],None
try : try :
mega_key_pattern =re .compile (r'\b([a-zA-Z0-9_-]{43}|[a-zA-Z0-9_-]{22})\b') mega_key_pattern =re .compile (r'\b([a-zA-Z0-9_-]{43}|[a-zA-Z0-9_-]{22})\b')
unique_links_data ={} unique_links_data ={}
@@ -1642,7 +1781,7 @@ class PostProcessorWorker :
except Exception as e :self .logger (f"⚠️ Error parsing post content for links: {e }\n{traceback .format_exc (limit =2 )}") except Exception as e :self .logger (f"⚠️ Error parsing post content for links: {e }\n{traceback .format_exc (limit =2 )}")
if self .extract_links_only : if self .extract_links_only :
self .logger (f" Extract Links Only mode: Finished processing post {post_id } for links.") self .logger (f" Extract Links Only mode: Finished processing post {post_id } for links.")
return 0 ,0 ,[],[],[] return 0 ,0 ,[],[],[],None
all_files_from_post_api =[] all_files_from_post_api =[]
api_file_domain =urlparse (self .api_url_input ).netloc api_file_domain =urlparse (self .api_url_input ).netloc
if not api_file_domain or not any (d in api_file_domain .lower ()for d in ['kemono.su','kemono.party','coomer.su','coomer.party']): if not api_file_domain or not any (d in api_file_domain .lower ()for d in ['kemono.su','kemono.party','coomer.su','coomer.party']):
@@ -1729,13 +1868,13 @@ class PostProcessorWorker :
all_files_from_post_api =[finfo for finfo in all_files_from_post_api if finfo .get ('_from_content_scan')] all_files_from_post_api =[finfo for finfo in all_files_from_post_api if finfo .get ('_from_content_scan')]
if not all_files_from_post_api : if not all_files_from_post_api :
self .logger (f" -> No images found via content scan for post {post_id } in this combined mode.") self .logger (f" -> No images found via content scan for post {post_id } in this combined mode.")
return 0 ,0 ,[],[],[] return 0 ,0 ,[],[],[],None
else : else :
self .logger (f" Mode: 'Download Thumbnails Only' active. Filtering for API thumbnails for post {post_id }.") self .logger (f" Mode: 'Download Thumbnails Only' active. Filtering for API thumbnails for post {post_id }.")
all_files_from_post_api =[finfo for finfo in all_files_from_post_api if finfo .get ('_is_thumbnail')] all_files_from_post_api =[finfo for finfo in all_files_from_post_api if finfo .get ('_is_thumbnail')]
if not all_files_from_post_api : if not all_files_from_post_api :
self .logger (f" -> No API image thumbnails found for post {post_id } in thumbnail-only mode.") self .logger (f" -> No API image thumbnails found for post {post_id } in thumbnail-only mode.")
return 0 ,0 ,[],[],[] return 0 ,0 ,[],[],[],None
if self .manga_mode_active and self .manga_filename_style ==STYLE_DATE_BASED : if self .manga_mode_active and self .manga_filename_style ==STYLE_DATE_BASED :
def natural_sort_key_for_files (file_api_info ): def natural_sort_key_for_files (file_api_info ):
name =file_api_info .get ('_original_name_for_log','').lower () name =file_api_info .get ('_original_name_for_log','').lower ()
@@ -1744,7 +1883,7 @@ class PostProcessorWorker :
self .logger (f" Manga Date Mode: Sorted {len (all_files_from_post_api )} files within post {post_id } by original name for sequential numbering.") self .logger (f" Manga Date Mode: Sorted {len (all_files_from_post_api )} files within post {post_id } by original name for sequential numbering.")
if not all_files_from_post_api : if not all_files_from_post_api :
self .logger (f" No files found to download for post {post_id }.") self .logger (f" No files found to download for post {post_id }.")
return 0 ,0 ,[],[],[] return 0 ,0 ,[],[],[],None
files_to_download_info_list =[] files_to_download_info_list =[]
processed_original_filenames_in_this_post =set () processed_original_filenames_in_this_post =set ()
for file_info in all_files_from_post_api : for file_info in all_files_from_post_api :
@@ -1758,7 +1897,7 @@ class PostProcessorWorker :
processed_original_filenames_in_this_post .add (current_api_original_filename ) processed_original_filenames_in_this_post .add (current_api_original_filename )
if not files_to_download_info_list : if not files_to_download_info_list :
self .logger (f" All files for post {post_id } were duplicate original names or skipped earlier.") self .logger (f" All files for post {post_id } were duplicate original names or skipped earlier.")
return 0 ,total_skipped_this_post ,[],[],[] return 0 ,total_skipped_this_post ,[],[],[],None
self .logger (f" Identified {len (files_to_download_info_list )} unique original file(s) for potential download from post {post_id }.") self .logger (f" Identified {len (files_to_download_info_list )} unique original file(s) for potential download from post {post_id }.")
with ThreadPoolExecutor (max_workers =self .num_file_threads ,thread_name_prefix =f'P{post_id }File_')as file_pool : with ThreadPoolExecutor (max_workers =self .num_file_threads ,thread_name_prefix =f'P{post_id }File_')as file_pool :
@@ -1854,19 +1993,22 @@ class PostProcessorWorker :
if self .use_subfolders and target_base_folder_name_for_instance : if self .use_subfolders and target_base_folder_name_for_instance :
current_path_for_file_instance =os .path .join (current_path_for_file_instance ,target_base_folder_name_for_instance ) current_path_for_file_instance =os .path .join (current_path_for_file_instance ,target_base_folder_name_for_instance )
if self .use_post_subfolders : if self .use_post_subfolders :
cleaned_title_for_subfolder_instance =clean_folder_name (post_title ) # Use the final_post_subfolder_name determined earlier, which includes suffix if needed
current_path_for_file_instance =os .path .join (current_path_for_file_instance ,cleaned_title_for_subfolder_instance ) current_path_for_file_instance =os .path .join (current_path_for_file_instance ,final_post_subfolder_name )
manga_date_counter_to_pass =self .manga_date_file_counter_ref if self .manga_mode_active and self .manga_filename_style ==STYLE_DATE_BASED else None manga_date_counter_to_pass =self .manga_date_file_counter_ref if self .manga_mode_active and self .manga_filename_style ==STYLE_DATE_BASED else None
manga_global_counter_to_pass =self .manga_global_file_counter_ref if self .manga_mode_active and self .manga_filename_style ==STYLE_POST_TITLE_GLOBAL_NUMBERING else None manga_global_counter_to_pass =self .manga_global_file_counter_ref if self .manga_mode_active and self .manga_filename_style ==STYLE_POST_TITLE_GLOBAL_NUMBERING else None
folder_context_for_file =target_base_folder_name_for_instance if self .use_subfolders and target_base_folder_name_for_instance else clean_folder_name (post_title )
futures_list .append (file_pool .submit ( futures_list .append (file_pool .submit (
self ._download_single_file , self ._download_single_file ,
file_info =file_info_to_dl , file_info =file_info_to_dl ,
target_folder_path =current_path_for_file_instance , target_folder_path =current_path_for_file_instance ,
headers =headers ,original_post_id_for_log =post_id ,skip_event =self .skip_current_file_flag , headers =headers ,original_post_id_for_log =post_id ,skip_event =self .skip_current_file_flag ,
post_title =post_title ,manga_date_file_counter_ref =manga_date_counter_to_pass , post_title =post_title ,manga_date_file_counter_ref =manga_date_counter_to_pass ,
manga_global_file_counter_ref =manga_global_counter_to_pass , manga_global_file_counter_ref =manga_global_counter_to_pass ,folder_context_name_for_history =folder_context_for_file ,
file_index_in_post =file_idx ,num_files_in_this_post =len (files_to_download_info_list ) file_index_in_post =file_idx ,num_files_in_this_post =len (files_to_download_info_list )
)) ))
@@ -1893,18 +2035,62 @@ class PostProcessorWorker :
self .logger (f"❌ File download task for post {post_id } resulted in error: {exc_f }") self .logger (f"❌ File download task for post {post_id } resulted in error: {exc_f }")
total_skipped_this_post +=1 total_skipped_this_post +=1
self ._emit_signal ('file_progress',"",None ) self ._emit_signal ('file_progress',"",None )
if not self .extract_links_only and (total_downloaded_this_post >0 or not (
(current_character_filters and (
(self .char_filter_scope ==CHAR_SCOPE_TITLE and not post_is_candidate_by_title_char_match )or
(self .char_filter_scope ==CHAR_SCOPE_COMMENTS and not post_is_candidate_by_file_char_match_in_comment_scope and not post_is_candidate_by_comment_char_match )
))or
(self .skip_words_list and (self .skip_words_scope ==SKIP_SCOPE_POSTS or self .skip_words_scope ==SKIP_SCOPE_BOTH )and any (sw .lower ()in post_title .lower ()for sw in self .skip_words_list ))
)):
top_file_name_for_history ="N/A"
if post_main_file_info and post_main_file_info .get ('name'):
top_file_name_for_history =post_main_file_info ['name']
elif post_attachments and post_attachments [0 ].get ('name'):
top_file_name_for_history =post_attachments [0 ]['name']
history_data_for_this_post ={
'post_title':post_title ,'post_id':post_id ,
'top_file_name':top_file_name_for_history ,
'num_files':num_potential_files_in_post ,
'upload_date_str':post_data .get ('published')or post_data .get ('added')or "Unknown",
'download_location':determined_post_save_path_for_history ,
'service':self .service ,'user_id':self .user_id ,
}
if self .check_cancel ():self .logger (f" Post {post_id } processing interrupted/cancelled."); if self .check_cancel ():self .logger (f" Post {post_id } processing interrupted/cancelled.");
else :self .logger (f" Post {post_id } Summary: Downloaded={total_downloaded_this_post }, Skipped Files={total_skipped_this_post }") else :self .logger (f" Post {post_id } Summary: Downloaded={total_downloaded_this_post }, Skipped Files={total_skipped_this_post }")
return total_downloaded_this_post ,total_skipped_this_post ,kept_original_filenames_for_log ,retryable_failures_this_post ,permanent_failures_this_post
# Cleanup: Remove empty post-specific subfolder if created and no files were downloaded
if not self.extract_links_only and self.use_post_subfolders and total_downloaded_this_post == 0:
# determined_post_save_path_for_history at this point holds the full path to the post-specific subfolder
# if self.use_post_subfolders was true and it was applied.
# base_path_for_post_subfolder was the path *before* the post-specific segment.
# final_post_subfolder_name was the segment itself.
# So, determined_post_save_path_for_history is the correct path to check.
path_to_check_for_emptiness = determined_post_save_path_for_history
try:
if os.path.isdir(path_to_check_for_emptiness) and not os.listdir(path_to_check_for_emptiness):
self.logger(f" 🗑️ Removing empty post-specific subfolder: '{path_to_check_for_emptiness}'")
os.rmdir(path_to_check_for_emptiness)
except OSError as e_rmdir:
self.logger(f" ⚠️ Could not remove empty post-specific subfolder '{path_to_check_for_emptiness}': {e_rmdir}")
return total_downloaded_this_post ,total_skipped_this_post ,kept_original_filenames_for_log ,retryable_failures_this_post ,permanent_failures_this_post ,history_data_for_this_post
class DownloadThread (QThread ): class DownloadThread (QThread ):
progress_signal =pyqtSignal (str ) progress_signal =pyqtSignal (str )
add_character_prompt_signal =pyqtSignal (str ) add_character_prompt_signal =pyqtSignal (str )
file_download_status_signal =pyqtSignal (bool ) file_download_status_signal =pyqtSignal (bool )
finished_signal =pyqtSignal (int ,int ,bool ,list ) finished_signal =pyqtSignal (int ,int ,bool ,list )
external_link_signal =pyqtSignal (str ,str ,str ,str ,str ) external_link_signal =pyqtSignal (str ,str ,str ,str ,str )
file_successfully_downloaded_signal =pyqtSignal (dict )
file_progress_signal =pyqtSignal (str ,object ) file_progress_signal =pyqtSignal (str ,object )
retryable_file_failed_signal =pyqtSignal (list ) retryable_file_failed_signal =pyqtSignal (list )
missed_character_post_signal =pyqtSignal (str ,str ) missed_character_post_signal =pyqtSignal (str ,str )
post_processed_for_history_signal =pyqtSignal (dict )
final_history_entries_signal =pyqtSignal (list )
permanent_file_failed_signal =pyqtSignal (list ) permanent_file_failed_signal =pyqtSignal (list )
def __init__ (self ,api_url_input ,output_dir ,known_names_copy , def __init__ (self ,api_url_input ,output_dir ,known_names_copy ,
cancellation_event , cancellation_event ,
@@ -1987,6 +2173,7 @@ class DownloadThread (QThread ):
self .scan_content_for_images =scan_content_for_images self .scan_content_for_images =scan_content_for_images
self .creator_download_folder_ignore_words =creator_download_folder_ignore_words self .creator_download_folder_ignore_words =creator_download_folder_ignore_words
self .manga_global_file_counter_ref =manga_global_file_counter_ref self .manga_global_file_counter_ref =manga_global_file_counter_ref
self .history_candidates_buffer =deque (maxlen =8 )
if self .compress_images and Image is None : if self .compress_images and Image is None :
self .logger ("⚠️ Image compression disabled: Pillow library not found (DownloadThread).") self .logger ("⚠️ Image compression disabled: Pillow library not found (DownloadThread).")
self .compress_images =False self .compress_images =False
@@ -2052,6 +2239,7 @@ class DownloadThread (QThread ):
worker_signals_obj .file_progress_signal .connect (self .file_progress_signal ) worker_signals_obj .file_progress_signal .connect (self .file_progress_signal )
worker_signals_obj .external_link_signal .connect (self .external_link_signal ) worker_signals_obj .external_link_signal .connect (self .external_link_signal )
worker_signals_obj .missed_character_post_signal .connect (self .missed_character_post_signal ) worker_signals_obj .missed_character_post_signal .connect (self .missed_character_post_signal )
worker_signals_obj .file_successfully_downloaded_signal .connect (self .file_successfully_downloaded_signal )
self .logger (" Starting post fetch (single-threaded download process)...") self .logger (" Starting post fetch (single-threaded download process)...")
post_generator =download_from_api ( post_generator =download_from_api (
self .api_url_input , self .api_url_input ,
@@ -2064,7 +2252,8 @@ class DownloadThread (QThread ):
use_cookie =self .use_cookie , use_cookie =self .use_cookie ,
cookie_text =self .cookie_text , cookie_text =self .cookie_text ,
selected_cookie_file =self .selected_cookie_file , selected_cookie_file =self .selected_cookie_file ,
app_base_dir =self .app_base_dir app_base_dir =self .app_base_dir ,
manga_filename_style_for_sort_check =self .manga_filename_style if self .manga_mode_active else None
) )
for posts_batch_data in post_generator : for posts_batch_data in post_generator :
if self ._check_pause_self ("Post batch processing"):was_process_cancelled =True ;break if self ._check_pause_self ("Post batch processing"):was_process_cancelled =True ;break
@@ -2116,13 +2305,16 @@ class DownloadThread (QThread ):
creator_download_folder_ignore_words =self .creator_download_folder_ignore_words , creator_download_folder_ignore_words =self .creator_download_folder_ignore_words ,
) )
try : try :
dl_count ,skip_count ,kept_originals_this_post ,retryable_failures ,permanent_failures =post_processing_worker .process () dl_count ,skip_count ,kept_originals_this_post ,retryable_failures ,permanent_failures ,history_data =post_processing_worker .process ()
grand_total_downloaded_files +=dl_count grand_total_downloaded_files +=dl_count
grand_total_skipped_files +=skip_count grand_total_skipped_files +=skip_count
if kept_originals_this_post : if kept_originals_this_post :
grand_list_of_kept_original_filenames .extend (kept_originals_this_post ) grand_list_of_kept_original_filenames .extend (kept_originals_this_post )
if retryable_failures : if retryable_failures :
self .retryable_file_failed_signal .emit (retryable_failures ) self .retryable_file_failed_signal .emit (retryable_failures )
if history_data :
if len (self .history_candidates_buffer )<8 :
self .post_processed_for_history_signal .emit (history_data )
if permanent_failures : if permanent_failures :
self .permanent_file_failed_signal .emit (permanent_failures ) self .permanent_file_failed_signal .emit (permanent_failures )
except Exception as proc_err : except Exception as proc_err :
@@ -2138,6 +2330,10 @@ class DownloadThread (QThread ):
if was_process_cancelled :break if was_process_cancelled :break
if not was_process_cancelled and not self .isInterruptionRequested (): if not was_process_cancelled and not self .isInterruptionRequested ():
self .logger ("✅ All posts processed or end of content reached by DownloadThread.") self .logger ("✅ All posts processed or end of content reached by DownloadThread.")
except Exception as main_thread_err : except Exception as main_thread_err :
self .logger (f"\n❌ Critical error within DownloadThread run loop: {main_thread_err }") self .logger (f"\n❌ Critical error within DownloadThread run loop: {main_thread_err }")
traceback .print_exc () traceback .print_exc ()
@@ -2150,6 +2346,7 @@ class DownloadThread (QThread ):
worker_signals_obj .external_link_signal .disconnect (self .external_link_signal ) worker_signals_obj .external_link_signal .disconnect (self .external_link_signal )
worker_signals_obj .file_progress_signal .disconnect (self .file_progress_signal ) worker_signals_obj .file_progress_signal .disconnect (self .file_progress_signal )
worker_signals_obj .missed_character_post_signal .disconnect (self .missed_character_post_signal ) worker_signals_obj .missed_character_post_signal .disconnect (self .missed_character_post_signal )
worker_signals_obj .file_successfully_downloaded_signal .disconnect (self .file_successfully_downloaded_signal )
except (TypeError ,RuntimeError )as e : except (TypeError ,RuntimeError )as e :
self .logger (f" Note during DownloadThread signal disconnection: {e }") self .logger (f" Note during DownloadThread signal disconnection: {e }")

View File

@@ -17,7 +17,9 @@ These are the primary controls you'll interact with to initiate and manage downl
- Kemono.su (and mirrors) individual posts (e.g., `https://kemono.su/patreon/user/12345/post/98765`). - Kemono.su (and mirrors) individual posts (e.g., `https://kemono.su/patreon/user/12345/post/98765`).
- Coomer.party (and mirrors like coomer.su) creator pages. - Coomer.party (and mirrors like coomer.su) creator pages.
- Coomer.party (and mirrors) individual posts. - Coomer.party (and mirrors) individual posts.
- **Note:** When **⭐ Favorite Mode** is active, this field is disabled and shows a "Favorite Mode active" message. - **Note:**
- When **⭐ Favorite Mode** is active, this field is disabled and shows a "Favorite Mode active" message.
- This field can also be populated with a placeholder message (e.g., "{count} items in queue from popup") if posts are added to the download queue directly from the 'Creator Selection' dialog's 'Fetched Posts' view.
- **🎨 Creator Selection Button:** - **🎨 Creator Selection Button:**
- **Icon:** 🎨 (Artist Palette) - **Icon:** 🎨 (Artist Palette)
@@ -29,9 +31,17 @@ These are the primary controls you'll interact with to initiate and manage downl
- **Creator List:** Displays creators with their service (e.g., Patreon, Fanbox) and ID. - **Creator List:** Displays creators with their service (e.g., Patreon, Fanbox) and ID.
- **Selection:** Checkboxes to select one or more creators. - **Selection:** Checkboxes to select one or more creators.
- **"Add Selected to URL" Button:** Adds the names of selected creators to the URL input field, comma-separated. - **"Add Selected to URL" Button:** Adds the names of selected creators to the URL input field, comma-separated.
- **"Fetch Posts" Button:** After selecting creators, click this to retrieve their latest posts. This will display a new pane within the dialog showing the fetched posts.
- **"Download Scope" Radio Buttons (`Characters` / `Creators`):** Determines the folder structure for items added via this popup. - **"Download Scope" Radio Buttons (`Characters` / `Creators`):** Determines the folder structure for items added via this popup.
- `Characters`: Assumes creator names are character names for folder organization. - `Characters`: Assumes creator names are character names for folder organization.
- `Creators`: Uses the actual creator names for folder organization. - `Creators`: Uses the actual creator names for folder organization.
- **Fetched Posts View (Right Pane - Appears after clicking 'Fetch Posts'):**
- **Posts Area Title Label:** Indicates loading status or number of fetched posts.
- **Posts Search Input:** Allows filtering the list of fetched posts by title.
- **Posts List Widget:** Displays posts fetched from the selected creators, often grouped by creator. Each post is checkable.
- **Select All / Deselect All Buttons (for Posts):** Convenience buttons for selecting/deselecting all displayed fetched posts.
- **"Add Selected Posts to Queue" Button:** Adds all checked posts from this view directly to the application's main download queue. The main URL input field will then show a message like "{count} items in queue from popup".
- **"Close" Button (for Posts View):** Hides the fetched posts view and returns to the creator selection list, allowing you to use the 'Add Selected to URL' button if preferred.
- **Page Range (Start to End) Input Fields:** - **Page Range (Start to End) Input Fields:**
- **Purpose:** For creator URLs, specify a range of pages to fetch and process. - **Purpose:** For creator URLs, specify a range of pages to fetch and process.

View File

@@ -1,4 +1,3 @@
translations ={ translations ={
"en":{ "en":{
"settings_dialog_title":"Settings", "settings_dialog_title":"Settings",
@@ -155,6 +154,7 @@ translations ={
"manga_style_original_file_text":"Name: Original File", "manga_style_original_file_text":"Name: Original File",
"manga_style_date_based_text":"Name: Date Based", "manga_style_date_based_text":"Name: Date Based",
"manga_style_title_global_num_text":"Name: Title+G.Num", "manga_style_title_global_num_text":"Name: Title+G.Num",
"manga_style_date_post_title_text":"Name: Date + Title",
"manga_style_unknown_text":"Name: Unknown Style", "manga_style_unknown_text":"Name: Unknown Style",
"manga_style_post_title_tooltip":"""Files are named based on the post's title. "manga_style_post_title_tooltip":"""Files are named based on the post's title.
- The first file in a post is named using the cleaned post title (e.g., 'My Chapter 1.jpg'). - The first file in a post is named using the cleaned post title (e.g., 'My Chapter 1.jpg').
@@ -166,6 +166,13 @@ Output: 'Chapter One.jpg', 'Chapter One_1.png', 'Chapter One_2.gif'.""",
"manga_style_original_file_tooltip":"Files attempt to keep their original filenames.\n\n- An optional prefix can be entered in the input field that appears next to this button.\n\nExample (with prefix 'MySeries'): 'MySeries_OriginalFile.jpg'.\nExample (no prefix): 'OriginalFile.jpg'.", "manga_style_original_file_tooltip":"Files attempt to keep their original filenames.\n\n- An optional prefix can be entered in the input field that appears next to this button.\n\nExample (with prefix 'MySeries'): 'MySeries_OriginalFile.jpg'.\nExample (no prefix): 'OriginalFile.jpg'.",
"manga_style_date_based_tooltip":"Files are named sequentially (e.g., 001.ext, 002.ext) based on post publication order.\n\n- An optional prefix can be entered in the input field that appears next to this button.\n- Multithreading for post processing is disabled for this style to ensure correct numbering.\n\nExample (with prefix 'MyComic'): 'MyComic_001.jpg', 'MyComic_002.png'.\nExample (no prefix): '001.jpg', '002.png'.", "manga_style_date_based_tooltip":"Files are named sequentially (e.g., 001.ext, 002.ext) based on post publication order.\n\n- An optional prefix can be entered in the input field that appears next to this button.\n- Multithreading for post processing is disabled for this style to ensure correct numbering.\n\nExample (with prefix 'MyComic'): 'MyComic_001.jpg', 'MyComic_002.png'.\nExample (no prefix): '001.jpg', '002.png'.",
"manga_style_title_global_num_tooltip":"Files are named with the post's title and a global sequential number across all posts.\n\n- Format: '[Cleaned Post Title]_[Global Counter].[ext]'\n- The counter (e.g., _001, _002) increments for every file downloaded in the current session.\n- Multithreading for post processing is disabled for this style to ensure correct numbering.\n\nExample: Post 'Chapter 1' (2 files) -> 'Chapter 1_001.jpg', 'Chapter 1_002.png'.\nNext post 'Chapter 2' (1 file) -> 'Chapter 2_003.jpg'.", "manga_style_title_global_num_tooltip":"Files are named with the post's title and a global sequential number across all posts.\n\n- Format: '[Cleaned Post Title]_[Global Counter].[ext]'\n- The counter (e.g., _001, _002) increments for every file downloaded in the current session.\n- Multithreading for post processing is disabled for this style to ensure correct numbering.\n\nExample: Post 'Chapter 1' (2 files) -> 'Chapter 1_001.jpg', 'Chapter 1_002.png'.\nNext post 'Chapter 2' (1 file) -> 'Chapter 2_003.jpg'.",
"manga_style_date_post_title_tooltip":"""Files are named using the post's publication date and its title.
- Format: '[YYYY-MM-DD]_[Cleaned Post Title].[ext]'
- The date is taken from the post's 'published' or 'added' field.
- If a post has multiple files, subsequent files (after the first) get a numeric suffix like '_1', '_2'.
Example: Post 'Chapter One' (published 2023-01-15, 2 files: a.jpg, b.png)
Output: '2023-01-15_ChapterOne.jpg', '2023-01-15_ChapterOne_1.png'""",
"manga_style_unknown_tooltip":"The manga filename style is currently unknown. This is unexpected. Please cycle to a valid style.", "manga_style_unknown_tooltip":"The manga filename style is currently unknown. This is unexpected. Please cycle to a valid style.",
"manga_style_cycle_tooltip_suffix":"Click to cycle to the next style.", "manga_style_cycle_tooltip_suffix":"Click to cycle to the next style.",
"fav_artists_dialog_title":"Favorite Artists", "fav_artists_dialog_title":"Favorite Artists",
@@ -279,6 +286,9 @@ Output: 'Chapter One.jpg', 'Chapter One_1.png', 'Chapter One_2.gif'.""",
"new_char_input_tooltip_text":"Enter a new show, game, or character name to add to the list above.", "new_char_input_tooltip_text":"Enter a new show, game, or character name to add to the list above.",
"link_search_input_placeholder_text":"Search Links...", "link_search_input_placeholder_text":"Search Links...",
"link_search_input_tooltip_text":"When in 'Only Links' mode, type here to filter the displayed links by text, URL, or platform.", "link_search_input_tooltip_text":"When in 'Only Links' mode, type here to filter the displayed links by text, URL, or platform.",
"manga_date_title_suffix_input_placeholder_text":"Suffix (replaces title)",
"manga_date_title_suffix_input_tooltip_text":"Optional suffix for 'Date + Title' style.\nIf provided, this text will be used instead of the post title.\nExample: 'My Series Vol 1'",
"history_button_tooltip_text":"View download history (Not Implemented Yet)",
"manga_date_prefix_input_placeholder_text":"Prefix for Manga Filenames", "manga_date_prefix_input_placeholder_text":"Prefix for Manga Filenames",
"manga_date_prefix_input_tooltip_text":"Optional prefix for 'Date Based' or 'Original File' manga filenames (e.g., 'Series Name').\nIf empty, files will be named based on the style without a prefix.", "manga_date_prefix_input_tooltip_text":"Optional prefix for 'Date Based' or 'Original File' manga filenames (e.g., 'Series Name').\nIf empty, files will be named based on the style without a prefix.",
"log_display_mode_links_view_text":"🔗 Links View", "log_display_mode_links_view_text":"🔗 Links View",
@@ -286,6 +296,22 @@ Output: 'Chapter One.jpg', 'Chapter One_1.png', 'Chapter One_2.gif'.""",
"download_external_links_dialog_title":"Download Selected External Links", "download_external_links_dialog_title":"Download Selected External Links",
"select_all_button_text":"Select All", "select_all_button_text":"Select All",
"deselect_all_button_text":"Deselect All", "deselect_all_button_text":"Deselect All",
"deselect_all_button_text":"Deselect All", # Existing, but good to have for context
"settings_download_group_title": "Download Settings",
"settings_save_path_button": "Save Current Download Path",
"settings_save_path_tooltip": "Save the current 'Download Location' from the main window for future sessions.",
"settings_save_path_success_title": "Path Saved",
"settings_save_path_success_message": "Download location '{path}' saved successfully.",
"settings_save_path_invalid_title": "Invalid Path",
"settings_save_path_invalid_message": "The path '{path}' is not a valid directory. Please select a valid directory first.",
"settings_save_path_empty_title": "Empty Path",
"settings_save_path_empty_message": "Download location cannot be empty. Please select a path first.",
"settings_save_all_settings_button_text": "Save All Settings",
"settings_save_all_settings_button_tooltip": "Save all current application settings (download path, checkboxes, inputs, etc.).",
"settings_all_saved_success_title": "Settings Saved",
"settings_all_saved_success_message": "All application settings saved successfully.",
"settings_all_saved_error_title": "Save Error",
"settings_all_saved_error_message": "Could not save all application settings. Check the log for details.",
"cookie_browse_button_tooltip":"Browse for a cookie file (Netscape format, typically cookies.txt).\nThis will be used if 'Use Cookie' is checked and the text field above is empty." "cookie_browse_button_tooltip":"Browse for a cookie file (Netscape format, typically cookies.txt).\nThis will be used if 'Use Cookie' is checked and the text field above is empty."
, ,
"page_range_label_text":"Page Range:", "page_range_label_text":"Page Range:",
@@ -307,6 +333,14 @@ Output: 'Chapter One.jpg', 'Chapter One_1.png', 'Chapter One_2.gif'.""",
"cookie_help_dialog_title":"Cookie File Instructions", "cookie_help_dialog_title":"Cookie File Instructions",
"cookie_help_instruction_intro":"<p>To use cookies, you typically need a <b>cookies.txt</b> file from your browser.</p>", "cookie_help_instruction_intro":"<p>To use cookies, you typically need a <b>cookies.txt</b> file from your browser.</p>",
"cookie_help_how_to_get_title":"<p><b>How to get cookies.txt:</b></p>", "cookie_help_how_to_get_title":"<p><b>How to get cookies.txt:</b></p>",
"download_history_dialog_title_first_processed": "First Processed Files History",
"first_files_processed_header": "First {count} Files Processed in this Session:",
"history_file_label": "File:",
"history_from_post_label": "From Post:",
"history_post_uploaded_label": "Post Uploaded:",
"history_file_downloaded_label": "File Downloaded:",
"download_history_dialog_title_empty": "Download History (Empty)",
"no_download_history_header": "No Downloads Yet",
"cookie_help_step1_extension_intro":"<li>Install the 'Get cookies.txt LOCALLY' extension for your Chrome-based browser:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Get cookies.txt LOCALLY on Chrome Web Store</a></li>", "cookie_help_step1_extension_intro":"<li>Install the 'Get cookies.txt LOCALLY' extension for your Chrome-based browser:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Get cookies.txt LOCALLY on Chrome Web Store</a></li>",
"cookie_help_step2_login":"<li>Go to the website (e.g., kemono.su or coomer.su) and log in if necessary.</li>", "cookie_help_step2_login":"<li>Go to the website (e.g., kemono.su or coomer.su) and log in if necessary.</li>",
"cookie_help_step3_click_icon":"<li>Click the extension's icon in your browser toolbar.</li>", "cookie_help_step3_click_icon":"<li>Click the extension's icon in your browser toolbar.</li>",
@@ -827,6 +861,14 @@ Output: 'Chapter One.jpg', 'Chapter One_1.png', 'Chapter One_2.gif'.""",
"cookie_help_dialog_title":"Cookieファイルの説明", "cookie_help_dialog_title":"Cookieファイルの説明",
"cookie_help_instruction_intro":"<p>Cookieを使用するには、通常ブラウザから<b>cookies.txt</b>ファイルが必要です。</p>", "cookie_help_instruction_intro":"<p>Cookieを使用するには、通常ブラウザから<b>cookies.txt</b>ファイルが必要です。</p>",
"cookie_help_how_to_get_title":"<p><b>cookies.txtの入手方法:</b></p>", "cookie_help_how_to_get_title":"<p><b>cookies.txtの入手方法:</b></p>",
"download_history_dialog_title_first_processed": "最初に処理されたファイルの履歴",
"first_files_processed_header": "このセッションで最初に処理された {count} 個のファイル:",
"history_file_label": "ファイル:",
"history_from_post_label": "投稿元:",
"history_post_uploaded_label": "投稿アップロード日時:",
"history_file_downloaded_label": "ファイルダウンロード日時:",
"download_history_dialog_title_empty": "ダウンロード履歴 (空)",
"no_download_history_header": "まだダウンロードがありません",
"cookie_help_step1_extension_intro":"<li>Chromeベースのブラウザに「Get cookies.txt LOCALLY」拡張機能をインストールします:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">ChromeウェブストアでGet cookies.txt LOCALLYを入手</a></li>", "cookie_help_step1_extension_intro":"<li>Chromeベースのブラウザに「Get cookies.txt LOCALLY」拡張機能をインストールします:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">ChromeウェブストアでGet cookies.txt LOCALLYを入手</a></li>",
"cookie_help_step2_login":"<li>ウェブサイト(例: kemono.suまたはcoomer.suにアクセスし、必要に応じてログインします。</li>", "cookie_help_step2_login":"<li>ウェブサイト(例: kemono.suまたはcoomer.suにアクセスし、必要に応じてログインします。</li>",
"cookie_help_step3_click_icon":"<li>ブラウザのツールバーにある拡張機能のアイコンをクリックします。</li>", "cookie_help_step3_click_icon":"<li>ブラウザのツールバーにある拡張機能のアイコンをクリックします。</li>",
@@ -1326,6 +1368,14 @@ Sortie : 'Chapitre Un.jpg', 'Chapitre Un_1.png', 'Chapitre Un_2.gif'.""",
"cookie_help_dialog_title":"Instructions pour le fichier de cookies", "cookie_help_dialog_title":"Instructions pour le fichier de cookies",
"cookie_help_instruction_intro":"<p>Pour utiliser les cookies, vous avez généralement besoin d'un fichier <b>cookies.txt</b> de votre navigateur.</p>", "cookie_help_instruction_intro":"<p>Pour utiliser les cookies, vous avez généralement besoin d'un fichier <b>cookies.txt</b> de votre navigateur.</p>",
"cookie_help_how_to_get_title":"<p><b>Comment obtenir cookies.txt :</b></p>", "cookie_help_how_to_get_title":"<p><b>Comment obtenir cookies.txt :</b></p>",
"download_history_dialog_title_first_processed": "Historique des premiers fichiers traités",
"first_files_processed_header": "{count} premiers fichiers traités dans cette session :",
"history_file_label": "Fichier :",
"history_from_post_label": "De la publication :",
"history_post_uploaded_label": "Publication téléversée le :",
"history_file_downloaded_label": "Fichier téléchargé le :",
"download_history_dialog_title_empty": "Historique des téléchargements (Vide)",
"no_download_history_header": "Aucun téléchargement pour le moment",
"cookie_help_step1_extension_intro":"<li>Installez l'extension 'Get cookies.txt LOCALLY' pour votre navigateur basé sur Chrome :<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Get cookies.txt LOCALLY sur le Chrome Web Store</a></li>", "cookie_help_step1_extension_intro":"<li>Installez l'extension 'Get cookies.txt LOCALLY' pour votre navigateur basé sur Chrome :<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Get cookies.txt LOCALLY sur le Chrome Web Store</a></li>",
"cookie_help_step2_login":"<li>Allez sur le site web (ex., kemono.su ou coomer.su) et connectez-vous si nécessaire.</li>", "cookie_help_step2_login":"<li>Allez sur le site web (ex., kemono.su ou coomer.su) et connectez-vous si nécessaire.</li>",
"cookie_help_step3_click_icon":"<li>Cliquez sur l'icône de l'extension dans la barre d'outils de votre navigateur.</li>", "cookie_help_step3_click_icon":"<li>Cliquez sur l'icône de l'extension dans la barre d'outils de votre navigateur.</li>",
@@ -1406,7 +1456,7 @@ translations ["en"].update ({
"creator_popup_add_selected_button": "Add Selected", # Already exists, but good to confirm "creator_popup_add_selected_button": "Add Selected", # Already exists, but good to confirm
"fetch_posts_button_text": "Fetch Posts", "fetch_posts_button_text": "Fetch Posts",
"creator_popup_scope_characters_button": "Scope: Characters", "creator_popup_scope_characters_button": "Scope: Characters",
"creator_popup_title_fetching": "Beiträge des Erstellers", "creator_popup_title_fetching": "Creator Selection",
"creator_popup_posts_area_title": "Abgerufene Beiträge", "creator_popup_posts_area_title": "Fetched Posts",
"creator_popup_posts_search_placeholder": "Abgerufene Beiträge nach Titel suchen...", "creator_popup_posts_search_placeholder": "Search fetched posts by title...",
"no_posts_fetched_yet_status": "Noch keine Beiträge abgerufen.", "no_posts_fetched_yet_status": "No posts fetched yet.",
@@ -2365,6 +2415,14 @@ translations ["zh_CN"].update ({
"cookie_help_dialog_title":"Cookie 文件说明", "cookie_help_dialog_title":"Cookie 文件说明",
"cookie_help_instruction_intro":"<p>要使用 cookie您通常需要浏览器中的 <b>cookies.txt</b> 文件。</p>", "cookie_help_instruction_intro":"<p>要使用 cookie您通常需要浏览器中的 <b>cookies.txt</b> 文件。</p>",
"cookie_help_how_to_get_title":"<p><b>如何获取 cookies.txt</b></p>", "cookie_help_how_to_get_title":"<p><b>如何获取 cookies.txt</b></p>",
"download_history_dialog_title_first_processed": "首次处理文件历史记录",
"first_files_processed_header": "此会话中首次处理的 {count} 个文件:",
"history_file_label": "文件:",
"history_from_post_label": "来自帖子:",
"history_post_uploaded_label": "帖子上传于:",
"history_file_downloaded_label": "文件下载于:",
"download_history_dialog_title_empty": "下载历史记录(空)",
"no_download_history_header": "尚无下载",
"cookie_help_step1_extension_intro":"<li>为您的基于 Chrome 的浏览器安装“Get cookies.txt LOCALLY”扩展程序<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">在 Chrome 网上应用店获取 Get cookies.txt LOCALLY</a></li>", "cookie_help_step1_extension_intro":"<li>为您的基于 Chrome 的浏览器安装“Get cookies.txt LOCALLY”扩展程序<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">在 Chrome 网上应用店获取 Get cookies.txt LOCALLY</a></li>",
"cookie_help_step2_login":"<li>转到网站例如kemono.su 或 coomer.su并根据需要登录。</li>", "cookie_help_step2_login":"<li>转到网站例如kemono.su 或 coomer.su并根据需要登录。</li>",
"cookie_help_step3_click_icon":"<li>单击浏览器工具栏中的扩展程序图标。</li>", "cookie_help_step3_click_icon":"<li>单击浏览器工具栏中的扩展程序图标。</li>",
@@ -2716,6 +2774,14 @@ translations ["ru"].update ({
"cookie_help_dialog_title":"Инструкции по файлу cookie", "cookie_help_dialog_title":"Инструкции по файлу cookie",
"cookie_help_instruction_intro":"<p>Для использования файлов cookie обычно требуется файл <b>cookies.txt</b> из вашего браузера.</p>", "cookie_help_instruction_intro":"<p>Для использования файлов cookie обычно требуется файл <b>cookies.txt</b> из вашего браузера.</p>",
"cookie_help_how_to_get_title":"<p><b>Как получить cookies.txt:</b></p>", "cookie_help_how_to_get_title":"<p><b>Как получить cookies.txt:</b></p>",
"download_history_dialog_title_first_processed": "История первых обработанных файлов",
"first_files_processed_header": "Первые {count} файлов, обработанных в этой сессии:",
"history_file_label": "Файл:",
"history_from_post_label": "Из поста:",
"history_post_uploaded_label": "Пост загружен:",
"history_file_downloaded_label": "Файл скачан:",
"download_history_dialog_title_empty": "История загрузок (пусто)",
"no_download_history_header": "Загрузок пока нет",
"cookie_help_step1_extension_intro":"<li>Установите расширение 'Get cookies.txt LOCALLY' для вашего браузера на основе Chrome:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Получить cookies.txt LOCALLY в Chrome Web Store</a></li>", "cookie_help_step1_extension_intro":"<li>Установите расширение 'Get cookies.txt LOCALLY' для вашего браузера на основе Chrome:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Получить cookies.txt LOCALLY в Chrome Web Store</a></li>",
"cookie_help_step2_login":"<li>Перейдите на веб-сайт (например, kemono.su или coomer.su) и при необходимости войдите в систему.</li>", "cookie_help_step2_login":"<li>Перейдите на веб-сайт (например, kemono.su или coomer.su) и при необходимости войдите в систему.</li>",
"cookie_help_step3_click_icon":"<li>Нажмите на значок расширения на панели инструментов вашего браузера.</li>", "cookie_help_step3_click_icon":"<li>Нажмите на значок расширения на панели инструментов вашего браузера.</li>",
@@ -3066,6 +3132,14 @@ translations ["ko"].update ({
"cookie_help_dialog_title":"쿠키 파일 지침", "cookie_help_dialog_title":"쿠키 파일 지침",
"cookie_help_instruction_intro":"<p>쿠키를 사용하려면 일반적으로 브라우저에서 <b>cookies.txt</b> 파일이 필요합니다.</p>", "cookie_help_instruction_intro":"<p>쿠키를 사용하려면 일반적으로 브라우저에서 <b>cookies.txt</b> 파일이 필요합니다.</p>",
"cookie_help_how_to_get_title":"<p><b>cookies.txt를 얻는 방법:</b></p>", "cookie_help_how_to_get_title":"<p><b>cookies.txt를 얻는 방법:</b></p>",
"download_history_dialog_title_first_processed": "처음 처리된 파일 기록",
"first_files_processed_header": "이 세션에서 처음 처리된 {count}개 파일:",
"history_file_label": "파일:",
"history_from_post_label": "게시물 출처:",
"history_post_uploaded_label": "게시물 업로드 날짜:",
"history_file_downloaded_label": "파일 다운로드 날짜:",
"download_history_dialog_title_empty": "다운로드 기록 (비어 있음)",
"no_download_history_header": "아직 다운로드 없음",
"cookie_help_step1_extension_intro":"<li>Chrome 기반 브라우저용 'Get cookies.txt LOCALLY' 확장 프로그램을 설치하십시오:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Chrome 웹 스토어에서 Get cookies.txt LOCALLY 받기</a></li>", "cookie_help_step1_extension_intro":"<li>Chrome 기반 브라우저용 'Get cookies.txt LOCALLY' 확장 프로그램을 설치하십시오:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Chrome 웹 스토어에서 Get cookies.txt LOCALLY 받기</a></li>",
"cookie_help_step2_login":"<li>웹사이트(예: kemono.su 또는 coomer.su)로 이동하여 필요한 경우 로그인하십시오.</li>", "cookie_help_step2_login":"<li>웹사이트(예: kemono.su 또는 coomer.su)로 이동하여 필요한 경우 로그인하십시오.</li>",
"cookie_help_step3_click_icon":"<li>브라우저 도구 모음에서 확장 프로그램 아이콘을 클릭하십시오.</li>", "cookie_help_step3_click_icon":"<li>브라우저 도구 모음에서 확장 프로그램 아이콘을 클릭하십시오.</li>",
@@ -3416,6 +3490,14 @@ Salida: 'Capítulo Uno.jpg', 'Capítulo Uno_1.png', 'Capítulo Uno_2.gif'.""",
"cookie_help_dialog_title":"Instrucciones del archivo de cookies", "cookie_help_dialog_title":"Instrucciones del archivo de cookies",
"cookie_help_instruction_intro":"<p>Para usar cookies, normalmente necesita un archivo <b>cookies.txt</b> de su navegador.</p>", "cookie_help_instruction_intro":"<p>Para usar cookies, normalmente necesita un archivo <b>cookies.txt</b> de su navegador.</p>",
"cookie_help_how_to_get_title":"<p><b>Cómo obtener cookies.txt:</b></p>", "cookie_help_how_to_get_title":"<p><b>Cómo obtener cookies.txt:</b></p>",
"download_history_dialog_title_first_processed": "Historial de los primeros archivos procesados",
"first_files_processed_header": "Primeros {count} archivos procesados en esta sesión:",
"history_file_label": "Archivo:",
"history_from_post_label": "De la publicación:",
"history_post_uploaded_label": "Publicación subida el:",
"history_file_downloaded_label": "Archivo descargado el:",
"download_history_dialog_title_empty": "Historial de descargas (Vacío)",
"no_download_history_header": "Aún no hay descargas",
"cookie_help_step1_extension_intro":"<li>Instale la extensión 'Get cookies.txt LOCALLY' para su navegador basado en Chrome:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Obtener cookies.txt LOCALLY en Chrome Web Store</a></li>", "cookie_help_step1_extension_intro":"<li>Instale la extensión 'Get cookies.txt LOCALLY' para su navegador basado en Chrome:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Obtener cookies.txt LOCALLY en Chrome Web Store</a></li>",
"cookie_help_step2_login":"<li>Vaya al sitio web (p. ej., kemono.su o coomer.su) e inicie sesión si es necesario.</li>", "cookie_help_step2_login":"<li>Vaya al sitio web (p. ej., kemono.su o coomer.su) e inicie sesión si es necesario.</li>",
"cookie_help_step3_click_icon":"<li>Haga clic en el icono de la extensión en la barra de herramientas de su navegador.</li>", "cookie_help_step3_click_icon":"<li>Haga clic en el icono de la extensión en la barra de herramientas de su navegador.</li>",
@@ -3778,6 +3860,14 @@ Ausgabe: 'Kapitel Eins.jpg', 'Kapitel Eins_1.png', 'Kapitel Eins_2.gif'.""",
"cookie_help_dialog_title":"Anweisungen zur Cookie-Datei", "cookie_help_dialog_title":"Anweisungen zur Cookie-Datei",
"cookie_help_instruction_intro":"<p>Um Cookies zu verwenden, benötigen Sie normalerweise eine <b>cookies.txt</b>-Datei aus Ihrem Browser.</p>", "cookie_help_instruction_intro":"<p>Um Cookies zu verwenden, benötigen Sie normalerweise eine <b>cookies.txt</b>-Datei aus Ihrem Browser.</p>",
"cookie_help_how_to_get_title":"<p><b>So erhalten Sie cookies.txt:</b></p>", "cookie_help_how_to_get_title":"<p><b>So erhalten Sie cookies.txt:</b></p>",
"download_history_dialog_title_first_processed": "Verlauf der zuerst verarbeiteten Dateien",
"first_files_processed_header": "Erste {count} in dieser Sitzung verarbeitete Dateien:",
"history_file_label": "Datei:",
"history_from_post_label": "Aus Beitrag:",
"history_post_uploaded_label": "Beitrag hochgeladen am:",
"history_file_downloaded_label": "Datei heruntergeladen am:",
"download_history_dialog_title_empty": "Download-Verlauf (Leer)",
"no_download_history_header": "Noch keine Downloads",
"cookie_help_step1_extension_intro":"<li>Installieren Sie die Erweiterung 'Get cookies.txt LOCALLY' für Ihren Chrome-basierten Browser:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Get cookies.txt LOCALLY im Chrome Web Store</a></li>", "cookie_help_step1_extension_intro":"<li>Installieren Sie die Erweiterung 'Get cookies.txt LOCALLY' für Ihren Chrome-basierten Browser:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Get cookies.txt LOCALLY im Chrome Web Store</a></li>",
"cookie_help_step2_login":"<li>Gehen Sie zur Website (z. B. kemono.su oder coomer.su) und melden Sie sich bei Bedarf an.</li>", "cookie_help_step2_login":"<li>Gehen Sie zur Website (z. B. kemono.su oder coomer.su) und melden Sie sich bei Bedarf an.</li>",
"cookie_help_step3_click_icon":"<li>Klicken Sie auf das Erweiterungssymbol in Ihrer Browser-Symbolleiste.</li>", "cookie_help_step3_click_icon":"<li>Klicken Sie auf das Erweiterungssymbol in Ihrer Browser-Symbolleiste.</li>",
@@ -4140,6 +4230,14 @@ Saída: 'Capítulo Um.jpg', 'Capítulo Um_1.png', 'Capítulo Um_2.gif'.""",
"cookie_help_dialog_title":"Instruções do Arquivo de Cookie", "cookie_help_dialog_title":"Instruções do Arquivo de Cookie",
"cookie_help_instruction_intro":"<p>Para usar cookies, você normalmente precisa de um arquivo <b>cookies.txt</b> do seu navegador.</p>", "cookie_help_instruction_intro":"<p>Para usar cookies, você normalmente precisa de um arquivo <b>cookies.txt</b> do seu navegador.</p>",
"cookie_help_how_to_get_title":"<p><b>Como obter o cookies.txt:</b></p>", "cookie_help_how_to_get_title":"<p><b>Como obter o cookies.txt:</b></p>",
"download_history_dialog_title_first_processed": "Histórico dos Primeiros Arquivos Processados",
"first_files_processed_header": "Primeiros {count} Arquivos Processados nesta Sessão:",
"history_file_label": "Arquivo:",
"history_from_post_label": "Da Publicação:",
"history_post_uploaded_label": "Publicação Enviada em:",
"history_file_downloaded_label": "Arquivo Baixado em:",
"download_history_dialog_title_empty": "Histórico de Downloads (Vazio)",
"no_download_history_header": "Nenhum Download Ainda",
"cookie_help_step1_extension_intro":"<li>Instale a extensão 'Get cookies.txt LOCALLY' para seu navegador baseado em Chrome:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Obter Get cookies.txt LOCALLY na Chrome Web Store</a></li>", "cookie_help_step1_extension_intro":"<li>Instale a extensão 'Get cookies.txt LOCALLY' para seu navegador baseado em Chrome:<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">Obter Get cookies.txt LOCALLY na Chrome Web Store</a></li>",
"cookie_help_step2_login":"<li>Vá para o site (ex: kemono.su ou coomer.su) e faça login, se necessário.</li>", "cookie_help_step2_login":"<li>Vá para o site (ex: kemono.su ou coomer.su) e faça login, se necessário.</li>",
"cookie_help_step3_click_icon":"<li>Clique no ícone da extensão na barra de ferramentas do seu navegador.</li>", "cookie_help_step3_click_icon":"<li>Clique no ícone da extensão na barra de ferramentas do seu navegador.</li>",
@@ -4492,6 +4590,14 @@ translations ["zh_TW"].update ({
"cookie_help_dialog_title":"Cookie 檔案說明", "cookie_help_dialog_title":"Cookie 檔案說明",
"cookie_help_instruction_intro":"<p>要使用 cookie您通常需要瀏覽器中的 <b>cookies.txt</b> 檔案。</p>", "cookie_help_instruction_intro":"<p>要使用 cookie您通常需要瀏覽器中的 <b>cookies.txt</b> 檔案。</p>",
"cookie_help_how_to_get_title":"<p><b>如何取得 cookies.txt</b></p>", "cookie_help_how_to_get_title":"<p><b>如何取得 cookies.txt</b></p>",
"download_history_dialog_title_first_processed": "最初處理的檔案歷史記錄",
"first_files_processed_header": "此工作階段中最初處理的 {count} 個檔案:",
"history_file_label": "檔案:",
"history_from_post_label": "來自貼文:",
"history_post_uploaded_label": "貼文上傳於:",
"history_file_downloaded_label": "檔案下載於:",
"download_history_dialog_title_empty": "下載歷史記錄(空)",
"no_download_history_header": "尚無下載",
"cookie_help_step1_extension_intro":"<li>為您的 Chrome 瀏覽器安裝「Get cookies.txt LOCALLY」擴充功能<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">在 Chrome 線上應用程式商店取得 Get cookies.txt LOCALLY</a></li>", "cookie_help_step1_extension_intro":"<li>為您的 Chrome 瀏覽器安裝「Get cookies.txt LOCALLY」擴充功能<br><a href=\"https://chromewebstore.google.com/detail/get-cookiestxt-locally/cclelndahbckbenkjhflpdbgdldlbecc\" style=\"color: #87CEEB;\">在 Chrome 線上應用程式商店取得 Get cookies.txt LOCALLY</a></li>",
"cookie_help_step2_login":"<li>前往網站(例如 kemono.su 或 coomer.su並在需要時登入。</li>", "cookie_help_step2_login":"<li>前往網站(例如 kemono.su 或 coomer.su並在需要時登入。</li>",
"cookie_help_step3_click_icon":"<li>點擊瀏覽器工具列中的擴充功能圖示。</li>", "cookie_help_step3_click_icon":"<li>點擊瀏覽器工具列中的擴充功能圖示。</li>",

2066
main.py

File diff suppressed because it is too large Load Diff

View File

@@ -1,4 +1,4 @@
<h1 align="center">Kemono Downloader v5.2.0</h1> <h1 align="center">Kemono Downloader v5.3.0</h1>
<table align="center"> <table align="center">
<tr> <tr>
@@ -80,6 +80,20 @@ Kemono Downloader offers a range of features to streamline your content download
--- ---
## ✨ What's New in v5.3.0
- **Multi-Creator Post Fetching & Queuing:**
- The **Creator Selection popup** (🎨 icon) has been significantly enhanced.
- After selecting multiple creators, you can now click a new "**Fetch Posts**" button.
- This will retrieve and display posts from all selected creators in a new view within the popup.
- You can then browse these fetched posts (with search functionality) and select individual posts.
- A new "**Add Selected Posts to Queue**" button allows you to add your chosen posts directly to the main download queue, streamlining the process of gathering content from multiple artists.
- The traditional "**Add Selected to URL**" button is still available if you prefer to populate the main URL field with creator names.
- **Improved Favorite Download Queue Handling:**
- When items are added to the download queue from the Creator Selection popup, the main URL input field will now display a placeholder message (e.g., "{count} items in queue from popup").
- The queue is now more robustly managed, especially when interacting with the main URL input field after items have been queued from the popup.
---
## ✨ What's New in v5.1.0 ## ✨ What's New in v5.1.0
- **Enhanced Error File Management**: The "Error" button now opens a dialog listing files that failed to download. This dialog includes: - **Enhanced Error File Management**: The "Error" button now opens a dialog listing files that failed to download. This dialog includes:
- An option to **retry selected** failed downloads. - An option to **retry selected** failed downloads.