diff --git a/data/dejavu-sans/DejaVuSans-Bold.pkl b/data/dejavu-sans/DejaVuSans-Bold.pkl
new file mode 100644
index 0000000..dff76a0
Binary files /dev/null and b/data/dejavu-sans/DejaVuSans-Bold.pkl differ
diff --git a/data/dejavu-sans/DejaVuSans.cw127.pkl b/data/dejavu-sans/DejaVuSans.cw127.pkl
new file mode 100644
index 0000000..57fc448
Binary files /dev/null and b/data/dejavu-sans/DejaVuSans.cw127.pkl differ
diff --git a/data/dejavu-sans/DejaVuSans.pkl b/data/dejavu-sans/DejaVuSans.pkl
new file mode 100644
index 0000000..3ab2c06
Binary files /dev/null and b/data/dejavu-sans/DejaVuSans.pkl differ
diff --git a/src/core/api_client.py b/src/core/api_client.py
index c192680..271824a 100644
--- a/src/core/api_client.py
+++ b/src/core/api_client.py
@@ -4,6 +4,11 @@ from urllib.parse import urlparse
import json
import requests
import cloudscraper
+import time  # needed for time.sleep() in the retry logic added below
+import ssl
+from requests.adapters import HTTPAdapter
+from urllib3.poolmanager import PoolManager
+
from ..utils.network_utils import extract_post_info, prepare_cookies_for_request
from ..config.constants import (
STYLE_DATE_POST_TITLE,
@@ -11,6 +16,29 @@ from ..config.constants import (
STYLE_POST_TITLE_GLOBAL_NUMBERING
)
+# --- NEW: Custom Adapter to fix SSL errors ---
+class CustomSSLAdapter(HTTPAdapter):
+ """
+ A custom HTTPAdapter that forces check_hostname=False when using SSL.
+ This prevents the 'Cannot set verify_mode to CERT_NONE' error.
+ """
+    def init_poolmanager(self, connections, maxsize, block=False, **pool_kwargs):
+ ctx = ssl.create_default_context()
+ # Crucial: Disable hostname checking FIRST, then set verify mode
+ ctx.check_hostname = False
+ ctx.verify_mode = ssl.CERT_NONE
+
+        self.poolmanager = PoolManager(
+            num_pools=connections,
+            maxsize=maxsize,
+            block=block,
+            ssl_context=ctx,
+            **pool_kwargs
+        )
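+
+# Usage sketch: mount the adapter on any requests.Session-compatible object, e.g.
+#   session = requests.Session(); session.mount("https://", CustomSSLAdapter())
+# fetch_single_post_data() below mounts it on its cloudscraper session the same way.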
def fetch_posts_paginated(api_url_base, headers, offset, logger, cancellation_event=None, pause_event=None, cookies_dict=None, proxies=None):
"""
@@ -87,36 +115,63 @@ def fetch_posts_paginated(api_url_base, headers, offset, logger, cancellation_ev
def fetch_single_post_data(api_domain, service, user_id, post_id, headers, logger, cookies_dict=None, proxies=None):
"""
Fetches the full data, including the 'content' field, for a single post using cloudscraper.
+ Includes RETRY logic for 429 Rate Limit errors.
"""
post_api_url = f"https://{api_domain}/api/v1/{service}/user/{user_id}/post/{post_id}"
logger(f" Fetching full content for post ID {post_id}...")
- # FIX: Ensure scraper session is closed after use
- scraper = None
- try:
- scraper = cloudscraper.create_scraper()
- # Keep the 300s read timeout for both, but increase connect timeout for proxies
- request_timeout = (30, 300) if proxies else (15, 300)
+ # Retry settings
+ max_retries = 4
+
+ for attempt in range(max_retries + 1):
+ scraper = None
+ try:
+ scraper = cloudscraper.create_scraper()
+
+ # Mount custom SSL adapter
+ adapter = CustomSSLAdapter()
+ scraper.mount("https://", adapter)
+
+ request_timeout = (30, 300) if proxies else (15, 300)
+
+ response = scraper.get(post_api_url, headers=headers, timeout=request_timeout, cookies=cookies_dict, proxies=proxies, verify=False)
- response = scraper.get(post_api_url, headers=headers, timeout=request_timeout, cookies=cookies_dict, proxies=proxies, verify=False)
-
- response.raise_for_status()
+ # --- FIX: Handle 429 Rate Limit explicitly ---
+ if response.status_code == 429:
+ wait_time = 20 + (attempt * 10) # 20s, 30s, 40s...
+ logger(f" ⚠️ Rate Limited (429) on post {post_id}. Waiting {wait_time} seconds before retrying...")
+ time.sleep(wait_time)
+ continue # Try loop again
+ # ---------------------------------------------
- full_post_data = response.json()
+ response.raise_for_status()
- if isinstance(full_post_data, list) and full_post_data:
- return full_post_data[0]
- if isinstance(full_post_data, dict) and 'post' in full_post_data:
- return full_post_data['post']
- return full_post_data
+ full_post_data = response.json()
- except Exception as e:
- logger(f" ❌ Failed to fetch full content for post {post_id}: {e}")
- return None
- finally:
- if scraper:
- scraper.close()
+ if isinstance(full_post_data, list) and full_post_data:
+ return full_post_data[0]
+ if isinstance(full_post_data, dict) and 'post' in full_post_data:
+ return full_post_data['post']
+ return full_post_data
+ except Exception as e:
+ # Catch "Too Many Requests" if it wasn't caught by status_code check above
+ if "429" in str(e) or "Too Many Requests" in str(e):
+ if attempt < max_retries:
+ wait_time = 20 + (attempt * 10)
+ logger(f" ⚠️ Rate Limit Error caught: {e}. Waiting {wait_time}s...")
+ time.sleep(wait_time)
+ continue
+
+ # Only log error if this was the last attempt
+ if attempt == max_retries:
+ logger(f" ❌ Failed to fetch full content for post {post_id} after {max_retries} retries: {e}")
+ return None
+ finally:
+ if scraper:
+ scraper.close()
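+    # Reached only when the final attempt hit a 429 and looped around without raising.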
+ return None
def fetch_post_comments(api_domain, service, user_id, post_id, headers, logger, cancellation_event=None, pause_event=None, cookies_dict=None, proxies=None):
"""Fetches all comments for a specific post."""
diff --git a/src/core/hentaifox.txt b/src/core/hentaifox.txt
new file mode 100644
index 0000000..e69de29
diff --git a/src/core/hentaifox_client.py b/src/core/hentaifox_client.py
new file mode 100644
index 0000000..0d6766b
--- /dev/null
+++ b/src/core/hentaifox_client.py
@@ -0,0 +1,66 @@
+import requests
+import re
+# NOTE: bs4's BeautifulSoup would also work here, but plain regex is faster for this specific site.
+
+# Logic derived from NHdownloader.sh 'hentaifox' function
+BASE_URL = "https://hentaifox.com"
+HEADERS = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
+ "Referer": "https://hentaifox.com/",
+ "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8"
+}
+
+def get_gallery_id(url_or_id):
+ """Extracts numbers from URL or returns the ID string."""
+ match = re.search(r"(\d+)", str(url_or_id))
+ return match.group(1) if match else None
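+# e.g. get_gallery_id("https://hentaifox.com/gallery/12345/") -> "12345"
+# (illustrative ID; a bare numeric ID such as 12345 is also accepted)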
+
+def get_gallery_metadata(gallery_id):
+ """
+ Fetches the main gallery page to get the Title and Total Pages.
+    Equivalent to the first part of the 'hentaifox' function in the .sh file.
+ """
+ url = f"{BASE_URL}/gallery/{gallery_id}/"
+ response = requests.get(url, headers=HEADERS)
+ response.raise_for_status()
+ html = response.text
+
+    # Extract Title (Bash: grep -o '<title>.*</title>')
+    title_match = re.search(r'<title>(.*?)</title>', html)
+ title = title_match.group(1).replace(" - HentaiFox", "").strip() if title_match else f"Gallery {gallery_id}"
+
+ # Extract Total Pages (Bash: grep -Eo 'Pages: [0-9]*')
+ pages_match = re.search(r'Pages: (\d+)', html)
+ if not pages_match:
+ raise ValueError("Could not find total pages count.")
+
+ total_pages = int(pages_match.group(1))
+
+ return {
+ "id": gallery_id,
+ "title": title,
+ "total_pages": total_pages
+ }
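+# Usage sketch tying the helpers together (mirrors HentaiFoxDownloadThread.run):
+#   meta = get_gallery_metadata(get_gallery_id(url))
+#   for page in range(1, meta["total_pages"] + 1):
+#       img_url = get_image_link_for_page(meta["id"], page)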
+
+def get_image_link_for_page(gallery_id, page_num):
+ """
+ Fetches the specific reader page to find the actual image URL.
+ Equivalent to the loop in the 'hentaifox' function:
+ url="https://hentaifox.com/g/${id}/${i}/"
+ """
+ url = f"{BASE_URL}/g/{gallery_id}/{page_num}/"
+ response = requests.get(url, headers=HEADERS)
+
+ # Extract image source (Bash: grep -Eo 'data-src="..."')
+ # Regex looks for: data-src="https://..."
+ match = re.search(r'data-src="(https://[^"]+)"', response.text)
+
+ if match:
+ return match.group(1)
+ return None
\ No newline at end of file
diff --git a/src/core/workers.py b/src/core/workers.py
index 43fcd93..171f732 100644
--- a/src/core/workers.py
+++ b/src/core/workers.py
@@ -62,7 +62,8 @@ def robust_clean_name(name):
"""A more robust function to remove illegal characters for filenames and folders."""
if not name:
return ""
- illegal_chars_pattern = r'[\x00-\x1f<>:"/\\|?*\']'
+ # FIX: Removed \' from the list so apostrophes are kept
+ illegal_chars_pattern = r'[\x00-\x1f<>:"/\\|?*]'
cleaned_name = re.sub(illegal_chars_pattern, '', name)
cleaned_name = cleaned_name.strip(' .')
@@ -1599,12 +1600,11 @@ class PostProcessorWorker:
should_create_post_subfolder = self.use_post_subfolders
- if (not self.use_post_subfolders and self.use_subfolders and
+ if (not self.use_post_subfolders and
self.sfp_threshold is not None and num_potential_files_in_post >= self.sfp_threshold):
self.logger(f" ℹ️ Post has {num_potential_files_in_post} files (≥{self.sfp_threshold}). Activating Subfolder per Post via [sfp] command.")
should_create_post_subfolder = True
-
base_folder_names_for_post_content = []
determined_post_save_path_for_history = self.override_output_dir if self.override_output_dir else self.download_root
if not self.extract_links_only and self.use_subfolders:
@@ -2462,6 +2462,7 @@ class DownloadThread(QThread):
proxies=self.proxies
)
+ processed_count_for_delay = 0
for posts_batch_data in post_generator:
if self.isInterruptionRequested():
was_process_cancelled = True
@@ -2472,7 +2473,11 @@ class DownloadThread(QThread):
was_process_cancelled = True
break
- # --- FIX: Ensure 'proxies' is in this dictionary ---
+ processed_count_for_delay += 1
+            if processed_count_for_delay % 50 == 0:
+ self.logger(" ⏳ Safety Pause: Waiting 10 seconds to respect server rate limits...")
+ time.sleep(10)
+
worker_args = {
'post_data': individual_post_data,
'emitter': worker_signals_obj,
diff --git a/src/ui/classes/downloader_factory.py b/src/ui/classes/downloader_factory.py
index c24fa7f..0611f13 100644
--- a/src/ui/classes/downloader_factory.py
+++ b/src/ui/classes/downloader_factory.py
@@ -25,6 +25,7 @@ from .saint2_downloader_thread import Saint2DownloadThread
from .simp_city_downloader_thread import SimpCityDownloadThread
from .toonily_downloader_thread import ToonilyDownloadThread
from .deviantart_downloader_thread import DeviantArtDownloadThread
+from .hentaifox_downloader_thread import HentaiFoxDownloadThread
def create_downloader_thread(main_app, api_url, service, id1, id2, effective_output_dir_for_run):
"""
@@ -185,6 +186,17 @@ def create_downloader_thread(main_app, api_url, service, id1, id2, effective_out
cancellation_event=main_app.cancellation_event,
parent=main_app
)
+
+ # Handler for HentaiFox (New)
+ if 'hentaifox.com' in api_url or service == 'hentaifox':
+ main_app.log_signal.emit("🦊 HentaiFox URL detected.")
+ return HentaiFoxDownloadThread(
+ url_or_id=api_url,
+ output_dir=effective_output_dir_for_run,
+ parent=main_app
+ )
+
+
# ----------------------
# --- Fallback ---
# If no specific handler matched based on service name or URL pattern, return None.
diff --git a/src/ui/classes/hentaifox_downloader_thread.py b/src/ui/classes/hentaifox_downloader_thread.py
new file mode 100644
index 0000000..023f130
--- /dev/null
+++ b/src/ui/classes/hentaifox_downloader_thread.py
@@ -0,0 +1,138 @@
+import os
+import time
+import requests
+from PyQt5.QtCore import QThread, pyqtSignal
+from ...core.hentaifox_client import get_gallery_metadata, get_image_link_for_page, get_gallery_id
+from ...utils.file_utils import clean_folder_name
+
+class HentaiFoxDownloadThread(QThread):
+ progress_signal = pyqtSignal(str) # Log messages
+ file_progress_signal = pyqtSignal(str, object) # filename, (current_bytes, total_bytes)
+ # finished_signal: (downloaded_count, skipped_count, was_cancelled, kept_files_list)
+ finished_signal = pyqtSignal(int, int, bool, list)
+
+ def __init__(self, url_or_id, output_dir, parent=None):
+ super().__init__(parent)
+ self.gallery_id = get_gallery_id(url_or_id)
+ self.output_dir = output_dir
+ self.is_running = True
+ self.downloaded_count = 0
+ self.skipped_count = 0
+
+ def run(self):
+ try:
+ self.progress_signal.emit(f"🔍 [HentaiFox] Fetching metadata for ID: {self.gallery_id}...")
+
+ # 1. Get Info
+ try:
+ data = get_gallery_metadata(self.gallery_id)
+ except Exception as e:
+ self.progress_signal.emit(f"❌ [HentaiFox] Failed to fetch metadata: {e}")
+ self.finished_signal.emit(0, 0, False, [])
+ return
+
+ title = clean_folder_name(data['title'])
+ total_pages = data['total_pages']
+
+ # 2. Setup Folder
+ save_folder = os.path.join(self.output_dir, f"[{self.gallery_id}] {title}")
+ os.makedirs(save_folder, exist_ok=True)
+
+ self.progress_signal.emit(f"📂 Saving to: {save_folder}")
+ self.progress_signal.emit(f"📄 Found {total_pages} pages. Starting download...")
+
+ # 3. Iterate and Download
+ for i in range(1, total_pages + 1):
+ if not self.is_running:
+ self.progress_signal.emit("🛑 Download cancelled by user.")
+ break
+
+ # Fetch image link for this specific page
+ try:
+ img_url = get_image_link_for_page(self.gallery_id, i)
+
+ if img_url:
+ ext = img_url.split('.')[-1]
+ filename = f"{i:03d}.{ext}"
+ filepath = os.path.join(save_folder, filename)
+
+ # Check if exists
+ if os.path.exists(filepath):
+ self.progress_signal.emit(f"⚠️ [{i}/{total_pages}] Skipped (Exists): {filename}")
+ self.skipped_count += 1
+ else:
+ self.progress_signal.emit(f"⬇️ [{i}/{total_pages}] Downloading: {filename}")
+
+ # CALL NEW DOWNLOAD FUNCTION
+ success = self.download_image_with_progress(img_url, filepath, filename)
+
+ if success:
+ self.progress_signal.emit(f"✅ [{i}/{total_pages}] Finished: {filename}")
+ self.downloaded_count += 1
+ else:
+ self.progress_signal.emit(f"❌ [{i}/{total_pages}] Failed: {filename}")
+ self.skipped_count += 1
+ else:
+ self.progress_signal.emit(f"❌ [{i}/{total_pages}] Error: No image link found.")
+ self.skipped_count += 1
+
+ except Exception as e:
+ self.progress_signal.emit(f"❌ [{i}/{total_pages}] Exception: {e}")
+ self.skipped_count += 1
+
+ time.sleep(0.5)
+
+ # 4. Final Summary
+ summary = (
+ f"\n🏁 [HentaiFox] Task Complete!\n"
+ f" - Total: {total_pages}\n"
+ f" - Downloaded: {self.downloaded_count}\n"
+ f" - Skipped: {self.skipped_count}\n"
+ )
+ self.progress_signal.emit(summary)
+
+ except Exception as e:
+ self.progress_signal.emit(f"❌ Critical Error: {str(e)}")
+
+ self.finished_signal.emit(self.downloaded_count, self.skipped_count, not self.is_running, [])
+
+ def download_image_with_progress(self, url, path, filename):
+ """Downloads file while emitting byte-level progress signals."""
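+        # Emits (current_bytes, total_bytes) tuples via file_progress_signal;
+        # total_bytes is 0 when the server omits the Content-Length header.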
+ headers = {
+ "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36",
+ "Referer": "https://hentaifox.com/"
+ }
+
+ try:
+ # stream=True is required to get size before downloading body
+ r = requests.get(url, headers=headers, stream=True, timeout=20)
+ if r.status_code != 200:
+ return False
+
+ # Get Total Size (in bytes)
+ total_size = int(r.headers.get('content-length', 0))
+ downloaded_size = 0
+
+ chunk_size = 1024 # 1KB chunks
+
+ with open(path, 'wb') as f:
+ for chunk in r.iter_content(chunk_size):
+ if not self.is_running:
+ r.close()
+ return False
+
+ if chunk:
+ f.write(chunk)
+ downloaded_size += len(chunk)
+
+ self.file_progress_signal.emit(filename, (downloaded_size, total_size))
+
+ return True
+ except Exception as e:
+            self.progress_signal.emit(f"⚠️ Download Error: {e}")
+ return False
+
+ def stop(self):
+ self.is_running = False
\ No newline at end of file
diff --git a/src/ui/dialogs/SinglePDF.py b/src/ui/dialogs/SinglePDF.py
index f8cddca..d8ff49a 100644
--- a/src/ui/dialogs/SinglePDF.py
+++ b/src/ui/dialogs/SinglePDF.py
@@ -1,5 +1,7 @@
import os
import re
+import sys
+
try:
from fpdf import FPDF
FPDF_AVAILABLE = True
@@ -18,7 +20,9 @@ try:
self.set_font(self.font_family_main, '', 8)
self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')
-except ImportError:
+except Exception as e:
+ print(f"\n❌ DEBUG INFO: Import failed. The specific error is: {e}")
+ print(f"❌ DEBUG INFO: Python running this script is located at: {sys.executable}\n")
FPDF_AVAILABLE = False
FPDF = None
PDF = None
@@ -244,6 +248,9 @@ def create_single_pdf_from_content(posts_data, output_filename, font_path, add_i
pdf.multi_cell(w=0, h=7, txt=post.get('content', 'No Content'))
try:
+ output_dir = os.path.dirname(output_filename)
+ if output_dir and not os.path.exists(output_dir):
+ os.makedirs(output_dir, exist_ok=True)
pdf.output(output_filename)
logger(f"✅ Successfully created single PDF: '{os.path.basename(output_filename)}'")
return True
diff --git a/src/ui/main_window.py b/src/ui/main_window.py
index 5800ee9..a9b1ff5 100644
--- a/src/ui/main_window.py
+++ b/src/ui/main_window.py
@@ -106,6 +106,7 @@ from .classes.external_link_downloader_thread import ExternalLinkDownloadThread
from .classes.nhentai_downloader_thread import NhentaiDownloadThread
from .classes.downloader_factory import create_downloader_thread
from .classes.kemono_discord_downloader_thread import KemonoDiscordDownloadThread
+from .classes.hentaifox_downloader_thread import HentaiFoxDownloadThread
_ff_ver = (datetime.date.today().toordinal() - 735506) // 28
USERAGENT_FIREFOX = (f"Mozilla/5.0 (Windows NT 10.0; Win64; x64; "
@@ -309,6 +310,9 @@ class DownloaderApp (QWidget ):
self.downloaded_hash_counts_lock = threading.Lock()
self.session_temp_files = []
self.single_pdf_mode = False
+
+ self.temp_pdf_content_list = []
+ self.last_effective_download_dir = None
self.save_creator_json_enabled_this_session = True
self.date_prefix_format = self.settings.value(DATE_PREFIX_FORMAT_KEY, "YYYY-MM-DD {post}", type=str)
self.is_single_post_session = False
@@ -346,7 +350,7 @@ class DownloaderApp (QWidget ):
self.download_location_label_widget = None
self.remove_from_filename_label_widget = None
self.skip_words_label_widget = None
- self.setWindowTitle("Kemono Downloader v7.9.0")
+ self.setWindowTitle("Kemono Downloader v7.9.1")
setup_ui(self)
self._connect_signals()
if hasattr(self, 'character_input'):
@@ -3918,7 +3922,11 @@ class DownloaderApp (QWidget ):
'txt_file': 'coomer.txt',
'url_regex': r'https?://(?:www\.)?coomer\.(?:su|party|st)/[^/\s]+/user/[^/\s]+(?:/post/\d+)?/?'
},
-
+ 'hentaifox.com': {
+ 'name': 'HentaiFox',
+ 'txt_file': 'hentaifox.txt',
+ 'url_regex': r'https?://(?:www\.)?hentaifox\.com/(?:g|gallery)/\d+/?'
+ },
'allporncomic.com': {
'name': 'AllPornComic',
'txt_file': 'allporncomic.txt',
@@ -3999,7 +4007,8 @@ class DownloaderApp (QWidget ):
'toonily.com', 'toonily.me',
'hentai2read.com',
'saint2.su', 'saint2.pk',
- 'imgur.com', 'bunkr.'
+ 'imgur.com', 'bunkr.',
+ 'hentaifox.com'
]
for url in urls_to_download:
@@ -4087,6 +4096,7 @@ class DownloaderApp (QWidget ):
self._clear_stale_temp_files()
self.session_temp_files = []
+ self.temp_pdf_content_list = []
processed_post_ids_for_restore = []
manga_counters_for_restore = None
@@ -4170,6 +4180,7 @@ class DownloaderApp (QWidget ):
return False
effective_output_dir_for_run = os.path.normpath(main_ui_download_dir) if main_ui_download_dir else ""
+ self.last_effective_download_dir = effective_output_dir_for_run
if not is_restore:
self._create_initial_session_file(api_url, effective_output_dir_for_run, remaining_queue=self.favorite_download_queue)
@@ -5600,8 +5611,18 @@ class DownloaderApp (QWidget ):
permanent, history_data,
temp_filepath) = result_tuple
- if temp_filepath: self.session_temp_files.append(temp_filepath)
-
+ if temp_filepath:
+ self.session_temp_files.append(temp_filepath)
+
+ # If Single PDF mode is enabled, we need to load the data
+ # from the temp file into memory for the final aggregation.
+ if self.single_pdf_setting:
+ try:
+ with open(temp_filepath, 'r', encoding='utf-8') as f:
+ post_content_data = json.load(f)
+ self.temp_pdf_content_list.append(post_content_data)
+ except Exception as e:
+ self.log_signal.emit(f"⚠️ Error reading temp file for PDF aggregation: {e}")
with self.downloaded_files_lock:
self.download_counter += downloaded
self.skip_counter += skipped
@@ -5627,47 +5648,74 @@
self.finished_signal.emit(self.download_counter, self.skip_counter, self.cancellation_event.is_set(), self.all_kept_original_filenames)
def _trigger_single_pdf_creation(self):
- """Reads temp files, sorts them by date, then creates the single PDF."""
- self.log_signal.emit("="*40)
- self.log_signal.emit("Creating single PDF from collected text files...")
-
- posts_content_data = []
- for temp_filepath in self.session_temp_files:
- try:
- with open(temp_filepath, 'r', encoding='utf-8') as f:
- data = json.load(f)
- posts_content_data.append(data)
- except Exception as e:
- self.log_signal.emit(f" ⚠️ Could not read temp file '{temp_filepath}': {e}")
-
- if not posts_content_data:
- self.log_signal.emit(" No content was collected. Aborting PDF creation.")
+ """
+ Triggers the creation of a single PDF from collected text content in a BACKGROUND THREAD.
+ """
+ if not self.temp_pdf_content_list:
+ self.log_signal.emit("⚠️ No content collected for Single PDF.")
return
- output_dir = self.dir_input.text().strip() or QStandardPaths.writableLocation(QStandardPaths.DownloadLocation)
- default_filename = os.path.join(output_dir, "Consolidated_Content.pdf")
- filepath, _ = QFileDialog.getSaveFileName(self, "Save Single PDF", default_filename, "PDF Files (*.pdf)")
+ # 1. Sort the content
+ self.log_signal.emit(" Sorting collected content for PDF...")
+ def sort_key(post):
+ p_date = post.get('published') or "0000-00-00"
+ a_date = post.get('added') or "0000-00-00"
+ pid = post.get('id') or "0"
+ return (p_date, a_date, pid)
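+        # Tuple comparison sorts oldest-first: published date, then added date, then post ID.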
- if not filepath:
- self.log_signal.emit(" Single PDF creation cancelled by user.")
- return
+ sorted_content = sorted(self.temp_pdf_content_list, key=sort_key)
- if not filepath.lower().endswith('.pdf'):
- filepath += '.pdf'
+ # 2. Determine Filename
+ first_post = sorted_content[0]
+ creator_name = first_post.get('creator_name') or first_post.get('user') or "Unknown_Creator"
+ clean_creator = clean_folder_name(creator_name)
+ filename = f"[{clean_creator}] Complete_Collection.pdf"
+
+ # --- FIX 3: Corrected Fallback Logic ---
+ # Use the stored dir, or fall back to the text input in the UI, or finally the app root
+ base_dir = self.last_effective_download_dir
+ if not base_dir:
+ base_dir = self.dir_input.text().strip()
+ if not base_dir:
+ base_dir = self.app_base_dir
+
+ output_path = os.path.join(base_dir, filename)
+ # ---------------------------------------
+
+ # 3. Get Options
font_path = os.path.join(self.app_base_dir, 'data', 'dejavu-sans', 'DejaVuSans.ttf')
-
- self.log_signal.emit(" Sorting collected posts by date (oldest first)...")
- sorted_content = sorted(posts_content_data, key=lambda x: x.get('published', 'Z'))
+ # Get 'Add Info Page' preference
+ add_info = True
+ if hasattr(self, 'more_options_dialog') and self.more_options_dialog:
+ add_info = self.more_options_dialog.get_add_info_state()
+ elif hasattr(self, 'add_info_in_pdf_setting'):
+ add_info = self.add_info_in_pdf_setting
- create_single_pdf_from_content(
- sorted_content,
- filepath,
- font_path,
- add_info_page=self.add_info_in_pdf_setting, # Pass the flag here
- logger=self.log_signal.emit
+ # 4. START THE THREAD
+ self.pdf_thread = PdfGenerationThread(
+ posts_data=sorted_content,
+ output_filename=output_path,
+ font_path=font_path,
+ add_info_page=add_info,
+ logger_func=self.log_signal.emit
)
- self.log_signal.emit("="*40)
+
+ self.pdf_thread.finished_signal.connect(self._on_pdf_generation_finished)
+ self.pdf_thread.start()
+
+ def _on_pdf_generation_finished(self, success, message):
+ """Callback for when the PDF thread is done."""
+ if success:
+ self.log_signal.emit(f"✅ {message}")
+ QMessageBox.information(self, "PDF Created", message)
+ else:
+ self.log_signal.emit(f"❌ PDF Creation Error: {message}")
+ QMessageBox.warning(self, "PDF Error", f"Could not create PDF: {message}")
+
+ # Optional: Clear the temp list now that we are done
+ self.temp_pdf_content_list = []
def _add_to_history_candidates(self, history_data):
"""Adds processed post data to the history candidates list and updates the creator profile."""
@@ -7468,4 +7516,36 @@
if not success_starting_download:
self.log_signal.emit(f"⚠️ Failed to initiate download for '{item_display_name}'. Skipping and moving to the next item in queue.")
- QTimer.singleShot(100, self._process_next_favorite_download)
\ No newline at end of file
+ QTimer.singleShot(100, self._process_next_favorite_download)
+
+class PdfGenerationThread(QThread):
+ finished_signal = pyqtSignal(bool, str) # success, message
+
+ def __init__(self, posts_data, output_filename, font_path, add_info_page, logger_func):
+ super().__init__()
+ self.posts_data = posts_data
+ self.output_filename = output_filename
+ self.font_path = font_path
+ self.add_info_page = add_info_page
+ self.logger_func = logger_func
+
+ def run(self):
+ try:
+ from .dialogs.SinglePDF import create_single_pdf_from_content
+ self.logger_func("📄 Background Task: Generating Single PDF... (This may take a while)")
+
+ success = create_single_pdf_from_content(
+ self.posts_data,
+ self.output_filename,
+ self.font_path,
+ self.add_info_page,
+ logger=self.logger_func
+ )
+
+ if success:
+ self.finished_signal.emit(True, f"PDF Saved: {os.path.basename(self.output_filename)}")
+ else:
+ self.finished_signal.emit(False, "PDF generation failed.")
+
+ except Exception as e:
+ self.finished_signal.emit(False, str(e))
\ No newline at end of file