Compare commits
286 Commits

| Author | SHA1 | Date |
|---|---|---|
| | 9fe0c37127 | |
| | 5d4e08f794 | |
| | 8239fdb8f3 | |
| | df8a305e81 | |
| | 090f1a638d | |
| | 871ee75a2a | |
| | fea59c7903 | |
| | a9b210b2ba | |
| | ec94417569 | |
| | 0a902895a8 | |
| | 7217bfdb39 | |
| | 24880b5042 | |
| | 510ae5e1d1 | |
| | 65b4759bad | |
| | 6e993d88de | |
| | cc3565b12b | |
| | f8b150dfdb | |
| | 5f7b526852 | |
| | b0a6c264e1 | |
| | d9364f4f91 | |
| | 9cd48bb63a | |
| | d0f11c4a06 | |
| | 26fa3b9bc1 | |
| | f7c4d892a8 | |
| | 661b97aa16 | |
| | 3704fece2b | |
| | bdb7ac93c4 | |
| | 76d4a3ea8a | |
| | ccc7804505 | |
| | 4ee750c5d4 | |
| | e9be13c4e3 | |
| | a5cb04ea6f | |
| | 842f18d70d | |
| | fb3f0e8913 | |
| | 0758887154 | |
| | e752d881e7 | |
| | a776d1abe9 | |
| | 21d1ce4fa9 | |
| | d5112a25ee | |
| | 791ce503ff | |
| | e5b519d5ce | |
| | 9888ed0862 | |
| | 9e996bf682 | |
| | e7a6a91542 | |
| | d7faccce18 | |
| | a78c01c4f6 | |
| | 6de9967e0b | |
| | e3dd0e70b6 | |
| | 9db89cfad0 | |
| | 0a6034a632 | |
| | 2da69e7017 | |
| | 3209770d00 | |
| | 337cdd342c | |
| | d54b013bbc | |
| | 2785fc1121 | |
| | fbdae61b80 | |
| | 33133eb275 | |
| | 3935cbeea4 | |
| | 8ba2a572fa | |
| | 8db40f03b6 | |
| | 742fe7685c | |
| | e085d9a134 | |
| | 1cd03731c0 | |
| | 0bc8d7c692 | |
| | 3a9009e76e | |
| | 9a28e922b4 | |
| | 923a0ff61e | |
| | e891a2a845 | |
| | 778b0219e2 | |
| | 3fc08d9ea7 | |
| | af6a6add57 | |
| | 7737d32ef9 | |
| | c08cbb6490 | |
| | 92a2e91624 | |
| | 11ea511a9d | |
| | 8abdb49ed8 | |
| | 0873dd1ce0 | |
| | df5fbc1f73 | |
| | 5510f7f0c6 | |
| | 2f0593c450 | |
| | e67adb6bdc | |
| | d39081088c | |
| | f303b8b020 | |
| | 539e76aa9e | |
| | 574d0d66b4 | |
| | 9e58a9d574 | |
| | d67de87a11 | |
| | 149f217f2f | |
| | 874902ad60 | |
| | 440cf60d90 | |
| | fb446a1e28 | |
| | cfd869e05a | |
| | b191776f65 | |
| | f41f354737 | |
| | 6b57ee099d | |
| | 21ecb60cb5 | |
| | ee00019f2e | |
| | d49c739fe4 | |
| | dbdf82a079 | |
| | f0bf74da16 | |
| | e8b655e492 | |
| | 4f383910d2 | |
| | 404c4ca59a | |
| | bcf26bea20 | |
| | fa198c41c1 | |
| | f214d2452e | |
| | f39b510577 | |
| | 2c45c14696 | |
| | aa2305c10e | |
| | 568c687f98 | |
| | c8b77fb0d7 | |
| | b78d543f16 | |
| | 56922dcd47 | |
| | a00d9de546 | |
| | 260bf8e666 | |
| | f3d8447135 | |
| | aa176afdb7 | |
| | ea84750f05 | |
| | e021a66092 | |
| | 650ef1cbb3 | |
| | 9082c0c94a | |
| | 783dfb985c | |
| | bb2cf15b88 | |
| | 4b565dbadd | |
| | 95b0ab88ba | |
| | 65c5d2798e | |
| | c23f18be6d | |
| | 69ddc2ca08 | |
| | 191dbc8c62 | |
| | 3c1b361fc1 | |
| | 953dbaebf0 | |
| | efd5458493 | |
| | 3473f6540d | |
| | 7fe5f4b83e | |
| | 072b582622 | |
| | de936e8d96 | |
| | 9d0f0dda23 | |
| | 222ec769db | |
| | 6771ede722 | |
| | 8199b79dc7 | |
| | dfca265380 | |
| | d68bab40d9 | |
| | 3fc2cfde99 | |
| | 304ad2b3c1 | |
| | 64a314713e | |
| | d5d6fd91ef | |
| | 01665c366b | |
| | b443ec1da9 | |
| | ae4ee57500 | |
| | 11d0515f8b | |
| | 1b95f13b37 | |
| | 1cb70e2d4d | |
| | 2bda267c3e | |
| | a721900179 | |
| | b4bea4d4a3 | |
| | 373c0c868c | |
| | 6960cbed9a | |
| | 8645c0c290 | |
| | 76486a92fd | |
| | 823bd438bc | |
| | 360c0c247a | |
| | 474ba0280a | |
| | d7fa6b1bd6 | |
| | deb543b596 | |
| | e32eb98bb7 | |
| | 461249b8ba | |
| | f8d67b0555 | |
| | 9701abde5f | |
| | 0940bdb8dd | |
| | b744e83f09 | |
| | 811b7b765c | |
| | 3bc3c7b760 | |
| | d8ed588033 | |
| | f6b7919043 | |
| | 401ccd9884 | |
| | 3b010b8eeb | |
| | da29ccfc1f | |
| | 3197be300f | |
| | 2cf73e6dbd | |
| | bd46002684 | |
| | 5a6474cb8a | |
| | cdf4e9bdfb | |
| | 10b2ec666f | |
| | 08dac4df1e | |
| | b3c837e88a | |
| | e395a8411d | |
| | ec9e595167 | |
| | 5ff87f914a | |
| | 318b9095a7 | |
| | 437df4e73a | |
| | 3eb26bcf0c | |
| | db7a08f18a | |
| | dc1314a148 | |
| | 21ba95e325 | |
| | 9367970ec0 | |
| | c34863a397 | |
| | f93795e370 | |
| | 7d4e785ca1 | |
| | 31b1cb2873 | |
| | 5e23e544e8 | |
| | 80feac092d | |
| | 9e73125d69 | |
| | b32cbf0dfd | |
| | a9c9fde855 | |
| | 46658a7bab | |
| | 927c11f2bb | |
| | a54f2b3567 | |
| | 7f2312b64f | |
| | 7106694bcb | |
| | 6b37d73e5a | |
| | d1c5b205ef | |
| | 10b567a5fd | |
| | eed0a919aa | |
| | 78357df07f | |
| | 8137c76eb4 | |
| | be3a522305 | |
| | 13d05765b2 | |
| | f52d16d1e4 | |
| | acb91c7e8a | |
| | c765a7a281 | |
| | 5abfcc8550 | |
| | 7957468077 | |
| | f774773b63 | |
| | 8036cb9835 | |
| | 13fc33d2c0 | |
| | 8663ef54a3 | |
| | 0316813792 | |
| | d201a5396c | |
| | 86f9396b6c | |
| | 0fb4bb3cb0 | |
| | 1528d7ce25 | |
| | 4e7eeb7989 | |
| | 7f2976a4f4 | |
| | 8928cb92da | |
| | a181b76124 | |
| | 8f085a8f63 | |
| | 93a997351b | |
| | b3af6c1c15 | |
| | 4a65263f7d | |
| | 1091b5b9b4 | |
| | f6b3ff2f5c | |
| | b399bdf5cf | |
| | 9ace161bc8 | |
| | 66e52cfd78 | |
| | e665fd3cde | |
| | fc94f4c691 | |
| | 78e2012f04 | |
| | 3fe9dbacc6 | |
| | 004dea06e0 | |
| | 8994a69c34 | |
| | f4a692673e | |
| | 4cb5f14ef6 | |
| | a596c4f350 | |
| | e091c60d29 | |
| | d2ea026a41 | |
| | bb3d5c20f5 | |
| | a13eae8f16 | |
| | 7e5dc71720 | |
| | d7960bbb85 | |
| | c4d5ba3040 | |
| | fd84de7bce | |
| | a6383b20a4 | |
| | 651f9d9f8d | |
| | decef6730f | |
| | 32a12e8a09 | |
| | 62007d2d45 | |
| | f1e592cf99 | |
| | bf111d109a | |
| | 00f8ff63d6 | |
| | aee0ff999d | |
| | b5e9080285 | |
| | 25d33f1531 | |
| | ff0ccb2631 | |
| | da507b2b3a | |
| | 9165903e96 | |
| | f85de58fcb | |
| | ccfb8496a2 | |
| | e0d3e1b5af | |
| | 50ee50cd5c | |
| | 8982026d79 | |
| | aec44f1782 | |
| | 866a5a90de | |
| | 929051d46c | |
| | eada5057b7 | |
| | fe0b369446 | |
| | c0c2db709b | |
.github/FUNDING.yml (vendored, new file, 3 lines)
@@ -0,0 +1,3 @@
github: [Yuvi9587]
ko_fi: yuvi427183
buy_me_a_coffee: yuvi9587
LICENSE (new file, 21 lines)
@@ -0,0 +1,21 @@
MIT License

Copyright (c) [2025] [Yuvi9587]

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
LinkMaker/hentai2read.py (new file, empty)
Read/Cat.gif (binary, new file, 779 KiB)
Read/Read.png (binary, new file, 82 KiB)
Read/Read1.png (binary, new file, 84 KiB)
Read/Read2.png (binary, new file, 85 KiB)
Read/Read3.png (binary, new file, 90 KiB)
Read/bmac.gif (binary, new file, 434 KiB)
(binary image, modified: 66 KiB before, 66 KiB after)
assets/Kemono.png (binary, new file, 12 KiB)
assets/Ko-fi.png (binary, new file, 2.9 KiB)
assets/buymeacoffee.png (binary, new file, 3.2 KiB)
assets/discord.png (binary, new file, 17 KiB)
assets/github.png (binary, new file, 13 KiB)
assets/instagram.png (binary, new file, 59 KiB)
assets/patreon.png (binary, new file, 978 B)
data/creators.json (new file, 2291826 lines)
data/dejavu-sans/DejaVu Fonts License.txt (new file, 97 lines)
@@ -0,0 +1,97 @@
Fonts are (c) Bitstream (see below). DejaVu changes are in public domain.
Glyphs imported from Arev fonts are (c) Tavmjong Bah (see below)

Bitstream Vera Fonts Copyright
------------------------------

Copyright (c) 2003 by Bitstream, Inc. All Rights Reserved. Bitstream Vera is
a trademark of Bitstream, Inc.

Permission is hereby granted, free of charge, to any person obtaining a copy
of the fonts accompanying this license ("Fonts") and associated
documentation files (the "Font Software"), to reproduce and distribute the
Font Software, including without limitation the rights to use, copy, merge,
publish, distribute, and/or sell copies of the Font Software, and to permit
persons to whom the Font Software is furnished to do so, subject to the
following conditions:

The above copyright and trademark notices and this permission notice shall
be included in all copies of one or more of the Font Software typefaces.

The Font Software may be modified, altered, or added to, and in particular
the designs of glyphs or characters in the Fonts may be modified and
additional glyphs or characters may be added to the Fonts, only if the fonts
are renamed to names not containing either the words "Bitstream" or the word
"Vera".

This License becomes null and void to the extent applicable to Fonts or Font
Software that has been modified and is distributed under the "Bitstream
Vera" names.

The Font Software may be sold as part of a larger software package but no
copy of one or more of the Font Software typefaces may be sold by itself.

THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF COPYRIGHT, PATENT,
TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL BITSTREAM OR THE GNOME
FOUNDATION BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, INCLUDING
ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL DAMAGES,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF
THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM OTHER DEALINGS IN THE
FONT SOFTWARE.

Except as contained in this notice, the names of Gnome, the Gnome
Foundation, and Bitstream Inc., shall not be used in advertising or
otherwise to promote the sale, use or other dealings in this Font Software
without prior written authorization from the Gnome Foundation or Bitstream
Inc., respectively. For further information, contact: fonts at gnome dot
org.

Arev Fonts Copyright
------------------------------

Copyright (c) 2006 by Tavmjong Bah. All Rights Reserved.

Permission is hereby granted, free of charge, to any person obtaining
a copy of the fonts accompanying this license ("Fonts") and
associated documentation files (the "Font Software"), to reproduce
and distribute the modifications to the Bitstream Vera Font Software,
including without limitation the rights to use, copy, merge, publish,
distribute, and/or sell copies of the Font Software, and to permit
persons to whom the Font Software is furnished to do so, subject to
the following conditions:

The above copyright and trademark notices and this permission notice
shall be included in all copies of one or more of the Font Software
typefaces.

The Font Software may be modified, altered, or added to, and in
particular the designs of glyphs or characters in the Fonts may be
modified and additional glyphs or characters may be added to the
Fonts, only if the fonts are renamed to names not containing either
the words "Tavmjong Bah" or the word "Arev".

This License becomes null and void to the extent applicable to Fonts
or Font Software that has been modified and is distributed under the
"Tavmjong Bah Arev" names.

The Font Software may be sold as part of a larger software package but
no copy of one or more of the Font Software typefaces may be sold by
itself.

THE FONT SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO ANY WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT
OF COPYRIGHT, PATENT, TRADEMARK, OR OTHER RIGHT. IN NO EVENT SHALL
TAVMJONG BAH BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
INCLUDING ANY GENERAL, SPECIAL, INDIRECT, INCIDENTAL, OR CONSEQUENTIAL
DAMAGES, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF THE USE OR INABILITY TO USE THE FONT SOFTWARE OR FROM
OTHER DEALINGS IN THE FONT SOFTWARE.

Except as contained in this notice, the name of Tavmjong Bah shall not
be used in advertising or otherwise to promote the sale, use or other
dealings in this Font Software without prior written authorization
from Tavmjong Bah. For further information, contact: tavmjong @ free
. fr.
data/dejavu-sans/DejaVuSans-Bold.ttf (binary, new file)
data/dejavu-sans/DejaVuSans-BoldOblique.ttf (binary, new file)
data/dejavu-sans/DejaVuSans-ExtraLight.ttf (binary, new file)
data/dejavu-sans/DejaVuSans-Oblique.ttf (binary, new file)
data/dejavu-sans/DejaVuSans.ttf (binary, new file)
data/dejavu-sans/DejaVuSansCondensed-Bold.ttf (binary, new file)
data/dejavu-sans/DejaVuSansCondensed-BoldOblique.ttf (binary, new file)
data/dejavu-sans/DejaVuSansCondensed-Oblique.ttf (binary, new file)
data/dejavu-sans/DejaVuSansCondensed.ttf (binary, new file)
@@ -1,857 +0,0 @@
import os
import time
import requests
import re
import threading
import queue
import hashlib
from concurrent.futures import ThreadPoolExecutor, Future, CancelledError

from PyQt5.QtCore import QObject, pyqtSignal, QThread, QMutex, QMutexLocker
from urllib.parse import urlparse
try:
    from PIL import Image
except ImportError:
    print("ERROR: Pillow library not found. Please install it: pip install Pillow")
    Image = None

from io import BytesIO

fastapi_app = None
KNOWN_NAMES = []

def clean_folder_name(name):
    if not isinstance(name, str): name = str(name)
    cleaned = re.sub(r'[^\w\s\-\_]', '', name)
    return cleaned.strip().replace(' ', '_')

def clean_filename(name):
    if not isinstance(name, str): name = str(name)
    cleaned = re.sub(r'[^\w\s\-\_\.]', '', name)
    return cleaned.strip().replace(' ', '_')

def extract_folder_name_from_title(title, unwanted_keywords):
    if not title: return 'Uncategorized'
    title_lower = title.lower()
    tokens = title_lower.split()
    for token in tokens:
        clean_token = clean_folder_name(token)
        if clean_token and clean_token not in unwanted_keywords:
            return clean_token
    return 'Uncategorized'

def match_folders_from_title(title, known_names, unwanted_keywords):
    if not title: return []
    cleaned_title = clean_folder_name(title.lower())
    matched_cleaned_names = set()

    for name in known_names:
        cleaned_name_for_match = clean_folder_name(name.lower())
        if not cleaned_name_for_match: continue
        if cleaned_name_for_match in cleaned_title:
            if cleaned_name_for_match not in unwanted_keywords:
                matched_cleaned_names.add(cleaned_name_for_match)
    return list(matched_cleaned_names)

def is_image(filename):
    if not filename: return False
    return filename.lower().endswith(('.png', '.jpg', '.jpeg', '.webp', '.gif'))

def is_video(filename):
    if not filename: return False
    return filename.lower().endswith(('.mp4', '.mov', '.mkv', '.webm', '.avi', '.wmv'))

def is_zip(filename):
    if not filename: return False
    return filename.lower().endswith('.zip')

def is_rar(filename):
    if not filename: return False
    return filename.lower().endswith('.rar')

def is_post_url(url):
    if not isinstance(url, str): return False
    return '/post/' in urlparse(url).path

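# Accepts both page URLs (/<service>/user/<id>[/post/<id>]) and API URLs
# (/api/v1/<service>/user/<id>[/post/<id>]) on kemono/coomer domains; returns
# (service, user_id, post_id), where post_id is None for creator-page URLs.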
def extract_post_info(url_string):
    service, user_id, post_id = None, None, None
    if not isinstance(url_string, str) or not url_string.strip():
        return None, None, None
    try:
        parsed_url = urlparse(url_string.strip())
        domain = parsed_url.netloc.lower()
        path_parts = [part for part in parsed_url.path.strip('/').split('/') if part]
        is_kemono = 'kemono.su' in domain or 'kemono.party' in domain
        is_coomer = 'coomer.su' in domain or 'coomer.party' in domain
        if not (is_kemono or is_coomer):
            return None, None, None
        if len(path_parts) >= 3 and path_parts[1].lower() == 'user':
            service = path_parts[0]
            user_id = path_parts[2]
            if len(path_parts) >= 5 and path_parts[3].lower() == 'post':
                post_id = path_parts[4]
            return service, user_id, post_id
        if len(path_parts) >= 5 and path_parts[0].lower() == 'api' and path_parts[1].lower() == 'v1' and path_parts[3].lower() == 'user':
            service = path_parts[2]
            user_id = path_parts[4]
            if len(path_parts) >= 7 and path_parts[5].lower() == 'post':
                post_id = path_parts[6]
            return service, user_id, post_id
    except ValueError:
        print(f"Debug: ValueError parsing URL '{url_string}'")
        return None, None, None
    except Exception as e:
        print(f"Debug: Exception during extract_post_info for URL '{url_string}': {e}")
        return None, None, None
    return None, None, None

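# The kemono/coomer API pages a creator's posts with an "o" (offset) query
# parameter; each call fetches one page and returns the decoded JSON list.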
def fetch_posts_paginated(api_url_base, headers, offset, logger):
    paginated_url = f'{api_url_base}?o={offset}'
    logger(f" Fetching: {paginated_url}")
    try:
        response = requests.get(paginated_url, headers=headers, timeout=45)
        response.raise_for_status()
        if 'application/json' not in response.headers.get('Content-Type', ''):
            raise RuntimeError(f"Unexpected content type received: {response.headers.get('Content-Type')}. Body: {response.text[:200]}")
        return response.json()
    except requests.exceptions.Timeout:
        raise RuntimeError(f"Timeout fetching page offset {offset}")
    except requests.exceptions.RequestException as e:
        err_msg = f"Error fetching page offset {offset}: {e}"
        if e.response is not None:
            err_msg += f" (Status: {e.response.status_code}, Body: {e.response.text[:200]})"
        raise RuntimeError(err_msg)
    except ValueError as e:
        raise RuntimeError(f"Error decoding JSON response for offset {offset}: {e}. Body: {response.text[:200]}")
    except Exception as e:
        raise RuntimeError(f"Unexpected error processing page offset {offset}: {e}")

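# Generator yielding batches (lists) of post dicts. For a single-post URL it
# pages through the creator's feed until the target post ID turns up, yields
# just that post, and stops.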
def download_from_api(api_url_input, logger=print):
    headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'}
    service, user_id, target_post_id = extract_post_info(api_url_input)

    if not service or not user_id:
        logger(f"❌ Invalid or unrecognized URL: {api_url_input}. Cannot fetch.")
        return

    parsed_input = urlparse(api_url_input)
    api_domain = parsed_input.netloc if ('kemono.su' in parsed_input.netloc.lower() or 'coomer.su' in parsed_input.netloc.lower() or 'kemono.party' in parsed_input.netloc.lower() or 'coomer.party' in parsed_input.netloc.lower()) else "kemono.su"
    api_base_url = f"https://{api_domain}/api/v1/{service}/user/{user_id}"

    offset = 0
    page = 1
    processed_target_post = False

    while True:
        if target_post_id and processed_target_post:
            logger(f"✅ Target post {target_post_id} found and processed. Stopping.")
            break

        logger(f"\n🔄 Fetching page {page} (offset {offset}) for user {user_id} on {api_domain}...")
        try:
            posts_batch = fetch_posts_paginated(api_base_url, headers, offset, logger)
            if not isinstance(posts_batch, list):
                logger(f"❌ API Error: Expected a list of posts, got {type(posts_batch)}. Response: {str(posts_batch)[:200]}")
                break
        except RuntimeError as e:
            logger(f"❌ {e}")
            logger(" Aborting pagination due to error.")
            break
        except Exception as e:
            logger(f"❌ Unexpected error during fetch loop: {e}")
            break

        if not posts_batch:
            if page == 1 and not target_post_id:
                logger("😕 No posts found for this creator.")
            elif not target_post_id:
                logger("✅ Reached end of posts.")
            break

        logger(f"📦 Found {len(posts_batch)} posts on page {page}.")

        if target_post_id:
            matching_post = next((post for post in posts_batch if str(post.get('id')) == str(target_post_id)), None)

            if matching_post:
                logger(f"🎯 Found target post {target_post_id} on page {page}.")
                yield [matching_post]
                processed_target_post = True
            else:
                logger(f" Target post {target_post_id} not found on this page.")
        else:
            yield posts_batch
        if not (target_post_id and processed_target_post):
            page_size = 50
            offset += page_size
            page += 1
            time.sleep(0.6)
    if target_post_id and not processed_target_post:
        logger(f"❌ Target post ID {target_post_id} was not found for this creator.")

class PostProcessorSignals(QObject):
    progress_signal = pyqtSignal(str)
    file_download_status_signal = pyqtSignal(bool)

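# One worker per post: it picks the destination folder(s), applies keyword and
# file-type filters, streams each file into memory, de-duplicates by filename
# and MD5 hash, optionally re-encodes large images to WebP, then writes to disk.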
class PostProcessorWorker:
    def __init__(self, post_data, download_root, known_names, filter_character,
                 unwanted_keywords, filter_mode, skip_zip, skip_rar,
                 use_subfolders, target_post_id_from_initial_url, custom_folder_name,
                 compress_images, download_thumbnails, service, user_id,
                 api_url_input, cancellation_event, signals,
                 downloaded_files, downloaded_file_hashes, downloaded_files_lock, downloaded_file_hashes_lock,
                 skip_words_list=None):
        self.post = post_data
        self.download_root = download_root
        self.known_names = known_names
        self.filter_character = filter_character
        self.unwanted_keywords = unwanted_keywords
        self.filter_mode = filter_mode
        self.skip_zip = skip_zip
        self.skip_rar = skip_rar
        self.use_subfolders = use_subfolders
        self.target_post_id_from_initial_url = target_post_id_from_initial_url
        self.custom_folder_name = custom_folder_name
        self.compress_images = compress_images
        self.download_thumbnails = download_thumbnails
        self.service = service
        self.user_id = user_id
        self.api_url_input = api_url_input
        self.cancellation_event = cancellation_event
        self.signals = signals
        self.skip_current_file_flag = threading.Event()
        self.is_downloading_file = False
        self.current_download_path = None
        self.downloaded_files = downloaded_files
        self.downloaded_file_hashes = downloaded_file_hashes
        self.downloaded_files_lock = downloaded_files_lock
        self.downloaded_file_hashes_lock = downloaded_file_hashes_lock
        self.skip_words_list = skip_words_list if skip_words_list is not None else []
        if self.compress_images and Image is None:
            self.logger("⚠️ Image compression enabled, but Pillow library is not loaded. Disabling compression.")
            self.compress_images = False

    def logger(self, message):
        if self.signals and hasattr(self.signals, 'progress_signal'):
            self.signals.progress_signal.emit(message)
        else:
            print(f"(Worker Log): {message}")

    def check_cancel(self):
        is_cancelled = self.cancellation_event.is_set()
        return is_cancelled

    def skip_file(self):
        pass

    def process(self):
        if self.check_cancel(): return 0, 0

        total_downloaded_post = 0
        total_skipped_post = 0
        headers = {'User-Agent': 'Mozilla/5.0', 'Referer': f'https://{urlparse(self.api_url_input).netloc}/'}
        url_pattern = re.compile(r'https?://[^\s<>"]+|www\.[^\s<>"]+')
        LARGE_THUMBNAIL_THRESHOLD = 1 * 1024 * 1024

        post = self.post
        api_title = post.get('title', '')
        title = api_title if api_title else 'untitled_post'
        post_id = post.get('id', 'unknown_id')
        post_file_info = post.get('file')
        attachments = post.get('attachments', [])
        post_content = post.get('content', '')
        is_target_post = (self.target_post_id_from_initial_url is not None) and (str(post_id) == str(self.target_post_id_from_initial_url))

        self.logger(f"\n--- Processing Post {post_id} ('{title[:50]}...') (Thread: {threading.current_thread().name}) ---")
        if self.skip_words_list:
            title_lower = title.lower()
            for skip_word in self.skip_words_list:
                if skip_word.lower() in title_lower:
                    self.logger(f" -> Skip Post (Title): Post {post_id} title ('{title[:30]}...') contains skip word '{skip_word}'. Skipping entire post.")
                    return 0, 1

        if not isinstance(attachments, list):
            self.logger(f"⚠️ Corrupt attachment data for post {post_id}. Skipping attachments.")
            attachments = []
        valid_folder_paths = []
        folder_decision_reason = ""
        api_domain = urlparse(self.api_url_input).netloc if ('kemono.su' in urlparse(self.api_url_input).netloc.lower() or 'coomer.su' in urlparse(self.api_url_input).netloc.lower() or 'kemono.party' in urlparse(self.api_url_input).netloc.lower() or 'coomer.party' in urlparse(self.api_url_input).netloc.lower()) else "kemono.su"
        if is_target_post and self.custom_folder_name and self.use_subfolders:
            folder_path_full = os.path.join(self.download_root, self.custom_folder_name)
            valid_folder_paths = [folder_path_full]
            folder_decision_reason = f"Using custom folder for target post: '{self.custom_folder_name}'"
        if not valid_folder_paths and self.use_subfolders:
            folder_names_for_post = []
            if self.filter_character:
                clean_char_filter = clean_folder_name(self.filter_character.lower())
                matched_names_in_title = match_folders_from_title(title, self.known_names, self.unwanted_keywords)

                if clean_char_filter and clean_char_filter in matched_names_in_title:
                    folder_names_for_post = [clean_char_filter]
                    folder_decision_reason = f"Character filter '{self.filter_character}' matched title. Using folder '{clean_char_filter}'."
                else:
                    self.logger(f" -> Filter Skip Post {post_id}: Character filter '{self.filter_character}' not found in title matches ({matched_names_in_title}).")
                    return 0, 1
            else:
                matched_folders = match_folders_from_title(title, self.known_names, self.unwanted_keywords)
                if matched_folders:
                    folder_names_for_post = matched_folders
                    folder_decision_reason = f"Found known name(s) in title: {matched_folders}"
                else:
                    extracted_folder = extract_folder_name_from_title(title, self.unwanted_keywords)
                    folder_names_for_post = [extracted_folder]
                    folder_decision_reason = f"No known names in title. Using derived folder: '{extracted_folder}'"
            for folder_name in folder_names_for_post:
                folder_path_full = os.path.join(self.download_root, folder_name)
                valid_folder_paths.append(folder_path_full)
        if not valid_folder_paths:
            valid_folder_paths = [self.download_root]
            if not folder_decision_reason:
                folder_decision_reason = "Subfolders disabled or no specific folder determined. Using root download directory."

        self.logger(f" Folder Decision: {folder_decision_reason}")
        if not valid_folder_paths:
            self.logger(f" ERROR: No valid folder paths determined for post {post_id}. Skipping.")
            return 0, 1
        if post_content:
            try:
                found_links = re.findall(r'href=["\'](https?://[^"\']+)["\']', post_content)
                if found_links:
                    self.logger(f"🔗 Links found in post content:")
                    unique_links = sorted(list(set(found_links)))
                    for link in unique_links[:10]:
                        if not any(x in link for x in ['.css', '.js', 'javascript:']):
                            self.logger(f" - {link}")
                    if len(unique_links) > 10:
                        self.logger(f" - ... ({len(unique_links) - 10} more links not shown)")
            except Exception as e:
                self.logger(f"⚠️ Error parsing content for links in post {post_id}: {e}")
        files_to_process_for_download = []
        api_domain = urlparse(self.api_url_input).netloc if ('kemono.su' in urlparse(self.api_url_input).netloc.lower() or 'coomer.su' in urlparse(self.api_url_input).netloc.lower() or 'kemono.party' in urlparse(self.api_url_input).netloc.lower() or 'coomer.party' in urlparse(self.api_url_input).netloc.lower()) else "kemono.su"

        if self.download_thumbnails:
            self.logger(f" Mode: Attempting to download thumbnail...")
            self.logger(" Thumbnail download via API is disabled as the local API is not used.")
            self.logger(f" -> Skipping Post {post_id}: Thumbnail download requested but API is disabled.")
            return 0, 1

        else:
            self.logger(f" Mode: Downloading post file/attachments.")
            if post_file_info and isinstance(post_file_info, dict) and post_file_info.get('path'):
                main_file_path = post_file_info['path'].lstrip('/')
                main_file_name = post_file_info.get('name') or os.path.basename(main_file_path)
                if main_file_name:
                    file_url = f"https://{api_domain}/data/{main_file_path}"
                    files_to_process_for_download.append({
                        'url': file_url, 'name': main_file_name,
                        '_is_thumbnail': False, '_source': 'post_file'
                    })
                else:
                    self.logger(f" ⚠️ Skipping main post file: Missing filename (Path: {main_file_path})")
            attachment_counter = 0
            for idx, attachment in enumerate(attachments):
                if isinstance(attachment, dict) and attachment.get('path'):
                    attach_path = attachment['path'].lstrip('/')
                    attach_name = attachment.get('name') or os.path.basename(attach_path)
                    if attach_name:
                        base, ext = os.path.splitext(clean_filename(attach_name))
                        final_attach_name = f"{post_id}_{attachment_counter}{ext}"
                        if base and base != f"{post_id}_{attachment_counter}":
                            final_attach_name = f"{post_id}_{attachment_counter}_{base}{ext}"

                        attach_url = f"https://{api_domain}/data/{attach_path}"
                        files_to_process_for_download.append({
                            'url': attach_url, 'name': final_attach_name,
                            '_is_thumbnail': False, '_source': f'attachment_{idx+1}',
                            '_original_name_for_log': attach_name
                        })
                        attachment_counter += 1

                    else:
                        self.logger(f" ⚠️ Skipping attachment {idx+1}: Missing filename (Path: {attach_path})")
                else:
                    self.logger(f" ⚠️ Skipping invalid attachment entry {idx+1}: {str(attachment)[:100]}")

        if not files_to_process_for_download:
            self.logger(f" No files found to download for post {post_id}.")
            return 0, 0

        self.logger(f" Files identified for download: {len(files_to_process_for_download)}")
        post_download_count = 0
        post_skip_count = 0
        local_processed_filenames = set()
        local_filenames_lock = threading.Lock()

        for file_info in files_to_process_for_download:
            if self.check_cancel(): break
            if self.skip_current_file_flag.is_set():
                original_name_for_log = file_info.get('_original_name_for_log', file_info.get('name', 'unknown_file'))
                self.logger(f"⏭️ File skip requested: {original_name_for_log}")
                post_skip_count += 1
                self.skip_current_file_flag.clear()
                continue

            file_url = file_info.get('url')
            original_filename = file_info.get('name')
            is_thumbnail = file_info.get('_is_thumbnail', False)
            original_name_for_log = file_info.get('_original_name_for_log', original_filename)

            if not file_url or not original_filename:
                self.logger(f"⚠️ Skipping file entry due to missing URL or name: {str(file_info)[:100]}")
                post_skip_count += 1
                continue

            cleaned_save_filename = clean_filename(original_filename)
            if self.skip_words_list:
                filename_lower = cleaned_save_filename.lower()
                file_skipped_by_word = False
                for skip_word in self.skip_words_list:
                    if skip_word.lower() in filename_lower:
                        self.logger(f" -> Skip File (Filename): File '{original_name_for_log}' contains skip word '{skip_word}'.")
                        post_skip_count += 1
                        file_skipped_by_word = True
                        break
                if file_skipped_by_word:
                    continue
            if not self.download_thumbnails:
                file_skipped_by_filter = False
                is_img = is_image(cleaned_save_filename)
                is_vid = is_video(cleaned_save_filename)
                is_zip_file = is_zip(cleaned_save_filename)
                is_rar_file = is_rar(cleaned_save_filename)

                if self.filter_mode == 'image' and not is_img:
                    self.logger(f" -> Filter Skip: '{original_name_for_log}' (Not image/gif)")
                    file_skipped_by_filter = True
                elif self.filter_mode == 'video' and not is_vid:
                    self.logger(f" -> Filter Skip: '{original_name_for_log}' (Not video)")
                    file_skipped_by_filter = True
                elif self.skip_zip and is_zip_file:
                    self.logger(f" -> Pref Skip: '{original_name_for_log}' (Zip)")
                    file_skipped_by_filter = True
                elif self.skip_rar and is_rar_file:
                    self.logger(f" -> Pref Skip: '{original_name_for_log}' (RAR)")
                    file_skipped_by_filter = True

                if file_skipped_by_filter:
                    post_skip_count += 1
                    continue
            file_downloaded_or_exists = False
            for folder_path in valid_folder_paths:
                if self.check_cancel(): break
                try:
                    os.makedirs(folder_path, exist_ok=True)
                except OSError as e:
                    self.logger(f"❌ Error ensuring directory exists {folder_path}: {e}. Skipping path.")
                    continue
                except Exception as e:
                    self.logger(f"❌ Unexpected error creating dir {folder_path}: {e}. Skipping path.")
                    continue

                save_path = os.path.join(folder_path, cleaned_save_filename)
                folder_basename = os.path.basename(folder_path)
                with local_filenames_lock:
                    if os.path.exists(save_path) and os.path.getsize(save_path) > 0:
                        self.logger(f" -> Exists Skip: '{original_name_for_log}' in '{folder_basename}'")
                        post_skip_count += 1
                        file_downloaded_or_exists = True
                        with self.downloaded_files_lock:
                            self.downloaded_files.add(cleaned_save_filename)
                        break
                    elif cleaned_save_filename in local_processed_filenames:
                        self.logger(f" -> Local Skip: '{original_name_for_log}' in '{folder_basename}' (already processed in this post)")
                        post_skip_count += 1
                        file_downloaded_or_exists = True
                        with self.downloaded_files_lock:
                            self.downloaded_files.add(cleaned_save_filename)
                        break
                with self.downloaded_files_lock:
                    if cleaned_save_filename in self.downloaded_files:
                        self.logger(f" -> Global Filename Skip: '{original_name_for_log}' in '{folder_basename}' (filename already downloaded globally)")
                        post_skip_count += 1
                        file_downloaded_or_exists = True
                        break
                try:
                    self.logger(f"⬇️ Downloading '{original_name_for_log}' to '{folder_basename}'...")
                    self.current_download_path = save_path
                    self.is_downloading_file = True
                    self.signals.file_download_status_signal.emit(True)
                    response = requests.get(file_url, headers=headers, timeout=(15, 300), stream=True)
                    response.raise_for_status()
                    file_content_bytes = BytesIO()
                    downloaded_size = 0
                    chunk_count = 0
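                    # Hash the stream while it downloads so duplicate file content
                    # can be skipped later without re-reading the buffered bytes.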
                    md5_hash = hashlib.md5()

                    for chunk in response.iter_content(chunk_size=32 * 1024):
                        if self.check_cancel(): break
                        if self.skip_current_file_flag.is_set(): break

                        if chunk:
                            file_content_bytes.write(chunk)
                            md5_hash.update(chunk)
                            downloaded_size += len(chunk)
                            chunk_count += 1
                    if self.check_cancel() or self.skip_current_file_flag.is_set():
                        self.logger(f" ⚠️ Download interrupted {'(cancelled)' if self.cancellation_event.is_set() else '(skipped)'} for {original_name_for_log}.")
                        if self.skip_current_file_flag.is_set():
                            post_skip_count += 1
                            self.skip_current_file_flag.clear()
                        break
                    final_save_path = save_path
                    current_filename_for_log = cleaned_save_filename
                    file_content_bytes.seek(0)
                    # Compute the hash up front: it is checked here for content-level
                    # de-duplication and reused below when registering the saved file.
                    calculated_hash = md5_hash.hexdigest()

                    if downloaded_size == 0 and chunk_count > 0:
                        self.logger(f"⚠️ Warning: Downloaded 0 bytes despite receiving chunks for {original_name_for_log}. Skipping save.")
                        post_skip_count += 1
                        break

                    if downloaded_size > 0:
                        with self.downloaded_file_hashes_lock:
                            if calculated_hash in self.downloaded_file_hashes:
                                self.logger(f" -> Content Skip: '{original_name_for_log}' (Hash: {calculated_hash}) already downloaded.")
                                post_skip_count += 1
                                file_downloaded_or_exists = True
                                with self.downloaded_files_lock:
                                    self.downloaded_files.add(cleaned_save_filename)
                                with local_filenames_lock:
                                    local_processed_filenames.add(cleaned_save_filename)
                                break

                    if not file_downloaded_or_exists:
                        final_bytes_to_save = file_content_bytes
                        is_img_for_compress = is_image(cleaned_save_filename)
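                        # Re-encode large images (over ~1.5 MB) to WebP at quality 75,
                        # keeping the result only if it is at least 10% smaller.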
                        if is_img_for_compress and not is_thumbnail and self.compress_images and Image and downloaded_size > 1500 * 1024:
                            self.logger(f" Compressing large image ({downloaded_size / 1024:.2f} KB)...")
                            try:
                                with Image.open(file_content_bytes) as img:
                                    original_format = img.format
                                    if img.mode == 'P': img = img.convert('RGBA')
                                    elif img.mode not in ['RGB', 'RGBA', 'L']: img = img.convert('RGB')

                                    compressed_bytes = BytesIO()
                                    img.save(compressed_bytes, format='WebP', quality=75, method=4)
                                    compressed_size = compressed_bytes.getbuffer().nbytes
                                    if compressed_size < downloaded_size * 0.90:
                                        self.logger(f" Compression success: {compressed_size / 1024:.2f} KB (WebP Q75)")
                                        compressed_bytes.seek(0)
                                        final_bytes_to_save = compressed_bytes
                                        base, _ = os.path.splitext(cleaned_save_filename)
                                        current_filename_for_log = base + '.webp'
                                        final_save_path = os.path.join(folder_path, current_filename_for_log)
                                        self.logger(f" Updated filename: {current_filename_for_log}")
                                    else:
                                        self.logger(f" Compression skipped: WebP not significantly smaller ({compressed_size / 1024:.2f} KB).")
                                        file_content_bytes.seek(0)
                                        final_bytes_to_save = file_content_bytes

                            except Exception as comp_e:
                                self.logger(f"❌ Image compression failed for {original_name_for_log}: {comp_e}. Saving original.")
                                file_content_bytes.seek(0)
                                final_bytes_to_save = file_content_bytes
                                final_save_path = save_path

                        elif is_img_for_compress and not is_thumbnail and self.compress_images:
                            self.logger(f" Skipping compression: Image size ({downloaded_size / 1024:.2f} KB) below threshold.")
                            file_content_bytes.seek(0)
                            final_bytes_to_save = file_content_bytes

                        elif is_thumbnail and downloaded_size > LARGE_THUMBNAIL_THRESHOLD:
                            self.logger(f"⚠️ Downloaded thumbnail '{current_filename_for_log}' ({downloaded_size / 1024:.2f} KB) is large.")
                            file_content_bytes.seek(0)
                            final_bytes_to_save = file_content_bytes
                        else:
                            file_content_bytes.seek(0)
                            final_bytes_to_save = file_content_bytes
                        save_file = False
                        with self.downloaded_files_lock:
                            with local_filenames_lock:
                                if os.path.exists(final_save_path) and os.path.getsize(final_save_path) > 0:
                                    self.logger(f" -> Exists Skip (pre-write): '{current_filename_for_log}' in '{folder_basename}'")
                                    post_skip_count += 1
                                    file_downloaded_or_exists = True
                                elif current_filename_for_log in self.downloaded_files:
                                    self.logger(f" -> Global Skip (pre-write): '{current_filename_for_log}' in '{folder_basename}' (already downloaded globally)")
                                    post_skip_count += 1
                                    file_downloaded_or_exists = True
                                elif current_filename_for_log in local_processed_filenames:
                                    self.logger(f" -> Local Skip (pre-write): '{current_filename_for_log}' in '{folder_basename}' (already processed in this post)")
                                    post_skip_count += 1
                                    file_downloaded_or_exists = True
                                else:
                                    save_file = True

                        if save_file:
                            try:
                                with open(final_save_path, 'wb') as f:
                                    while True:
                                        chunk = final_bytes_to_save.read(64 * 1024)
                                        if not chunk: break
                                        f.write(chunk)
                                with self.downloaded_file_hashes_lock:
                                    self.downloaded_file_hashes.add(calculated_hash)
                                with self.downloaded_files_lock:
                                    self.downloaded_files.add(current_filename_for_log)
                                with local_filenames_lock:
                                    local_processed_filenames.add(current_filename_for_log)

                                post_download_count += 1
                                file_downloaded_or_exists = True
                                self.logger(f"✅ Saved: '{current_filename_for_log}' ({downloaded_size / 1024:.1f} KB, Hash: {calculated_hash[:8]}...) in '{folder_basename}'")
                                time.sleep(0.05)

                            except IOError as io_err:
                                self.logger(f"❌ Save Fail: '{current_filename_for_log}' to '{folder_basename}'. Error: {io_err}")
                                post_skip_count += 1
                                if os.path.exists(final_save_path):
                                    try: os.remove(final_save_path)
                                    except OSError: pass
                                break
                            except Exception as save_err:
                                self.logger(f"❌ Unexpected Save Error: '{current_filename_for_log}' in '{folder_basename}'. Error: {save_err}")
                                post_skip_count += 1
                                if os.path.exists(final_save_path):
                                    try: os.remove(final_save_path)
                                    except OSError: pass
                                break
                        final_bytes_to_save.close()
                        if file_content_bytes is not final_bytes_to_save:
                            file_content_bytes.close()
                        if file_downloaded_or_exists:
                            break
                except requests.exceptions.RequestException as e:
                    self.logger(f"❌ Download Fail: {original_name_for_log}. Error: {e}")
                    post_skip_count += 1
                    break
                except IOError as e:
                    self.logger(f"❌ File I/O Error: {original_name_for_log} in '{folder_basename}'. Error: {e}")
                    post_skip_count += 1
                    break
                except Exception as e:
                    self.logger(f"❌ Unexpected Error during download/save for {original_name_for_log}: {e}")
                    import traceback
                    self.logger(f" Traceback: {traceback.format_exc(limit=2)}")
                    post_skip_count += 1
                    break

                finally:
                    self.is_downloading_file = False
                    self.current_download_path = None
                    self.signals.file_download_status_signal.emit(False)
                if self.check_cancel(): break
            if self.skip_current_file_flag.is_set():
                self.skip_current_file_flag.clear()
        if self.check_cancel():
            self.logger(f" Post {post_id} processing cancelled.")
            return post_download_count, post_skip_count

        self.logger(f" Post {post_id} Summary: Downloaded={post_download_count}, Skipped={post_skip_count}")
        return post_download_count, post_skip_count

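# QThread that drives a whole download session: it pulls post batches from
# download_from_api(), hands each post to a PostProcessorWorker, and reports
# progress and completion back to the UI through Qt signals.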
class DownloadThread(QThread):
    progress_signal = pyqtSignal(str)
    add_character_prompt_signal = pyqtSignal(str)
    file_download_status_signal = pyqtSignal(bool)
    finished_signal = pyqtSignal(int, int, bool)

    def __init__(self, api_url, output_dir, known_names_copy,
                 cancellation_event, single_post_id=None,
                 filter_character=None, filter_mode='all', skip_zip=True, skip_rar=True,
                 use_subfolders=True, custom_folder_name=None, compress_images=False,
                 download_thumbnails=False, service=None, user_id=None,
                 downloaded_files=None, downloaded_files_lock=None,
                 downloaded_file_hashes=None, downloaded_file_hashes_lock=None,
                 skip_words_list=None):
        super().__init__()
        self._init_failed = False
        self.api_url_input = api_url
        self.output_dir = output_dir
        self.known_names = list(known_names_copy)
        self.cancellation_event = cancellation_event
        self.initial_target_post_id = single_post_id
        self.filter_character = filter_character
        self.filter_mode = filter_mode
        self.skip_zip = skip_zip
        self.skip_rar = skip_rar
        self.use_subfolders = use_subfolders
        self.custom_folder_name = custom_folder_name
        self.compress_images = compress_images
        self.download_thumbnails = download_thumbnails
        self.service = service
        self.user_id = user_id
        self.skip_words_list = skip_words_list if skip_words_list is not None else []
        self.downloaded_files = downloaded_files if downloaded_files is not None else set()
        self.downloaded_files_lock = downloaded_files_lock if downloaded_files_lock is not None else threading.Lock()
        self.downloaded_file_hashes = downloaded_file_hashes if downloaded_file_hashes is not None else set()
        self.downloaded_file_hashes_lock = downloaded_file_hashes_lock if downloaded_file_hashes_lock is not None else threading.Lock()
        self.skip_current_file_flag = threading.Event()
        self.is_downloading_file = False
        self.current_download_path = None
        self._add_character_response = None
        self.prompt_mutex = QMutex()
        if not self.service or not self.user_id:
            log_msg = f"❌ Thread Init Error: Missing service ('{self.service}') or user ID ('{self.user_id}') for URL '{api_url}'"
            print(log_msg)
            try: self.progress_signal.emit(log_msg)
            except RuntimeError: pass
            self._init_failed = True

    def run(self):
        if self._init_failed:
            self.finished_signal.emit(0, 0, False)
            return

        unwanted_keywords = {'spicy', 'hd', 'nsfw', '4k', 'preview'}
        grand_total_downloaded = 0
        grand_total_skipped = 0
        cancelled_by_user = False

        try:
            if self.use_subfolders and self.filter_character and not self.custom_folder_name:
                if not self._check_and_prompt_filter_character():
                    self.finished_signal.emit(0, 0, False)
                    return
            worker_signals_adapter = PostProcessorSignals()
            worker_signals_adapter.progress_signal.connect(self.progress_signal)
            worker_signals_adapter.file_download_status_signal.connect(self.file_download_status_signal)

            post_worker = PostProcessorWorker(
                post_data=None,
                download_root=self.output_dir,
                known_names=self.known_names,
                filter_character=self.filter_character,
                unwanted_keywords=unwanted_keywords,
                filter_mode=self.filter_mode,
                skip_zip=self.skip_zip,
                skip_rar=self.skip_rar,
                use_subfolders=self.use_subfolders,
                target_post_id_from_initial_url=self.initial_target_post_id,
                custom_folder_name=self.custom_folder_name,
                compress_images=self.compress_images,
                download_thumbnails=self.download_thumbnails,
                service=self.service,
                user_id=self.user_id,
                api_url_input=self.api_url_input,
                cancellation_event=self.cancellation_event,
                signals=worker_signals_adapter,
                downloaded_files=self.downloaded_files,
                downloaded_files_lock=self.downloaded_files_lock,
                downloaded_file_hashes=self.downloaded_file_hashes,
                downloaded_file_hashes_lock=self.downloaded_file_hashes_lock,
                skip_words_list=self.skip_words_list,
            )
            post_worker.skip_current_file_flag = self.skip_current_file_flag
            self.progress_signal.emit(" Starting post fetch...")
            def thread_logger(msg):
                self.progress_signal.emit(msg)

            post_generator = download_from_api(self.api_url_input, logger=thread_logger)

            for posts_batch in post_generator:
                if self.isInterruptionRequested():
                    self.progress_signal.emit("⚠️ Download cancelled before processing batch.")
                    cancelled_by_user = True
                    break

                for post in posts_batch:
                    if self.isInterruptionRequested():
                        self.progress_signal.emit("⚠️ Download cancelled during post processing.")
                        cancelled_by_user = True
                        break
                    post_worker.post = post
                    try:
                        downloaded, skipped = post_worker.process()
                        grand_total_downloaded += downloaded
                        grand_total_skipped += skipped
                    except Exception as proc_e:
                        post_id_err = post.get('id', 'N/A') if isinstance(post, dict) else 'N/A'
                        self.progress_signal.emit(f"❌ Error processing post {post_id_err}: {proc_e}")
                        import traceback
                        self.progress_signal.emit(traceback.format_exc(limit=2))
                        grand_total_skipped += 1
                    self.msleep(20)

                if cancelled_by_user:
                    break
            if not cancelled_by_user:
                self.progress_signal.emit("✅ Post fetching and processing complete.")

        except Exception as e:
            log_msg = f"\n❌ A critical error occurred in download thread: {e}"
            self.progress_signal.emit(log_msg)
            import traceback
            tb_str = traceback.format_exc()
            self.progress_signal.emit("--- Traceback ---")
            for line in tb_str.splitlines():
                self.progress_signal.emit(" " + line)
            self.progress_signal.emit("--- End Traceback ---")
            cancelled_by_user = False

        finally:
            self.finished_signal.emit(grand_total_downloaded, grand_total_skipped, cancelled_by_user)

    def _check_and_prompt_filter_character(self):
        clean_char_filter = clean_folder_name(self.filter_character.lower())
        known_names_lower = {name.lower() for name in self.known_names}

        if not clean_char_filter:
            self.progress_signal.emit(f"❌ Filter name '{self.filter_character}' is invalid. Aborting.")
            return False

        if self.filter_character.lower() not in known_names_lower:
            self.progress_signal.emit(f"❓ Filter '{self.filter_character}' not found in known list.")
            with QMutexLocker(self.prompt_mutex):
                self._add_character_response = None
            self.add_character_prompt_signal.emit(self.filter_character)
            self.progress_signal.emit(" Waiting for user confirmation to add filter name...")
            while self._add_character_response is None:
                if self.isInterruptionRequested():
                    self.progress_signal.emit("⚠️ Cancelled while waiting for user input on filter name.")
                    return False
                self.msleep(200)
            if self._add_character_response:
                self.progress_signal.emit(f"✅ User confirmed adding '{self.filter_character}'. Continuing.")
                if self.filter_character not in self.known_names:
                    self.known_names.append(self.filter_character)
                return True
            else:
                self.progress_signal.emit(f"❌ User declined to add filter '{self.filter_character}'. Aborting download.")
                return False
        return True

    def skip_file(self):
        if self.isRunning() and self.is_downloading_file:
            self.progress_signal.emit("⏭️ Skip requested for current file.")
            self.skip_current_file_flag.set()
        elif self.isRunning():
            self.progress_signal.emit("ℹ️ Skip requested, but no file download active.")

    def receive_add_character_result(self, result):
        with QMutexLocker(self.prompt_mutex):
            self._add_character_response = result
        self.progress_signal.emit(f" Received prompt response: {'Yes' if result else 'No'}")

    def isInterruptionRequested(self):
        return super().isInterruptionRequested() or self.cancellation_event.is_set()
features.md (new file, 159 lines)
@@ -0,0 +1,159 @@
<h1>Kemono Downloader - Comprehensive Feature Guide</h1>
<p>This guide provides a detailed overview of all user interface elements, input fields, buttons, popups, and functionalities available in the application.</p>
<hr>
<h2>1. Core Concepts & Supported Sites</h2>
<h3>URL Input (🔗)</h3>
<p>This is the primary input field where you specify the content you want to download.</p>
<p><strong>Supported URL Types:</strong></p>
<ul>
<li><strong>Creator URL</strong>: A link to a creator's main page. Downloads all posts from that creator.</li>
<li><strong>Post URL</strong>: A direct link to a specific post. Downloads only that single post.</li>
<li><strong>Batch Command</strong>: Special keywords to trigger bulk downloading from a text file (see the Batch Downloading section).</li>
</ul>
<p><strong>Supported Websites:</strong></p>
<ul>
<li>Kemono (<code>kemono.su</code>, <code>kemono.party</code>, etc.)</li>
<li>Coomer (<code>coomer.su</code>, <code>coomer.party</code>, etc.)</li>
<li>Discord (via the Kemono/Coomer API)</li>
<li>Bunkr</li>
<li>Erome</li>
<li>Saint2.su</li>
<li>nhentai</li>
</ul>
<hr>
<h2>2. Main Download Controls & Inputs</h2>
<h3>Download Location (📁)</h3>
<p>This input defines the main folder where your files will be saved.</p>
<ul>
<li><strong>Browse Button</strong>: Opens a system dialog to choose a folder.</li>
<li><strong>Directory Creation</strong>: If the folder doesn't exist, the app will ask for confirmation to create it.</li>
</ul>
<h3>Filter by Character(s) & Scope</h3>
<p>Used to download content for specific characters or series and organize it into subfolders.</p>
<ul>
<li><strong>Input Field</strong>: Enter comma-separated names (e.g., <code>Tifa, Aerith</code>). Group aliases using parentheses for folder naming (e.g., <code>(Cloud, Zack)</code>); see the sketch after this list.</li>
<li><strong>Scope Button</strong>: Cycles through where to look for name matches:
<ul>
<li><strong>Filter: Title</strong>: Matches names in the post title.</li>
<li><strong>Filter: Files</strong>: Matches names in the filenames.</li>
<li><strong>Filter: Both</strong>: Checks the title first, then filenames.</li>
<li><strong>Filter: Comments</strong>: Checks filenames first, then post comments.</li>
</ul>
</li>
</ul>
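<p>A minimal sketch of how this input could be split into filter groups (an illustration under assumed semantics, not the app's actual parser; <code>parse_character_filters</code> and the "first alias names the folder" rule are hypothetical):</p>
<pre><code>import re

# Hypothetical illustration: parse "Tifa, (Cloud, Zack)" into filter groups.
# A "(a, b)" group keeps all aliases but uses the first one as the folder name.
def parse_character_filters(text):
    groups = []
    for part in re.findall(r'\([^)]*\)|[^,()]+', text):
        part = part.strip()
        if part.startswith('(') and part.endswith(')'):
            aliases = [a.strip() for a in part[1:-1].split(',') if a.strip()]
            if aliases:
                groups.append({'folder': aliases[0], 'aliases': aliases})
        elif part:
            groups.append({'folder': part, 'aliases': [part]})
    return groups

print(parse_character_filters("Tifa, (Cloud, Zack)"))
# [{'folder': 'Tifa', 'aliases': ['Tifa']}, {'folder': 'Cloud', 'aliases': ['Cloud', 'Zack']}]
</code></pre>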
<h3>Skip with Words & Scope</h3>
<p>Prevents downloading content based on keywords or file size.</p>
<ul>
<li><strong>Input Field</strong>: Enter comma-separated keywords (e.g., <code>WIP, sketch, preview</code>).</li>
<li><strong>Skip by Size</strong>: Enter a number in square brackets to skip any file <strong>smaller than</strong> that size in MB. For example, <code>WIP, [200]</code> skips files with "WIP" in the name AND any file smaller than 200 MB; a parsing sketch follows this list.</li>
<li><strong>Scope Button</strong>: Cycles through where to apply keyword filters:
<ul>
<li><strong>Scope: Posts</strong>: Skips the entire post if the title matches.</li>
<li><strong>Scope: Files</strong>: Skips individual files if the filename matches.</li>
<li><strong>Scope: Both</strong>: Checks the post title first, then individual files.</li>
</ul>
</li>
</ul>
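<p>A minimal sketch of how the combined input could be separated into keywords and a size threshold (an illustration under the syntax described above, not the app's actual implementation; <code>parse_skip_input</code> and <code>should_skip</code> are hypothetical helpers):</p>
<pre><code># Hypothetical illustration: split "WIP, [200]" into skip words and a size floor.
def parse_skip_input(text):
    skip_words, min_size_mb = [], None
    for part in (p.strip() for p in text.split(',')):
        if part.startswith('[') and part.endswith(']') and part[1:-1].strip().isdigit():
            min_size_mb = int(part[1:-1])
        elif part:
            skip_words.append(part.lower())
    return skip_words, min_size_mb

words, floor_mb = parse_skip_input("WIP, [200]")
print(words, floor_mb)  # ['wip'] 200

def should_skip(filename, size_bytes):
    name = filename.lower()
    if any(w in name for w in words):
        return True
    return floor_mb is not None and size_bytes &lt; floor_mb * 1024 * 1024
</code></pre>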
<h3>Remove Words from Name (✂️)</h3>
|
||||
<p>Enter comma-separated words to remove from final filenames (e.g., <code>patreon, [HD]</code>). This helps clean up file naming.</p>
|
||||
<hr>
|
||||
<h2>3. Primary Download Modes (Filter File Section)</h2>
|
||||
<p>This section uses radio buttons to set the main download mode. Only one can be active at a time.</p>
|
||||
<ul>
|
||||
<li><strong>All</strong>: Default mode. Downloads every file and attachment.</li>
|
||||
<li><strong>Images/GIFs</strong>: Downloads only common image formats.</li>
|
||||
<li><strong>Videos</strong>: Downloads only common video formats.</li>
|
||||
<li><strong>Only Archives</strong>: Downloads only <code>.zip</code>, <code>.rar</code>, etc.</li>
|
||||
<li><strong>Only Audio</strong>: Downloads only common audio formats.</li>
|
||||
<li><strong>Only Links</strong>: Extracts external hyperlinks (e.g., Mega, Google Drive) from post descriptions instead of downloading files. <strong>This mode unlocks special features</strong> (see section 6).</li>
|
||||
<li><strong>More</strong>: Opens a dialog to download text-based content (an export sketch follows this list).
|
||||
<ul>
|
||||
<li><strong>Scope</strong>: Choose to extract text from the post description or comments.</li>
|
||||
<li><strong>Export Format</strong>: Save as PDF, DOCX, or TXT.</li>
|
||||
<li><strong>Single PDF</strong>: Compile all text from the session into one consolidated PDF file.</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
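<p><em>Illustration:</em> the Installation section lists <code>fpdf</code> and <code>python-docx</code> as optional dependencies, which suggests how these exports might be produced. A sketch under that assumption, not the app's actual code:</p>
<pre><code># Minimal sketch: export extracted post text as TXT, DOCX, or PDF.
from docx import Document  # python-docx (optional dependency)
from fpdf import FPDF      # fpdf/fpdf2 (optional dependency)

def export_text(text, path, fmt):
    if fmt == "txt":
        with open(path, "w", encoding="utf-8") as f:
            f.write(text)
    elif fmt == "docx":
        doc = Document()
        doc.add_paragraph(text)
        doc.save(path)
    elif fmt == "pdf":
        pdf = FPDF()
        pdf.add_page()
        pdf.set_font("helvetica", size=12)
        pdf.multi_cell(0, 10, text)
        pdf.output(path)
</code></pre>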
|
||||
<hr>
|
||||
<h2>4. Advanced Features & Toggles (Checkboxes)</h2>
|
||||
<h3>Folder Organization</h3>
|
||||
<ul>
|
||||
<li><strong>Separate folders by Known.txt</strong>: Automatically organizes downloads into subfolders based on name matches from your <code>Known.txt</code> list or the "Filter by Character(s)" input.</li>
|
||||
<li><strong>Subfolder per post</strong>: Creates a unique folder for each post, named after the post's title. This prevents files from different posts from mixing.</li>
|
||||
<li><strong>Date prefix</strong>: (Only available with "Subfolder per post") Prepends the post date to the folder name (e.g., <code>2025-08-03 My Post Title</code>) for chronological sorting; see the folder-name sketch after this list.</li>
|
||||
</ul>
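<p><em>Illustration:</em> a minimal sketch of per-post folder naming with the optional date prefix; the sanitizing rule and the <code>post_folder</code> helper are illustrative assumptions.</p>
<pre><code># Minimal sketch: build a per-post folder name, optionally date-prefixed.
import re

def post_folder(title, date=None, date_prefix=False):
    clean = re.sub(r'[\\/*?:"&lt;&gt;|]', "", title).strip()  # drop illegal filename chars
    return f"{date} {clean}" if date_prefix and date else clean

print(post_folder("My Post Title", "2025-08-03", date_prefix=True))
# 2025-08-03 My Post Title
</code></pre>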
|
||||
<h3>Special Modes</h3>
|
||||
<ul>
|
||||
<li><strong>⭐ Favorite Mode</strong>: Switches the UI to download from your personal favorites list instead of using the URL input.</li>
|
||||
<li><strong>Manga/Comic mode</strong>: Sorts a creator's posts from oldest to newest before downloading, ensuring correct page order. A scope button appears to control the filename style (e.g., post title, date, or a global number); see the ordering sketch after this list.</li>
|
||||
</ul>
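<p><em>Illustration:</em> a minimal sketch of the oldest-first ordering with a global counter. The <code>published</code>/<code>added</code> fields mirror the API data used elsewhere in this codebase; the filename pattern is an assumption.</p>
<pre><code># Minimal sketch: Manga/Comic mode ordering with global file numbering.
posts = [
    {"published": "2025-01-02", "files": [{"name": "page.png"}]},
    {"published": "2025-01-01", "files": [{"name": "cover.png"}]},
]

ordered = sorted(posts, key=lambda p: p.get("published") or p.get("added") or "")

counter = 0
for post in ordered:
    for file_info in post["files"]:
        counter += 1
        print(f"{counter:04d}_{file_info['name']}")  # 0001_cover.png, 0002_page.png
</code></pre>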
|
||||
<h3>File Handling</h3>
|
||||
<ul>
|
||||
<li><strong>Skip Archives</strong>: Ignores <code>.zip</code> and <code>.rar</code> files during downloads.</li>
|
||||
<li><strong>Download Thumbnail Only</strong>: Saves only the small preview images instead of full-resolution files.</li>
|
||||
<li><strong>Scan Content for Images</strong>: Parses post HTML to find embedded images that may not be listed in the API data.</li>
|
||||
<li><strong>Compress to WebP</strong>: Converts large images (over 1.5 MB) to the space-saving WebP format.</li>
|
||||
<li><strong>Keep Duplicates</strong>: Opens a dialog to control how duplicate files are handled (skip by default, keep all, or keep a specific number of copies); a hashing sketch follows this list.</li>
|
||||
</ul>
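<p><em>Illustration:</em> duplicate handling is described as content-based, so a hash comparison like the sketch below is one plausible approach; SHA-256 is an assumption, not the app's documented algorithm.</p>
<pre><code># Minimal sketch: skip a download when identical content was already saved.
import hashlib

def file_digest(path, chunk_size=1024 * 1024):
    h = hashlib.sha256()  # assumed algorithm
    with open(path, 'rb') as f:
        for chunk in iter(lambda: f.read(chunk_size), b''):
            h.update(chunk)
    return h.hexdigest()

def is_duplicate(new_path, seen_hashes):
    digest = file_digest(new_path)
    if digest in seen_hashes:
        return True          # same bytes already downloaded -> skip
    seen_hashes.add(digest)
    return False
</code></pre>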
|
||||
<h3>General Functionality</h3>
|
||||
<ul>
|
||||
<li><strong>Use cookie</strong>: Enables login-based access. You can paste a cookie string or browse for a <code>cookies.txt</code> file (see the loading sketch after this list).</li>
|
||||
<li><strong>Use Multithreading</strong>: Enables parallel processing of posts for faster downloads. You can set the number of concurrent worker threads.</li>
|
||||
<li><strong>Show external links in log</strong>: Opens a secondary log panel that displays external links found in post descriptions.</li>
|
||||
</ul>
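<p><em>Illustration:</em> a minimal sketch of loading a Netscape-format <code>cookies.txt</code> with Python's standard library; the app itself may parse cookies differently.</p>
<pre><code># Minimal sketch: load cookies.txt and attach it to a request.
from http.cookiejar import MozillaCookieJar
import requests

jar = MozillaCookieJar("cookies.txt")
jar.load(ignore_discard=True, ignore_expires=True)

# requests accepts any cookielib jar via the cookies= argument
response = requests.get("https://kemono.su/", cookies=jar)
</code></pre>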
|
||||
<hr>
|
||||
<h2>5. Specialized Downloaders & Batch Mode</h2>
|
||||
<h3>Discord Features</h3>
|
||||
<ul>
|
||||
<li>When a Discord URL is entered, a <strong>Scope</strong> button appears.
|
||||
<ul>
|
||||
<li><strong>Scope: Files</strong>: Downloads all files from the channel/server.</li>
|
||||
<li><strong>Scope: Messages</strong>: Saves the entire message history of the channel/server as a formatted PDF.</li>
|
||||
</ul>
|
||||
</li>
|
||||
<li>A <strong>"Save as PDF"</strong> button also appears as a shortcut for the message saving feature.</li>
|
||||
</ul>
|
||||
<h3>Batch Downloading (<code>nhentai</code> & <code>saint2.su</code>)</h3>
|
||||
<p>This feature allows you to download hundreds of galleries or videos from a simple text file.</p>
|
||||
<ol>
|
||||
<li>In the <code>appdata</code> folder, create <code>nhentai.txt</code> or <code>saint2.su.txt</code>.</li>
|
||||
<li>Add one full URL per line to the corresponding file (see the example below).</li>
|
||||
<li>In the app's URL input, type either <code>nhentai.net</code> or <code>saint2.su</code> and click "Start Download".</li>
|
||||
<li>The app will read the file and process every URL in the queue.</li>
|
||||
</ol>
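<p>For example, <code>appdata/nhentai.txt</code> might look like this (the gallery IDs below are placeholders):</p>
<pre><code>https://nhentai.net/g/123456/
https://nhentai.net/g/234567/
https://nhentai.net/g/345678/
</code></pre>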
|
||||
<hr>
|
||||
<h2>6. "Only Links" Mode: Extraction & Direct Download</h2>
|
||||
<p>When you select the <strong>"Only Links"</strong> radio button, the application's behavior changes significantly.</p>
|
||||
<ul>
|
||||
<li><strong>Link Extraction</strong>: Instead of downloading files, the main log panel will fill with all external links found (Mega, Google Drive, Dropbox, etc.); a minimal extraction sketch follows this list.</li>
|
||||
<li><strong>Export Links</strong>: An "Export Links" button appears, allowing you to save the full list of extracted URLs to a <code>.txt</code> file.</li>
|
||||
<li><strong>Direct Cloud Download</strong>: A <strong>"Download"</strong> button appears next to the export button.
|
||||
<ul>
|
||||
<li>Clicking this opens a new dialog listing all supported cloud links (Mega, G-Drive, Dropbox).</li>
|
||||
<li>You can select which files you want to download from this list.</li>
|
||||
<li>The application will then download the selected files directly from the cloud service to your chosen download location.</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
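<p><em>Illustration:</em> a minimal sketch of pulling cloud links out of a post's HTML description. The host list matches the services named above; the app's real extractor may differ.</p>
<pre><code># Minimal sketch: extract supported cloud links from post HTML.
import re

CLOUD_HOSTS = ("mega.nz", "drive.google.com", "dropbox.com")

def extract_links(post_html):
    urls = re.findall(r'https?://[^\s"\'&lt;&gt;]+', post_html)
    return [u for u in urls if any(host in u for host in CLOUD_HOSTS)]
</code></pre>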
|
||||
<hr>
|
||||
<h2>7. Session & Process Management</h2>
|
||||
<h3>Main Action Buttons</h3>
|
||||
<ul>
|
||||
<li><strong>Start Download</strong>: Begins the download process. This button's text changes contextually (e.g., "Extract Links", "Check for Updates").</li>
|
||||
<li><strong>Pause / Resume</strong>: Pauses or resumes the ongoing download. When paused, you can safely change some settings.</li>
|
||||
<li><strong>Cancel & Reset UI</strong>: Stops the current download and performs a soft reset of the UI, preserving your URL and download location.</li>
|
||||
</ul>
|
||||
<h3>Restore Interrupted Download</h3>
|
||||
<p>If the application is closed unexpectedly during a download, it will save its progress; a minimal sketch of this kind of state persistence follows the list below.</p>
|
||||
<ul>
|
||||
<li>On the next launch, the UI will be pre-filled with the settings from the interrupted session.</li>
|
||||
<li>The <strong>Pause</strong> button will change to <strong>"🔄 Restore Download"</strong>. Clicking it will resume the download exactly where it left off, skipping already processed posts.</li>
|
||||
<li>The <strong>Cancel</strong> button will change to <strong>"🗑️ Discard Session"</strong>, allowing you to clear the saved state and start fresh.</li>
|
||||
</ul>
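<p><em>Illustration:</em> a minimal sketch of how interrupted-session state could be persisted; the file name and keys are illustrative, not the app's real format.</p>
<pre><code># Minimal sketch: persist progress so a crashed session can be restored.
import json, os

STATE_FILE = "session.json"  # illustrative name, not the app's real file

def save_state(url, download_dir, processed_post_ids):
    with open(STATE_FILE, "w", encoding="utf-8") as f:
        json.dump({"url": url, "dir": download_dir,
                   "processed": sorted(processed_post_ids)}, f)

def load_state():
    if not os.path.exists(STATE_FILE):
        return None
    with open(STATE_FILE, encoding="utf-8") as f:
        return json.load(f)  # pre-fill the UI and skip processed post IDs
</code></pre>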
|
||||
<h3>Other UI Controls</h3>
|
||||
<ul>
|
||||
<li><strong>Error Button</strong>: Shows a count of failed files. Clicking it opens a dialog where you can view, export, or retry the failed downloads.</li>
|
||||
<li><strong>History Button</strong>: Shows a log of recently downloaded files and processed posts.</li>
|
||||
<li><strong>Settings Button</strong>: Opens the settings dialog where you can change the theme, language, and <strong>check for application updates</strong>.</li>
|
||||
<li><strong>Support Button</strong>: Opens a dialog with links to the project's source code and developer support pages.</li>
|
||||
</ul>
|
||||
93
note.md
Normal file
@@ -0,0 +1,93 @@
|
||||
# 🛠️ KemonoDownloader Refactor Notes
|
||||
|
||||
## What's Going On
|
||||
|
||||
This project used to be one giant, messy script. It worked, but it was hard to maintain or expand, so I cleaned it up and split everything into smaller, more manageable files to make it easier to read, update, and extend later.
|
||||
|
||||
**⚠️ Heads up:** Since I'm still in the middle of refactoring, some features may be broken right now. The layout is better, but I still need to update some parts of the logic and dependencies.
|
||||
|
||||
---
|
||||
|
||||
## 📁 Folder Layout
|
||||
|
||||
```
|
||||
KemonoDownloader/
|
||||
├── main.py # Where the app starts
|
||||
├── assets/ # Icons and other static files
|
||||
│ └── Kemono.ico
|
||||
├── data/
|
||||
│ └── creators.json
|
||||
├── logs/ # Error logs and other output
|
||||
│ └── uncaught_exceptions.log
|
||||
└── src/ # Main code lives here
|
||||
├── __init__.py
|
||||
├── ui/ # UI-related code
|
||||
│ ├── __init__.py
|
||||
│ ├── main_window.py
|
||||
│ └── dialogs/
|
||||
│ ├── __init__.py
|
||||
│ ├── ConfirmAddAllDialog.py
|
||||
│ ├── CookieHelpDialog.py
|
||||
│ ├── DownloadExtractedLinksDialog.py
|
||||
│ ├── DownloadFinishedDialog.py
|
||||
│ └── ... (more dialogs)
|
||||
├── core/ # The brain of the app
|
||||
│ ├── __init__.py
|
||||
│ ├── manager.py
|
||||
│ ├── workers.py
|
||||
│ └── api_client.py
|
||||
├── services/ # Downloading stuff happens here
|
||||
│ ├── __init__.py
|
||||
│ ├── drive_downloader.py
|
||||
│ └── multipart_downloader.py
|
||||
├── utils/ # Helper functions
|
||||
│ ├── __init__.py
|
||||
│ ├── file_utils.py
|
||||
│ ├── network_utils.py
|
||||
│ └── text_utils.py
|
||||
├── config/ # Constants and settings
|
||||
│ ├── __init__.py
|
||||
│ └── constants.py
|
||||
└── i18n/ # Translations (if needed)
|
||||
├── __init__.py
|
||||
└── translator.py
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## ✅ Why Bother Refactoring?
|
||||
|
||||
- Everything’s now broken into smaller parts, so it’s easier to work with.
|
||||
- Easier to test, fix, and add stuff.
|
||||
- Prepping the project to grow without becoming a mess again.
|
||||
- Separated the UI from the app logic so they don’t get tangled.
|
||||
|
||||
---
|
||||
|
||||
## 🚧 What’s Still Broken
|
||||
|
||||
- Some features don’t work yet or haven’t been tested since the changes.
|
||||
- Still need to:
|
||||
- Reconnect the UI to the updated logic.
|
||||
- Move over some of the old script code into proper modules.
|
||||
- Make sure settings and cookies work properly in the new setup.
|
||||
|
||||
---
|
||||
|
||||
## 📌 To-Do List
|
||||
|
||||
- Test all the dialogs and UI stuff.
|
||||
- Make sure the download services and API calls are working.
|
||||
- Reconnect the UI with the new logic in `core/manager.py`.
|
||||
- Add more logging and maybe some unit tests too.
|
||||
|
||||
---
|
||||
|
||||
## 🐞 Found a Bug?
|
||||
|
||||
If something's busted:
|
||||
|
||||
- Feel free to open an issue if you're using this.
|
||||
- Or just message me. Feedback helps a lot while I’m still figuring things out.
|
||||
|
||||
Thanks for checking it out! Still a work in progress, but getting there.
|
||||
229
readme.md
@@ -1,108 +1,151 @@
|
||||
# Kemono Downloader
|
||||
<h1 align="center">Kemono Downloader</h1>
|
||||
|
||||
A simple, multi-platform GUI application built with PyQt5 to download content from Kemono.su or Coomer.party creator pages or specific posts, with options for filtering and organizing downloads.
|
||||
<p>A powerful, feature-rich GUI application for downloading content from a wide array of sites, including <strong>Kemono</strong>, <strong>Coomer</strong>, <strong>Bunkr</strong>, <strong>Erome</strong>, <strong>Saint2.su</strong>, and <strong>nhentai</strong>.</p>
|
||||
<p>Built with PyQt5, this tool is designed for users who want deep filtering capabilities, customizable folder structures, efficient downloads, and intelligent automation — all within a modern and user-friendly graphical interface.</p>
|
||||
|
||||
## Features
|
||||
<div align="center">
|
||||
<a href="features.md"><img src="https://img.shields.io/badge/📚%20Full%20Feature%20List-FFD700?style=for-the-badge&logoColor=black&color=FFD700" alt="Full Feature List"></a>
|
||||
<a href="LICENSE"><img src="https://img.shields.io/badge/📝%20License-90EE90?style=for-the-badge&logoColor=black&color=90EE90" alt="License"></a>
|
||||
</div>
|
||||
|
||||
* **GUI Interface:** Easy-to-use graphical interface.
|
||||
* **URL Support:** Download from a creator's main page (paginated) or a specific post URL from Kemono or Coomer sites.
|
||||
* **Download Location:** Select your desired output directory.
|
||||
* **Subfolder Organization:**
|
||||
* Organize downloads into folders based on character/artist names found in post titles (using your "Known Names" list).
|
||||
* Option to create a custom folder for single post downloads.
|
||||
* Automatic folder naming based on post title if no known names are matched.
|
||||
* **Known Names List:** Manage a persistent list of known names (artists, characters, series) for improved folder organization and filtering.
|
||||
* **Content Filtering:**
|
||||
* **Character/Name Filter:** Only download posts where the specified known name is found in the title.
|
||||
* **File Type Filter:** Download All Files, Images/GIFs Only, or Videos Only.
|
||||
* **Skip Words Filter:** Specify a list of comma-separated words to skip posts or files if these words appear in their titles or filenames.
|
||||
* **Archive Skipping:** Options to skip `.zip` and `.rar` files (enabled by default).
|
||||
* **Image Compression:** Optionally compress large images (larger than 1.5MB) to WebP format to save space (requires Pillow library).
|
||||
* **Thumbnail Downloading:** Option to download thumbnails. (Note: The previous local API method for enhanced thumbnail fetching has been removed. Thumbnail availability might depend on the source.)
|
||||
* **Duplicate Prevention:**
|
||||
* Avoids re-downloading files with the same content hash.
|
||||
* Checks for existing filenames in the target directory.
|
||||
* **Multithreading:** Utilizes multithreading for faster downloads from full creator pages (single posts are processed in a single thread).
|
||||
* **Progress Log:** View detailed download progress, status messages, and errors.
|
||||
* **Dark Theme:** Built-in dark theme for comfortable use.
|
||||
* **Download Management:**
|
||||
* Ability to cancel an ongoing download process.
|
||||
* Option to skip the specific file currently being downloaded (in single-thread mode).
|
||||
* **Persistent Configuration:** Saves the "Known Names" list to a local file.
|
||||
<h2>Core Capabilities Overview</h2>
|
||||
<h3>High-Performance & Resilient Downloading</h3>
|
||||
<ul>
|
||||
<li><strong>Multi-threading:</strong> Processes multiple posts simultaneously to greatly accelerate downloads from large creator profiles.</li>
|
||||
<li><strong>Multi-part Downloading:</strong> Splits large files into chunks and downloads them in parallel to maximize speed, as sketched below.</li>
|
||||
<li><strong>Session Management:</strong> Supports pausing, resuming, and <strong>restoring downloads</strong> after crashes or interruptions, so you never lose your progress.</li>
|
||||
</ul>
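<p><em>Illustration:</em> a minimal sketch of multi-part downloading with HTTP <code>Range</code> requests, assuming the server supports them. The 10 MB floor and 15-part cap mirror <code>src/config/constants.py</code>; the app's actual implementation lives in <code>src/services/multipart_downloader.py</code>.</p>
<pre><code># Minimal sketch, not the app's real code: split a file into byte ranges,
# fetch them in parallel, and reassemble them in order.
import requests
from concurrent.futures import ThreadPoolExecutor

MIN_SIZE = 10 * 1024 * 1024   # mirrors MIN_SIZE_FOR_MULTIPART_DOWNLOAD
MAX_PARTS = 15                # mirrors MAX_PARTS_FOR_MULTIPART_DOWNLOAD

def download_multipart(url, total_size):
    parts = min(MAX_PARTS, max(1, total_size // MIN_SIZE))
    chunk = total_size // parts

    def fetch(i):
        start = i * chunk
        end = total_size - 1 if i == parts - 1 else start + chunk - 1
        resp = requests.get(url, headers={"Range": f"bytes={start}-{end}"}, timeout=60)
        resp.raise_for_status()
        return resp.content

    with ThreadPoolExecutor(max_workers=parts) as pool:
        return b"".join(pool.map(fetch, range(parts)))
</code></pre>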
|
||||
<h3>Expanded Site Support</h3>
|
||||
<ul>
|
||||
<li><strong>Direct Downloading:</strong> Full support for Kemono, Coomer, Bunkr, Erome, Saint2.su, and nhentai.</li>
|
||||
<li><strong>Batch Mode:</strong> Download hundreds of URLs at once from <code>nhentai.txt</code> or <code>saint2.su.txt</code> files.</li>
|
||||
<li><strong>Discord Support:</strong> Download files or save entire channel histories as PDFs directly through the API.</li>
|
||||
</ul>
|
||||
<h3>Advanced Filtering & Content Control</h3>
|
||||
<ul>
|
||||
<li><strong>Content Type Filtering:</strong> Select whether to download all files or limit to images, videos, audio, or archives only.</li>
|
||||
<li><strong>Keyword Skipping:</strong> Automatically skips posts or files containing certain keywords (e.g., "WIP", "sketch").</li>
|
||||
<li><strong>Skip by Size:</strong> Avoid small files by setting a minimum size threshold in MB (e.g., <code>[200]</code>).</li>
|
||||
<li><strong>Character Filtering:</strong> Restricts downloads to posts that match specific character or series names, with scope controls for title, filename, or comments.</li>
|
||||
</ul>
|
||||
<h3>Intelligent File Organization</h3>
|
||||
<ul>
|
||||
<li><strong>Automated Subfolders:</strong> Automatically organizes downloaded files into subdirectories based on character names or per post.</li>
|
||||
<li><strong>Advanced File Renaming:</strong> Flexible renaming options, especially in Manga Mode, including by post title, date, sequential numbering, or post ID.</li>
|
||||
<li><strong>Filename Cleaning:</strong> Automatically removes unwanted text from filenames.</li>
|
||||
</ul>
|
||||
<h3>Specialized Modes</h3>
|
||||
<ul>
|
||||
<li><strong>Renaming Mode:</strong> Sorts posts chronologically before downloading to ensure pages appear in the correct sequence.</li>
|
||||
<li><strong>Favorite Mode:</strong> Connects to your account and downloads from your favorites list (artists or posts).</li>
|
||||
<li><strong>Link Extraction Mode:</strong> Extracts external links (Mega, Google Drive) from posts for export or <strong>direct in-app downloading</strong>.</li>
|
||||
<li><strong>Text Extraction Mode:</strong> Saves post descriptions or comment sections as <code>PDF</code>, <code>DOCX</code>, or <code>TXT</code> files.</li>
|
||||
</ul>
|
||||
<h3>Utility & Advanced Features</h3>
|
||||
<ul>
|
||||
<li><strong>In-App Updater:</strong> Check for new versions directly from the settings menu.</li>
|
||||
<li><strong>Cookie Support:</strong> Enables access to subscriber-only content via browser session cookies.</li>
|
||||
<li><strong>Duplicate Detection:</strong> Prevents saving duplicate files using content-based comparison, with configurable limits.</li>
|
||||
<li><strong>Image Compression:</strong> Automatically converts large images to <code>.webp</code> to reduce disk usage.</li>
|
||||
<li><strong>Creator Management:</strong> Built-in creator browser and update checker for downloading only new posts from saved profiles.</li>
|
||||
<li><strong>Error Handling:</strong> Tracks failed downloads and provides a retry dialog with options to export or redownload missing files.</li>
|
||||
</ul>
|
||||
<section aria-labelledby="supported-sites">
|
||||
<h2 id="supported-sites">Supported Sites</h2>
|
||||
|
||||
## Prerequisites
|
||||
<h3>Main Platforms</h3>
|
||||
<p>
|
||||
The downloader is primarily built to archive content from the platforms below.
|
||||
</p>
|
||||
<ul>
|
||||
<li>
|
||||
<strong>Kemono & Coomer</strong> — Core supported sites; download posts and files from creators on services such as
|
||||
<em>Patreon, Fanbox, OnlyFans, Fansly</em>, and similar platforms.
|
||||
</li>
|
||||
<li>
|
||||
<strong>Discord</strong> — Two modes for a channel URL:
|
||||
<ul>
|
||||
<li>Download all files and attachments.</li>
|
||||
<li>Save the entire message history as a formatted PDF.</li>
|
||||
</ul>
|
||||
</li>
|
||||
</ul>
|
||||
|
||||
* Python 3.6 or higher
|
||||
* `pip` package installer
|
||||
<hr>
|
||||
|
||||
## Installation
|
||||
<h3>Specialized Site Support</h3>
|
||||
<p>Paste a link from any of the following and the app will handle the download automatically:</p>
|
||||
|
||||
1. Clone or download this repository/script to your local machine.
|
||||
2. Navigate to the script's directory in your terminal or command prompt.
|
||||
3. Install the required Python libraries:
|
||||
```bash
|
||||
pip install PyQt5 requests Pillow
|
||||
```
|
||||
*(Pillow is required for image compression and potentially for basic image handling.)*
|
||||
<details>
|
||||
<summary>Supported specialized sites (click to expand)</summary>
|
||||
<ul>
|
||||
<li>AllPornComic</li>
|
||||
<li>Bunkr</li>
|
||||
<li>Erome</li>
|
||||
<li>Fap-Nation</li>
|
||||
<li>Hentai2Read</li>
|
||||
<li>nhentai</li>
|
||||
<li>Pixeldrain</li>
|
||||
<li>Saint2</li>
|
||||
<li>Toonily</li>
|
||||
</ul>
|
||||
</details>
|
||||
|
||||
## How to Run
|
||||
<hr>
|
||||
|
||||
1. Make sure you have followed the installation steps.
|
||||
2. Open your terminal or command prompt and navigate to the script's directory.
|
||||
3. Run the script using Python:
|
||||
```bash
|
||||
python main.py
|
||||
```
|
||||
<h3>Direct File Hosting</h3>
|
||||
<p>
|
||||
You can paste direct links from these file-hosting services to download content without using the
|
||||
<code>"Only Links"</code> mode:
|
||||
</p>
|
||||
<ul>
|
||||
<li>Dropbox</li>
|
||||
<li>Gofile</li>
|
||||
<li>Google Drive</li>
|
||||
<li>Mega</li>
|
||||
</ul>
|
||||
</section>
|
||||
|
||||
## How to Use
|
||||
<h2>💻 Installation</h2>
|
||||
<h3>Requirements</h3>
|
||||
<ul>
|
||||
<li>Python 3.6 or higher</li>
|
||||
<li>pip (Python package installer)</li>
|
||||
</ul>
|
||||
<h3>Install Dependencies</h3>
|
||||
<p>Required:</p>
<pre><code>pip install PyQt5 requests packaging cloudscraper bs4 pycryptodome
|
||||
</code></pre>
|
||||
|
||||
1. **URL Input:** Enter the URL of the Kemono/Coomer creator page (e.g., `https://kemono.su/patreon/user/12345`) or a specific post (e.g., `https://kemono.su/patreon/user/12345/post/67890`) into the "Kemono Creator/Post URL" field.
|
||||
2. **Download Location:** Use the "Browse" button to select the root directory where you want to save the downloaded content.
|
||||
3. **Custom Folder Name (Single Post Only):** If downloading a single post and "Separate Folders" is enabled, you can specify a custom folder name for that post's content.
|
||||
4. **Filter by Show/Character Name (Optional):** If "Separate Folders" is enabled, enter a name from your "Known Names" list. Only posts with titles matching this name will be downloaded into a folder named accordingly. If empty, the script will try to match any known name or derive a folder name from the post title.
|
||||
5. **Skip Posts/Files with Words:** Enter comma-separated words (e.g., `WIP, sketch, preview`). Posts or files containing these words in their title/filename will be skipped.
|
||||
6. **File Type Filter:**
|
||||
* **All:** Downloads all files.
|
||||
* **Images/GIFs:** Downloads common image formats and GIFs.
|
||||
* **Videos:** Downloads common video formats.
|
||||
7. **Options (Checkboxes):**
|
||||
* **Separate Folders by Name/Title:** Enables creation of subfolders based on known names or post titles. Controls visibility of "Filter by Show/Character Name" and "Custom Folder Name". (Default: On)
|
||||
* **Download Thumbnails Only:** Attempts to download only thumbnails for posts. (Default: Off)
|
||||
* **Skip .zip / Skip .rar:** Prevents downloading of these archive types. (Default: On)
|
||||
* **Compress Large Images (to WebP):** Compresses images larger than 1.5MB. (Default: Off)
|
||||
* **Use Multithreading:** Enables faster downloads for full creator pages. (Default: On)
|
||||
8. **Known Names List:**
|
||||
* The list on the left ("Known Shows/Characters") displays names used for folder organization and filtering. This list is saved in `Known.txt`.
|
||||
* Use the input field below the list and the "➕ Add" button to add new names.
|
||||
* Select names and click "🗑️ Delete Selected" to remove them.
|
||||
* A search bar above the list allows you to filter the displayed names.
|
||||
9. **Start Download:** Click "⬇️ Start Download" to begin.
|
||||
10. **Cancel / Skip:**
|
||||
* **❌ Cancel:** Stops the entire download process.
|
||||
* **⏭️ Skip Current File:** (Only in single-thread mode during file download) Skips the currently downloading file and moves to the next.
|
||||
11. **Progress Log:** The area on the right shows detailed logs of the download process, including fetched posts, saved files, skips, and errors.
|
||||
<p>Optional (Google Drive downloads, image compression, and PDF/DOCX export):</p>
<pre><code>pip install gdown pillow fpdf python-docx
|
||||
</code></pre>
|
||||
|
||||
## Building an Executable (Optional)
|
||||
<h3>Running the Application</h3>
|
||||
<p>Navigate to the application's directory in your terminal and run:</p>
|
||||
<pre><code>python main.py
|
||||
</code></pre>
|
||||
<h2>Contribution</h2>
|
||||
<p>Feel free to fork this repo and submit pull requests for bug fixes, new features, or UI improvements!</p>
|
||||
<h2>License</h2>
|
||||
<p>This project is licensed under the MIT License.</p>
|
||||
### Included Third-Party Tools
|
||||
|
||||
You can create a standalone `.exe` file for Windows using `PyInstaller`.
|
||||
This project includes a pre-compiled binary of `yt-dlp` for handling certain video downloads. `yt-dlp` is in the public domain. For more information or to get the latest version, please visit the official [yt-dlp GitHub repository](https://github.com/yt-dlp/yt-dlp).
|
||||
|
||||
1. Install PyInstaller: `pip install pyinstaller`
|
||||
2. Obtain an icon file (`.ico`). Place it in the same directory as `main.py`.
|
||||
3. Open your terminal in the script's directory and run:
|
||||
```bash
|
||||
pyinstaller --name "YourAppName" --onefile --windowed --icon="your_icon.ico" main.py
|
||||
```
|
||||
Replace `"YourAppName"` with your desired application name and `"your_icon.ico"` with the actual name of your icon file.
|
||||
4. The executable will be found in the `./dist` folder.
|
||||
<h2>Star History</h2>
|
||||
<table align="center" style="border-collapse: collapse; border: none; margin-left: auto; margin-right: auto;">
|
||||
<tbody>
|
||||
<tr>
|
||||
<td align="center" valign="middle" style="padding: 10px; border: none;">
|
||||
<a href="https://www.star-history.com/#Yuvi9587/Kemono-Downloader&Date">
|
||||
<img src="https://api.star-history.com/svg?repos=Yuvi9587/Kemono-Downloader&type=Date" alt="Star History Chart" width="650">
|
||||
</a>
|
||||
</td>
|
||||
</tr>
|
||||
</tbody>
|
||||
</table>
|
||||
|
||||
## Configuration
|
||||
|
||||
The application saves your list of known names (characters, artists, series, etc.) to a file named `Known.txt` in the same directory as the script (`main.py`). Each name is stored on a new line. You can manually edit this file if needed.
|
||||
|
||||
## Dark Theme
|
||||
|
||||
The application uses a built-in dark theme for the user interface.
|
||||
|
||||
## Contributing
|
||||
|
||||
Contributions are welcome! If you find a bug or have a feature request, please open an issue on the GitHub repository (if applicable). If you want to contribute code, please fork the repository and create a pull request.
|
||||
<p align="center">
|
||||
<a href="https://buymeacoffee.com/yuvi9587">
|
||||
<img src="https://img.shields.io/badge/🍺%20Buy%20Me%20a%20Coffee-FFCCCB?style=for-the-badge&logoColor=black&color=FFDD00" alt="Buy Me a Coffee">
|
||||
</a>
|
||||
</p>
|
||||
|
||||
19
security.md
Normal file
@@ -0,0 +1,19 @@
|
||||
# Security Policy
|
||||
|
||||
## Supported Versions
|
||||
|
||||
We are committed to maintaining and improving the Kemono Downloader. For the best experience and access to the latest security updates and features, we strongly recommend using the most recent versions of the application.
|
||||
|
||||
| Version | Supported Status |
|
||||
| -------------- | ------------------------------------ |
|
||||
| >= 7.0.0 | :white_check_mark: Actively Supported |
|
||||
| 6.0.0 - 6.x.x | :warning: Supported (Limited Features) |
|
||||
| < 5.0.0 | :x: End of Life (EOL) |
|
||||
|
||||
Users are encouraged to update to **v7.0.0 or newer** for an actively supported release.
|
||||
|
||||
## Active Maintenance
|
||||
|
||||
The Kemono Downloader is actively maintained. We strive to address bugs and implement new features in a timely manner. If you discover any security vulnerabilities, please report them (details on reporting will be added if a formal process is established).
|
||||
|
||||
We appreciate your help in keeping Kemono Downloader secure!
|
||||
1
src/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# ...existing code...
|
||||
1
src/config/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# ...existing code...
|
||||
130
src/config/constants.py
Normal file
@@ -0,0 +1,130 @@
|
||||
CONFIG_ORGANIZATION_NAME = "KemonoDownloader"
|
||||
CONFIG_APP_NAME_MAIN = "ApplicationSettings"
|
||||
CONFIG_APP_NAME_TOUR = "ApplicationTour"
|
||||
|
||||
# --- Filename and Folder Naming Styles ---
|
||||
STYLE_POST_TITLE = "post_title"
|
||||
STYLE_ORIGINAL_NAME = "original_name"
|
||||
STYLE_DATE_BASED = "date_based"
|
||||
STYLE_DATE_POST_TITLE = "date_post_title"
|
||||
STYLE_POST_TITLE_GLOBAL_NUMBERING = "post_title_global_numbering"
|
||||
STYLE_POST_ID = "post_id"
|
||||
MANGA_DATE_PREFIX_DEFAULT = ""
|
||||
|
||||
# --- Download Scopes ---
|
||||
SKIP_SCOPE_FILES = "files"
|
||||
SKIP_SCOPE_POSTS = "posts"
|
||||
SKIP_SCOPE_BOTH = "both"
|
||||
|
||||
CHAR_SCOPE_TITLE = "title"
|
||||
CHAR_SCOPE_FILES = "files"
|
||||
CHAR_SCOPE_BOTH = "both"
|
||||
CHAR_SCOPE_COMMENTS = "comments"
|
||||
|
||||
FAVORITE_SCOPE_SELECTED_LOCATION = "selected_location"
|
||||
FAVORITE_SCOPE_ARTIST_FOLDERS = "artist_folders"
|
||||
|
||||
# --- Download Status Constants ---
|
||||
FILE_DOWNLOAD_STATUS_SUCCESS = "success"
|
||||
FILE_DOWNLOAD_STATUS_SKIPPED = "skipped"
|
||||
FILE_DOWNLOAD_STATUS_FAILED_RETRYABLE_LATER = "failed_retry_later"
|
||||
FILE_DOWNLOAD_STATUS_FAILED_PERMANENTLY_THIS_SESSION = "failed_permanent_session"
|
||||
|
||||
# --- Threading and Performance ---
|
||||
MAX_THREADS = 200
|
||||
RECOMMENDED_MAX_THREADS = 50
|
||||
SOFT_WARNING_THREAD_THRESHOLD = 40
|
||||
MAX_FILE_THREADS_PER_POST_OR_WORKER = 10
|
||||
POST_WORKER_BATCH_THRESHOLD = 30
|
||||
POST_WORKER_NUM_BATCHES = 4
|
||||
POST_WORKER_BATCH_DELAY_SECONDS = 2.5
|
||||
MAX_POST_WORKERS_WHEN_COMMENT_FILTERING = 3
|
||||
|
||||
# --- Multipart Download Settings ---
|
||||
MIN_SIZE_FOR_MULTIPART_DOWNLOAD = 10 * 1024 * 1024 # 10 MB
|
||||
MAX_PARTS_FOR_MULTIPART_DOWNLOAD = 15
|
||||
|
||||
# --- UI and Settings Keys (for QSettings) ---
|
||||
TOUR_SHOWN_KEY = "neverShowTourAgainV19"
|
||||
MANGA_FILENAME_STYLE_KEY = "mangaFilenameStyleV1"
|
||||
MANGA_CUSTOM_FORMAT_KEY = "mangaCustomFormatV1"
|
||||
MANGA_CUSTOM_DATE_FORMAT_KEY = "mangaCustomDateFormatV1"
|
||||
SKIP_WORDS_SCOPE_KEY = "skipWordsScopeV1"
|
||||
ALLOW_MULTIPART_DOWNLOAD_KEY = "allowMultipartDownloadV1"
|
||||
USE_COOKIE_KEY = "useCookieV1"
|
||||
COOKIE_TEXT_KEY = "cookieTextV1"
|
||||
CHAR_FILTER_SCOPE_KEY = "charFilterScopeV1"
|
||||
THEME_KEY = "currentThemeV2"
|
||||
SCAN_CONTENT_IMAGES_KEY = "scanContentForImagesV1"
|
||||
LANGUAGE_KEY = "currentLanguageV1"
|
||||
DOWNLOAD_LOCATION_KEY = "downloadLocationV1"
|
||||
RESOLUTION_KEY = "window_resolution"
|
||||
UI_SCALE_KEY = "ui_scale_factor"
|
||||
SAVE_CREATOR_JSON_KEY = "saveCreatorJsonProfile"
|
||||
DATE_PREFIX_FORMAT_KEY = "datePrefixFormatV1"
|
||||
AUTO_RETRY_ON_FINISH_KEY = "auto_retry_on_finish"
|
||||
FETCH_FIRST_KEY = "fetchAllPostsFirst"
|
||||
DISCORD_TOKEN_KEY = "discord/token"
|
||||
|
||||
POST_DOWNLOAD_ACTION_KEY = "postDownloadAction"
|
||||
|
||||
# --- UI Constants and Identifiers ---
|
||||
HTML_PREFIX = "<!HTML!>"
|
||||
LOG_DISPLAY_LINKS = "links"
|
||||
LOG_DISPLAY_DOWNLOAD_PROGRESS = "download_progress"
|
||||
|
||||
# --- Dialog Return Codes ---
|
||||
CONFIRM_ADD_ALL_ACCEPTED = 1
|
||||
CONFIRM_ADD_ALL_SKIP_ADDING = 2
|
||||
CONFIRM_ADD_ALL_CANCEL_DOWNLOAD = 3
|
||||
|
||||
# --- File Type Extensions ---
|
||||
IMAGE_EXTENSIONS = {
|
||||
'.jpg', '.jpeg', '.jpe', '.png', '.gif', '.bmp', '.tiff', '.tif', '.webp',
|
||||
'.heic', '.heif', '.svg', '.ico', '.jfif', '.pjpeg', '.pjp', '.avif'
|
||||
}
|
||||
VIDEO_EXTENSIONS = {
|
||||
'.mp4', '.mov', '.mkv', '.webm', '.avi', '.wmv', '.flv', '.mpeg',
|
||||
'.mpg', '.m4v', '.3gp', '.ogv', '.ts', '.vob'
|
||||
}
|
||||
ARCHIVE_EXTENSIONS = {
|
||||
'.zip', '.rar', '.7z', '.tar', '.gz', '.bz2', '.bin'
|
||||
}
|
||||
AUDIO_EXTENSIONS = {
|
||||
'.mp3', '.wav', '.aac', '.flac', '.ogg', '.wma', '.m4a', '.opus',
|
||||
'.aiff', '.ape', '.mid', '.midi'
|
||||
}
|
||||
|
||||
# --- Text Processing Constants ---
|
||||
MAX_FILENAME_COMPONENT_LENGTH = 150
|
||||
|
||||
# Words to ignore when creating folder names from titles
|
||||
FOLDER_NAME_STOP_WORDS = {
|
||||
"a", "alone", "am", "an", "and", "at", "be", "by", "com",
|
||||
"for", "he", "her", "his", "i", "im", "in", "is", "it", "its",
|
||||
"me", "my", "net", "not", "of", "on", "or", "org", "our",
|
||||
"s", "she", "so", "the", "their", "they", "this",
|
||||
"to", "ve", "was", "we", "were", "with", "www", "you", "your", "nsfw", "sfw",
|
||||
# add more according to need
|
||||
}
|
||||
|
||||
# Additional words to ignore specifically for creator-level downloads
|
||||
CREATOR_DOWNLOAD_DEFAULT_FOLDER_IGNORE_WORDS = {
|
||||
"poll", "cover", "fan-art", "fanart", "requests", "request", "holiday",
|
||||
"batch", "open", "closed", "winner", "loser", "wip",
|
||||
"update", "news", "discussion", "question", "stream", "video", "sketchbook",
|
||||
# Months and days
|
||||
"jan", "january", "feb", "february", "mar", "march", "apr", "april",
|
||||
"may", "jun", "june", "jul", "july", "aug", "august", "sep", "september",
|
||||
"oct", "october", "nov", "november", "dec", "december",
|
||||
"mon", "monday", "tue", "tuesday", "wed", "wednesday", "thu", "thursday",
|
||||
"fri", "friday", "sat", "saturday", "sun", "sunday", "Pack", "tier", "spoiler",
|
||||
|
||||
|
||||
# add more according to need
|
||||
}
|
||||
|
||||
# --- Duplicate Handling Modes ---
|
||||
DUPLICATE_HANDLING_HASH = "hash"
|
||||
DUPLICATE_HANDLING_KEEP_ALL = "keep_all"
|
||||
STYLE_CUSTOM = "custom"
|
||||
207
src/core/Hentai2read_client.py
Normal file
@@ -0,0 +1,207 @@
|
||||
# src/core/Hentai2read_client.py
|
||||
|
||||
import re
|
||||
import os
|
||||
import time
|
||||
import cloudscraper
|
||||
from bs4 import BeautifulSoup
|
||||
from urllib.parse import urljoin
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
import queue
|
||||
|
||||
def run_hentai2read_download(start_url, output_dir, progress_callback, overall_progress_callback, check_pause_func):
|
||||
"""
|
||||
Orchestrates the download process using a producer-consumer model.
|
||||
The main thread scrapes image URLs and puts them in a queue.
|
||||
A pool of worker threads consumes from the queue to download images concurrently.
|
||||
"""
|
||||
scraper = cloudscraper.create_scraper()
|
||||
|
||||
try:
|
||||
progress_callback(" [Hentai2Read] Scraping series page for all metadata...")
|
||||
top_level_folder_name, chapters_to_process = _get_series_metadata(start_url, progress_callback, scraper)
|
||||
|
||||
if not chapters_to_process:
|
||||
progress_callback("❌ No chapters found to download. Aborting.")
|
||||
return 0, 0
|
||||
|
||||
total_chapters = len(chapters_to_process)
|
||||
overall_progress_callback(total_chapters, 0)
|
||||
|
||||
total_downloaded_count = 0
|
||||
total_skipped_count = 0
|
||||
|
||||
for idx, chapter in enumerate(chapters_to_process):
|
||||
if check_pause_func(): break
|
||||
|
||||
progress_callback(f"\n-- Processing and Downloading Chapter {idx + 1}/{total_chapters}: '{chapter['title']}' --")
|
||||
|
||||
series_folder = re.sub(r'[\\/*?:"<>|]', "", top_level_folder_name).strip()
|
||||
chapter_folder = re.sub(r'[\\/*?:"<>|]', "", chapter['title']).strip()
|
||||
final_save_path = os.path.join(output_dir, series_folder, chapter_folder)
|
||||
os.makedirs(final_save_path, exist_ok=True)
|
||||
|
||||
# This function now scrapes and downloads simultaneously
|
||||
dl_count, skip_count = _process_and_download_chapter(
|
||||
chapter_url=chapter['url'],
|
||||
save_path=final_save_path,
|
||||
scraper=scraper,
|
||||
progress_callback=progress_callback,
|
||||
check_pause_func=check_pause_func
|
||||
)
|
||||
|
||||
total_downloaded_count += dl_count
|
||||
total_skipped_count += skip_count
|
||||
|
||||
overall_progress_callback(total_chapters, idx + 1)
|
||||
if check_pause_func(): break
|
||||
|
||||
return total_downloaded_count, total_skipped_count
|
||||
|
||||
except Exception as e:
|
||||
progress_callback(f"❌ A critical error occurred in the Hentai2Read client: {e}")
|
||||
return 0, 0
|
||||
|
||||
def _get_series_metadata(start_url, progress_callback, scraper):
|
||||
"""
|
||||
Scrapes the main series page to get the Artist Name, Series Title, and chapter list.
|
||||
"""
|
||||
try:
|
||||
response = scraper.get(start_url, timeout=30)
|
||||
response.raise_for_status()
|
||||
soup = BeautifulSoup(response.text, 'html.parser')
|
||||
|
||||
series_title = "Unknown Series"
|
||||
artist_name = None
|
||||
metadata_list = soup.select_one("ul.list.list-simple-mini")
|
||||
|
||||
if metadata_list:
|
||||
first_li = metadata_list.find('li', recursive=False)
|
||||
if first_li and not first_li.find('a'):
|
||||
series_title = first_li.get_text(strip=True)
|
||||
|
||||
for b_tag in metadata_list.find_all('b'):
|
||||
label = b_tag.get_text(strip=True)
|
||||
if label in ("Artist", "Author"):
|
||||
a_tag = b_tag.find_next_sibling('a')
|
||||
if a_tag:
|
||||
artist_name = a_tag.get_text(strip=True)
|
||||
if label == "Artist":
|
||||
break
|
||||
|
||||
top_level_folder_name = artist_name if artist_name else series_title
|
||||
|
||||
chapter_links = soup.select("div.media a.pull-left.font-w600")
|
||||
if not chapter_links:
|
||||
chapters_to_process = [{'url': start_url, 'title': series_title}]
|
||||
else:
|
||||
chapters_to_process = [
|
||||
{'url': urljoin(start_url, link['href']), 'title': " ".join(link.stripped_strings)}
|
||||
for link in chapter_links
|
||||
]
|
||||
chapters_to_process.reverse()
|
||||
|
||||
progress_callback(f" [Hentai2Read] ✅ Found Artist/Series: '{top_level_folder_name}'")
|
||||
progress_callback(f" [Hentai2Read] ✅ Found {len(chapters_to_process)} chapters to process.")
|
||||
|
||||
return top_level_folder_name, chapters_to_process
|
||||
|
||||
except Exception as e:
|
||||
progress_callback(f" [Hentai2Read] ❌ Error getting series metadata: {e}")
|
||||
return "Unknown Series", []
|
||||
|
||||
### NEW: This function contains the pipeline logic ###
|
||||
def _process_and_download_chapter(chapter_url, save_path, scraper, progress_callback, check_pause_func):
|
||||
"""
|
||||
Uses a producer-consumer pattern to download a chapter.
|
||||
The main thread (producer) scrapes URLs one by one.
|
||||
Worker threads (consumers) download the URLs as they are found.
|
||||
"""
|
||||
task_queue = queue.Queue()
|
||||
num_download_threads = 8
|
||||
|
||||
# These will be updated by the worker threads
|
||||
download_stats = {'downloaded': 0, 'skipped': 0}
|
||||
|
||||
def downloader_worker():
|
||||
"""The function that each download thread will run."""
|
||||
# Create a unique session for each thread to avoid conflicts
|
||||
worker_scraper = cloudscraper.create_scraper()
|
||||
while True:
|
||||
try:
|
||||
# Get a task from the queue
|
||||
task = task_queue.get()
|
||||
# The sentinel value to signal the end
|
||||
if task is None:
|
||||
break
|
||||
|
||||
filepath, img_url = task
|
||||
if os.path.exists(filepath):
|
||||
progress_callback(f" -> Skip: '{os.path.basename(filepath)}'")
|
||||
download_stats['skipped'] += 1
|
||||
else:
|
||||
progress_callback(f" Downloading: '{os.path.basename(filepath)}'...")
|
||||
response = worker_scraper.get(img_url, stream=True, timeout=60, headers={'Referer': chapter_url})
|
||||
response.raise_for_status()
|
||||
with open(filepath, 'wb') as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
download_stats['downloaded'] += 1
|
||||
except Exception as e:
|
||||
progress_callback(f" ❌ Download failed for task. Error: {e}")
|
||||
download_stats['skipped'] += 1
|
||||
finally:
|
||||
task_queue.task_done()
|
||||
|
||||
# --- Start the downloader threads ---
|
||||
executor = ThreadPoolExecutor(max_workers=num_download_threads, thread_name_prefix='H2R_Downloader')
|
||||
for _ in range(num_download_threads):
|
||||
executor.submit(downloader_worker)
|
||||
|
||||
# --- Main thread acts as the scraper (producer) ---
|
||||
page_number = 1
|
||||
while True:
|
||||
if check_pause_func(): break
|
||||
if page_number > 300: # Safety break
|
||||
progress_callback(" [Hentai2Read] ⚠️ Safety break: Reached 300 pages.")
|
||||
break
|
||||
|
||||
page_url_to_check = f"{chapter_url}{page_number}/"
|
||||
try:
|
||||
response = scraper.get(page_url_to_check, timeout=30)
|
||||
if response.history or response.status_code != 200:
|
||||
progress_callback(f" [Hentai2Read] End of chapter detected on page {page_number}.")
|
||||
break
|
||||
|
||||
soup = BeautifulSoup(response.text, 'html.parser')
|
||||
img_tag = soup.select_one("img#arf-reader")
|
||||
img_src = img_tag.get("src") if img_tag else None
|
||||
|
||||
if not img_tag or img_src == "https://static.hentai.direct/hentai":
|
||||
progress_callback(f" [Hentai2Read] End of chapter detected (Placeholder image on page {page_number}).")
|
||||
break
|
||||
|
||||
normalized_img_src = urljoin(response.url, img_src)
|
||||
ext = os.path.splitext(normalized_img_src.split('/')[-1])[-1] or ".jpg"
|
||||
filename = f"{page_number:03d}{ext}"
|
||||
filepath = os.path.join(save_path, filename)
|
||||
|
||||
# Put the download task into the queue for a worker to pick up
|
||||
task_queue.put((filepath, normalized_img_src))
|
||||
|
||||
page_number += 1
|
||||
time.sleep(0.1) # Small delay between scraping pages
|
||||
except Exception as e:
|
||||
progress_callback(f" [Hentai2Read] ❌ Error while scraping page {page_number}: {e}")
|
||||
break
|
||||
|
||||
# --- Shutdown sequence ---
|
||||
# Tell all worker threads to exit by sending the sentinel value
|
||||
for _ in range(num_download_threads):
|
||||
task_queue.put(None)
|
||||
|
||||
# Wait for all download tasks to be completed
|
||||
executor.shutdown(wait=True)
|
||||
|
||||
progress_callback(f" Found and processed {page_number - 1} images for this chapter.")
|
||||
return download_stats['downloaded'], download_stats['skipped']
|
||||
1
src/core/__init__.py
Normal file
@@ -0,0 +1 @@
|
||||
# ...existing code...
|
||||
121
src/core/allcomic_client.py
Normal file
@@ -0,0 +1,121 @@
|
||||
import requests
|
||||
import re
|
||||
from bs4 import BeautifulSoup
|
||||
import time
|
||||
import random
|
||||
from urllib.parse import urlparse
|
||||
|
||||
def get_chapter_list(scraper, series_url, logger_func):
|
||||
"""
|
||||
Checks if a URL is a series page and returns a list of all chapter URLs if it is.
|
||||
Relies on a passed-in scraper session for connection.
|
||||
"""
|
||||
logger_func(f" [AllComic] Checking for chapter list at: {series_url}")
|
||||
|
||||
headers = {'Referer': 'https://allporncomic.com/'}
|
||||
response = None
|
||||
max_retries = 8
|
||||
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
response = scraper.get(series_url, headers=headers, timeout=30)
|
||||
response.raise_for_status()
|
||||
logger_func(f" [AllComic] Successfully connected to series page on attempt {attempt + 1}.")
|
||||
break
|
||||
except requests.RequestException as e:
|
||||
logger_func(f" [AllComic] ⚠️ Series page check attempt {attempt + 1}/{max_retries} failed: {e}")
|
||||
if attempt < max_retries - 1:
|
||||
wait_time = (2 ** attempt) + random.uniform(0, 2)
|
||||
logger_func(f" Retrying in {wait_time:.1f} seconds...")
|
||||
time.sleep(wait_time)
|
||||
else:
|
||||
logger_func(f" [AllComic] ❌ All attempts to check series page failed.")
|
||||
return []
|
||||
|
||||
if not response:
|
||||
return []
|
||||
|
||||
try:
|
||||
soup = BeautifulSoup(response.text, 'html.parser')
|
||||
chapter_links = soup.select('li.wp-manga-chapter a')
|
||||
|
||||
if not chapter_links:
|
||||
logger_func(" [AllComic] ℹ️ No chapter list found. Assuming this is a single chapter page.")
|
||||
return []
|
||||
|
||||
chapter_urls = [link['href'] for link in chapter_links]
|
||||
chapter_urls.reverse()
|
||||
|
||||
logger_func(f" [AllComic] ✅ Found {len(chapter_urls)} chapters.")
|
||||
return chapter_urls
|
||||
|
||||
except Exception as e:
|
||||
logger_func(f" [AllComic] ❌ Error parsing chapters after successful connection: {e}")
|
||||
return []
|
||||
|
||||
def fetch_chapter_data(scraper, chapter_url, logger_func):
|
||||
"""
|
||||
Fetches the comic title, chapter title, and image URLs for a single chapter page.
|
||||
Relies on a passed-in scraper session for connection.
|
||||
"""
|
||||
logger_func(f" [AllComic] Fetching page: {chapter_url}")
|
||||
|
||||
headers = {'Referer': 'https://allporncomic.com/'}
|
||||
|
||||
response = None
|
||||
max_retries = 8
|
||||
for attempt in range(max_retries):
|
||||
try:
|
||||
response = scraper.get(chapter_url, headers=headers, timeout=30)
|
||||
response.raise_for_status()
|
||||
break
|
||||
except requests.RequestException as e:
|
||||
logger_func(f" [AllComic] ⚠️ Chapter page connection attempt {attempt + 1}/{max_retries} failed: {e}")
|
||||
if attempt < max_retries - 1:
|
||||
wait_time = (2 ** attempt) + random.uniform(0, 2)
|
||||
logger_func(f" Retrying in {wait_time:.1f} seconds...")
|
||||
time.sleep(wait_time)
|
||||
else:
|
||||
logger_func(f" [AllComic] ❌ All connection attempts failed for chapter: {chapter_url}")
|
||||
return None, None, None
|
||||
|
||||
if not response:
|
||||
return None, None, None
|
||||
|
||||
try:
|
||||
soup = BeautifulSoup(response.text, 'html.parser')
|
||||
|
||||
comic_title = "Unknown Comic"
|
||||
title_element = soup.find('h1', class_='post-title')
|
||||
if title_element:
|
||||
comic_title = title_element.text.strip()
|
||||
else:
|
||||
try:
|
||||
path_parts = urlparse(chapter_url).path.strip('/').split('/')
|
||||
if len(path_parts) >= 3 and path_parts[-3] == 'porncomic':
|
||||
comic_slug = path_parts[-2]
|
||||
comic_title = comic_slug.replace('-', ' ').title()
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
chapter_slug = chapter_url.strip('/').split('/')[-1]
|
||||
chapter_title = chapter_slug.replace('-', ' ').title()
|
||||
|
||||
reading_container = soup.find('div', class_='reading-content')
|
||||
list_of_image_urls = []
|
||||
if reading_container:
|
||||
image_elements = reading_container.find_all('img', class_='wp-manga-chapter-img')
|
||||
for img in image_elements:
|
||||
img_url = (img.get('data-src') or img.get('src', '')).strip()
|
||||
if img_url:
|
||||
list_of_image_urls.append(img_url)
|
||||
|
||||
if not list_of_image_urls:
|
||||
logger_func(f" [AllComic] ❌ Could not find any images on the page.")
|
||||
return None, None, None
|
||||
|
||||
return comic_title, chapter_title, list_of_image_urls
|
||||
|
||||
except Exception as e:
|
||||
logger_func(f" [AllComic] ❌ An unexpected error occurred while parsing the page: {e}")
|
||||
return None, None, None
|
||||
378
src/core/api_client.py
Normal file
@@ -0,0 +1,378 @@
|
||||
import time
|
||||
import traceback
|
||||
from urllib.parse import urlparse
|
||||
import json
|
||||
import requests
|
||||
import cloudscraper
|
||||
from ..utils.network_utils import extract_post_info, prepare_cookies_for_request
|
||||
from ..config.constants import (
|
||||
STYLE_DATE_POST_TITLE
|
||||
)
|
||||
|
||||
|
||||
def fetch_posts_paginated(api_url_base, headers, offset, logger, cancellation_event=None, pause_event=None, cookies_dict=None):
|
||||
"""
|
||||
Fetches a single page of posts from the API with robust retry logic.
|
||||
"""
|
||||
if cancellation_event and cancellation_event.is_set():
|
||||
raise RuntimeError("Fetch operation cancelled by user.")
|
||||
if pause_event and pause_event.is_set():
|
||||
logger(" Post fetching paused...")
|
||||
while pause_event.is_set():
|
||||
if cancellation_event and cancellation_event.is_set():
|
||||
raise RuntimeError("Fetch operation cancelled by user while paused.")
|
||||
time.sleep(0.5)
|
||||
logger(" Post fetching resumed.")
|
||||
fields_to_request = "id,user,service,title,shared_file,added,published,edited,file,attachments,tags"
|
||||
paginated_url = f'{api_url_base}?o={offset}&fields={fields_to_request}'
|
||||
|
||||
max_retries = 3
|
||||
retry_delay = 5
|
||||
|
||||
for attempt in range(max_retries):
|
||||
if cancellation_event and cancellation_event.is_set():
|
||||
raise RuntimeError("Fetch operation cancelled by user during retry loop.")
|
||||
|
||||
log_message = f" Fetching post list: {api_url_base} (Page approx. {offset // 50 + 1})"
|
||||
if attempt > 0:
|
||||
log_message += f" (Attempt {attempt + 1}/{max_retries})"
|
||||
logger(log_message)
|
||||
|
||||
try:
|
||||
response = requests.get(paginated_url, headers=headers, timeout=(15, 60), cookies=cookies_dict)
|
||||
response.raise_for_status()
|
||||
response.encoding = 'utf-8'
|
||||
return response.json()
|
||||
|
||||
except requests.exceptions.RequestException as e:
|
||||
# Handle 403 error on the FIRST page as a rate limit/block
|
||||
if e.response is not None and e.response.status_code == 403 and offset == 0:
|
||||
logger(" ❌ Access Denied (403 Forbidden) on the first page.")
|
||||
logger(" This is likely a rate limit or a Cloudflare block.")
|
||||
logger(" 💡 SOLUTION: Wait a while, use a VPN, or provide a valid session cookie.")
|
||||
return [] # Stop the process gracefully
|
||||
|
||||
# Handle 400 error as the end of pages
|
||||
if e.response is not None and e.response.status_code == 400:
|
||||
logger(f" ✅ Reached end of posts (API returned 400 Bad Request for offset {offset}).")
|
||||
return []
|
||||
|
||||
# Handle all other network errors with a retry
|
||||
logger(f" ⚠️ Retryable network error on page fetch (Attempt {attempt + 1}): {e}")
|
||||
if attempt < max_retries - 1:
|
||||
delay = retry_delay * (2 ** attempt)
|
||||
logger(f" Retrying in {delay} seconds...")
|
||||
time.sleep(delay)
|
||||
continue
|
||||
else:
|
||||
logger(f" ❌ Failed to fetch page after {max_retries} attempts.")
|
||||
raise RuntimeError(f"Network error fetching offset {offset}")
|
||||
except json.JSONDecodeError as e:
|
||||
logger(f" ❌ Failed to decode JSON on page fetch (Attempt {attempt + 1}): {e}")
|
||||
if attempt < max_retries - 1:
|
||||
delay = retry_delay * (2 ** attempt)
|
||||
logger(f" Retrying in {delay} seconds...")
|
||||
time.sleep(delay)
|
||||
continue
|
||||
else:
|
||||
raise RuntimeError(f"JSONDecodeError fetching offset {offset}")
|
||||
|
||||
raise RuntimeError(f"Failed to fetch page {paginated_url} after all attempts.")
|
||||
|
||||
def fetch_single_post_data(api_domain, service, user_id, post_id, headers, logger, cookies_dict=None):
|
||||
"""
|
||||
--- MODIFIED FUNCTION ---
|
||||
Fetches the full data, including the 'content' field, for a single post using cloudscraper.
|
||||
"""
|
||||
post_api_url = f"https://{api_domain}/api/v1/{service}/user/{user_id}/post/{post_id}"
|
||||
logger(f" Fetching full content for post ID {post_id}...")
|
||||
|
||||
scraper = cloudscraper.create_scraper()
|
||||
|
||||
try:
|
||||
response = scraper.get(post_api_url, headers=headers, timeout=(15, 300), cookies=cookies_dict)
|
||||
response.raise_for_status()
|
||||
|
||||
full_post_data = response.json()
|
||||
|
||||
if isinstance(full_post_data, list) and full_post_data:
|
||||
return full_post_data[0]
|
||||
if isinstance(full_post_data, dict) and 'post' in full_post_data:
|
||||
return full_post_data['post']
|
||||
return full_post_data
|
||||
|
||||
except Exception as e:
|
||||
logger(f" ❌ Failed to fetch full content for post {post_id}: {e}")
|
||||
return None
|
||||
|
||||
|
||||
def fetch_post_comments(api_domain, service, user_id, post_id, headers, logger, cancellation_event=None, pause_event=None, cookies_dict=None):
|
||||
"""Fetches all comments for a specific post."""
|
||||
if cancellation_event and cancellation_event.is_set():
|
||||
raise RuntimeError("Comment fetch operation cancelled by user.")
|
||||
|
||||
comments_api_url = f"https://{api_domain}/api/v1/{service}/user/{user_id}/post/{post_id}/comments"
|
||||
logger(f" Fetching comments: {comments_api_url}")
|
||||
|
||||
try:
|
||||
response = requests.get(comments_api_url, headers=headers, timeout=(10, 30), cookies=cookies_dict)
|
||||
response.raise_for_status()
|
||||
response.encoding = 'utf-8'
|
||||
return response.json()
|
||||
except requests.exceptions.RequestException as e:
|
||||
raise RuntimeError(f"Error fetching comments for post {post_id}: {e}")
|
||||
except ValueError as e:
|
||||
raise RuntimeError(f"Error decoding JSON from comments API for post {post_id}: {e}")
|
||||
|
||||
def download_from_api(
|
||||
api_url_input,
|
||||
logger=print,
|
||||
start_page=None,
|
||||
end_page=None,
|
||||
manga_mode=False,
|
||||
cancellation_event=None,
|
||||
pause_event=None,
|
||||
use_cookie=False,
|
||||
cookie_text="",
|
||||
selected_cookie_file=None,
|
||||
app_base_dir=None,
|
||||
manga_filename_style_for_sort_check=None,
|
||||
processed_post_ids=None,
|
||||
fetch_all_first=False
|
||||
):
|
||||
parsed_input_url_for_domain = urlparse(api_url_input)
|
||||
api_domain = parsed_input_url_for_domain.netloc
|
||||
|
||||
headers = {
|
||||
'User-Agent': 'Mozilla/5.0 (compatible; Googlebot/2.1; +http://www.google.com/bot.html)',
|
||||
'Referer': f'https://{api_domain}/',
|
||||
'Accept': 'text/css'
|
||||
}
|
||||
|
||||
if processed_post_ids is None:
|
||||
processed_post_ids = set()
|
||||
else:
|
||||
processed_post_ids = set(processed_post_ids)
|
||||
|
||||
service, user_id, target_post_id = extract_post_info(api_url_input)
|
||||
|
||||
    if cancellation_event and cancellation_event.is_set():
        logger(" Download_from_api cancelled at start.")
        return

    # The code that defined api_domain was moved from here to the top of the function

    if not any(d in api_domain.lower() for d in ['kemono.su', 'kemono.party', 'kemono.cr', 'coomer.su', 'coomer.party', 'coomer.st']):
        logger(f"⚠️ Unrecognized domain '{api_domain}' from input URL. Defaulting to kemono.su for API calls.")
        api_domain = "kemono.su"

    cookies_for_api = None
    if use_cookie and app_base_dir:
        cookies_for_api = prepare_cookies_for_request(use_cookie, cookie_text, selected_cookie_file, app_base_dir, logger, target_domain=api_domain)

    if target_post_id:
        if target_post_id in processed_post_ids:
            logger(f" Skipping already processed target post ID: {target_post_id}")
            return
        direct_post_api_url = f"https://{api_domain}/api/v1/{service}/user/{user_id}/post/{target_post_id}"
        logger(f" Attempting direct fetch for target post: {direct_post_api_url}")
        try:
            direct_response = requests.get(direct_post_api_url, headers=headers, timeout=(10, 30), cookies=cookies_for_api)
            direct_response.raise_for_status()
            direct_response.encoding = 'utf-8'
            direct_post_data = direct_response.json()
            if isinstance(direct_post_data, list) and direct_post_data:
                direct_post_data = direct_post_data[0]
            if isinstance(direct_post_data, dict) and 'post' in direct_post_data and isinstance(direct_post_data['post'], dict):
                direct_post_data = direct_post_data['post']
            # Compare ids as strings; the API may return numeric ids (the pagination
            # fallback below already does the same).
            if isinstance(direct_post_data, dict) and str(direct_post_data.get('id')) == str(target_post_id):
                logger(f" ✅ Direct fetch successful for post {target_post_id}.")
                yield [direct_post_data]
                return
            else:
                response_type = type(direct_post_data).__name__
                response_snippet = str(direct_post_data)[:200]
                logger(f" ⚠️ Direct fetch for post {target_post_id} returned unexpected data (Type: {response_type}, Snippet: '{response_snippet}'). Falling back to pagination.")
        except requests.exceptions.RequestException as e:
            logger(f" ⚠️ Direct fetch failed for post {target_post_id}: {e}. Falling back to pagination.")
        except Exception as e:
            logger(f" ⚠️ Unexpected error during direct fetch for post {target_post_id}: {e}. Falling back to pagination.")

    if not service or not user_id:
        logger(f"❌ Invalid URL or could not extract service/user: {api_url_input}")
        return
    if target_post_id and (start_page or end_page):
        logger("⚠️ Page range (start/end page) is ignored when a specific post URL is provided (searching all pages for the post).")

    is_manga_mode_fetch_all_and_sort_oldest_first = manga_mode and (manga_filename_style_for_sort_check != STYLE_DATE_POST_TITLE) and not target_post_id
    should_fetch_all = fetch_all_first or is_manga_mode_fetch_all_and_sort_oldest_first
    api_base_url = f"https://{api_domain}/api/v1/{service}/user/{user_id}/posts"
    page_size = 50

    if is_manga_mode_fetch_all_and_sort_oldest_first:
        logger(f" Manga Mode (Style: {manga_filename_style_for_sort_check if manga_filename_style_for_sort_check else 'Default'} - Oldest First Sort Active): Fetching all posts to sort by date...")
        all_posts_for_manga_mode = []
        current_offset_manga = 0
        if start_page and start_page > 1:
            current_offset_manga = (start_page - 1) * page_size
            logger(f" Manga Mode: Starting fetch from page {start_page} (offset {current_offset_manga}).")
        elif start_page:
            logger(" Manga Mode: Starting fetch from page 1 (offset 0).")
        if end_page:
            logger(f" Manga Mode: Will fetch up to page {end_page}.")

        while True:
            if pause_event and pause_event.is_set():
                logger(" Manga mode post fetching paused...")
                while pause_event.is_set():
                    if cancellation_event and cancellation_event.is_set():
                        logger(" Manga mode post fetching cancelled while paused.")
                        break
                    time.sleep(0.5)
                if not (cancellation_event and cancellation_event.is_set()):
                    logger(" Manga mode post fetching resumed.")
            if cancellation_event and cancellation_event.is_set():
                logger(" Manga mode post fetching cancelled.")
                break

            current_page_num_manga = (current_offset_manga // page_size) + 1
            if end_page and current_page_num_manga > end_page:
                logger(f" Manga Mode: Reached specified end page ({end_page}). Stopping post fetch.")
                break
            try:
                posts_batch_manga = fetch_posts_paginated(api_base_url, headers, current_offset_manga, logger, cancellation_event, pause_event, cookies_dict=cookies_for_api)
                if not isinstance(posts_batch_manga, list):
                    logger(f"❌ API Error (Manga Mode): Expected list of posts, got {type(posts_batch_manga)}.")
                    break
                if not posts_batch_manga:
                    logger("✅ Reached end of posts (Manga Mode fetch all).")
                    if start_page and not end_page and current_page_num_manga < start_page:
                        logger(f" Manga Mode: No posts found on or after specified start page {start_page}.")
                    elif end_page and current_page_num_manga <= end_page and not all_posts_for_manga_mode:
                        logger(f" Manga Mode: No posts found within the specified page range ({start_page or 1}-{end_page}).")
                    break
                all_posts_for_manga_mode.extend(posts_batch_manga)

                logger(f"RENAMING_MODE_FETCH_PROGRESS:{len(all_posts_for_manga_mode)}:{current_page_num_manga}")

                current_offset_manga += page_size
                time.sleep(0.6)
            except RuntimeError as e:
                if "cancelled by user" in str(e).lower():
                    logger(f"ℹ️ Manga mode pagination stopped due to cancellation: {e}")
                else:
                    logger(f"❌ {e}\n Aborting manga mode pagination.")
                break
            except Exception as e:
                logger(f"❌ Unexpected error during manga mode fetch: {e}")
                traceback.print_exc()
                break

        if cancellation_event and cancellation_event.is_set():
            return

        if all_posts_for_manga_mode:
            logger(f"RENAMING_MODE_FETCH_COMPLETE:{len(all_posts_for_manga_mode)}")

        if all_posts_for_manga_mode:
            if processed_post_ids:
                original_count = len(all_posts_for_manga_mode)
                all_posts_for_manga_mode = [post for post in all_posts_for_manga_mode if post.get('id') not in processed_post_ids]
                skipped_count = original_count - len(all_posts_for_manga_mode)
                if skipped_count > 0:
                    logger(f" Manga Mode: Skipped {skipped_count} already processed post(s) before sorting.")

            logger(f" Manga Mode: Fetched {len(all_posts_for_manga_mode)} total posts. Sorting by publication date (oldest first)...")

            def sort_key_tuple(post):
                published_date_str = post.get('published')
                added_date_str = post.get('added')
                post_id_str = post.get('id', "0")
                primary_sort_val = "0000-00-00T00:00:00"
                if published_date_str:
                    primary_sort_val = published_date_str
                elif added_date_str:
                    logger(f" ⚠️ Post ID {post_id_str} missing 'published' date, using 'added' date '{added_date_str}' for primary sorting.")
                    primary_sort_val = added_date_str
                else:
                    logger(f" ⚠️ Post ID {post_id_str} missing both 'published' and 'added' dates. Placing at start of sort (using default earliest date).")
                secondary_sort_val = 0
                try:
                    secondary_sort_val = int(post_id_str)
                except ValueError:
                    logger(f" ⚠️ Post ID '{post_id_str}' is not a valid integer for secondary sorting, using 0.")
                return (primary_sort_val, secondary_sort_val)

            all_posts_for_manga_mode.sort(key=sort_key_tuple)
            for i in range(0, len(all_posts_for_manga_mode), page_size):
                if cancellation_event and cancellation_event.is_set():
                    logger(" Manga mode post yielding cancelled.")
                    break
                yield all_posts_for_manga_mode[i:i + page_size]
        return

    if manga_mode and not target_post_id and (manga_filename_style_for_sort_check == STYLE_DATE_POST_TITLE):
        logger(f" Manga Mode (Style: {STYLE_DATE_POST_TITLE}): Processing posts in default API order (newest first).")

    current_page_num = 1
    current_offset = 0
    processed_target_post_flag = False

    if start_page and start_page > 1 and not target_post_id:
        current_offset = (start_page - 1) * page_size
        current_page_num = start_page
        logger(f" Starting from page {current_page_num} (calculated offset {current_offset}).")

    while True:
        if pause_event and pause_event.is_set():
            logger(" Post fetching loop paused...")
            while pause_event.is_set():
                if cancellation_event and cancellation_event.is_set():
                    logger(" Post fetching loop cancelled while paused.")
                    break
                time.sleep(0.5)
            if not (cancellation_event and cancellation_event.is_set()):
                logger(" Post fetching loop resumed.")
        if cancellation_event and cancellation_event.is_set():
            logger(" Post fetching loop cancelled.")
            break
        if target_post_id and processed_target_post_flag:
            break
        if not target_post_id and end_page and current_page_num > end_page:
            logger(f"✅ Reached specified end page ({end_page}) for creator feed. Stopping.")
            break
        try:
            posts_batch = fetch_posts_paginated(api_base_url, headers, current_offset, logger, cancellation_event, pause_event, cookies_dict=cookies_for_api)
            if not isinstance(posts_batch, list):
                logger(f"❌ API Error: Expected list of posts, got {type(posts_batch)} at page {current_page_num} (offset {current_offset}).")
                break
        except RuntimeError as e:
            if "cancelled by user" in str(e).lower():
                logger(f"ℹ️ Pagination stopped due to cancellation: {e}")
            else:
                logger(f"❌ {e}\n Aborting pagination at page {current_page_num} (offset {current_offset}).")
            break
        except Exception as e:
            logger(f"❌ Unexpected error fetching page {current_page_num} (offset {current_offset}): {e}")
            traceback.print_exc()
            break

        api_batch_was_empty = not posts_batch
        if processed_post_ids:
            original_count = len(posts_batch)
            posts_batch = [post for post in posts_batch if post.get('id') not in processed_post_ids]
            skipped_count = original_count - len(posts_batch)
            if skipped_count > 0:
                logger(f" Skipped {skipped_count} already processed post(s) from page {current_page_num}.")

        if not posts_batch:
            if not api_batch_was_empty:
                # The API returned posts but every one was already processed; advance to
                # the next page instead of treating this as the end of the feed.
                current_offset += page_size
                current_page_num += 1
                time.sleep(0.6)
                continue
            if target_post_id and not processed_target_post_flag:
                logger(f"❌ Target post {target_post_id} not found after checking all available pages (API returned no more posts at offset {current_offset}).")
            elif not target_post_id:
                if current_page_num == (start_page or 1):
                    logger(f"😕 No posts found on the first page checked (page {current_page_num}, offset {current_offset}).")
                else:
                    logger(f"✅ Reached end of posts (no more content from API at offset {current_offset}).")
            break

        if target_post_id and not processed_target_post_flag:
            matching_post = next((p for p in posts_batch if str(p.get('id')) == str(target_post_id)), None)
            if matching_post:
                logger(f"🎯 Found target post {target_post_id} on page {current_page_num} (offset {current_offset}).")
                yield [matching_post]
                processed_target_post_flag = True
        elif not target_post_id:
            yield posts_batch

        if processed_target_post_flag:
            break
        current_offset += page_size
        current_page_num += 1
        time.sleep(0.6)

    if target_post_id and not processed_target_post_flag and not (cancellation_event and cancellation_event.is_set()):
        logger(f"❌ Target post {target_post_id} could not be found after checking all relevant pages (final check after loop).")
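Since `download_from_api` is a generator yielding lists of post dicts, a caller can stream batches without waiting for the whole feed. A minimal consumption sketch (the creator URL is illustrative, and the bare `print` logger stands in for the app's real logger):

import threading

cancel_evt, pause_evt = threading.Event(), threading.Event()

for batch in download_from_api(
        api_url_input="https://kemono.su/patreon/user/12345",  # illustrative creator URL
        logger=print,
        cancellation_event=cancel_evt,
        pause_event=pause_evt):
    for post in batch:
        print(post.get("id"), post.get("title"))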
src/core/booru_client.py (new file, 374 lines)
@@ -0,0 +1,374 @@
import os
import re
import time
import datetime
import urllib.parse
import requests
import logging
import cloudscraper

# --- Start of Combined Code from 1.py ---

# Part 1: Essential Utilities & Exceptions

class BooruClientException(Exception):
    """Base class for exceptions in this client."""
    pass


class HttpError(BooruClientException):
    """HTTP request during data extraction failed."""
    def __init__(self, message="", response=None):
        self.response = response
        # 'is not None' matters here: requests.Response is falsy for 4xx/5xx,
        # so a bare truthiness check would drop the status of failed responses.
        self.status = response.status_code if response is not None else 0
        if response is not None and not message:
            message = f"'{response.status_code} {response.reason}' for '{response.url}'"
        super().__init__(message)


class NotFoundError(BooruClientException):
    pass


def unquote(s):
    return urllib.parse.unquote(s)


def parse_datetime(date_string, fmt):
    try:
        # Assumes date_string is in a format that strptime can handle with timezone
        return datetime.datetime.strptime(date_string, fmt)
    except (ValueError, TypeError):
        return None


def nameext_from_url(url, data=None):
    if data is None:
        data = {}
    try:
        path = urllib.parse.urlparse(url).path
        filename = unquote(os.path.basename(path))
        if '.' in filename:
            name, ext = filename.rsplit('.', 1)
            data["filename"], data["extension"] = name, ext.lower()
        else:
            data["filename"], data["extension"] = filename, ""
    except Exception:
        data["filename"], data["extension"] = "", ""
    return data


USERAGENT_FIREFOX = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:109.0) Gecko/20100101 Firefox/118.0"

# Part 2: Core Extractor Logic

class Extractor:
    category = ""
    subcategory = ""
    directory_fmt = ("{category}", "{id}")
    filename_fmt = "{filename}.{extension}"
    _retries = 3
    _timeout = 30

    def __init__(self, match, logger_func=print):
        self.url = match.string
        self.match = match
        self.groups = match.groups()
        self.session = cloudscraper.create_scraper()
        self.session.headers["User-Agent"] = USERAGENT_FIREFOX
        self.log = logger_func
        self.api_key = None
        self.user_id = None

    def set_auth(self, api_key, user_id):
        self.api_key = api_key
        self.user_id = user_id
        self._init_auth()

    def _init_auth(self):
        """Placeholder for extractor-specific auth setup."""
        pass

    def request(self, url, method="GET", fatal=True, **kwargs):
        for attempt in range(self._retries + 1):
            try:
                response = self.session.request(method, url, timeout=self._timeout, **kwargs)
                if response.status_code < 400:
                    return response
                if response.status_code == 404 and fatal:
                    raise NotFoundError(f"Resource not found at {url}")
                self.log(f"Request for {url} failed with status {response.status_code}. Retrying...")
            except requests.exceptions.RequestException as e:
                self.log(f"Request for {url} failed: {e}. Retrying...")
            if attempt < self._retries:
                time.sleep(2 ** attempt)  # Exponential backoff between retries
        if fatal:
            raise HttpError(f"Failed to retrieve {url} after {self._retries} retries.")
        return None

    def request_json(self, url, **kwargs):
        response = self.request(url, **kwargs)
        try:
            return response.json()
        except (ValueError, TypeError) as exc:
            self.log(f"Failed to decode JSON from {url}: {exc}")
            raise BooruClientException("Invalid JSON response")

    def items(self):
        data = self.metadata()
        for item in self.posts():
            # Check for our special page update message
            if isinstance(item, tuple) and item[0] == 'PAGE_UPDATE':
                yield item
                continue

            # Otherwise, process it as a post
            post = item
            url = post.get("file_url")
            if not url:
                continue

            nameext_from_url(url, post)
            post["date"] = parse_datetime(post.get("created_at"), "%Y-%m-%dT%H:%M:%S.%f%z")

            if url.startswith("/"):
                url = self.root + url
                post['file_url'] = url  # Ensure full URL

            post.update(data)
            yield post


class BaseExtractor(Extractor):
    instances = ()

    def __init__(self, match, logger_func=print):
        super().__init__(match, logger_func)
        self._init_category()

    def _init_category(self):
        parsed_url = urllib.parse.urlparse(self.url)
        self.root = f"{parsed_url.scheme}://{parsed_url.netloc}"
        for i, group in enumerate(self.groups):
            if group is not None:
                try:
                    self.category = self.instances[i][0]
                    return
                except IndexError:
                    continue

    @classmethod
    def update(cls, instances):
        pattern_list = []
        instance_list = cls.instances = []
        for category, info in instances.items():
            root = info["root"].rstrip("/") if info["root"] else ""
            instance_list.append((category, root, info))
            pattern = info.get("pattern", re.escape(root.partition("://")[2]))
            pattern_list.append(f"({pattern})")
        return r"(?:https?://)?(?:" + "|".join(pattern_list) + r")"


# Part 3: Danbooru Extractor

class DanbooruExtractor(BaseExtractor):
    filename_fmt = "{category}_{id}_{filename}.{extension}"
    per_page = 200

    def __init__(self, match, logger_func=print):
        super().__init__(match, logger_func)
        self._auth_logged = False

    def _init_auth(self):
        if self.user_id and self.api_key:
            if not self._auth_logged:
                self.log("Danbooru auth set.")
                self._auth_logged = True
            self.session.auth = (self.user_id, self.api_key)

    # items() is inherited unchanged from Extractor.

    def metadata(self):
        return {}

    def posts(self):
        return []

    def _pagination(self, endpoint, params, prefix="b"):
        url = self.root + endpoint
        params["limit"] = self.per_page
        params["page"] = 1
        threshold = self.per_page - 20

        while True:
            posts = self.request_json(url, params=params)
            if not posts:
                break
            yield ('PAGE_UPDATE', len(posts))
            yield from posts
            if len(posts) < threshold:
                return
            if prefix:
                params["page"] = f"{prefix}{posts[-1]['id']}"
            else:
                params["page"] += 1
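# Note on the cursor in _pagination above: once a prefix is supplied, the loop
# switches from numeric pages to Danbooru's "b{id}" cursor form, i.e. "give me
# posts with ids below the last id I saw", which avoids the site's limits on
# deep page numbers. Illustrative request sequence (values made up):
#   GET /posts.json?tags=...&limit=200&page=1
#   GET /posts.json?tags=...&limit=200&page=b7654321   # 7654321 = last id of batch 1
# The early exit on len(posts) < threshold tolerates pages that come back a
# little short of the limit without issuing a pointless final request.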
BASE_PATTERN = DanbooruExtractor.update({
    "danbooru": {"root": None, "pattern": r"(?:danbooru|safebooru)\.donmai\.us"},
})


class DanbooruTagExtractor(DanbooruExtractor):
    subcategory = "tag"
    directory_fmt = ("{category}", "{search_tags}")
    pattern = BASE_PATTERN + r"(/posts\?(?:[^&#]*&)*tags=([^&#]*))"

    def metadata(self):
        self.tags = unquote(self.groups[-1].replace("+", " ")).strip()
        sanitized_tags = re.sub(r'[\\/*?:"<>|]', "_", self.tags)
        return {"search_tags": sanitized_tags}

    def posts(self):
        return self._pagination("/posts.json", {"tags": self.tags})


class DanbooruPostExtractor(DanbooruExtractor):
    subcategory = "post"
    pattern = BASE_PATTERN + r"(/post(?:s|/show)/(\d+))"

    def posts(self):
        post_id = self.groups[-1]
        url = f"{self.root}/posts/{post_id}.json"
        post = self.request_json(url)
        return (post,) if post else ()


class GelbooruBase(Extractor):
    category = "gelbooru"
    root = "https://gelbooru.com"

    def __init__(self, match, logger_func=print):
        super().__init__(match, logger_func)
        self._auth_logged = False

    def _api_request(self, params, key="post"):
        # Auth is added dynamically
        if self.api_key and self.user_id:
            if not self._auth_logged:
                self.log("Gelbooru auth set.")
                self._auth_logged = True
            params.update({"api_key": self.api_key, "user_id": self.user_id})

        url = self.root + "/index.php?page=dapi&q=index&json=1"
        data = self.request_json(url, params=params)

        if not key:
            return data
        posts = data.get(key, [])
        return posts if isinstance(posts, list) else [posts] if posts else []

    def items(self):
        base_data = self.metadata()
        base_data['category'] = self.category

        for item in self.posts():
            # Check for our special page update message
            if isinstance(item, tuple) and item[0] == 'PAGE_UPDATE':
                yield item
                continue

            # Otherwise, process it as a post
            post = item
            url = post.get("file_url")
            if not url:
                continue

            data = base_data.copy()
            data.update(post)
            nameext_from_url(url, data)
            yield data

    def metadata(self):
        return {}

    def posts(self):
        return []


GELBOORU_PATTERN = r"(?:https?://)?(?:www\.)?gelbooru\.com"


class GelbooruTagExtractor(GelbooruBase):
    subcategory = "tag"
    directory_fmt = ("{category}", "{search_tags}")
    filename_fmt = "{category}_{id}_{md5}.{extension}"
    pattern = GELBOORU_PATTERN + r"(/index\.php\?page=post&s=list&tags=([^&#]*))"

    def metadata(self):
        self.tags = unquote(self.groups[-1].replace("+", " ")).strip()
        sanitized_tags = re.sub(r'[\\/*?:"<>|]', "_", self.tags)
        return {"search_tags": sanitized_tags}

    def posts(self):
        """Scrapes HTML search pages, as the API can be restrictive for tag searches."""
        pid = 0
        posts_per_page = 42
        search_url = self.root + "/index.php"
        params = {"page": "post", "s": "list", "tags": self.tags}

        while True:
            params['pid'] = pid
            self.log(f"Scraping search results page (offset: {pid})...")
            response = self.request(search_url, params=params)
            html_content = response.text
            post_ids = re.findall(r'id="p(\d+)"', html_content)

            if not post_ids:
                self.log("No more posts found on page. Ending scrape.")
                break
            yield ('PAGE_UPDATE', len(post_ids))
            for post_id in post_ids:
                post_data = self._api_request({"s": "post", "id": post_id})
                yield from post_data

            pid += posts_per_page


class GelbooruPostExtractor(GelbooruBase):
    subcategory = "post"
    filename_fmt = "{category}_{id}_{md5}.{extension}"
    pattern = GELBOORU_PATTERN + r"(/index\.php\?page=post&s=view&id=(\d+))"

    def posts(self):
        post_id = self.groups[-1]
        return self._api_request({"s": "post", "id": post_id})


# --- Main Entry Point ---

EXTRACTORS = [
    DanbooruTagExtractor,
    DanbooruPostExtractor,
    GelbooruTagExtractor,
    GelbooruPostExtractor,
]


def find_extractor(url, logger_func):
    for extractor_cls in EXTRACTORS:
        match = re.search(extractor_cls.pattern, url)
        if match:
            return extractor_cls(match, logger_func)
    return None


def fetch_booru_data(url, api_key, user_id, logger_func):
    """
    Main function to find an extractor and yield image data.
    """
    extractor = find_extractor(url, logger_func)
    if not extractor:
        logger_func(f"No suitable Booru extractor found for URL: {url}")
        return

    logger_func(f"Using extractor: {extractor.__class__.__name__}")
    extractor.set_auth(api_key, user_id)

    # The 'items' method yields the data dictionaries directly
    yield from extractor.items()
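`fetch_booru_data` is itself a generator, interleaving `('PAGE_UPDATE', n)` tuples with post dicts. A minimal consumer sketch (the URL is illustrative; pass `None` for both credentials when the site needs no auth):

for item in fetch_booru_data(
        "https://danbooru.donmai.us/posts?tags=landscape",  # illustrative URL
        api_key=None, user_id=None, logger_func=print):
    if isinstance(item, tuple) and item[0] == 'PAGE_UPDATE':
        print(f"-- fetched a page of {item[1]} posts")
        continue
    print(item.get("id"), item.get("file_url"), item.get("filename"))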
src/core/bunkr_client.py (new file, 282 lines)
@@ -0,0 +1,282 @@
import logging
import os
import re
import requests
import html
import time
import datetime
import urllib.parse
import json
import random
import binascii
import itertools


class MockMessage:
    Directory = 1
    Url = 2
    Version = 3


class AlbumException(Exception):
    pass


class ExtractionError(AlbumException):
    pass


class HttpError(ExtractionError):
    def __init__(self, message="", response=None):
        self.response = response
        self.status = response.status_code if response is not None else 0
        super().__init__(message)


class ControlException(AlbumException):
    pass


class AbortExtraction(ExtractionError, ControlException):
    pass


try:
    re_compile = re._compiler.compile
except AttributeError:
    re_compile = re.sre_compile.compile

HTML_RE = re_compile(r"<[^>]+>")


def extr(txt, begin, end, default=""):
    try:
        first = txt.index(begin) + len(begin)
        return txt[first:txt.index(end, first)]
    except Exception:
        return default


def extract_iter(txt, begin, end, pos=None):
    try:
        index = txt.index
        lbeg = len(begin)
        lend = len(end)
        while True:
            first = index(begin, pos) + lbeg
            last = index(end, first)
            pos = last + lend
            yield txt[first:last]
    except Exception:
        return


def split_html(txt):
    try:
        return [html.unescape(x).strip() for x in HTML_RE.split(txt) if x and not x.isspace()]
    except TypeError:
        return []


def parse_datetime(date_string, format="%Y-%m-%dT%H:%M:%S%z", utcoffset=0):
    try:
        d = datetime.datetime.strptime(date_string, format)
        o = d.utcoffset()
        if o is not None:
            d = d.replace(tzinfo=None, microsecond=0) - o
        else:
            if d.microsecond:
                d = d.replace(microsecond=0)
            if utcoffset:
                d += datetime.timedelta(0, utcoffset * -3600)
        return d
    except (TypeError, IndexError, KeyError, ValueError, OverflowError):
        return None


unquote = urllib.parse.unquote
unescape = html.unescape


def decrypt_xor(encrypted, key, base64=True, fromhex=False):
    if base64:
        encrypted = binascii.a2b_base64(encrypted)
    if fromhex:
        encrypted = bytes.fromhex(encrypted.decode())
    div = len(key)
    return bytes([encrypted[i] ^ key[i % div] for i in range(len(encrypted))]).decode()


def advance(iterable, num):
    iterator = iter(iterable)
    next(itertools.islice(iterator, num, num), None)
    return iterator


def json_loads(s):
    return json.loads(s)


def json_dumps(obj):
    return json.dumps(obj, separators=(",", ":"))


class Extractor:
    def __init__(self, match, logger):
        self.log = logger
        self.url = match.string
        self.match = match
        self.groups = match.groups()
        self.session = requests.Session()
        self.session.headers["User-Agent"] = "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0"

    @classmethod
    def from_url(cls, url, logger):
        if isinstance(cls.pattern, str):
            cls.pattern = re.compile(cls.pattern)
        match = cls.pattern.match(url)
        return cls(match, logger) if match else None

    def __iter__(self):
        return self.items()

    def items(self):
        yield MockMessage.Version, 1

    def request(self, url, method="GET", fatal=True, **kwargs):
        tries = 1
        while True:
            try:
                response = self.session.request(method, url, **kwargs)
                if response.status_code < 400:
                    return response
                msg = f"'{response.status_code} {response.reason}' for '{response.url}'"
            except requests.exceptions.RequestException as exc:
                msg = str(exc)

            self.log.info("%s (retrying...)", msg)
            if tries > 4:
                break
            time.sleep(tries)
            tries += 1
        if not fatal:
            return None
        raise HttpError(msg)

    def request_json(self, url, **kwargs):
        response = self.request(url, **kwargs)
        try:
            return json_loads(response.text)
        except Exception as exc:
            self.log.warning("%s: %s", exc.__class__.__name__, exc)
            if not kwargs.get("fatal", True):
                return {}
            raise


BASE_PATTERN_BUNKR = r"(?:https?://)?(?:[a-zA-Z0-9-]+\.)?(bunkr\.(?:si|la|ws|red|black|media|site|is|to|ac|cr|ci|fi|pk|ps|sk|ph|su)|bunkrr\.ru)"
DOMAINS = ["bunkr.si", "bunkr.ws", "bunkr.la", "bunkr.red", "bunkr.black", "bunkr.media", "bunkr.site"]
CF_DOMAINS = set()


class BunkrAlbumExtractor(Extractor):
    category = "bunkr"
    root = "https://bunkr.si"
    root_dl = "https://get.bunkrr.su"
    root_api = "https://apidl.bunkr.ru"
    pattern = re.compile(BASE_PATTERN_BUNKR + r"/a/([^/?#]+)")

    def __init__(self, match, logger):
        super().__init__(match, logger)
        domain_match = re.search(BASE_PATTERN_BUNKR, match.string)
        if domain_match:
            self.root = "https://" + domain_match.group(1)
        self.endpoint = self.root_api + "/api/_001_v2"
        self.album_id = self.groups[-1]

    def items(self):
        page = self.request(self.url).text
        title = unescape(unescape(extr(page, 'property="og:title" content="', '"')))
        items_html = list(extract_iter(page, '<div class="grid-images_box', "</a>"))

        album_data = {
            "album_id": self.album_id,
            "album_name": title,
            "count": len(items_html),
        }
        yield MockMessage.Directory, album_data, {}

        for item_html in items_html:
            try:
                webpage_url = unescape(extr(item_html, ' href="', '"'))
                if webpage_url.startswith("/"):
                    webpage_url = self.root + webpage_url

                file_data = self._extract_file(webpage_url)
                info = split_html(item_html)

                if not file_data.get("name"):
                    file_data["name"] = info[-3]

                yield MockMessage.Url, file_data, {}
            except Exception as exc:
                self.log.error("%s: %s", exc.__class__.__name__, exc)

    def _extract_file(self, webpage_url):
        page = self.request(webpage_url).text
        data_id = extr(page, 'data-file-id="', '"')

        # This referer is for the API call only
        api_referer = self.root_dl + "/file/" + data_id
        headers = {"Referer": api_referer, "Origin": self.root_dl}
        data = self.request_json(self.endpoint, method="POST", headers=headers, json={"id": data_id})

        # Get the raw file URL (no domain replacement)
        file_url = decrypt_xor(data["url"], f"SECRET_KEY_{data['timestamp'] // 3600}".encode()) if data.get("encrypted") else data["url"]

        file_name = extr(page, "<h1", "<").rpartition(">")[2]

        # The download thread issues a fresh `requests` call, so we must
        # explicitly pass BOTH the User-Agent and the correct Referer.

        # 1. Get the User-Agent from this extractor's session
        user_agent = self.session.headers.get("User-Agent", "Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:102.0) Gecko/20100101 Firefox/102.0")

        # 2. Use the original album URL as the Referer
        download_referer = self.url

        return {
            "url": file_url,
            "name": unescape(file_name),
            "_http_headers": {
                "Referer": download_referer,
                "User-Agent": user_agent
            }
        }


class BunkrMediaExtractor(BunkrAlbumExtractor):
    pattern = re.compile(BASE_PATTERN_BUNKR + r"(/[fvid]/[^/?#]+)")

    def items(self):
        try:
            media_path = self.groups[-1]
            file_data = self._extract_file(self.root + media_path)
            album_data = {"album_name": file_data.get("name", "bunkr_media"), "count": 1}

            yield MockMessage.Directory, album_data, {}
            yield MockMessage.Url, file_data, {}

        except Exception as exc:
            self.log.error("%s: %s", exc.__class__.__name__, exc)
            yield MockMessage.Directory, {"album_name": "error", "count": 0}, {}


def get_bunkr_extractor(url, logger):
    """Selects the correct Bunkr extractor based on the URL pattern."""
    if BunkrAlbumExtractor.pattern.match(url):
        logger.info("Bunkr Album URL detected.")
        return BunkrAlbumExtractor.from_url(url, logger)
    elif BunkrMediaExtractor.pattern.match(url):
        logger.info("Bunkr Media URL detected.")
        return BunkrMediaExtractor.from_url(url, logger)
    else:
        logger.error(f"No suitable Bunkr extractor found for URL: {url}")
        return None


def fetch_bunkr_data(url, logger):
    """
    Main function to be called from the GUI.
    It extracts all file information from a Bunkr URL, handling both albums
    and direct file links.

    Returns:
        A tuple of (album_name, list_of_files)
        - album_name (str): The name of the album.
        - list_of_files (list): A list of dicts, each containing 'url', 'name', and '_http_headers'.
        Returns (None, None) on failure.
    """
    # --- START: Handle direct CDN file URLs ---
    try:
        parsed_url = urllib.parse.urlparse(url)
        # Check if the hostname contains 'cdn' and the path has a common file extension
        is_direct_cdn_file = (parsed_url.hostname and 'cdn' in parsed_url.hostname and 'bunkr' in parsed_url.hostname and
                              any(parsed_url.path.lower().endswith(ext) for ext in ['.mp4', '.mkv', '.webm', '.jpg', '.jpeg', '.png', '.gif', '.zip', '.rar']))

        if is_direct_cdn_file:
            logger.info("Bunkr direct file URL detected.")
            filename = os.path.basename(parsed_url.path)
            # Use the filename (without extension) as a sensible album name
            album_name = os.path.splitext(filename)[0]

            files_to_download = [{
                'url': url,
                'name': filename,
                '_http_headers': {'Referer': 'https://bunkr.ru/'}  # Use a generic Referer
            }]
            return album_name, files_to_download
    except Exception as e:
        logger.warning(f"Could not parse Bunkr URL for direct file check: {e}")
    # --- END: direct CDN handling ---

    # Original logic for album and media pages
    extractor = get_bunkr_extractor(url, logger)
    if not extractor:
        return None, None

    try:
        album_name = "default_bunkr_album"
        files_to_download = []
        for msg_type, data, metadata in extractor:
            if msg_type == MockMessage.Directory:
                raw_album_name = data.get('album_name', 'untitled')
                album_name = re.sub(r'[<>:"/\\|?*]', '_', raw_album_name).strip() or "untitled"
                logger.info(f"Processing Bunkr album: {album_name}")
            elif msg_type == MockMessage.Url:
                files_to_download.append(data)

        if not files_to_download:
            logger.warning("No files found to download from the Bunkr URL.")
            return None, None

        return album_name, files_to_download

    except Exception as e:
        logger.error(f"An error occurred while extracting Bunkr info: {e}", exc_info=True)
        return None, None
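Because XOR with a repeating key is its own inverse, `decrypt_xor` can be sanity-checked with a local round trip. A self-contained sketch (the key shape mirrors the `SECRET_KEY_{hour}` scheme `_extract_file` uses; the plaintext URL is made up):

import base64, time

key = f"SECRET_KEY_{int(time.time()) // 3600}".encode()  # hour-bucketed key, as in _extract_file
plaintext = b"https://cdn.example/file.mp4"               # made-up URL

# XOR with the repeating key, then base64 -- the same wire format the API returns.
cipher = bytes(b ^ key[i % len(key)] for i, b in enumerate(plaintext))
encoded = base64.b64encode(cipher)

assert decrypt_xor(encoded, key) == "https://cdn.example/file.mp4"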
src/core/discord_client.py (new file, 88 lines)
@@ -0,0 +1,88 @@
import time
import cloudscraper
import json


def fetch_server_channels(server_id, logger=print, cookies_dict=None):
    """
    Fetches all channels for a given Discord server ID from the API.
    Uses cloudscraper to bypass Cloudflare.
    """
    api_url = f"https://kemono.cr/api/v1/discord/server/{server_id}"
    logger(f" Fetching channels for server: {api_url}")

    scraper = cloudscraper.create_scraper()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Referer': f'https://kemono.cr/discord/server/{server_id}',
        'Accept': 'text/css'
    }

    try:
        response = scraper.get(api_url, headers=headers, cookies=cookies_dict, timeout=30)
        response.raise_for_status()
        channels = response.json()
        if isinstance(channels, list):
            logger(f" ✅ Found {len(channels)} channels for server {server_id}.")
            return channels
        return None
    except Exception as e:
        logger(f" ❌ Error fetching server channels for {server_id}: {e}")
        return None


def fetch_channel_messages(channel_id, logger=print, cancellation_event=None, pause_event=None, cookies_dict=None):
    """
    A generator that fetches all messages for a specific Discord channel, handling pagination.
    Uses cloudscraper and proper headers to bypass server protection.
    """
    scraper = cloudscraper.create_scraper()
    base_url = f"https://kemono.cr/api/v1/discord/channel/{channel_id}"
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
        'Referer': f'https://kemono.cr/discord/channel/{channel_id}',
        'Accept': 'text/css'
    }

    offset = 0
    page_size = 150

    while True:
        if cancellation_event and cancellation_event.is_set():
            logger(" Discord message fetching cancelled.")
            break
        if pause_event and pause_event.is_set():
            logger(" Discord message fetching paused...")
            while pause_event.is_set():
                if cancellation_event and cancellation_event.is_set():
                    break
                time.sleep(0.5)
            if not (cancellation_event and cancellation_event.is_set()):
                logger(" Discord message fetching resumed.")
            # Loop back so the cancellation check above runs before the next request.
            continue

        paginated_url = f"{base_url}?o={offset}"
        logger(f" Fetching messages from API: page starting at offset {offset}")

        try:
            response = scraper.get(paginated_url, headers=headers, cookies=cookies_dict, timeout=30)
            response.raise_for_status()
            messages_batch = response.json()

            if not messages_batch:
                logger(f" ✅ Reached end of messages for channel {channel_id}.")
                break

            logger(f" Fetched {len(messages_batch)} messages...")
            yield messages_batch

            if len(messages_batch) < page_size:
                logger(f" ✅ Last page of messages received for channel {channel_id}.")
                break

            offset += page_size
            time.sleep(0.5)  # Be respectful to the API

        except (cloudscraper.exceptions.CloudflareException, json.JSONDecodeError) as e:
            logger(f" ❌ Error fetching messages at offset {offset}: {e}")
            break
        except Exception as e:
            logger(f" ❌ An unexpected error occurred while fetching messages: {e}")
            break
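The two functions compose naturally: enumerate a server's channels, then stream each channel's messages. A sketch, assuming each channel dict carries an 'id' field and each message an 'attachments' list (which is how the kemono Discord API appears to shape them; the server ID is a placeholder):

channels = fetch_server_channels("123456789", logger=print)
for ch in channels or []:
    for batch in fetch_channel_messages(ch.get("id"), logger=print):
        for msg in batch:
            print(msg.get("id"), len(msg.get("attachments", [])))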
src/core/erome_client.py (new file, 131 lines)
@@ -0,0 +1,131 @@
import os
import re
import html
import time
import urllib.parse
import requests
from datetime import datetime
import cloudscraper


def extr(txt, begin, end, default=""):
    """Stripped-down version of 'extract()' to find text between two delimiters."""
    try:
        first = txt.index(begin) + len(begin)
        return txt[first:txt.index(end, first)]
    except (ValueError, IndexError):
        return default


def extract_iter(txt, begin, end):
    """Yields all occurrences of text between two delimiters."""
    try:
        index = txt.index
        lbeg = len(begin)
        lend = len(end)
        pos = 0
        while True:
            first = index(begin, pos) + lbeg
            last = index(end, first)
            pos = last + lend
            yield txt[first:last]
    except (ValueError, IndexError):
        return


def nameext_from_url(url):
    """Extracts filename and extension from a URL."""
    data = {}
    filename = urllib.parse.unquote(url.partition("?")[0].rpartition("/")[2])
    name, _, ext = filename.rpartition(".")
    if name and len(ext) <= 16:
        data["filename"], data["extension"] = name, ext.lower()
    else:
        data["filename"], data["extension"] = filename, ""
    return data


def parse_timestamp(ts, default=None):
    """Creates a datetime object from a Unix timestamp."""
    try:
        return datetime.fromtimestamp(int(ts))
    except (ValueError, TypeError):
        return default


def fetch_erome_data(url, logger):
    """
    Identifies and extracts all media files from an Erome album URL.

    Args:
        url (str): The Erome album URL (e.g., https://www.erome.com/a/albumID).
        logger (function): A function to log progress messages.

    Returns:
        tuple: A tuple containing (album_folder_name, list_of_file_dicts).
               Returns (None, []) if data extraction fails.
    """
    album_id_match = re.search(r"/a/(\w+)", url)
    if not album_id_match:
        logger(f"Error: The URL '{url}' does not appear to be a valid Erome album link.")
        return None, []

    album_id = album_id_match.group(1)
    page_url = f"https://www.erome.com/a/{album_id}"

    session = cloudscraper.create_scraper()

    try:
        logger(f" Fetching Erome album page: {page_url}")
        for attempt in range(5):
            response = session.get(page_url, timeout=30)
            response.raise_for_status()
            page_content = response.text
            if "<title>Please wait a few moments</title>" in page_content:
                logger(f" Cloudflare check detected. Waiting 5 seconds... (Attempt {attempt + 1}/5)")
                time.sleep(5)
                continue
            break
        else:
            logger(" Error: Could not bypass Cloudflare check after several attempts.")
            return None, []

        title = html.unescape(extr(page_content, 'property="og:title" content="', '"'))
        user = urllib.parse.unquote(extr(page_content, 'href="https://www.erome.com/', '"', default="unknown_user"))

        sanitized_title = re.sub(r'[<>:"/\\|?*]', '_', title).strip()
        sanitized_user = re.sub(r'[<>:"/\\|?*]', '_', user).strip()

        album_folder_name = f"Erome - {sanitized_user} - {sanitized_title} [{album_id}]"

        urls = []
        media_groups = page_content.split('<div class="media-group"')
        for group in media_groups[1:]:
            video_url = extr(group, '<source src="', '"') or extr(group, 'data-src="', '"')
            if video_url:
                urls.append(video_url)

        if not urls:
            logger(" Warning: No media URLs found on the album page.")
            return album_folder_name, []

        logger(f" Found {len(urls)} media files in album '{title}'.")

        file_list = []
        for i, file_url in enumerate(urls, 1):
            filename_info = nameext_from_url(file_url)
            # Fall back to mp4 when the URL carries no usable extension
            # (an empty string would otherwise slip past .get's default).
            filename = f"{album_id}_{sanitized_title}_{i:03d}.{filename_info.get('extension') or 'mp4'}"

            file_data = {
                "url": file_url,
                "filename": filename,
                "headers": {"Referer": page_url},
            }
            file_list.append(file_data)

        return album_folder_name, file_list

    except requests.exceptions.RequestException as e:
        logger(f" Error fetching Erome page: {e}")
        return None, []
    except Exception as e:
        logger(f" An unexpected error occurred during Erome extraction: {e}")
        return None, []
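A minimal caller sketch for `fetch_erome_data` (the album URL is illustrative; each dict in the returned list already carries the Referer header the CDN expects):

folder, files = fetch_erome_data("https://www.erome.com/a/abc123", logger=print)
if folder:
    for f in files:
        print(f["filename"], "<-", f["url"], "headers:", f["headers"])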
src/core/fap_nation_client.py (new file, 125 lines)
@@ -0,0 +1,125 @@
import re
import os
import cloudscraper
from urllib.parse import urlparse, urljoin
from ..utils.file_utils import clean_folder_name


def fetch_fap_nation_data(album_url, logger_func):
    """
    Scrapes a fap-nation page by prioritizing HLS streams first, then falling
    back to direct download links. Selects the highest quality available.
    """
    logger_func(f" [Fap-Nation] Fetching album data from: {album_url}")
    scraper = cloudscraper.create_scraper()

    try:
        response = scraper.get(album_url, timeout=45)
        response.raise_for_status()
        html_content = response.text

        title_match = re.search(r'<h1[^>]*itemprop="name"[^>]*>(.*?)</h1>', html_content, re.IGNORECASE)
        album_slug = clean_folder_name(os.path.basename(urlparse(album_url).path.strip('/')))
        album_title = clean_folder_name(title_match.group(1).strip()) if title_match else album_slug

        files_to_download = []
        final_url = None
        link_type = None
        filename_from_video_tag = None

        video_tag_title_match = re.search(r'data-plyr-config=.*?"title":.*?"([^&]+?\.mp4)"', html_content, re.IGNORECASE)
        if video_tag_title_match:
            filename_from_video_tag = clean_folder_name(video_tag_title_match.group(1))
            logger_func(f" [Fap-Nation] Found high-quality filename in video tag: {filename_from_video_tag}")

        # --- REVISED LOGIC: HLS FIRST ---

        # 1. Prioritize finding an HLS stream.
        logger_func(" [Fap-Nation] Priority 1: Searching for HLS stream...")
        iframe_match = re.search(r'<iframe[^>]+src="([^"]+mediadelivery\.net[^"]+)"', html_content, re.IGNORECASE)

        if iframe_match:
            iframe_url = iframe_match.group(1)
            logger_func(f" [Fap-Nation] Found video iframe. Visiting: {iframe_url}")
            try:
                iframe_response = scraper.get(iframe_url, timeout=30)
                iframe_response.raise_for_status()
                iframe_html = iframe_response.text

                playlist_match = re.search(r'<source[^>]+src="([^"]+\.m3u8)"', iframe_html, re.IGNORECASE)
                if playlist_match:
                    final_url = playlist_match.group(1)
                    link_type = 'hls'
                    logger_func(f" [Fap-Nation] Found embedded HLS stream in iframe: {final_url}")
            except Exception as e:
                logger_func(f" [Fap-Nation] ⚠️ Error fetching or parsing iframe content: {e}")

        if not final_url:
            logger_func(" [Fap-Nation] No stream found in iframe. Checking main page content as a last resort...")
            js_var_match = re.search(r'"(https?://[^"]+\.m3u8)"', html_content, re.IGNORECASE)
            if js_var_match:
                final_url = js_var_match.group(1)
                link_type = 'hls'
                logger_func(f" [Fap-Nation] Found HLS stream on main page: {final_url}")

        # 2. Fallback: If no HLS stream was found, search for direct links.
        if not final_url:
            logger_func(" [Fap-Nation] No HLS stream found. Priority 2 (Fallback): Searching for direct download links...")
            direct_link_pattern = r'<a\s+[^>]*href="([^"]+\.(?:mp4|webm|mkv|mov))"[^>]*>'
            direct_links_found = re.findall(direct_link_pattern, html_content, re.IGNORECASE)

            if direct_links_found:
                logger_func(f" [Fap-Nation] Found {len(direct_links_found)} direct media link(s). Selecting the best quality...")
                best_link = direct_links_found[0]
                for link in direct_links_found:
                    if '1080p' in link.lower():
                        best_link = link
                        break
                final_url = best_link
                link_type = 'direct'
                logger_func(f" [Fap-Nation] Identified direct media link: {final_url}")

        # If, after all checks, we still have no URL, then fail.
        if not final_url:
            logger_func(" [Fap-Nation] ❌ Stage 1 Failed: Could not find any HLS stream or direct link.")
            return None, []

        # --- HLS Quality Selection Logic ---
        if link_type == 'hls' and final_url:
            logger_func(" [Fap-Nation] HLS stream found. Checking for higher quality variants...")
            try:
                master_playlist_response = scraper.get(final_url, timeout=20)
                master_playlist_response.raise_for_status()
                playlist_content = master_playlist_response.text

                streams = re.findall(r'#EXT-X-STREAM-INF:.*?RESOLUTION=(\d+)x(\d+).*?\n(.*?)\s', playlist_content)

                if streams:
                    best_stream = max(streams, key=lambda s: int(s[0]) * int(s[1]))
                    height = best_stream[1]
                    relative_path = best_stream[2]
                    new_final_url = urljoin(final_url, relative_path)

                    logger_func(f" [Fap-Nation] ✅ Best quality found: {height}p. Updating URL to: {new_final_url}")
                    final_url = new_final_url
                else:
                    logger_func(" [Fap-Nation] ℹ️ No alternate quality streams found in playlist. Using original.")
            except Exception as e:
                logger_func(f" [Fap-Nation] ⚠️ Could not parse HLS master playlist for quality selection: {e}. Using original URL.")

        if final_url and link_type:
            if filename_from_video_tag:
                base_name, _ = os.path.splitext(filename_from_video_tag)
                new_filename = f"{base_name}.mp4"
            else:
                new_filename = f"{album_slug}.mp4"

            files_to_download.append({'url': final_url, 'filename': new_filename, 'type': link_type})
            logger_func(f" [Fap-Nation] ✅ Ready to download '{new_filename}' ({link_type} method).")
            return album_title, files_to_download

        logger_func(" [Fap-Nation] ❌ Could not determine a valid download link.")
        return None, []

    except Exception as e:
        logger_func(f" [Fap-Nation] ❌ Error fetching Fap-Nation data: {e}")
        return None, []
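The quality-selection step hinges on parsing `#EXT-X-STREAM-INF` entries from the HLS master playlist. A self-contained sketch running the same regex over a made-up playlist, showing how the highest-resolution variant wins:

import re
from urllib.parse import urljoin

playlist = (
    "#EXTM3U\n"
    "#EXT-X-STREAM-INF:BANDWIDTH=800000,RESOLUTION=640x360\n360p/video.m3u8\n"
    "#EXT-X-STREAM-INF:BANDWIDTH=5000000,RESOLUTION=1920x1080\n1080p/video.m3u8\n"
)
streams = re.findall(r'#EXT-X-STREAM-INF:.*?RESOLUTION=(\d+)x(\d+).*?\n(.*?)\s', playlist)
best = max(streams, key=lambda s: int(s[0]) * int(s[1]))  # rank by pixel count
print(urljoin("https://cdn.example/master.m3u8", best[2]))  # -> https://cdn.example/1080p/video.m3u8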
src/core/manager.py (new file, 285 lines)
@@ -0,0 +1,285 @@
|
||||
import threading
|
||||
import time
|
||||
import os
|
||||
import json
|
||||
import traceback
|
||||
from concurrent.futures import ThreadPoolExecutor, as_completed, Future
|
||||
from .api_client import download_from_api
|
||||
from .workers import PostProcessorWorker
|
||||
from ..config.constants import (
|
||||
STYLE_DATE_BASED, STYLE_POST_TITLE_GLOBAL_NUMBERING,
|
||||
MAX_THREADS
|
||||
)
|
||||
from ..utils.file_utils import clean_folder_name
|
||||
|
||||
|
||||
class DownloadManager:
|
||||
"""
|
||||
Manages the entire download lifecycle, acting as a bridge between the UI
|
||||
and the backend workers. It handles thread pools, task submission,
|
||||
and state management for a download session.
|
||||
"""
|
||||
|
||||
def __init__(self, progress_queue):
|
||||
"""
|
||||
Initializes the DownloadManager.
|
||||
|
||||
Args:
|
||||
progress_queue (queue.Queue): A thread-safe queue for sending
|
||||
status updates to the UI.
|
||||
"""
|
||||
self.progress_queue = progress_queue
|
||||
self.thread_pool = None
|
||||
self.active_futures = []
|
||||
self.cancellation_event = threading.Event()
|
||||
self.pause_event = threading.Event()
|
||||
self.is_running = False
|
||||
|
||||
self.total_posts = 0
|
||||
self.processed_posts = 0
|
||||
self.total_downloads = 0
|
||||
self.total_skips = 0
|
||||
self.all_kept_original_filenames = []
|
||||
self.creator_profiles_dir = None
|
||||
self.current_creator_name_for_profile = None
|
||||
self.current_creator_profile_path = None
|
||||
self.session_file_path = None
|
||||
|
||||
def _log(self, message):
|
||||
"""Puts a progress message into the queue for the UI."""
|
||||
self.progress_queue.put({'type': 'progress', 'payload': (message,)})
|
||||
|
||||
def start_session(self, config, restore_data=None):
|
||||
"""
|
||||
Starts a new download session based on the provided configuration.
|
||||
This is the main entry point called by the UI.
|
||||
|
||||
Args:
|
||||
config (dict): A dictionary containing all settings from the UI.
|
||||
restore_data (dict, optional): Data from a previous, interrupted session.
|
||||
"""
|
||||
if self.is_running:
|
||||
self._log("❌ Cannot start a new session: A session is already in progress.")
|
||||
return
|
||||
|
||||
self.session_file_path = config.get('session_file_path')
|
||||
creator_profile_data = self._setup_creator_profile(config)
|
||||
|
||||
# Save settings to profile at the start of the session
|
||||
if self.current_creator_profile_path:
|
||||
creator_profile_data['settings'] = config
|
||||
creator_profile_data.setdefault('processed_post_ids', [])
|
||||
self._save_creator_profile(creator_profile_data)
|
||||
self._log(f"✅ Loaded/created profile for '{self.current_creator_name_for_profile}'. Settings saved.")
|
||||
|
||||
self.is_running = True
|
||||
self.cancellation_event.clear()
|
||||
self.pause_event.clear()
|
||||
self.active_futures.clear()
|
||||
self.total_posts = 0
|
||||
self.processed_posts = 0
|
||||
self.total_downloads = 0
|
||||
self.total_skips = 0
|
||||
self.all_kept_original_filenames = []
|
||||
|
||||
is_single_post = bool(config.get('target_post_id_from_initial_url'))
|
||||
use_multithreading = config.get('use_multithreading', True)
|
||||
is_manga_sequential = config.get('manga_mode_active') and config.get('manga_filename_style') in [STYLE_DATE_BASED, STYLE_POST_TITLE_GLOBAL_NUMBERING]
|
||||
|
||||
should_use_multithreading_for_posts = use_multithreading and not is_single_post and not is_manga_sequential
|
||||
|
||||
if should_use_multithreading_for_posts:
|
||||
fetcher_thread = threading.Thread(
|
||||
target=self._fetch_and_queue_posts_for_pool,
|
||||
args=(config, restore_data, creator_profile_data),
|
||||
daemon=True
|
||||
)
|
||||
fetcher_thread.start()
|
||||
else:
|
||||
# Single-threaded mode does not use the manager's complex logic
|
||||
self._log("ℹ️ Manager is handing off to a single-threaded worker...")
|
||||
# The single-threaded worker will manage its own lifecycle and signals.
|
||||
# The manager's role for this session is effectively over.
|
||||
self.is_running = False # Allow another session to start if needed
|
||||
self.progress_queue.put({'type': 'handoff_to_single_thread', 'payload': (config,)})
|
||||
|
||||
|
||||
def _fetch_and_queue_posts_for_pool(self, config, restore_data, creator_profile_data):
|
||||
"""
|
||||
Fetches posts from the API in batches and submits them as tasks to a thread pool.
|
||||
This method runs in its own dedicated thread to avoid blocking the UI.
|
||||
It provides immediate feedback as soon as the first batch of posts is found.
|
||||
"""
|
||||
try:
|
||||
num_workers = min(config.get('num_threads', 4), MAX_THREADS)
|
||||
self.thread_pool = ThreadPoolExecutor(max_workers=num_workers, thread_name_prefix='PostWorker_')
|
||||
|
||||
session_processed_ids = set(restore_data.get('processed_post_ids', [])) if restore_data else set()
|
||||
profile_processed_ids = set(creator_profile_data.get('processed_post_ids', []))
|
||||
processed_ids = session_processed_ids.union(profile_processed_ids)
|
||||
|
||||
if restore_data and 'all_posts_data' in restore_data:
|
||||
# This logic for session restore remains as it relies on a pre-fetched list
|
||||
all_posts = restore_data['all_posts_data']
|
||||
posts_to_process = [p for p in all_posts if p.get('id') not in processed_ids]
|
||||
self.total_posts = len(all_posts)
|
||||
self.processed_posts = len(processed_ids)
|
||||
self._log(f"🔄 Restoring session. {len(posts_to_process)} posts remaining.")
|
||||
self.progress_queue.put({'type': 'overall_progress', 'payload': (self.total_posts, self.processed_posts)})
|
||||
|
||||
if not posts_to_process:
|
||||
self._log("✅ No new posts to process from restored session.")
|
||||
return
|
||||
|
||||
for post_data in posts_to_process:
|
||||
if self.cancellation_event.is_set(): break
|
||||
worker = PostProcessorWorker(post_data, config, self.progress_queue)
|
||||
future = self.thread_pool.submit(worker.process)
|
||||
future.add_done_callback(self._handle_future_result)
|
||||
self.active_futures.append(future)
|
||||
else:
|
||||
# --- START: REFACTORED STREAMING LOGIC ---
|
||||
post_generator = download_from_api(
|
||||
api_url_input=config['api_url'],
|
||||
logger=self._log,
|
||||
start_page=config.get('start_page'),
|
||||
end_page=config.get('end_page'),
|
||||
manga_mode=config.get('manga_mode_active', False),
|
||||
cancellation_event=self.cancellation_event,
|
||||
pause_event=self.pause_event,
|
||||
use_cookie=config.get('use_cookie', False),
|
||||
cookie_text=config.get('cookie_text', ''),
|
||||
selected_cookie_file=config.get('selected_cookie_file'),
|
||||
app_base_dir=config.get('app_base_dir'),
|
||||
manga_filename_style_for_sort_check=config.get('manga_filename_style'),
|
||||
processed_post_ids=list(processed_ids)
|
||||
)
|
||||
|
||||
self.total_posts = 0
|
||||
self.processed_posts = 0
|
||||
|
||||
# Process posts in batches as they are yielded by the API client
|
||||
for batch in post_generator:
|
||||
if self.cancellation_event.is_set():
|
||||
self._log(" Post fetching cancelled.")
|
||||
break
|
||||
|
||||
# Filter out any posts that might have been processed since the start
|
||||
posts_in_batch_to_process = [p for p in batch if p.get('id') not in processed_ids]
|
||||
|
||||
if not posts_in_batch_to_process:
|
||||
continue
|
||||
|
||||
# Update total count and immediately inform the UI
|
||||
self.total_posts += len(posts_in_batch_to_process)
|
||||
self.progress_queue.put({'type': 'overall_progress', 'payload': (self.total_posts, self.processed_posts)})
|
||||
|
||||
for post_data in posts_in_batch_to_process:
|
||||
if self.cancellation_event.is_set(): break
|
||||
worker = PostProcessorWorker(post_data, config, self.progress_queue)
|
||||
future = self.thread_pool.submit(worker.process)
|
||||
future.add_done_callback(self._handle_future_result)
|
||||
self.active_futures.append(future)
|
||||
|
||||
if self.total_posts == 0 and not self.cancellation_event.is_set():
|
||||
self._log("✅ No new posts found to process.")
|
||||
|
||||
except Exception as e:
|
||||
self._log(f"❌ CRITICAL ERROR in post fetcher thread: {e}")
|
||||
self._log(traceback.format_exc())
|
||||
finally:
|
||||
if self.thread_pool:
|
||||
self.thread_pool.shutdown(wait=True)
|
||||
self.is_running = False
|
||||
self._log("🏁 All processing tasks have completed or been cancelled.")
|
||||
self.progress_queue.put({
|
||||
'type': 'finished',
|
||||
'payload': (self.total_downloads, self.total_skips, self.cancellation_event.is_set(), self.all_kept_original_filenames)
|
||||
})
|
||||
|
||||
    def _handle_future_result(self, future: Future):
        """Callback executed when a worker task completes."""
        if self.cancellation_event.is_set():
            return

        # NOTE: the original code did `with threading.Lock():` here, which creates
        # a brand-new lock on every call and therefore protects nothing. The
        # counters must be guarded by one lock shared across all callbacks; it is
        # assumed to be created once in __init__ as `self.counter_lock = threading.Lock()`.
        with self.counter_lock:  # Protect shared counters
            self.processed_posts += 1
            try:
                if future.cancelled():
                    self._log("⚠️ A post processing task was cancelled.")
                    self.total_skips += 1
                else:
                    result = future.result()
                    (dl_count, skip_count, kept_originals,
                     retryable, permanent, history) = result
                    self.total_downloads += dl_count
                    self.total_skips += skip_count
                    self.all_kept_original_filenames.extend(kept_originals)
                    if retryable:
                        self.progress_queue.put({'type': 'retryable_failure', 'payload': (retryable,)})
                    if permanent:
                        self.progress_queue.put({'type': 'permanent_failure', 'payload': (permanent,)})
                    if history:
                        self.progress_queue.put({'type': 'post_processed_history', 'payload': (history,)})
                        post_id = history.get('post_id')
                        if post_id and self.current_creator_profile_path:
                            profile_data = self._setup_creator_profile({'creator_name_for_profile': self.current_creator_name_for_profile, 'session_file_path': self.session_file_path})
                            if post_id not in profile_data.get('processed_post_ids', []):
                                profile_data.setdefault('processed_post_ids', []).append(post_id)
                                self._save_creator_profile(profile_data)

            except Exception as e:
                self._log(f"❌ Worker task resulted in an exception: {e}")
                self.total_skips += 1  # Count errored posts as skipped

        self.progress_queue.put({'type': 'overall_progress', 'payload': (self.total_posts, self.processed_posts)})

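    # A minimal sketch of why the shared lock above matters: `add_done_callback`
    # runs each callback on whichever worker thread finished the future, so the
    # counter updates race unless every thread acquires the *same* Lock object.
    #
    #   import threading
    #   lock = threading.Lock()        # one shared instance
    #   def callback(future):
    #       with lock:                 # serializes all callbacks
    #           ...
    #   # whereas `with threading.Lock():` acquires a brand-new, uncontended
    #   # lock on every call and serializes nothing.
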
    def _setup_creator_profile(self, config):
        """Prepares the path and loads data for the current creator's profile."""
        self.current_creator_name_for_profile = config.get('creator_name_for_profile')
        if not self.current_creator_name_for_profile:
            self._log("⚠️ Cannot create creator profile: Name not provided in config.")
            return {}

        appdata_dir = os.path.dirname(config.get('session_file_path', '.'))
        self.creator_profiles_dir = os.path.join(appdata_dir, "creator_profiles")
        os.makedirs(self.creator_profiles_dir, exist_ok=True)

        safe_filename = clean_folder_name(self.current_creator_name_for_profile) + ".json"
        self.current_creator_profile_path = os.path.join(self.creator_profiles_dir, safe_filename)

        if os.path.exists(self.current_creator_profile_path):
            try:
                with open(self.current_creator_profile_path, 'r', encoding='utf-8') as f:
                    return json.load(f)
            except (json.JSONDecodeError, OSError) as e:
                self._log(f"❌ Error loading creator profile '{safe_filename}': {e}. Starting fresh.")
        return {}

    def _save_creator_profile(self, data):
        """Saves the provided data to the current creator's profile file."""
        if not self.current_creator_profile_path:
            return
        try:
            temp_path = self.current_creator_profile_path + ".tmp"
            with open(temp_path, 'w', encoding='utf-8') as f:
                json.dump(data, f, indent=2)
            os.replace(temp_path, self.current_creator_profile_path)
        except OSError as e:
            self._log(f"❌ Error saving creator profile to '{self.current_creator_profile_path}': {e}")

    def cancel_session(self):
        """Cancels the current running session."""
        if not self.is_running:
            return

        if self.cancellation_event.is_set():
            self._log("ℹ️ Cancellation already in progress.")
            return

        self._log("⚠️ Cancellation requested by user...")
        self.cancellation_event.set()

        if self.thread_pool:
            self._log(" Signaling all worker threads to stop and shutting down pool...")
            self.thread_pool.shutdown(wait=False)

src/core/mangadex_client.py (Normal file, 189 lines added)
@@ -0,0 +1,189 @@
# src/core/mangadex_client.py

import os
import re
import time
import cloudscraper
from collections import defaultdict
from ..utils.file_utils import clean_folder_name

def fetch_mangadex_data(start_url, output_dir, logger_func, file_progress_callback, overall_progress_callback, pause_event, cancellation_event):
    """
    Fetches and downloads all content from a MangaDex series or chapter URL.
    Returns a tuple of (downloaded_count, skipped_count).
    """
    grand_total_dl = 0
    grand_total_skip = 0

    api = _MangadexAPI(logger_func)

    def _check_pause():
        if cancellation_event and cancellation_event.is_set(): return True
        if pause_event and pause_event.is_set():
            logger_func(" Download paused...")
            while pause_event.is_set():
                if cancellation_event and cancellation_event.is_set(): return True
                time.sleep(0.5)
            logger_func(" Download resumed.")
        # Guard against a None cancellation_event; the original unconditional
        # `.is_set()` call would raise AttributeError here.
        return bool(cancellation_event and cancellation_event.is_set())

    series_match = re.search(r"mangadex\.org/(?:title|manga)/([0-9a-f-]+)", start_url)
    chapter_match = re.search(r"mangadex\.org/chapter/([0-9a-f-]+)", start_url)

    chapters_to_process = []
    if series_match:
        series_id = series_match.group(1)
        logger_func(f" Series detected. Fetching chapter list for ID: {series_id}")
        chapters_to_process = api.get_manga_chapters(series_id, cancellation_event, pause_event)
    elif chapter_match:
        chapter_id = chapter_match.group(1)
        logger_func(f" Single chapter detected. Fetching info for ID: {chapter_id}")
        chapter_info = api.get_chapter_info(chapter_id)
        if chapter_info:
            chapters_to_process = [chapter_info]

    if not chapters_to_process:
        logger_func("❌ No chapters found or failed to fetch chapter info.")
        return 0, 0

    logger_func(f"✅ Found {len(chapters_to_process)} chapter(s) to download.")
    if overall_progress_callback:
        overall_progress_callback.emit(len(chapters_to_process), 0)

    for chap_idx, chapter_json in enumerate(chapters_to_process):
        if _check_pause(): break
        try:
            metadata = api.transform_chapter_data(chapter_json)
            logger_func("-" * 40)
            logger_func(f"Processing Chapter {chap_idx + 1}/{len(chapters_to_process)}: Vol. {metadata['volume']} Ch. {metadata['chapter']}{metadata['chapter_minor']} - {metadata['title']}")

            server_info = api.get_at_home_server(chapter_json["id"])
            if not server_info:
                logger_func(" ❌ Could not get image server for this chapter. Skipping.")
                continue

            base_url = f"{server_info['baseUrl']}/data/{server_info['chapter']['hash']}/"
            image_files = server_info['chapter']['data']

            series_folder = clean_folder_name(metadata['manga'])
            chapter_folder_title = metadata['title'] or ''
            chapter_folder = clean_folder_name(f"Vol {metadata['volume']:02d} Chap {metadata['chapter']:03d}{metadata['chapter_minor']} - {chapter_folder_title}".strip().strip('-').strip())
            final_save_path = os.path.join(output_dir, series_folder, chapter_folder)
            os.makedirs(final_save_path, exist_ok=True)

            for img_idx, filename in enumerate(image_files):
                if _check_pause(): break

                full_img_url = base_url + filename
                img_path = os.path.join(final_save_path, f"{img_idx + 1:03d}{os.path.splitext(filename)[1]}")

                if os.path.exists(img_path):
                    logger_func(f" -> Skip ({img_idx+1}/{len(image_files)}): '{os.path.basename(img_path)}' already exists.")
                    grand_total_skip += 1
                    continue

                logger_func(f" Downloading ({img_idx+1}/{len(image_files)}): '{os.path.basename(img_path)}'...")

                try:
                    response = api.session.get(full_img_url, stream=True, timeout=60, headers={'Referer': 'https://mangadex.org/'})
                    response.raise_for_status()
                    total_size = int(response.headers.get('content-length', 0))

                    if file_progress_callback:
                        file_progress_callback.emit(os.path.basename(img_path), (0, total_size))

                    with open(img_path, 'wb') as f:
                        downloaded_bytes = 0
                        for chunk in response.iter_content(chunk_size=8192):
                            if _check_pause(): break
                            f.write(chunk)
                            downloaded_bytes += len(chunk)
                            if file_progress_callback:
                                file_progress_callback.emit(os.path.basename(img_path), (downloaded_bytes, total_size))

                    if _check_pause():
                        if os.path.exists(img_path): os.remove(img_path)
                        break

                    grand_total_dl += 1
                except Exception as e:
                    logger_func(f" ❌ Failed to download page {img_idx+1}: {e}")
                    grand_total_skip += 1

            if overall_progress_callback:
                overall_progress_callback.emit(len(chapters_to_process), chap_idx + 1)
            time.sleep(1)

        except Exception as e:
            logger_func(f" ❌ An unexpected error occurred while processing chapter {chapter_json.get('id')}: {e}")

    return grand_total_dl, grand_total_skip

class _MangadexAPI:
    def __init__(self, logger_func):
        self.logger_func = logger_func
        self.session = cloudscraper.create_scraper()
        self.root = "https://api.mangadex.org"

    def _call(self, endpoint, params=None, cancellation_event=None):
        if cancellation_event and cancellation_event.is_set(): return None
        try:
            response = self.session.get(f"{self.root}{endpoint}", params=params, timeout=30)
            if response.status_code == 429:
                retry_after = int(response.headers.get("X-RateLimit-Retry-After", 5))
                self.logger_func(f" ⚠️ Rate limited. Waiting for {retry_after} seconds...")
                time.sleep(retry_after)
                return self._call(endpoint, params, cancellation_event)
            response.raise_for_status()
            return response.json()
        except Exception as e:
            self.logger_func(f" ❌ API call to '{endpoint}' failed: {e}")
            return None

    def get_manga_chapters(self, series_id, cancellation_event, pause_event):
        all_chapters = []
        offset = 0
        limit = 500
        base_params = {
            "limit": limit, "order[volume]": "asc", "order[chapter]": "asc",
            "translatedLanguage[]": ["en"], "includes[]": ["scanlation_group", "user", "manga"]
        }
        while True:
            if cancellation_event.is_set(): break
            while pause_event.is_set(): time.sleep(0.5)

            params = {**base_params, "offset": offset}
            data = self._call(f"/manga/{series_id}/feed", params, cancellation_event)
            if not data or data.get("result") != "ok": break

            results = data.get("data", [])
            all_chapters.extend(results)

            if (offset + limit) >= data.get("total", 0): break
            offset += limit
        return all_chapters

    def get_chapter_info(self, chapter_id):
        params = {"includes[]": ["scanlation_group", "user", "manga"]}
        data = self._call(f"/chapter/{chapter_id}", params)
        return data.get("data") if data and data.get("result") == "ok" else None

    def get_at_home_server(self, chapter_id):
        return self._call(f"/at-home/server/{chapter_id}")

    def transform_chapter_data(self, chapter):
        relationships = {item["type"]: item for item in chapter.get("relationships", [])}
        manga = relationships.get("manga", {})
        c_attrs = chapter.get("attributes", {})
        m_attrs = manga.get("attributes", {})

        chapter_num_str = c_attrs.get("chapter", "0") or "0"
        chnum, sep, minor = chapter_num_str.partition(".")

        return {
            "manga": (m_attrs.get("title", {}).get("en") or next(iter(m_attrs.get("title", {}).values()), "Unknown Series")),
            "title": c_attrs.get("title", ""),
            "volume": int(float(c_attrs.get("volume", 0) or 0)),
            "chapter": int(float(chnum or 0)),
            "chapter_minor": sep + minor if minor else ""
        }
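A quick worked example of the chapter-number normalization in transform_chapter_data above: MangaDex reports fractional chapters as strings, and str.partition keeps the suffix intact.

chnum, sep, minor = "10.5".partition(".")   # -> ("10", ".", "5")
int(float(chnum or 0))                      # -> 10   (the "chapter" field)
sep + minor if minor else ""                # -> ".5" (the "chapter_minor" field)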
src/core/nhentai_client.py (Normal file, 44 lines added)
@@ -0,0 +1,44 @@
import requests
import cloudscraper
import json

def fetch_nhentai_gallery(gallery_id, logger=print):
    """
    Fetches the metadata for a single nhentai gallery using cloudscraper to bypass Cloudflare.

    Args:
        gallery_id (str or int): The ID of the nhentai gallery.
        logger (function): A function to log progress and error messages.

    Returns:
        dict: A dictionary containing the gallery's metadata if successful, otherwise None.
    """
    api_url = f"https://nhentai.net/api/gallery/{gallery_id}"

    scraper = cloudscraper.create_scraper()

    logger(f" Fetching nhentai gallery metadata from: {api_url}")

    try:
        # Use the scraper to make the GET request
        response = scraper.get(api_url, timeout=20)

        if response.status_code == 404:
            logger(f" ❌ Gallery not found (404): ID {gallery_id}")
            return None

        response.raise_for_status()

        gallery_data = response.json()

        if "id" in gallery_data and "media_id" in gallery_data and "images" in gallery_data:
            logger(f" ✅ Successfully fetched metadata for '{gallery_data['title']['english']}'")
            gallery_data['pages'] = gallery_data.pop('images')['pages']
            return gallery_data
        else:
            logger(" ❌ API response is missing essential keys (id, media_id, or images).")
            return None

    except Exception as e:
        logger(f" ❌ An error occurred while fetching gallery {gallery_id}: {e}")
        return None
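The caller still has to turn the returned metadata into page URLs. A hedged sketch of that step; the i.nhentai.net URL pattern and the one-letter type codes ('j', 'p', 'g') are assumptions about nhentai's API, not something this module guarantees:

EXT_MAP = {'j': 'jpg', 'p': 'png', 'g': 'gif'}  # assumed nhentai type codes

def page_urls(gallery):
    """Yield full-size page URLs from fetch_nhentai_gallery()'s result (sketch)."""
    for i, page in enumerate(gallery['pages'], start=1):
        ext = EXT_MAP.get(page.get('t'), 'jpg')
        yield f"https://i.nhentai.net/galleries/{gallery['media_id']}/{i}.{ext}"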
src/core/pixeldrain_client.py (Normal file, 93 lines added)
@@ -0,0 +1,93 @@
import os
import re
import cloudscraper
from ..utils.file_utils import clean_folder_name
# --- ADDED IMPORTS ---
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry

def fetch_pixeldrain_data(url: str, logger):
    """
    Scrapes a given Pixeldrain URL to extract album or file information.
    Handles single files (/u/), albums/lists (/l/), and folders (/d/).
    """
    logger(f"Fetching data for Pixeldrain URL: {url}")
    scraper = cloudscraper.create_scraper()
    root = "https://pixeldrain.com"

    # --- START OF FIX: Add a robust retry strategy ---
    try:
        retry_strategy = Retry(
            total=5,  # Total number of retries
            backoff_factor=1,  # Wait 1s, 2s, 4s, 8s between retries
            status_forcelist=[429, 500, 502, 503, 504],  # Retry on these server errors
            allowed_methods=["HEAD", "GET"]
        )
        adapter = HTTPAdapter(max_retries=retry_strategy)
        scraper.mount("https://", adapter)
        scraper.mount("http://", adapter)
        logger(" [Pixeldrain] Configured retry strategy for network requests.")
    except Exception as e:
        logger(f" [Pixeldrain] ⚠️ Could not configure retry strategy: {e}")
    # --- END OF FIX ---

    file_match = re.search(r"/u/(\w+)", url)
    album_match = re.search(r"/l/(\w+)", url)
    folder_match = re.search(r"/d/([^?]+)", url)

    try:
        if file_match:
            file_id = file_match.group(1)
            logger(f" Detected Pixeldrain File ID: {file_id}")
            api_url = f"{root}/api/file/{file_id}/info"
            data = scraper.get(api_url).json()

            title = data.get("name", file_id)

            files = [{
                'url': f"{root}/api/file/{file_id}?download",
                'filename': data.get("name", f"{file_id}.tmp")
            }]
            return title, files

        elif album_match:
            album_id = album_match.group(1)
            logger(f" Detected Pixeldrain Album ID: {album_id}")
            api_url = f"{root}/api/list/{album_id}"
            data = scraper.get(api_url).json()

            title = data.get("title", album_id)

            files = []
            for file_info in data.get("files", []):
                files.append({
                    'url': f"{root}/api/file/{file_info['id']}?download",
                    'filename': file_info.get("name", f"{file_info['id']}.tmp")
                })
            return title, files

        elif folder_match:
            path_id = folder_match.group(1)
            logger(f" Detected Pixeldrain Folder Path: {path_id}")
            api_url = f"{root}/api/filesystem/{path_id}?stat"
            data = scraper.get(api_url).json()

            path_info = data["path"][data["base_index"]]
            title = path_info.get("name", path_id)

            files = []
            for child in data.get("children", []):
                if child.get("type") == "file":
                    files.append({
                        'url': f"{root}/api/filesystem{child['path']}?attach",
                        'filename': child.get("name")
                    })
            return title, files

        else:
            logger(" ❌ Could not identify Pixeldrain URL type (file, album, or folder).")
            return None, []

    except Exception as e:
        logger(f"❌ An error occurred while fetching Pixeldrain data: {e}")
        return None, []
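Because all three branches return the same (title, files) shape, callers never need to know which URL type matched. A minimal usage sketch (the album ID is hypothetical and print stands in for the app's logger):

title, files = fetch_pixeldrain_data("https://pixeldrain.com/l/abc123", print)
if title:
    for f in files:
        print(f"{f['filename']} -> {f['url']}")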
src/core/rule34video_client.py (Normal file, 107 lines added)
@@ -0,0 +1,107 @@
import cloudscraper
from bs4 import BeautifulSoup
import re
import html

def fetch_rule34video_data(video_url, logger_func):
    """
    Scrapes a rule34video.com page by specifically finding the 'Download' div,
    then selecting the best available quality link.

    Args:
        video_url (str): The full URL to the rule34video.com page.
        logger_func (callable): Function to use for logging progress.

    Returns:
        tuple: (video_title, final_video_url) or (None, None) on failure.
    """
    logger_func(f" [Rule34Video] Fetching page: {video_url}")
    scraper = cloudscraper.create_scraper()

    try:
        main_page_response = scraper.get(video_url, timeout=20)
        main_page_response.raise_for_status()

        soup = BeautifulSoup(main_page_response.text, 'html.parser')

        page_title_tag = soup.find('title')
        video_title = page_title_tag.text.strip() if page_title_tag else "rule34video_file"

        # --- START OF FINAL FIX ---
        # 1. Find the SPECIFIC "Download" label first. This is the key.
        download_label = soup.find('div', class_='label', string='Download')

        if not download_label:
            logger_func(" [Rule34Video] ❌ Could not find the 'Download' label. Unable to locate the correct links div.")
            return None, None

        # 2. The correct container is the parent of this label.
        download_div = download_label.parent

        # 3. Now, find the links ONLY within this correct container.
        link_tags = download_div.find_all('a', class_='tag_item')
        if not link_tags:
            logger_func(" [Rule34Video] ❌ Found the 'Download' div, but no download links were inside it.")
            return None, None
        # --- END OF FINAL FIX ---

        links_by_quality = {}
        quality_pattern = re.compile(r'(\d+p|4k)')

        for tag in link_tags:
            href = tag.get('href')
            if not href:
                continue

            quality = None
            text_match = quality_pattern.search(tag.text)
            if text_match:
                quality = text_match.group(1)
            else:
                href_match = quality_pattern.search(href)
                if href_match:
                    quality = href_match.group(1)

            if quality:
                links_by_quality[quality] = href

        if not links_by_quality:
            logger_func(" [Rule34Video] ⚠️ Could not parse specific qualities. Using first available link as a fallback.")
            final_video_url = link_tags[0].get('href')
            if not final_video_url:
                logger_func(" [Rule34Video] ❌ Fallback failed: First link tag had no href attribute.")
                return None, None

            final_video_url = html.unescape(final_video_url)
            logger_func(f" [Rule34Video] ✅ Selected first available link as fallback: {final_video_url}")
            return video_title, final_video_url

        logger_func(f" [Rule34Video] Found available qualities: {list(links_by_quality.keys())}")

        final_video_url = None
        if '1080p' in links_by_quality:
            final_video_url = links_by_quality['1080p']
            logger_func(" [Rule34Video] ✅ Selected preferred 1080p link.")
        elif '720p' in links_by_quality:
            final_video_url = links_by_quality['720p']
            logger_func(" [Rule34Video] ✅ 1080p not found. Selected fallback 720p link.")
        else:
            # '4k' added last: the quality pattern above can match it, but the
            # original selection logic could never pick it, so a 4k-only page
            # would fail despite having a usable link.
            fallback_order = ['480p', '360p', '4k']
            for quality in fallback_order:
                if quality in links_by_quality:
                    final_video_url = links_by_quality[quality]
                    logger_func(f" [Rule34Video] ⚠️ 1080p/720p not found. Selected best available fallback: {quality}")
                    break

        if not final_video_url:
            logger_func(" [Rule34Video] ❌ Could not find a suitable download link.")
            return None, None

        final_video_url = html.unescape(final_video_url)
        logger_func(f" [Rule34Video] ✅ Selected direct download URL: {final_video_url}")

        return video_title, final_video_url

    except Exception as e:
        logger_func(f" [Rule34Video] ❌ An error occurred: {e}")
        return None, None
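The function hands back a direct file URL, so downloading is just streaming it to disk. A sketch, with a hypothetical video URL:

import cloudscraper

title, url = fetch_rule34video_data("https://rule34video.com/video/12345/", print)
if url:
    scraper = cloudscraper.create_scraper()
    with scraper.get(url, stream=True, timeout=60) as r:
        r.raise_for_status()
        with open(f"{title}.mp4", "wb") as f:
            for chunk in r.iter_content(chunk_size=8192):
                f.write(chunk)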
src/core/saint2_client.py (Normal file, 163 lines added)
@@ -0,0 +1,163 @@
import os
import re as re_module
import html
import urllib.parse
import requests


PATTERN_CACHE = {}

def re(pattern):
    """Compile a regular expression pattern and cache it."""
    try:
        return PATTERN_CACHE[pattern]
    except KeyError:
        p = PATTERN_CACHE[pattern] = re_module.compile(pattern)
        return p

def extract_from(txt, pos=None, default=""):
    """Returns a function that extracts text between two delimiters from 'txt'."""
    # NOTE: `str.index` (not `str.find`) is required here: `find` returns -1 on a
    # miss instead of raising, so the except clause below would never fire and a
    # failed lookup would silently return garbage instead of `default`.
    def extr(begin, end, index=txt.index, txt=txt):
        nonlocal pos
        try:
            start_pos = pos if pos is not None else 0
            first = index(begin, start_pos) + len(begin)
            last = index(end, first)
            if pos is not None:
                pos = last + len(end)
            return txt[first:last]
        except (ValueError, IndexError):
            return default
    return extr

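# Example: the closure captures the page text once, so repeated extractions stay
# cheap, and a missing delimiter now returns the default instead of garbage.
#
#   extr = extract_from("<title>My Album - Saint</title>")
#   extr("<title>", "<")   # -> 'My Album - Saint'
#   extr("<h1>", "<")      # -> '' (str.index raises ValueError on a miss)
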
|
||||
def nameext_from_url(url):
|
||||
"""Extract filename and extension from a URL."""
|
||||
data = {}
|
||||
filename = urllib.parse.unquote(url.partition("?")[0].rpartition("/")[2])
|
||||
name, _, ext = filename.rpartition(".")
|
||||
if name and len(ext) <= 16:
|
||||
data["filename"], data["extension"] = name, ext.lower()
|
||||
else:
|
||||
data["filename"], data["extension"] = filename, ""
|
||||
return data
|
||||
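# Example (hypothetical URL): the extension is only split off when it looks like
# one (non-empty name, 16 chars or fewer), and it is always lowercased.
#
#   nameext_from_url("https://cdn.example/video%20one.MP4?token=x")
#   # -> {'filename': 'video one', 'extension': 'mp4'}
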
|
||||
class BaseExtractor:
|
||||
"""A simplified base class for extractors."""
|
||||
def __init__(self, match, session, logger):
|
||||
self.match = match
|
||||
self.groups = match.groups()
|
||||
self.session = session
|
||||
self.log = logger
|
||||
|
||||
def request(self, url, **kwargs):
|
||||
"""Makes an HTTP request using the session."""
|
||||
try:
|
||||
response = self.session.get(url, **kwargs)
|
||||
response.raise_for_status()
|
||||
return response
|
||||
except requests.exceptions.RequestException as e:
|
||||
self.log(f"Error making request to {url}: {e}")
|
||||
return None
|
||||
|
||||
class SaintAlbumExtractor(BaseExtractor):
|
||||
"""Extractor for saint.su albums."""
|
||||
root = "https://saint2.su"
|
||||
pattern = re(r"(?:https?://)?saint\d*\.(?:su|pk|cr|to)/a/([^/?#]+)")
|
||||
|
||||
def items(self):
|
||||
"""Generator that yields all files from an album."""
|
||||
album_id = self.groups[0]
|
||||
response = self.request(f"{self.root}/a/{album_id}")
|
||||
if not response:
|
||||
return None, []
|
||||
|
||||
extr = extract_from(response.text)
|
||||
title = extr("<title>", "<").rpartition(" - ")[0]
|
||||
self.log(f"Downloading album: {title}")
|
||||
|
||||
files_html = re_module.findall(r'<a class="image".*?</a>', response.text, re_module.DOTALL)
|
||||
file_list = []
|
||||
for i, file_html in enumerate(files_html, 1):
|
||||
file_extr = extract_from(file_html)
|
||||
file_url = html.unescape(file_extr("onclick=\"play('", "'"))
|
||||
if not file_url:
|
||||
continue
|
||||
|
||||
filename_info = nameext_from_url(file_url)
|
||||
filename = f"{filename_info['filename']}.{filename_info['extension']}"
|
||||
|
||||
file_data = {
|
||||
"url": file_url,
|
||||
"filename": filename,
|
||||
"headers": {"Referer": response.url},
|
||||
}
|
||||
file_list.append(file_data)
|
||||
|
||||
return title, file_list
|
||||
|
||||
class SaintMediaExtractor(BaseExtractor):
|
||||
"""Extractor for single saint.su media links."""
|
||||
root = "https://saint2.su"
|
||||
pattern = re(r"(?:https?://)?saint\d*\.(?:su|pk|cr|to)(/(embe)?d/([^/?#]+))")
|
||||
|
||||
def items(self):
|
||||
"""Generator that yields the single file from a media page."""
|
||||
path, embed, media_id = self.groups
|
||||
url = self.root + path
|
||||
response = self.request(url)
|
||||
if not response:
|
||||
return None, []
|
||||
|
||||
extr = extract_from(response.text)
|
||||
file_url = ""
|
||||
title = extr("<title>", "<").rpartition(" - ")[0] or media_id
|
||||
|
||||
if embed: # /embed/ link
|
||||
file_url = html.unescape(extr('<source src="', '"'))
|
||||
else: # /d/ link
|
||||
file_url = html.unescape(extr('<a href="', '"'))
|
||||
|
||||
if not file_url:
|
||||
self.log("Could not find video URL on the page.")
|
||||
return title, []
|
||||
|
||||
filename_info = nameext_from_url(file_url)
|
||||
filename = f"{filename_info['filename'] or media_id}.{filename_info['extension'] or 'mp4'}"
|
||||
|
||||
file_data = {
|
||||
"url": file_url,
|
||||
"filename": filename,
|
||||
"headers": {"Referer": response.url}
|
||||
}
|
||||
|
||||
return title, [file_data]
|
||||
|
||||
|
||||
def fetch_saint2_data(url, logger):
|
||||
"""
|
||||
Identifies the correct extractor for a saint2.su URL and returns the data.
|
||||
|
||||
Args:
|
||||
url (str): The saint2.su URL.
|
||||
logger (function): A function to log progress messages.
|
||||
|
||||
Returns:
|
||||
tuple: A tuple containing (album_title, list_of_file_dicts).
|
||||
Returns (None, []) if no data could be fetched.
|
||||
"""
|
||||
extractors = [SaintMediaExtractor, SaintAlbumExtractor]
|
||||
session = requests.Session()
|
||||
session.headers.update({
|
||||
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
|
||||
})
|
||||
|
||||
for extractor_cls in extractors:
|
||||
match = extractor_cls.pattern.match(url)
|
||||
if match:
|
||||
extractor = extractor_cls(match, session, logger)
|
||||
album_title, files = extractor.items()
|
||||
sanitized_title = re_module.sub(r'[<>:"/\\|?*]', '_', album_title) if album_title else "saint2_download"
|
||||
return sanitized_title, files
|
||||
|
||||
logger(f"Error: The URL '{url}' does not match a known saint2 pattern.")
|
||||
return None, []
|
||||
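fetch_saint2_data tries SaintMediaExtractor before SaintAlbumExtractor, so /d/ and /embed/ links never fall through to the album pattern. Typical use, with a hypothetical album ID:

title, files = fetch_saint2_data("https://saint2.su/a/someAlbumId", print)
for f in files:
    print(f["filename"], f["url"], f["headers"])  # each dict carries the Referer the CDN expects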
src/core/simpcity_client.py (Normal file, 102 lines added)
@@ -0,0 +1,102 @@
# src/core/simpcity_client.py

import cloudscraper
from bs4 import BeautifulSoup
from urllib.parse import urlparse, unquote
import os
import re
from ..utils.file_utils import clean_folder_name
import urllib.parse

def fetch_single_simpcity_page(url, logger_func, cookies=None, post_id=None):
    """
    Scrapes a single SimpCity page for images, external links, video tags, and iframes.
    """
    scraper = cloudscraper.create_scraper()
    headers = {'Referer': 'https://simpcity.cr/'}

    try:
        response = scraper.get(url, timeout=30, headers=headers, cookies=cookies)
        final_url = response.url  # Capture the final URL after any redirects

        if response.status_code == 404:
            return None, [], final_url
        response.raise_for_status()
        soup = BeautifulSoup(response.text, 'html.parser')

        album_title = None
        title_element = soup.find('h1', class_='p-title-value')
        if title_element:
            album_title = title_element.text.strip()

        search_scope = soup
        if post_id:
            post_content_container = soup.find('div', attrs={'data-lb-id': f'post-{post_id}'})
            if post_content_container:
                logger_func(f" [SimpCity] ✅ Isolating search to post content container for ID {post_id}.")
                search_scope = post_content_container
            else:
                logger_func(f" [SimpCity] ⚠️ Could not find content container for post ID {post_id}.")

        jobs_on_page = []

        # Find native SimpCity images
        image_tags = search_scope.find_all('img', class_='bbImage')
        for img_tag in image_tags:
            thumbnail_url = img_tag.get('src')
            if not thumbnail_url or not isinstance(thumbnail_url, str) or 'saint2.su' in thumbnail_url: continue
            full_url = thumbnail_url.replace('.md.', '.')
            filename = img_tag.get('alt', '').replace('.md.', '.') or os.path.basename(unquote(urlparse(full_url).path))
            jobs_on_page.append({'type': 'image', 'filename': filename, 'url': full_url})

        # Find links in <a> tags, now with redirect handling
        link_tags = search_scope.find_all('a', href=True)
        for link in link_tags:
            href = link.get('href', '')

            actual_url = href
            if '/misc/goto?url=' in href:
                try:
                    # Extract and decode the real URL from the 'url' parameter
                    parsed_href = urlparse(href)
                    query_params = dict(urllib.parse.parse_qsl(parsed_href.query))
                    if 'url' in query_params:
                        actual_url = unquote(query_params['url'])
                except Exception:
                    actual_url = href  # Fallback if parsing fails

            # Perform all checks on the 'actual_url' which is now the real destination
            if re.search(r'pixeldrain\.com/[lud]/', actual_url): jobs_on_page.append({'type': 'pixeldrain', 'url': actual_url})
            elif re.search(r'saint2\.(su|pk|cr|to)/embed/', actual_url): jobs_on_page.append({'type': 'saint2', 'url': actual_url})
            elif re.search(r'bunkr\.(?:cr|si|la|ws|is|ru|su|red|black|media|site|to|ac|ci|fi|pk|ps|sk|ph)|bunkrr\.ru', actual_url): jobs_on_page.append({'type': 'bunkr', 'url': actual_url})
            elif re.search(r'mega\.(nz|io)', actual_url): jobs_on_page.append({'type': 'mega', 'url': actual_url})
            elif re.search(r'gofile\.io', actual_url): jobs_on_page.append({'type': 'gofile', 'url': actual_url})

        # Find direct Saint2 video embeds in <video> tags
        video_tags = search_scope.find_all('video')
        for video in video_tags:
            source_tag = video.find('source')
            if source_tag and source_tag.get('src'):
                src_url = source_tag['src']
                if re.search(r'saint2\.(su|pk|cr|to)', src_url):
                    jobs_on_page.append({'type': 'saint2_direct', 'url': src_url})

        # Find embeds in <iframe> tags (as a fallback)
        iframe_tags = search_scope.find_all('iframe')
        for iframe in iframe_tags:
            src_url = iframe.get('src')
            if src_url and isinstance(src_url, str):
                if re.search(r'saint2\.(su|pk|cr|to)/embed/', src_url):
                    jobs_on_page.append({'type': 'saint2', 'url': src_url})

        if jobs_on_page:
            # A dict keyed by URL drops duplicates that were found in more than one way
            unique_jobs = list({job['url']: job for job in jobs_on_page}.values())
            logger_func(f" [SimpCity] Scraper found jobs: {[job['type'] for job in unique_jobs]}")
            return album_title, unique_jobs, final_url

        return album_title, [], final_url

    except Exception as e:
        logger_func(f" [SimpCity] ❌ Error fetching page {url}: {e}")
        raise
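Each job dict is tagged with a type so the caller can route it to the matching client module above (pixeldrain, saint2, bunkr, mega, gofile). A sketch of that dispatch, with a hypothetical thread URL:

title, jobs, final_url = fetch_single_simpcity_page("https://simpcity.cr/threads/example.1/", print)
for job in jobs:
    if job['type'] == 'pixeldrain':
        fetch_pixeldrain_data(job['url'], print)
    elif job['type'] == 'saint2':
        fetch_saint2_data(job['url'], print)
    # ...remaining types route to their own clients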
src/core/toonily_client.py (Normal file, 73 lines added)
@@ -0,0 +1,73 @@
import cloudscraper
from bs4 import BeautifulSoup
import time

def get_chapter_list(series_url, logger_func):
    logger_func(f" [Toonily] Scraping series page for chapter list: {series_url}")
    scraper = cloudscraper.create_scraper()
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
        'Referer': 'https://toonily.com/'
    }

    try:
        response = scraper.get(series_url, timeout=30, headers=headers)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')
        chapter_links = soup.select('li.wp-manga-chapter > a')

        if not chapter_links:
            logger_func(" [Toonily] ❌ Could not find any chapter links on the page.")
            return []

        urls = [link['href'] for link in chapter_links]
        urls.reverse()  # the page lists newest first; reverse into reading order
        logger_func(f" [Toonily] Found {len(urls)} chapters.")
        return urls

    except Exception as e:
        logger_func(f" [Toonily] ❌ Error getting chapter list: {e}")
        return []


def fetch_chapter_data(chapter_url, logger_func, scraper_session):
    """
    Scrapes a single Toonily.com chapter page for its title and image URLs.
    """
    main_series_url = chapter_url.rsplit('/', 2)[0] + '/'
    headers = {
        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
        'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.7',
        'Accept-Language': 'en-US,en;q=0.9',
        'Referer': main_series_url
    }

    try:
        response = scraper_session.get(chapter_url, timeout=30, headers=headers)
        response.raise_for_status()
        soup = BeautifulSoup(response.content, 'html.parser')

        title_element = soup.select_one('h1#chapter-heading')
        image_container = soup.select_one('div.reading-content')

        if not title_element or not image_container:
            logger_func(" [Toonily] ❌ Page structure invalid. Could not find title or image container.")
            return None, None, []

        full_chapter_title = title_element.text.strip()

        if " - Chapter" in full_chapter_title:
            series_title = full_chapter_title.split(" - Chapter")[0].strip()
        else:
            series_title = full_chapter_title.strip()

        chapter_title = full_chapter_title  # The full string is best for the chapter folder name

        image_elements = image_container.select('img')
        # Prefer the lazy-load 'data-src' but fall back to 'src'; using `or` (rather
        # than dict-style .get with a default) also covers a present-but-empty data-src.
        image_urls = [(img.get('data-src') or img.get('src')).strip() for img in image_elements if img.get('data-src') or img.get('src')]

        return series_title, chapter_title, image_urls

    except Exception as e:
        logger_func(f" [Toonily] ❌ An error occurred scraping chapter '{chapter_url}': {e}")
        return None, None, []
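fetch_chapter_data takes the scraper session as a parameter so one Cloudflare clearance can be reused across every chapter of a series. Typical wiring, with a hypothetical series slug:

scraper = cloudscraper.create_scraper()
for chapter_url in get_chapter_list("https://toonily.com/webtoon/example-series/", print):
    series, chapter, images = fetch_chapter_data(chapter_url, print, scraper)
    print(f"{series} / {chapter}: {len(images)} pages")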
src/core/workers.py (Normal file, 2407 lines added; diff not rendered)

src/i18n/__init__.py (Normal file, 1 line added)
@@ -0,0 +1 @@
# ...existing code...

src/i18n/translator.py (Normal file, 3271 lines added; diff not rendered)

src/services/__init__.py (Normal file, 1 line added)
@@ -0,0 +1 @@
# ...existing code...

src/services/drive_downloader.py (Normal file, 703 lines added)
@@ -0,0 +1,703 @@
# --- Standard Library Imports ---
import os
import re
import traceback
import json
import base64
import time
import zipfile
import struct
import sys
import io
import hashlib
from contextlib import redirect_stdout
from urllib.parse import urlparse, urlunparse, parse_qs, urlencode
from concurrent.futures import ThreadPoolExecutor, as_completed
from threading import Lock

# --- Third-party Library Imports ---
import requests
import cloudscraper
from requests.adapters import HTTPAdapter
from urllib3.util.retry import Retry
from ..utils.file_utils import clean_folder_name

try:
    from Crypto.Cipher import AES
    PYCRYPTODOME_AVAILABLE = True
except ImportError:
    PYCRYPTODOME_AVAILABLE = False

try:
    import gdown
    GDRIVE_AVAILABLE = True
except ImportError:
    GDRIVE_AVAILABLE = False

MEGA_API_URL = "https://g.api.mega.co.nz"
MIN_SIZE_FOR_MULTIPART_MEGA = 20 * 1024 * 1024  # 20 MB
NUM_PARTS_FOR_MEGA = 5

def _get_filename_from_headers(headers):
    cd = headers.get('content-disposition')
    if not cd:
        return None
    fname_match = re.findall('filename="?([^"]+)"?', cd)
    if fname_match:
        sanitized_name = re.sub(r'[<>:"/\\|?*]', '_', fname_match[0].strip())
        return sanitized_name
    return None

def urlb64_to_b64(s):
    s += '=' * (-len(s) % 4)
    return s.replace('-', '+').replace('_', '/')

def b64_to_bytes(s):
    return base64.b64decode(urlb64_to_b64(s))

def bytes_to_b64(b):
    return base64.b64encode(b).decode('utf-8')

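# Example: Mega's API uses URL-safe, unpadded base64; these helpers restore the
# padding and the standard alphabet before decoding.
#
#   urlb64_to_b64("q83v-_8")  # -> 'q83v+/8='
#   b64_to_bytes("q83v-_8")   # -> b'\xab\xcd\xef\xfb\xff'
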
def _decrypt_mega_attribute(encrypted_attr_b64, key_bytes):
    try:
        attr_bytes = b64_to_bytes(encrypted_attr_b64)
        padded_len = (len(attr_bytes) + 15) & ~15
        padded_attr_bytes = attr_bytes.ljust(padded_len, b'\0')
        iv = b'\0' * 16
        cipher = AES.new(key_bytes, AES.MODE_CBC, iv)
        decrypted_attr = cipher.decrypt(padded_attr_bytes)
        json_str = decrypted_attr.strip(b'\0').decode('utf-8')
        if json_str.startswith('MEGA'):
            return json.loads(json_str[4:])
        return json.loads(json_str)
    except Exception:
        return {}

def _decrypt_mega_key(encrypted_key_b64, master_key_bytes):
    key_bytes = b64_to_bytes(encrypted_key_b64)
    cipher = AES.new(master_key_bytes, AES.MODE_ECB)
    return cipher.decrypt(key_bytes)

def _parse_mega_key(key_b64):
    key_bytes = b64_to_bytes(key_b64)
    key_parts = struct.unpack('>' + 'I' * (len(key_bytes) // 4), key_bytes)
    if len(key_parts) == 8:
        final_key = (key_parts[0] ^ key_parts[4], key_parts[1] ^ key_parts[5], key_parts[2] ^ key_parts[6], key_parts[3] ^ key_parts[7])
        iv = (key_parts[4], key_parts[5], 0, 0)
        key_bytes = struct.pack('>' + 'I' * 4, *final_key)
        iv_bytes = struct.pack('>' + 'I' * 4, *iv)
        return key_bytes, iv_bytes, None
    elif len(key_parts) == 4:
        return key_bytes, None, None
    raise ValueError("Invalid Mega key length")

def _process_file_key(file_key_bytes):
    key_parts = struct.unpack('>' + 'I' * 8, file_key_bytes)
    final_key_parts = (key_parts[0] ^ key_parts[4], key_parts[1] ^ key_parts[5], key_parts[2] ^ key_parts[6], key_parts[3] ^ key_parts[7])
    return struct.pack('>' + 'I' * 4, *final_key_parts)

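# Illustration with a toy 256-bit key: XOR-folding the two halves recovers the
# real AES key, and words 4-5 of the raw key are the CTR nonce that
# `nonce = iv[:8]` slices off below.
#
#   raw = struct.pack('>8I', 1, 2, 3, 4, 9, 2, 3, 4)
#   struct.unpack('>4I', _process_file_key(raw))  # -> (8, 0, 0, 0)
#   raw[16:24]                                    # the 8-byte CTR nonce
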
def _download_and_decrypt_chunk(args):
    url, temp_path, start_byte, end_byte, key, nonce, part_num, progress_data, progress_callback_func, file_name, cancellation_event, pause_event = args
    try:
        headers = {'Range': f'bytes={start_byte}-{end_byte}'}
        initial_counter = start_byte // 16
        cipher = AES.new(key, AES.MODE_CTR, nonce=nonce, initial_value=initial_counter)

        with requests.get(url, headers=headers, stream=True, timeout=(15, 300)) as r:
            r.raise_for_status()
            with open(temp_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    if cancellation_event and cancellation_event.is_set():
                        return False
                    while pause_event and pause_event.is_set():
                        time.sleep(0.5)
                        if cancellation_event and cancellation_event.is_set():
                            return False

                    decrypted_chunk = cipher.decrypt(chunk)
                    f.write(decrypted_chunk)
                    with progress_data['lock']:
                        progress_data['downloaded'] += len(chunk)
                        if progress_callback_func and (time.time() - progress_data['last_update'] > 1):
                            progress_callback_func(file_name, (progress_data['downloaded'], progress_data['total_size']))
                            progress_data['last_update'] = time.time()
        return True
    except Exception:
        return False  # part failure is reported via the return value

def download_and_decrypt_mega_file(info, download_path, logger_func, progress_callback_func=None, cancellation_event=None, pause_event=None):
    file_name = info['file_name']
    file_size = info['file_size']
    dl_url = info['dl_url']
    final_path = os.path.join(download_path, file_name)

    if os.path.exists(final_path) and os.path.getsize(final_path) == file_size:
        logger_func(f" [Mega] ℹ️ File '{file_name}' already exists with the correct size. Skipping.")
        return

    os.makedirs(download_path, exist_ok=True)
    key, iv, _ = _parse_mega_key(urlb64_to_b64(info['file_key']))
    nonce = iv[:8]

    # Check for cancellation before starting
    if cancellation_event and cancellation_event.is_set():
        logger_func(f" [Mega] Download for '{file_name}' cancelled before starting.")
        return

    if file_size < MIN_SIZE_FOR_MULTIPART_MEGA:
        logger_func(f" [Mega] Downloading '{file_name}' (Single Stream)...")
        try:
            cipher = AES.new(key, AES.MODE_CTR, nonce=nonce, initial_value=0)
            with requests.get(dl_url, stream=True, timeout=(15, 300)) as r:
                r.raise_for_status()
                downloaded_bytes = 0
                last_update_time = time.time()
                with open(final_path, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        if cancellation_event and cancellation_event.is_set():
                            break
                        while pause_event and pause_event.is_set():
                            time.sleep(0.5)
                            if cancellation_event and cancellation_event.is_set():
                                break
                        if cancellation_event and cancellation_event.is_set():
                            break

                        decrypted_chunk = cipher.decrypt(chunk)
                        f.write(decrypted_chunk)
                        downloaded_bytes += len(chunk)
                        current_time = time.time()
                        if current_time - last_update_time > 1:
                            if progress_callback_func:
                                progress_callback_func(file_name, (downloaded_bytes, file_size))
                            last_update_time = time.time()

            if cancellation_event and cancellation_event.is_set():
                logger_func(f" [Mega] ❌ Download cancelled for '{file_name}'. Deleting partial file.")
                if os.path.exists(final_path): os.remove(final_path)
            else:
                logger_func(f" [Mega] ✅ Successfully downloaded '{file_name}'")

        except Exception as e:
            logger_func(f" [Mega] ❌ Download failed for '{file_name}': {e}")
            if os.path.exists(final_path): os.remove(final_path)
    else:
        logger_func(f" [Mega] Downloading '{file_name}' ({NUM_PARTS_FOR_MEGA} Parts)...")
        # FIX (assumed intent): align part boundaries to the 16-byte AES block
        # size. Each part starts its AES-CTR cipher at counter start_byte // 16,
        # which is only correct when start_byte is a multiple of 16; the original
        # unaligned split would decrypt every part after the first incorrectly.
        chunk_size = (file_size // NUM_PARTS_FOR_MEGA) // 16 * 16
        chunks = []
        for i in range(NUM_PARTS_FOR_MEGA):
            start = i * chunk_size
            end = start + chunk_size - 1 if i < NUM_PARTS_FOR_MEGA - 1 else file_size - 1
            chunks.append((start, end))

        progress_data = {'downloaded': 0, 'total_size': file_size, 'lock': Lock(), 'last_update': time.time()}

        tasks = []
        for i, (start, end) in enumerate(chunks):
            temp_path = f"{final_path}.part{i}"
            tasks.append((dl_url, temp_path, start, end, key, nonce, i, progress_data, progress_callback_func, file_name, cancellation_event, pause_event))

        all_parts_successful = True
        with ThreadPoolExecutor(max_workers=NUM_PARTS_FOR_MEGA) as executor:
            if cancellation_event and cancellation_event.is_set():
                executor.shutdown(wait=False, cancel_futures=True)
                all_parts_successful = False
            else:
                results = executor.map(_download_and_decrypt_chunk, tasks)
                for result in results:
                    if not result:
                        all_parts_successful = False

        # Check for cancellation after threads finish/are cancelled
        if cancellation_event and cancellation_event.is_set():
            all_parts_successful = False
            logger_func(f" [Mega] ❌ Multipart download cancelled for '{file_name}'.")

        if all_parts_successful:
            logger_func(f" [Mega] All parts for '{file_name}' downloaded. Assembling file...")
            try:
                with open(final_path, 'wb') as f_out:
                    for i in range(NUM_PARTS_FOR_MEGA):
                        part_path = f"{final_path}.part{i}"
                        with open(part_path, 'rb') as f_in:
                            f_out.write(f_in.read())
                        os.remove(part_path)
                logger_func(f" [Mega] ✅ Successfully downloaded and assembled '{file_name}'")
            except Exception as e:
                logger_func(f" [Mega] ❌ File assembly failed for '{file_name}': {e}")
        else:
            logger_func(f" [Mega] ❌ Multipart download failed or was cancelled for '{file_name}'. Cleaning up partial files.")
            for i in range(NUM_PARTS_FOR_MEGA):
                part_path = f"{final_path}.part{i}"
                if os.path.exists(part_path):
                    os.remove(part_path)


def _process_mega_folder(folder_id, folder_key, session, logger_func):
    try:
        master_key_bytes, _, _ = _parse_mega_key(folder_key)
        payload = [{"a": "f", "c": 1, "r": 1}]
        params = {'n': folder_id}
        response = session.post(f"{MEGA_API_URL}/cs", params=params, json=payload, timeout=30)
        response.raise_for_status()
        res_json = response.json()

        if isinstance(res_json, int) or (isinstance(res_json, list) and res_json and isinstance(res_json[0], int)):
            error_code = res_json if isinstance(res_json, int) else res_json[0]
            logger_func(f" [Mega Folder] ❌ API returned error code: {error_code}. The folder may be invalid or removed.")
            return None, None
        if not isinstance(res_json, list) or not res_json or not isinstance(res_json[0], dict) or 'f' not in res_json[0]:
            logger_func(f" [Mega Folder] ❌ Invalid folder data received: {str(res_json)[:200]}")
            return None, None

        nodes = res_json[0]['f']
        decrypted_nodes = {}
        for node in nodes:
            try:
                encrypted_key_b64 = node['k'].split(':')[-1]
                decrypted_key_raw = _decrypt_mega_key(encrypted_key_b64, master_key_bytes)

                attr_key = _process_file_key(decrypted_key_raw) if node.get('t') == 0 else decrypted_key_raw
                attributes = _decrypt_mega_attribute(node['a'], attr_key)
                name = re.sub(r'[<>:"/\\|?*]', '_', attributes.get('n', f"unknown_{node['h']}"))

                decrypted_nodes[node['h']] = {"name": name, "parent": node.get('p'), "type": node.get('t'), "size": node.get('s'), "raw_key_b64": urlb64_to_b64(bytes_to_b64(decrypted_key_raw))}
            except Exception as e:
                logger_func(f" [Mega Folder] ⚠️ Could not process node {node.get('h')}: {e}")

        root_name = decrypted_nodes.get(folder_id, {}).get("name", "Mega_Folder")
        files_to_download = []
        for handle, node_info in decrypted_nodes.items():
            if node_info.get("type") == 0:
                path_parts = [node_info['name']]
                current_parent_id = node_info.get('parent')
                # FIX (assumed intent): stop *before* prepending the root folder's
                # own name. The original checked for the root only after inserting,
                # so direct children of the root ended up under a duplicated
                # root_name directory while deeper files did not.
                while current_parent_id in decrypted_nodes:
                    if current_parent_id == folder_id:
                        break
                    parent_node = decrypted_nodes[current_parent_id]
                    path_parts.insert(0, parent_node['name'])
                    current_parent_id = parent_node.get('parent')
                files_to_download.append({'h': handle, 's': node_info['size'], 'key': node_info['raw_key_b64'], 'relative_path': os.path.join(*path_parts)})

        return root_name, files_to_download
    except Exception as e:
        logger_func(f" [Mega Folder] ❌ Failed to get folder info: {e}")
        return None, None

def download_mega_file(mega_url, download_path, logger_func=print, progress_callback_func=None, overall_progress_callback=None, cancellation_event=None, pause_event=None):
    if not PYCRYPTODOME_AVAILABLE:
        logger_func("❌ Mega download failed: 'pycryptodome' library is not installed.")
        if overall_progress_callback: overall_progress_callback(1, 1)
        return

    logger_func(f" [Mega] Initializing download for: {mega_url}")
    folder_match = re.search(r'mega(?:\.co)?\.nz/folder/([a-zA-Z0-9]+)#([a-zA-Z0-9_.-]+)', mega_url)
    file_match = re.search(r'mega(?:\.co)?\.nz/(?:file/|#!)?([a-zA-Z0-9]+)(?:#|!)([a-zA-Z0-9_.-]+)', mega_url)
    session = requests.Session()
    session.headers.update({'User-Agent': 'Kemono-Downloader-PyQt/1.0'})

    if folder_match:
        folder_id, folder_key = folder_match.groups()
        logger_func(" [Mega] Folder link detected. Starting crawl...")
        root_folder_name, files = _process_mega_folder(folder_id, folder_key, session, logger_func)

        if root_folder_name is None or files is None:
            logger_func(" [Mega Folder] ❌ Crawling failed. Aborting.")
            if overall_progress_callback: overall_progress_callback(1, 1)
            return

        if not files:
            logger_func(" [Mega Folder] ℹ️ Folder is empty. Nothing to download.")
            if overall_progress_callback: overall_progress_callback(0, 0)
            return

        logger_func(" [Mega Folder] Prioritizing largest files first...")
        files.sort(key=lambda f: f.get('s', 0), reverse=True)

        total_files = len(files)
        logger_func(f" [Mega Folder] ✅ Crawl complete. Found {total_files} file(s) in folder '{root_folder_name}'.")

        if overall_progress_callback: overall_progress_callback(total_files, 0)

        folder_download_path = os.path.join(download_path, root_folder_name)
        os.makedirs(folder_download_path, exist_ok=True)

        progress_lock = Lock()
        processed_count = 0
        MAX_WORKERS = 3

        logger_func(f" [Mega Folder] Starting concurrent download with up to {MAX_WORKERS} workers...")

        def _download_worker(file_data):
            nonlocal processed_count
            try:
                if cancellation_event and cancellation_event.is_set():
                    return

                params = {'n': folder_id}
                payload = [{"a": "g", "g": 1, "n": file_data['h']}]
                response = session.post(f"{MEGA_API_URL}/cs", params=params, json=payload, timeout=20)
                response.raise_for_status()
                res_json = response.json()

                if isinstance(res_json, int) or (isinstance(res_json, list) and res_json and isinstance(res_json[0], int)):
                    error_code = res_json if isinstance(res_json, int) else res_json[0]
                    logger_func(f" [Mega Worker] ❌ API Error {error_code} for '{file_data['relative_path']}'. Skipping.")
                    return

                dl_temp_url = res_json[0]['g']
                file_info = {'file_name': os.path.basename(file_data['relative_path']), 'file_size': file_data['s'], 'dl_url': dl_temp_url, 'file_key': file_data['key']}
                file_specific_path = os.path.dirname(file_data['relative_path'])
                final_download_dir = os.path.join(folder_download_path, file_specific_path)

                download_and_decrypt_mega_file(file_info, final_download_dir, logger_func, progress_callback_func, cancellation_event, pause_event)

            except Exception as e:
                # Don't log error if it was a cancellation
                if not (cancellation_event and cancellation_event.is_set()):
                    logger_func(f" [Mega Worker] ❌ Failed to process '{file_data['relative_path']}': {e}")
            finally:
                with progress_lock:
                    processed_count += 1
                    if overall_progress_callback:
                        overall_progress_callback(total_files, processed_count)

        with ThreadPoolExecutor(max_workers=MAX_WORKERS) as executor:
            futures = [executor.submit(_download_worker, file_data) for file_data in files]
            for future in as_completed(futures):
                if cancellation_event and cancellation_event.is_set():
                    # Attempt to cancel remaining futures
                    for f in futures:
                        if not f.done():
                            f.cancel()
                    break
                try:
                    future.result()
                except Exception as e:
                    if not (cancellation_event and cancellation_event.is_set()):
                        logger_func(f" [Mega Folder] A download worker failed with an error: {e}")

        logger_func(" [Mega Folder] ✅ All concurrent downloads complete or cancelled.")

    elif file_match:
        if overall_progress_callback: overall_progress_callback(1, 0)
        file_id, file_key = file_match.groups()
        try:
            payload = [{"a": "g", "p": file_id}]
            response = session.post(f"{MEGA_API_URL}/cs", json=payload, timeout=20)
            res_json = response.json()
            if isinstance(res_json, list) and res_json and isinstance(res_json[0], int):
                logger_func(f" [Mega] ❌ API Error {res_json[0]}. Link may be invalid or removed.")
                if overall_progress_callback: overall_progress_callback(1, 1)
                return

            file_size = res_json[0]['s']
            at_b64 = res_json[0]['at']
            raw_file_key_bytes = b64_to_bytes(file_key)
            attr_key_bytes = _process_file_key(raw_file_key_bytes)
            attrs = _decrypt_mega_attribute(at_b64, attr_key_bytes)

            file_name = attrs.get('n', f"unknown_file_{file_id}")
            payload_dl = [{"a": "g", "g": 1, "p": file_id}]
            response_dl = session.post(f"{MEGA_API_URL}/cs", json=payload_dl, timeout=20)
            dl_temp_url = response_dl.json()[0]['g']
            file_info_obj = {'file_name': file_name, 'file_size': file_size, 'dl_url': dl_temp_url, 'file_key': file_key}

            download_and_decrypt_mega_file(file_info_obj, download_path, logger_func, progress_callback_func, cancellation_event, pause_event)

            if overall_progress_callback: overall_progress_callback(1, 1)
        except Exception as e:
            if not (cancellation_event and cancellation_event.is_set()):
                logger_func(f" [Mega] ❌ Failed to process single file: {e}")
            if overall_progress_callback: overall_progress_callback(1, 1)
    else:
        logger_func(" [Mega] ❌ Error: Invalid or unsupported Mega URL format.")
        if '/folder/' in mega_url and '/file/' in mega_url:
            logger_func(" [Mega] ℹ️ This looks like a link to a file inside a folder. Please use a direct, shareable link to the individual file.")
        if overall_progress_callback: overall_progress_callback(1, 1)

def download_gdrive_file(url, download_path, logger_func=print, progress_callback_func=None, overall_progress_callback=None, use_post_subfolder=False, post_title=None):
    if not GDRIVE_AVAILABLE:
        logger_func("❌ Google Drive download failed: 'gdown' library is not installed.")
        return

    # --- Subfolder Logic ---
    final_download_path = download_path
    if use_post_subfolder and post_title:
        subfolder_name = clean_folder_name(post_title)
        final_download_path = os.path.join(download_path, subfolder_name)
        logger_func(f" [G-Drive] Using post subfolder: '{subfolder_name}'")
    os.makedirs(final_download_path, exist_ok=True)
    # --- End Subfolder Logic ---

    original_stdout = sys.stdout
    original_stderr = sys.stderr
    captured_output_buffer = io.StringIO()

    paths = None
    try:
        logger_func(f" [G-Drive] Starting folder download for: {url}")

        sys.stdout = captured_output_buffer
        sys.stderr = captured_output_buffer

        paths = gdown.download_folder(url, output=final_download_path, quiet=False, use_cookies=False, remaining_ok=True)

    except Exception as e:
        logger_func(f" [G-Drive] ❌ An unexpected error occurred: {e}")
        logger_func(" [G-Drive] ℹ️ This can happen if the folder is private, deleted, or you have been rate-limited by Google.")
    finally:
        sys.stdout = original_stdout
        sys.stderr = original_stderr

    captured_output = captured_output_buffer.getvalue()
    if captured_output:
        processed_files_count = 0
        current_filename = None

        if overall_progress_callback:
            overall_progress_callback(-1, 0)

        lines = captured_output.splitlines()
        for i, line in enumerate(lines):
            cleaned_line = line.strip('\r').strip()
            if not cleaned_line:
                continue

            if cleaned_line.startswith("To: "):
                try:
                    if current_filename:
                        logger_func(f" [G-Drive] ✅ Saved '{current_filename}'")

                    filepath = cleaned_line[4:]
                    current_filename = os.path.basename(filepath)
                    processed_files_count += 1

                    logger_func(f" [G-Drive] ({processed_files_count}/?) Downloading '{current_filename}'...")
                    if progress_callback_func:
                        progress_callback_func(current_filename, "In Progress...")
                    if overall_progress_callback:
                        overall_progress_callback(-1, processed_files_count - 1)

                except Exception:
                    logger_func(f" [gdown] {cleaned_line}")

        if current_filename:
            logger_func(f" [G-Drive] ✅ Saved '{current_filename}'")
            if overall_progress_callback:
                overall_progress_callback(-1, processed_files_count)

    if paths and all(os.path.exists(p) for p in paths):
        final_folder_path = os.path.dirname(paths[0]) if paths else final_download_path
        logger_func(f" [G-Drive] ✅ Finished. Downloaded {len(paths)} file(s) to folder '{final_folder_path}'")
    else:
        logger_func(" [G-Drive] ❌ Download failed or folder was empty. Check the log above for details from gdown.")

def download_dropbox_file(dropbox_link, download_path=".", logger_func=print, progress_callback_func=None, use_post_subfolder=False, post_title=None):
    logger_func(f" [Dropbox] Attempting to download: {dropbox_link}")

    final_download_path = download_path
    if use_post_subfolder and post_title:
        subfolder_name = clean_folder_name(post_title)
        final_download_path = os.path.join(download_path, subfolder_name)
        logger_func(f" [Dropbox] Using post subfolder: '{subfolder_name}'")

    parsed_url = urlparse(dropbox_link)
    query_params = parse_qs(parsed_url.query)
    query_params['dl'] = ['1']
    new_query = urlencode(query_params, doseq=True)
    direct_download_url = urlunparse(parsed_url._replace(query=new_query))
    logger_func(f" [Dropbox] Using direct download URL: {direct_download_url}")
    scraper = cloudscraper.create_scraper()
    try:
        os.makedirs(final_download_path, exist_ok=True)
        with scraper.get(direct_download_url, stream=True, allow_redirects=True, timeout=(20, 600)) as r:
            r.raise_for_status()
            filename = _get_filename_from_headers(r.headers) or os.path.basename(parsed_url.path) or "dropbox_download"
            if not os.path.splitext(filename)[1]:
                filename += ".zip"
            full_save_path = os.path.join(final_download_path, filename)
            logger_func(f" [Dropbox] Starting download of '{filename}'...")
            total_size = int(r.headers.get('content-length', 0))
            downloaded_bytes = 0
            last_log_time = time.time()
            with open(full_save_path, 'wb') as f:
                for chunk in r.iter_content(chunk_size=8192):
                    f.write(chunk)
                    downloaded_bytes += len(chunk)
                    current_time = time.time()
                    if current_time - last_log_time > 1:
                        if progress_callback_func:
                            progress_callback_func(filename, (downloaded_bytes, total_size))
                        last_log_time = current_time
        logger_func(f" [Dropbox] ✅ Download complete: {full_save_path}")
        if zipfile.is_zipfile(full_save_path):
            logger_func(" [Dropbox] 📦 Detected zip file. Attempting to extract...")
            extract_folder_name = os.path.splitext(filename)[0]
            extract_path = os.path.join(final_download_path, extract_folder_name)
            os.makedirs(extract_path, exist_ok=True)
            with zipfile.ZipFile(full_save_path, 'r') as zip_ref:
                zip_ref.extractall(extract_path)
            logger_func(f" [Dropbox] ✅ Successfully extracted to folder: '{extract_path}'")
            try:
                os.remove(full_save_path)
                logger_func(" [Dropbox] 🗑️ Removed original zip file.")
            except OSError as e:
                logger_func(f" [Dropbox] ⚠️ Could not remove original zip file: {e}")
    except Exception as e:
        logger_func(f" [Dropbox] ❌ An error occurred during Dropbox download: {e}")

def _get_gofile_api_token(session, logger_func):
|
||||
"""Creates a temporary guest account to get an API token."""
|
||||
try:
|
||||
logger_func(" [Gofile] Creating temporary guest account for API token...")
|
||||
response = session.post("https://api.gofile.io/accounts", timeout=20)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
if data.get("status") == "ok":
|
||||
token = data["data"]["token"]
|
||||
logger_func(" [Gofile] ✅ Successfully obtained API token.")
|
||||
return token
|
||||
else:
|
||||
logger_func(f" [Gofile] ❌ Failed to get API token, status: {data.get('status')}")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger_func(f" [Gofile] ❌ Error creating guest account: {e}")
|
||||
return None
|
||||
|
||||
def _get_gofile_website_token(session, logger_func):
|
||||
"""Fetches the 'wt' (website token) from Gofile's global JS file."""
|
||||
try:
|
||||
logger_func(" [Gofile] Fetching website token (wt)...")
|
||||
response = session.get("https://gofile.io/dist/js/global.js", timeout=20)
|
||||
response.raise_for_status()
|
||||
match = re.search(r'\.wt = "([^"]+)"', response.text)
|
||||
if match:
|
||||
wt = match.group(1)
|
||||
logger_func(" [Gofile] ✅ Successfully fetched website token.")
|
||||
return wt
|
||||
logger_func(" [Gofile] ❌ Could not find website token in JS file.")
|
||||
return None
|
||||
except Exception as e:
|
||||
logger_func(f" [Gofile] ❌ Error fetching website token: {e}")
|
||||
return None
|
||||
|
||||
def download_gofile_folder(gofile_url, download_path, logger_func=print, progress_callback_func=None, overall_progress_callback=None):
|
||||
"""Downloads all files from a Gofile folder URL."""
|
||||
logger_func(f" [Gofile] Initializing download for: {gofile_url}")
|
||||
|
||||
match = re.search(r"gofile\.io/d/([^/?#]+)", gofile_url)
|
||||
if not match:
|
||||
logger_func(" [Gofile] ❌ Invalid Gofile folder URL format.")
|
||||
if overall_progress_callback: overall_progress_callback(1, 1)
|
||||
return
|
||||
|
||||
content_id = match.group(1)
|
||||
|
||||
scraper = cloudscraper.create_scraper()
|
||||
|
||||
try:
|
||||
retry_strategy = Retry(
|
||||
total=5,
|
||||
backoff_factor=1,
|
||||
status_forcelist=[429, 500, 502, 503, 504],
|
||||
allowed_methods=["HEAD", "GET", "POST"]
|
||||
)
|
||||
adapter = HTTPAdapter(max_retries=retry_strategy)
|
||||
scraper.mount("http://", adapter)
|
||||
scraper.mount("https://", adapter)
|
||||
logger_func(" [Gofile] 🔧 Configured robust retry strategy for network requests.")
|
||||
except Exception as e:
|
||||
logger_func(f" [Gofile] ⚠️ Could not configure retry strategy: {e}")
|
||||
|
||||
api_token = _get_gofile_api_token(scraper, logger_func)
|
||||
if not api_token:
|
||||
if overall_progress_callback: overall_progress_callback(1, 1)
|
||||
return
|
||||
|
||||
website_token = _get_gofile_website_token(scraper, logger_func)
|
||||
if not website_token:
|
||||
if overall_progress_callback: overall_progress_callback(1, 1)
|
||||
return
|
||||
|
||||
try:
|
||||
scraper.cookies.set("accountToken", api_token, domain=".gofile.io")
|
||||
scraper.headers.update({"Authorization": f"Bearer {api_token}"})
|
||||
|
||||
api_url = f"https://api.gofile.io/contents/{content_id}?wt={website_token}"
|
||||
logger_func(f" [Gofile] Fetching folder contents for ID: {content_id}")
|
||||
response = scraper.get(api_url, timeout=30)
|
||||
response.raise_for_status()
|
||||
data = response.json()
|
||||
|
||||
if data.get("status") != "ok":
|
||||
if data.get("status") == "error-passwordRequired":
|
||||
logger_func(" [Gofile] ❌ This folder is password protected. Downloading password-protected folders is not supported.")
|
||||
else:
|
||||
logger_func(f" [Gofile] ❌ API Error: {data.get('status')}. The folder may be expired or invalid.")
|
||||
if overall_progress_callback: overall_progress_callback(1, 1)
|
||||
return
|
||||
|
||||
folder_info = data.get("data", {})
|
||||
folder_name = clean_folder_name(folder_info.get("name", content_id))
|
||||
files_to_download = [item for item in folder_info.get("children", {}).values() if item.get("type") == "file"]
|
||||
|
||||
if not files_to_download:
|
||||
logger_func(" [Gofile] ℹ️ No files found in this Gofile folder.")
|
||||
if overall_progress_callback: overall_progress_callback(0, 0)
|
||||
return
|
||||
|
||||
final_download_path = os.path.join(download_path, folder_name)
|
||||
os.makedirs(final_download_path, exist_ok=True)
|
||||
logger_func(f" [Gofile] Found {len(files_to_download)} file(s). Saving to folder: '{folder_name}'")
|
||||
if overall_progress_callback: overall_progress_callback(len(files_to_download), 0)
|
||||
|
||||
download_session = requests.Session()
|
||||
adapter = HTTPAdapter(max_retries=Retry(
|
||||
total=5, backoff_factor=1, status_forcelist=[429, 500, 502, 503, 504]
|
||||
))
|
||||
download_session.mount("http://", adapter)
|
||||
download_session.mount("https://", adapter)
|
||||
|
||||
for i, file_info in enumerate(files_to_download):
|
||||
filename = file_info.get("name")
|
||||
file_url = file_info.get("link")
|
||||
file_size = file_info.get("size", 0)
|
||||
filepath = os.path.join(final_download_path, filename)
|
||||
|
||||
if os.path.exists(filepath) and os.path.getsize(filepath) == file_size:
|
||||
logger_func(f" [Gofile] ({i+1}/{len(files_to_download)}) ⏩ Skipping existing file: '{filename}'")
|
||||
if overall_progress_callback: overall_progress_callback(len(files_to_download), i + 1)
|
||||
continue
|
||||
|
||||
logger_func(f" [Gofile] ({i+1}/{len(files_to_download)}) 🔽 Downloading: '{filename}'")
|
||||
with download_session.get(file_url, stream=True, timeout=(60, 600)) as r:
|
||||
r.raise_for_status()
|
||||
|
||||
if progress_callback_func:
|
||||
progress_callback_func(filename, (0, file_size))
|
||||
|
||||
downloaded_bytes = 0
|
||||
last_log_time = time.time()
|
||||
with open(filepath, 'wb') as f:
|
||||
for chunk in r.iter_content(chunk_size=8192):
|
||||
f.write(chunk)
|
||||
downloaded_bytes += len(chunk)
|
||||
current_time = time.time()
|
||||
if current_time - last_log_time > 0.5: # Update slightly faster
|
||||
if progress_callback_func:
|
||||
progress_callback_func(filename, (downloaded_bytes, file_size))
|
||||
last_log_time = current_time
|
||||
|
||||
if progress_callback_func:
|
||||
progress_callback_func(filename, (file_size, file_size))
|
||||
|
||||
logger_func(f" [Gofile] ✅ Finished '{filename}'")
|
||||
if overall_progress_callback: overall_progress_callback(len(files_to_download), i + 1)
|
||||
time.sleep(1)
|
||||
|
||||
except Exception as e:
|
||||
logger_func(f" [Gofile] ❌ An error occurred during Gofile download: {e}")
|
||||
if not isinstance(e, requests.exceptions.RequestException):
|
||||
traceback.print_exc()
|
||||
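For orientation, a minimal sketch of how a caller might drive download_gofile_folder above. The URL is a placeholder and the two callbacks are hypothetical stand-ins for the GUI's slots; only the function signature itself comes from this file:

# Sketch: wiring download_gofile_folder to simple console callbacks.
# per_file receives either a status string or a (downloaded, total) tuple,
# matching how this module invokes progress_callback_func.
def per_file(filename, progress):
    if isinstance(progress, tuple):
        done, total = progress
        print(f"{filename}: {done}/{total} bytes")
    else:
        print(f"{filename}: {progress}")

def overall(total_files, completed_files):
    print(f"Overall: {completed_files}/{total_files}")

download_gofile_folder(
    "https://gofile.io/d/XXXXXX",       # placeholder folder URL
    download_path="downloads",
    logger_func=print,
    progress_callback_func=per_file,
    overall_progress_callback=overall,
)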
325
src/services/multipart_downloader.py
Normal file
@@ -0,0 +1,325 @@
# --- Standard Library Imports ---
import os
import time
import hashlib
import http.client
import traceback
import threading
import queue
from concurrent.futures import ThreadPoolExecutor, as_completed

# --- Third-Party Library Imports ---
import requests
MULTIPART_DOWNLOADER_AVAILABLE = True

# --- Module Constants ---
CHUNK_DOWNLOAD_RETRY_DELAY = 2
MAX_CHUNK_DOWNLOAD_RETRIES = 5
DOWNLOAD_CHUNK_SIZE_ITER = 1024 * 256  # 256 KB per iteration chunk


def _download_individual_chunk(
    chunk_url, chunk_temp_file_path, start_byte, end_byte, headers,
    part_num, total_parts, progress_data, cancellation_event,
    skip_event, pause_event, global_emit_time_ref, cookies_for_chunk,
    logger_func, emitter=None, api_original_filename=None
):
    """
    Downloads a single segment (chunk) of a larger file to its own unique part file.
    This function is intended to be run in a separate thread by a ThreadPoolExecutor.

    It handles retries, pauses, and cancellations for its specific chunk. If a
    download fails, the partial chunk file is removed, allowing a clean retry later.

    Args:
        chunk_url (str): The URL to download the file from.
        chunk_temp_file_path (str): The unique path to save this specific chunk
            (e.g., 'my_video.mp4.part0').
        start_byte (int): The starting byte for the Range header.
        end_byte (int): The ending byte for the Range header.
        headers (dict): The HTTP headers to use for the request.
        part_num (int): The index of this chunk (e.g., 0 for the first part).
        total_parts (int): The total number of chunks for the entire file.
        progress_data (dict): A thread-safe dictionary for sharing progress.
        cancellation_event (threading.Event): Event to signal cancellation.
        skip_event (threading.Event): Event to signal skipping the file.
        pause_event (threading.Event): Event to signal pausing the download.
        global_emit_time_ref (list): A mutable list with one element (a timestamp)
            to rate-limit UI updates.
        cookies_for_chunk (dict): Cookies to use for the request.
        logger_func (function): A function to log messages.
        emitter (queue.Queue or QObject): Emitter for sending progress to the UI.
        api_original_filename (str): The original filename for UI display.

    Returns:
        tuple: A tuple containing (bytes_downloaded, success_flag).
    """
    # --- Pre-download checks for control events ---
    if cancellation_event and cancellation_event.is_set():
        logger_func(f" [Chunk {part_num + 1}/{total_parts}] Download cancelled before start.")
        return 0, False
    if skip_event and skip_event.is_set():
        logger_func(f" [Chunk {part_num + 1}/{total_parts}] Skip event triggered before start.")
        return 0, False
    if pause_event and pause_event.is_set():
        logger_func(f" [Chunk {part_num + 1}/{total_parts}] Download paused before start...")
        while pause_event.is_set():
            if cancellation_event and cancellation_event.is_set():
                logger_func(f" [Chunk {part_num + 1}/{total_parts}] Download cancelled while paused.")
                return 0, False
            time.sleep(0.2)
        logger_func(f" [Chunk {part_num + 1}/{total_parts}] Download resumed.")

    # Set this chunk's status to 'active' before starting the download.
    with progress_data['lock']:
        progress_data['chunks_status'][part_num]['active'] = True

    try:
        # Prepare headers for the specific byte range of this chunk
        chunk_headers = headers.copy()
        if end_byte != -1:
            chunk_headers['Range'] = f"bytes={start_byte}-{end_byte}"

        bytes_this_chunk = 0
        last_speed_calc_time = time.time()
        bytes_at_last_speed_calc = 0

        # --- Retry Loop ---
        for attempt in range(MAX_CHUNK_DOWNLOAD_RETRIES + 1):
            if cancellation_event and cancellation_event.is_set():
                return bytes_this_chunk, False

            try:
                if attempt > 0:
                    logger_func(f" [Chunk {part_num + 1}/{total_parts}] Retrying (Attempt {attempt + 1}/{MAX_CHUNK_DOWNLOAD_RETRIES + 1})...")
                    time.sleep(CHUNK_DOWNLOAD_RETRY_DELAY * (2 ** (attempt - 1)))
                    last_speed_calc_time = time.time()
                    bytes_at_last_speed_calc = bytes_this_chunk

                logger_func(f" 🚀 [Chunk {part_num + 1}/{total_parts}] Starting download: bytes {start_byte}-{end_byte if end_byte != -1 else 'EOF'}")

                response = requests.get(chunk_url, headers=chunk_headers, timeout=(10, 120), stream=True, cookies=cookies_for_chunk)
                response.raise_for_status()

                # --- Data Writing Loop ---
                # Each chunk goes to its own unique part file in write-binary
                # ('wb') mode, so no seeking is required.
                with open(chunk_temp_file_path, 'wb') as f:
                    for data_segment in response.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE_ITER):
                        if cancellation_event and cancellation_event.is_set():
                            return bytes_this_chunk, False
                        if pause_event and pause_event.is_set():
                            # Handle pausing during the download stream
                            logger_func(f" [Chunk {part_num + 1}/{total_parts}] Paused...")
                            while pause_event.is_set():
                                if cancellation_event and cancellation_event.is_set(): return bytes_this_chunk, False
                                time.sleep(0.2)
                            logger_func(f" [Chunk {part_num + 1}/{total_parts}] Resumed.")

                        if data_segment:
                            f.write(data_segment)
                            bytes_this_chunk += len(data_segment)

                            # Update shared progress data structure
                            with progress_data['lock']:
                                progress_data['total_downloaded_so_far'] += len(data_segment)
                                progress_data['chunks_status'][part_num]['downloaded'] = bytes_this_chunk

                            # Calculate and update speed for this chunk
                            current_time = time.time()
                            time_delta = current_time - last_speed_calc_time
                            if time_delta > 0.5:
                                bytes_delta = bytes_this_chunk - bytes_at_last_speed_calc
                                current_speed_bps = (bytes_delta * 8) / time_delta if time_delta > 0 else 0
                                progress_data['chunks_status'][part_num]['speed_bps'] = current_speed_bps
                                last_speed_calc_time = current_time
                                bytes_at_last_speed_calc = bytes_this_chunk

                            # Emit progress signal to the UI via the queue
                            if emitter and (current_time - global_emit_time_ref[0] > 0.25):
                                global_emit_time_ref[0] = current_time
                                status_list_copy = [dict(s) for s in progress_data['chunks_status']]
                                if isinstance(emitter, queue.Queue):
                                    emitter.put({'type': 'file_progress', 'payload': (api_original_filename, status_list_copy)})
                                elif hasattr(emitter, 'file_progress_signal'):
                                    emitter.file_progress_signal.emit(api_original_filename, status_list_copy)

                # If we get here, the download for this chunk is successful
                return bytes_this_chunk, True

            except (requests.exceptions.ConnectionError, requests.exceptions.Timeout, http.client.IncompleteRead) as e:
                logger_func(f" ❌ [Chunk {part_num + 1}/{total_parts}] Retryable error: {e}")
            except requests.exceptions.RequestException as e:
                logger_func(f" ❌ [Chunk {part_num + 1}/{total_parts}] Non-retryable error: {e}")
                return bytes_this_chunk, False  # Break loop on non-retryable errors
            except Exception as e:
                logger_func(f" ❌ [Chunk {part_num + 1}/{total_parts}] Unexpected error: {e}\n{traceback.format_exc(limit=1)}")
                return bytes_this_chunk, False

        # If the retry loop finishes without a successful download
        return bytes_this_chunk, False
    finally:
        # This block runs whether the download succeeded or failed
        with progress_data['lock']:
            progress_data['chunks_status'][part_num]['active'] = False
            progress_data['chunks_status'][part_num]['speed_bps'] = 0.0


def download_file_in_parts(file_url, save_path, total_size, num_parts, headers, api_original_filename,
                           emitter_for_multipart, cookies_for_chunk_session,
                           cancellation_event, skip_event, logger_func, pause_event):
    """
    Manages a resilient, multipart file download by saving each chunk to a separate file.

    This function orchestrates the download process by:
    1. Checking for already completed chunk files to resume a previous download.
    2. Submitting only the missing chunks to a thread pool for parallel download.
    3. Assembling the final file from the individual chunks upon successful completion.
    4. Cleaning up temporary chunk files after assembly.
    5. Leaving completed chunks on disk if the download fails, allowing for a future resume.

    Args:
        file_url (str): The URL of the file to download.
        save_path (str): The final desired path for the downloaded file (e.g., 'my_video.mp4').
        total_size (int): The total size of the file in bytes.
        num_parts (int): The number of parts to split the download into.
        headers (dict): HTTP headers for the download requests.
        api_original_filename (str): The original filename for UI progress display.
        emitter_for_multipart (queue.Queue or QObject): Emitter for UI signals.
        cookies_for_chunk_session (dict): Cookies for the download requests.
        cancellation_event (threading.Event): Event to signal cancellation.
        skip_event (threading.Event): Event to signal skipping the file.
        logger_func (function): A function for logging messages.
        pause_event (threading.Event): Event to signal pausing the download.

    Returns:
        tuple: A tuple containing (success_flag, total_bytes_downloaded, md5_hash, file_handle).
            The file_handle will be for the final assembled file if successful, otherwise None.
    """
    logger_func(f"⬇️ Initializing Resumable Multi-part Download ({num_parts} parts) for: '{api_original_filename}' (Size: {total_size / (1024*1024):.2f} MB)")

    # Calculate the byte range for each chunk
    chunk_size_calc = total_size // num_parts
    chunks_ranges = []
    for i in range(num_parts):
        start = i * chunk_size_calc
        end = start + chunk_size_calc - 1 if i < num_parts - 1 else total_size - 1
        if start <= end:
            chunks_ranges.append((start, end))
        elif total_size == 0 and i == 0:  # Handle zero-byte files
            chunks_ranges.append((0, -1))

    # Calculate the expected size of each chunk
    chunk_actual_sizes = []
    for start, end in chunks_ranges:
        chunk_actual_sizes.append(end - start + 1 if end != -1 else 0)

    if not chunks_ranges and total_size > 0:
        logger_func(f" ⚠️ No valid chunk ranges for multipart download of '{api_original_filename}'. Aborting.")
        return False, 0, None, None

    # --- Resumption Logic: Check for existing complete chunks ---
    chunks_to_download = []
    total_bytes_resumed = 0
    for i, (start, end) in enumerate(chunks_ranges):
        chunk_part_path = f"{save_path}.part{i}"
        expected_chunk_size = chunk_actual_sizes[i]

        if os.path.exists(chunk_part_path) and os.path.getsize(chunk_part_path) == expected_chunk_size:
            logger_func(f" [Chunk {i + 1}/{num_parts}] Resuming with existing complete chunk file.")
            total_bytes_resumed += expected_chunk_size
        else:
            chunks_to_download.append({'index': i, 'start': start, 'end': end})

    # Setup the shared progress data structure
    progress_data = {
        'total_file_size': total_size,
        'total_downloaded_so_far': total_bytes_resumed,
        'chunks_status': [],
        'lock': threading.Lock(),
        'last_global_emit_time': [time.time()]
    }
    for i in range(num_parts):
        is_resumed = not any(c['index'] == i for c in chunks_to_download)
        progress_data['chunks_status'].append({
            'id': i,
            'downloaded': chunk_actual_sizes[i] if is_resumed else 0,
            'total': chunk_actual_sizes[i],
            'active': False,
            'speed_bps': 0.0
        })

    # --- Download Phase ---
    chunk_futures = []
    all_chunks_successful = True
    total_bytes_from_threads = 0

    with ThreadPoolExecutor(max_workers=num_parts, thread_name_prefix=f"MPChunk_{api_original_filename[:10]}_") as chunk_pool:
        for chunk_info in chunks_to_download:
            if cancellation_event and cancellation_event.is_set():
                all_chunks_successful = False
                break

            i, start, end = chunk_info['index'], chunk_info['start'], chunk_info['end']
            chunk_part_path = f"{save_path}.part{i}"

            future = chunk_pool.submit(
                _download_individual_chunk,
                chunk_url=file_url,
                chunk_temp_file_path=chunk_part_path,
                start_byte=start, end_byte=end, headers=headers, part_num=i, total_parts=num_parts,
                progress_data=progress_data, cancellation_event=cancellation_event,
                skip_event=skip_event, global_emit_time_ref=progress_data['last_global_emit_time'],
                pause_event=pause_event, cookies_for_chunk=cookies_for_chunk_session,
                logger_func=logger_func, emitter=emitter_for_multipart,
                api_original_filename=api_original_filename
            )
            chunk_futures.append(future)

        for future in as_completed(chunk_futures):
            if cancellation_event and cancellation_event.is_set():
                all_chunks_successful = False
            bytes_downloaded, success = future.result()
            total_bytes_from_threads += bytes_downloaded
            if not success:
                all_chunks_successful = False

    total_bytes_final = total_bytes_resumed + total_bytes_from_threads

    if cancellation_event and cancellation_event.is_set():
        logger_func(f" Multi-part download for '{api_original_filename}' cancelled by main event.")
        all_chunks_successful = False

    # --- Assembly and Cleanup Phase ---
    if all_chunks_successful and (total_bytes_final == total_size or total_size == 0):
        logger_func(f" ✅ All {num_parts} chunks complete. Assembling final file...")
        md5_hasher = hashlib.md5()
        try:
            with open(save_path, 'wb') as final_file:
                for i in range(num_parts):
                    chunk_part_path = f"{save_path}.part{i}"
                    with open(chunk_part_path, 'rb') as chunk_file:
                        content = chunk_file.read()
                        final_file.write(content)
                        md5_hasher.update(content)

            calculated_hash = md5_hasher.hexdigest()
            logger_func(f" ✅ Assembly successful for '{api_original_filename}'. Total bytes: {total_bytes_final}")
            return True, total_bytes_final, calculated_hash, open(save_path, 'rb')
        except Exception as e:
            logger_func(f" ❌ Critical error during file assembly: {e}. Cleaning up.")
            return False, total_bytes_final, None, None
        finally:
            # Cleanup all individual chunk files after successful assembly
            for i in range(num_parts):
                chunk_part_path = f"{save_path}.part{i}"
                if os.path.exists(chunk_part_path):
                    try:
                        os.remove(chunk_part_path)
                    except OSError as e:
                        logger_func(f" ⚠️ Failed to remove temp part file '{chunk_part_path}': {e}")
    else:
        # If the download failed, do NOT clean up, allowing for resumption later
        logger_func(f" ❌ Multi-part download failed for '{api_original_filename}'. Success: {all_chunks_successful}, Bytes: {total_bytes_final}/{total_size}. Partial chunks saved for future resumption.")
        return False, total_bytes_final, None, None
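As a worked example of the range arithmetic in download_file_in_parts (the numbers are illustrative only): a 10 MiB file split four ways gives chunk_size_calc = 2_621_440, and the last part always runs to total_size - 1, absorbing any remainder:

total_size, num_parts = 10_485_760, 4
chunk = total_size // num_parts  # 2_621_440
ranges = [(i * chunk,
           i * chunk + chunk - 1 if i < num_parts - 1 else total_size - 1)
          for i in range(num_parts)]
# ranges == [(0, 2621439), (2621440, 5242879), (5242880, 7864319), (7864320, 10485759)]
# Each pair becomes a Range header, e.g. "bytes=2621440-5242879", and its data
# lands in save_path + ".part1" until the assembly phase stitches the parts together.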
120
src/services/updater.py
Normal file
@@ -0,0 +1,120 @@
import sys
import os
import requests
import subprocess  # Keep this for now, though it's not used in the final command
from packaging.version import parse as parse_version
from PyQt5.QtCore import QThread, pyqtSignal

# Constants for the updater
GITHUB_REPO_URL = "https://api.github.com/repos/Yuvi63771/Kemono-Downloader/releases/latest"
EXE_NAME = "Kemono.Downloader.exe"


class UpdateChecker(QThread):
    """Checks for a new version on GitHub in a background thread."""
    update_available = pyqtSignal(str, str)  # new_version, download_url
    up_to_date = pyqtSignal(str)
    update_error = pyqtSignal(str)

    def __init__(self, current_version):
        super().__init__()
        self.current_version_str = current_version.lstrip('v')

    def run(self):
        try:
            response = requests.get(GITHUB_REPO_URL, timeout=15)
            response.raise_for_status()
            data = response.json()

            latest_version_str = data['tag_name'].lstrip('v')
            current_version = parse_version(self.current_version_str)
            latest_version = parse_version(latest_version_str)

            if latest_version > current_version:
                for asset in data.get('assets', []):
                    if asset['name'] == EXE_NAME:
                        self.update_available.emit(latest_version_str, asset['browser_download_url'])
                        return
                self.update_error.emit(f"Update found, but '{EXE_NAME}' is missing from the release assets.")
            else:
                self.up_to_date.emit("You are on the latest version.")

        except requests.exceptions.RequestException as e:
            self.update_error.emit(f"Network error: {e}")
        except Exception as e:
            self.update_error.emit(f"An error occurred: {e}")


class UpdateDownloader(QThread):
    """
    Downloads the new executable and runs an updater script that kills the old process,
    replaces the file, and displays a message in the terminal.
    """
    download_finished = pyqtSignal()
    download_error = pyqtSignal(str)

    def __init__(self, download_url, parent_app):
        super().__init__()
        self.download_url = download_url
        self.parent_app = parent_app

    def run(self):
        try:
            app_path = sys.executable
            app_dir = os.path.dirname(app_path)
            temp_path = os.path.join(app_dir, f"{EXE_NAME}.tmp")
            old_path = os.path.join(app_dir, f"{EXE_NAME}.old")
            updater_script_path = os.path.join(app_dir, "updater.bat")

            pid_file_path = os.path.join(app_dir, "updater.pid")

            with requests.get(self.download_url, stream=True, timeout=300) as r:
                r.raise_for_status()
                with open(temp_path, 'wb') as f:
                    for chunk in r.iter_content(chunk_size=8192):
                        f.write(chunk)

            with open(pid_file_path, "w") as f:
                f.write(str(os.getpid()))

            script_content = f"""
@echo off
SETLOCAL

echo.
echo Reading process information...
set /p PID=<{pid_file_path}

echo Closing the old application (PID: %PID%)...
taskkill /F /PID %PID%

echo Waiting for files to unlock...
timeout /t 2 /nobreak > nul

echo Replacing application files...
if exist "{old_path}" del /F /Q "{old_path}"
rename "{app_path}" "{os.path.basename(old_path)}"
rename "{temp_path}" "{EXE_NAME}"

echo.
echo ============================================================
echo Update Complete!
echo You can now close this window and run {EXE_NAME}.
echo ============================================================
echo.
pause

echo Cleaning up helper files...
del "{pid_file_path}"
del "%~f0"
ENDLOCAL
"""
            with open(updater_script_path, "w") as f:
                f.write(script_content)

            # --- Go back to the os.startfile command that we know works ---
            os.startfile(updater_script_path)

            self.download_finished.emit()

        except Exception as e:
            self.download_error.emit(f"Failed to download or run updater: {e}")
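One reason UpdateChecker compares parse_version objects rather than the raw tag strings (a quick illustration):

from packaging.version import parse as parse_version

# Lexical comparison of tag strings gets multi-digit components wrong:
assert "4.10.0" < "4.9.0"                                  # string order: wrong
assert parse_version("4.10.0") > parse_version("4.9.0")    # semantic order: right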
1
src/ui/__init__.py
Normal file
@@ -0,0 +1 @@
# ...existing code...
40
src/ui/assets.py
Normal file
@@ -0,0 +1,40 @@
import os
import sys
from PyQt5.QtGui import QIcon

_app_icon_cache = None


def get_app_icon_object():
    """
    Loads and caches the application icon from the assets folder.
    This function is now centralized to prevent circular imports.

    Returns:
        QIcon: The application icon object.
    """
    global _app_icon_cache
    if _app_icon_cache and not _app_icon_cache.isNull():
        return _app_icon_cache

    app_base_dir = ""

    if getattr(sys, 'frozen', False):
        app_base_dir = os.path.dirname(sys.executable)
    else:
        app_base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))

    icon_path = os.path.join(app_base_dir, 'assets', 'Kemono.ico')

    if os.path.exists(icon_path):
        _app_icon_cache = QIcon(icon_path)
    else:
        if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
            fallback_icon_path = os.path.join(sys._MEIPASS, 'assets', 'Kemono.ico')
            if os.path.exists(fallback_icon_path):
                _app_icon_cache = QIcon(fallback_icon_path)
                return _app_icon_cache

        print(f"Warning: Application icon not found at {icon_path}")
        _app_icon_cache = QIcon()  # Return an empty icon as a fallback

    return _app_icon_cache
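A typical call site, assuming the package is importable as src.ui.assets (a sketch, not taken from this diff):

from PyQt5.QtWidgets import QApplication
from src.ui.assets import get_app_icon_object

app = QApplication([])
app.setWindowIcon(get_app_icon_object())  # a second call returns the cached QIcon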
137
src/ui/classes/allcomic_downloader_thread.py
Normal file
@@ -0,0 +1,137 @@
import os
import threading
import time
from urllib.parse import urlparse

import cloudscraper
import requests
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.allcomic_client import (fetch_chapter_data as allcomic_fetch_data,
                                     get_chapter_list as allcomic_get_list)
from ...utils.file_utils import clean_folder_name


class AllcomicDownloadThread(QThread):
    """A dedicated QThread for handling allcomic.com downloads."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)
    overall_progress_signal = pyqtSignal(int, int)

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.comic_url = url
        self.output_dir = output_dir
        self.is_cancelled = False
        self.pause_event = parent.pause_event if hasattr(parent, 'pause_event') else threading.Event()

    def _check_pause(self):
        if self.is_cancelled: return True
        if self.pause_event and self.pause_event.is_set():
            self.progress_signal.emit(" Download paused...")
            while self.pause_event.is_set():
                if self.is_cancelled: return True
                time.sleep(0.5)
            self.progress_signal.emit(" Download resumed.")
        return self.is_cancelled

    def run(self):
        grand_total_dl = 0
        grand_total_skip = 0

        # Create the scraper session ONCE for the entire job
        scraper = cloudscraper.create_scraper(
            browser={'browser': 'firefox', 'platform': 'windows', 'desktop': True}
        )

        # Pass the scraper to the function
        chapters_to_download = allcomic_get_list(scraper, self.comic_url, self.progress_signal.emit)

        if not chapters_to_download:
            chapters_to_download = [self.comic_url]

        self.progress_signal.emit(f"--- Starting download of {len(chapters_to_download)} chapter(s) ---")

        for chapter_idx, chapter_url in enumerate(chapters_to_download):
            if self._check_pause(): break

            self.progress_signal.emit(f"\n-- Processing Chapter {chapter_idx + 1}/{len(chapters_to_download)} --")
            # Pass the scraper to the function
            comic_title, chapter_title, image_urls = allcomic_fetch_data(scraper, chapter_url, self.progress_signal.emit)

            if not image_urls:
                self.progress_signal.emit("❌ Failed to get data for chapter. Skipping.")
                continue

            series_folder_name = clean_folder_name(comic_title)
            chapter_folder_name = clean_folder_name(chapter_title)
            final_save_path = os.path.join(self.output_dir, series_folder_name, chapter_folder_name)

            try:
                os.makedirs(final_save_path, exist_ok=True)
                self.progress_signal.emit(f" Saving to folder: '{os.path.join(series_folder_name, chapter_folder_name)}'")
            except OSError as e:
                self.progress_signal.emit(f"❌ Critical error creating directory: {e}")
                grand_total_skip += len(image_urls)
                continue

            total_files_in_chapter = len(image_urls)
            self.overall_progress_signal.emit(total_files_in_chapter, 0)
            headers = {'Referer': chapter_url}

            for i, img_url in enumerate(image_urls):
                if self._check_pause(): break

                file_extension = os.path.splitext(urlparse(img_url).path)[1] or '.jpg'
                filename = f"{i+1:03d}{file_extension}"
                filepath = os.path.join(final_save_path, filename)

                if os.path.exists(filepath):
                    self.progress_signal.emit(f" -> Skip ({i+1}/{total_files_in_chapter}): '{filename}' already exists.")
                    grand_total_skip += 1
                else:
                    download_successful = False
                    max_retries = 8
                    for attempt in range(max_retries):
                        if self._check_pause(): break
                        try:
                            self.progress_signal.emit(f" Downloading ({i+1}/{total_files_in_chapter}): '{filename}' (Attempt {attempt + 1})...")
                            # Use the persistent scraper object
                            response = scraper.get(img_url, stream=True, headers=headers, timeout=60)
                            response.raise_for_status()

                            with open(filepath, 'wb') as f:
                                for chunk in response.iter_content(chunk_size=8192):
                                    if self._check_pause(): break
                                    f.write(chunk)

                            if self._check_pause():
                                if os.path.exists(filepath): os.remove(filepath)
                                break

                            download_successful = True
                            grand_total_dl += 1
                            break

                        except requests.RequestException as e:
                            self.progress_signal.emit(f" ⚠️ Attempt {attempt + 1} failed for '{filename}': {e}")
                            if attempt < max_retries - 1:
                                wait_time = 2 * (attempt + 1)
                                self.progress_signal.emit(f" Retrying in {wait_time} seconds...")
                                time.sleep(wait_time)
                            else:
                                self.progress_signal.emit(f" ❌ All attempts failed for '{filename}'. Skipping.")
                                grand_total_skip += 1

                self.overall_progress_signal.emit(total_files_in_chapter, i + 1)
                time.sleep(0.5)  # Increased delay between images for this site

            if self._check_pause(): break

        self.file_progress_signal.emit("", None)
        self.finished_signal.emit(grand_total_dl, grand_total_skip, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by AllComic thread.")
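How a caller might wire this thread's signals, for illustration; main_window, log_view, and progress_bar are hypothetical widgets, not part of this diff:

thread = AllcomicDownloadThread("https://allcomic.com/...", "downloads", parent=main_window)
thread.progress_signal.connect(log_view.append)
thread.overall_progress_signal.connect(lambda total, done: progress_bar.setValue(done))
thread.finished_signal.connect(
    lambda dl, skip, cancelled: log_view.append(f"Done: {dl} downloaded, {skip} skipped"))
thread.start()  # run() then executes on the worker thread; never call run() directly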
133
src/ui/classes/booru_downloader_thread.py
Normal file
@@ -0,0 +1,133 @@
import os
import threading
import time
import datetime
import requests
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.booru_client import fetch_booru_data, BooruClientException
from ...utils.file_utils import clean_folder_name

_ff_ver = (datetime.date.today().toordinal() - 735506) // 28
USERAGENT_FIREFOX = (f"Mozilla/5.0 (Windows NT 10.0; Win64; x64; "
                     f"rv:{_ff_ver}.0) Gecko/20100101 Firefox/{_ff_ver}.0")


class BooruDownloadThread(QThread):
    """A dedicated QThread for handling Danbooru and Gelbooru downloads."""
    progress_signal = pyqtSignal(str)
    overall_progress_signal = pyqtSignal(int, int)
    finished_signal = pyqtSignal(int, int, bool)  # dl_count, skip_count, cancelled

    def __init__(self, url, output_dir, api_key, user_id, parent=None):
        super().__init__(parent)
        self.booru_url = url
        self.output_dir = output_dir
        self.api_key = api_key
        self.user_id = user_id
        self.is_cancelled = False
        self.pause_event = parent.pause_event if hasattr(parent, 'pause_event') else threading.Event()

    def run(self):
        download_count = 0
        skip_count = 0
        processed_count = 0
        cumulative_total = 0

        def logger(msg):
            self.progress_signal.emit(str(msg))

        try:
            self.progress_signal.emit("=" * 40)
            self.progress_signal.emit(f"🚀 Starting Booru Download for: {self.booru_url}")

            item_generator = fetch_booru_data(self.booru_url, self.api_key, self.user_id, logger)

            download_path = self.output_dir  # Default path
            path_initialized = False

            session = requests.Session()
            session.headers["User-Agent"] = USERAGENT_FIREFOX

            for item in item_generator:
                if self.is_cancelled:
                    break

                if isinstance(item, tuple) and item[0] == 'PAGE_UPDATE':
                    newly_found = item[1]
                    cumulative_total += newly_found
                    self.progress_signal.emit(f" Found {newly_found} more posts. Total so far: {cumulative_total}")
                    self.overall_progress_signal.emit(cumulative_total, processed_count)
                    continue

                post_data = item
                processed_count += 1

                if not path_initialized:
                    base_folder_name = post_data.get('search_tags', 'booru_download')
                    download_path = os.path.join(self.output_dir, clean_folder_name(base_folder_name))
                    os.makedirs(download_path, exist_ok=True)
                    path_initialized = True

                if self.pause_event.is_set():
                    self.progress_signal.emit(" Download paused...")
                    while self.pause_event.is_set():
                        if self.is_cancelled: break
                        time.sleep(0.5)
                    if self.is_cancelled: break
                    self.progress_signal.emit(" Download resumed.")

                file_url = post_data.get('file_url')
                if not file_url:
                    skip_count += 1
                    self.progress_signal.emit(f" -> Skip ({processed_count}/{cumulative_total}): Post ID {post_data.get('id')} has no file URL.")
                    continue

                cat = post_data.get('category', 'booru')
                post_id = post_data.get('id', 'unknown')
                md5 = post_data.get('md5', '')
                fname = post_data.get('filename', f"file_{post_id}")
                ext = post_data.get('extension', 'jpg')

                final_filename = f"{cat}_{post_id}_{md5 or fname}.{ext}"
                filepath = os.path.join(download_path, final_filename)

                if os.path.exists(filepath):
                    self.progress_signal.emit(f" -> Skip ({processed_count}/{cumulative_total}): '{final_filename}' already exists.")
                    skip_count += 1
                else:
                    try:
                        self.progress_signal.emit(f" Downloading ({processed_count}/{cumulative_total}): '{final_filename}'...")
                        response = session.get(file_url, stream=True, timeout=60)
                        response.raise_for_status()

                        with open(filepath, 'wb') as f:
                            for chunk in response.iter_content(chunk_size=8192):
                                if self.is_cancelled: break
                                f.write(chunk)

                        if not self.is_cancelled:
                            download_count += 1
                        else:
                            if os.path.exists(filepath): os.remove(filepath)
                            skip_count += 1

                    except Exception as e:
                        self.progress_signal.emit(f" ❌ Failed to download '{final_filename}': {e}")
                        skip_count += 1

                self.overall_progress_signal.emit(cumulative_total, processed_count)
                time.sleep(0.2)

            if not path_initialized:
                self.progress_signal.emit("No posts found for the given URL/tags.")

        except BooruClientException as e:
            self.progress_signal.emit(f"❌ A Booru client error occurred: {e}")
        except Exception as e:
            self.progress_signal.emit(f"❌ An unexpected error occurred in Booru thread: {e}")
        finally:
            self.finished_signal.emit(download_count, skip_count, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by Booru thread.")
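The _ff_ver expression above approximates the current Firefox major version from the date alone: day 735506 acts as a fitted epoch and Firefox ships a new major roughly every 28 days, so for recent dates the computed value stays close to the real release number. A quick check (a sketch; the date is arbitrary):

import datetime

d = datetime.date(2025, 6, 1)
print((d.toordinal() - 735506) // 28)   # -> 139; Firefox 139 shipped in May 2025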
195
src/ui/classes/bunkr_downloader_thread.py
Normal file
@@ -0,0 +1,195 @@
import os
import re
import time
import requests
import threading
from concurrent.futures import ThreadPoolExecutor
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.bunkr_client import fetch_bunkr_data

# Define image extensions
IMG_EXTS = ('.jpg', '.jpeg', '.png', '.gif', '.webp', '.bmp', '.avif')
BUNKR_IMG_THREADS = 6  # Hardcoded thread count for images


class BunkrDownloadThread(QThread):
    """A dedicated QThread for handling Bunkr downloads."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool, list)

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.bunkr_url = url
        self.output_dir = output_dir
        self.is_cancelled = False

        # --- NEW: Threading members ---
        self.lock = threading.Lock()
        self.download_count = 0
        self.skip_count = 0
        self.file_index = 0  # Use a shared index for logging

        class ThreadLogger:
            def __init__(self, signal_emitter):
                self.signal_emitter = signal_emitter
            def info(self, msg, *args, **kwargs):
                self.signal_emitter.emit(str(msg))
            def error(self, msg, *args, **kwargs):
                self.signal_emitter.emit(f"❌ ERROR: {msg}")
            def warning(self, msg, *args, **kwargs):
                self.signal_emitter.emit(f"⚠️ WARNING: {msg}")
            def debug(self, msg, *args, **kwargs):
                pass

        self.logger = ThreadLogger(self.progress_signal)

    def _download_file(self, file_data, total_files, album_path, is_image_task=False):
        """
        A thread-safe method to download a single file.
        This function will be called by the main thread (for videos)
        and worker threads (for images).
        """
        # Stop if a cancellation signal was received before starting
        if self.is_cancelled:
            return

        # --- Thread-safe index for logging ---
        with self.lock:
            self.file_index += 1
            current_file_num = self.file_index

        try:
            filename = file_data.get('name', 'untitled_file')
            file_url = file_data.get('url')
            headers = file_data.get('_http_headers')

            filename = re.sub(r'[<>:"/\\|?*]', '_', filename).strip()
            filepath = os.path.join(album_path, filename)

            if os.path.exists(filepath):
                self.progress_signal.emit(f" -> Skip ({current_file_num}/{total_files}): '{filename}' already exists.")
                with self.lock:
                    self.skip_count += 1
                return

            self.progress_signal.emit(f" Downloading ({current_file_num}/{total_files}): '{filename}'...")

            response = requests.get(file_url, stream=True, headers=headers, timeout=60)
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            downloaded_size = 0
            last_update_time = time.time()

            with open(filepath, 'wb') as f:
                for chunk in response.iter_content(chunk_size=8192):
                    if self.is_cancelled:
                        break
                    if chunk:
                        f.write(chunk)
                        downloaded_size += len(chunk)

                        # For videos/other files, send frequent progress.
                        # For images, don't send progress to avoid UI flicker.
                        if not is_image_task:
                            current_time = time.time()
                            if total_size > 0 and (current_time - last_update_time) > 0.5:
                                self.file_progress_signal.emit(filename, (downloaded_size, total_size))
                                last_update_time = current_time

            if self.is_cancelled:
                self.progress_signal.emit(f" Download cancelled for '{filename}'.")
                if os.path.exists(filepath): os.remove(filepath)
                return

            if total_size > 0:
                self.file_progress_signal.emit(filename, (total_size, total_size))

            with self.lock:
                self.download_count += 1

        except requests.exceptions.RequestException as e:
            self.progress_signal.emit(f" ❌ Failed to download '{filename}'. Error: {e}")
            if os.path.exists(filepath): os.remove(filepath)
            with self.lock:
                self.skip_count += 1
        except Exception as e:
            self.progress_signal.emit(f" ❌ An unexpected error occurred with '{filename}': {e}")
            if os.path.exists(filepath): os.remove(filepath)
            with self.lock:
                self.skip_count += 1

    def run(self):
        self.progress_signal.emit("=" * 40)
        self.progress_signal.emit(f"🚀 Starting Bunkr Download for: {self.bunkr_url}")

        album_name, files_to_download = fetch_bunkr_data(self.bunkr_url, self.logger)

        if not files_to_download:
            self.progress_signal.emit("❌ Failed to extract file information from Bunkr. Aborting.")
            self.finished_signal.emit(0, 0, self.is_cancelled, [])
            return

        album_path = os.path.join(self.output_dir, album_name)
        try:
            os.makedirs(album_path, exist_ok=True)
            self.progress_signal.emit(f" Saving to folder: '{album_path}'")
        except OSError as e:
            self.progress_signal.emit(f"❌ Critical error creating directory: {e}")
            self.finished_signal.emit(0, len(files_to_download), self.is_cancelled, [])
            return

        total_files = len(files_to_download)

        # --- NEW: Separate files into images and others ---
        image_files = []
        other_files = []
        for f in files_to_download:
            name = f.get('name', '').lower()
            if name.endswith(IMG_EXTS):
                image_files.append(f)
            else:
                other_files.append(f)

        self.progress_signal.emit(f" Found {len(image_files)} images and {len(other_files)} other files.")

        # --- 1. Process videos and other files sequentially (one by one) ---
        if other_files:
            self.progress_signal.emit(f" Downloading {len(other_files)} videos/other files sequentially...")
            for file_data in other_files:
                if self.is_cancelled:
                    break
                # Call the download helper method
                self._download_file(file_data, total_files, album_path, is_image_task=False)

        # --- 2. Process images concurrently using a fixed 6-thread pool ---
        if image_files and not self.is_cancelled:
            self.progress_signal.emit(f" Downloading {len(image_files)} images concurrently ({BUNKR_IMG_THREADS} threads)...")
            with ThreadPoolExecutor(max_workers=BUNKR_IMG_THREADS, thread_name_prefix='BunkrImg') as executor:

                # Submit all image download tasks
                futures = {executor.submit(self._download_file, file_data, total_files, album_path, is_image_task=True): file_data for file_data in image_files}

                try:
                    # Wait for tasks to complete, but check for cancellation
                    for future in futures:
                        if self.is_cancelled:
                            future.cancel()  # Try to cancel running/pending tasks
                        else:
                            future.result()  # Wait for the task to finish (or raise exception)
                except Exception as e:
                    self.progress_signal.emit(f" ❌ A thread pool error occurred: {e}")

        if self.is_cancelled:
            self.progress_signal.emit(" Download cancelled by user.")
            # Update skip count to reflect all non-downloaded files
            self.skip_count = total_files - self.download_count

        self.file_progress_signal.emit("", None)  # Clear file progress
        self.finished_signal.emit(self.download_count, self.skip_count, self.is_cancelled, [])

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by Bunkr thread.")
189
src/ui/classes/discord_downloader_thread.py
Normal file
@@ -0,0 +1,189 @@
import os
import time
import datetime
import requests
from PyQt5.QtCore import QThread, pyqtSignal

# Assuming discord_pdf_generator is in the dialogs folder, sibling to the classes folder
from ..dialogs.discord_pdf_generator import create_pdf_from_discord_messages

# This constant is needed for the thread to function independently
_ff_ver = (datetime.date.today().toordinal() - 735506) // 28
USERAGENT_FIREFOX = (f"Mozilla/5.0 (Windows NT 10.0; Win64; x64; "
                     f"rv:{_ff_ver}.0) Gecko/20100101 Firefox/{_ff_ver}.0")


class DiscordDownloadThread(QThread):
    """A dedicated QThread for handling all official Discord downloads."""
    progress_signal = pyqtSignal(str)
    progress_label_signal = pyqtSignal(str)
    finished_signal = pyqtSignal(int, int, bool, list)

    def __init__(self, mode, session, token, output_dir, server_id, channel_id, url, app_base_dir, limit=None, parent=None):
        super().__init__(parent)
        self.mode = mode
        self.session = session
        self.token = token
        self.output_dir = output_dir
        self.server_id = server_id
        self.channel_id = channel_id
        self.api_url = url
        self.message_limit = limit
        self.app_base_dir = app_base_dir  # Path to app's base directory

        self.is_cancelled = False
        self.is_paused = False

    def run(self):
        if self.mode == 'pdf':
            self._run_pdf_creation()
        else:
            self._run_file_download()

    def cancel(self):
        self.progress_signal.emit(" Cancellation signal received by Discord thread.")
        self.is_cancelled = True

    def pause(self):
        self.progress_signal.emit(" Pausing Discord download...")
        self.is_paused = True

    def resume(self):
        self.progress_signal.emit(" Resuming Discord download...")
        self.is_paused = False

    def _check_events(self):
        if self.is_cancelled:
            return True
        while self.is_paused:
            time.sleep(0.5)
            if self.is_cancelled:
                return True
        return False

    def _fetch_all_messages(self):
        all_messages = []
        last_message_id = None
        headers = {'Authorization': self.token, 'User-Agent': USERAGENT_FIREFOX}

        while True:
            if self._check_events(): break

            endpoint = f"/channels/{self.channel_id}/messages?limit=100"
            if last_message_id:
                endpoint += f"&before={last_message_id}"

            try:
                resp = self.session.get(f"https://discord.com/api/v10{endpoint}", headers=headers, timeout=30)
                resp.raise_for_status()
                message_batch = resp.json()
            except Exception as e:
                self.progress_signal.emit(f" ❌ Error fetching message batch: {e}")
                break

            if not message_batch:
                break

            all_messages.extend(message_batch)

            if self.message_limit and len(all_messages) >= self.message_limit:
                self.progress_signal.emit(f" Reached message limit of {self.message_limit}. Halting fetch.")
                all_messages = all_messages[:self.message_limit]
                break

            last_message_id = message_batch[-1]['id']
            self.progress_label_signal.emit(f"Fetched {len(all_messages)} messages...")
            time.sleep(1)  # API Rate Limiting

        return all_messages

    def _run_pdf_creation(self):
        self.progress_signal.emit("=" * 40)
        self.progress_signal.emit(f"🚀 Starting Discord PDF export for: {self.api_url}")
        self.progress_label_signal.emit("Fetching messages...")

        all_messages = self._fetch_all_messages()

        if self.is_cancelled:
            self.finished_signal.emit(0, 0, True, [])
            return

        self.progress_label_signal.emit(f"Collected {len(all_messages)} total messages. Generating PDF...")
        all_messages.reverse()

        font_path = os.path.join(self.app_base_dir, 'data', 'dejavu-sans', 'DejaVuSans.ttf')
        output_filepath = os.path.join(self.output_dir, f"discord_{self.server_id}_{self.channel_id or 'server'}.pdf")

        success = create_pdf_from_discord_messages(
            all_messages, self.server_id, self.channel_id,
            output_filepath, font_path, logger=self.progress_signal.emit,
            cancellation_event=self, pause_event=self
        )

        if success:
            self.progress_label_signal.emit("✅ PDF export complete!")
        elif not self.is_cancelled:
            self.progress_label_signal.emit("❌ PDF export failed. Check log for details.")

        self.finished_signal.emit(0, len(all_messages), self.is_cancelled, [])

    def _run_file_download(self):
        download_count = 0
        skip_count = 0
        try:
            self.progress_signal.emit("=" * 40)
            self.progress_signal.emit(f"🚀 Starting Discord download for channel: {self.channel_id}")
            self.progress_label_signal.emit("Fetching messages...")
            all_messages = self._fetch_all_messages()

            if self.is_cancelled:
                self.finished_signal.emit(0, 0, True, [])
                return

            self.progress_label_signal.emit(f"Collected {len(all_messages)} messages. Starting downloads...")
            total_attachments = sum(len(m.get('attachments', [])) for m in all_messages)

            for message in reversed(all_messages):
                if self._check_events(): break
                for attachment in message.get('attachments', []):
                    if self._check_events(): break

                    file_url = attachment['url']
                    original_filename = attachment['filename']
                    filepath = os.path.join(self.output_dir, original_filename)
                    filename_to_use = original_filename

                    counter = 1
                    base_name, extension = os.path.splitext(original_filename)
                    while os.path.exists(filepath):
                        filename_to_use = f"{base_name} ({counter}){extension}"
                        filepath = os.path.join(self.output_dir, filename_to_use)
                        counter += 1

                    if filename_to_use != original_filename:
                        self.progress_signal.emit(f" -> Duplicate name '{original_filename}'. Saving as '{filename_to_use}'.")

                    try:
                        self.progress_signal.emit(f" Downloading ({download_count+1}/{total_attachments}): '{filename_to_use}'...")
                        response = requests.get(file_url, stream=True, timeout=60)
                        response.raise_for_status()

                        download_cancelled_mid_file = False
                        with open(filepath, 'wb') as f:
                            for chunk in response.iter_content(chunk_size=8192):
                                if self._check_events():
                                    download_cancelled_mid_file = True
                                    break
                                f.write(chunk)

                        if download_cancelled_mid_file:
                            self.progress_signal.emit(f" Download cancelled for '{filename_to_use}'. Deleting partial file.")
                            if os.path.exists(filepath):
                                os.remove(filepath)
                            continue

                        download_count += 1
                    except Exception as e:
                        self.progress_signal.emit(f" ❌ Failed to download '{filename_to_use}': {e}")
                        skip_count += 1
        finally:
            self.finished_signal.emit(download_count, skip_count, self.is_cancelled, [])
133
src/ui/classes/downloader_factory.py
Normal file
@@ -0,0 +1,133 @@
import re
import requests
from urllib.parse import urlparse

from ...utils.network_utils import prepare_cookies_for_request
from ...utils.file_utils import clean_folder_name
from .allcomic_downloader_thread import AllcomicDownloadThread
from .booru_downloader_thread import BooruDownloadThread
from .bunkr_downloader_thread import BunkrDownloadThread
from .discord_downloader_thread import DiscordDownloadThread
from .drive_downloader_thread import DriveDownloadThread
from .erome_downloader_thread import EromeDownloadThread
from .external_link_downloader_thread import ExternalLinkDownloadThread
from .fap_nation_downloader_thread import FapNationDownloadThread
from .hentai2read_downloader_thread import Hentai2readDownloadThread
from .mangadex_downloader_thread import MangaDexDownloadThread
from .nhentai_downloader_thread import NhentaiDownloadThread
from .pixeldrain_downloader_thread import PixeldrainDownloadThread
from .saint2_downloader_thread import Saint2DownloadThread
from .simp_city_downloader_thread import SimpCityDownloadThread
from .toonily_downloader_thread import ToonilyDownloadThread
from .rule34video_downloader_thread import Rule34VideoDownloadThread


def create_downloader_thread(main_app, api_url, service, id1, id2, effective_output_dir_for_run):
    """
    Factory function to create and configure the correct QThread for a given URL.
    Returns a configured QThread instance, a sentinel string ("COOKIE_ERROR" or
    "FETCH_ERROR") on a recoverable setup failure, or None if no special handler matched.
    """

    # Handler for Booru sites (Danbooru, Gelbooru)
    if service in ['danbooru', 'gelbooru']:
        api_key = main_app.api_key_input.text().strip()
        user_id = main_app.user_id_input.text().strip()
        return BooruDownloadThread(
            url=api_url, output_dir=effective_output_dir_for_run,
            api_key=api_key, user_id=user_id, parent=main_app
        )

    # Handler for cloud storage sites (Mega, GDrive, etc.)
    platform = None
    if 'mega.nz' in api_url or 'mega.io' in api_url: platform = 'mega'
    elif 'drive.google.com' in api_url: platform = 'gdrive'
    elif 'dropbox.com' in api_url: platform = 'dropbox'
    elif 'gofile.io' in api_url: platform = 'gofile'
    if platform:
        use_post_subfolder = main_app.use_subfolder_per_post_checkbox.isChecked()
        return DriveDownloadThread(
            api_url, effective_output_dir_for_run, platform, use_post_subfolder,
            main_app.cancellation_event, main_app.pause_event, main_app.log_signal.emit
        )

    # Handler for Erome
    if 'erome.com' in api_url:
        return EromeDownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for MangaDex
    if 'mangadex.org' in api_url:
        return MangaDexDownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for Saint2
    is_saint2_url = 'saint2.su' in api_url or 'saint2.pk' in api_url
    if is_saint2_url and api_url.strip().lower() != 'saint2.su':  # Exclude batch mode trigger
        return Saint2DownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for SimpCity
    if service == 'simpcity':
        cookies = prepare_cookies_for_request(
            use_cookie_flag=True, cookie_text_input=main_app.cookie_text_input.text(),
            selected_cookie_file_path=main_app.selected_cookie_filepath,
            app_base_dir=main_app.app_base_dir, logger_func=main_app.log_signal.emit,
            target_domain='simpcity.cr'
        )
        if not cookies:
            # The main app will handle the error dialog
            return "COOKIE_ERROR"
        return SimpCityDownloadThread(api_url, id2, effective_output_dir_for_run, cookies, main_app)

    # Handler for Rule34Video
    if service == 'rule34video':
        main_app.log_signal.emit("ℹ️ Rule34Video.com URL detected. Starting dedicated downloader.")
        # id1 contains the video_id from extract_post_info
        return Rule34VideoDownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for official Discord URLs
    if 'discord.com' in api_url and service == 'discord':
        token = main_app.remove_from_filename_input.text().strip()
        limit_text = main_app.discord_message_limit_input.text().strip()
        message_limit = int(limit_text) if limit_text else None
        mode = 'pdf' if main_app.discord_download_scope == 'messages' else 'files'
        return DiscordDownloadThread(
            mode=mode, session=requests.Session(), token=token, output_dir=effective_output_dir_for_run,
            server_id=id1, channel_id=id2, url=api_url, app_base_dir=main_app.app_base_dir,
            limit=message_limit, parent=main_app
        )

    # Handler for Allcomic/Allporncomic
    if 'allcomic.com' in api_url or 'allporncomic.com' in api_url:
        return AllcomicDownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for Hentai2Read
    if 'hentai2read.com' in api_url:
        return Hentai2readDownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for Fap-Nation
    if 'fap-nation.com' in api_url or 'fap-nation.org' in api_url:
        use_post_subfolder = main_app.use_subfolder_per_post_checkbox.isChecked()
        return FapNationDownloadThread(
            api_url, effective_output_dir_for_run, use_post_subfolder,
            main_app.pause_event, main_app.cancellation_event, main_app.actual_gui_signals, main_app
        )

    # Handler for Pixeldrain
    if 'pixeldrain.com' in api_url:
        return PixeldrainDownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for nHentai
    if service == 'nhentai':
        from ...core.nhentai_client import fetch_nhentai_gallery
        gallery_data = fetch_nhentai_gallery(id1, main_app.log_signal.emit)
        if not gallery_data:
            return "FETCH_ERROR"  # Sentinel value for fetch failure
        return NhentaiDownloadThread(gallery_data, effective_output_dir_for_run, main_app)

    # Handler for Toonily
    if 'toonily.com' in api_url:
        return ToonilyDownloadThread(api_url, effective_output_dir_for_run, main_app)

    # Handler for Bunkr
    if service == 'bunkr':
        return BunkrDownloadThread(id1, effective_output_dir_for_run, main_app)

    # If no special handler matched, return None
    return None
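The docstring above implies three possible return shapes, so a call site presumably branches on all of them. A minimal sketch of that dispatch (the call-site variable names and the on_download_finished slot are assumptions, not from this diff):

result = create_downloader_thread(main_app, api_url, service, id1, id2, out_dir)
if result is None:
    pass  # no special handler matched; fall back to the generic download path
elif isinstance(result, str):
    # "COOKIE_ERROR" or "FETCH_ERROR": let the main app surface the error dialog
    main_app.log_signal.emit(f"❌ Could not start download: {result}")
else:
    result.finished_signal.connect(main_app.on_download_finished)  # assumed slot
    result.start()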
77
src/ui/classes/drive_downloader_thread.py
Normal file
@@ -0,0 +1,77 @@
from PyQt5.QtCore import QThread, pyqtSignal

from ...services.drive_downloader import (
    download_dropbox_file,
    download_gdrive_file,
    download_gofile_folder,
    download_mega_file as drive_download_mega_file,
)


class DriveDownloadThread(QThread):
    """A dedicated QThread for handling direct Mega, GDrive, Dropbox, and Gofile links."""
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool, list)
    overall_progress_signal = pyqtSignal(int, int)

    def __init__(self, url, output_dir, platform, use_post_subfolder, cancellation_event, pause_event, logger_func, parent=None):
        super().__init__(parent)
        self.drive_url = url
        self.output_dir = output_dir
        self.platform = platform
        self.use_post_subfolder = use_post_subfolder
        self.is_cancelled = False
        self.cancellation_event = cancellation_event
        self.pause_event = pause_event
        self.logger_func = logger_func

    def run(self):
        self.logger_func("=" * 40)
        self.logger_func(f"🚀 Starting direct {self.platform.capitalize()} Download for: {self.drive_url}")

        try:
            if self.platform == 'mega':
                drive_download_mega_file(
                    self.drive_url, self.output_dir,
                    logger_func=self.logger_func,
                    progress_callback_func=self.file_progress_signal.emit,
                    overall_progress_callback=self.overall_progress_signal.emit,
                    cancellation_event=self.cancellation_event,
                    pause_event=self.pause_event
                )
            elif self.platform == 'gdrive':
                download_gdrive_file(
                    self.drive_url, self.output_dir,
                    logger_func=self.logger_func,
                    progress_callback_func=self.file_progress_signal.emit,
                    overall_progress_callback=self.overall_progress_signal.emit,
                    use_post_subfolder=self.use_post_subfolder,
                    post_title="Google Drive Download"
                )
            elif self.platform == 'dropbox':
                download_dropbox_file(
                    self.drive_url, self.output_dir,
                    logger_func=self.logger_func,
                    progress_callback_func=self.file_progress_signal.emit,
                    use_post_subfolder=self.use_post_subfolder,
                    post_title="Dropbox Download"
                )
            elif self.platform == 'gofile':
                download_gofile_folder(
                    self.drive_url, self.output_dir,
                    logger_func=self.logger_func,
                    progress_callback_func=self.file_progress_signal.emit,
                    overall_progress_callback=self.overall_progress_signal.emit
                )

            self.finished_signal.emit(1, 0, self.is_cancelled, [])

        except Exception as e:
            self.logger_func(f"❌ An unexpected error occurred in DriveDownloadThread: {e}")
            self.finished_signal.emit(0, 1, self.is_cancelled, [])

    def cancel(self):
        self.is_cancelled = True
        if self.cancellation_event:
            self.cancellation_event.set()
        self.logger_func(f" Cancellation signal received by {self.platform.capitalize()} thread.")
106
src/ui/classes/erome_downloader_thread.py
Normal file
@@ -0,0 +1,106 @@
import os
import time
import requests
import cloudscraper
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.erome_client import fetch_erome_data

class EromeDownloadThread(QThread):
    """A dedicated QThread for handling erome.com downloads."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)  # dl_count, skip_count, cancelled

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.erome_url = url
        self.output_dir = output_dir
        self.is_cancelled = False

    def run(self):
        download_count = 0
        skip_count = 0
        self.progress_signal.emit("=" * 40)
        self.progress_signal.emit(f"🚀 Starting Erome.com Download for: {self.erome_url}")

        album_name, files_to_download = fetch_erome_data(self.erome_url, self.progress_signal.emit)

        if not files_to_download:
            self.progress_signal.emit("❌ Failed to extract file information from Erome. Aborting.")
            self.finished_signal.emit(0, 0, self.is_cancelled)
            return

        album_path = os.path.join(self.output_dir, album_name)
        try:
            os.makedirs(album_path, exist_ok=True)
            self.progress_signal.emit(f" Saving to folder: '{album_path}'")
        except OSError as e:
            self.progress_signal.emit(f"❌ Critical error creating directory: {e}")
            self.finished_signal.emit(0, len(files_to_download), self.is_cancelled)
            return

        total_files = len(files_to_download)
        session = cloudscraper.create_scraper()

        for i, file_data in enumerate(files_to_download):
            if self.is_cancelled:
                self.progress_signal.emit(" Download cancelled by user.")
                skip_count = total_files - download_count
                break

            filename = file_data.get('filename', f'untitled_{i+1}.mp4')
            file_url = file_data.get('url')
            headers = file_data.get('headers')
            filepath = os.path.join(album_path, filename)

            if os.path.exists(filepath):
                self.progress_signal.emit(f" -> Skip ({i+1}/{total_files}): '{filename}' already exists.")
                skip_count += 1
                continue

            self.progress_signal.emit(f" Downloading ({i+1}/{total_files}): '{filename}'...")

            try:
                response = session.get(file_url, stream=True, headers=headers, timeout=60)
                response.raise_for_status()

                total_size = int(response.headers.get('content-length', 0))
                downloaded_size = 0
                last_update_time = time.time()

                with open(filepath, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if self.is_cancelled:
                            break
                        if chunk:
                            f.write(chunk)
                            downloaded_size += len(chunk)
                            current_time = time.time()
                            if total_size > 0 and (current_time - last_update_time) > 0.5:
                                self.file_progress_signal.emit(filename, (downloaded_size, total_size))
                                last_update_time = current_time

                if self.is_cancelled:
                    if os.path.exists(filepath): os.remove(filepath)
                    continue

                if total_size > 0:
                    self.file_progress_signal.emit(filename, (total_size, total_size))

                download_count += 1
            except requests.exceptions.RequestException as e:
                self.progress_signal.emit(f" ❌ Failed to download '{filename}'. Error: {e}")
                if os.path.exists(filepath): os.remove(filepath)
                skip_count += 1
            except Exception as e:
                self.progress_signal.emit(f" ❌ An unexpected error occurred with '{filename}': {e}")
                if os.path.exists(filepath): os.remove(filepath)
                skip_count += 1

        self.file_progress_signal.emit("", None)
        self.finished_signal.emit(download_count, skip_count, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by Erome thread.")
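The chunk loop above throttles file_progress_signal to at most one emission per half second, so large files don't flood the GUI event loop with signal traffic. The same throttling pattern in isolation (names are illustrative):

import time

def make_throttled_reporter(progress_cb, filename, total_size, interval=0.5):
    """Return a per-chunk callback that reports progress at most every `interval` seconds."""
    state = {'downloaded': 0, 'last_update': time.time()}
    def on_chunk(chunk):
        state['downloaded'] += len(chunk)
        now = time.time()
        if total_size > 0 and (now - state['last_update']) > interval:
            progress_cb(filename, (state['downloaded'], total_size))
            state['last_update'] = now
    return on_chunk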
86
src/ui/classes/external_link_downloader_thread.py
Normal file
@@ -0,0 +1,86 @@
from PyQt5.QtCore import QThread, pyqtSignal

from ...services.drive_downloader import (
    download_dropbox_file,
    download_gdrive_file,
    download_mega_file as drive_download_mega_file,
)


class ExternalLinkDownloadThread(QThread):
    """A QThread to handle downloading multiple external links sequentially."""
    progress_signal = pyqtSignal(str)
    file_complete_signal = pyqtSignal(str, bool)
    finished_signal = pyqtSignal()
    overall_progress_signal = pyqtSignal(int, int)
    file_progress_signal = pyqtSignal(str, object)

    def __init__(self, tasks_to_download, download_base_path, parent_logger_func, parent=None, use_post_subfolder=False):
        super().__init__(parent)
        self.tasks = tasks_to_download
        self.download_base_path = download_base_path
        self.parent_logger_func = parent_logger_func
        self.is_cancelled = False
        self.use_post_subfolder = use_post_subfolder

    def run(self):
        total_tasks = len(self.tasks)
        self.progress_signal.emit(f"ℹ️ Starting external link download thread for {total_tasks} link(s).")
        self.overall_progress_signal.emit(total_tasks, 0)

        for i, task_info in enumerate(self.tasks):
            if self.is_cancelled:
                self.progress_signal.emit("External link download cancelled by user.")
                break

            self.overall_progress_signal.emit(total_tasks, i + 1)

            platform = task_info.get('platform', 'unknown').lower()
            full_url = task_info['url']
            post_title = task_info['title']

            self.progress_signal.emit(f"Download ({i + 1}/{total_tasks}): Starting '{post_title}' ({platform.upper()}) from {full_url}")

            try:
                if platform == 'mega':
                    drive_download_mega_file(
                        full_url,
                        self.download_base_path,
                        logger_func=self.parent_logger_func,
                        progress_callback_func=self.file_progress_signal.emit,
                        overall_progress_callback=self.overall_progress_signal.emit
                    )
                elif platform == 'google drive':
                    download_gdrive_file(
                        full_url,
                        self.download_base_path,
                        logger_func=self.parent_logger_func,
                        progress_callback_func=self.file_progress_signal.emit,
                        overall_progress_callback=self.overall_progress_signal.emit,
                        use_post_subfolder=self.use_post_subfolder,
                        post_title=post_title
                    )
                elif platform == 'dropbox':
                    download_dropbox_file(
                        full_url,
                        self.download_base_path,
                        logger_func=self.parent_logger_func,
                        progress_callback_func=self.file_progress_signal.emit,
                        use_post_subfolder=self.use_post_subfolder,
                        post_title=post_title
                    )
                else:
                    self.progress_signal.emit(f"⚠️ Unsupported platform '{platform}' for link: {full_url}")
                    self.file_complete_signal.emit(full_url, False)
                    continue
                self.file_complete_signal.emit(full_url, True)
            except Exception as e:
                self.progress_signal.emit(f"❌ Error downloading ({platform.upper()}) link '{full_url}': {e}")
                self.file_complete_signal.emit(full_url, False)

        self.finished_signal.emit()

    def cancel(self):
        """Sets the cancellation flag to stop the thread gracefully."""
        self.progress_signal.emit(" [External Links] Cancellation signal received by thread.")
        self.is_cancelled = True
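Each task dict read in run() needs url and title keys plus an optional platform ('mega', 'google drive', or 'dropbox'). A sketch of the expected input shape (the URLs and titles are illustrative placeholders):

tasks = [
    {'platform': 'mega', 'url': 'https://mega.nz/file/...', 'title': 'Post 1'},
    {'platform': 'google drive', 'url': 'https://drive.google.com/...', 'title': 'Post 2'},
]
thread = ExternalLinkDownloadThread(tasks, '/downloads', print)  # print stands in for the app logger
thread.start()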
162
src/ui/classes/fap_nation_downloader_thread.py
Normal file
@@ -0,0 +1,162 @@
import os
import sys
import re
import threading
import time
from PyQt5.QtCore import QThread, pyqtSignal, QProcess
import cloudscraper

from ...core.fap_nation_client import fetch_fap_nation_data
from ...services.multipart_downloader import download_file_in_parts

class FapNationDownloadThread(QThread):
    """
    A dedicated QThread for Fap-Nation that uses a hybrid approach, choosing
    between yt-dlp for HLS streams and a multipart downloader for direct links.
    """
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)
    overall_progress_signal = pyqtSignal(int, int)

    def __init__(self, url, output_dir, use_post_subfolder, pause_event, cancellation_event, gui_signals, parent=None):
        super().__init__(parent)
        self.album_url = url
        self.output_dir = output_dir
        self.use_post_subfolder = use_post_subfolder
        self.is_cancelled = False
        self.process = None
        self.current_filename = "Unknown File"
        self.album_name = "fap-nation_album"
        self.pause_event = pause_event
        self.cancellation_event = cancellation_event
        self.gui_signals = gui_signals
        self._is_finished = False

        self.process = QProcess(self)
        self.process.readyReadStandardOutput.connect(self.handle_ytdlp_output)

    def run(self):
        self.progress_signal.emit("=" * 40)
        self.progress_signal.emit(f"🚀 Starting Fap-Nation Download for: {self.album_url}")

        self.album_name, files_to_download = fetch_fap_nation_data(self.album_url, self.progress_signal.emit)

        if self.is_cancelled or not files_to_download:
            self.progress_signal.emit("❌ Failed to extract file information. Aborting.")
            self.finished_signal.emit(0, 1, self.is_cancelled)
            return

        self.overall_progress_signal.emit(1, 0)

        save_path = self.output_dir
        if self.use_post_subfolder:
            save_path = os.path.join(self.output_dir, self.album_name)
            self.progress_signal.emit(f" Subfolder per Post is ON. Saving to: '{self.album_name}'")
            os.makedirs(save_path, exist_ok=True)

        file_data = files_to_download[0]
        self.current_filename = file_data.get('filename')
        download_url = file_data.get('url')
        link_type = file_data.get('type')
        filepath = os.path.join(save_path, self.current_filename)

        if os.path.exists(filepath):
            self.progress_signal.emit(f" -> Skip: '{self.current_filename}' already exists.")
            self.overall_progress_signal.emit(1, 1)
            self.finished_signal.emit(0, 1, self.is_cancelled)
            return

        if link_type == 'hls':
            self.download_with_ytdlp(filepath, download_url)
        elif link_type == 'direct':
            self.download_with_multipart(filepath, download_url)
        else:
            self.progress_signal.emit(f" ❌ Unknown link type '{link_type}'. Aborting.")
            self._on_ytdlp_finished(-1)

    def download_with_ytdlp(self, filepath, playlist_url):
        self.progress_signal.emit(f" Downloading (HLS Stream): '{self.current_filename}' using yt-dlp...")
        try:
            if getattr(sys, 'frozen', False):
                # Running as a PyInstaller bundle: yt-dlp.exe is unpacked into _MEIPASS
                base_path = sys._MEIPASS
                ytdlp_path = os.path.join(base_path, "yt-dlp.exe")
            else:
                ytdlp_path = "yt-dlp.exe"

            if not os.path.exists(ytdlp_path):
                self.progress_signal.emit(f" ❌ ERROR: yt-dlp.exe not found at '{ytdlp_path}'.")
                self._on_ytdlp_finished(-1)
                return

            command = [ytdlp_path, '--no-warnings', '--progress', '--output', filepath, '--merge-output-format', 'mp4', playlist_url]

            self.process.start(command[0], command[1:])
            self.process.waitForFinished(-1)
            self._on_ytdlp_finished(self.process.exitCode())

        except Exception as e:
            self.progress_signal.emit(f" ❌ Failed to start yt-dlp: {e}")
            self._on_ytdlp_finished(-1)

    def download_with_multipart(self, filepath, direct_url):
        self.progress_signal.emit(f" Downloading (Direct Link): '{self.current_filename}' using multipart downloader...")
        try:
            session = cloudscraper.create_scraper()
            head_response = session.head(direct_url, allow_redirects=True, timeout=20)
            head_response.raise_for_status()
            total_size = int(head_response.headers.get('content-length', 0))

            success, _, _, _ = download_file_in_parts(
                file_url=direct_url, save_path=filepath, total_size=total_size, num_parts=5,
                headers=session.headers, api_original_filename=self.current_filename,
                emitter_for_multipart=self.gui_signals,
                cookies_for_chunk_session=session.cookies,
                cancellation_event=self.cancellation_event,
                skip_event=None, logger_func=self.progress_signal.emit, pause_event=self.pause_event
            )
            self._on_ytdlp_finished(0 if success else 1)
        except Exception as e:
            self.progress_signal.emit(f" ❌ Multipart download failed: {e}")
            self._on_ytdlp_finished(1)

    def handle_ytdlp_output(self):
        if not self.process:
            return

        output = self.process.readAllStandardOutput().data().decode('utf-8', errors='ignore')
        for line in reversed(output.strip().splitlines()):
            line = line.strip()
            progress_match = re.search(r'\[download\]\s+([\d.]+)%\s+of\s+~?\s*([\d.]+\w+B)', line)
            if progress_match:
                percent, size = progress_match.groups()
                self.file_progress_signal.emit("yt-dlp:", f"{percent}% of {size}")
                break

    def _on_ytdlp_finished(self, exit_code):
        if self._is_finished:
            return
        self._is_finished = True

        download_count, skip_count = 0, 0

        if self.is_cancelled:
            self.progress_signal.emit(f" Download of '{self.current_filename}' was cancelled.")
            skip_count = 1
        elif exit_code == 0:
            self.progress_signal.emit(f" ✅ Download process finished successfully for '{self.current_filename}'.")
            download_count = 1
        else:
            self.progress_signal.emit(f" ❌ Download process exited with an error (Code: {exit_code}) for '{self.current_filename}'.")
            skip_count = 1

        self.overall_progress_signal.emit(1, 1)
        self.process = None
        self.finished_signal.emit(download_count, skip_count, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.cancellation_event.set()
        if self.process and self.process.state() == QProcess.Running:
            self.progress_signal.emit(" Cancellation signal received, terminating yt-dlp process.")
            self.process.kill()
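handle_ytdlp_output walks the newest stdout lines looking for yt-dlp's [download] progress reports. A quick worked check of that regex (the sample line is illustrative of yt-dlp's output format):

import re

line = "[download]  42.7% of ~ 151.32MiB at 3.21MiB/s ETA 00:28"
m = re.search(r'\[download\]\s+([\d.]+)%\s+of\s+~?\s*([\d.]+\w+B)', line)
if m:
    percent, size = m.groups()  # ('42.7', '151.32MiB')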
51
src/ui/classes/hentai2read_downloader_thread.py
Normal file
@@ -0,0 +1,51 @@
import threading
import time
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.Hentai2read_client import run_hentai2read_download as h2r_run_download


class Hentai2readDownloadThread(QThread):
    """
    A dedicated QThread that calls the self-contained Hentai2Read client to
    perform scraping and downloading.
    """
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)
    overall_progress_signal = pyqtSignal(int, int)

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.start_url = url
        self.output_dir = output_dir
        self.is_cancelled = False
        self.pause_event = parent.pause_event if hasattr(parent, 'pause_event') else threading.Event()

    def _check_pause(self):
        """Helper to handle pausing and cancellation events."""
        if self.is_cancelled: return True
        if self.pause_event and self.pause_event.is_set():
            self.progress_signal.emit(" Download paused...")
            while self.pause_event.is_set():
                if self.is_cancelled: return True
                time.sleep(0.5)
            self.progress_signal.emit(" Download resumed.")
        return self.is_cancelled

    def run(self):
        """
        Executes the main download logic by calling the dedicated client function.
        """
        downloaded, skipped = h2r_run_download(
            start_url=self.start_url,
            output_dir=self.output_dir,
            progress_callback=self.progress_signal.emit,
            overall_progress_callback=self.overall_progress_signal.emit,
            check_pause_func=self._check_pause
        )

        self.finished_signal.emit(downloaded, skipped, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
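_check_pause is the reusable pause/cancel gate: it blocks in half-second steps while the shared pause event is set and returns True only when the download should abort. A standalone sketch of the same contract (the event names are illustrative):

import threading
import time

pause_event = threading.Event()
cancelled = threading.Event()

def check_pause():
    """Block while paused; return True when the caller should abort."""
    while pause_event.is_set():
        if cancelled.is_set():
            return True
        time.sleep(0.5)
    return cancelled.is_set()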
45
src/ui/classes/mangadex_downloader_thread.py
Normal file
@@ -0,0 +1,45 @@
import threading
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.mangadex_client import fetch_mangadex_data


class MangaDexDownloadThread(QThread):
    """A wrapper QThread for running the MangaDex client function."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)
    overall_progress_signal = pyqtSignal(int, int)

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.start_url = url
        self.output_dir = output_dir
        self.is_cancelled = False
        self.pause_event = parent.pause_event if hasattr(parent, 'pause_event') else threading.Event()
        self.cancellation_event = parent.cancellation_event if hasattr(parent, 'cancellation_event') else threading.Event()

    def run(self):
        downloaded = 0
        skipped = 0
        try:
            downloaded, skipped = fetch_mangadex_data(
                self.start_url,
                self.output_dir,
                logger_func=self.progress_signal.emit,
                # Note: the signal objects themselves are passed here (not .emit),
                # unlike the other downloader threads in this change.
                file_progress_callback=self.file_progress_signal,
                overall_progress_callback=self.overall_progress_signal,
                pause_event=self.pause_event,
                cancellation_event=self.cancellation_event
            )
        except Exception as e:
            self.progress_signal.emit(f"❌ A critical error occurred in the MangaDex thread: {e}")
            skipped = 1  # Mark as skipped if there was a critical failure
        finally:
            self.finished_signal.emit(downloaded, skipped, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        if self.cancellation_event:
            self.cancellation_event.set()
        self.progress_signal.emit(" Cancellation signal received by MangaDex thread.")
105
src/ui/classes/nhentai_downloader_thread.py
Normal file
@@ -0,0 +1,105 @@
import os
import time
import cloudscraper
from PyQt5.QtCore import QThread, pyqtSignal

from ...utils.file_utils import clean_folder_name


class NhentaiDownloadThread(QThread):
    progress_signal = pyqtSignal(str)
    finished_signal = pyqtSignal(int, int, bool)

    IMAGE_SERVERS = [
        "https://i.nhentai.net", "https://i2.nhentai.net", "https://i3.nhentai.net",
        "https://i5.nhentai.net", "https://i7.nhentai.net"
    ]

    EXTENSION_MAP = {'j': 'jpg', 'p': 'png', 'g': 'gif', 'w': 'webp'}

    def __init__(self, gallery_data, output_dir, parent=None):
        super().__init__(parent)
        self.gallery_data = gallery_data
        self.output_dir = output_dir
        self.is_cancelled = False

    def run(self):
        title = self.gallery_data.get("title", {}).get("english", f"gallery_{self.gallery_data.get('id')}")
        gallery_id = self.gallery_data.get("id")
        media_id = self.gallery_data.get("media_id")
        pages_info = self.gallery_data.get("pages", [])

        folder_name = clean_folder_name(title)
        gallery_path = os.path.join(self.output_dir, folder_name)

        try:
            os.makedirs(gallery_path, exist_ok=True)
        except OSError as e:
            self.progress_signal.emit(f"❌ Critical error creating directory: {e}")
            self.finished_signal.emit(0, len(pages_info), False)
            return

        self.progress_signal.emit(f"⬇️ Downloading '{title}' to folder '{folder_name}'...")

        scraper = cloudscraper.create_scraper()
        download_count = 0
        skip_count = 0

        for i, page_data in enumerate(pages_info):
            if self.is_cancelled:
                break

            page_num = i + 1

            ext_char = page_data.get('t', 'j')
            extension = self.EXTENSION_MAP.get(ext_char, 'jpg')

            relative_path = f"/galleries/{media_id}/{page_num}.{extension}"

            local_filename = f"{page_num:03d}.{extension}"
            filepath = os.path.join(gallery_path, local_filename)

            if os.path.exists(filepath):
                self.progress_signal.emit(f" -> Skip (Exists): {local_filename}")
                skip_count += 1
                continue

            download_successful = False
            for server in self.IMAGE_SERVERS:
                if self.is_cancelled:
                    break

                full_url = f"{server}{relative_path}"
                try:
                    self.progress_signal.emit(f" Downloading page {page_num}/{len(pages_info)} from {server} ...")

                    headers = {
                        'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/125.0.0.0 Safari/537.36',
                        'Referer': f'https://nhentai.net/g/{gallery_id}/'
                    }

                    response = scraper.get(full_url, headers=headers, timeout=60, stream=True)

                    if response.status_code == 200:
                        with open(filepath, 'wb') as f:
                            for chunk in response.iter_content(chunk_size=8192):
                                f.write(chunk)
                        download_count += 1
                        download_successful = True
                        break
                    else:
                        self.progress_signal.emit(f" -> {server} returned status {response.status_code}. Trying next server...")

                except Exception as e:
                    self.progress_signal.emit(f" -> {server} failed to connect or timed out: {e}. Trying next server...")

            if not download_successful:
                self.progress_signal.emit(f" ❌ Failed to download {local_filename} from all servers.")
                skip_count += 1

            time.sleep(0.5)

        self.finished_signal.emit(download_count, skip_count, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
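The 't' field of each page record selects the file extension via EXTENSION_MAP, and the image path combines the gallery's media_id with the 1-based page number. A worked example of that construction (the media_id value is illustrative):

page_data = {'t': 'p'}  # 'p' -> png
media_id, page_num = '1234567', 3
extension = {'j': 'jpg', 'p': 'png', 'g': 'gif', 'w': 'webp'}.get(page_data['t'], 'jpg')
relative_path = f"/galleries/{media_id}/{page_num}.{extension}"  # '/galleries/1234567/3.png'
local_filename = f"{page_num:03d}.{extension}"                   # '003.png'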
101
src/ui/classes/pixeldrain_downloader_thread.py
Normal file
@@ -0,0 +1,101 @@
import os
import time
import requests
import cloudscraper
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.pixeldrain_client import fetch_pixeldrain_data
from ...utils.file_utils import clean_folder_name


class PixeldrainDownloadThread(QThread):
    """A dedicated QThread for handling pixeldrain.com downloads."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)  # dl_count, skip_count, cancelled

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.pixeldrain_url = url
        self.output_dir = output_dir
        self.is_cancelled = False

    def run(self):
        download_count = 0
        skip_count = 0
        self.progress_signal.emit("=" * 40)
        self.progress_signal.emit(f"🚀 Starting Pixeldrain.com Download for: {self.pixeldrain_url}")

        album_title_raw, files_to_download = fetch_pixeldrain_data(self.pixeldrain_url, self.progress_signal.emit)

        if not files_to_download:
            self.progress_signal.emit("❌ Failed to extract file information from Pixeldrain. Aborting.")
            self.finished_signal.emit(0, 0, self.is_cancelled)
            return

        album_folder_name = clean_folder_name(album_title_raw)
        album_path = os.path.join(self.output_dir, album_folder_name)
        try:
            os.makedirs(album_path, exist_ok=True)
            self.progress_signal.emit(f" Saving to folder: '{album_path}'")
        except OSError as e:
            self.progress_signal.emit(f"❌ Critical error creating directory: {e}")
            self.finished_signal.emit(0, len(files_to_download), self.is_cancelled)
            return

        total_files = len(files_to_download)
        session = cloudscraper.create_scraper()

        for i, file_data in enumerate(files_to_download):
            if self.is_cancelled:
                self.progress_signal.emit(" Download cancelled by user.")
                skip_count = total_files - download_count
                break

            filename = file_data.get('filename')
            file_url = file_data.get('url')
            filepath = os.path.join(album_path, filename)

            if os.path.exists(filepath):
                self.progress_signal.emit(f" -> Skip ({i+1}/{total_files}): '{filename}' already exists.")
                skip_count += 1
                continue

            self.progress_signal.emit(f" Downloading ({i+1}/{total_files}): '{filename}'...")

            try:
                response = session.get(file_url, stream=True, timeout=90)
                response.raise_for_status()

                total_size = int(response.headers.get('content-length', 0))
                downloaded_size = 0
                last_update_time = time.time()

                with open(filepath, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if self.is_cancelled:
                            break
                        if chunk:
                            f.write(chunk)
                            downloaded_size += len(chunk)
                            current_time = time.time()
                            if total_size > 0 and (current_time - last_update_time) > 0.5:
                                self.file_progress_signal.emit(filename, (downloaded_size, total_size))
                                last_update_time = current_time

                if self.is_cancelled:
                    if os.path.exists(filepath): os.remove(filepath)
                    continue

                download_count += 1
            except requests.exceptions.RequestException as e:
                self.progress_signal.emit(f" ❌ Failed to download '{filename}'. Error: {e}")
                if os.path.exists(filepath): os.remove(filepath)
                skip_count += 1

        self.file_progress_signal.emit("", None)
        self.finished_signal.emit(download_count, skip_count, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by Pixeldrain thread.")
87
src/ui/classes/rule34video_downloader_thread.py
Normal file
@@ -0,0 +1,87 @@
import os
import time
import requests
from PyQt5.QtCore import QThread, pyqtSignal
import cloudscraper

from ...core.rule34video_client import fetch_rule34video_data
from ...utils.file_utils import clean_folder_name

class Rule34VideoDownloadThread(QThread):
    """A dedicated QThread for handling rule34video.com downloads."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)  # dl_count, skip_count, cancelled

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.video_url = url
        self.output_dir = output_dir
        self.is_cancelled = False

    def run(self):
        download_count = 0
        skip_count = 0

        video_title, final_video_url = fetch_rule34video_data(self.video_url, self.progress_signal.emit)

        if not final_video_url:
            self.progress_signal.emit("❌ Failed to get video data. Aborting.")
            self.finished_signal.emit(0, 1, self.is_cancelled)
            return

        # Create a safe filename from the title, defaulting if needed
        safe_title = clean_folder_name(video_title if video_title else "rule34video_file")
        filename = f"{safe_title}.mp4"
        filepath = os.path.join(self.output_dir, filename)

        if os.path.exists(filepath):
            self.progress_signal.emit(f" -> Skip: '{filename}' already exists.")
            self.finished_signal.emit(0, 1, self.is_cancelled)
            return

        self.progress_signal.emit(f" Downloading: '{filename}'...")
        try:
            scraper = cloudscraper.create_scraper()
            # The CDN link might not require special headers, but a referer is good practice
            headers = {'Referer': 'https://rule34video.com/'}
            response = scraper.get(final_video_url, stream=True, headers=headers, timeout=90)
            response.raise_for_status()

            total_size = int(response.headers.get('content-length', 0))
            downloaded_size = 0
            last_update_time = time.time()

            with open(filepath, 'wb') as f:
                # Use a larger chunk size for video files
                for chunk in response.iter_content(chunk_size=8192 * 4):
                    if self.is_cancelled:
                        break
                    if chunk:
                        f.write(chunk)
                        downloaded_size += len(chunk)
                        current_time = time.time()
                        if total_size > 0 and (current_time - last_update_time) > 0.5:
                            self.file_progress_signal.emit(filename, (downloaded_size, total_size))
                            last_update_time = current_time

            if self.is_cancelled:
                if os.path.exists(filepath):
                    os.remove(filepath)
                skip_count = 1
                self.progress_signal.emit(f" Download cancelled for '{filename}'.")
            else:
                download_count = 1

        except Exception as e:
            self.progress_signal.emit(f" ❌ Failed to download '{filename}': {e}")
            if os.path.exists(filepath):
                os.remove(filepath)
            skip_count = 1

        self.file_progress_signal.emit("", None)
        self.finished_signal.emit(download_count, skip_count, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by Rule34Video thread.")
105
src/ui/classes/saint2_downloader_thread.py
Normal file
@@ -0,0 +1,105 @@
import os
import time
import requests
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.saint2_client import fetch_saint2_data

class Saint2DownloadThread(QThread):
    """A dedicated QThread for handling saint2.su downloads."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)  # dl_count, skip_count, cancelled

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.saint2_url = url
        self.output_dir = output_dir
        self.is_cancelled = False

    def run(self):
        download_count = 0
        skip_count = 0
        self.progress_signal.emit("=" * 40)
        self.progress_signal.emit(f"🚀 Starting Saint2.su Download for: {self.saint2_url}")

        album_name, files_to_download = fetch_saint2_data(self.saint2_url, self.progress_signal.emit)

        if not files_to_download:
            self.progress_signal.emit("❌ Failed to extract file information from Saint2. Aborting.")
            self.finished_signal.emit(0, 0, self.is_cancelled)
            return

        album_path = os.path.join(self.output_dir, album_name)
        try:
            os.makedirs(album_path, exist_ok=True)
            self.progress_signal.emit(f" Saving to folder: '{album_path}'")
        except OSError as e:
            self.progress_signal.emit(f"❌ Critical error creating directory: {e}")
            self.finished_signal.emit(0, len(files_to_download), self.is_cancelled)
            return

        total_files = len(files_to_download)
        session = requests.Session()

        for i, file_data in enumerate(files_to_download):
            if self.is_cancelled:
                self.progress_signal.emit(" Download cancelled by user.")
                skip_count = total_files - download_count
                break

            filename = file_data.get('filename', f'untitled_{i+1}.mp4')
            file_url = file_data.get('url')
            headers = file_data.get('headers')
            filepath = os.path.join(album_path, filename)

            if os.path.exists(filepath):
                self.progress_signal.emit(f" -> Skip ({i+1}/{total_files}): '{filename}' already exists.")
                skip_count += 1
                continue

            self.progress_signal.emit(f" Downloading ({i+1}/{total_files}): '{filename}'...")

            try:
                response = session.get(file_url, stream=True, headers=headers, timeout=60)
                response.raise_for_status()

                total_size = int(response.headers.get('content-length', 0))
                downloaded_size = 0
                last_update_time = time.time()

                with open(filepath, 'wb') as f:
                    for chunk in response.iter_content(chunk_size=8192):
                        if self.is_cancelled:
                            break
                        if chunk:
                            f.write(chunk)
                            downloaded_size += len(chunk)
                            current_time = time.time()
                            if total_size > 0 and (current_time - last_update_time) > 0.5:
                                self.file_progress_signal.emit(filename, (downloaded_size, total_size))
                                last_update_time = current_time

                if self.is_cancelled:
                    if os.path.exists(filepath): os.remove(filepath)
                    continue

                if total_size > 0:
                    self.file_progress_signal.emit(filename, (total_size, total_size))

                download_count += 1
            except requests.exceptions.RequestException as e:
                self.progress_signal.emit(f" ❌ Failed to download '{filename}'. Error: {e}")
                if os.path.exists(filepath): os.remove(filepath)
                skip_count += 1
            except Exception as e:
                self.progress_signal.emit(f" ❌ An unexpected error occurred with '{filename}': {e}")
                if os.path.exists(filepath): os.remove(filepath)
                skip_count += 1

        self.file_progress_signal.emit("", None)
        self.finished_signal.emit(download_count, skip_count, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by Saint2 thread.")
347
src/ui/classes/simp_city_downloader_thread.py
Normal file
@@ -0,0 +1,347 @@
|
||||
import os
|
||||
import queue
|
||||
import re
|
||||
import threading
|
||||
import time
|
||||
from collections import Counter
|
||||
from concurrent.futures import ThreadPoolExecutor
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import cloudscraper
|
||||
import requests
|
||||
from PyQt5.QtCore import QThread, pyqtSignal
|
||||
|
||||
from ...core.bunkr_client import fetch_bunkr_data
|
||||
from ...core.pixeldrain_client import fetch_pixeldrain_data
|
||||
from ...core.saint2_client import fetch_saint2_data
|
||||
from ...core.simpcity_client import fetch_single_simpcity_page
|
||||
from ...services.drive_downloader import (
|
||||
download_mega_file as drive_download_mega_file,
|
||||
download_gofile_folder
|
||||
)
|
||||
from ...utils.file_utils import clean_folder_name
|
||||
|
||||
|
||||
class SimpCityDownloadThread(QThread):
|
||||
progress_signal = pyqtSignal(str)
|
||||
file_progress_signal = pyqtSignal(str, object)
|
||||
finished_signal = pyqtSignal(int, int, bool, list)
|
||||
overall_progress_signal = pyqtSignal(int, int)
|
||||
|
||||
def __init__(self, url, post_id, output_dir, cookies, parent=None):
|
||||
super().__init__(parent)
|
||||
self.start_url = url
|
||||
self.post_id = post_id
|
||||
self.output_dir = output_dir
|
||||
self.cookies = cookies
|
||||
self.is_cancelled = False
|
||||
self.parent_app = parent
|
||||
self.image_queue = queue.Queue()
|
||||
self.service_queue = queue.Queue()
|
||||
self.counter_lock = threading.Lock()
|
||||
self.total_dl_count = 0
|
||||
self.total_skip_count = 0
|
||||
self.total_jobs_found = 0
|
||||
self.total_jobs_processed = 0
|
||||
self.processed_job_urls = set()
|
||||
|
||||
def cancel(self):
|
||||
self.is_cancelled = True
|
||||
|
||||
class _ServiceLoggerAdapter:
|
||||
"""Wraps the progress signal to provide .info(), .error(), .warning() methods for other clients."""
|
||||
def __init__(self, signal_emitter, prefix=""):
|
||||
self.emit = signal_emitter
|
||||
self.prefix = prefix
|
||||
|
||||
def __call__(self, msg, *args, **kwargs):
|
||||
# Make the logger callable, defaulting to the info method.
|
||||
self.info(msg, *args, **kwargs)
|
||||
|
||||
def info(self, msg, *args, **kwargs): self.emit(f"{self.prefix}{str(msg) % args}")
|
||||
def error(self, msg, *args, **kwargs): self.emit(f"{self.prefix}❌ ERROR: {str(msg) % args}")
|
||||
def warning(self, msg, *args, **kwargs): self.emit(f"{self.prefix}⚠️ WARNING: {str(msg) % args}")
|
||||
|
||||
def _log_interceptor(self, message):
|
||||
"""Filters out verbose log messages from the simpcity_client."""
|
||||
if "[SimpCity] Scraper found" in message or "[SimpCity] Scraping page" in message:
|
||||
pass
|
||||
else:
|
||||
self.progress_signal.emit(message)
|
||||
|
||||
def _get_enriched_jobs(self, jobs_to_check):
|
||||
"""Performs a pre-flight check on jobs to get an accurate total file count and summary."""
|
||||
if not jobs_to_check:
|
||||
return []
|
||||
|
||||
enriched_jobs = []
|
||||
|
||||
bunkr_logger = self._ServiceLoggerAdapter(self.progress_signal.emit, prefix=" ")
|
||||
pixeldrain_logger = self._ServiceLoggerAdapter(self.progress_signal.emit, prefix=" ")
|
||||
saint2_logger = self._ServiceLoggerAdapter(self.progress_signal.emit, prefix=" ")
|
||||
|
||||
for job in jobs_to_check:
|
||||
job_type = job.get('type')
|
||||
job_url = job.get('url')
|
||||
|
||||
if job_type in ['image', 'saint2_direct']:
|
||||
enriched_jobs.append(job)
|
||||
elif (job_type == 'bunkr' and self.should_dl_bunkr) or \
|
||||
(job_type == 'pixeldrain' and self.should_dl_pixeldrain) or \
|
||||
(job_type == 'saint2' and self.should_dl_saint2):
|
||||
self.progress_signal.emit(f" -> Checking {job_type} album for file count...")
|
||||
|
||||
fetch_map = {
|
||||
'bunkr': (fetch_bunkr_data, bunkr_logger),
|
||||
'pixeldrain': (fetch_pixeldrain_data, pixeldrain_logger),
|
||||
'saint2': (fetch_saint2_data, saint2_logger)
|
||||
}
|
||||
fetch_func, logger_adapter = fetch_map[job_type]
|
||||
album_name, files = fetch_func(job_url, logger_adapter)
|
||||
|
||||
if files:
|
||||
job['prefetched_files'] = files
|
||||
job['prefetched_album_name'] = album_name
|
||||
enriched_jobs.append(job)
|
||||
|
||||
if enriched_jobs:
|
||||
summary_counts = Counter()
|
||||
current_page_file_count = 0
|
||||
for job in enriched_jobs:
|
||||
if job.get('prefetched_files'):
|
||||
file_count = len(job['prefetched_files'])
|
||||
summary_counts[job['type']] += file_count
|
||||
current_page_file_count += file_count
|
||||
else:
|
||||
summary_counts[job['type']] += 1
|
||||
current_page_file_count += 1
|
||||
|
||||
summary_parts = [f"{job_type} ({count})" for job_type, count in summary_counts.items()]
|
||||
self.progress_signal.emit(f" [SimpCity] Content Found: {' | '.join(summary_parts)}")
|
||||
|
||||
with self.counter_lock: self.total_jobs_found += current_page_file_count
|
||||
self.overall_progress_signal.emit(self.total_jobs_found, self.total_jobs_processed)
|
||||
|
||||
return enriched_jobs
|
||||
|
||||
def _download_single_image(self, job, album_path, session):
|
||||
"""Downloads one image file; this is run by the image thread pool."""
|
||||
filename = job['filename']
|
||||
filepath = os.path.join(album_path, filename)
|
||||
try:
|
||||
if os.path.exists(filepath):
|
||||
self.progress_signal.emit(f" -> Skip (Image): '{filename}'")
|
||||
with self.counter_lock: self.total_skip_count += 1
|
||||
return
|
||||
self.progress_signal.emit(f" -> Downloading (Image): '{filename}'...")
|
||||
response = session.get(job['url'], stream=True, timeout=90, headers={'Referer': self.start_url})
|
||||
response.raise_for_status()
|
||||
with open(filepath, 'wb') as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
if self.is_cancelled: break
|
||||
f.write(chunk)
|
||||
if not self.is_cancelled:
|
||||
with self.counter_lock: self.total_dl_count += 1
|
||||
except Exception as e:
|
||||
self.progress_signal.emit(f" -> ❌ Image download failed for '{filename}': {e}")
|
||||
with self.counter_lock: self.total_skip_count += 1
|
||||
finally:
|
||||
if not self.is_cancelled:
|
||||
with self.counter_lock: self.total_jobs_processed += 1
|
||||
self.overall_progress_signal.emit(self.total_jobs_found, self.total_jobs_processed)
|
||||
|
||||
def _image_worker(self, album_path):
|
||||
"""Target function for the image thread pool that pulls jobs from the queue."""
|
||||
session = cloudscraper.create_scraper()
|
||||
while True:
|
||||
if self.is_cancelled: break
|
||||
try:
|
||||
job = self.image_queue.get(timeout=1)
|
||||
if job is None: break
|
||||
self._download_single_image(job, album_path, session)
|
||||
self.image_queue.task_done()
|
||||
except queue.Empty:
|
||||
continue
|
||||
|
||||
def _service_worker(self, album_path):
|
||||
"""Target function for the single service thread, ensuring sequential downloads."""
|
||||
while True:
|
||||
if self.is_cancelled: break
|
||||
try:
|
||||
job = self.service_queue.get(timeout=1)
|
||||
if job is None: break
|
||||
|
||||
job_type = job['type']
|
||||
job_url = job['url']
|
||||
|
||||
if job_type in ['pixeldrain', 'saint2', 'bunkr']:
|
||||
if (job_type == 'pixeldrain' and self.should_dl_pixeldrain) or \
|
||||
(job_type == 'saint2' and self.should_dl_saint2) or \
|
||||
(job_type == 'bunkr' and self.should_dl_bunkr):
|
||||
self.progress_signal.emit(f"\n--- Processing Service ({job_type.capitalize()}): {job_url} ---")
|
||||
self._download_album(job.get('prefetched_files', []), job_url, album_path)
|
||||
elif job_type == 'mega' and self.should_dl_mega:
|
||||
self.progress_signal.emit(f"\n--- Processing Service (Mega): {job_url} ---")
|
||||
drive_download_mega_file(job_url, album_path, self.progress_signal.emit, self.file_progress_signal.emit)
|
||||
elif job_type == 'gofile' and self.should_dl_gofile:
|
||||
self.progress_signal.emit(f"\n--- Processing Service (Gofile): {job_url} ---")
|
||||
download_gofile_folder(job_url, album_path, self.progress_signal.emit, self.file_progress_signal.emit)
|
||||
elif job_type == 'saint2_direct' and self.should_dl_saint2:
|
||||
self.progress_signal.emit(f"\n--- Processing Service (Saint2 Direct): {job_url} ---")
|
||||
try:
|
||||
filename = os.path.basename(urlparse(job_url).path)
|
||||
filepath = os.path.join(album_path, filename)
|
||||
if os.path.exists(filepath):
|
||||
with self.counter_lock: self.total_skip_count += 1
|
||||
else:
|
||||
response = cloudscraper.create_scraper().get(job_url, stream=True, timeout=120, headers={'Referer': self.start_url})
|
||||
response.raise_for_status()
|
||||
with open(filepath, 'wb') as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
if self.is_cancelled: break
|
||||
f.write(chunk)
|
||||
if not self.is_cancelled:
|
||||
with self.counter_lock: self.total_dl_count += 1
|
||||
except Exception as e:
|
||||
with self.counter_lock: self.total_skip_count += 1
|
||||
finally:
|
||||
if not self.is_cancelled:
|
||||
with self.counter_lock: self.total_jobs_processed += 1
|
||||
self.overall_progress_signal.emit(self.total_jobs_found, self.total_jobs_processed)
|
||||
|
||||
self.service_queue.task_done()
|
||||
except queue.Empty:
|
||||
continue
|
||||
|
||||
def _download_album(self, files_to_process, source_url, album_path):
|
||||
"""Helper to download all files from a pre-fetched album list."""
|
||||
if not files_to_process: return
|
||||
session = cloudscraper.create_scraper()
|
||||
for file_data in files_to_process:
|
||||
if self.is_cancelled: return
|
||||
filename = file_data.get('filename') or file_data.get('name')
|
||||
filepath = os.path.join(album_path, filename)
|
||||
try:
|
||||
if os.path.exists(filepath):
|
||||
with self.counter_lock: self.total_skip_count += 1
|
||||
else:
|
||||
self.progress_signal.emit(f" -> Downloading: '{filename}'...")
|
||||
headers = file_data.get('headers', {'Referer': source_url})
|
||||
response = session.get(file_data.get('url'), stream=True, timeout=90, headers=headers)
|
||||
response.raise_for_status()
|
||||
with open(filepath, 'wb') as f:
|
||||
for chunk in response.iter_content(chunk_size=8192):
|
||||
if self.is_cancelled: break
|
||||
f.write(chunk)
|
||||
if not self.is_cancelled:
|
||||
with self.counter_lock: self.total_dl_count += 1
|
||||
except Exception as e:
|
||||
                with self.counter_lock: self.total_skip_count += 1
        finally:
            if not self.is_cancelled:
                with self.counter_lock: self.total_jobs_processed += 1
                self.overall_progress_signal.emit(self.total_jobs_found, self.total_jobs_processed)

    def run(self):
        """Main entry point for the thread; orchestrates the entire download."""
        self.progress_signal.emit("=" * 40)
        self.progress_signal.emit(f"🚀 Starting SimpCity Download for: {self.start_url}")

        self.should_dl_pixeldrain = self.parent_app.simpcity_dl_pixeldrain_cb.isChecked()
        self.should_dl_saint2 = self.parent_app.simpcity_dl_saint2_cb.isChecked()
        self.should_dl_mega = self.parent_app.simpcity_dl_mega_cb.isChecked()
        self.should_dl_bunkr = self.parent_app.simpcity_dl_bunkr_cb.isChecked()
        self.should_dl_gofile = self.parent_app.simpcity_dl_gofile_cb.isChecked()

        is_single_post_mode = self.post_id or '/post-' in self.start_url
        album_path = ""

        try:
            if is_single_post_mode:
                self.progress_signal.emit(" Mode: Single Post detected.")
                album_title, _, _ = fetch_single_simpcity_page(self.start_url, self._log_interceptor, cookies=self.cookies, post_id=self.post_id)
                album_path = os.path.join(self.output_dir, clean_folder_name(album_title or "simpcity_post"))
            else:
                self.progress_signal.emit(" Mode: Full Thread detected.")
                first_page_url = re.sub(r'(/page-\d+)|(/post-\d+)', '', self.start_url).split('#')[0].strip('/')
                album_title, _, _ = fetch_single_simpcity_page(first_page_url, self._log_interceptor, cookies=self.cookies)
                album_path = os.path.join(self.output_dir, clean_folder_name(album_title or "simpcity_album"))
            os.makedirs(album_path, exist_ok=True)
            self.progress_signal.emit(f" Saving all content to folder: '{os.path.basename(album_path)}'")
        except Exception as e:
            self.progress_signal.emit(f"❌ Could not process the initial page. Aborting. Error: {e}")
            self.finished_signal.emit(0, 0, self.is_cancelled, []); return

        service_thread = threading.Thread(target=self._service_worker, args=(album_path,), daemon=True)
        service_thread.start()
        num_image_threads = 15
        image_executor = ThreadPoolExecutor(max_workers=num_image_threads, thread_name_prefix='SimpCityImage')
        for _ in range(num_image_threads): image_executor.submit(self._image_worker, album_path)

        try:
            if is_single_post_mode:
                _, jobs, _ = fetch_single_simpcity_page(self.start_url, self._log_interceptor, cookies=self.cookies, post_id=self.post_id)
                enriched_jobs = self._get_enriched_jobs(jobs)
                if enriched_jobs:
                    for job in enriched_jobs:
                        if job['type'] == 'image': self.image_queue.put(job)
                        else: self.service_queue.put(job)
            else:
                base_url = re.sub(r'(/page-\d+)|(/post-\d+)', '', self.start_url).split('#')[0].strip('/')
                page_counter = 1; end_of_thread = False; MAX_RETRIES = 3
                while not end_of_thread:
                    if self.is_cancelled: break
                    page_url = f"{base_url}/page-{page_counter}"; retries = 0; page_fetch_successful = False
                    while retries < MAX_RETRIES:
                        if self.is_cancelled: end_of_thread = True; break
                        self.progress_signal.emit(f"\n--- Analyzing page {page_counter} (Attempt {retries + 1}/{MAX_RETRIES}) ---")
                        try:
                            page_title, jobs_on_page, final_url = fetch_single_simpcity_page(page_url, self._log_interceptor, cookies=self.cookies)

                            if final_url != page_url:
                                self.progress_signal.emit(f" -> Redirect detected from {page_url} to {final_url}")
                                try:
                                    req_page_match = re.search(r'/page-(\d+)', page_url)
                                    final_page_match = re.search(r'/page-(\d+)', final_url)
                                    if req_page_match and final_page_match and int(final_page_match.group(1)) < int(req_page_match.group(1)):
                                        self.progress_signal.emit(" -> Redirected to an earlier page. Reached end of thread.")
                                        end_of_thread = True
                                except (ValueError, TypeError):
                                    pass

                            if end_of_thread:
                                page_fetch_successful = True; break

                            if page_counter > 1 and not page_title:
                                self.progress_signal.emit(f" -> Page {page_counter} is invalid or has no title. Reached end of thread.")
                                end_of_thread = True
                            elif not jobs_on_page:
                                end_of_thread = True
                            else:
                                new_jobs = [job for job in jobs_on_page if job.get('url') not in self.processed_job_urls]
                                if not new_jobs and page_counter > 1:
                                    end_of_thread = True
                                else:
                                    enriched_jobs = self._get_enriched_jobs(new_jobs)
                                    for job in enriched_jobs:
                                        self.processed_job_urls.add(job.get('url'))
                                        if job['type'] == 'image': self.image_queue.put(job)
                                        else: self.service_queue.put(job)
                            page_fetch_successful = True; break
                        except requests.exceptions.HTTPError as e:
                            if e.response.status_code in [403, 404]: end_of_thread = True; break
                            elif e.response.status_code == 429: time.sleep(5 * (retries + 2)); retries += 1
                            else: end_of_thread = True; break
                        except Exception as e:
                            self.progress_signal.emit(f" Stopping crawl due to error on page {page_counter}: {e}"); end_of_thread = True; break
                    if not page_fetch_successful and not end_of_thread: end_of_thread = True
                    if not end_of_thread: page_counter += 1
        except Exception as e:
            self.progress_signal.emit(f"❌ A critical error occurred during the main fetch phase: {e}")

        self.progress_signal.emit("\n--- All pages analyzed. Waiting for background downloads to complete... ---")
        for _ in range(num_image_threads): self.image_queue.put(None)
        self.service_queue.put(None)
        image_executor.shutdown(wait=True)
        service_thread.join()
        self.finished_signal.emit(self.total_dl_count, self.total_skip_count, self.is_cancelled, [])
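
The shutdown sequence above uses the classic sentinel pattern: one `None` per image worker plus a single `None` for the service worker, so every consumer drains the queue and exits before the thread reports completion. A minimal standalone sketch of that pattern (the names here are illustrative, not from the app):

import queue
import threading

def worker(q: queue.Queue):
    while True:
        job = q.get()
        if job is None:  # sentinel: no more work will ever arrive
            break
        print(f"processing {job}")

q = queue.Queue()
threads = [threading.Thread(target=worker, args=(q,)) for _ in range(3)]
for t in threads: t.start()
for job in ["a", "b", "c"]: q.put(job)
for _ in threads: q.put(None)   # one sentinel per worker, as run() does above
for t in threads: t.join()      # every worker has drained the queue and exited
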
src/ui/classes/toonily_downloader_thread.py (new file, 128 lines) @@ -0,0 +1,128 @@
import os
import threading
import time
from urllib.parse import urlparse

import cloudscraper
from PyQt5.QtCore import QThread, pyqtSignal

from ...core.toonily_client import (
    fetch_chapter_data as toonily_fetch_data,
    get_chapter_list as toonily_get_list
)
from ...utils.file_utils import clean_folder_name


class ToonilyDownloadThread(QThread):
    """A dedicated QThread for handling toonily.com series or single chapters."""
    progress_signal = pyqtSignal(str)
    file_progress_signal = pyqtSignal(str, object)
    finished_signal = pyqtSignal(int, int, bool)
    overall_progress_signal = pyqtSignal(int, int)  # Signal for chapter progress

    def __init__(self, url, output_dir, parent=None):
        super().__init__(parent)
        self.start_url = url
        self.output_dir = output_dir
        self.is_cancelled = False
        # Get access to the pause event from the main app
        self.pause_event = parent.pause_event if hasattr(parent, 'pause_event') else threading.Event()

    def _check_pause(self):
        # Helper that blocks while paused and reports whether the job was cancelled
        if self.is_cancelled: return True
        if self.pause_event and self.pause_event.is_set():
            self.progress_signal.emit(" Download paused...")
            while self.pause_event.is_set():
                if self.is_cancelled: return True
                time.sleep(0.5)
            self.progress_signal.emit(" Download resumed.")
        return self.is_cancelled

    def run(self):
        grand_total_dl = 0
        grand_total_skip = 0

        # Determine whether the URL points to a single chapter or a series
        if '/chapter-' in self.start_url:
            chapters_to_download = [self.start_url]
            self.progress_signal.emit("ℹ️ Single Toonily chapter URL detected.")
        else:
            # It's a series URL, so fetch the full chapter list
            chapters_to_download = toonily_get_list(self.start_url, self.progress_signal.emit)

        if not chapters_to_download:
            self.progress_signal.emit("❌ No chapters found to download.")
            self.finished_signal.emit(0, 0, self.is_cancelled)
            return

        self.progress_signal.emit(f"--- Starting download of {len(chapters_to_download)} chapter(s) ---")
        self.overall_progress_signal.emit(len(chapters_to_download), 0)

        scraper = cloudscraper.create_scraper()

        for chapter_idx, chapter_url in enumerate(chapters_to_download):
            if self._check_pause(): break

            self.progress_signal.emit(f"\n-- Processing Chapter {chapter_idx + 1}/{len(chapters_to_download)} --")
            series_title, chapter_title, image_urls = toonily_fetch_data(chapter_url, self.progress_signal.emit, scraper)

            if not image_urls:
                self.progress_signal.emit("❌ Failed to get data for chapter. Skipping.")
                continue

            # Create folders like: /Downloads/Series Name/Chapter 01/
            series_folder_name = clean_folder_name(series_title)
            chapter_folder_name = clean_folder_name(chapter_title)
            final_save_path = os.path.join(self.output_dir, series_folder_name, chapter_folder_name)

            try:
                os.makedirs(final_save_path, exist_ok=True)
                self.progress_signal.emit(f"  Saving to folder: '{os.path.join(series_folder_name, chapter_folder_name)}'")
            except OSError as e:
                self.progress_signal.emit(f"❌ Critical error creating directory: {e}")
                grand_total_skip += len(image_urls)
                continue

            for i, img_url in enumerate(image_urls):
                if self._check_pause(): break

                filename = f"{i + 1:03d}"  # Fallback name so the except block below can never hit an unbound variable
                try:
                    file_extension = os.path.splitext(urlparse(img_url).path)[1] or '.jpg'
                    filename = f"{i + 1:03d}{file_extension}"
                    filepath = os.path.join(final_save_path, filename)

                    if os.path.exists(filepath):
                        self.progress_signal.emit(f"   -> Skip ({i + 1}/{len(image_urls)}): '{filename}' already exists.")
                        grand_total_skip += 1
                    else:
                        self.progress_signal.emit(f"  Downloading ({i + 1}/{len(image_urls)}): '{filename}'...")
                        response = scraper.get(img_url, stream=True, timeout=60, headers={'Referer': chapter_url})
                        response.raise_for_status()

                        with open(filepath, 'wb') as f:
                            for chunk in response.iter_content(chunk_size=8192):
                                if self._check_pause(): break
                                f.write(chunk)

                        # Remove the partial file if the stream was interrupted by pause/cancel
                        if self._check_pause():
                            if os.path.exists(filepath): os.remove(filepath)
                            break

                        grand_total_dl += 1
                        time.sleep(0.2)
                except Exception as e:
                    self.progress_signal.emit(f"   ❌ Failed to download '{filename}': {e}")
                    grand_total_skip += 1

            self.overall_progress_signal.emit(len(chapters_to_download), chapter_idx + 1)
            time.sleep(1)  # Brief pause between chapters

        self.file_progress_signal.emit("", None)
        self.finished_signal.emit(grand_total_dl, grand_total_skip, self.is_cancelled)

    def cancel(self):
        self.is_cancelled = True
        self.progress_signal.emit(" Cancellation signal received by Toonily thread.")
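
Because the class is a plain QThread with signals, wiring it up from a running Qt application is straightforward. A hedged usage sketch; the URL is illustrative and the connected slots stand in for the app's real log pane and progress bar:

# Inside a running QApplication:
thread = ToonilyDownloadThread("https://toonily.com/serie/example/", "/tmp/downloads")
thread.progress_signal.connect(print)
thread.overall_progress_signal.connect(lambda total, done: print(f"{done}/{total} chapters"))
thread.finished_signal.connect(lambda dl, skip, cancelled: print(f"done: {dl} downloaded, {skip} skipped"))
thread.start()
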
src/ui/dialogs/ConfirmAddAllDialog.py (new file, 157 lines) @@ -0,0 +1,157 @@
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QListWidget, QListWidgetItem,
    QPushButton, QVBoxLayout
)
from ...i18n.translator import get_translation
from ..main_window import get_app_icon_object

CONFIRM_ADD_ALL_ACCEPTED = 1
CONFIRM_ADD_ALL_SKIP_ADDING = 2
CONFIRM_ADD_ALL_CANCEL_DOWNLOAD = 3


class ConfirmAddAllDialog(QDialog):
    """
    A dialog to confirm adding multiple new character/series names to Known.txt.
    It appears when the user provides filter names that are not already known,
    allowing them to persist these names for future use.
    """

    def __init__(self, new_filter_objects_list, parent_app, parent=None):
        """
        Initializes the dialog.

        Args:
            new_filter_objects_list (list): A list of filter objects (dicts) to propose adding.
            parent_app (DownloaderApp): A reference to the main application window for theming and translations.
            parent (QWidget, optional): The parent widget. Defaults to None.
        """
        super().__init__(parent)
        self.parent_app = parent_app
        self.setModal(True)
        self.new_filter_objects_list = new_filter_objects_list
        self.user_choice = CONFIRM_ADD_ALL_CANCEL_DOWNLOAD
        app_icon = get_app_icon_object()
        if app_icon and not app_icon.isNull():
            self.setWindowIcon(app_icon)
        screen_height = QApplication.primaryScreen().availableGeometry().height() if QApplication.primaryScreen() else 768
        scale_factor = screen_height / 768.0
        base_min_w, base_min_h = 480, 350
        scaled_min_w = int(base_min_w * scale_factor)
        scaled_min_h = int(base_min_h * scale_factor)
        self.setMinimumSize(scaled_min_w, scaled_min_h)
        self._init_ui()
        self._retranslate_ui()
        self._apply_theme()

    def _init_ui(self):
        """Initializes all UI components and layouts for the dialog."""
        main_layout = QVBoxLayout(self)

        self.info_label = QLabel()
        self.info_label.setWordWrap(True)
        main_layout.addWidget(self.info_label)

        self.names_list_widget = QListWidget()
        self._populate_list()
        main_layout.addWidget(self.names_list_widget)

        selection_buttons_layout = QHBoxLayout()
        self.select_all_button = QPushButton()
        self.select_all_button.clicked.connect(self._select_all_items)
        selection_buttons_layout.addWidget(self.select_all_button)

        self.deselect_all_button = QPushButton()
        self.deselect_all_button.clicked.connect(self._deselect_all_items)
        selection_buttons_layout.addWidget(self.deselect_all_button)
        selection_buttons_layout.addStretch()
        main_layout.addLayout(selection_buttons_layout)

        buttons_layout = QHBoxLayout()
        self.add_selected_button = QPushButton()
        self.add_selected_button.clicked.connect(self._accept_add_selected)
        self.add_selected_button.setDefault(True)
        buttons_layout.addWidget(self.add_selected_button)

        self.skip_adding_button = QPushButton()
        self.skip_adding_button.clicked.connect(self._reject_skip_adding)
        buttons_layout.addWidget(self.skip_adding_button)
        buttons_layout.addStretch()

        self.cancel_download_button = QPushButton()
        self.cancel_download_button.clicked.connect(self._reject_cancel_download)
        buttons_layout.addWidget(self.cancel_download_button)

        main_layout.addLayout(buttons_layout)

    def _populate_list(self):
        """Populates the list widget with the new names to be confirmed."""
        for filter_obj in self.new_filter_objects_list:
            item_text = filter_obj["name"]
            list_item = QListWidgetItem(item_text)
            list_item.setFlags(list_item.flags() | Qt.ItemIsUserCheckable)
            list_item.setCheckState(Qt.Checked)
            list_item.setData(Qt.UserRole, filter_obj)
            self.names_list_widget.addItem(list_item)

    def _tr(self, key, default_text=""):
        """Helper to get a translation based on the main application's current language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        """Sets the text for all translatable UI elements."""
        self.setWindowTitle(self._tr("confirm_add_all_dialog_title", "Confirm Adding New Names"))
        self.info_label.setText(self._tr("confirm_add_all_info_label", "The following new names/groups..."))
        self.select_all_button.setText(self._tr("confirm_add_all_select_all_button", "Select All"))
        self.deselect_all_button.setText(self._tr("confirm_add_all_deselect_all_button", "Deselect All"))
        self.add_selected_button.setText(self._tr("confirm_add_all_add_selected_button", "Add Selected to Known.txt"))
        self.skip_adding_button.setText(self._tr("confirm_add_all_skip_adding_button", "Skip Adding These"))
        self.cancel_download_button.setText(self._tr("confirm_add_all_cancel_download_button", "Cancel Download"))

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        if self.parent_app and hasattr(self.parent_app, 'get_dark_theme') and self.parent_app.current_theme == "dark":
            self.setStyleSheet(self.parent_app.get_dark_theme())

    def _select_all_items(self):
        """Checks all items in the list."""
        for i in range(self.names_list_widget.count()):
            self.names_list_widget.item(i).setCheckState(Qt.Checked)

    def _deselect_all_items(self):
        """Unchecks all items in the list."""
        for i in range(self.names_list_widget.count()):
            self.names_list_widget.item(i).setCheckState(Qt.Unchecked)

    def _accept_add_selected(self):
        """Sets the user choice to the list of selected items and accepts the dialog."""
        selected_objects = []
        for i in range(self.names_list_widget.count()):
            item = self.names_list_widget.item(i)
            if item.checkState() == Qt.Checked:
                filter_obj = item.data(Qt.UserRole)
                if filter_obj:
                    selected_objects.append(filter_obj)

        self.user_choice = selected_objects
        self.accept()

    def _reject_skip_adding(self):
        """Sets the user choice to skip adding and rejects the dialog."""
        self.user_choice = CONFIRM_ADD_ALL_SKIP_ADDING
        self.reject()

    def _reject_cancel_download(self):
        """Sets the user choice to cancel the entire download and rejects the dialog."""
        self.user_choice = CONFIRM_ADD_ALL_CANCEL_DOWNLOAD
        self.reject()

    def exec_(self):
        """
        Overrides the default exec_ to handle the return value logic, ensuring a
        sensible default if no items are selected but the "Add" button is clicked.
        """
        super().exec_()
        if isinstance(self.user_choice, list) and not self.user_choice:
            return CONFIRM_ADD_ALL_SKIP_ADDING
        return self.user_choice
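
Note that the overridden `exec_` returns either one of the module-level constants or a non-empty list of filter objects, so callers must branch on type. A hedged sketch of that tri-state handling; `new_filters`, `app`, and the commented actions are assumed names, not code from this diff:

dialog = ConfirmAddAllDialog(new_filters, parent_app=app)
result = dialog.exec_()
if result == CONFIRM_ADD_ALL_CANCEL_DOWNLOAD:
    pass  # abort the whole download
elif result == CONFIRM_ADD_ALL_SKIP_ADDING:
    pass  # proceed without persisting anything
elif isinstance(result, list):
    for filter_obj in result:
        pass  # e.g. append filter_obj["name"] to Known.txt
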
src/ui/dialogs/CookieHelpDialog.py (new file, 132 lines) @@ -0,0 +1,132 @@
# --- PyQt5 Imports ---
from PyQt5.QtCore import Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QPushButton, QVBoxLayout
)

# --- Local Application Imports ---
from ...i18n.translator import get_translation
from ..main_window import get_app_icon_object
from ...utils.resolution import get_dark_theme


class CookieHelpDialog(QDialog):
    """
    A dialog explaining how to obtain a cookies.txt file.
    It can be displayed as a simple informational popup or as a modal choice
    when cookies are required but not found.
    """
    CHOICE_PROCEED_WITHOUT_COOKIES = 1
    CHOICE_CANCEL_DOWNLOAD = 2
    CHOICE_OK_INFO_ONLY = 3

    def __init__(self, parent_app, parent=None, offer_download_without_option=False):
        """
        Initializes the dialog.

        Args:
            parent_app (DownloaderApp): A reference to the main application window.
            parent (QWidget, optional): The parent widget. Defaults to None.
            offer_download_without_option (bool): If True, shows buttons to
                "Download without Cookies" and "Cancel Download". If False,
                shows only an "OK" button for informational purposes.
        """
        super().__init__(parent)
        self.parent_app = parent_app
        self.setModal(True)
        self.offer_download_without_option = offer_download_without_option
        self.user_choice = None

        # --- Basic Window Setup ---
        app_icon = get_app_icon_object()
        if app_icon and not app_icon.isNull():
            self.setWindowIcon(app_icon)

        self.setMinimumWidth(500)

        # --- Initialize UI and Apply Theming ---
        self._init_ui()
        self._retranslate_ui()
        self._apply_theme()

    def _init_ui(self):
        """Initializes all UI components and layouts for the dialog."""
        main_layout = QVBoxLayout(self)

        self.info_label = QLabel()
        self.info_label.setTextFormat(Qt.RichText)
        self.info_label.setOpenExternalLinks(True)
        self.info_label.setWordWrap(True)
        main_layout.addWidget(self.info_label)

        button_layout = QHBoxLayout()
        button_layout.addStretch(1)

        if self.offer_download_without_option:
            self.download_without_button = QPushButton()
            self.download_without_button.clicked.connect(self._proceed_without_cookies)
            button_layout.addWidget(self.download_without_button)

            self.cancel_button = QPushButton()
            self.cancel_button.clicked.connect(self._cancel_download)
            button_layout.addWidget(self.cancel_button)
        else:
            self.ok_button = QPushButton()
            self.ok_button.clicked.connect(self._ok_info_only)
            button_layout.addWidget(self.ok_button)

        main_layout.addLayout(button_layout)

    def _tr(self, key, default_text=""):
        """Helper to get a translation based on the main application's current language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        """Sets the text for all translatable UI elements."""
        self.setWindowTitle(self._tr("cookie_help_dialog_title", "Cookie File Instructions"))

        instruction_html = f"""
        {self._tr("cookie_help_instruction_intro", "<p>To use cookies...</p>")}
        {self._tr("cookie_help_how_to_get_title", "<p><b>How to get cookies.txt:</b></p>")}
        <ol>
            {self._tr("cookie_help_step1_extension_intro", "<li>Install extension...</li>")}
            {self._tr("cookie_help_step2_login", "<li>Go to website...</li>")}
            {self._tr("cookie_help_step3_click_icon", "<li>Click icon...</li>")}
            {self._tr("cookie_help_step4_export", "<li>Click export...</li>")}
            {self._tr("cookie_help_step5_save_file", "<li>Save file...</li>")}
            {self._tr("cookie_help_step6_app_intro", "<li>In this application:<ul>")}
            {self._tr("cookie_help_step6a_checkbox", "<li>Ensure checkbox...</li>")}
            {self._tr("cookie_help_step6b_browse", "<li>Click browse...</li>")}
            {self._tr("cookie_help_step6c_select", "<li>Select file...</li></ul></li>")}
        </ol>
        {self._tr("cookie_help_alternative_paste", "<p>Alternatively, paste...</p>")}
        """
        self.info_label.setText(instruction_html)

        if self.offer_download_without_option:
            self.download_without_button.setText(self._tr("cookie_help_proceed_without_button", "Download without Cookies"))
            self.cancel_button.setText(self._tr("cookie_help_cancel_download_button", "Cancel Download"))
        else:
            self.ok_button.setText(self._tr("ok_button", "OK"))

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        if self.parent_app and hasattr(self.parent_app, 'get_dark_theme') and self.parent_app.current_theme == "dark":
            self.setStyleSheet(self.parent_app.get_dark_theme())

    def _proceed_without_cookies(self):
        """Handles the user choice to proceed without using cookies."""
        self.user_choice = self.CHOICE_PROCEED_WITHOUT_COOKIES
        self.accept()

    def _cancel_download(self):
        """Handles the user choice to cancel the download."""
        self.user_choice = self.CHOICE_CANCEL_DOWNLOAD
        self.reject()

    def _ok_info_only(self):
        """Handles the acknowledgment when the dialog is purely informational."""
        self.user_choice = self.CHOICE_OK_INFO_ONLY
        self.accept()
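
Unlike ConfirmAddAllDialog, this class exposes its outcome through `user_choice` rather than the `exec_` return value. A hedged sketch of the required-cookies flow; `app` stands in for the main window:

dialog = CookieHelpDialog(app, offer_download_without_option=True)
dialog.exec_()
if dialog.user_choice == CookieHelpDialog.CHOICE_PROCEED_WITHOUT_COOKIES:
    pass  # continue the download with no cookie jar
elif dialog.user_choice == CookieHelpDialog.CHOICE_CANCEL_DOWNLOAD:
    pass  # abort the download
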
src/ui/dialogs/CustomFilenameDialog.py (new file, 89 lines) @@ -0,0 +1,89 @@
from PyQt5.QtWidgets import (
    QDialog, QVBoxLayout, QHBoxLayout, QLabel, QLineEdit, QPushButton,
    QDialogButtonBox, QTextEdit
)
from PyQt5.QtCore import Qt


class CustomFilenameDialog(QDialog):
    """A dialog for creating a custom filename format string."""

    # Maps the user-facing placeholder label to the internal format key.
    DISPLAY_KEY_MAP = {
        "PostID": "id",
        "CreatorName": "creator_name",
        "service": "service",
        "title": "title",
        "added": "added",
        "published": "published",
        "edited": "edited",
        "name": "name"
    }

    def __init__(self, current_format, current_date_format, parent=None):
        super().__init__(parent)
        self.setWindowTitle("Custom Filename Format")
        self.setMinimumWidth(500)

        self.current_format = current_format
        self.current_date_format = current_date_format

        # --- Main Layout ---
        layout = QVBoxLayout(self)

        # --- Description ---
        description_label = QLabel(
            "Create a filename format using placeholders. The date/time values for "
            "'added', 'published', and 'edited' will be automatically shortened to "
            "your specified format."
        )
        description_label.setWordWrap(True)
        layout.addWidget(description_label)

        # --- Format Input ---
        format_label = QLabel("Filename Format:")
        layout.addWidget(format_label)
        self.format_input = QLineEdit(self)
        self.format_input.setText(self.current_format)
        self.format_input.setPlaceholderText("e.g., {published} {title} {id}")
        layout.addWidget(self.format_input)

        # --- Date Format Input ---
        date_format_label = QLabel("Date Format (for {added}, {published}, {edited}):")
        layout.addWidget(date_format_label)
        self.date_format_input = QLineEdit(self)
        self.date_format_input.setText(self.current_date_format)
        self.date_format_input.setPlaceholderText("e.g., YYYY-MM-DD or DD-MM-YYYY")
        layout.addWidget(self.date_format_input)

        # --- Available Keys Display ---
        keys_label = QLabel("Click to add a placeholder:")
        layout.addWidget(keys_label)

        keys_layout = QHBoxLayout()
        keys_layout.setSpacing(5)

        for display_key, internal_key in self.DISPLAY_KEY_MAP.items():
            key_button = QPushButton(f"{{{display_key}}}")
            # Use a lambda default argument to bind the correct internal key to each button
            key_button.clicked.connect(lambda checked, key=internal_key: self.add_key_to_input(key))
            keys_layout.addWidget(key_button)
        keys_layout.addStretch()

        layout.addLayout(keys_layout)

        # --- OK/Cancel Buttons ---
        button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        button_box.accepted.connect(self.accept)
        button_box.rejected.connect(self.reject)
        layout.addWidget(button_box)

    def add_key_to_input(self, key_to_insert):
        """Inserts the corresponding internal key placeholder into the input field."""
        self.format_input.insert(f" {{{key_to_insert}}} ")
        self.format_input.setFocus()

    def get_format_string(self):
        """Returns the final format string from the input field."""
        return self.format_input.text().strip()

    def get_date_format_string(self):
        """Returns the date format string from its input field."""
        return self.date_format_input.text().strip()
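
The dialog only collects the template; rendering happens elsewhere in the app and is not part of this diff. A minimal sketch of one plausible renderer, assuming the internal keys are substituted with str.format-style placeholders (the `_Safe` fallback and function name are illustrative):

def render_filename(template: str, post: dict) -> str:
    # Missing keys fall back to an empty string so a partial post dict
    # never raises KeyError during substitution.
    class _Safe(dict):
        def __missing__(self, key): return ""
    return template.format_map(_Safe(post)).strip()

render_filename("{published} {title} {id}", {"title": "My Post", "id": "123", "published": "2024-01-05"})
# -> '2024-01-05 My Post 123'
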
src/ui/dialogs/DownloadExtractedLinksDialog.py (new file, 153 lines) @@ -0,0 +1,153 @@
from collections import defaultdict
from PyQt5.QtCore import pyqtSignal, Qt
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QListWidget, QListWidgetItem,
    QMessageBox, QPushButton, QVBoxLayout, QAbstractItemView
)
from ...i18n.translator import get_translation
from ..main_window import get_app_icon_object
from ...utils.resolution import get_dark_theme


class DownloadExtractedLinksDialog(QDialog):
    """
    A dialog to select and initiate the download of extracted, supported links
    from external cloud services like Mega, Google Drive, and Dropbox.
    """
    download_requested = pyqtSignal(list)

    def __init__(self, links_data, parent_app, parent=None):
        """
        Initializes the dialog.

        Args:
            links_data (list): A list of dictionaries, each containing info about an extracted link.
            parent_app (DownloaderApp): A reference to the main application window for theming and translations.
            parent (QWidget, optional): The parent widget. Defaults to None.
        """
        super().__init__(parent)
        self.links_data = links_data
        self.parent_app = parent_app
        app_icon = get_app_icon_object()
        if not app_icon.isNull():
            self.setWindowIcon(app_icon)
        scale_factor = getattr(self.parent_app, 'scale_factor', 1.0)
        base_width, base_height = 600, 450
        self.setMinimumSize(int(base_width * scale_factor), int(base_height * scale_factor))
        self.resize(int(base_width * scale_factor * 1.1), int(base_height * scale_factor * 1.1))
        self._init_ui()
        self._retranslate_ui()
        self._apply_theme()

    def _init_ui(self):
        """Initializes all UI components and layouts for the dialog."""
        layout = QVBoxLayout(self)

        self.main_info_label = QLabel()
        self.main_info_label.setAlignment(Qt.AlignHCenter | Qt.AlignTop)
        self.main_info_label.setWordWrap(True)
        layout.addWidget(self.main_info_label)

        self.links_list_widget = QListWidget()
        self.links_list_widget.setSelectionMode(QAbstractItemView.NoSelection)
        self._populate_list()
        layout.addWidget(self.links_list_widget)

        button_layout = QHBoxLayout()
        self.select_all_button = QPushButton()
        self.select_all_button.clicked.connect(lambda: self._set_all_items_checked(Qt.Checked))
        button_layout.addWidget(self.select_all_button)

        self.deselect_all_button = QPushButton()
        self.deselect_all_button.clicked.connect(lambda: self._set_all_items_checked(Qt.Unchecked))
        button_layout.addWidget(self.deselect_all_button)
        button_layout.addStretch()

        self.download_button = QPushButton()
        self.download_button.clicked.connect(self._handle_download_selected)
        self.download_button.setDefault(True)
        button_layout.addWidget(self.download_button)

        self.cancel_button = QPushButton()
        self.cancel_button.clicked.connect(self.reject)
        button_layout.addWidget(self.cancel_button)
        layout.addLayout(button_layout)

    def _populate_list(self):
        """Populates the list widget with the provided links, grouped by post title."""
        grouped_links = defaultdict(list)
        for link_info_item in self.links_data:
            post_title_for_group = link_info_item.get('title', 'Untitled Post')
            grouped_links[post_title_for_group].append(link_info_item)

        sorted_post_titles = sorted(grouped_links.keys(), key=lambda x: x.lower())

        for post_title_key in sorted_post_titles:
            header_item = QListWidgetItem(f"{post_title_key}")
            header_item.setFlags(Qt.NoItemFlags)
            font = header_item.font()
            font.setBold(True)
            font.setPointSize(font.pointSize() + 1)
            header_item.setFont(font)
            self.links_list_widget.addItem(header_item)
            for link_info_data in grouped_links[post_title_key]:
                platform_display = link_info_data.get('platform', 'unknown').upper()
                display_text = f"  [{platform_display}] {link_info_data['link_text']} ({link_info_data['url']})"
                item = QListWidgetItem(display_text)
                item.setData(Qt.UserRole, link_info_data)
                item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
                item.setCheckState(Qt.Checked)
                self.links_list_widget.addItem(item)

    def _tr(self, key, default_text=""):
        """Helper to get a translation based on the current app language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        """Sets the text for all translatable UI elements."""
        self.setWindowTitle(self._tr("download_external_links_dialog_title", "Download Selected External Links"))
        self.main_info_label.setText(self._tr("download_external_links_dialog_main_label", "Found {count} supported link(s)...").format(count=len(self.links_data)))
        self.select_all_button.setText(self._tr("select_all_button_text", "Select All"))
        self.deselect_all_button.setText(self._tr("deselect_all_button_text", "Deselect All"))
        self.download_button.setText(self._tr("download_selected_button_text", "Download Selected"))
        self.cancel_button.setText(self._tr("fav_posts_cancel_button", "Cancel"))

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        is_dark_theme = self.parent_app and self.parent_app.current_theme == "dark"

        if is_dark_theme:
            scale = getattr(self.parent_app, 'scale_factor', 1)
            self.setStyleSheet(get_dark_theme(scale))
        else:
            self.setStyleSheet("")
        header_color = Qt.cyan if is_dark_theme else Qt.blue
        for i in range(self.links_list_widget.count()):
            item = self.links_list_widget.item(i)
            if not item.flags() & Qt.ItemIsUserCheckable:
                item.setForeground(header_color)

    def _set_all_items_checked(self, check_state):
        """Sets the checked state for all checkable items in the list."""
        for i in range(self.links_list_widget.count()):
            item = self.links_list_widget.item(i)
            if item.flags() & Qt.ItemIsUserCheckable:
                item.setCheckState(check_state)

    def _handle_download_selected(self):
        """Gathers selected links and emits the download_requested signal."""
        selected_links = []
        for i in range(self.links_list_widget.count()):
            item = self.links_list_widget.item(i)
            if item.flags() & Qt.ItemIsUserCheckable and item.checkState() == Qt.Checked and item.data(Qt.UserRole) is not None:
                selected_links.append(item.data(Qt.UserRole))

        if selected_links:
            self.download_requested.emit(selected_links)
            self.accept()
        else:
            QMessageBox.information(
                self,
                self._tr("no_selection_title", "No Selection"),
                self._tr("no_selection_message_links", "Please select at least one link to download.")
            )
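
Callers consume this dialog through the `download_requested` signal rather than a return value. A hedged wiring sketch; `app` and `start_external_downloads` are assumed names for the main window and its download slot:

dialog = DownloadExtractedLinksDialog(links, parent_app=app)
dialog.download_requested.connect(start_external_downloads)
if dialog.exec_() == QDialog.Accepted:
    pass  # download_requested has already fired with the checked links
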
src/ui/dialogs/DownloadHistoryDialog.py (new file, 223 lines) @@ -0,0 +1,223 @@
import os
import time
from PyQt5.QtCore import Qt, QStandardPaths, QTimer
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QScrollArea,
    QPushButton, QVBoxLayout, QSplitter, QWidget, QGroupBox,
    QFileDialog, QMessageBox
)
from ...i18n.translator import get_translation
from ..main_window import get_app_icon_object
from ...utils.resolution import get_dark_theme


class DownloadHistoryDialog(QDialog):
    """Dialog to display download history."""

    def __init__(self, last_3_downloaded_entries, first_processed_entries, parent_app, parent=None):
        super().__init__(parent)
        self.parent_app = parent_app
        self.last_3_downloaded_entries = last_3_downloaded_entries
        self.first_processed_entries = first_processed_entries
        self.setModal(True)
        self._apply_theme()

        # Fill in missing creator names from the app-wide cache, keyed by (service, user_id)
        creator_name_cache = getattr(parent_app, 'creator_name_cache', None)
        if creator_name_cache:
            for entry in self.last_3_downloaded_entries:
                if not entry.get('creator_display_name'):
                    service = entry.get('service', '').lower()
                    user_id = str(entry.get('user_id', ''))
                    key = (service, user_id)
                    entry['creator_display_name'] = creator_name_cache.get(key, entry.get('folder_context_name', 'Unknown Creator/Series'))
            for entry in self.first_processed_entries:
                if not entry.get('creator_name'):
                    service = entry.get('service', '').lower()
                    user_id = str(entry.get('user_id', ''))
                    key = (service, user_id)
                    entry['creator_name'] = creator_name_cache.get(key, entry.get('user_id', 'Unknown'))

        app_icon = get_app_icon_object()
        if not app_icon.isNull():
            self.setWindowIcon(app_icon)

        screen_height = QApplication.primaryScreen().availableGeometry().height() if QApplication.primaryScreen() else 768
        scale_factor = screen_height / 768.0
        base_min_w, base_min_h = 600, 450
        scaled_min_w = int(base_min_w * 1.5 * scale_factor)
        scaled_min_h = int(base_min_h * scale_factor)
        self.setMinimumSize(scaled_min_w, scaled_min_h)

        self.setWindowTitle(self._tr("download_history_dialog_title_combined", "Download History"))

        dialog_layout = QVBoxLayout(self)
        self.setLayout(dialog_layout)

        self.main_splitter = QSplitter(Qt.Horizontal)
        dialog_layout.addWidget(self.main_splitter)

        # --- Left pane: last files downloaded ---
        left_pane_widget = QWidget()
        left_layout = QVBoxLayout(left_pane_widget)
        left_header_label = QLabel(self._tr("history_last_downloaded_header", "Last 3 Files Downloaded:"))
        left_header_label.setAlignment(Qt.AlignCenter)
        left_layout.addWidget(left_header_label)

        left_scroll_area = QScrollArea()
        left_scroll_area.setWidgetResizable(True)
        left_scroll_content_widget = QWidget()
        left_scroll_layout = QVBoxLayout(left_scroll_content_widget)

        if not self.last_3_downloaded_entries:
            no_left_history_label = QLabel(self._tr("no_download_history_header", "No Downloads Yet"))
            no_left_history_label.setAlignment(Qt.AlignCenter)
            left_scroll_layout.addWidget(no_left_history_label)
        else:
            for entry in self.last_3_downloaded_entries:
                group_box = QGroupBox(f"{self._tr('history_file_label', 'File:')} {entry.get('disk_filename', 'N/A')}")
                group_layout = QVBoxLayout(group_box)
                details_text = (
                    f"<b>{self._tr('history_from_post_label', 'From Post:')}</b> {entry.get('post_title', 'N/A')} (ID: {entry.get('post_id', 'N/A')})<br>"
                    f"<b>{self._tr('history_creator_series_label', 'Creator/Series:')}</b> {entry.get('creator_display_name', 'N/A')}<br>"
                    f"<b>{self._tr('history_post_uploaded_label', 'Post Uploaded:')}</b> {entry.get('upload_date_str', 'N/A')}<br>"
                    f"<b>{self._tr('history_file_downloaded_label', 'File Downloaded:')}</b> {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(entry.get('download_timestamp', 0)))}<br>"
                    f"<b>{self._tr('history_saved_in_folder_label', 'Saved In Folder:')}</b> {entry.get('download_path', 'N/A')}"
                )
                details_label = QLabel(details_text)
                details_label.setWordWrap(True)
                details_label.setTextFormat(Qt.RichText)
                group_layout.addWidget(details_label)
                left_scroll_layout.addWidget(group_box)
        left_scroll_area.setWidget(left_scroll_content_widget)
        left_layout.addWidget(left_scroll_area)
        self.main_splitter.addWidget(left_pane_widget)

        # --- Right pane: first posts processed this session ---
        right_pane_widget = QWidget()
        right_layout = QVBoxLayout(right_pane_widget)
        right_header_label = QLabel(self._tr("first_files_processed_header", "First {count} Posts Processed This Session:").format(count=len(self.first_processed_entries)))
        right_header_label.setAlignment(Qt.AlignCenter)
        right_layout.addWidget(right_header_label)

        right_scroll_area = QScrollArea()
        right_scroll_area.setWidgetResizable(True)
        right_scroll_content_widget = QWidget()
        right_scroll_layout = QVBoxLayout(right_scroll_content_widget)

        if not self.first_processed_entries:
            no_right_history_label = QLabel(self._tr("no_processed_history_header", "No Posts Processed Yet"))
            no_right_history_label.setAlignment(Qt.AlignCenter)
            right_scroll_layout.addWidget(no_right_history_label)
        else:
            for entry in self.first_processed_entries:
                group_box = QGroupBox(f"{self._tr('history_post_label', 'Post:')} {entry.get('post_title', 'N/A')} (ID: {entry.get('post_id', 'N/A')})")
                group_layout = QVBoxLayout(group_box)
                details_text = (
                    f"<b>{self._tr('history_creator_label', 'Creator:')}</b> {entry.get('creator_name', 'N/A')}<br>"
                    f"<b>{self._tr('history_top_file_label', 'Top File:')}</b> {entry.get('top_file_name', 'N/A')}<br>"
                    f"<b>{self._tr('history_num_files_label', 'Num Files in Post:')}</b> {entry.get('num_files', 0)}<br>"
                    f"<b>{self._tr('history_post_uploaded_label', 'Post Uploaded:')}</b> {entry.get('upload_date_str', 'N/A')}<br>"
                    f"<b>{self._tr('history_processed_on_label', 'Processed On:')}</b> {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(entry.get('download_date_timestamp', 0)))}<br>"
                    f"<b>{self._tr('history_saved_to_folder_label', 'Saved To Folder:')}</b> {entry.get('download_location', 'N/A')}"
                )
                details_label = QLabel(details_text)
                details_label.setWordWrap(True)
                details_label.setTextFormat(Qt.RichText)
                group_layout.addWidget(details_label)
                right_scroll_layout.addWidget(group_box)
        right_scroll_area.setWidget(right_scroll_content_widget)
        right_layout.addWidget(right_scroll_area)
        self.main_splitter.addWidget(right_pane_widget)

        # Split the two panes evenly once the dialog has its final width
        QTimer.singleShot(0, lambda: self.main_splitter.setSizes([self.width() // 2, self.width() // 2]))

        bottom_button_layout = QHBoxLayout()
        self.save_history_button = QPushButton(self._tr("history_save_button_text", "Save History to .txt"))
        self.save_history_button.clicked.connect(self._save_history_to_txt)
        bottom_button_layout.addStretch(1)
        bottom_button_layout.addWidget(self.save_history_button)

        dialog_layout.addLayout(bottom_button_layout)

    def _tr(self, key, default_text=""):
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        if self.parent_app and self.parent_app.current_theme == "dark":
            scale = getattr(self.parent_app, 'scale_factor', 1)
            self.setStyleSheet(get_dark_theme(scale))
        else:
            self.setStyleSheet("QDialog { background-color: #f0f0f0; }")

    def _save_history_to_txt(self):
        if not self.last_3_downloaded_entries and not self.first_processed_entries:
            QMessageBox.information(self, self._tr("no_download_history_header", "No Downloads Yet"),
                                    self._tr("history_nothing_to_save_message", "There is no history to save."))
            return

        main_download_dir = self.parent_app.dir_input.text().strip()
        default_save_dir = ""
        if main_download_dir and os.path.isdir(main_download_dir):
            default_save_dir = main_download_dir
        else:
            fallback_dir = QStandardPaths.writableLocation(QStandardPaths.DocumentsLocation)
            if fallback_dir and os.path.isdir(fallback_dir):
                default_save_dir = fallback_dir
            else:
                default_save_dir = self.parent_app.app_base_dir

        default_filepath = os.path.join(default_save_dir, "download_history.txt")

        filepath, _ = QFileDialog.getSaveFileName(
            self, self._tr("history_save_dialog_title", "Save Download History"),
            default_filepath, "Text Files (*.txt);;All Files (*)"
        )

        if not filepath:
            return

        history_content = []
        history_content.append(f"{self._tr('history_last_downloaded_header', 'Last 3 Files Downloaded:')}\n")
        if self.last_3_downloaded_entries:
            for entry in self.last_3_downloaded_entries:
                history_content.append(f"  {self._tr('history_file_label', 'File:')} {entry.get('disk_filename', 'N/A')}")
                history_content.append(f"    {self._tr('history_from_post_label', 'From Post:')} {entry.get('post_title', 'N/A')} (ID: {entry.get('post_id', 'N/A')})")
                history_content.append(f"    {self._tr('history_creator_series_label', 'Creator/Series:')} {entry.get('creator_display_name', 'N/A')}")
                history_content.append(f"    {self._tr('history_post_uploaded_label', 'Post Uploaded:')} {entry.get('upload_date_str', 'N/A')}")
                history_content.append(f"    {self._tr('history_file_downloaded_label', 'File Downloaded:')} {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(entry.get('download_timestamp', 0)))}")
                history_content.append(f"    {self._tr('history_saved_in_folder_label', 'Saved In Folder:')} {entry.get('download_path', 'N/A')}\n")
        else:
            history_content.append(f"  ({self._tr('no_download_history_header', 'No Downloads Yet')})\n")

        history_content.append(f"\n{self._tr('first_files_processed_header', 'First {count} Posts Processed This Session:').format(count=len(self.first_processed_entries))}\n")
        if self.first_processed_entries:
            for entry in self.first_processed_entries:
                history_content.append(f"  {self._tr('history_post_label', 'Post:')} {entry.get('post_title', 'N/A')} (ID: {entry.get('post_id', 'N/A')})")
                history_content.append(f"    {self._tr('history_creator_label', 'Creator:')} {entry.get('creator_name', 'N/A')}")
                history_content.append(f"    {self._tr('history_top_file_label', 'Top File:')} {entry.get('top_file_name', 'N/A')}")
                history_content.append(f"    {self._tr('history_num_files_label', 'Num Files in Post:')} {entry.get('num_files', 0)}")
                history_content.append(f"    {self._tr('history_post_uploaded_label', 'Post Uploaded:')} {entry.get('upload_date_str', 'N/A')}")
                history_content.append(f"    {self._tr('history_processed_on_label', 'Processed On:')} {time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(entry.get('download_date_timestamp', 0)))}")
                history_content.append(f"    {self._tr('history_saved_to_folder_label', 'Saved To Folder:')} {entry.get('download_location', 'N/A')}\n")
        else:
            history_content.append(f"  ({self._tr('no_processed_history_header', 'No Posts Processed Yet')})\n")

        try:
            with open(filepath, 'w', encoding='utf-8') as f:
                f.write("\n".join(history_content))
            QMessageBox.information(self, self._tr("history_export_success_title", "History Export Successful"),
                                    self._tr("history_export_success_message", "Successfully exported download history to:\n{filepath}").format(filepath=filepath))
        except Exception as e:
            QMessageBox.critical(self, self._tr("history_export_error_title", "History Export Error"),
                                 self._tr("history_export_error_message", "Could not export download history: {error}").format(error=str(e)))
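
The creator-name backfill in `__init__` only works if lookups normalize the key exactly the same way the cache was built: lowercase service, stringified user id. A quick illustration with made-up cache contents:

creator_name_cache = {("patreon", "12345"): "Example Creator"}  # illustrative contents
entry = {"service": "Patreon", "user_id": 12345}
key = (entry["service"].lower(), str(entry["user_id"]))
print(creator_name_cache.get(key, "Unknown Creator/Series"))  # -> 'Example Creator'
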
src/ui/dialogs/EmptyPopupDialog.py (new file, 1038 lines, diff not expanded)
src/ui/dialogs/ErrorFilesDialog.py (new file, 294 lines) @@ -0,0 +1,294 @@
|
||||
# --- PyQt5 Imports ---
|
||||
from PyQt5.QtCore import pyqtSignal, Qt
|
||||
from PyQt5.QtWidgets import (
|
||||
QApplication, QDialog, QHBoxLayout, QLabel, QListWidget, QListWidgetItem,
|
||||
QMessageBox, QPushButton, QVBoxLayout, QAbstractItemView, QFileDialog, QCheckBox
|
||||
)
|
||||
|
||||
# --- Local Application Imports ---
|
||||
from ...i18n.translator import get_translation
|
||||
from ..assets import get_app_icon_object
|
||||
from .ExportOptionsDialog import ExportOptionsDialog
|
||||
from ...utils.resolution import get_dark_theme
|
||||
from ...config.constants import AUTO_RETRY_ON_FINISH_KEY
|
||||
|
||||
class ErrorFilesDialog(QDialog):
|
||||
"""
|
||||
Dialog to display files that were skipped due to errors and
|
||||
allows the user to retry downloading them or export the list of URLs.
|
||||
"""
|
||||
retry_selected_signal = pyqtSignal(list)
|
||||
|
||||
def __init__(self, error_files_info_list, parent_app, parent=None):
|
||||
super().__init__(parent)
|
||||
self.parent_app = parent_app
|
||||
self.setModal(True)
|
||||
self.error_files = error_files_info_list
|
||||
app_icon = get_app_icon_object()
|
||||
if app_icon and not app_icon.isNull():
|
||||
self.setWindowIcon(app_icon)
|
||||
|
||||
scale_factor = getattr(self.parent_app, 'scale_factor', 1.0)
|
||||
base_width, base_height = 600, 450
|
||||
self.setMinimumSize(int(base_width * scale_factor), int(base_height * scale_factor))
|
||||
self.resize(int(base_width * scale_factor * 1.1), int(base_height * scale_factor * 1.1))
|
||||
|
||||
self._init_ui()
|
||||
self._retranslate_ui()
|
||||
self._apply_theme()
|
||||
|
||||
def _init_ui(self):
|
||||
main_layout = QVBoxLayout(self)
|
||||
self.info_label = QLabel()
|
||||
self.info_label.setWordWrap(True)
|
||||
main_layout.addWidget(self.info_label)
|
||||
|
||||
self.files_list_widget = QListWidget()
|
||||
self.files_list_widget.setSelectionMode(QAbstractItemView.ExtendedSelection)
|
||||
main_layout.addWidget(self.files_list_widget)
|
||||
self._populate_list()
|
||||
|
||||
# --- Control Buttons ---
|
||||
buttons_layout = QHBoxLayout()
|
||||
|
||||
self.select_all_button = QPushButton()
|
||||
self.select_all_button.clicked.connect(self._select_all_items)
|
||||
buttons_layout.addWidget(self.select_all_button)
|
||||
|
||||
self.retry_button = QPushButton()
|
||||
self.retry_button.clicked.connect(self._handle_retry_selected)
|
||||
buttons_layout.addWidget(self.retry_button)
|
||||
|
||||
self.load_button = QPushButton()
|
||||
self.load_button.clicked.connect(self._handle_load_errors_from_txt)
|
||||
buttons_layout.addWidget(self.load_button)
|
||||
|
||||
self.export_button = QPushButton()
|
||||
self.export_button.clicked.connect(self._handle_export_errors_to_txt)
|
||||
buttons_layout.addWidget(self.export_button)
|
||||
|
||||
# The stretch will push everything added after this point to the right
|
||||
buttons_layout.addStretch(1)
|
||||
|
||||
# --- MOVED: Auto Retry Checkbox ---
|
||||
self.auto_retry_checkbox = QCheckBox()
|
||||
auto_retry_enabled = self.parent_app.settings.value(AUTO_RETRY_ON_FINISH_KEY, False, type=bool)
|
||||
self.auto_retry_checkbox.setChecked(auto_retry_enabled)
|
||||
self.auto_retry_checkbox.toggled.connect(self._save_auto_retry_setting)
|
||||
buttons_layout.addWidget(self.auto_retry_checkbox)
|
||||
# --- END ---
|
||||
|
||||
self.ok_button = QPushButton()
|
||||
self.ok_button.clicked.connect(self.accept)
|
||||
self.ok_button.setDefault(True)
|
||||
buttons_layout.addWidget(self.ok_button)
|
||||
main_layout.addLayout(buttons_layout)
|
||||
|
||||
has_errors = bool(self.error_files)
|
||||
self.select_all_button.setEnabled(has_errors)
|
||||
self.retry_button.setEnabled(has_errors)
|
||||
self.export_button.setEnabled(has_errors)
|
||||
|
||||
def _populate_list(self):
|
||||
self.files_list_widget.clear()
|
||||
for error_info in self.error_files:
|
||||
self._add_item_to_list(error_info)
|
||||
|
||||
def _handle_load_errors_from_txt(self):
|
||||
"""Opens a file dialog to load URLs from a .txt file."""
|
||||
import re
|
||||
|
||||
filepath, _ = QFileDialog.getOpenFileName(
|
||||
self,
|
||||
self._tr("error_files_load_dialog_title", "Load Error File URLs"),
|
||||
"",
|
||||
"Text Files (*.txt);;All Files (*)"
|
||||
)
|
||||
|
||||
if not filepath:
|
||||
return
|
||||
|
||||
try:
|
||||
detailed_pattern = re.compile(r"^(https?://[^\s]+)\s*\[Post: '(.*?)' \(ID: (.*?)\), File: '(.*?)'\]$")
|
||||
simple_pattern = re.compile(r'^(https?://[^\s]+)')
|
||||
|
||||
with open(filepath, 'r', encoding='utf-8') as f:
|
||||
for line in f:
|
||||
line = line.strip()
|
||||
if not line: continue
|
||||
|
||||
url, post_title, post_id, filename = None, 'Loaded from .txt', 'N/A', None
|
||||
|
||||
detailed_match = detailed_pattern.match(line)
|
||||
if detailed_match:
|
||||
url, post_title, post_id, filename = detailed_match.groups()
|
||||
else:
|
||||
simple_match = simple_pattern.match(line)
|
||||
if simple_match:
|
||||
url = simple_match.group(1)
|
||||
filename = url.split('/')[-1]
|
||||
|
||||
if url:
|
||||
simple_error_info = {
|
||||
'is_loaded_from_txt': True, 'file_info': {'url': url, 'name': filename},
|
||||
'post_title': post_title, 'original_post_id_for_log': post_id,
|
||||
'target_folder_path': self.parent_app.dir_input.text().strip(),
|
||||
'forced_filename_override': filename, 'file_index_in_post': 0,
|
||||
'num_files_in_this_post': 1, 'service': None, 'user_id': None, 'api_url_input': ''
|
||||
}
|
||||
self.error_files.append(simple_error_info)
|
||||
self._add_item_to_list(simple_error_info)
|
||||
|
||||
self.info_label.setText(self._tr("error_files_found_label", "The following {count} file(s)...").format(count=len(self.error_files)))
|
||||
|
||||
has_errors = bool(self.error_files)
|
||||
self.select_all_button.setEnabled(has_errors)
|
||||
self.retry_button.setEnabled(has_errors)
|
||||
self.export_button.setEnabled(has_errors)
|
||||
|
||||
except Exception as e:
|
||||
QMessageBox.critical(self, self._tr("error_files_load_error_title", "Load Error"),
|
||||
self._tr("error_files_load_error_message", "Could not load or parse the file: {error}").format(error=str(e)))
|
||||
|
||||
def _tr(self, key, default_text=""):
|
||||
if callable(get_translation) and self.parent_app:
|
||||
return get_translation(self.parent_app.current_selected_language, key, default_text)
|
||||
return default_text
|
||||
|
||||
def _retranslate_ui(self):
|
||||
self.setWindowTitle(self._tr("error_files_dialog_title", "Files Skipped Due to Errors"))
|
||||
if not self.error_files:
|
||||
self.info_label.setText(self._tr("error_files_no_errors_label", "No files were recorded as skipped..."))
|
||||
else:
|
||||
self.info_label.setText(self._tr("error_files_found_label", "The following {count} file(s)...").format(count=len(self.error_files)))
|
||||
|
||||
self.auto_retry_checkbox.setText(self._tr("error_files_auto_retry_checkbox", "Auto Retry at End"))
|
||||
self.select_all_button.setText(self._tr("error_files_select_all_button", "Select/Deselect All"))
|
||||
self.retry_button.setText(self._tr("error_files_retry_selected_button", "Retry Selected"))
|
||||
self.load_button.setText(self._tr("error_files_load_urls_button", "Load URLs from .txt"))
|
||||
self.export_button.setText(self._tr("error_files_export_urls_button", "Export URLs to .txt"))
|
||||
self.ok_button.setText(self._tr("ok_button", "OK"))
|
||||
|
||||
def _apply_theme(self):
|
||||
if self.parent_app and self.parent_app.current_theme == "dark":
|
||||
scale = getattr(self.parent_app, 'scale_factor', 1)
|
||||
self.setStyleSheet(get_dark_theme(scale))
|
||||
else:
|
||||
self.setStyleSheet("")
|
||||
|
||||
def _save_auto_retry_setting(self, checked):
|
||||
"""Saves the state of the auto-retry checkbox to QSettings."""
|
||||
self.parent_app.settings.setValue(AUTO_RETRY_ON_FINISH_KEY, checked)
|
||||
|
||||
def _add_item_to_list(self, error_info):
|
||||
"""Creates and adds a single QListWidgetItem based on error_info content."""
|
||||
if error_info.get('is_loaded_from_txt'):
|
||||
filename = error_info.get('file_info', {}).get('name', 'Unknown Filename')
|
||||
post_title = error_info.get('post_title', 'N/A')
|
||||
post_id = error_info.get('original_post_id_for_log', 'N/A')
|
||||
item_text = f"File: {filename}\nPost: '{post_title}' (ID: {post_id}) [Loaded from .txt]"
|
||||
else:
|
||||
filename = error_info.get('forced_filename_override', error_info.get('file_info', {}).get('name', 'Unknown Filename'))
|
||||
post_title = error_info.get('post_title', 'Unknown Post')
|
||||
post_id = error_info.get('original_post_id_for_log', 'N/A')
|
||||
creator_name = "Unknown Creator"
|
||||
service, user_id = error_info.get('service'), error_info.get('user_id')
|
||||
if service and user_id and hasattr(self.parent_app, 'creator_name_cache'):
|
||||
creator_name = self.parent_app.creator_name_cache.get((service.lower(), str(user_id)), user_id)
|
||||
item_text = f"File: {filename}\nCreator: {creator_name} - Post: '{post_title}' (ID: {post_id})"
|
||||
|
||||
list_item = QListWidgetItem(item_text)
|
||||
list_item.setData(Qt.UserRole, error_info)
|
||||
list_item.setFlags(list_item.flags() | Qt.ItemIsUserCheckable)
|
||||
list_item.setCheckState(Qt.Unchecked) # Start as unchecked
|
||||
self.files_list_widget.addItem(list_item)
|
||||
|
||||
def _select_all_items(self):
|
||||
"""Toggles checking all items in the list."""
|
||||
# Determine if we should check or uncheck all based on the first item's state
|
||||
is_currently_checked = self.files_list_widget.item(0).checkState() == Qt.Checked if self.files_list_widget.count() > 0 else False
|
||||
new_state = Qt.Unchecked if is_currently_checked else Qt.Checked
|
||||
for i in range(self.files_list_widget.count()):
|
||||
self.files_list_widget.item(i).setCheckState(new_state)
|
||||
|
||||
def _handle_retry_selected(self):
|
||||
selected_files_for_retry = [
|
||||
self.files_list_widget.item(i).data(Qt.UserRole)
|
||||
for i in range(self.files_list_widget.count())
|
||||
if self.files_list_widget.item(i).checkState() == Qt.Checked
|
||||
]
|
||||
if selected_files_for_retry:
|
||||
self.retry_selected_signal.emit(selected_files_for_retry)
|
||||
self.accept()
|
||||
else:
|
||||
QMessageBox.information(self, self._tr("fav_artists_no_selection_title", "No Selection"),
|
||||
self._tr("error_files_no_selection_retry_message", "Please check the box next to at least one file to retry."))
|
||||
|
||||
def _handle_export_errors_to_txt(self):
|
||||
"""Exports the URLs of failed files to a text file."""
|
||||
if not self.error_files:
|
||||
QMessageBox.information(
|
||||
self,
|
||||
self._tr("error_files_no_errors_export_title", "No Errors"),
|
||||
self._tr("error_files_no_errors_export_message", "There are no error file URLs to export.")
|
||||
)
|
||||
return
|
||||
|
||||
options_dialog = ExportOptionsDialog(parent_app=self.parent_app, parent=self)
|
||||
if not options_dialog.exec_() == QDialog.Accepted:
|
||||
return
|
||||
|
||||
        export_option = options_dialog.get_selected_option()

        lines_to_export = []
        for error_item in self.error_files:
            file_info = error_item.get('file_info', {})
            url = file_info.get('url')

            if url:
                if export_option == ExportOptionsDialog.EXPORT_MODE_WITH_DETAILS:
                    post_title = error_item.get('post_title', 'Unknown Post')
                    post_id = error_item.get('original_post_id_for_log', 'N/A')

                    # Prioritize the final renamed filename, but fall back to the original from the API
                    filename_to_display = error_item.get('forced_filename_override') or file_info.get('name', 'Unknown Filename')

                    details_string = f" [Post: '{post_title}' (ID: {post_id}), File: '{filename_to_display}']"
                    lines_to_export.append(f"{url}{details_string}")
                else:
                    lines_to_export.append(url)

        if not lines_to_export:
            QMessageBox.information(
                self,
                self._tr("error_files_no_urls_found_export_title", "No URLs Found"),
                self._tr("error_files_no_urls_found_export_message", "Could not extract any URLs...")
            )
            return

        default_filename = "error_file_links.txt"
        filepath, _ = QFileDialog.getSaveFileName(
            self,
            self._tr("error_files_save_dialog_title", "Save Error File URLs"),
            default_filename,
            "Text Files (*.txt);;All Files (*)"
        )

        if filepath:
            try:
                with open(filepath, 'w', encoding='utf-8') as f:
                    for line in lines_to_export:
                        f.write(f"{line}\n")
                QMessageBox.information(
                    self,
                    self._tr("error_files_export_success_title", "Export Successful"),
                    self._tr("error_files_export_success_message", "Successfully exported...").format(
                        count=len(lines_to_export), filepath=filepath
                    )
                )
            except Exception as e:
                QMessageBox.critical(
                    self,
                    self._tr("error_files_export_error_title", "Export Error"),
                    self._tr("error_files_export_error_message", "Could not export...").format(error=str(e))
                )
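Note on the data shape: the retry and export handlers above depend only on a few keys of each error entry. A minimal sketch of that dict, inferred from the lookups in this method; the sample values are illustrative, not taken from the app:

example_error_info = {
    'file_info': {'name': 'page_01.png', 'url': 'https://example.com/page_01.png'},  # original API name/URL
    'forced_filename_override': None,   # set when the downloader renamed the file
    'post_title': 'Sketch dump',
    'original_post_id_for_log': '123456',
    'service': 'patreon',               # with 'user_id', keys the creator_name_cache lookup
    'user_id': '789',
}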
src/ui/dialogs/ExportLinksDialog.py (new file, +226 lines) @@ -0,0 +1,226 @@
import os
import json
import re
from collections import defaultdict
from PyQt5.QtWidgets import (
    QApplication, QWidget, QLabel, QLineEdit, QTextEdit, QPushButton,
    QVBoxLayout, QHBoxLayout, QFileDialog, QMessageBox, QListWidget, QRadioButton,
    QButtonGroup, QCheckBox, QSplitter, QGroupBox, QDialog, QStackedWidget,
    QScrollArea, QListWidgetItem, QSizePolicy, QProgressBar, QAbstractItemView, QFrame,
    QMainWindow, QAction, QGridLayout,
)
from PyQt5.QtCore import Qt


class ExportLinksDialog(QDialog):
    """
    A dialog for exporting extracted links with various format options, including custom templates.
    """
    def __init__(self, links_data, parent=None):
        super().__init__(parent)
        self.links_data = links_data
        self.setWindowTitle("Export Extracted Links")
        self.setMinimumWidth(550)
        self._setup_ui()
        self._update_options_visibility()

    def _setup_ui(self):
        """Initializes the UI components of the dialog."""
        main_layout = QVBoxLayout(self)

        # Format Selection (Top Level)
        format_group = QGroupBox("Export Format")
        format_layout = QHBoxLayout()
        self.radio_txt = QRadioButton("Plain Text (.txt)")
        self.radio_json = QRadioButton("JSON (.json)")
        self.radio_txt.setChecked(True)
        format_layout.addWidget(self.radio_txt)
        format_layout.addWidget(self.radio_json)
        format_group.setLayout(format_layout)
        main_layout.addWidget(format_group)

        # TXT Options Group
        self.txt_options_group = QGroupBox("TXT Options")
        txt_options_layout = QVBoxLayout()

        self.txt_mode_group = QButtonGroup(self)
        self.radio_simple = QRadioButton("Simple (URL only, one per line)")
        self.radio_detailed = QRadioButton("Detailed (with checkboxes)")
        self.radio_custom = QRadioButton("Custom Format Template")

        self.txt_mode_group.addButton(self.radio_simple)
        self.txt_mode_group.addButton(self.radio_detailed)
        self.txt_mode_group.addButton(self.radio_custom)

        txt_options_layout.addWidget(self.radio_simple)
        txt_options_layout.addWidget(self.radio_detailed)

        self.detailed_options_widget = QWidget()
        detailed_layout = QVBoxLayout(self.detailed_options_widget)
        detailed_layout.setContentsMargins(20, 5, 0, 5)
        self.check_include_titles = QCheckBox("Include post titles as separators")
        self.check_include_link_text = QCheckBox("Include link text/description")
        self.check_include_platform = QCheckBox("Include platform (e.g., Mega, GDrive)")
        detailed_layout.addWidget(self.check_include_titles)
        detailed_layout.addWidget(self.check_include_link_text)
        detailed_layout.addWidget(self.check_include_platform)
        txt_options_layout.addWidget(self.detailed_options_widget)

        txt_options_layout.addWidget(self.radio_custom)

        self.custom_format_widget = QWidget()
        custom_layout = QVBoxLayout(self.custom_format_widget)
        custom_layout.setContentsMargins(20, 5, 0, 5)
        placeholders_label = QLabel("Available placeholders: <b>{url} {post_title} {link_text} {platform} {key}</b>")
        self.custom_format_input = QTextEdit()
        self.custom_format_input.setAcceptRichText(False)
        self.custom_format_input.setPlaceholderText("Enter your format, e.g., ({url}) or Title: {post_title}\\nLink: {url}")
        self.custom_format_input.setText("{url}")
        self.custom_format_input.setFixedHeight(80)
        custom_layout.addWidget(placeholders_label)
        custom_layout.addWidget(self.custom_format_input)
        txt_options_layout.addWidget(self.custom_format_widget)

        separator = QLabel("-" * 70)
        txt_options_layout.addWidget(separator)
        self.check_separate_files = QCheckBox("Save each platform to a separate file (e.g., export_mega.txt)")
        txt_options_layout.addWidget(self.check_separate_files)

        self.txt_options_group.setLayout(txt_options_layout)
        main_layout.addWidget(self.txt_options_group)

        # File Path Selection
        path_layout = QHBoxLayout()
        self.path_input = QLineEdit()
        self.browse_button = QPushButton("Browse...")
        path_layout.addWidget(self.path_input)
        path_layout.addWidget(self.browse_button)
        main_layout.addLayout(path_layout)

        # Action Buttons
        button_layout = QHBoxLayout()
        button_layout.addStretch(1)
        self.export_button = QPushButton("Export")
        self.cancel_button = QPushButton("Cancel")
        button_layout.addWidget(self.export_button)
        button_layout.addWidget(self.cancel_button)
        main_layout.addLayout(button_layout)

        # Connections
        self.radio_txt.toggled.connect(self._update_options_visibility)
        self.radio_simple.toggled.connect(self._update_options_visibility)
        self.radio_detailed.toggled.connect(self._update_options_visibility)
        self.radio_custom.toggled.connect(self._update_options_visibility)
        self.browse_button.clicked.connect(self._browse)
        self.export_button.clicked.connect(self._accept_and_export)
        self.cancel_button.clicked.connect(self.reject)

        self.radio_simple.setChecked(True)

    def _update_options_visibility(self):
        is_txt = self.radio_txt.isChecked()
        self.txt_options_group.setVisible(is_txt)

        self.detailed_options_widget.setVisible(is_txt and self.radio_detailed.isChecked())
        self.custom_format_widget.setVisible(is_txt and self.radio_custom.isChecked())

    def _browse(self):
        # Connected to browse_button.clicked; the old `base_filepath` parameter only
        # ever received the signal's checked-state bool, so it has been dropped.
        is_separate_files_mode = self.radio_txt.isChecked() and self.check_separate_files.isChecked()

        if is_separate_files_mode:
            dir_path = QFileDialog.getExistingDirectory(self, "Select Folder to Save Files")
            if dir_path:
                self.path_input.setText(os.path.join(dir_path, "exported_links"))
        else:
            default_filename = "exported_links"
            file_filter = "Text Files (*.txt)"
            if self.radio_json.isChecked():
                default_filename += ".json"
                file_filter = "JSON Files (*.json)"
            else:
                default_filename += ".txt"

            filepath, _ = QFileDialog.getSaveFileName(self, "Save Links", default_filename, file_filter)
            if filepath:
                self.path_input.setText(filepath)

    def _accept_and_export(self):
        filepath = self.path_input.text().strip()
        if not filepath:
            QMessageBox.warning(self, "Input Error", "Please select a file path or folder.")
            return

        try:
            if self.radio_txt.isChecked():
                self._write_txt_file(filepath)
            else:
                self._write_json_file(filepath)

            QMessageBox.information(self, "Export Successful", "Links successfully exported!")
            self.accept()
        except OSError as e:
            QMessageBox.critical(self, "Export Error", f"Could not write to file:\n{e}")
        except (KeyError, IndexError, ValueError) as e:
            # A custom template with an unknown or malformed placeholder raises from
            # str.format(); surface it to the user instead of letting the slot crash.
            QMessageBox.critical(self, "Export Error", f"Invalid custom format template:\n{e}")

    def _write_txt_file(self, base_filepath):
        if self.check_separate_files.isChecked():
            links_by_platform = defaultdict(list)
            for _, _, link_url, platform, _ in self.links_data:
                sanitized_platform = re.sub(r'[<>:"/\\|?*]', '_', platform.lower().replace(' ', '_'))
                links_by_platform[sanitized_platform].append(link_url)

            base, ext = os.path.splitext(base_filepath)
            if not ext:
                ext = ".txt"

            for platform_key, links in links_by_platform.items():
                platform_filepath = f"{base}_{platform_key}{ext}"
                with open(platform_filepath, 'w', encoding='utf-8') as f:
                    for url in links:
                        f.write(url + "\n")
            return

        with open(base_filepath, 'w', encoding='utf-8') as f:
            if self.radio_simple.isChecked():
                for _, _, link_url, _, _ in self.links_data:
                    f.write(link_url + "\n")

            elif self.radio_detailed.isChecked():
                include_titles = self.check_include_titles.isChecked()
                include_text = self.check_include_link_text.isChecked()
                include_platform = self.check_include_platform.isChecked()
                current_title = None
                for post_title, link_text, link_url, platform, _ in self.links_data:
                    if include_titles and post_title != current_title:
                        if current_title is not None:
                            f.write("\n" + "=" * 60 + "\n\n")
                        f.write(f"# Post: {post_title}\n")
                        current_title = post_title
                    line_parts = [link_url]
                    if include_platform:
                        line_parts.append(f"Platform: {platform}")
                    if include_text and link_text:
                        line_parts.append(f"Description: {link_text}")
                    f.write(" | ".join(line_parts) + "\n")

            elif self.radio_custom.isChecked():
                template = self.custom_format_input.toPlainText().replace("\\n", "\n")
                for post_title, link_text, link_url, platform, decryption_key in self.links_data:
                    formatted_line = template.format(
                        url=link_url,
                        post_title=post_title,
                        link_text=link_text,
                        platform=platform,
                        key=decryption_key or ""
                    )
                    f.write(formatted_line)
                    if not template.endswith('\n'):
                        f.write('\n')

    def _write_json_file(self, filepath):
        output_data = []
        for post_title, link_text, link_url, platform, decryption_key in self.links_data:
            output_data.append({
                "post_title": post_title,
                "url": link_url,
                "link_text": link_text,
                "platform": platform,
                "key": decryption_key or None
            })

        with open(filepath, 'w', encoding='utf-8') as f:
            json.dump(output_data, f, indent=2)
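Usage note: the writer methods above unpack each entry of links_data as a fixed 5-tuple (post_title, link_text, link_url, platform, decryption_key). A minimal caller sketch under that assumption; the sample data is invented:

from PyQt5.QtWidgets import QDialog

links_data = [
    ("Post A", "Full set", "https://mega.nz/file/abc", "Mega", "a-decryption-key"),
    ("Post B", "", "https://drive.google.com/xyz", "GDrive", None),
]
dialog = ExportLinksDialog(links_data, parent=None)
if dialog.exec_() == QDialog.Accepted:
    print("Links exported")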
src/ui/dialogs/ExportOptionsDialog.py (new file, +118 lines) @@ -0,0 +1,118 @@
# --- PyQt5 Imports ---
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QPushButton, QVBoxLayout,
    QRadioButton, QButtonGroup
)

# --- Local Application Imports ---
# This assumes the new project structure is in place.
from ...i18n.translator import get_translation
# get_app_icon_object is defined in the main window module in this refactoring plan.
from ..main_window import get_app_icon_object
from ...utils.resolution import get_dark_theme


class ExportOptionsDialog(QDialog):
    """
    Dialog to choose the export format for error file links.
    It allows the user to select between exporting only the URLs or
    exporting URLs with additional details.
    """
    # Constants to define the export modes
    EXPORT_MODE_LINK_ONLY = 1
    EXPORT_MODE_WITH_DETAILS = 2

    def __init__(self, parent_app, parent=None):
        """
        Initializes the dialog.

        Args:
            parent_app (DownloaderApp): A reference to the main application window for theming and translations.
            parent (QWidget, optional): The parent widget. Defaults to None.
        """
        super().__init__(parent)
        self.parent_app = parent_app
        self.setModal(True)
        # Default option
        self.selected_option = self.EXPORT_MODE_LINK_ONLY

        # --- Basic Window Setup ---
        app_icon = get_app_icon_object()
        if app_icon and not app_icon.isNull():
            self.setWindowIcon(app_icon)

        # Set window size dynamically
        screen_height = QApplication.primaryScreen().availableGeometry().height() if QApplication.primaryScreen() else 768
        scale_factor = screen_height / 768.0
        base_min_w = 350
        scaled_min_w = int(base_min_w * scale_factor)
        self.setMinimumWidth(scaled_min_w)

        # --- Initialize UI and Apply Theming ---
        self._init_ui()
        self._retranslate_ui()
        self._apply_theme()

    def _init_ui(self):
        """Initializes all UI components and layouts for the dialog."""
        layout = QVBoxLayout(self)

        self.description_label = QLabel()
        layout.addWidget(self.description_label)

        self.radio_group = QButtonGroup(self)

        self.radio_link_only = QRadioButton()
        self.radio_link_only.setChecked(True)
        self.radio_group.addButton(self.radio_link_only, self.EXPORT_MODE_LINK_ONLY)
        layout.addWidget(self.radio_link_only)

        self.radio_with_details = QRadioButton()
        self.radio_group.addButton(self.radio_with_details, self.EXPORT_MODE_WITH_DETAILS)
        layout.addWidget(self.radio_with_details)

        # --- Action Buttons ---
        button_layout = QHBoxLayout()
        self.export_button = QPushButton()
        self.export_button.clicked.connect(self._handle_export)
        self.export_button.setDefault(True)

        self.cancel_button = QPushButton()
        self.cancel_button.clicked.connect(self.reject)

        button_layout.addStretch(1)
        button_layout.addWidget(self.export_button)
        button_layout.addWidget(self.cancel_button)
        layout.addLayout(button_layout)

    def _tr(self, key, default_text=""):
        """Helper to get translation based on the main application's current language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        """Sets the text for all translatable UI elements."""
        self.setWindowTitle(self._tr("export_options_dialog_title", "Export Options"))
        self.description_label.setText(self._tr("export_options_description_label", "Choose the format for exporting error file links:"))
        self.radio_link_only.setText(self._tr("export_options_radio_link_only", "Link per line (URL only)"))
        self.radio_link_only.setToolTip(self._tr("export_options_radio_link_only_tooltip", "Exports only the direct download URL..."))
        self.radio_with_details.setText(self._tr("export_options_radio_with_details", "Export with details (URL [Post, File info])"))
        self.radio_with_details.setToolTip(self._tr("export_options_radio_with_details_tooltip", "Exports the URL followed by details..."))
        self.export_button.setText(self._tr("export_options_export_button", "Export"))
        self.cancel_button.setText(self._tr("fav_posts_cancel_button", "Cancel"))

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        if self.parent_app and hasattr(self.parent_app, 'current_theme') and self.parent_app.current_theme == "dark":
            if hasattr(self.parent_app, 'get_dark_theme'):
                self.setStyleSheet(self.parent_app.get_dark_theme())

    def _handle_export(self):
        """Sets the selected export option and accepts the dialog."""
        self.selected_option = self.radio_group.checkedId()
        self.accept()

    def get_selected_option(self):
        """Returns the export mode chosen by the user."""
        return self.selected_option
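Usage note: this mirrors the calling pattern already used in the error-files export earlier in this diff; `parent_app` below stands in for the running DownloaderApp instance and is an assumption of this sketch:

from PyQt5.QtWidgets import QDialog

options_dialog = ExportOptionsDialog(parent_app=parent_app, parent=None)
if options_dialog.exec_() == QDialog.Accepted:
    if options_dialog.get_selected_option() == ExportOptionsDialog.EXPORT_MODE_WITH_DETAILS:
        print("Export URLs with post/file details")
    else:
        print("Export bare URLs, one per line")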
src/ui/dialogs/FavoriteArtistsDialog.py (new file, +330 lines) @@ -0,0 +1,330 @@
# --- Standard Library Imports ---
import html
import re

# --- Third-Party Library Imports ---
import cloudscraper  # MODIFIED: Import cloudscraper
from PyQt5.QtCore import QCoreApplication, Qt
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QLineEdit, QListWidget,
    QListWidgetItem, QMessageBox, QPushButton, QVBoxLayout
)

# --- Local Application Imports ---
from ...i18n.translator import get_translation
from ..assets import get_app_icon_object
from ...utils.network_utils import prepare_cookies_for_request
from .CookieHelpDialog import CookieHelpDialog
from ...utils.resolution import get_dark_theme

class FavoriteArtistsDialog(QDialog):
    """Dialog to display and select favorite artists."""
    def __init__(self, parent_app, cookies_config):
        super().__init__(parent_app)
        self.parent_app = parent_app
        self.cookies_config = cookies_config
        self.all_fetched_artists = []

        app_icon = get_app_icon_object()
        if not app_icon.isNull():
            self.setWindowIcon(app_icon)
        self.selected_artist_urls = []
        # Initialize here so get_selected_artists() is safe even if the dialog is rejected.
        self.selected_artists_data = []

        self.setModal(True)
        self.setMinimumSize(500, 500)

        self._init_ui()
        self._fetch_favorite_artists()

    def _get_domain_for_service(self, service_name):
        service_lower = service_name.lower()
        coomer_primary_services = {'onlyfans', 'fansly', 'manyvids', 'candfans'}
        if service_lower in coomer_primary_services:
            return "coomer.st"
        else:
            return "kemono.cr"

    def _tr(self, key, default_text=""):
        """Helper to get translation based on current app language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        self.setWindowTitle(self._tr("fav_artists_dialog_title", "Favorite Artists"))
        self.status_label.setText(self._tr("fav_artists_loading_status", "Loading favorite artists..."))
        self.search_input.setPlaceholderText(self._tr("fav_artists_search_placeholder", "Search artists..."))
        self.select_all_button.setText(self._tr("fav_artists_select_all_button", "Select All"))
        self.deselect_all_button.setText(self._tr("fav_artists_deselect_all_button", "Deselect All"))
        self.download_button.setText(self._tr("fav_artists_download_selected_button", "Download Selected"))
        self.cancel_button.setText(self._tr("fav_artists_cancel_button", "Cancel"))

    def _init_ui(self):
        main_layout = QVBoxLayout(self)

        self.status_label = QLabel()
        self.status_label.setAlignment(Qt.AlignCenter)
        main_layout.addWidget(self.status_label)

        self.search_input = QLineEdit()
        self.search_input.textChanged.connect(self._filter_artist_list_display)
        main_layout.addWidget(self.search_input)

        self.artist_list_widget = QListWidget()
        self.artist_list_widget.setStyleSheet("""
            QListWidget::item {
                border-bottom: 1px solid #4A4A4A; /* Slightly softer line */
                padding-top: 4px;
                padding-bottom: 4px;
            }""")
        main_layout.addWidget(self.artist_list_widget)
        self.artist_list_widget.setAlternatingRowColors(True)
        self.search_input.setVisible(False)
        self.artist_list_widget.setVisible(False)

        combined_buttons_layout = QHBoxLayout()

        self.select_all_button = QPushButton()
        self.select_all_button.clicked.connect(self._select_all_items)
        combined_buttons_layout.addWidget(self.select_all_button)

        self.deselect_all_button = QPushButton()
        self.deselect_all_button.clicked.connect(self._deselect_all_items)
        combined_buttons_layout.addWidget(self.deselect_all_button)

        self.download_button = QPushButton()
        self.download_button.clicked.connect(self._accept_selection_action)
        self.download_button.setEnabled(False)
        self.download_button.setDefault(True)
        combined_buttons_layout.addWidget(self.download_button)

        self.cancel_button = QPushButton()
        self.cancel_button.clicked.connect(self.reject)
        combined_buttons_layout.addWidget(self.cancel_button)

        combined_buttons_layout.addStretch(1)
        main_layout.addLayout(combined_buttons_layout)

        self._retranslate_ui()
        if hasattr(self.parent_app, 'get_dark_theme') and self.parent_app.current_theme == "dark":
            self.setStyleSheet(self.parent_app.get_dark_theme())

    def _logger(self, message):
        """Helper to log messages, either to parent app or console."""
        if hasattr(self.parent_app, 'log_signal') and self.parent_app.log_signal:
            self.parent_app.log_signal.emit(f"[FavArtistsDialog] {message}")
        else:
            print(f"[FavArtistsDialog] {message}")

    def _show_content_elements(self, show):
        """Helper to show/hide content-related widgets."""
        self.search_input.setVisible(show)
        self.artist_list_widget.setVisible(show)

    def _fetch_favorite_artists(self):
        # --- FIX: Use cloudscraper and add proper headers ---
        scraper = cloudscraper.create_scraper()
        # --- END FIX ---

        if self.cookies_config['use_cookie']:
            kemono_cookies = prepare_cookies_for_request(
                True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                self.cookies_config['app_base_dir'], self._logger, target_domain="kemono.cr"
            )
            if not kemono_cookies:
                self._logger("No cookies for kemono.cr, trying fallback kemono.su...")
                kemono_cookies = prepare_cookies_for_request(
                    True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                    self.cookies_config['app_base_dir'], self._logger, target_domain="kemono.su"
                )

            coomer_cookies = prepare_cookies_for_request(
                True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                self.cookies_config['app_base_dir'], self._logger, target_domain="coomer.st"
            )
            if not coomer_cookies:
                self._logger("No cookies for coomer.st, trying fallback coomer.su...")
                coomer_cookies = prepare_cookies_for_request(
                    True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                    self.cookies_config['app_base_dir'], self._logger, target_domain="coomer.su"
                )

            if not kemono_cookies and not coomer_cookies:
                self.status_label.setText(self._tr("fav_artists_cookies_required_status", "Error: Cookies enabled but could not be loaded for any source."))
                self._logger("Error: Cookies enabled but no valid cookies were loaded. Showing help dialog.")
                cookie_help_dialog = CookieHelpDialog(self.parent_app, self)
                cookie_help_dialog.exec_()
                self.download_button.setEnabled(False)
                return

        self.all_fetched_artists = []
        fetched_any_successfully = False
        errors_occurred = []
        any_cookies_loaded_successfully_for_any_source = False

        api_sources = [
            {"name": "Kemono.cr", "url": "https://kemono.cr/api/v1/account/favorites?type=artist", "domain": "kemono.cr"},
            {"name": "Coomer.st", "url": "https://coomer.st/api/v1/account/favorites?type=artist", "domain": "coomer.st"}
        ]

        for source in api_sources:
            self._logger(f"Attempting to fetch favorite artists from: {source['name']} ({source['url']})")
            self.status_label.setText(self._tr("fav_artists_loading_from_source_status", "⏳ Loading favorites from {source_name}...").format(source_name=source['name']))
            QCoreApplication.processEvents()

            cookies_dict_for_source = None
            if self.cookies_config['use_cookie']:
                primary_domain = source['domain']
                fallback_domain = "kemono.su" if "kemono" in primary_domain else "coomer.su"

                cookies_dict_for_source = prepare_cookies_for_request(
                    True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                    self.cookies_config['app_base_dir'], self._logger, target_domain=primary_domain
                )
                if not cookies_dict_for_source:
                    self._logger(f"Warning ({source['name']}): No cookies for '{primary_domain}'. Trying fallback '{fallback_domain}'...")
                    cookies_dict_for_source = prepare_cookies_for_request(
                        True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                        self.cookies_config['app_base_dir'], self._logger, target_domain=fallback_domain
                    )

                if cookies_dict_for_source:
                    any_cookies_loaded_successfully_for_any_source = True
                else:
                    self._logger(f"Warning ({source['name']}): Cookies enabled but not loaded for this source. Fetch may fail.")
            try:
                # --- FIX: Add Referer and Accept headers ---
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                    'Referer': f"https://{source['domain']}/favorites",
                    'Accept': 'text/css'
                }
                # --- END FIX ---

                # --- FIX: Use scraper instead of requests ---
                response = scraper.get(source['url'], headers=headers, cookies=cookies_dict_for_source, timeout=20)
                # --- END FIX ---

                response.raise_for_status()
                artists_data_from_api = response.json()

                if not isinstance(artists_data_from_api, list):
                    error_msg = f"Error ({source['name']}): API did not return a list of artists (got {type(artists_data_from_api)})."
                    self._logger(error_msg)
                    errors_occurred.append(error_msg)
                    continue

                processed_artists_from_source = 0
                for artist_entry in artists_data_from_api:
                    artist_id = artist_entry.get("id")
                    artist_name = html.unescape(artist_entry.get("name", "Unknown Artist").strip())
                    artist_service_platform = artist_entry.get("service")

                    if artist_id and artist_name and artist_service_platform:
                        artist_page_domain = self._get_domain_for_service(artist_service_platform)
                        full_url = f"https://{artist_page_domain}/{artist_service_platform}/user/{artist_id}"

                        self.all_fetched_artists.append({
                            'name': artist_name,
                            'url': full_url,
                            'service': artist_service_platform,
                            'id': artist_id,
                            '_source_api': source['name']
                        })
                        processed_artists_from_source += 1
                    else:
                        self._logger(f"Warning ({source['name']}): Skipping favorite artist entry due to missing data: {artist_entry}")

                if processed_artists_from_source > 0:
                    fetched_any_successfully = True
                    self._logger(f"Fetched {processed_artists_from_source} artists from {source['name']}.")

            except Exception as e:
                error_msg = f"Error fetching favorites from {source['name']}: {e}"
                self._logger(error_msg)
                errors_occurred.append(error_msg)

        if self.cookies_config['use_cookie'] and not any_cookies_loaded_successfully_for_any_source:
            self.status_label.setText(self._tr("fav_artists_cookies_required_status", "Error: Cookies enabled but could not be loaded for any source."))
            self._logger("Error: Cookies enabled but no cookies loaded for any source. Showing help dialog.")
            cookie_help_dialog = CookieHelpDialog(self.parent_app, self)
            cookie_help_dialog.exec_()
            self.download_button.setEnabled(False)
            if not fetched_any_successfully:
                errors_occurred.append("Cookies enabled but could not be loaded for any API source.")

        unique_artists_map = {}
        for artist in self.all_fetched_artists:
            key = (artist['service'].lower(), str(artist['id']).lower())
            if key not in unique_artists_map:
                unique_artists_map[key] = artist
        self.all_fetched_artists = list(unique_artists_map.values())

        self.all_fetched_artists.sort(key=lambda x: x['name'].lower())
        self._populate_artist_list_widget()

        if fetched_any_successfully and self.all_fetched_artists:
            self.status_label.setText(self._tr("fav_artists_found_status", "Found {count} total favorite artist(s).").format(count=len(self.all_fetched_artists)))
            self._show_content_elements(True)
            self.download_button.setEnabled(True)
        elif not fetched_any_successfully and not errors_occurred:
            self.status_label.setText(self._tr("fav_artists_none_found_status", "No favorite artists found on Kemono or Coomer."))
            self._show_content_elements(False)
            self.download_button.setEnabled(False)
        else:
            final_error_message = self._tr("fav_artists_failed_status", "Failed to fetch favorites.")
            if errors_occurred:
                final_error_message += " Errors: " + "; ".join(errors_occurred)
            self.status_label.setText(final_error_message)
            self._show_content_elements(False)
            self.download_button.setEnabled(False)
        if fetched_any_successfully and not self.all_fetched_artists:
            self.status_label.setText(self._tr("fav_artists_no_favorites_after_processing", "No favorite artists found after processing."))

    def _populate_artist_list_widget(self, artists_to_display=None):
        self.artist_list_widget.clear()
        source_list = artists_to_display if artists_to_display is not None else self.all_fetched_artists
        for artist_data in source_list:
            item = QListWidgetItem(f"{artist_data['name']} ({artist_data.get('service', 'N/A').capitalize()})")
            item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
            item.setCheckState(Qt.Unchecked)
            item.setData(Qt.UserRole, artist_data)
            self.artist_list_widget.addItem(item)

    def _filter_artist_list_display(self):
        search_text = self.search_input.text().lower().strip()
        if not search_text:
            self._populate_artist_list_widget()
            return

        filtered_artists = [
            artist for artist in self.all_fetched_artists
            if search_text in artist['name'].lower() or search_text in artist['url'].lower()
        ]
        self._populate_artist_list_widget(filtered_artists)

    def _select_all_items(self):
        for i in range(self.artist_list_widget.count()):
            self.artist_list_widget.item(i).setCheckState(Qt.Checked)

    def _deselect_all_items(self):
        for i in range(self.artist_list_widget.count()):
            self.artist_list_widget.item(i).setCheckState(Qt.Unchecked)

    def _accept_selection_action(self):
        self.selected_artists_data = []
        for i in range(self.artist_list_widget.count()):
            item = self.artist_list_widget.item(i)
            if item.checkState() == Qt.Checked:
                self.selected_artists_data.append(item.data(Qt.UserRole))

        if not self.selected_artists_data:
            QMessageBox.information(self, "No Selection", "Please select at least one artist to download.")
            return
        self.accept()

    def get_selected_artists(self):
        return self.selected_artists_data
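Usage note: a sketch of consuming this dialog's result. Each selected entry is the dict stored via setData(Qt.UserRole, ...) in _populate_artist_list_widget, so the keys below come straight from this file; `parent_app` and `cookies_config` are assumed to exist in the caller:

from PyQt5.QtWidgets import QDialog

dialog = FavoriteArtistsDialog(parent_app, cookies_config)
if dialog.exec_() == QDialog.Accepted:
    for artist in dialog.get_selected_artists():
        print(artist['name'], artist['service'], artist['url'])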
src/ui/dialogs/FavoritePostsDialog.py (new file, +632 lines) @@ -0,0 +1,632 @@
import html
import os
import sys
import threading
import time
import traceback
import json
import re
from collections import defaultdict
import cloudscraper  # MODIFIED: Import cloudscraper
from PyQt5.QtCore import QCoreApplication, Qt, pyqtSignal, QThread
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QLineEdit, QListWidget,
    QListWidgetItem, QMessageBox, QPushButton, QVBoxLayout, QProgressBar,
    QWidget, QCheckBox
)
from ...i18n.translator import get_translation
from ..assets import get_app_icon_object
from ...utils.network_utils import prepare_cookies_for_request
from .CookieHelpDialog import CookieHelpDialog
from ...core.api_client import download_from_api
from ...utils.resolution import get_dark_theme


class FavoritePostsFetcherThread(QThread):
    """Worker thread to fetch favorite posts and creator names."""
    status_update = pyqtSignal(str)
    progress_bar_update = pyqtSignal(int, int)
    finished = pyqtSignal(list, str)

    def __init__(self, cookies_config, parent_logger_func, target_domain_preference=None):
        super().__init__()
        self.cookies_config = cookies_config
        self.parent_logger_func = parent_logger_func
        self.target_domain_preference = target_domain_preference
        self.cancellation_event = threading.Event()
        self.error_key_map = {
            "kemono.cr": "kemono_su",
            "coomer.st": "coomer_su"
        }

    def _logger(self, message):
        self.parent_logger_func(f"[FavPostsFetcherThread] {message}")

    def run(self):
        # --- FIX: Use cloudscraper and add proper headers ---
        scraper = cloudscraper.create_scraper()
        # --- END FIX ---

        all_fetched_posts_temp = []
        error_messages_for_summary = []
        fetched_any_successfully = False
        any_cookies_loaded_successfully_for_any_source = False

        self.status_update.emit("key_fetching_fav_post_list_init")
        self.progress_bar_update.emit(0, 0)

        api_sources = [
            {"name": "Kemono.cr", "url": "https://kemono.cr/api/v1/account/favorites?type=post", "domain": "kemono.cr"},
            {"name": "Coomer.st", "url": "https://coomer.st/api/v1/account/favorites?type=post", "domain": "coomer.st"}
        ]

        api_sources_to_try = []
        if self.target_domain_preference:
            self._logger(f"Targeting specific domain for favorites: {self.target_domain_preference}")
            for source_def in api_sources:
                if source_def["domain"] == self.target_domain_preference:
                    api_sources_to_try.append(source_def)
                    break
            if not api_sources_to_try:
                self._logger(f"Warning: Preferred domain '{self.target_domain_preference}' not a recognized API source. Fetching from all.")
                api_sources_to_try = api_sources
        else:
            self._logger("No specific domain preference, or both domains have cookies. Will attempt to fetch from all sources.")
            api_sources_to_try = api_sources

        for source in api_sources_to_try:
            if self.cancellation_event.is_set():
                self.finished.emit([], "KEY_FETCH_CANCELLED_DURING")
                return
            cookies_dict_for_source = None
            if self.cookies_config['use_cookie']:
                primary_domain = source['domain']
                fallback_domain = "kemono.su" if "kemono" in primary_domain else "coomer.su"

                cookies_dict_for_source = prepare_cookies_for_request(
                    True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                    self.cookies_config['app_base_dir'], self._logger, target_domain=primary_domain
                )

                if not cookies_dict_for_source and fallback_domain:
                    self._logger(f"Warning ({source['name']}): No cookies for '{primary_domain}'. Trying fallback '{fallback_domain}'...")
                    cookies_dict_for_source = prepare_cookies_for_request(
                        True, self.cookies_config['cookie_text'], self.cookies_config['selected_cookie_file'],
                        self.cookies_config['app_base_dir'], self._logger, target_domain=fallback_domain
                    )

                if cookies_dict_for_source:
                    any_cookies_loaded_successfully_for_any_source = True
                else:
                    self._logger(f"Warning ({source['name']}): Cookies enabled but could not be loaded for this domain. Fetch might fail if cookies are required.")

            self._logger(f"Attempting to fetch favorite posts from: {source['name']} ({source['url']})")
            # Key on the source domain; the map's keys are domains like "kemono.cr",
            # so looking up source['name'] ("Kemono.cr") would always miss.
            source_key_part = self.error_key_map.get(source['domain'], source['name'].lower().replace('.', '_'))
            self.status_update.emit(f"key_fetching_from_source_{source_key_part}")
            QCoreApplication.processEvents()

            try:
                # --- FIX: Add Referer and Accept headers ---
                headers = {
                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36',
                    'Referer': f"https://{source['domain']}/favorites",
                    'Accept': 'text/css'
                }
                # --- END FIX ---

                # --- FIX: Use scraper instead of requests ---
                response = scraper.get(source['url'], headers=headers, cookies=cookies_dict_for_source, timeout=20)
                # --- END FIX ---

                response.raise_for_status()
                posts_data_from_api = response.json()

                if not isinstance(posts_data_from_api, list):
                    err_detail = f"Error ({source['name']}): API did not return a list of posts (got {type(posts_data_from_api)})."
                    self._logger(err_detail)
                    error_messages_for_summary.append(err_detail)
                    continue

                processed_posts_from_source = 0
                for post_entry in posts_data_from_api:
                    post_id = post_entry.get("id")
                    post_title = html.unescape(post_entry.get("title", "Untitled Post").strip())
                    service = post_entry.get("service")
                    creator_id = post_entry.get("user")
                    added_date_str = post_entry.get("added", post_entry.get("published", ""))

                    if post_id and post_title and service and creator_id:
                        all_fetched_posts_temp.append({
                            'post_id': post_id, 'title': post_title, 'service': service,
                            'creator_id': creator_id, 'added_date': added_date_str,
                            '_source_api': source['name']
                        })
                        processed_posts_from_source += 1
                    else:
                        self._logger(f"Warning ({source['name']}): Skipping favorite post entry due to missing data: {post_entry}")

                if processed_posts_from_source > 0:
                    fetched_any_successfully = True
                    self._logger(f"Fetched {processed_posts_from_source} posts from {source['name']}.")

            except Exception as e:
                err_detail = f"Error fetching favorite posts from {source['name']}: {e}"
                self._logger(err_detail)
                error_messages_for_summary.append(err_detail)
                if hasattr(e, 'response') and e.response is not None and e.response.status_code == 401:
                    self.finished.emit([], "KEY_AUTH_FAILED")
                    self._logger(f"Authorization failed for {source['name']}, emitting KEY_AUTH_FAILED.")
                    return

        if self.cancellation_event.is_set():
            self.finished.emit([], "KEY_FETCH_CANCELLED_AFTER")
            return

        if self.cookies_config['use_cookie'] and not any_cookies_loaded_successfully_for_any_source:
            if self.target_domain_preference and not any_cookies_loaded_successfully_for_any_source:
                domain_key_part = self.error_key_map.get(self.target_domain_preference, self.target_domain_preference.lower().replace('.', '_'))
                self.finished.emit([], f"KEY_COOKIES_REQUIRED_BUT_NOT_FOUND_FOR_DOMAIN_{domain_key_part}")
                return
            self.finished.emit([], "KEY_COOKIES_REQUIRED_BUT_NOT_FOUND_GENERIC")
            return

        unique_posts_map = {}
        for post in all_fetched_posts_temp:
            key = (post['service'].lower(), str(post['creator_id']).lower(), str(post['post_id']).lower())
            if key not in unique_posts_map:
                unique_posts_map[key] = post
        all_fetched_posts_temp = list(unique_posts_map.values())

        all_fetched_posts_temp.sort(key=lambda x: (x.get('_source_api', '').lower(), x.get('service', '').lower(), str(x.get('creator_id', '')).lower(), (x.get('added_date') or '')), reverse=False)

        if error_messages_for_summary:
            error_summary_str = "; ".join(error_messages_for_summary)
            if not fetched_any_successfully:
                self.finished.emit([], f"KEY_FETCH_FAILED_GENERIC_{error_summary_str[:50]}")
            else:
                self.finished.emit(all_fetched_posts_temp, f"KEY_FETCH_PARTIAL_SUCCESS_{error_summary_str[:50]}")
        elif not all_fetched_posts_temp and not fetched_any_successfully and not self.target_domain_preference:
            self.finished.emit([], "KEY_NO_FAVORITES_FOUND_ALL_PLATFORMS")
        else:
            self.finished.emit(all_fetched_posts_temp, "KEY_FETCH_SUCCESS")


class PostListItemWidget(QWidget):
    """Custom widget for displaying a single post in the FavoritePostsDialog list."""
    def __init__(self, post_data_dict, parent_dialog_ref, parent=None):
        super().__init__(parent)
        self.post_data = post_data_dict
        self.parent_dialog = parent_dialog_ref

        self.layout = QHBoxLayout(self)
        self.layout.setContentsMargins(5, 3, 5, 3)
        self.layout.setSpacing(10)

        self.checkbox = QCheckBox()
        self.layout.addWidget(self.checkbox)

        self.info_label = QLabel()
        self.info_label.setWordWrap(True)
        self.info_label.setTextFormat(Qt.RichText)
        self.layout.addWidget(self.info_label, 1)

        self._setup_display_text()

    def _setup_display_text(self):
        suffix_plain = self.post_data.get('suffix_for_display', "")
        title_plain = self.post_data.get('title', 'Untitled Post')
        escaped_suffix = html.escape(suffix_plain)
        escaped_title = html.escape(title_plain)
        p_style_paragraph = "font-size:10.5pt; margin:0; padding:0;"
        title_span_style = "font-weight:bold; color:#E0E0E0;"
        suffix_span_style = "color:#999999; font-weight:normal; font-size:9.5pt;"

        if escaped_suffix:
            display_html_content = f"<p style='{p_style_paragraph}'><span style='{title_span_style}'>{escaped_title}</span><span style='{suffix_span_style}'>{escaped_suffix}</span></p>"
        else:
            display_html_content = f"<p style='{p_style_paragraph}'><span style='{title_span_style}'>{escaped_title}</span></p>"

        self.info_label.setText(display_html_content)

    def isChecked(self):
        return self.checkbox.isChecked()

    def setCheckState(self, state):
        self.checkbox.setCheckState(state)

    def get_post_data(self):
        return self.post_data


class FavoritePostsDialog(QDialog):
    """Dialog to display and select favorite posts."""
    def __init__(self, parent_app, cookies_config, known_names_list_ref, target_domain_preference=None):
        super().__init__(parent_app)
        self.parent_app = parent_app
        self.cookies_config = cookies_config
        self.all_fetched_posts = []
        self.selected_posts_data = []
        self.known_names_list_ref = known_names_list_ref
        self.target_domain_preference_for_this_fetch = target_domain_preference
        self.creator_name_cache = {}
        self.displayable_grouped_posts = {}
        self.fetcher_thread = None

        app_icon = get_app_icon_object()
        if not app_icon.isNull():
            self.setWindowIcon(app_icon)

        self.setModal(True)
        self.setMinimumSize(600, 600)
        if hasattr(self.parent_app, 'get_dark_theme'):
            self.setStyleSheet(self.parent_app.get_dark_theme())

        self._init_ui()
        self._load_creator_names_from_file()
        self._retranslate_ui()
        self._start_fetching_favorite_posts()

    def _update_status_label_from_key(self, status_key):
        """Translates a status key and updates the status label."""
        translated_status = self._tr(status_key.lower(), status_key)
        self.status_label.setText(translated_status)

    def _init_ui(self):
        main_layout = QVBoxLayout(self)

        self.status_label = QLabel()
        self.status_label.setAlignment(Qt.AlignCenter)
        main_layout.addWidget(self.status_label)

        self.progress_bar = QProgressBar()
        self.progress_bar.setTextVisible(False)
        self.progress_bar.setVisible(False)
        main_layout.addWidget(self.progress_bar)

        self.search_input = QLineEdit()
        self.search_input.textChanged.connect(self._filter_post_list_display)
        main_layout.addWidget(self.search_input)

        self.post_list_widget = QListWidget()
        self.post_list_widget.setStyleSheet("""
            QListWidget::item {
                border-bottom: 1px solid #4A4A4A;
                padding-top: 4px;
                padding-bottom: 4px;
            }""")
        self.post_list_widget.setAlternatingRowColors(True)
        main_layout.addWidget(self.post_list_widget)

        combined_buttons_layout = QHBoxLayout()
        self.select_all_button = QPushButton()
        self.select_all_button.clicked.connect(self._select_all_items)
        combined_buttons_layout.addWidget(self.select_all_button)

        self.deselect_all_button = QPushButton()
        self.deselect_all_button.clicked.connect(self._deselect_all_items)
        combined_buttons_layout.addWidget(self.deselect_all_button)

        self.download_button = QPushButton()
        self.download_button.clicked.connect(self._accept_selection_action)
        self.download_button.setEnabled(False)
        self.download_button.setDefault(True)
        combined_buttons_layout.addWidget(self.download_button)

        self.cancel_button = QPushButton()
        self.cancel_button.clicked.connect(self.reject)
        combined_buttons_layout.addWidget(self.cancel_button)
        combined_buttons_layout.addStretch(1)
        main_layout.addLayout(combined_buttons_layout)

    def _tr(self, key, default_text=""):
        """Helper to get translation based on current app language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        self.setWindowTitle(self._tr("fav_posts_dialog_title", "Favorite Posts"))
        self.status_label.setText(self._tr("fav_posts_loading_status", "Loading favorite posts..."))
        self.search_input.setPlaceholderText(self._tr("fav_posts_search_placeholder", "Search posts (title, creator name, ID, service)..."))
        self.select_all_button.setText(self._tr("fav_posts_select_all_button", "Select All"))
        self.deselect_all_button.setText(self._tr("fav_posts_deselect_all_button", "Deselect All"))
        self.download_button.setText(self._tr("fav_posts_download_selected_button", "Download Selected"))
        self.cancel_button.setText(self._tr("fav_posts_cancel_button", "Cancel"))

    def _logger(self, message):
        if hasattr(self.parent_app, 'log_signal') and self.parent_app.log_signal:
            self.parent_app.log_signal.emit(f"[FavPostsDialog] {message}")
        else:
            print(f"[FavPostsDialog] {message}")

    def _load_creator_names_from_file(self):
"""Loads creator id-name-service mappings from creators.txt."""
|
||||
self ._logger ("Attempting to load creators.json for Favorite Posts Dialog.")
|
||||
|
||||
if getattr (sys ,'frozen',False )and hasattr (sys ,'_MEIPASS'):
|
||||
base_path_for_creators =sys ._MEIPASS
|
||||
self ._logger (f" Running bundled. Using _MEIPASS: {base_path_for_creators }")
|
||||
else :
|
||||
base_path_for_creators =self .parent_app .app_base_dir
|
||||
self ._logger (f" Not bundled or _MEIPASS unavailable. Using app_base_dir: {base_path_for_creators }")
|
||||
creators_file_path = os.path.join(base_path_for_creators, "data", "creators.json")
|
||||
self ._logger (f"Full path to creators.json: {creators_file_path }")
|
||||
|
||||
if not os .path .exists (creators_file_path ):
|
||||
self ._logger (f"Warning: 'creators.json' not found at {creators_file_path }. Creator names will not be displayed.")
|
||||
return
|
||||
|
||||
try :
|
||||
with open (creators_file_path ,'r',encoding ='utf-8')as f :
|
||||
loaded_data =json .load (f )
|
||||
|
||||
if isinstance (loaded_data ,list )and len (loaded_data )>0 and isinstance (loaded_data [0 ],list ):
|
||||
creators_list =loaded_data [0 ]
|
||||
elif isinstance (loaded_data ,list )and all (isinstance (item ,dict )for item in loaded_data ):
|
||||
creators_list =loaded_data
|
||||
else :
|
||||
self ._logger (f"Warning: 'creators.json' has an unexpected format. Expected a list of lists or a flat list of creator objects.")
|
||||
return
|
||||
|
||||
for creator_data in creators_list :
|
||||
creator_id =creator_data .get ("id")
|
||||
name =creator_data .get ("name")
|
||||
service =creator_data .get ("service")
|
||||
if creator_id and name and service :
|
||||
self .creator_name_cache [(service .lower (),str (creator_id ))]=name
|
||||
self ._logger (f"Successfully loaded {len (self .creator_name_cache )} creator names from 'creators.json'.")
|
||||
except Exception as e :
|
||||
self ._logger (f"Error loading 'creators.json': {e }")
|
||||
|
||||
def _start_fetching_favorite_posts (self ):
|
||||
self .download_button .setEnabled (False )
|
||||
self .status_label .setText ("Initializing favorite posts fetch...")
|
||||
|
||||
self .fetcher_thread =FavoritePostsFetcherThread (
|
||||
self .cookies_config ,
|
||||
self .parent_app .log_signal .emit ,
|
||||
target_domain_preference =self .target_domain_preference_for_this_fetch
|
||||
)
|
||||
self .fetcher_thread .status_update .connect (self ._update_status_label_from_key )
|
||||
self .fetcher_thread .finished .connect (self ._on_fetch_completed )
|
||||
self .fetcher_thread .progress_bar_update .connect (self ._set_progress_bar_value )
|
||||
self .progress_bar .setVisible (True )
|
||||
self .fetcher_thread .start ()
|
||||
|
||||
def _set_progress_bar_value (self ,value ,maximum ):
|
||||
if maximum ==0 :
|
||||
self .progress_bar .setRange (0 ,0 )
|
||||
self .progress_bar .setValue (0 )
|
||||
else :
|
||||
self .progress_bar .setRange (0 ,maximum )
|
||||
self .progress_bar .setValue (value )
|
||||
|
||||
def _on_fetch_completed (self ,fetched_posts_list ,status_key ):
|
||||
self .progress_bar .setVisible (False )
|
||||
|
||||
proceed_to_display_posts =False
|
||||
show_error_message_box =False
|
||||
message_box_title_key ="fav_posts_fetch_error_title"
|
||||
message_box_text_key ="fav_posts_fetch_error_message"
|
||||
message_box_params ={'domain':self .target_domain_preference_for_this_fetch or "platform",'error_message_part':""}
|
||||
status_label_text_key =None
|
||||
|
||||
if status_key =="KEY_FETCH_SUCCESS":
|
||||
proceed_to_display_posts =True
|
||||
elif status_key and status_key .startswith ("KEY_FETCH_PARTIAL_SUCCESS_")and fetched_posts_list :
|
||||
displayable_detail =status_key .replace ("KEY_FETCH_PARTIAL_SUCCESS_","").replace ("_"," ")
|
||||
self ._logger (f"Partial success with posts: {status_key } -> {displayable_detail }")
|
||||
|
||||
|
||||
proceed_to_display_posts =True
|
||||
elif status_key :
|
||||
specific_domain_msg_part =f" for {self .target_domain_preference_for_this_fetch }"if self .target_domain_preference_for_this_fetch else ""
|
||||
|
||||
if status_key .startswith ("KEY_COOKIES_REQUIRED_BUT_NOT_FOUND_FOR_DOMAIN_")or status_key =="KEY_COOKIES_REQUIRED_BUT_NOT_FOUND_GENERIC":
|
||||
status_label_text_key ="fav_posts_cookies_required_error"
|
||||
self ._logger (f"Cookie error: {status_key }. Showing help dialog.")
|
||||
cookie_help_dialog = CookieHelpDialog(self.parent_app, self)
|
||||
cookie_help_dialog .exec_ ()
|
||||
elif status_key =="KEY_AUTH_FAILED":
|
||||
status_label_text_key ="fav_posts_auth_failed_title"
|
||||
self ._logger (f"Auth error: {status_key }. Showing help dialog.")
|
||||
QMessageBox .warning (self ,self ._tr ("fav_posts_auth_failed_title","Authorization Failed (Posts)"),
|
||||
self ._tr ("fav_posts_auth_failed_message_generic","...").format (domain_specific_part =specific_domain_msg_part ))
|
||||
cookie_help_dialog = CookieHelpDialog(self.parent_app, self)
|
||||
cookie_help_dialog .exec_ ()
|
||||
elif status_key =="KEY_NO_FAVORITES_FOUND_ALL_PLATFORMS":
|
||||
                status_label_text_key = "fav_posts_no_posts_found_status"
                self._logger(status_key)
            elif status_key.startswith("KEY_FETCH_CANCELLED"):
                status_label_text_key = "fav_posts_fetch_cancelled_status"
                self._logger(status_key)
            else:
                displayable_error_detail = status_key
                if status_key.startswith("KEY_FETCH_FAILED_GENERIC_"):
                    displayable_error_detail = status_key.replace("KEY_FETCH_FAILED_GENERIC_", "").replace("_", " ")
                elif status_key.startswith("KEY_FETCH_PARTIAL_SUCCESS_"):
                    displayable_error_detail = status_key.replace("KEY_FETCH_PARTIAL_SUCCESS_", "Partial success but no posts: ").replace("_", " ")

                message_box_params['error_message_part'] = f":\n\n{displayable_error_detail}" if displayable_error_detail else ""
                status_label_text_key = "fav_posts_fetch_error_message"
                show_error_message_box = True
                self._logger(f"Fetch error: {status_key} -> {displayable_error_detail}")

            if status_label_text_key:
                self.status_label.setText(self._tr(status_label_text_key, status_label_text_key).format(**message_box_params))
            if show_error_message_box:
                QMessageBox.critical(self, self._tr(message_box_title_key), self._tr(message_box_text_key).format(**message_box_params))

            self.download_button.setEnabled(False)
            return

        if not proceed_to_display_posts:
            if not status_label_text_key:
                self.status_label.setText(self._tr("fav_posts_cookies_required_error", "Error: Cookies are required for favorite posts but could not be loaded."))
            self.download_button.setEnabled(False)
            return

        if not self.creator_name_cache:
            self._logger("Warning: Creator name cache is empty. Names will not be resolved from creators.json. Displaying IDs instead.")
        else:
            self._logger(f"Creator name cache has {len(self.creator_name_cache)} entries. Attempting to resolve names...")
            sample_keys = list(self.creator_name_cache.keys())[:3]
            if sample_keys:
                self._logger(f"Sample keys from creator_name_cache: {sample_keys}")

        processed_one_missing_log = False
        for post_entry in fetched_posts_list:
            service_from_post = post_entry.get('service', '')
            creator_id_from_post = post_entry.get('creator_id', '')

            lookup_key_service = service_from_post.lower()
            lookup_key_id = str(creator_id_from_post)
            lookup_key_tuple = (lookup_key_service, lookup_key_id)

            resolved_name = self.creator_name_cache.get(lookup_key_tuple)

            if resolved_name:
                post_entry['creator_name_resolved'] = resolved_name
            else:
                post_entry['creator_name_resolved'] = str(creator_id_from_post)
                if not processed_one_missing_log and self.creator_name_cache:
                    self._logger(f"Debug: Name not found for key {lookup_key_tuple}. Using ID '{creator_id_from_post}'.")
                    processed_one_missing_log = True

        self.all_fetched_posts = fetched_posts_list

        if not self.all_fetched_posts:
            self.status_label.setText(self._tr("fav_posts_no_posts_found_status", "No favorite posts found."))
            self.download_button.setEnabled(False)
            return

        try:
            self._populate_post_list_widget()
            self.status_label.setText(self._tr("fav_posts_found_status", "{count} favorite post(s) found.").format(count=len(self.all_fetched_posts)))
            self.download_button.setEnabled(True)
        except Exception as e:
            self.status_label.setText(self._tr("fav_posts_display_error_status", "Error displaying posts: {error}").format(error=str(e)))
            self._logger(f"Error during _populate_post_list_widget: {e}\n{traceback.format_exc(limit=3)}")
            QMessageBox.critical(self, self._tr("fav_posts_ui_error_title", "UI Error"), self._tr("fav_posts_ui_error_message", "Could not display favorite posts: {error}").format(error=str(e)))
            self.download_button.setEnabled(False)

    def _find_best_known_name_match_in_title(self, title_raw):
        if not title_raw or not self.known_names_list_ref:
            return None

        title_lower = title_raw.lower()
        best_match_known_name_primary = None
        longest_match_len = 0

        for known_entry in self.known_names_list_ref:
            aliases_to_check = set()
            for alias_val in known_entry.get("aliases", []):
                aliases_to_check.add(alias_val)
            if not known_entry.get("is_group", False):
                aliases_to_check.add(known_entry["name"])
            sorted_aliases_for_entry = sorted(list(aliases_to_check), key=len, reverse=True)

            for alias in sorted_aliases_for_entry:
                alias_lower = alias.lower()
                if not alias_lower:
                    continue
                if re.search(r'\b' + re.escape(alias_lower) + r'\b', title_lower):
                    if len(alias_lower) > longest_match_len:
                        longest_match_len = len(alias_lower)
                        best_match_known_name_primary = known_entry["name"]
                    break
        return best_match_known_name_primary

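The matcher above prefers the longest whole-word alias hit within each entry; a minimal standalone sketch of that rule, using a hypothetical Known.txt-style entry:

import re

# Hypothetical entry mirroring the structure the dialog reads from Known.txt.
entry = {"name": "Alice", "aliases": ["al", "alice"], "is_group": False}
title_lower = "al and alice at the beach"

# Aliases are tried longest-first, so "alice" wins over the substring-prone "al".
for alias in sorted(entry["aliases"] + [entry["name"]], key=len, reverse=True):
    if re.search(r'\b' + re.escape(alias.lower()) + r'\b', title_lower):
        print(entry["name"], "matched via", alias)  # -> Alice matched via alice
        break
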
    def _populate_post_list_widget(self, posts_to_display=None):
        self.post_list_widget.clear()

        source_list_for_grouping = posts_to_display if posts_to_display is not None else self.all_fetched_posts
        grouped_posts = {}
        for post in source_list_for_grouping:
            service = post.get('service', 'unknown_service')
            creator_id = post.get('creator_id', 'unknown_id')
            group_key = (service, creator_id)
            if group_key not in grouped_posts:
                grouped_posts[group_key] = []
            grouped_posts[group_key].append(post)

        # Cast the creator ID to str before lowercasing, since the API may return it as an int.
        sorted_group_keys = sorted(grouped_posts.keys(), key=lambda x: (x[0].lower(), str(x[1]).lower()))

        self.displayable_grouped_posts = {
            key: sorted(grouped_posts[key], key=lambda p: (p.get('added_date') or ''), reverse=True)
            for key in sorted_group_keys
        }
        for service, creator_id_val in sorted_group_keys:
            creator_name_display = self.creator_name_cache.get(
                (service.lower(), str(creator_id_val)),
                str(creator_id_val)
            )
            artist_header_display_text = f"{creator_name_display} ({service.capitalize()} / {creator_id_val})"
            artist_header_item = QListWidgetItem(f"🎨 {artist_header_display_text}")
            artist_header_item.setFlags(Qt.NoItemFlags)
            font = artist_header_item.font()
            font.setBold(True)
            font.setPointSize(font.pointSize() + 1)
            artist_header_item.setFont(font)
            artist_header_item.setForeground(Qt.cyan)
            self.post_list_widget.addItem(artist_header_item)
            for post_data in self.displayable_grouped_posts[(service, creator_id_val)]:
                post_title_raw = post_data.get('title', 'Untitled Post')
                found_known_name_primary = self._find_best_known_name_match_in_title(post_title_raw)

                plain_text_title_for_list_item = post_title_raw
                if found_known_name_primary:
                    suffix_text = f" [Known - {found_known_name_primary}]"
                    post_data['suffix_for_display'] = suffix_text
                    plain_text_title_for_list_item = post_title_raw + suffix_text
                else:
                    post_data.pop('suffix_for_display', None)

                list_item = QListWidgetItem(self.post_list_widget)
                list_item.setText(plain_text_title_for_list_item)
                list_item.setFlags(list_item.flags() | Qt.ItemIsUserCheckable)
                list_item.setCheckState(Qt.Unchecked)
                list_item.setData(Qt.UserRole, post_data)
                self.post_list_widget.addItem(list_item)

    def _filter_post_list_display(self):
        search_text = self.search_input.text().lower().strip()
        if not search_text:
            self._populate_post_list_widget(self.all_fetched_posts)
            return

        filtered_posts_to_group = []
        for post in self.all_fetched_posts:
            matches_post_title = search_text in post.get('title', '').lower()
            matches_creator_name = search_text in post.get('creator_name_resolved', '').lower()
            # Cast the ID to str so the search also works when the API returns it as an int.
            matches_creator_id = search_text in str(post.get('creator_id', '')).lower()
            matches_service = search_text in post.get('service', '').lower()

            if matches_post_title or matches_creator_name or matches_creator_id or matches_service:
                filtered_posts_to_group.append(post)

        self._populate_post_list_widget(filtered_posts_to_group)

    def _select_all_items(self):
        for i in range(self.post_list_widget.count()):
            item = self.post_list_widget.item(i)
            if item and item.flags() & Qt.ItemIsUserCheckable:
                item.setCheckState(Qt.Checked)

    def _deselect_all_items(self):
        for i in range(self.post_list_widget.count()):
            item = self.post_list_widget.item(i)
            if item and item.flags() & Qt.ItemIsUserCheckable:
                item.setCheckState(Qt.Unchecked)

    def _accept_selection_action(self):
        self.selected_posts_data = []
        for i in range(self.post_list_widget.count()):
            item = self.post_list_widget.item(i)
            if item and item.checkState() == Qt.Checked:
                post_data_for_download = item.data(Qt.UserRole)
                self.selected_posts_data.append(post_data_for_download)

        if not self.selected_posts_data:
            QMessageBox.information(self, self._tr("fav_posts_no_selection_title", "No Selection"), self._tr("fav_posts_no_selection_message", "Please select at least one post to download."))
            return
        self.accept()

    def get_selected_posts(self):
        return self.selected_posts_data
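A minimal sketch of the selection flow these methods implement, assuming the enclosing class is the favorite-posts dialog (its name and constructor are not shown in this hunk and are hypothetical here):

# Hypothetical caller; FavoritePostsDialog and its constructor are assumptions.
dialog = FavoritePostsDialog(parent_app)
if dialog.exec_() == QDialog.Accepted:
    for post in dialog.get_selected_posts():
        print(post.get('title'), post.get('creator_name_resolved'))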
src/ui/dialogs/FutureSettingsDialog.py (new file, 493 lines)
@@ -0,0 +1,493 @@
# --- Standard Library Imports ---
import os
import json
import sys

# --- PyQt5 Imports ---
from PyQt5.QtCore import Qt, QStandardPaths, QTimer
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QPushButton, QVBoxLayout,
    QGroupBox, QComboBox, QMessageBox, QGridLayout, QCheckBox, QLineEdit
)
# --- Local Application Imports ---
from ...i18n.translator import get_translation
from ...utils.resolution import get_dark_theme
from ..main_window import get_app_icon_object
from ...config.constants import (
    THEME_KEY, LANGUAGE_KEY, DOWNLOAD_LOCATION_KEY,
    RESOLUTION_KEY, UI_SCALE_KEY, SAVE_CREATOR_JSON_KEY,
    DATE_PREFIX_FORMAT_KEY,
    COOKIE_TEXT_KEY, USE_COOKIE_KEY,
    FETCH_FIRST_KEY, DISCORD_TOKEN_KEY, POST_DOWNLOAD_ACTION_KEY
)
from ...services.updater import UpdateChecker, UpdateDownloader

class CountdownMessageBox(QDialog):
    """
    A custom message box that includes a countdown timer for the 'Yes' button,
    which automatically accepts the dialog when the timer reaches zero.
    """
    def __init__(self, title, text, countdown_seconds=10, parent_app=None, parent=None):
        super().__init__(parent)
        self.parent_app = parent_app
        self.countdown = countdown_seconds

        # --- Basic Window Setup ---
        self.setWindowTitle(title)
        self.setModal(True)
        app_icon = get_app_icon_object()
        if app_icon and not app_icon.isNull():
            self.setWindowIcon(app_icon)

        self._init_ui(text)
        self._apply_theme()

        # --- Timer Setup ---
        self.timer = QTimer(self)
        self.timer.setInterval(1000)  # Tick every second
        self.timer.timeout.connect(self._update_countdown)
        self.timer.start()

    def _init_ui(self, text):
        """Initializes the UI components of the dialog."""
        main_layout = QVBoxLayout(self)

        self.message_label = QLabel(text)
        self.message_label.setWordWrap(True)
        self.message_label.setAlignment(Qt.AlignCenter)
        main_layout.addWidget(self.message_label)

        buttons_layout = QHBoxLayout()
        buttons_layout.addStretch(1)

        self.yes_button = QPushButton()
        self.yes_button.clicked.connect(self.accept)
        self.yes_button.setDefault(True)

        self.no_button = QPushButton()
        self.no_button.clicked.connect(self.reject)

        buttons_layout.addWidget(self.yes_button)
        buttons_layout.addWidget(self.no_button)
        buttons_layout.addStretch(1)

        main_layout.addLayout(buttons_layout)

        self._retranslate_ui()
        self._update_countdown()  # Initial text setup

    def _tr(self, key, default_text=""):
        """Helper for translations."""
        if self.parent_app and hasattr(self.parent_app, 'current_selected_language'):
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        """Sets translated text for UI elements."""
        self.no_button.setText(self._tr("no_button_text", "No"))
        # The 'yes' button text is handled by the countdown

    def _update_countdown(self):
        """Updates the countdown and button text each second."""
        if self.countdown <= 0:
            self.timer.stop()
            self.accept()  # Automatically accept when countdown finishes
            return

        yes_text = self._tr("yes_button_text", "Yes")
        self.yes_button.setText(f"{yes_text} ({self.countdown})")
        self.countdown -= 1

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        if self.parent_app and hasattr(self.parent_app, 'current_theme') and self.parent_app.current_theme == "dark":
            scale = getattr(self.parent_app, 'scale_factor', 1)
            self.setStyleSheet(get_dark_theme(scale))
        else:
            self.setStyleSheet("")

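A minimal usage sketch for CountdownMessageBox (the title/text strings are placeholders and app_window is an assumed main-window reference); if the user does nothing, the dialog auto-accepts when the countdown reaches zero:

box = CountdownMessageBox("Update", "Install the update now?", countdown_seconds=5, parent_app=app_window)
if box.exec_() == QDialog.Accepted:
    pass  # treated as "Yes", either clicked or reached by timeout
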
class FutureSettingsDialog(QDialog):
    """
    A dialog for managing application-wide settings like theme, language,
    and display options, with an organized layout.
    """
    def __init__(self, parent_app_ref, parent=None):
        super().__init__(parent)
        self.parent_app = parent_app_ref
        self.setModal(True)
        self.update_downloader_thread = None  # To keep a reference

        app_icon = get_app_icon_object()
        if app_icon and not app_icon.isNull():
            self.setWindowIcon(app_icon)

        screen_height = QApplication.primaryScreen().availableGeometry().height() if QApplication.primaryScreen() else 800
        scale_factor = screen_height / 800.0
        base_min_w, base_min_h = 420, 520  # Increased height for new options
        scaled_min_w = int(base_min_w * scale_factor)
        scaled_min_h = int(base_min_h * scale_factor)
        self.setMinimumSize(scaled_min_w, scaled_min_h)

        self._init_ui()
        self._retranslate_ui()
        self._apply_theme()

    def _init_ui(self):
        """Initializes all UI components and layouts for the dialog."""
        main_layout = QVBoxLayout(self)

        self.interface_group_box = QGroupBox()
        interface_layout = QGridLayout(self.interface_group_box)

        self.theme_label = QLabel()
        self.theme_toggle_button = QPushButton()
        self.theme_toggle_button.clicked.connect(self._toggle_theme)
        interface_layout.addWidget(self.theme_label, 0, 0)
        interface_layout.addWidget(self.theme_toggle_button, 0, 1)

        self.ui_scale_label = QLabel()
        self.ui_scale_combo_box = QComboBox()
        self.ui_scale_combo_box.currentIndexChanged.connect(self._display_setting_changed)
        interface_layout.addWidget(self.ui_scale_label, 1, 0)
        interface_layout.addWidget(self.ui_scale_combo_box, 1, 1)

        self.language_label = QLabel()
        self.language_combo_box = QComboBox()
        self.language_combo_box.currentIndexChanged.connect(self._language_selection_changed)
        interface_layout.addWidget(self.language_label, 2, 0)
        interface_layout.addWidget(self.language_combo_box, 2, 1)

        main_layout.addWidget(self.interface_group_box)

        self.download_window_group_box = QGroupBox()
        download_window_layout = QGridLayout(self.download_window_group_box)

        self.window_size_label = QLabel()
        self.resolution_combo_box = QComboBox()
        self.resolution_combo_box.currentIndexChanged.connect(self._display_setting_changed)
        download_window_layout.addWidget(self.window_size_label, 0, 0)
        download_window_layout.addWidget(self.resolution_combo_box, 0, 1)

        self.default_path_label = QLabel()
        self.save_path_button = QPushButton()
        self.save_path_button.clicked.connect(self._save_settings)
        download_window_layout.addWidget(self.default_path_label, 1, 0)
        download_window_layout.addWidget(self.save_path_button, 1, 1)

        self.date_prefix_format_label = QLabel()
        self.date_prefix_format_input = QLineEdit()
        self.date_prefix_format_input.textChanged.connect(self._date_prefix_format_changed)
        download_window_layout.addWidget(self.date_prefix_format_label, 2, 0)
        download_window_layout.addWidget(self.date_prefix_format_input, 2, 1)

        self.post_download_action_label = QLabel()
        self.post_download_action_combo = QComboBox()
        self.post_download_action_combo.currentIndexChanged.connect(self._post_download_action_changed)
        download_window_layout.addWidget(self.post_download_action_label, 3, 0)
        download_window_layout.addWidget(self.post_download_action_combo, 3, 1)

        self.save_creator_json_checkbox = QCheckBox()
        self.save_creator_json_checkbox.stateChanged.connect(self._creator_json_setting_changed)
        download_window_layout.addWidget(self.save_creator_json_checkbox, 4, 0, 1, 2)

        self.fetch_first_checkbox = QCheckBox()
        self.fetch_first_checkbox.stateChanged.connect(self._fetch_first_setting_changed)
        download_window_layout.addWidget(self.fetch_first_checkbox, 5, 0, 1, 2)

        main_layout.addWidget(self.download_window_group_box)

        self.update_group_box = QGroupBox()
        update_layout = QGridLayout(self.update_group_box)
        self.version_label = QLabel()
        self.update_status_label = QLabel()
        self.check_update_button = QPushButton()
        self.check_update_button.clicked.connect(self._check_for_updates)
        update_layout.addWidget(self.version_label, 0, 0)
        update_layout.addWidget(self.update_status_label, 0, 1)
        update_layout.addWidget(self.check_update_button, 1, 0, 1, 2)
        main_layout.addWidget(self.update_group_box)

        main_layout.addStretch(1)

        self.ok_button = QPushButton()
        self.ok_button.clicked.connect(self.accept)
        main_layout.addWidget(self.ok_button, 0, Qt.AlignRight | Qt.AlignBottom)

    def _retranslate_ui(self):
        self.setWindowTitle(self._tr("settings_dialog_title", "Settings"))
        self.interface_group_box.setTitle(self._tr("interface_group_title", "Interface Settings"))
        self.download_window_group_box.setTitle(self._tr("download_window_group_title", "Download & Window Settings"))
        self.theme_label.setText(self._tr("theme_label", "Theme:"))
        self.ui_scale_label.setText(self._tr("ui_scale_label", "UI Scale:"))
        self.language_label.setText(self._tr("language_label", "Language:"))

        self.window_size_label.setText(self._tr("window_size_label", "Window Size:"))
        self.default_path_label.setText(self._tr("default_path_label", "Default Path:"))

        self.date_prefix_format_label.setText(self._tr("date_prefix_format_label", "Post Subfolder Format:"))
        # Update placeholder to include {post}
        self.date_prefix_format_input.setPlaceholderText(self._tr("date_prefix_format_placeholder", "e.g., YYYY-MM-DD {post} {postid}"))
        # Add the tooltip to explain usage
        self.date_prefix_format_input.setToolTip(self._tr(
            "date_prefix_format_tooltip",
            "Create a custom folder name using placeholders:\n"
            "• YYYY, MM, DD: for the date\n"
            "• {post}: for the post title\n"
            "• {postid}: for the post's unique ID\n\n"
            "Example: {post} [{postid}] [YYYY-MM-DD]"
        ))

        self.post_download_action_label.setText(self._tr("post_download_action_label", "Action After Download:"))
        self.save_creator_json_checkbox.setText(self._tr("save_creator_json_label", "Save Creator.json file"))
        self.fetch_first_checkbox.setText(self._tr("fetch_first_label", "Fetch First (Download after all pages are found)"))
        self.fetch_first_checkbox.setToolTip(self._tr("fetch_first_tooltip", "If checked, the downloader will find all posts from a creator first before starting any downloads.\nThis can be slower to start but provides a more accurate progress bar."))
        self._update_theme_toggle_button_text()
        self.save_path_button.setText(self._tr("settings_save_all_button", "Save Path + Cookie + Token"))
        self.save_path_button.setToolTip(self._tr("settings_save_all_tooltip", "Save the current 'Download Location', Cookie, and Discord Token settings for future sessions."))
        self.ok_button.setText(self._tr("ok_button", "OK"))

        self.update_group_box.setTitle(self._tr("update_group_title", "Application Updates"))
        current_version = self.parent_app.windowTitle().split(' v')[-1]
        self.version_label.setText(self._tr("current_version_label", f"Current Version: v{current_version}"))
        self.update_status_label.setText(self._tr("update_status_ready", "Ready to check."))
        self.check_update_button.setText(self._tr("check_for_updates_button", "Check for Updates"))

        self._populate_display_combo_boxes()
        self._populate_language_combo_box()
        self._populate_post_download_action_combo()
        self._load_date_prefix_format()
        self._load_checkbox_states()

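The subfolder-format placeholders described in the tooltip above (YYYY, MM, DD, {post}, {postid}) are applied elsewhere in the app; a sketch of how such a format string might be rendered, assuming plain string substitution:

from datetime import date

def render_subfolder(fmt, post_title, post_id, d=None):
    # Hypothetical helper; the real rendering code is not part of this file.
    d = d or date.today()
    return (fmt.replace("YYYY", f"{d.year:04d}")
               .replace("MM", f"{d.month:02d}")
               .replace("DD", f"{d.day:02d}")
               .replace("{post}", post_title)
               .replace("{postid}", str(post_id)))

print(render_subfolder("YYYY-MM-DD {post} {postid}", "Beach Set", 12345))
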
    def _check_for_updates(self):
        self.check_update_button.setEnabled(False)
        self.update_status_label.setText(self._tr("update_status_checking", "Checking..."))
        current_version = self.parent_app.windowTitle().split(' v')[-1]

        self.update_checker_thread = UpdateChecker(current_version)
        self.update_checker_thread.update_available.connect(self._on_update_available)
        self.update_checker_thread.up_to_date.connect(self._on_up_to_date)
        self.update_checker_thread.update_error.connect(self._on_update_error)
        self.update_checker_thread.start()

    def _on_update_available(self, new_version, download_url):
        self.update_status_label.setText(self._tr("update_status_found", f"Update found: v{new_version}"))
        self.check_update_button.setEnabled(True)

        reply = QMessageBox.question(self, self._tr("update_available_title", "Update Available"),
                                     self._tr("update_available_message", f"A new version (v{new_version}) is available.\nWould you like to download and install it now?"),
                                     QMessageBox.Yes | QMessageBox.No, QMessageBox.Yes)
        if reply == QMessageBox.Yes:
            self.ok_button.setEnabled(False)
            self.check_update_button.setEnabled(False)
            self.update_status_label.setText(self._tr("update_status_downloading", "Downloading update..."))
            self.update_downloader_thread = UpdateDownloader(download_url, self.parent_app)
            self.update_downloader_thread.download_finished.connect(self._on_download_finished)
            self.update_downloader_thread.download_error.connect(self._on_update_error)
            self.update_downloader_thread.start()

    def _on_download_finished(self):
        QApplication.instance().quit()

    def _on_up_to_date(self, message):
        self.update_status_label.setText(self._tr("update_status_latest", message))
        self.check_update_button.setEnabled(True)

    def _on_update_error(self, message):
        self.update_status_label.setText(self._tr("update_status_error", f"Error: {message}"))
        self.check_update_button.setEnabled(True)
        self.ok_button.setEnabled(True)

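The update flow above is signal-driven: UpdateChecker emits exactly one of update_available(new_version, download_url), up_to_date(message), or update_error(message), and each slot re-enables the buttons as appropriate. A condensed wiring sketch (the version string is an example value; normally it is parsed from the window title):

checker = UpdateChecker("1.0.0")  # example version string
checker.update_available.connect(lambda v, url: print("update:", v, url))
checker.up_to_date.connect(print)
checker.update_error.connect(print)
checker.start()
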
    def _load_checkbox_states(self):
        self.save_creator_json_checkbox.blockSignals(True)
        should_save = self.parent_app.settings.value(SAVE_CREATOR_JSON_KEY, True, type=bool)
        self.save_creator_json_checkbox.setChecked(should_save)
        self.save_creator_json_checkbox.blockSignals(False)

        self.fetch_first_checkbox.blockSignals(True)
        should_fetch_first = self.parent_app.settings.value(FETCH_FIRST_KEY, False, type=bool)
        self.fetch_first_checkbox.setChecked(should_fetch_first)
        self.fetch_first_checkbox.blockSignals(False)

    def _creator_json_setting_changed(self, state):
        is_checked = state == Qt.Checked
        self.parent_app.settings.setValue(SAVE_CREATOR_JSON_KEY, is_checked)
        self.parent_app.settings.sync()

    def _fetch_first_setting_changed(self, state):
        is_checked = state == Qt.Checked
        self.parent_app.settings.setValue(FETCH_FIRST_KEY, is_checked)
        self.parent_app.settings.sync()

    def _tr(self, key, default_text=""):
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _apply_theme(self):
        if self.parent_app and self.parent_app.current_theme == "dark":
            scale = getattr(self.parent_app, 'scale_factor', 1)
            self.setStyleSheet(get_dark_theme(scale))
        else:
            self.setStyleSheet("")

    def _update_theme_toggle_button_text(self):
        if self.parent_app.current_theme == "dark":
            self.theme_toggle_button.setText(self._tr("theme_toggle_light", "Switch to Light Mode"))
        else:
            self.theme_toggle_button.setText(self._tr("theme_toggle_dark", "Switch to Dark Mode"))

    def _toggle_theme(self):
        new_theme = "light" if self.parent_app.current_theme == "dark" else "dark"
        self.parent_app.settings.setValue(THEME_KEY, new_theme)
        self.parent_app.settings.sync()
        self.parent_app.current_theme = new_theme
        self._apply_theme()
        if hasattr(self.parent_app, '_apply_theme_and_restart_prompt'):
            self.parent_app._apply_theme_and_restart_prompt()

    def _populate_display_combo_boxes(self):
        self.resolution_combo_box.blockSignals(True)
        self.resolution_combo_box.clear()
        resolutions = [("Auto", "Auto"), ("1280x720", "1280x720"), ("1600x900", "1600x900"), ("1920x1080", "1920x1080")]
        current_res = self.parent_app.settings.value(RESOLUTION_KEY, "Auto")
        for res_key, res_name in resolutions:
            self.resolution_combo_box.addItem(res_name, res_key)
            if current_res == res_key:
                self.resolution_combo_box.setCurrentIndex(self.resolution_combo_box.count() - 1)
        self.resolution_combo_box.blockSignals(False)

        self.ui_scale_combo_box.blockSignals(True)
        self.ui_scale_combo_box.clear()
        scales = [
            (0.5, "50%"), (0.7, "70%"), (0.9, "90%"), (1.0, "100% (Default)"),
            (1.25, "125%"), (1.50, "150%"), (1.75, "175%"), (2.0, "200%")
        ]
        current_scale = self.parent_app.settings.value(UI_SCALE_KEY, 1.0)
        for scale_val, scale_name in scales:
            self.ui_scale_combo_box.addItem(scale_name, scale_val)
            if abs(float(current_scale) - scale_val) < 0.01:
                self.ui_scale_combo_box.setCurrentIndex(self.ui_scale_combo_box.count() - 1)
        self.ui_scale_combo_box.blockSignals(False)

    def _display_setting_changed(self):
        selected_res = self.resolution_combo_box.currentData()
        selected_scale = self.ui_scale_combo_box.currentData()
        self.parent_app.settings.setValue(RESOLUTION_KEY, selected_res)
        self.parent_app.settings.setValue(UI_SCALE_KEY, selected_scale)
        self.parent_app.settings.sync()
        QMessageBox.information(self, self._tr("display_change_title", "Display Settings Changed"),
                                self._tr("language_change_message", "A restart is required..."))

    def _populate_language_combo_box(self):
        self.language_combo_box.blockSignals(True)
        self.language_combo_box.clear()
        languages = [
            ("en", "English"), ("ja", "日本語 (Japanese)"), ("fr", "Français (French)"),
            ("de", "Deutsch (German)"), ("es", "Español (Spanish)"), ("pt", "Português (Portuguese)"),
            ("ru", "Русский (Russian)"), ("zh_CN", "简体中文 (Simplified Chinese)"),
            ("zh_TW", "繁體中文 (Traditional Chinese)"), ("ko", "한국어 (Korean)")
        ]
        current_lang = self.parent_app.current_selected_language
        for lang_code, lang_name in languages:
            self.language_combo_box.addItem(lang_name, lang_code)
            if current_lang == lang_code:
                self.language_combo_box.setCurrentIndex(self.language_combo_box.count() - 1)
        self.language_combo_box.blockSignals(False)

    def _language_selection_changed(self, index):
        selected_lang_code = self.language_combo_box.itemData(index)
        if selected_lang_code and selected_lang_code != self.parent_app.current_selected_language:
            self.parent_app.settings.setValue(LANGUAGE_KEY, selected_lang_code)
            self.parent_app.settings.sync()
            self.parent_app.current_selected_language = selected_lang_code
            self._retranslate_ui()
            if hasattr(self.parent_app, '_retranslate_main_ui'):
                self.parent_app._retranslate_main_ui()
            QMessageBox.information(self, self._tr("language_change_title", "Language Changed"),
                                    self._tr("language_change_message", "A restart is required..."))

    def _populate_post_download_action_combo(self):
        """Populates the action dropdown and sets the current selection from settings."""
        self.post_download_action_combo.blockSignals(True)
        self.post_download_action_combo.clear()

        actions = [
            (self._tr("action_off", "Off"), "off"),
            (self._tr("action_notify", "Notify with Sound"), "notify"),
            (self._tr("action_sleep", "Sleep"), "sleep"),
            (self._tr("action_shutdown", "Shutdown"), "shutdown")
        ]

        current_action = self.parent_app.settings.value(POST_DOWNLOAD_ACTION_KEY, "off")

        for text, key in actions:
            self.post_download_action_combo.addItem(text, key)
            if current_action == key:
                self.post_download_action_combo.setCurrentIndex(self.post_download_action_combo.count() - 1)

        self.post_download_action_combo.blockSignals(False)

    def _post_download_action_changed(self):
        """Saves the selected post-download action to settings."""
        selected_action = self.post_download_action_combo.currentData()
        self.parent_app.settings.setValue(POST_DOWNLOAD_ACTION_KEY, selected_action)
        self.parent_app.settings.sync()

    def _load_date_prefix_format(self):
        """Loads the saved date prefix format and sets it in the input field."""
        self.date_prefix_format_input.blockSignals(True)
        current_format = self.parent_app.settings.value(DATE_PREFIX_FORMAT_KEY, "YYYY-MM-DD {post}", type=str)
        self.date_prefix_format_input.setText(current_format)
        self.date_prefix_format_input.blockSignals(False)

    def _date_prefix_format_changed(self, text):
        """Saves the date prefix format whenever it's changed."""
        self.parent_app.settings.setValue(DATE_PREFIX_FORMAT_KEY, text)
        self.parent_app.settings.sync()
        # Also update the live value in the parent app
        if hasattr(self.parent_app, 'date_prefix_format'):
            self.parent_app.date_prefix_format = text

    def _save_settings(self):
        path_saved = False
        cookie_saved = False
        token_saved = False

        if hasattr(self.parent_app, 'dir_input') and self.parent_app.dir_input:
            current_path = self.parent_app.dir_input.text().strip()
            if current_path and os.path.isdir(current_path):
                self.parent_app.settings.setValue(DOWNLOAD_LOCATION_KEY, current_path)
                path_saved = True

        if hasattr(self.parent_app, 'use_cookie_checkbox'):
            use_cookie = self.parent_app.use_cookie_checkbox.isChecked()
            cookie_content = self.parent_app.cookie_text_input.text().strip()
            if use_cookie and cookie_content:
                self.parent_app.settings.setValue(USE_COOKIE_KEY, True)
                self.parent_app.settings.setValue(COOKIE_TEXT_KEY, cookie_content)
                cookie_saved = True
            else:
                self.parent_app.settings.setValue(USE_COOKIE_KEY, False)
                self.parent_app.settings.setValue(COOKIE_TEXT_KEY, "")

        if (hasattr(self.parent_app, 'remove_from_filename_input') and
                hasattr(self.parent_app, 'remove_from_filename_label_widget')):

            label_text = self.parent_app.remove_from_filename_label_widget.text()
            if "Token" in label_text:
                discord_token = self.parent_app.remove_from_filename_input.text().strip()
                if discord_token:
                    self.parent_app.settings.setValue(DISCORD_TOKEN_KEY, discord_token)
                    token_saved = True

        self.parent_app.settings.sync()

        if path_saved or cookie_saved or token_saved:
            QMessageBox.information(self, "Settings Saved", "Settings have been saved successfully.")
        else:
            QMessageBox.warning(self, "Nothing to Save", "No valid settings were found to save.")
src/ui/dialogs/HelpGuideDialog.py (new file, 192 lines)
@@ -0,0 +1,192 @@
import os
import sys
from PyQt5.QtCore import QUrl, QSize, Qt
from PyQt5.QtGui import QIcon, QDesktopServices
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QPushButton, QVBoxLayout,
    QStackedWidget, QListWidget, QFrame, QWidget, QScrollArea
)
from ...i18n.translator import get_translation
from ..main_window import get_app_icon_object
from ...utils.resolution import get_dark_theme


class TourStepWidget(QWidget):
    """
    A custom widget representing a single step or page in the feature guide.
    It neatly formats a title and its corresponding content.
    """
    def __init__(self, title_text, content_text, parent=None, scale=1.0):
        super().__init__(parent)
        layout = QVBoxLayout(self)
        layout.setContentsMargins(20, 20, 20, 20)
        layout.setSpacing(10)

        title_font_size = int(14 * scale)
        content_font_size = int(11 * scale)

        title_label = QLabel(title_text)
        title_label.setAlignment(Qt.AlignCenter)
        title_label.setStyleSheet(f"font-size: {title_font_size}pt; font-weight: bold; color: #E0E0E0; padding-bottom: 15px;")
        layout.addWidget(title_label)

        scroll_area = QScrollArea()
        scroll_area.setWidgetResizable(True)
        scroll_area.setFrameShape(QFrame.NoFrame)
        scroll_area.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        scroll_area.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        scroll_area.setStyleSheet("background-color: transparent;")

        content_label = QLabel(content_text)
        content_label.setWordWrap(True)
        content_label.setAlignment(Qt.AlignLeft | Qt.AlignTop)
        content_label.setTextFormat(Qt.RichText)
        content_label.setOpenExternalLinks(True)
        content_label.setStyleSheet(f"font-size: {content_font_size}pt; color: #C8C8C8; line-height: 1.8;")
        scroll_area.setWidget(content_label)
        layout.addWidget(scroll_area, 1)


class HelpGuideDialog(QDialog):
    """A multi-page dialog for displaying the feature guide with a navigation list."""
    def __init__(self, steps_data, parent_app, parent=None):
        super().__init__(parent)
        self.steps_data = steps_data
        self.parent_app = parent_app

        scale = self.parent_app.scale_factor if hasattr(self.parent_app, 'scale_factor') else 1.0

        app_icon = get_app_icon_object()
        if app_icon and not app_icon.isNull():
            self.setWindowIcon(app_icon)

        self.setModal(True)
        self.resize(int(800 * scale), int(650 * scale))

        dialog_font_size = int(11 * scale)

        current_theme_style = ""
        if hasattr(self.parent_app, 'current_theme') and self.parent_app.current_theme == "dark":
            current_theme_style = get_dark_theme(scale)
        else:
            # Basic light theme fallback
            current_theme_style = f"""
                QDialog {{ background-color: #F0F0F0; border: 1px solid #B0B0B0; }}
                QLabel {{ color: #1E1E1E; }}
                QPushButton {{
                    background-color: #E1E1E1;
                    color: #1E1E1E;
                    border: 1px solid #ADADAD;
                    padding: {int(8*scale)}px {int(15*scale)}px;
                    border-radius: 4px;
                    min-height: {int(25*scale)}px;
                    font-size: {dialog_font_size}pt;
                }}
                QPushButton:hover {{ background-color: #CACACA; }}
                QPushButton:pressed {{ background-color: #B0B0B0; }}
            """

        self.setStyleSheet(current_theme_style)
        self._init_ui()
        if self.parent_app:
            self.move(self.parent_app.geometry().center() - self.rect().center())

    def _tr(self, key, default_text=""):
        """Helper to get translation based on current app language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _init_ui(self):
        main_layout = QVBoxLayout(self)
        main_layout.setContentsMargins(15, 15, 15, 15)
        main_layout.setSpacing(10)

        # Title
        title_label = QLabel(self._tr("help_guide_dialog_title", "Kemono Downloader - Feature Guide"))
        scale = getattr(self.parent_app, 'scale_factor', 1.0)
        title_font_size = int(16 * scale)
        title_label.setStyleSheet(f"font-size: {title_font_size}pt; font-weight: bold; color: #E0E0E0;")
        title_label.setAlignment(Qt.AlignCenter)
        main_layout.addWidget(title_label)

        # Content Layout (Navigation + Stacked Pages)
        content_layout = QHBoxLayout()
        main_layout.addLayout(content_layout, 1)

        self.nav_list = QListWidget()
        self.nav_list.setFixedWidth(int(220 * scale))
        self.nav_list.setStyleSheet(f"""
            QListWidget {{
                background-color: #2E2E2E;
                border: 1px solid #4A4A4A;
                border-radius: 4px;
                font-size: {int(11 * scale)}pt;
            }}
            QListWidget::item {{
                padding: 10px;
                border-bottom: 1px solid #4A4A4A;
            }}
            QListWidget::item:selected {{
                background-color: #87CEEB;
                color: #2E2E2E;
                font-weight: bold;
            }}
        """)
        content_layout.addWidget(self.nav_list)

        self.stacked_widget = QStackedWidget()
        content_layout.addWidget(self.stacked_widget)

        for title_key, content_key in self.steps_data:
            title = self._tr(title_key, title_key)
            content = self._tr(content_key, f"Content for {content_key} not found.")

            self.nav_list.addItem(title)

            step_widget = TourStepWidget(title, content, scale=scale)
            self.stacked_widget.addWidget(step_widget)

        self.nav_list.currentRowChanged.connect(self.stacked_widget.setCurrentIndex)
        if self.nav_list.count() > 0:
            self.nav_list.setCurrentRow(0)

        # Footer Layout (Social links and Close button)
        footer_layout = QHBoxLayout()
        footer_layout.setContentsMargins(0, 10, 0, 0)

        # Social Media Icons
        if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
            assets_base_dir = sys._MEIPASS
        else:
            assets_base_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))

        github_icon_path = os.path.join(assets_base_dir, "assets", "github.png")
        instagram_icon_path = os.path.join(assets_base_dir, "assets", "instagram.png")
        discord_icon_path = os.path.join(assets_base_dir, "assets", "discord.png")

        self.github_button = QPushButton(QIcon(github_icon_path), "")
        self.instagram_button = QPushButton(QIcon(instagram_icon_path), "")
        self.discord_button = QPushButton(QIcon(discord_icon_path), "")

        icon_dim = int(24 * scale)
        icon_size = QSize(icon_dim, icon_dim)

        for button, tooltip_key, url in [
            (self.github_button, "help_guide_github_tooltip", "https://github.com/Yuvi63771/Kemono-Downloader"),
            (self.instagram_button, "help_guide_instagram_tooltip", "https://www.instagram.com/uvi.arts/"),
            (self.discord_button, "help_guide_discord_tooltip", "https://discord.gg/BqP64XTdJN")
        ]:
            button.setIconSize(icon_size)
            button.setToolTip(self._tr(tooltip_key))
            button.setFixedSize(icon_size.width() + 8, icon_size.height() + 8)
            button.setStyleSheet("background-color: transparent; border: none;")
            button.clicked.connect(lambda _, u=url: QDesktopServices.openUrl(QUrl(u)))
            footer_layout.addWidget(button)

        footer_layout.addStretch(1)

        self.finish_button = QPushButton(self._tr("tour_dialog_finish_button", "Finish"))
        self.finish_button.clicked.connect(self.accept)
        footer_layout.addWidget(self.finish_button)

        main_layout.addLayout(footer_layout)
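A minimal usage sketch for HelpGuideDialog; steps_data pairs a title translation key with a content translation key per page (the key names below are illustrative, and app_window is an assumed main-window reference):

steps = [
    ("tour_step_intro_title", "tour_step_intro_content"),
    ("tour_step_filters_title", "tour_step_filters_content"),
]
HelpGuideDialog(steps, parent_app=app_window).exec_()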
src/ui/dialogs/KeepDuplicatesDialog.py (new file, 107 lines)
@@ -0,0 +1,107 @@
from PyQt5.QtWidgets import (
    QDialog, QVBoxLayout, QGroupBox, QRadioButton,
    QPushButton, QHBoxLayout, QButtonGroup, QLabel, QLineEdit
)
from PyQt5.QtGui import QIntValidator
from ...i18n.translator import get_translation
from ...config.constants import DUPLICATE_HANDLING_HASH, DUPLICATE_HANDLING_KEEP_ALL


class KeepDuplicatesDialog(QDialog):
    """A dialog to choose the duplicate handling method, with a limit option."""

    def __init__(self, current_mode, current_limit, parent=None):
        super().__init__(parent)
        self.parent_app = parent
        self.selected_mode = current_mode
        self.limit = current_limit

        self._init_ui()
        self._retranslate_ui()

        if self.parent_app and hasattr(self.parent_app, '_apply_theme_to_widget'):
            self.parent_app._apply_theme_to_widget(self)
        if current_mode == DUPLICATE_HANDLING_KEEP_ALL:
            self.radio_keep_everything.setChecked(True)
            self.limit_input.setText(str(current_limit) if current_limit > 0 else "")
        else:
            self.radio_skip_by_hash.setChecked(True)
            self.limit_input.setEnabled(False)

    def _init_ui(self):
        """Initializes the UI components."""
        main_layout = QVBoxLayout(self)
        # Keep references so _retranslate_ui can address these widgets directly
        # instead of relying on a fragile findChild() lookup.
        self.info_label = QLabel()
        self.info_label.setWordWrap(True)
        main_layout.addWidget(self.info_label)

        self.options_group = QGroupBox()
        options_layout = QVBoxLayout(self.options_group)
        self.button_group = QButtonGroup(self)
        self.radio_skip_by_hash = QRadioButton()
        self.button_group.addButton(self.radio_skip_by_hash)
        options_layout.addWidget(self.radio_skip_by_hash)
        keep_everything_layout = QHBoxLayout()
        self.radio_keep_everything = QRadioButton()
        self.button_group.addButton(self.radio_keep_everything)
        keep_everything_layout.addWidget(self.radio_keep_everything)
        keep_everything_layout.addStretch(1)

        self.limit_label = QLabel()
        self.limit_input = QLineEdit()
        self.limit_input.setValidator(QIntValidator(0, 99))
        self.limit_input.setFixedWidth(50)
        keep_everything_layout.addWidget(self.limit_label)
        keep_everything_layout.addWidget(self.limit_input)
        options_layout.addLayout(keep_everything_layout)

        main_layout.addWidget(self.options_group)
        button_layout = QHBoxLayout()
        self.ok_button = QPushButton()
        self.cancel_button = QPushButton()
        button_layout.addStretch(1)
        button_layout.addWidget(self.ok_button)
        button_layout.addWidget(self.cancel_button)
        main_layout.addLayout(button_layout)
        self.ok_button.clicked.connect(self.accept)
        self.cancel_button.clicked.connect(self.reject)
        self.radio_keep_everything.toggled.connect(self.limit_input.setEnabled)

    def _tr(self, key, default_text=""):
        if self.parent_app and callable(get_translation):
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        """Sets the text for UI elements."""
        self.setWindowTitle(self._tr("duplicates_dialog_title", "Duplicate Handling Options"))
        self.info_label.setText(self._tr("duplicates_dialog_info",
                                         "Choose how to handle files that have identical content to already downloaded files."))
        self.options_group.setTitle(self._tr("duplicates_dialog_group_title", "Mode"))

        self.radio_skip_by_hash.setText(self._tr("duplicates_dialog_skip_hash", "Skip by Hash (Recommended)"))
        self.radio_keep_everything.setText(self._tr("duplicates_dialog_keep_all", "Keep Everything"))

        self.limit_label.setText(self._tr("duplicates_limit_label", "Limit:"))
        self.limit_input.setPlaceholderText(self._tr("duplicates_limit_placeholder", "0=all"))
        self.limit_input.setToolTip(self._tr("duplicates_limit_tooltip",
                                             "Set a limit for identical files to keep. 0 means no limit."))

        self.ok_button.setText(self._tr("ok_button", "OK"))
        self.cancel_button.setText(self._tr("cancel_button_text_simple", "Cancel"))

    def accept(self):
        """Sets the selected mode and limit when OK is clicked."""
        if self.radio_keep_everything.isChecked():
            self.selected_mode = DUPLICATE_HANDLING_KEEP_ALL
            try:
                self.limit = int(self.limit_input.text()) if self.limit_input.text() else 0
            except ValueError:
                self.limit = 0
        else:
            self.selected_mode = DUPLICATE_HANDLING_HASH
            self.limit = 0
        super().accept()

    def get_selected_options(self):
        """Returns the chosen mode and limit as a dictionary."""
        return {"mode": self.selected_mode, "limit": self.limit}
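A minimal usage sketch for KeepDuplicatesDialog (app_window is an assumed main-window reference):

dlg = KeepDuplicatesDialog(DUPLICATE_HANDLING_HASH, 0, parent=app_window)
if dlg.exec_() == QDialog.Accepted:
    opts = dlg.get_selected_options()  # e.g. {"mode": DUPLICATE_HANDLING_KEEP_ALL, "limit": 3}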
src/ui/dialogs/KnownNamesFilterDialog.py (new file, 158 lines)
@@ -0,0 +1,158 @@
# --- PyQt5 Imports ---
from PyQt5.QtCore import Qt
from PyQt5.QtWidgets import (
    QApplication, QDialog, QHBoxLayout, QLabel, QLineEdit, QListWidget,
    QListWidgetItem, QPushButton, QVBoxLayout
)

# --- Local Application Imports ---
from ...i18n.translator import get_translation
from ..main_window import get_app_icon_object
from ...utils.resolution import get_dark_theme


class KnownNamesFilterDialog(QDialog):
    """
    A dialog to select names from the Known.txt list to add to the main
    character filter input field. This provides a convenient way for users
    to reuse their saved names and groups for filtering downloads.
    """

    def __init__(self, known_names_list, parent_app_ref, parent=None):
        """
        Initializes the dialog.

        Args:
            known_names_list (list): A list of known name objects (dicts) from Known.txt.
            parent_app_ref (DownloaderApp): A reference to the main application window.
            parent (QWidget, optional): The parent widget. Defaults to None.
        """
        super().__init__(parent)
        self.parent_app = parent_app_ref
        self.setModal(True)
        self.all_known_name_entries = sorted(known_names_list, key=lambda x: x['name'].lower())
        self.selected_entries_to_return = []

        # --- Basic Window Setup ---
        app_icon = get_app_icon_object()
        if app_icon and not app_icon.isNull():
            self.setWindowIcon(app_icon)

        # --- START OF FIX ---
        # Get the user-defined scale factor from the parent application
        # instead of calculating an independent one.
        scale_factor = getattr(self.parent_app, 'scale_factor', 1.0)

        # Define base size and apply the correct scale factor
        base_width, base_height = 460, 450
        self.setMinimumSize(int(base_width * scale_factor), int(base_height * scale_factor))
        self.resize(int(base_width * scale_factor * 1.1), int(base_height * scale_factor * 1.1))
        # --- END OF FIX ---

        # --- Initialize UI and Apply Theming ---
        self._init_ui()
        self._retranslate_ui()
        self._apply_theme()

    def _init_ui(self):
        """Initializes all UI components and layouts for the dialog."""
        main_layout = QVBoxLayout(self)

        self.search_input = QLineEdit()
        self.search_input.textChanged.connect(self._filter_list_display)
        main_layout.addWidget(self.search_input)

        self.names_list_widget = QListWidget()
        self._populate_list_widget()
        main_layout.addWidget(self.names_list_widget)

        # --- Control Buttons ---
        buttons_layout = QHBoxLayout()

        self.select_all_button = QPushButton()
        self.select_all_button.clicked.connect(self._select_all_items)
        buttons_layout.addWidget(self.select_all_button)

        self.deselect_all_button = QPushButton()
        self.deselect_all_button.clicked.connect(self._deselect_all_items)
        buttons_layout.addWidget(self.deselect_all_button)
        buttons_layout.addStretch(1)

        self.add_button = QPushButton()
        self.add_button.clicked.connect(self._accept_selection_action)
        self.add_button.setDefault(True)
        buttons_layout.addWidget(self.add_button)

        self.cancel_button = QPushButton()
        self.cancel_button.clicked.connect(self.reject)
        buttons_layout.addWidget(self.cancel_button)
        main_layout.addLayout(buttons_layout)

    def _tr(self, key, default_text=""):
        """Helper to get translation based on the main application's current language."""
        if callable(get_translation) and self.parent_app:
            return get_translation(self.parent_app.current_selected_language, key, default_text)
        return default_text

    def _retranslate_ui(self):
        """Sets the text for all translatable UI elements."""
        self.setWindowTitle(self._tr("known_names_filter_dialog_title", "Add Known Names to Filter"))
        self.search_input.setPlaceholderText(self._tr("known_names_filter_search_placeholder", "Search names..."))
        self.select_all_button.setText(self._tr("known_names_filter_select_all_button", "Select All"))
        self.deselect_all_button.setText(self._tr("known_names_filter_deselect_all_button", "Deselect All"))
        self.add_button.setText(self._tr("known_names_filter_add_selected_button", "Add Selected"))
        self.cancel_button.setText(self._tr("fav_posts_cancel_button", "Cancel"))

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        if self.parent_app and self.parent_app.current_theme == "dark":
            # Get the scale factor from the parent app
            scale = getattr(self.parent_app, 'scale_factor', 1)
            # Call the imported function with the correct scale
            self.setStyleSheet(get_dark_theme(scale))
        else:
            # Explicitly set a blank stylesheet for light mode
            self.setStyleSheet("")

    def _populate_list_widget(self):
        """Populates the list widget with the known names."""
        self.names_list_widget.clear()
        for entry_obj in self.all_known_name_entries:
            item = QListWidgetItem(entry_obj['name'])
            item.setFlags(item.flags() | Qt.ItemIsUserCheckable)
            item.setCheckState(Qt.Unchecked)
            item.setData(Qt.UserRole, entry_obj)
            self.names_list_widget.addItem(item)

    def _filter_list_display(self):
        """Filters the displayed list based on the search input text."""
        search_text_lower = self.search_input.text().lower()
        for i in range(self.names_list_widget.count()):
            item = self.names_list_widget.item(i)
            entry_obj = item.data(Qt.UserRole)
            matches_search = not search_text_lower or search_text_lower in entry_obj['name'].lower()
            item.setHidden(not matches_search)

    def _select_all_items(self):
        """Checks all visible items in the list widget."""
        for i in range(self.names_list_widget.count()):
            item = self.names_list_widget.item(i)
            if not item.isHidden():
                item.setCheckState(Qt.Checked)

    def _deselect_all_items(self):
        """Unchecks all items in the list widget."""
        for i in range(self.names_list_widget.count()):
            self.names_list_widget.item(i).setCheckState(Qt.Unchecked)

    def _accept_selection_action(self):
        """Gathers the selected entries and accepts the dialog."""
        self.selected_entries_to_return = []
        for i in range(self.names_list_widget.count()):
            item = self.names_list_widget.item(i)
            if item.checkState() == Qt.Checked:
                self.selected_entries_to_return.append(item.data(Qt.UserRole))
        self.accept()

    def get_selected_entries(self):
        """Returns the list of known name entries selected by the user."""
        return self.selected_entries_to_return
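A minimal usage sketch for KnownNamesFilterDialog; the target input field name is an assumption:

dlg = KnownNamesFilterDialog(known_names, app_window)  # app_window assumed
if dlg.exec_() == QDialog.Accepted:
    names = [entry['name'] for entry in dlg.get_selected_entries()]
    app_window.character_input.setText(", ".join(names))  # character_input is a hypothetical field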
src/ui/dialogs/MoreOptionsDialog.py (new file, 96 lines)
@@ -0,0 +1,96 @@
from PyQt5.QtWidgets import (
    QDialog, QVBoxLayout, QRadioButton, QDialogButtonBox, QButtonGroup, QLabel, QComboBox, QHBoxLayout, QCheckBox
)
from PyQt5.QtCore import Qt
from ...utils.resolution import get_dark_theme


class MoreOptionsDialog(QDialog):
    """
    A dialog for selecting a scope, export format, and single PDF option.
    """
    SCOPE_CONTENT = "content"
    SCOPE_COMMENTS = "comments"

    def __init__(self, parent=None, current_scope=None, current_format=None, single_pdf_checked=False):
        super().__init__(parent)
        self.parent_app = parent
        self.setWindowTitle("More Options")
        self.setMinimumWidth(350)

        # ... (Layout and other widgets remain the same) ...

        layout = QVBoxLayout(self)
        self.description_label = QLabel("Please choose the scope for the action:")
        layout.addWidget(self.description_label)
        self.radio_button_group = QButtonGroup(self)
        self.radio_content = QRadioButton("Description/Content")
        self.radio_comments = QRadioButton("Comments")
        self.radio_button_group.addButton(self.radio_content)
        self.radio_button_group.addButton(self.radio_comments)
        layout.addWidget(self.radio_content)
        layout.addWidget(self.radio_comments)

        if current_scope == self.SCOPE_COMMENTS:
            self.radio_comments.setChecked(True)
        else:
            self.radio_content.setChecked(True)

        export_layout = QHBoxLayout()
        export_label = QLabel("Export as:")
        self.format_combo = QComboBox()
        self.format_combo.addItems(["PDF", "DOCX", "TXT"])

        if current_format and current_format.upper() in ["PDF", "DOCX", "TXT"]:
            self.format_combo.setCurrentText(current_format.upper())
        else:
            self.format_combo.setCurrentText("PDF")

        export_layout.addWidget(export_label)
        export_layout.addWidget(self.format_combo)
        export_layout.addStretch()
        layout.addLayout(export_layout)

        # --- UPDATED: Single PDF Checkbox ---
        self.single_pdf_checkbox = QCheckBox("Single PDF")
        self.single_pdf_checkbox.setToolTip("If checked, all text from matching posts will be compiled into one single PDF file.")
        self.single_pdf_checkbox.setChecked(single_pdf_checked)
        layout.addWidget(self.single_pdf_checkbox)

        self.format_combo.currentTextChanged.connect(self.update_single_pdf_checkbox_state)
        self.update_single_pdf_checkbox_state(self.format_combo.currentText())

        self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)
        layout.addWidget(self.button_box)
        self.setLayout(layout)
        self._apply_theme()

    def update_single_pdf_checkbox_state(self, text):
        """Enable the Single PDF checkbox only if the format is PDF."""
        is_pdf = (text.upper() == "PDF")
        self.single_pdf_checkbox.setEnabled(is_pdf)
        if not is_pdf:
            self.single_pdf_checkbox.setChecked(False)

    def get_selected_scope(self):
        if self.radio_comments.isChecked():
            return self.SCOPE_COMMENTS
        return self.SCOPE_CONTENT

    def get_selected_format(self):
        return self.format_combo.currentText().lower()

    def get_single_pdf_state(self):
        """Returns the state of the Single PDF checkbox."""
        return self.single_pdf_checkbox.isChecked() and self.single_pdf_checkbox.isEnabled()

    def _apply_theme(self):
        """Applies the current theme from the parent application."""
        if self.parent_app and self.parent_app.current_theme == "dark":
            # Get the scale factor from the parent app
            scale = getattr(self.parent_app, 'scale_factor', 1)
            # Call the imported function with the correct scale
            self.setStyleSheet(get_dark_theme(scale))
        else:
            # Explicitly set a blank stylesheet for light mode
            self.setStyleSheet("")
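A minimal usage sketch for MoreOptionsDialog (app_window is an assumed main-window reference):

dlg = MoreOptionsDialog(app_window, current_scope="content", current_format="pdf", single_pdf_checked=True)
if dlg.exec_() == QDialog.Accepted:
    scope = dlg.get_selected_scope()      # "content" or "comments"
    fmt = dlg.get_selected_format()       # "pdf", "docx", or "txt"
    one_pdf = dlg.get_single_pdf_state()  # True only while the format is PDF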
src/ui/dialogs/MultipartScopeDialog.py (new file, 118 lines)
@@ -0,0 +1,118 @@
# multipart_scope_dialog.py
from PyQt5.QtWidgets import (
    QDialog, QVBoxLayout, QGroupBox, QRadioButton, QDialogButtonBox, QButtonGroup,
    QLabel, QLineEdit, QHBoxLayout, QFrame
)
from PyQt5.QtGui import QIntValidator
from PyQt5.QtCore import Qt

# It's good practice to get this constant from the source,
# but for this example, we will define it here.
MAX_PARTS = 16


class MultipartScopeDialog(QDialog):
    """
    A dialog to let the user select the scope, number of parts, and minimum size for multipart downloads.
    """
    SCOPE_VIDEOS = 'videos'
    SCOPE_ARCHIVES = 'archives'
    SCOPE_BOTH = 'both'

    def __init__(self, current_scope='both', current_parts=4, current_min_size_mb=100, parent=None):
        super().__init__(parent)
        self.setWindowTitle("Multipart Download Options")
        self.setWindowFlags(self.windowFlags() & ~Qt.WindowContextHelpButtonHint)
        self.setMinimumWidth(350)

        # Main Layout
        layout = QVBoxLayout(self)

        # --- Options Group for Scope ---
        self.options_group_box = QGroupBox("Apply multipart downloads to:")
        options_layout = QVBoxLayout()
        # ... (Radio buttons and button group code remains unchanged) ...
        self.radio_videos = QRadioButton("Videos Only")
        self.radio_archives = QRadioButton("Archives Only (.zip, .rar, etc.)")
        self.radio_both = QRadioButton("Both Videos and Archives")

        if current_scope == self.SCOPE_VIDEOS:
            self.radio_videos.setChecked(True)
        elif current_scope == self.SCOPE_ARCHIVES:
            self.radio_archives.setChecked(True)
        else:
            self.radio_both.setChecked(True)

        self.button_group = QButtonGroup(self)
        self.button_group.addButton(self.radio_videos)
        self.button_group.addButton(self.radio_archives)
        self.button_group.addButton(self.radio_both)

        options_layout.addWidget(self.radio_videos)
        options_layout.addWidget(self.radio_archives)
        options_layout.addWidget(self.radio_both)
        self.options_group_box.setLayout(options_layout)
        layout.addWidget(self.options_group_box)

        # --- START: MODIFIED Download Settings Group ---
        self.settings_group_box = QGroupBox("Download settings:")
        settings_layout = QVBoxLayout()

        # Layout for Parts count
        parts_layout = QHBoxLayout()
        self.parts_label = QLabel("Number of download parts per file:")
        self.parts_input = QLineEdit(str(current_parts))
        self.parts_input.setValidator(QIntValidator(2, MAX_PARTS, self))
        self.parts_input.setFixedWidth(40)
        self.parts_input.setToolTip(f"Set the number of concurrent connections per file (2-{MAX_PARTS}).")
        parts_layout.addWidget(self.parts_label)
        parts_layout.addStretch()
        parts_layout.addWidget(self.parts_input)
        settings_layout.addLayout(parts_layout)

        # Layout for Minimum Size
        size_layout = QHBoxLayout()
        self.size_label = QLabel("Minimum file size for multipart (MB):")
        self.size_input = QLineEdit(str(current_min_size_mb))
        self.size_input.setValidator(QIntValidator(10, 10000, self))  # Min 10MB, Max ~10GB
        self.size_input.setFixedWidth(40)
        self.size_input.setToolTip("Files smaller than this will use a normal, single-part download.")
        size_layout.addWidget(self.size_label)
        size_layout.addStretch()
        size_layout.addWidget(self.size_input)
        settings_layout.addLayout(size_layout)

        self.settings_group_box.setLayout(settings_layout)
        layout.addWidget(self.settings_group_box)
        # --- END: MODIFIED Download Settings Group ---

        # OK and Cancel Buttons
        self.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)
        self.button_box.accepted.connect(self.accept)
        self.button_box.rejected.connect(self.reject)
        layout.addWidget(self.button_box)

        self.setLayout(layout)

def get_selected_scope(self):
|
||||
# ... (This method remains unchanged) ...
|
||||
if self.radio_videos.isChecked():
|
||||
return self.SCOPE_VIDEOS
|
||||
if self.radio_archives.isChecked():
|
||||
return self.SCOPE_ARCHIVES
|
||||
return self.SCOPE_BOTH
|
||||
|
||||
def get_selected_parts(self):
|
||||
# ... (This method remains unchanged) ...
|
||||
try:
|
||||
parts = int(self.parts_input.text())
|
||||
return max(2, min(parts, MAX_PARTS))
|
||||
except (ValueError, TypeError):
|
||||
return 4
|
||||
|
||||
def get_selected_min_size(self):
|
||||
"""Returns the selected minimum size in MB as an integer."""
|
||||
try:
|
||||
size = int(self.size_input.text())
|
||||
return max(10, min(size, 10000)) # Enforce valid range
|
||||
except (ValueError, TypeError):
|
||||
return 100 # Return a safe default
|
||||
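A minimal usage sketch, not part of the diff: the dialog carries its state through the three getters, so a caller only needs the accepted result. The `window` parent and the example settings values are assumptions.

    # Hypothetical caller; `window` stands in for the application's main widget.
    dlg = MultipartScopeDialog(current_scope='videos', current_parts=8,
                               current_min_size_mb=250, parent=window)
    if dlg.exec_() == QDialog.Accepted:
        scope = dlg.get_selected_scope()       # 'videos', 'archives', or 'both'
        parts = dlg.get_selected_parts()       # clamped to 2..MAX_PARTS
        min_mb = dlg.get_selected_min_size()   # clamped to 10..10000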
src/ui/dialogs/SinglePDF.py (new file, +110 lines)
@@ -0,0 +1,110 @@
import os
import re

try:
    from fpdf import FPDF
    FPDF_AVAILABLE = True

    # The class is defined inside the try block so that a missing
    # fpdf2 dependency cannot crash the module at import time.
    class PDF(FPDF):
        """Custom PDF class to handle headers and footers."""
        def header(self):
            pass

        def footer(self):
            self.set_y(-15)
            if self.font_family:
                self.set_font(self.font_family, '', 8)
            else:
                self.set_font('Arial', '', 8)
            self.cell(0, 10, 'Page ' + str(self.page_no()), 0, 0, 'C')

except ImportError:
    FPDF_AVAILABLE = False
    # If the import fails, FPDF and PDF are stubbed out
    # so the program won't crash here.
    FPDF = None
    PDF = None


def strip_html_tags(text):
    if not text:
        return ""
    clean = re.compile('<.*?>')
    return re.sub(clean, '', text)


def create_single_pdf_from_content(posts_data, output_filename, font_path, logger=print):
    """
    Creates a single, continuous PDF, correctly formatting both descriptions and comments.
    """
    if not FPDF_AVAILABLE:
        logger("❌ PDF creation failed: the 'fpdf2' library is not installed. Please run: pip install fpdf2")
        return False

    if not posts_data:
        logger(" No text content was collected to create a PDF.")
        return False

    pdf = PDF()
    default_font_family = 'DejaVu'

    bold_font_path = ""
    if font_path:
        bold_font_path = font_path.replace("DejaVuSans.ttf", "DejaVuSans-Bold.ttf")

    try:
        if not os.path.exists(font_path):
            raise RuntimeError(f"Font file not found: {font_path}")
        if not os.path.exists(bold_font_path):
            raise RuntimeError(f"Bold font file not found: {bold_font_path}")

        pdf.add_font('DejaVu', '', font_path, uni=True)
        pdf.add_font('DejaVu', 'B', bold_font_path, uni=True)
    except Exception as font_error:
        logger(f" ⚠️ Could not load DejaVu font: {font_error}. Falling back to Arial.")
        default_font_family = 'Arial'

    pdf.add_page()

    logger(f" Starting continuous PDF creation with content from {len(posts_data)} posts...")

    for i, post in enumerate(posts_data):
        if i > 0:
            # Every post after the first starts on its own page.
            pdf.add_page()

        pdf.set_font(default_font_family, 'B', 16)
        pdf.multi_cell(w=0, h=10, txt=post.get('title', 'Untitled Post'), align='L')
        pdf.ln(5)

        if 'comments' in post and post['comments']:
            comments_list = post['comments']
            for comment_index, comment in enumerate(comments_list):
                user = comment.get('commenter_name', 'Unknown User')
                timestamp = comment.get('published', 'No Date')
                body = strip_html_tags(comment.get('content', ''))

                pdf.set_font(default_font_family, '', 10)
                pdf.write(8, "Comment by: ")
                if user is not None:
                    pdf.set_font(default_font_family, 'B', 10)
                    pdf.write(8, str(user))

                pdf.set_font(default_font_family, '', 10)
                pdf.write(8, f" on {timestamp}")
                pdf.ln(10)

                pdf.set_font(default_font_family, '', 11)
                pdf.multi_cell(w=0, h=7, txt=body)

                if comment_index < len(comments_list) - 1:
                    # Thin horizontal rule between consecutive comments.
                    pdf.ln(3)
                    pdf.cell(w=0, h=0, border='T')
                    pdf.ln(3)
        elif 'content' in post:
            pdf.set_font(default_font_family, '', 12)
            pdf.multi_cell(w=0, h=7, txt=post.get('content', 'No Content'))

    try:
        pdf.output(output_filename)
        logger(f"✅ Successfully created single PDF: '{os.path.basename(output_filename)}'")
        return True
    except Exception as e:
        logger(f"❌ A critical error occurred while saving the final PDF: {e}")
        return False
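A minimal usage sketch, not part of the diff: posts_data mirrors the keys the function reads ('title', 'content', 'comments' with 'commenter_name'/'published'/'content'). The font path is an example; note the function expects DejaVuSans-Bold.ttf to sit next to the regular file, since it derives the bold path by string replacement.

    # Hypothetical call; paths and post contents are illustrative only.
    posts = [
        {'title': 'Post 1', 'content': 'Description text...'},
        {'title': 'Post 2', 'comments': [
            {'commenter_name': 'alice', 'published': '2024-01-01',
             'content': '<p>Nice work!</p>'},
        ]},
    ]
    ok = create_single_pdf_from_content(
        posts, 'export.pdf', font_path='fonts/DejaVuSans.ttf'
    )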
src/ui/dialogs/SupportDialog.py (new file, +207 lines)
@@ -0,0 +1,207 @@
# src/ui/dialogs/SupportDialog.py

import sys
import os

from PyQt5.QtWidgets import (
    QDialog, QVBoxLayout, QHBoxLayout, QLabel, QFrame,
    QPushButton, QSizePolicy
)
from PyQt5.QtCore import Qt, QSize, QUrl
from PyQt5.QtGui import QPixmap, QDesktopServices

from ...utils.resolution import get_dark_theme


class SupportDialog(QDialog):
    """
    A polished dialog showcasing support and community options in a
    clean, modern card-based layout.
    """
    def __init__(self, parent=None):
        super().__init__(parent)
        self.parent_app = parent

        self.setWindowTitle("❤️ Support & Community")
        self.setMinimumWidth(560)

        self._init_ui()
        self._apply_theme()

    def _create_card_button(
        self, icon_path, title, subtitle, url,
        hover_color="#2E2E2E", min_height=110, icon_size=44
    ):
        """Reusable clickable card widget with icon, title, and subtitle."""
        button = QPushButton()
        button.setCursor(Qt.PointingHandCursor)
        button.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
        button.setMinimumHeight(min_height)

        # Consistent card styling
        button.setStyleSheet(f"""
            QPushButton {{
                background-color: #3A3A3A;
                border: 1px solid #555;
                border-radius: 10px;
                text-align: center;
                padding: 12px;
            }}
            QPushButton:hover {{
                background-color: {hover_color};
                border: 1px solid #777;
            }}
        """)

        layout = QVBoxLayout(button)
        layout.setSpacing(6)

        # Icon
        icon_label = QLabel()
        pixmap = QPixmap(icon_path)
        if not pixmap.isNull():
            scale = getattr(self.parent_app, 'scale_factor', 1.0)
            scaled_size = int(icon_size * scale)
            icon_label.setPixmap(
                pixmap.scaled(QSize(scaled_size, scaled_size), Qt.KeepAspectRatio, Qt.SmoothTransformation)
            )
        icon_label.setAlignment(Qt.AlignCenter)
        layout.addWidget(icon_label)

        # Title
        title_label = QLabel(title)
        font = self.font()
        font.setPointSize(11)
        font.setBold(True)
        title_label.setFont(font)
        title_label.setAlignment(Qt.AlignCenter)
        title_label.setStyleSheet("background-color: transparent; border: none;")
        layout.addWidget(title_label)

        # Subtitle
        if subtitle:
            subtitle_label = QLabel(subtitle)
            subtitle_label.setStyleSheet("color: #A8A8A8; background-color: transparent; border: none;")
            subtitle_label.setAlignment(Qt.AlignCenter)
            layout.addWidget(subtitle_label)

        button.clicked.connect(lambda: QDesktopServices.openUrl(QUrl(url)))
        return button

    def _create_section_title(self, text):
        """Stylized section heading."""
        label = QLabel(text)
        font = label.font()
        font.setPointSize(13)
        font.setBold(True)
        label.setFont(font)
        label.setAlignment(Qt.AlignCenter)
        label.setStyleSheet("margin-top: 10px; margin-bottom: 5px;")
        return label

    def _init_ui(self):
        main_layout = QVBoxLayout(self)
        main_layout.setSpacing(18)
        main_layout.setContentsMargins(20, 20, 20, 20)

        # Header
        header_label = QLabel("Support the Project")
        font = header_label.font()
        font.setPointSize(17)
        font.setBold(True)
        header_label.setFont(font)
        header_label.setAlignment(Qt.AlignCenter)
        main_layout.addWidget(header_label)

        subtext = QLabel(
            "If you enjoy this application, consider supporting its development. "
            "Your help keeps the project alive and growing!"
        )
        subtext.setWordWrap(True)
        subtext.setAlignment(Qt.AlignCenter)
        main_layout.addWidget(subtext)

        # Financial support
        main_layout.addWidget(self._create_section_title("Contribute Financially"))
        donation_layout = QHBoxLayout()
        donation_layout.setSpacing(15)

        donation_layout.addWidget(self._create_card_button(
            get_asset_path("ko-fi.png"), "Ko-fi", "One-time",
            "https://ko-fi.com/yuvi427183", "#2B2F36"
        ))
        donation_layout.addWidget(self._create_card_button(
            get_asset_path("patreon.png"), "Patreon", "Soon",
            "https://www.patreon.com/Yuvi102", "#3A2E2B"
        ))
        donation_layout.addWidget(self._create_card_button(
            get_asset_path("buymeacoffee.png"), "Buy Me a Coffee", "One-time",
            "https://buymeacoffee.com/yuvi9587", "#403520"
        ))
        main_layout.addLayout(donation_layout)

        # Separator
        line = QFrame()
        line.setFrameShape(QFrame.HLine)
        line.setFrameShadow(QFrame.Sunken)
        main_layout.addWidget(line)

        # Community section
        main_layout.addWidget(self._create_section_title("Get Help & Connect"))
        community_layout = QHBoxLayout()
        community_layout.setSpacing(15)

        community_layout.addWidget(self._create_card_button(
            get_asset_path("github.png"), "GitHub", "Report issues",
            "https://github.com/Yuvi9587/Kemono-Downloader", "#2E2E2E",
            min_height=100, icon_size=36
        ))
        community_layout.addWidget(self._create_card_button(
            get_asset_path("discord.png"), "Discord", "Join the server",
            "https://discord.gg/BqP64XTdJN", "#2C2F33",
            min_height=100, icon_size=36
        ))
        community_layout.addWidget(self._create_card_button(
            get_asset_path("instagram.png"), "Instagram", "Follow me",
            "https://www.instagram.com/uvi.arts/", "#3B2E40",
            min_height=100, icon_size=36
        ))
        main_layout.addLayout(community_layout)

        # Close button
        close_button = QPushButton("Close")
        close_button.setMinimumWidth(100)
        close_button.clicked.connect(self.accept)
        close_button.setStyleSheet("""
            QPushButton {
                padding: 6px 14px;
                border-radius: 6px;
                background-color: #444;
                color: white;
            }
            QPushButton:hover {
                background-color: #555;
            }
        """)

        button_layout = QHBoxLayout()
        button_layout.addStretch()
        button_layout.addWidget(close_button)
        button_layout.addStretch()
        main_layout.addLayout(button_layout)

    def _apply_theme(self):
        if self.parent_app and hasattr(self.parent_app, 'current_theme') and self.parent_app.current_theme == "dark":
            scale = getattr(self.parent_app, 'scale_factor', 1)
            self.setStyleSheet(get_dark_theme(scale))
        else:
            self.setStyleSheet("")


def get_asset_path(filename):
    """Return the path to an asset; works in both dev and packaged environments."""
    if getattr(sys, 'frozen', False) and hasattr(sys, '_MEIPASS'):
        base_path = sys._MEIPASS
    else:
        base_path = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..', '..'))
    return os.path.join(base_path, 'assets', filename)
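A minimal usage sketch, not part of the diff: the dialog reads `current_theme` and `scale_factor` off its parent if they exist, so any top-level widget works as a parent. The `main_window` name is an assumption.

    # Hypothetical caller; `main_window` stands in for the app's main window.
    dialog = SupportDialog(parent=main_window)
    dialog.exec_()

    # get_asset_path resolves against sys._MEIPASS when frozen (PyInstaller),
    # otherwise against the repository root three levels above this module.
    icon = get_asset_path("github.png")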