Compare commits


73 Commits

Author SHA1 Message Date
Ricter Zheng
6752edfc9d
Merge pull request #402 from hzxjy1/zipTest
Close zipfile hander manually and add a test
2025-03-26 22:57:29 +08:00
Ricter Zheng
9a5fcd7d23
Merge pull request #401 from hzxjy1/NoneType
Fix none attributes in session headers
2025-03-26 22:56:11 +08:00
Hellagur4225
b4cc498a5f add a test for zipfile download 2025-03-26 15:14:15 +08:00
Hellagur4225
a4eb7f3b5f fix the uncontrollable zipfile closing function 2025-03-26 15:11:26 +08:00
Hellagur4225
36aa321ade Fix none attributes in session headers 2025-03-24 10:13:42 +08:00
Ricter Zheng
aa84b57a43 use argparse, fix #396 2025-03-12 02:50:22 +08:00
ricterz
a3c70a0c30 fix #396 2025-03-11 22:23:17 +08:00
Ricter Zheng
86060ae0a6
Merge pull request #398 from hzxjy1/zipfile
feat: add compress option
2025-03-11 22:04:09 +08:00
ricterz
9648c21b32 tiny fix of #397 2025-03-11 22:02:37 +08:00
Hellagur4225
625feb5d21 Remove unused option 2025-03-08 17:37:42 +08:00
Hellagur4225
6efbc73c10 feat: add compress option 2025-03-08 17:31:56 +08:00
ricterz
34c1ea8952 new feature #396 2025-02-28 18:59:32 +08:00
ricterz
2e895d8d0f fix title #396 2025-02-28 18:24:56 +08:00
ricterz
0c9b92ce10 0.6.0-beta #394 2025-02-28 00:17:05 +08:00
ricterz
ca71a72747 fix #395 2025-02-27 22:07:40 +08:00
ricterz
1b7f19ee18 0.5.25, fix #393 2025-02-26 00:13:41 +08:00
ricterz
132f4c83da Merge branch 'master' of github.com:RicterZ/nhentai 2025-02-26 00:12:49 +08:00
ricterz
6789b2b363 fix bug of cover.webp.webp 2025-02-25 23:51:13 +08:00
Ricter Zheng
a6ac725ca7
Merge pull request #392 from akakishi/master
Update installation instructions in README.rst
2025-02-23 20:29:15 +08:00
akakishi
b32962bca4
Update README.rst
File `setup.py` was removed in a previous commit; updated README to reflect the new installation process.
2025-02-23 01:18:54 -03:00
ricterz
8a7be0e33d 0.5.24 2025-02-09 20:16:44 +08:00
ricterz
0a47527461 optimize logger output #390 2025-02-09 20:15:17 +08:00
ricterz
023c8969eb add global retry for search, download, fetch favorites 2025-02-09 20:02:52 +08:00
ricterz
29c3abbe5c Merge branch 'master' of github.com:RicterZ/nhentai 2025-02-08 16:21:08 +08:00
ricterzheng
057fae8a83 0.5.23 2025-02-03 15:47:51 +08:00
ricterzheng
248d31edf0 get favorite count #386 even if not login 2025-02-03 15:45:39 +08:00
ricterzheng
4bfe0de078 0.5.22 2025-02-03 15:29:34 +08:00
ricterzheng
780a6c82b2 split metadata.json out from html generate function #386 2025-02-03 15:26:14 +08:00
ricterzheng
8791e7af55 update README to fix #367 2025-02-03 14:53:09 +08:00
ricterzheng
b434c4d58d 0.5.21 2025-02-03 14:34:14 +08:00
ricterzheng
fc69f94505 add --no-filename-padding options to fix #381 2025-01-29 22:59:28 +08:00
ricterzheng
571fba2259 fix RequestsDependencyWarning 2025-01-29 22:46:11 +08:00
ricterzheng
fa977fee04 0.5.20 2025-01-29 00:31:40 +08:00
ricterzheng
58b5ec4211 fix #382 2025-01-28 17:43:50 +08:00
Ricter Zheng
5ad416efa6
Merge pull request #380 from sgqy/master 2025-01-27 06:58:36 +08:00
sgqy
d90fd871ef fix: failure chain 2025-01-26 22:38:50 +09:00
sgqy
c7ff5c2c5c build: switch to pyproject 2025-01-26 21:45:55 +09:00
Ricter Zheng
4ab43dae24
Merge pull request #378 from bill88t/master 2025-01-24 04:36:21 +08:00
Bill Sideris
04bd88a1f7
fix: python-httpx 0.28 2025-01-23 21:16:07 +02:00
ricterz
ba59dcf4db add up/down arrow 2025-01-16 22:40:53 +08:00
ricterz
a83c571ec4 0.5.19 2025-01-15 19:47:24 +08:00
Ricter Zheng
e7ff5dab3d
Merge pull request #373 from nicojust/fix-favorite-metadata-output
fix favorite_counts output in metadata
2025-01-15 12:26:24 +08:00
Ricter Zheng
a166898b60
fix #374 2025-01-15 12:26:01 +08:00
Nekwo
ce25051fa3
fix: output favorite_counts as an int 2025-01-13 19:51:40 +01:00
Nekwo
41fba6b5ac
fix: add missing favorite_counts in metadata file 2025-01-13 19:51:04 +01:00
ricterz
8944ece4a8 use os.path.sep as path separator 2025-01-11 08:48:43 +08:00
ricterz
6b4c4bdc70 0.5.18 2025-01-11 08:35:40 +08:00
ricterz
d1d0c22af8 fix #349 2025-01-11 08:34:30 +08:00
ricterz
803957ba88 fix #349 2025-01-11 08:33:59 +08:00
ricterz
13b584a820 fix #371 and #324 2025-01-11 08:02:36 +08:00
ricterz
be08fcf4cb fix #368 2025-01-11 07:54:28 +08:00
ricterz
b585225308 fix #370 2025-01-11 07:52:51 +08:00
ricterz
54af682848 fix #369 2025-01-11 07:50:41 +08:00
ricterz
d74fd103f0 remove setup.py 2025-01-08 09:35:44 +08:00
ricterz
0cb2411955 Merge branch 'master' of github.com:RicterZ/nhentai 2025-01-08 09:17:01 +08:00
Ricter Zheng
de08d3daaa 0.5.17.1 2025-01-07 14:26:38 +08:00
ricterz
946b85ace9 tiny fix 2024-12-21 09:32:33 +08:00
ricterz
5bde24f159 remove debug print 2024-12-21 09:18:34 +08:00
ricterz
3cae13e76f fix #363 2024-12-18 23:37:00 +08:00
ricterz
7483b8f923 workaround of #359 2024-12-11 23:58:48 +08:00
ricterz
eae42c8eb5 fix #356 2024-12-11 23:57:01 +08:00
ricterz
b841747761 fix #356 2024-12-11 23:47:48 +08:00
Ricter Zheng
1f3528afad try to fix #361 2024-12-09 14:36:44 +08:00
Ricter Zheng
bb41e502c1 0.5.17 for fix #360 2024-12-09 09:26:33 +08:00
Ricter Zheng
7089144ac6 fix #360 #359 2024-12-09 09:25:40 +08:00
Ricter Zheng
0a9f7c3d3e 0.5.15 fix some bugs 2024-12-04 11:04:04 +08:00
Ricter Zheng
40536ad456 Merge branch 'master' of github.com:RicterZ/nhentai 2024-12-04 11:03:48 +08:00
Ricter Zheng
edb571c9dd fix #358 2024-12-04 11:00:50 +08:00
Ricter Zheng
b2befd3473
Merge pull request #357 from FelixJS123/favorite_metadata
add favorites count metadata
2024-12-04 10:47:32 +08:00
Ricter Zheng
c2e880f172 fix asyncio proxies settings and update httpx version 2024-12-04 10:46:45 +08:00
FelixJS
841988bc29 Updated README 2024-11-30 22:58:54 -08:00
FelixJS
390948e252 add favorites count metadata 2024-11-30 22:53:45 -08:00
Ricter Zheng
b9b8468bfe 0.5.14 2024-12-01 10:37:59 +08:00
22 changed files with 734 additions and 477 deletions

View File

@@ -1,4 +1,5 @@
 .git
+.github
 .gitignore
 venv
 *.egg-info

.gitignore
View File

@@ -9,3 +9,4 @@ output/
 venv/
 .vscode/
 test-output
+*.whl

View File

@@ -1,11 +1,9 @@
 FROM python:3
 WORKDIR /usr/src/nhentai
-COPY requirements.txt ./
-RUN pip install --no-cache-dir -r requirements.txt
 COPY . .
-RUN python setup.py install
+RUN pip install --no-cache-dir .
 WORKDIR /output
 ENTRYPOINT ["nhentai"]

View File

@@ -1,5 +0,0 @@
-include README.rst
-include requirements.txt
-include nhentai/viewer/*
-include nhentai/viewer/default/*
-include nhentai/viewer/minimal/*

View File

@@ -22,7 +22,7 @@ From Github:
     git clone https://github.com/RicterZ/nhentai
     cd nhentai
-    python setup.py install
+    pip install --no-cache-dir .
 Build Docker container:
@@ -59,7 +59,7 @@ On Gentoo Linux:
 .. code-block::
-    layman -fa glicOne
+    layman -fa glibOne
     sudo emerge net-misc/nhentai
 On NixOS:
@@ -129,17 +129,20 @@ Download your favorites with delay:
 .. code-block:: bash
-    nhentai --favorites --download --delay 1
+    nhentai --favorites --download --delay 1 --page 3-5,7
 Format output doujinshi folder name:
 .. code-block:: bash
     nhentai --id 261100 --format '[%i]%s'
+    # for Windows
+    nhentai --id 261100 --format "[%%i]%%s"
 Supported doujinshi folder formatter:
 - %i: Doujinshi id
+- %f: Doujinshi favorite count
 - %t: Doujinshi name
 - %s: Doujinshi subtitle (translated name)
 - %a: Doujinshi authors' name
@@ -147,6 +150,7 @@ Supported doujinshi folder formatter:
 - %p: Doujinshi pretty name
 - %ag: Doujinshi authors name or groups name
+Note: for Windows operation system, please use double "%", such as "%%i".
 Other options:
@@ -184,6 +188,8 @@ Other options:
                         timeout for downloading doujinshi
   -d DELAY, --delay=DELAY
                         slow down between downloading every doujinshi
+  --retry=RETRY         retry times when downloading failed
+  --exit-on-fail        exit on fail to prevent generating incomplete files
   --proxy=PROXY         store a proxy, for example: -p "http://127.0.0.1:1080"
   -f FILE, --file=FILE  read gallery IDs from file.
   --format=NAME_FORMAT  format the saved folder name

View File

@@ -1,3 +1,3 @@
-__version__ = '0.5.12'
+__version__ = '0.6.0-beta'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

View File

@ -6,11 +6,12 @@ import json
import nhentai.constant as constant import nhentai.constant as constant
from urllib.parse import urlparse from urllib.parse import urlparse
from optparse import OptionParser from argparse import ArgumentParser
from nhentai import __version__ from nhentai import __version__
from nhentai.utils import generate_html, generate_main_html, DB from nhentai.utils import generate_html, generate_main_html, DB, EXTENSIONS
from nhentai.logger import logger from nhentai.logger import logger
from nhentai.constant import PATH_SEPARATOR
def banner(): def banner():
@ -37,7 +38,7 @@ def write_config():
f.write(json.dumps(constant.CONFIG)) f.write(json.dumps(constant.CONFIG))
def callback(option, opt_str, value, parser): def callback(option, _opt_str, _value, parser):
if option == '--id': if option == '--id':
pass pass
value = [] value = []
@ -56,93 +57,133 @@ def callback(option, opt_str, value, parser):
def cmd_parser(): def cmd_parser():
load_config() load_config()
parser = OptionParser('\n nhentai --search [keyword] --download' parser = ArgumentParser(
'\n NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]' description='\n nhentai --search [keyword] --download'
'\n nhentai --file [filename]' '\n NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]'
'\n\nEnvironment Variable:\n' '\n nhentai --file [filename]'
' NHENTAI nhentai mirror url') '\n\nEnvironment Variable:\n'
' NHENTAI nhentai mirror url'
)
# operation options # operation options
parser.add_option('--download', '-D', dest='is_download', action='store_true', parser.add_argument('--download', '-D', dest='is_download', action='store_true',
help='download doujinshi (for search results)') help='download doujinshi (for search results)')
parser.add_option('--show', '-S', dest='is_show', action='store_true', help='just show the doujinshi information') parser.add_argument('--no-download', dest='no_download', action='store_true', default=False,
help='download doujinshi (for search results)')
parser.add_argument('--show', '-S', dest='is_show', action='store_true',
help='just show the doujinshi information')
# doujinshi options # doujinshi options
parser.add_option('--id', dest='id', action='callback', callback=callback, parser.add_argument('--id', dest='id', nargs='+', type=int,
help='doujinshi ids set, e.g. 167680 167681 167682') help='doujinshi ids set, e.g. 167680 167681 167682')
parser.add_option('--search', '-s', type='string', dest='keyword', action='store', parser.add_argument('--search', '-s', type=str, dest='keyword',
help='search doujinshi by keyword') help='search doujinshi by keyword')
parser.add_option('--favorites', '-F', action='store_true', dest='favorites', parser.add_argument('--favorites', '-F', action='store_true', dest='favorites',
help='list or download your favorites') help='list or download your favorites')
parser.add_option('--artist', '-a', action='store', dest='artist', parser.add_argument('--artist', '-a', type=str, dest='artist',
help='list doujinshi by artist name') help='list doujinshi by artist name')
# page options # page options
parser.add_option('--page-all', dest='page_all', action='store_true', default=False, parser.add_argument('--page-all', dest='page_all', action='store_true', default=False,
help='all search results') help='all search results')
parser.add_option('--page', '--page-range', type='string', dest='page', action='store', default='1', parser.add_argument('--page', '--page-range', type=str, dest='page',
help='page number of search results. e.g. 1,2-5,14') help='page number of search results. e.g. 1,2-5,14')
parser.add_option('--sorting', '--sort', dest='sorting', action='store', default='popular', parser.add_argument('--sorting', '--sort', dest='sorting', type=str, default='popular',
help='sorting of doujinshi (recent / popular / popular-[today|week])', help='sorting of doujinshi (recent / popular / popular-[today|week])',
choices=['recent', 'popular', 'popular-today', 'popular-week', 'date']) choices=['recent', 'popular', 'popular-today', 'popular-week', 'date'])
# download options # download options
parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='./', parser.add_argument('--output', '-o', type=str, dest='output_dir', default='.',
help='output dir') help='output dir')
parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5, parser.add_argument('--threads', '-t', type=int, dest='threads', default=5,
help='thread count for downloading doujinshi') help='thread count for downloading doujinshi')
parser.add_option('--timeout', '-T', type='int', dest='timeout', action='store', default=30, parser.add_argument('--timeout', '-T', type=int, dest='timeout', default=30,
help='timeout for downloading doujinshi') help='timeout for downloading doujinshi')
parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0, parser.add_argument('--delay', '-d', type=int, dest='delay', default=0,
help='slow down between downloading every doujinshi') help='slow down between downloading every doujinshi')
parser.add_option('--proxy', type='string', dest='proxy', action='store', parser.add_argument('--retry', type=int, dest='retry', default=3,
help='store a proxy, for example: -p "http://127.0.0.1:1080"') help='retry times when downloading failed')
parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.') parser.add_argument('--exit-on-fail', dest='exit_on_fail', action='store_true', default=False,
parser.add_option('--format', type='string', dest='name_format', action='store', help='exit on fail to prevent generating incomplete files')
help='format the saved folder name', default='[%i][%a][%t]') parser.add_argument('--proxy', type=str, dest='proxy',
parser.add_option('--dry-run', action='store_true', dest='dryrun', help='Dry run, skip file download') help='store a proxy, for example: -p "http://127.0.0.1:1080"')
parser.add_argument('--file', '-f', type=str, dest='file',
help='read gallery IDs from file.')
parser.add_argument('--format', type=str, dest='name_format', default='[%i][%a][%t]',
help='format the saved folder name')
parser.add_argument('--no-filename-padding', action='store_true', dest='no_filename_padding',
default=False, help='no padding in the images filename, such as \'001.jpg\'')
# generate options # generate options
parser.add_option('--html', dest='html_viewer', action='store_true', parser.add_argument('--html', dest='html_viewer', type=str, nargs='?', const='.',
help='generate a html viewer at current directory') help='generate an HTML viewer in the specified directory, or scan all subfolders '
parser.add_option('--no-html', dest='is_nohtml', action='store_true', 'within the entire directory to generate the HTML viewer. By default, current '
help='don\'t generate HTML after downloading') 'working directory is used.')
parser.add_option('--gen-main', dest='main_viewer', action='store_true', parser.add_argument('--no-html', dest='is_nohtml', action='store_true',
help='generate a main viewer contain all the doujin in the folder') help='don\'t generate HTML after downloading')
parser.add_option('--cbz', '-C', dest='is_cbz', action='store_true', parser.add_argument('--gen-main', dest='main_viewer', action='store_true',
help='generate Comic Book CBZ File') help='generate a main viewer contain all the doujin in the folder')
parser.add_option('--pdf', '-P', dest='is_pdf', action='store_true', parser.add_argument('--cbz', '-C', dest='is_cbz', action='store_true',
help='generate PDF file') help='generate Comic Book CBZ File')
parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False, parser.add_argument('--pdf', '-P', dest='is_pdf', action='store_true',
help='remove downloaded doujinshi dir when generated CBZ or PDF file') help='generate PDF file')
parser.add_option('--move-to-folder', dest='move_to_folder', action='store_true', default=False,
help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file') parser.add_argument('--meta', dest='generate_metadata', action='store_true', default=False,
parser.add_option('--meta', dest='generate_metadata', action='store_true', help='generate a metadata file in doujinshi format')
help='generate a metadata file in doujinshi format') parser.add_argument('--update-meta', dest='update_metadata', action='store_true', default=False,
parser.add_option('--regenerate', dest='regenerate', action='store_true', default=False, help='update the metadata file of a doujinshi, update CBZ metadata if exists')
help='regenerate the cbz or pdf file if exists')
parser.add_argument('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
help='remove downloaded doujinshi dir when generated CBZ or PDF file')
parser.add_argument('--move-to-folder', dest='move_to_folder', action='store_true', default=False,
help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file')
parser.add_argument('--regenerate', dest='regenerate', action='store_true', default=False,
help='regenerate the cbz or pdf file if exists')
parser.add_argument('--zip', action='store_true', help='Package into a single zip file')
# nhentai options # nhentai options
parser.add_option('--cookie', type='str', dest='cookie', action='store', parser.add_argument('--cookie', type=str, dest='cookie',
help='set cookie of nhentai to bypass Cloudflare captcha') help='set cookie of nhentai to bypass Cloudflare captcha')
parser.add_option('--useragent', '--user-agent', type='str', dest='useragent', action='store', parser.add_argument('--useragent', '--user-agent', type=str, dest='useragent',
help='set useragent to bypass Cloudflare captcha') help='set useragent to bypass Cloudflare captcha')
parser.add_option('--language', type='str', dest='language', action='store', parser.add_argument('--language', type=str, dest='language',
help='set default language to parse doujinshis') help='set default language to parse doujinshis')
parser.add_option('--clean-language', dest='clean_language', action='store_true', default=False, parser.add_argument('--clean-language', dest='clean_language', action='store_true', default=False,
help='set DEFAULT as language to parse doujinshis') help='set DEFAULT as language to parse doujinshis')
parser.add_option('--save-download-history', dest='is_save_download_history', action='store_true', parser.add_argument('--save-download-history', dest='is_save_download_history', action='store_true',
default=False, help='save downloaded doujinshis, whose will be skipped if you re-download them') default=False, help='save downloaded doujinshis, whose will be skipped if you re-download them')
parser.add_option('--clean-download-history', action='store_true', default=False, dest='clean_download_history', parser.add_argument('--clean-download-history', action='store_true', default=False, dest='clean_download_history',
help='clean download history') help='clean download history')
parser.add_option('--template', dest='viewer_template', action='store', parser.add_argument('--template', dest='viewer_template', type=str, default='',
help='set viewer template', default='') help='set viewer template')
parser.add_option('--legacy', dest='legacy', action='store_true', default=False, parser.add_argument('--legacy', dest='legacy', action='store_true', default=False,
help='use legacy searching method') help='use legacy searching method')
args, _ = parser.parse_args(sys.argv[1:]) args = parser.parse_args()
if args.html_viewer: if args.html_viewer:
generate_html(template=constant.CONFIG['template']) if not os.path.exists(args.html_viewer):
logger.error(f'Path \'{args.html_viewer}\' not exists')
sys.exit(1)
for root, dirs, files in os.walk(args.html_viewer):
if not dirs:
generate_html(output_dir=args.html_viewer, template=constant.CONFIG['template'])
sys.exit(0)
for dir_name in dirs:
# it will scan the entire subdirectories
doujinshi_dir = os.path.join(root, dir_name)
items = set(map(lambda s: os.path.splitext(s)[1], os.listdir(doujinshi_dir)))
# skip directory without any images
if items & set(EXTENSIONS):
generate_html(output_dir=doujinshi_dir, template=constant.CONFIG['template'])
sys.exit(0)
sys.exit(0) sys.exit(0)
if args.main_viewer and not args.id and not args.keyword and not args.favorites: if args.main_viewer and not args.id and not args.keyword and not args.favorites:
@ -158,22 +199,24 @@ def cmd_parser():
# --- set config --- # --- set config ---
if args.cookie is not None: if args.cookie is not None:
constant.CONFIG['cookie'] = args.cookie constant.CONFIG['cookie'] = args.cookie.strip()
write_config() write_config()
logger.info('Cookie saved.') logger.info('Cookie saved.')
sys.exit(0)
elif args.useragent is not None: if args.useragent is not None:
constant.CONFIG['useragent'] = args.useragent constant.CONFIG['useragent'] = args.useragent.strip()
write_config() write_config()
logger.info('User-Agent saved.') logger.info('User-Agent saved.')
sys.exit(0)
elif args.language is not None: if args.language is not None:
constant.CONFIG['language'] = args.language constant.CONFIG['language'] = args.language
write_config() write_config()
logger.info(f'Default language now set to "{args.language}"') logger.info(f'Default language now set to "{args.language}"')
sys.exit(0)
# TODO: search without language # TODO: search without language
if any([args.cookie, args.useragent, args.language]):
sys.exit(0)
if args.proxy is not None: if args.proxy is not None:
proxy_url = urlparse(args.proxy) proxy_url = urlparse(args.proxy)
if not args.proxy == '' and proxy_url.scheme not in ('http', 'https', 'socks5', 'socks5h', if not args.proxy == '' and proxy_url.scheme not in ('http', 'https', 'socks5', 'socks5h',
@ -181,10 +224,7 @@ def cmd_parser():
logger.error(f'Invalid protocol "{proxy_url.scheme}" of proxy, ignored') logger.error(f'Invalid protocol "{proxy_url.scheme}" of proxy, ignored')
sys.exit(0) sys.exit(0)
else: else:
constant.CONFIG['proxy'] = { constant.CONFIG['proxy'] = args.proxy
'http': args.proxy,
'https': args.proxy,
}
logger.info(f'Proxy now set to "{args.proxy}"') logger.info(f'Proxy now set to "{args.proxy}"')
write_config() write_config()
sys.exit(0) sys.exit(0)
@ -229,8 +269,4 @@ def cmd_parser():
logger.critical('Maximum number of used threads is 15') logger.critical('Maximum number of used threads is 15')
sys.exit(1) sys.exit(1)
if args.dryrun and (args.is_cbz or args.is_pdf):
logger.critical('Cannot generate PDF or CBZ during dry-run')
sys.exit(1)
return args return args
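
The hunk above moves option parsing from optparse to argparse; the old `--id` callback becomes plain `nargs='+'` handling. A minimal, hypothetical sketch of that pattern (only a few of the real options, with invented sample input):

```python
from argparse import ArgumentParser

# Minimal sketch of the optparse -> argparse migration shown above (subset of options).
parser = ArgumentParser(description='nhentai --search [keyword] --download')

# optparse needed a custom callback to collect several ids; argparse does it natively.
parser.add_argument('--id', dest='id', nargs='+', type=int,
                    help='doujinshi ids set, e.g. 167680 167681 167682')
parser.add_argument('--download', '-D', dest='is_download', action='store_true',
                    help='download doujinshi (for search results)')
parser.add_argument('--retry', type=int, dest='retry', default=3,
                    help='retry times when downloading failed')

# Invented sample input, just to show the resulting namespace.
args = parser.parse_args(['--id', '167680', '167681', '--download'])
print(args.id)           # [167680, 167681]
print(args.is_download)  # True
print(args.retry)        # 3
```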

View File

@ -7,13 +7,13 @@ import platform
import urllib3.exceptions import urllib3.exceptions
from nhentai import constant from nhentai import constant
from nhentai.cmdline import cmd_parser, banner from nhentai.cmdline import cmd_parser, banner, write_config
from nhentai.parser import doujinshi_parser, search_parser, legacy_search_parser, print_doujinshi, favorites_parser from nhentai.parser import doujinshi_parser, search_parser, legacy_search_parser, print_doujinshi, favorites_parser
from nhentai.doujinshi import Doujinshi from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader from nhentai.downloader import Downloader, CompressedDownloader
from nhentai.logger import logger from nhentai.logger import logger
from nhentai.constant import BASE_URL from nhentai.constant import BASE_URL
from nhentai.utils import generate_html, generate_doc, generate_main_html, generate_metadata_file, \ from nhentai.utils import generate_html, generate_doc, generate_main_html, generate_metadata, \
paging, check_cookie, signal_handler, DB, move_to_folder paging, check_cookie, signal_handler, DB, move_to_folder
@ -28,8 +28,13 @@ def main():
logger.info(f'Using mirror: {BASE_URL}') logger.info(f'Using mirror: {BASE_URL}')
# CONFIG['proxy'] will be changed after cmd_parser() # CONFIG['proxy'] will be changed after cmd_parser()
if constant.CONFIG['proxy']['http']: if constant.CONFIG['proxy']:
logger.info(f'Using proxy: {constant.CONFIG["proxy"]["http"]}') if isinstance(constant.CONFIG['proxy'], dict):
constant.CONFIG['proxy'] = constant.CONFIG['proxy'].get('http', '')
logger.warning(f'Update proxy config to: {constant.CONFIG["proxy"]}')
write_config()
logger.info(f'Using proxy: {constant.CONFIG["proxy"]}')
if not constant.CONFIG['template']: if not constant.CONFIG['template']:
constant.CONFIG['template'] = 'default' constant.CONFIG['template'] = 'default'
@ -44,11 +49,14 @@ def main():
page_list = paging(options.page) page_list = paging(options.page)
if options.retry:
constant.RETRY_TIMES = int(options.retry)
if options.favorites: if options.favorites:
if not options.is_download: if not options.is_download:
logger.warning('You do not specify --download option') logger.warning('You do not specify --download option')
doujinshis = favorites_parser() if options.page_all else favorites_parser(page=page_list) doujinshis = favorites_parser(page=page_list) if options.page else favorites_parser()
elif options.keyword: elif options.keyword:
if constant.CONFIG['language']: if constant.CONFIG['language']:
@ -72,13 +80,19 @@ def main():
if options.is_save_download_history: if options.is_save_download_history:
with DB() as db: with DB() as db:
data = map(int, db.get_all()) data = set(map(int, db.get_all()))
doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data)) doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))
logger.info(f'New doujinshis account: {len(doujinshi_ids)}')
if options.zip:
options.is_nohtml = True
if not options.is_show: if not options.is_show:
downloader = Downloader(path=options.output_dir, threads=options.threads, downloader = (CompressedDownloader if options.zip else Downloader)(path=options.output_dir, threads=options.threads,
timeout=options.timeout, delay=options.delay) timeout=options.timeout, delay=options.delay,
exit_on_fail=options.exit_on_fail,
no_filename_padding=options.no_filename_padding)
for doujinshi_id in doujinshi_ids: for doujinshi_id in doujinshi_ids:
doujinshi_info = doujinshi_parser(doujinshi_id) doujinshi_info = doujinshi_parser(doujinshi_id)
@ -87,17 +101,15 @@ def main():
else: else:
continue continue
if not options.dryrun: doujinshi.downloader = downloader
doujinshi.downloader = downloader
if doujinshi.check_if_need_download(options): if doujinshi.check_if_need_download(options):
doujinshi.download() doujinshi.download()
else: else:
logger.info(f'Skip download doujinshi because a PDF/CBZ file exists of doujinshi {doujinshi.name}') logger.info(f'Skip download doujinshi because a PDF/CBZ file exists of doujinshi {doujinshi.name}')
continue
if options.generate_metadata: if options.generate_metadata:
generate_metadata_file(options.output_dir, doujinshi) generate_metadata(options.output_dir, doujinshi)
if options.is_save_download_history: if options.is_save_download_history:
with DB() as db: with DB() as db:

View File

@@ -35,11 +35,18 @@ LOGIN_URL = f'{BASE_URL}/login/'
 CHALLENGE_URL = f'{BASE_URL}/challenge'
 FAV_URL = f'{BASE_URL}/favorites/'
+PATH_SEPARATOR = os.path.sep
+RETRY_TIMES = 3
-IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i.{urlparse(BASE_URL).hostname}/galleries'
+IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i1.{urlparse(BASE_URL).hostname}/galleries'
 IMAGE_URL_MIRRORS = [
+    f'{urlparse(BASE_URL).scheme}://i2.{urlparse(BASE_URL).hostname}',
     f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}',
+    f'{urlparse(BASE_URL).scheme}://i4.{urlparse(BASE_URL).hostname}',
     f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}',
+    f'{urlparse(BASE_URL).scheme}://i6.{urlparse(BASE_URL).hostname}',
     f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}',
 ]
@@ -50,7 +57,7 @@ NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')
 __api_suspended_DETAIL_URL = f'{BASE_URL}/api/gallery'
 CONFIG = {
-    'proxy': {'http': '', 'https': ''},
+    'proxy': '',
     'cookie': '',
     'language': '',
     'template': '',
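
`CONFIG['proxy']` shrinks here from a per-scheme dict to a single URL string. A small sketch of how such a string can feed both HTTP clients used in the project, assuming a local proxy URL (requests keeps its dict form, while httpx 0.28+ takes one URL):

```python
import requests
import httpx

# Assumed example value; CONFIG['proxy'] is now a plain string rather than a dict.
proxy = 'http://127.0.0.1:1080'

# requests still expects a scheme -> URL mapping ...
session = requests.Session()
session.proxies.update({'http': proxy, 'https': proxy})

# ... while httpx 0.28+ accepts a single proxy URL (None disables proxying).
client = httpx.AsyncClient(proxy=proxy or None, verify=False)
```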

View File

@@ -29,11 +29,12 @@ class DoujinshiInfo(dict):
 class Doujinshi(object):
-    def __init__(self, name=None, pretty_name=None, id=None, img_id=None,
+    def __init__(self, name=None, pretty_name=None, id=None, favorite_counts=0, img_id=None,
                  ext='', pages=0, name_format='[%i][%a][%t]', **kwargs):
         self.name = name
         self.pretty_name = pretty_name
         self.id = id
+        self.favorite_counts = favorite_counts
         self.img_id = img_id
         self.ext = ext
         self.pages = pages
@@ -45,6 +46,7 @@ class Doujinshi(object):
         name_format = name_format.replace('%ag', format_filename(ag_value))
         name_format = name_format.replace('%i', format_filename(str(self.id)))
+        name_format = name_format.replace('%f', format_filename(str(self.favorite_counts)))
         name_format = name_format.replace('%a', format_filename(self.info.artists))
         name_format = name_format.replace('%g', format_filename(self.info.groups))
@@ -55,7 +57,7 @@ class Doujinshi(object):
         self.table = [
             ['Parodies', self.info.parodies],
-            ['Doujinshi', self.name],
+            ['Title', self.name],
             ['Subtitle', self.info.subtitle],
             ['Date', self.info.date],
             ['Characters', self.info.characters],
@@ -63,6 +65,7 @@ class Doujinshi(object):
             ['Groups', self.info.groups],
             ['Languages', self.info.languages],
             ['Tags', self.info.tags],
+            ['Favorite Counts', self.favorite_counts],
             ['URL', self.url],
             ['Pages', self.pages],
         ]
@@ -74,6 +77,9 @@ class Doujinshi(object):
         logger.info(f'Print doujinshi information of {self.id}\n{tabulate(self.table)}')
     def check_if_need_download(self, options):
+        if options.no_download:
+            return False
+
         base_path = os.path.join(self.downloader.path, self.filename)
         # regenerate, re-download
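
The new `%f` token joins the existing placeholder substitutions in the folder-name template. A tiny standalone illustration of that replacement chain, with made-up values and without the real `format_filename` sanitiser:

```python
# Made-up values; the real code routes each piece through format_filename() first.
name_format = '[%i][%f] %t'
doujinshi_id = 261100
favorite_counts = 1234
title = 'example title'

name_format = name_format.replace('%i', str(doujinshi_id))
name_format = name_format.replace('%f', str(favorite_counts))
name_format = name_format.replace('%t', title)

print(name_format)  # [261100][1234] example title
```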

View File

@ -4,6 +4,8 @@ import os
import asyncio import asyncio
import httpx import httpx
import urllib3.exceptions import urllib3.exceptions
import zipfile
import io
from urllib.parse import urlparse from urllib.parse import urlparse
from nhentai import constant from nhentai import constant
@ -13,10 +15,6 @@ from nhentai.utils import Singleton, async_request
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
class NHentaiImageNotExistException(Exception):
pass
def download_callback(result): def download_callback(result):
result, data = result result, data = result
if result == 0: if result == 0:
@ -32,42 +30,52 @@ def download_callback(result):
logger.log(16, f'{data} downloaded successfully') logger.log(16, f'{data} downloaded successfully')
async def fiber(tasks):
for completed_task in asyncio.as_completed(tasks):
try:
result = await completed_task
logger.info(f'{result[1]} download completed')
except Exception as e:
logger.error(f'An error occurred: {e}')
class Downloader(Singleton): class Downloader(Singleton):
def __init__(self, path='', threads=5, timeout=30, delay=0): def __init__(self, path='', threads=5, timeout=30, delay=0, exit_on_fail=False,
no_filename_padding=False):
self.threads = threads self.threads = threads
self.path = str(path) self.path = str(path)
self.timeout = timeout self.timeout = timeout
self.delay = delay self.delay = delay
self.exit_on_fail = exit_on_fail
self.folder = None
self.semaphore = None
self.no_filename_padding = no_filename_padding
async def _semaphore_download(self, semaphore, *args, **kwargs): async def fiber(self, tasks):
async with semaphore: self.semaphore = asyncio.Semaphore(self.threads)
for completed_task in asyncio.as_completed(tasks):
try:
result = await completed_task
if result[0] > 0:
logger.info(f'{result[1]} download completed')
else:
raise Exception(f'{result[1]} download failed, return value {result[0]}')
except Exception as e:
logger.error(f'An error occurred: {e}')
if self.exit_on_fail:
raise Exception('User intends to exit on fail')
async def _semaphore_download(self, *args, **kwargs):
async with self.semaphore:
return await self.download(*args, **kwargs) return await self.download(*args, **kwargs)
async def download(self, url, folder='', filename='', retried=0, proxy=None): async def download(self, url, folder='', filename='', retried=0, proxy=None, length=0):
logger.info(f'Starting to download {url} ...') logger.info(f'Starting to download {url} ...')
if self.delay: if self.delay:
await asyncio.sleep(self.delay) await asyncio.sleep(self.delay)
filename = filename if filename else os.path.basename(urlparse(url).path) filename = filename if filename else os.path.basename(urlparse(url).path)
base_filename, extension = os.path.splitext(filename)
save_file_path = os.path.join(self.folder, filename) if not self.no_filename_padding:
filename = base_filename.zfill(length) + extension
else:
filename = base_filename + extension
try: try:
if os.path.exists(save_file_path): response = await async_request('GET', url, timeout=self.timeout, proxy=proxy)
logger.warning(f'Skipped download: {save_file_path} already exists')
return 1, url
response = await async_request('GET', url, timeout=self.timeout, proxies=proxy)
if response.status_code != 200: if response.status_code != 200:
path = urlparse(url).path path = urlparse(url).path
@ -80,11 +88,11 @@ class Downloader(Singleton):
if not await self.save(filename, response): if not await self.save(filename, response):
logger.error(f'Can not download image {url}') logger.error(f'Can not download image {url}')
return 1, None return -1, url
except (httpx.HTTPStatusError, httpx.TimeoutException, httpx.ConnectError) as e: except (httpx.HTTPStatusError, httpx.TimeoutException, httpx.ConnectError) as e:
if retried < 3: if retried < constant.RETRY_TIMES:
logger.info(f'Download {filename} failed, retrying({retried + 1}) times...') logger.warning(f'Download {filename} failed, retrying({retried + 1}) times...')
return await self.download( return await self.download(
url=url, url=url,
folder=folder, folder=folder,
@ -93,11 +101,8 @@ class Downloader(Singleton):
proxy=proxy, proxy=proxy,
) )
else: else:
return 0, None logger.warning(f'Download {filename} failed with {constant.RETRY_TIMES} times retried, skipped')
return -2, url
except NHentaiImageNotExistException as e:
os.remove(save_file_path)
return -1, url
except Exception as e: except Exception as e:
import traceback import traceback
@ -105,18 +110,18 @@ class Downloader(Singleton):
logger.error(f"Exception type: {type(e)}") logger.error(f"Exception type: {type(e)}")
traceback.print_stack() traceback.print_stack()
logger.critical(str(e)) logger.critical(str(e))
return 0, None return -9, url
except KeyboardInterrupt: except KeyboardInterrupt:
return -3, None return -4, url
return 1, url return 1, url
async def save(self, save_file_path, response) -> bool: async def save(self, filename, response) -> bool:
if response is None: if response is None:
logger.error('Error: Response is None') logger.error('Error: Response is None')
return False return False
save_file_path = os.path.join(self.folder, save_file_path) save_file_path = os.path.join(self.folder, filename)
with open(save_file_path, 'wb') as f: with open(save_file_path, 'wb') as f:
if response is not None: if response is not None:
length = response.headers.get('content-length') length = response.headers.get('content-length')
@ -127,6 +132,14 @@ class Downloader(Singleton):
f.write(chunk) f.write(chunk)
return True return True
def create_storage_object(self, folder:str):
if not os.path.exists(folder):
try:
os.makedirs(folder)
except EnvironmentError as e:
logger.critical(str(e))
self.folder:str = folder
self.close = lambda: None # Only available in class CompressedDownloader
def start_download(self, queue, folder='') -> bool: def start_download(self, queue, folder='') -> bool:
if not isinstance(folder, (str,)): if not isinstance(folder, (str,)):
@ -136,25 +149,47 @@ class Downloader(Singleton):
folder = os.path.join(self.path, folder) folder = os.path.join(self.path, folder)
logger.info(f'Doujinshi will be saved at "{folder}"') logger.info(f'Doujinshi will be saved at "{folder}"')
if not os.path.exists(folder): self.create_storage_object(folder)
try:
os.makedirs(folder)
except EnvironmentError as e:
logger.critical(str(e))
self.folder = folder
if os.getenv('DEBUG', None) == 'NODOWNLOAD': if os.getenv('DEBUG', None) == 'NODOWNLOAD':
# Assuming we want to continue with rest of process. # Assuming we want to continue with rest of process.
return True return True
semaphore = asyncio.Semaphore(self.threads) digit_length = len(str(len(queue)))
logger.info(f'Total download pages: {len(queue)}')
coroutines = [ coroutines = [
self._semaphore_download(semaphore, url, filename=os.path.basename(urlparse(url).path)) self._semaphore_download(url, filename=os.path.basename(urlparse(url).path), length=digit_length)
for url in queue for url in queue
] ]
# Prevent coroutines infection # Prevent coroutines infection
asyncio.run(fiber(coroutines)) asyncio.run(self.fiber(coroutines))
self.close()
return True return True
class CompressedDownloader(Downloader):
def create_storage_object(self, folder):
filename = f'{folder}.zip'
print(filename)
self.zipfile = zipfile.ZipFile(filename,'w')
self.close = lambda: self.zipfile.close()
async def save(self, filename, response) -> bool:
if response is None:
logger.error('Error: Response is None')
return False
image_data = io.BytesIO()
length = response.headers.get('content-length')
if length is None:
content = await response.read()
image_data.write(content)
else:
async for chunk in response.aiter_bytes(2048):
image_data.write(chunk)
image_data.seek(0)
self.zipfile.writestr(filename, image_data.read())
return True
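
`CompressedDownloader` keeps the normal download flow but buffers each response in memory and appends it to one archive instead of writing loose image files. A rough synchronous sketch of that idea, detached from the downloader class, with invented file names and payloads:

```python
import io
import zipfile

# Invented payloads standing in for downloaded image bytes.
pages = {
    '001.jpg': b'fake jpeg bytes',
    '002.jpg': b'more fake bytes',
}

with zipfile.ZipFile('doujinshi.zip', 'w') as archive:
    for filename, payload in pages.items():
        buffer = io.BytesIO()
        buffer.write(payload)   # the real class feeds this chunk by chunk from the response
        buffer.seek(0)
        archive.writestr(filename, buffer.read())
```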

View File

@ -92,13 +92,27 @@ def favorites_parser(page=None):
page_range_list = range(1, pages + 1) page_range_list = range(1, pages + 1)
for page in page_range_list: for page in page_range_list:
try: logger.info(f'Getting doujinshi ids of page {page}')
logger.info(f'Getting doujinshi ids of page {page}')
resp = request('get', f'{constant.FAV_URL}?page={page}').content
result.extend(_get_title_and_id(resp)) i = 0
except Exception as e: while i <= constant.RETRY_TIMES + 1:
logger.error(f'Error: {e}, continue') i += 1
if i > 3:
logger.error(f'Failed to get favorites at page {page} after 3 times retried, skipped')
break
try:
resp = request('get', f'{constant.FAV_URL}?page={page}').content
temp_result = _get_title_and_id(resp)
if not temp_result:
logger.warning(f'Failed to get favorites at page {page}, retrying ({i} times) ...')
continue
else:
result.extend(temp_result)
break
except Exception as e:
logger.warning(f'Error: {e}, retrying ({i} times) ...')
return result return result
@ -135,30 +149,38 @@ def doujinshi_parser(id_, counter=0):
logger.warning(f'Error: {e}, ignored') logger.warning(f'Error: {e}, ignored')
return None return None
# print(response)
html = BeautifulSoup(response, 'html.parser') html = BeautifulSoup(response, 'html.parser')
doujinshi_info = html.find('div', attrs={'id': 'info'}) doujinshi_info = html.find('div', attrs={'id': 'info'})
title = doujinshi_info.find('h1').text title = doujinshi_info.find('h1').text
pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text
subtitle = doujinshi_info.find('h2') subtitle = doujinshi_info.find('h2')
favorite_counts = doujinshi_info.find('span', class_='nobold').text.strip('(').strip(')')
doujinshi['name'] = title doujinshi['name'] = title
doujinshi['pretty_name'] = pretty_name doujinshi['pretty_name'] = pretty_name
doujinshi['subtitle'] = subtitle.text if subtitle else '' doujinshi['subtitle'] = subtitle.text if subtitle else ''
doujinshi['favorite_counts'] = int(favorite_counts) if favorite_counts and favorite_counts.isdigit() else 0
doujinshi_cover = html.find('div', attrs={'id': 'cover'}) doujinshi_cover = html.find('div', attrs={'id': 'cover'})
img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif|webp)$', # img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif|webp)$',
doujinshi_cover.a.img.attrs['data-src']) # doujinshi_cover.a.img.attrs['data-src'])
# fix cover.webp.webp
img_id = re.search(r'/galleries/(\d+)/cover(\.webp|\.jpg|\.png)?\.\w+$', doujinshi_cover.a.img.attrs['data-src'])
ext = [] ext = []
for i in html.find_all('div', attrs={'class': 'thumb-container'}): for i in html.find_all('div', attrs={'class': 'thumb-container'}):
_, ext_name = os.path.basename(i.img.attrs['data-src']).rsplit('.', 1) base_name = os.path.basename(i.img.attrs['data-src'])
ext.append(ext_name) ext_name = base_name.split('.')
if len(ext_name) == 3:
ext.append(ext_name[1])
else:
ext.append(ext_name[-1])
if not img_id: if not img_id:
logger.critical('Tried yo get image id failed') logger.critical(f'Tried yo get image id failed of id: {id_}')
sys.exit(1) return None
doujinshi['img_id'] = img_id.group(1) doujinshi['img_id'] = img_id.group(1)
doujinshi['ext'] = ext doujinshi['ext'] = ext
@ -185,53 +207,6 @@ def doujinshi_parser(id_, counter=0):
return doujinshi return doujinshi
def legacy_doujinshi_parser(id_):
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
raise Exception(f'Doujinshi id({id_}) is not valid')
id_ = int(id_)
logger.info(f'Fetching information of doujinshi id {id_}')
doujinshi = dict()
doujinshi['id'] = id_
url = f'{constant.DETAIL_URL}/{id_}'
i = 0
while 5 > i:
try:
response = request('get', url).json()
except Exception as e:
i += 1
if not i < 5:
logger.critical(str(e))
sys.exit(1)
continue
break
doujinshi['name'] = response['title']['english']
doujinshi['subtitle'] = response['title']['japanese']
doujinshi['img_id'] = response['media_id']
doujinshi['ext'] = ''.join([i['t'] for i in response['images']['pages']])
doujinshi['pages'] = len(response['images']['pages'])
# gain information of the doujinshi
needed_fields = ['character', 'artist', 'language', 'tag', 'parody', 'group', 'category']
for tag in response['tags']:
tag_type = tag['type']
if tag_type in needed_fields:
if tag_type == 'tag':
if tag_type not in doujinshi:
doujinshi[tag_type] = {}
tag['name'] = tag['name'].replace(' ', '-')
tag['name'] = tag['name'].lower()
doujinshi[tag_type][tag['name']] = tag['id']
elif tag_type not in doujinshi:
doujinshi[tag_type] = tag['name']
else:
doujinshi[tag_type] += ', ' + tag['name']
return doujinshi
def print_doujinshi(doujinshi_list): def print_doujinshi(doujinshi_list):
if not doujinshi_list: if not doujinshi_list:
return return
@ -302,7 +277,7 @@ def search_parser(keyword, sorting, page, is_page_all=False):
i = 0 i = 0
logger.info(f'Searching doujinshis using keywords "{keyword}" on page {p}{total}') logger.info(f'Searching doujinshis using keywords "{keyword}" on page {p}{total}')
while i < 3: while i < constant.RETRY_TIMES:
try: try:
url = request('get', url=constant.SEARCH_URL, params={'query': keyword, url = request('get', url=constant.SEARCH_URL, params={'query': keyword,
'page': p, 'sort': sorting}).url 'page': p, 'sort': sorting}).url
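
`favorites_parser` and `search_parser` now bound their retries with `constant.RETRY_TIMES` rather than a hard-coded 3. A generic sketch of that loop shape, using a stand-in `fetch()` that fails on the first attempts:

```python
RETRY_TIMES = 3  # stand-in for constant.RETRY_TIMES

def fetch(attempt):
    """Stand-in for the real favorites request; fails on the first two attempts."""
    if attempt < 3:
        raise RuntimeError('temporary error')
    return [(123456, 'some title')]

result = []
for attempt in range(1, RETRY_TIMES + 1):
    try:
        temp_result = fetch(attempt)
    except Exception as e:
        print(f'Error: {e}, retrying ({attempt} times) ...')
        continue
    if not temp_result:
        print(f'Empty page, retrying ({attempt} times) ...')
        continue
    result.extend(temp_result)
    break
else:
    print(f'Failed after {RETRY_TIMES} retries, skipped')

print(result)
```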

View File

@ -1,13 +1,17 @@
# coding: utf-8 # coding: utf-8
import json import json
import os import os
from nhentai.constant import PATH_SEPARATOR, LANGUAGE_ISO
from xml.sax.saxutils import escape from xml.sax.saxutils import escape
from nhentai.constant import LANGUAGE_ISO from requests.structures import CaseInsensitiveDict
def serialize_json(doujinshi, output_dir): def serialize_json(doujinshi, output_dir: str):
metadata = {'title': doujinshi.name, metadata = {'title': doujinshi.name,
'subtitle': doujinshi.info.subtitle} 'subtitle': doujinshi.info.subtitle}
if doujinshi.info.favorite_counts:
metadata['favorite_counts'] = doujinshi.favorite_counts
if doujinshi.info.date: if doujinshi.info.date:
metadata['upload_date'] = doujinshi.info.date metadata['upload_date'] = doujinshi.info.date
if doujinshi.info.parodies: if doujinshi.info.parodies:
@ -44,6 +48,7 @@ def serialize_comic_xml(doujinshi, output_dir):
xml_write_simple_tag(f, 'PageCount', doujinshi.pages) xml_write_simple_tag(f, 'PageCount', doujinshi.pages)
xml_write_simple_tag(f, 'URL', doujinshi.url) xml_write_simple_tag(f, 'URL', doujinshi.url)
xml_write_simple_tag(f, 'NhentaiId', doujinshi.id) xml_write_simple_tag(f, 'NhentaiId', doujinshi.id)
xml_write_simple_tag(f, 'Favorites', doujinshi.favorite_counts)
xml_write_simple_tag(f, 'Genre', doujinshi.info.categories) xml_write_simple_tag(f, 'Genre', doujinshi.info.categories)
xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and
@ -73,13 +78,33 @@ def serialize_comic_xml(doujinshi, output_dir):
f.write('</ComicInfo>') f.write('</ComicInfo>')
def serialize_info_txt(doujinshi, output_dir: str):
info_txt_path = os.path.join(output_dir, 'info.txt')
f = open(info_txt_path, 'w', encoding='utf-8')
fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'GROUPS', 'CIRCLE', 'SCANLATOR',
'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
'TAGS', 'FAVORITE COUNTS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
'SERIES', 'PARODY', 'URL']
temp_dict = CaseInsensitiveDict(dict(doujinshi.table))
for i in fields:
v = temp_dict.get(i)
v = temp_dict.get(f'{i}s') if v is None else v
v = doujinshi.info.get(i.lower(), None) if v is None else v
v = doujinshi.info.get(f'{i.lower()}s', "Unknown") if v is None else v
f.write(f'{i}: {v}\n')
f.close()
def xml_write_simple_tag(f, name, val, indent=1): def xml_write_simple_tag(f, name, val, indent=1):
f.write(f'{" "*indent}<{name}>{escape(str(val))}</{name}>\n') f.write(f'{" "*indent}<{name}>{escape(str(val))}</{name}>\n')
def merge_json(): def merge_json():
lst = [] lst = []
output_dir = "./" output_dir = f".{PATH_SEPARATOR}"
os.chdir(output_dir) os.chdir(output_dir)
doujinshi_dirs = next(os.walk('.'))[1] doujinshi_dirs = next(os.walk('.'))[1]
for folder in doujinshi_dirs: for folder in doujinshi_dirs:
@ -127,3 +152,4 @@ def set_js_database():
indexed_json = json.dumps(indexed_json, separators=(',', ':')) indexed_json = json.dumps(indexed_json, separators=(',', ':'))
f.write('var data = ' + indexed_json) f.write('var data = ' + indexed_json)
f.write(';\nvar tags = ' + unique_json) f.write(';\nvar tags = ' + unique_json)

View File

@ -1,5 +1,5 @@
# coding: utf-8 # coding: utf-8
import json
import sys import sys
import re import re
import os import os
@ -10,43 +10,54 @@ import httpx
import requests import requests
import sqlite3 import sqlite3
import urllib.parse import urllib.parse
from typing import Optional, Tuple from typing import Tuple
from nhentai import constant from nhentai import constant
from nhentai.constant import PATH_SEPARATOR
from nhentai.logger import logger from nhentai.logger import logger
from nhentai.serializer import serialize_json, serialize_comic_xml, set_js_database from nhentai.serializer import serialize_comic_xml, serialize_json, serialize_info_txt, set_js_database
MAX_FIELD_LENGTH = 100 MAX_FIELD_LENGTH = 100
EXTENSIONS = ('.png', '.jpg', '.jpeg', '.gif', '.webp')
def get_headers():
headers = {
'Referer': constant.LOGIN_URL
}
user_agent = constant.CONFIG.get('useragent')
if user_agent and user_agent.strip():
headers['User-Agent'] = user_agent
cookie = constant.CONFIG.get('cookie')
if cookie and cookie.strip():
headers['Cookie'] = cookie
return headers
def request(method, url, **kwargs): def request(method, url, **kwargs):
session = requests.Session() session = requests.Session()
session.headers.update({ session.headers.update(get_headers())
'Referer': constant.LOGIN_URL,
'User-Agent': constant.CONFIG['useragent'],
'Cookie': constant.CONFIG['cookie']
})
if not kwargs.get('proxies', None): if not kwargs.get('proxies', None):
kwargs['proxies'] = constant.CONFIG['proxy'] kwargs['proxies'] = {
'https': constant.CONFIG['proxy'],
'http': constant.CONFIG['proxy'],
}
return getattr(session, method)(url, verify=False, **kwargs) return getattr(session, method)(url, verify=False, **kwargs)
async def async_request(method, url, proxies = None, **kwargs): async def async_request(method, url, proxy = None, **kwargs):
headers = { headers=get_headers()
'Referer': constant.LOGIN_URL,
'User-Agent': constant.CONFIG['useragent'],
'Cookie': constant.CONFIG['cookie'],
}
if proxies is None: if proxy is None:
proxies = constant.CONFIG['proxy'] proxy = constant.CONFIG['proxy']
if proxies.get('http') == '' and proxies.get('https') == '': if isinstance(proxy, (str, )) and not proxy:
proxies = None proxy = None
async with httpx.AsyncClient(headers=headers, verify=False, proxies=proxies, **kwargs) as client: async with httpx.AsyncClient(headers=headers, verify=False, proxy=proxy, **kwargs) as client:
response = await client.request(method, url, **kwargs) response = await client.request(method, url, **kwargs)
return response return response
@ -94,24 +105,29 @@ def parse_doujinshi_obj(
file_type: str = '' file_type: str = ''
) -> Tuple[str, str]: ) -> Tuple[str, str]:
filename = f'./doujinshi.{file_type}' filename = f'.{PATH_SEPARATOR}doujinshi.{file_type}'
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
if doujinshi_obj is not None: if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
_filename = f'{doujinshi_obj.filename}.{file_type}' _filename = f'{doujinshi_obj.filename}.{file_type}'
if file_type == 'cbz':
serialize_comic_xml(doujinshi_obj, doujinshi_dir)
if file_type == 'pdf': if file_type == 'pdf':
_filename = _filename.replace('/', '-') _filename = _filename.replace('/', '-')
filename = os.path.join(output_dir, _filename) filename = os.path.join(output_dir, _filename)
else:
if file_type == 'html':
return output_dir, 'index.html'
doujinshi_dir = f'.{PATH_SEPARATOR}'
if not os.path.exists(doujinshi_dir):
os.makedirs(doujinshi_dir)
return doujinshi_dir, filename return doujinshi_dir, filename
def generate_html(output_dir='.', doujinshi_obj=None, template='default'): def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, '.html') doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, 'html')
image_html = '' image_html = ''
if not os.path.exists(doujinshi_dir): if not os.path.exists(doujinshi_dir):
@ -125,7 +141,7 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
file_list.sort() file_list.sort()
for image in file_list: for image in file_list:
if not os.path.splitext(image)[1] in ('.jpg', '.png', '.webp'): if not os.path.splitext(image)[1] in EXTENSIONS:
continue continue
image_html += f'<img src="{image}" class="image-item"/>\n' image_html += f'<img src="{image}" class="image-item"/>\n'
@ -134,10 +150,16 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
js = readfile(f'viewer/{template}/scripts.js') js = readfile(f'viewer/{template}/scripts.js')
if doujinshi_obj is not None: if doujinshi_obj is not None:
serialize_json(doujinshi_obj, doujinshi_dir) # serialize_json(doujinshi_obj, doujinshi_dir)
name = doujinshi_obj.name name = doujinshi_obj.name
else: else:
name = {'title': 'nHentai HTML Viewer'} metadata_path = os.path.join(doujinshi_dir, "metadata.json")
if os.path.exists(metadata_path):
with open(metadata_path, 'r') as file:
doujinshi_info = json.loads(file.read())
name = doujinshi_info.get("title")
else:
name = 'nHentai HTML Viewer'
data = html.format(TITLE=name, IMAGES=image_html, SCRIPTS=js, STYLES=css) data = html.format(TITLE=name, IMAGES=image_html, SCRIPTS=js, STYLES=css)
try: try:
@ -170,7 +192,7 @@ def move_to_folder(output_dir='.', doujinshi_obj=None, file_type=None):
shutil.move(filename, os.path.join(doujinshi_dir, os.path.basename(filename))) shutil.move(filename, os.path.join(doujinshi_dir, os.path.basename(filename)))
def generate_main_html(output_dir='./'): def generate_main_html(output_dir=f'.{PATH_SEPARATOR}'):
""" """
Generate a main html to show all the contains doujinshi. Generate a main html to show all the contains doujinshi.
With a link to their `index.html`. With a link to their `index.html`.
@ -227,8 +249,20 @@ def generate_main_html(output_dir='./'):
logger.warning(f'Writing Main Viewer failed ({e})') logger.warning(f'Writing Main Viewer failed ({e})')
def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=False): def generate_cbz(doujinshi_dir, filename):
file_list = os.listdir(doujinshi_dir)
file_list.sort()
logger.info(f'Writing CBZ file to path: {filename}')
with zipfile.ZipFile(filename, 'w') as cbz_pf:
for image in file_list:
image_path = os.path.join(doujinshi_dir, image)
cbz_pf.write(image_path, image)
logger.log(16, f'Comic Book CBZ file has been written to "{filename}"')
def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=False):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type) doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)
if os.path.exists(f'{doujinshi_dir}.{file_type}') and not regenerate: if os.path.exists(f'{doujinshi_dir}.{file_type}') and not regenerate:
@ -236,22 +270,15 @@ def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=Fa
return return
if file_type == 'cbz': if file_type == 'cbz':
file_list = os.listdir(doujinshi_dir) serialize_comic_xml(doujinshi_obj, doujinshi_dir)
file_list.sort() generate_cbz(doujinshi_dir, filename)
logger.info(f'Writing CBZ file to path: {filename}')
with zipfile.ZipFile(filename, 'w') as cbz_pf:
for image in file_list:
image_path = os.path.join(doujinshi_dir, image)
cbz_pf.write(image_path, image)
logger.log(16, f'Comic Book CBZ file has been written to "{filename}"')
elif file_type == 'pdf': elif file_type == 'pdf':
try: try:
import img2pdf import img2pdf
"""Write images to a PDF file using img2pdf.""" """Write images to a PDF file using img2pdf."""
file_list = [f for f in os.listdir(doujinshi_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.gif', '.webp'))] file_list = [f for f in os.listdir(doujinshi_dir) if f.lower().endswith(EXTENSIONS)]
file_list.sort() file_list.sort()
logger.info(f'Writing PDF file to path: {filename}') logger.info(f'Writing PDF file to path: {filename}')
@ -265,6 +292,16 @@ def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=Fa
except ImportError: except ImportError:
logger.error("Please install img2pdf package by using pip.") logger.error("Please install img2pdf package by using pip.")
else:
raise ValueError('invalid file type')
+def generate_metadata(output_dir, doujinshi_obj=None):
+    doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, '')
+    serialize_json(doujinshi_obj, doujinshi_dir)
+    serialize_comic_xml(doujinshi_obj, doujinshi_dir)
+    serialize_info_txt(doujinshi_obj, doujinshi_dir)
+    logger.log(16, f'Metadata files have been written to "{doujinshi_dir}"')
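A minimal sketch of how the new generate_metadata helper is expected to be called after a download, assuming it lives alongside the other helpers in nhentai.utils (the ID and output path are examples only):

# Illustrative call; mirrors the pattern used in the test suite further down
from nhentai.doujinshi import Doujinshi
from nhentai.parser import doujinshi_parser
from nhentai.utils import generate_metadata

info = Doujinshi(**doujinshi_parser(440546), name_format='%i')
generate_metadata('/tmp', info)  # writes the JSON, ComicInfo XML and info.txt metadata next to the pages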
def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
@@ -272,7 +309,7 @@ def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
    It used to be a whitelist approach allowed only alphabet and a part of symbols.
    but most doujinshi's names include Japanese 2-byte characters and these was rejected.
    so it is using blacklist approach now.
-    if filename include forbidden characters (\'/:,;*?"<>|) ,it replace space character(' ').
+    if filename include forbidden characters (\'/:,;*?"<>|) ,it replaces space character(" ").
    """
    # maybe you can use `--format` to select a suitable filename
@@ -295,7 +332,7 @@ def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
    return filename
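The docstring describes a blacklist approach: forbidden filesystem characters are replaced with spaces and the result is truncated. A small illustration of that idea follows (not the exact implementation; the length limit here is an arbitrary stand-in for MAX_FIELD_LENGTH):

# Illustration of the blacklist approach described in the docstring above
BAN_CHARS = '\'/:,;*?"<>|'

def sanitize(name, length=100):
    cleaned = ''.join(' ' if c in BAN_CHARS else c for c in name)
    return cleaned.strip()[:length]

print(sanitize('Title: with/illegal*chars?'))  # -> 'Title  with illegal chars'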
-def signal_handler(signal, frame):
+def signal_handler(_signal, _frame):
    logger.error('Ctrl-C signal received. Stopping...')
    sys.exit(1)
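For context, a handler like this is normally registered through the standard library's signal module, roughly as follows (a sketch; the diff does not show where the project wires it up):

# Sketch of registering the Ctrl-C handler shown above
import signal

signal.signal(signal.SIGINT, signal_handler)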
@@ -303,7 +340,8 @@ def signal_handler(signal, frame):
def paging(page_string):
    # 1,3-5,14 -> [1, 3, 4, 5, 14]
    if not page_string:
-        return []
+        # default, the first page
+        return [1]

    page_list = []
    for i in page_string.split(','):
@@ -320,29 +358,6 @@ def paging(page_string):
    return page_list
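A quick sketch of the page-string syntax documented in the comment above: comma-separated entries, dash-separated ranges expanded, and an empty string now defaulting to the first page. This re-implements the behaviour for illustration rather than calling the project code:

# Illustration of the page-string syntax handled by paging()
def parse_pages(page_string):
    if not page_string:
        return [1]  # default: first page only
    pages = []
    for chunk in page_string.split(','):
        if '-' in chunk:
            start, end = chunk.split('-')
            pages.extend(range(int(start), int(end) + 1))
        else:
            pages.append(int(chunk))
    return pages

print(parse_pages('1,3-5,14'))  # [1, 3, 4, 5, 14]
print(parse_pages(''))          # [1]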
-def generate_metadata_file(output_dir, doujinshi_obj):
-    info_txt_path = os.path.join(output_dir, doujinshi_obj.filename, 'info.txt')
-    f = open(info_txt_path, 'w', encoding='utf-8')
-    fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'GROUPS', 'CIRCLE', 'SCANLATOR',
-              'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
-              'TAGS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
-              'SERIES', 'PARODY', 'URL']
-    special_fields = ['PARODY', 'TITLE', 'ORIGINAL TITLE', 'DATE', 'CHARACTERS', 'AUTHOR', 'GROUPS',
-                      'LANGUAGE', 'TAGS', 'URL', 'PAGES']
-    for i in range(len(fields)):
-        f.write(f'{fields[i]}: ')
-        if fields[i] in special_fields:
-            f.write(str(doujinshi_obj.table[special_fields.index(fields[i])][1]))
-        f.write('\n')
-    f.close()
-    logger.log(16, f'Metadata Info has been written to "{info_txt_path}"')
class DB(object):
    conn = None
    cur = None


@@ -49,8 +49,8 @@ document.onkeypress = event => {
    switch (event.key.toLowerCase()) {
        // Previous Image
        case 'w':
            scrollBy(0, -40);
            break;
        case 'a':
            changePage(currentPage - 1);
            break;
@@ -61,7 +61,7 @@ document.onkeypress = event => {
        // Next Image
        case ' ':
        case 's':
            scrollBy(0, 40);
            break;
        case 'd':
            changePage(currentPage + 1);
@@ -75,11 +75,13 @@ document.onkeydown = event =>{
            changePage(currentPage - 1);
            break;
        case 38: //up
+            changePage(currentPage - 1);
            break;
        case 39: //right
            changePage(currentPage + 1);
            break;
        case 40: //down
+            changePage(currentPage + 1);
            break;
    }
};

poetry.lock (generated)

@@ -1,142 +1,255 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "anyio"
version = "4.5.2"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
{file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
[[package]]
name = "beautifulsoup4"
-version = "4.11.2"
+version = "4.12.3"
description = "Screen-scraping library"
optional = false
python-versions = ">=3.6.0"
files = [
-    {file = "beautifulsoup4-4.11.2-py3-none-any.whl", hash = "sha256:0e79446b10b3ecb499c1556f7e228a53e64a2bfcebd455f370d8927cb5b59e39"},
-    {file = "beautifulsoup4-4.11.2.tar.gz", hash = "sha256:bc4bdda6717de5a2987436fb8d72f45dc90dd856bdfd512a1314ce90349a0106"},
+    {file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"},
+    {file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"},
]

[package.dependencies]
soupsieve = ">1.2"

[package.extras]
-cchardet = ["cchardet"]
-chardet = ["chardet"]
-charset-normalizer = ["charset-normalizer"]
html5lib = ["html5lib"]
lxml = ["lxml"]

[[package]]
name = "certifi"
-version = "2024.7.4"
+version = "2024.12.14"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
-    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
-    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
+    {file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
+    {file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
]
[[package]]
name = "chardet"
version = "5.2.0"
description = "Universal encoding detector for Python 3"
optional = false
python-versions = ">=3.7"
files = [
{file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"},
{file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"},
] ]
[[package]]
name = "charset-normalizer"
-version = "3.0.1"
+version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
-python-versions = "*"
+python-versions = ">=3.7"
files = [
{file = "charset-normalizer-3.0.1.tar.gz", hash = "sha256:ebea339af930f8ca5d7a699b921106c6e29c617fe9606fa7baa043c1cdae326f"}, {file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88600c72ef7587fe1708fd242b385b6ed4b8904976d5da0893e31df8b3480cb6"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
{file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c75ffc45f25324e68ab238cb4b5c0a38cd1c3d7f1fb1f72b5541de469e2247db"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
{file = "charset_normalizer-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db72b07027db150f468fbada4d85b3b2729a3db39178abf5c543b784c1254539"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
{file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62595ab75873d50d57323a91dd03e6966eb79c41fa834b7a1661ed043b2d404d"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
{file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8"}, {file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
{file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:772b87914ff1152b92a197ef4ea40efe27a378606c39446ded52c8f80f79702e"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
{file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70990b9c51340e4044cfc394a81f614f3f90d41397104d226f21e66de668730d"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
{file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:292d5e8ba896bbfd6334b096e34bffb56161c81408d6d036a7dfa6929cff8783"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
{file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2edb64ee7bf1ed524a1da60cdcd2e1f6e2b4f66ef7c077680739f1641f62f555"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
{file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:31a9ddf4718d10ae04d9b18801bd776693487cbb57d74cc3458a7673f6f34639"}, {file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
{file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:44ba614de5361b3e5278e1241fda3dc1838deed864b50a10d7ce92983797fa76"}, {file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
{file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:12db3b2c533c23ab812c2b25934f60383361f8a376ae272665f8e48b88e8e1c6"}, {file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
{file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c512accbd6ff0270939b9ac214b84fb5ada5f0409c44298361b2f5e13f9aed9e"}, {file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
{file = "charset_normalizer-3.0.1-cp310-cp310-win32.whl", hash = "sha256:502218f52498a36d6bf5ea77081844017bf7982cdbe521ad85e64cabee1b608b"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
{file = "charset_normalizer-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:601f36512f9e28f029d9481bdaf8e89e5148ac5d89cffd3b05cd533eeb423b59"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
{file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0298eafff88c99982a4cf66ba2efa1128e4ddaca0b05eec4c456bbc7db691d8d"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
{file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8d0fc946c784ff7f7c3742310cc8a57c5c6dc31631269876a88b809dbeff3d3"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
{file = "charset_normalizer-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:87701167f2a5c930b403e9756fab1d31d4d4da52856143b609e30a1ce7160f3c"}, {file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
{file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e76c0f23218b8f46c4d87018ca2e441535aed3632ca134b10239dfb6dadd6b"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
{file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0a590235ccd933d9892c627dec5bc7511ce6ad6c1011fdf5b11363022746c1"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
{file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c7fe7afa480e3e82eed58e0ca89f751cd14d767638e2550c77a92a9e749c317"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
{file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79909e27e8e4fcc9db4addea88aa63f6423ebb171db091fb4373e3312cb6d603"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
{file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ac7b6a045b814cf0c47f3623d21ebd88b3e8cf216a14790b455ea7ff0135d18"}, {file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
{file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:72966d1b297c741541ca8cf1223ff262a6febe52481af742036a0b296e35fa5a"}, {file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
{file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7"}, {file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
{file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5995f0164fa7df59db4746112fec3f49c461dd6b31b841873443bdb077c13cfc"}, {file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
{file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4a8fcf28c05c1f6d7e177a9a46a1c52798bfe2ad80681d275b10dcf317deaf0b"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
{file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:761e8904c07ad053d285670f36dd94e1b6ab7f16ce62b9805c475b7aa1cffde6"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
{file = "charset_normalizer-3.0.1-cp311-cp311-win32.whl", hash = "sha256:71140351489970dfe5e60fc621ada3e0f41104a5eddaca47a7acb3c1b851d6d3"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
{file = "charset_normalizer-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ab77acb98eba3fd2a85cd160851816bfce6871d944d885febf012713f06659c"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:84c3990934bae40ea69a82034912ffe5a62c60bbf6ec5bc9691419641d7d5c9a"}, {file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74292fc76c905c0ef095fe11e188a32ebd03bc38f3f3e9bcb85e4e6db177b7ea"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c95a03c79bbe30eec3ec2b7f076074f4281526724c8685a42872974ef4d36b72"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c39b0e3eac288fedc2b43055cfc2ca7a60362d0e5e87a637beac5d801ef478"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2c707231459e8a4028eabcd3cfc827befd635b3ef72eada84ab13b52e1574d"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ad6d87ac18e2a90b0fe89df7c65263b9a99a0eb98f0a3d2e079f12a0735837"}, {file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:59e5686dd847347e55dffcc191a96622f016bc0ad89105e24c14e0d6305acbc6"}, {file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:cd6056167405314a4dc3c173943f11249fa0f1b204f8b51ed4bde1a9cd1834dc"}, {file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:083c8d17153ecb403e5e1eb76a7ef4babfc2c48d58899c98fcaa04833e7a2f9a"}, {file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:f5057856d21e7586765171eac8b9fc3f7d44ef39425f85dbcccb13b3ebea806c"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7eb33a30d75562222b64f569c642ff3dc6689e09adda43a082208397f016c39a"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:95dea361dd73757c6f1c0a1480ac499952c16ac83f7f5f4f84f0658a01b8ef41"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
{file = "charset_normalizer-3.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:eaa379fcd227ca235d04152ca6704c7cb55564116f8bc52545ff357628e10602"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e45867f1f2ab0711d60c6c71746ac53537f1684baa699f4f668d4c6f6ce8e14"}, {file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cadaeaba78750d58d3cc6ac4d1fd867da6fc73c88156b7a3212a3cd4819d679d"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:911d8a40b2bef5b8bbae2e36a0b103f142ac53557ab421dc16ac4aafee6f53dc"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:503e65837c71b875ecdd733877d852adbc465bd82c768a067badd953bf1bc5a3"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60332922359f920193b1d4826953c507a877b523b2395ad7bc716ddd386d866"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16a8663d6e281208d78806dbe14ee9903715361cf81f6d4309944e4d1e59ac5b"}, {file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a16418ecf1329f71df119e8a65f3aa68004a3f9383821edcb20f0702934d8087"}, {file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d9153257a3f70d5f69edf2325357251ed20f772b12e593f3b3377b5f78e7ef8"}, {file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:02a51034802cbf38db3f89c66fb5d2ec57e6fe7ef2f4a44d070a593c3688667b"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:2e396d70bc4ef5325b72b593a72c8979999aa52fb8bcf03f701c1b03e1166918"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:11b53acf2411c3b09e6af37e4b9005cba376c872503c8f28218c7243582df45d"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:0bf2dae5291758b6f84cf923bfaa285632816007db0330002fa1de38bfcb7154"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
{file = "charset_normalizer-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2c03cc56021a4bd59be889c2b9257dae13bf55041a3372d3295416f86b295fb5"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
{file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:024e606be3ed92216e2b6952ed859d86b4cfa52cd5bc5f050e7dc28f9b43ec42"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
{file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4b0d02d7102dd0f997580b51edc4cebcf2ab6397a7edf89f1c73b586c614272c"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
{file = "charset_normalizer-3.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:358a7c4cb8ba9b46c453b1dd8d9e431452d5249072e4f56cfda3149f6ab1405e"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
{file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81d6741ab457d14fdedc215516665050f3822d3e56508921cc7239f8c8e66a58"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
{file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8af03d2e37866d023ad0ddea594edefc31e827fee64f8de5611a1dbc373174"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
{file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cf4e8ad252f7c38dd1f676b46514f92dc0ebeb0db5552f5f403509705e24753"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
{file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e696f0dd336161fca9adbb846875d40752e6eba585843c768935ba5c9960722b"}, {file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
{file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c22d3fe05ce11d3671297dc8973267daa0f938b93ec716e12e0f6dee81591dc1"}, {file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
{file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109487860ef6a328f3eec66f2bf78b0b72400280d8f8ea05f69c51644ba6521a"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
{file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:37f8febc8ec50c14f3ec9637505f28e58d4f66752207ea177c1d67df25da5aed"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
{file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f97e83fa6c25693c7a35de154681fcc257c1c41b38beb0304b9c4d2d9e164479"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
{file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a152f5f33d64a6be73f1d30c9cc82dfc73cec6477ec268e7c6e4c7d23c2d2291"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
{file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39049da0ffb96c8cbb65cbf5c5f3ca3168990adf3551bd1dee10c48fce8ae820"}, {file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
{file = "charset_normalizer-3.0.1-cp38-cp38-win32.whl", hash = "sha256:4457ea6774b5611f4bed5eaa5df55f70abde42364d498c5134b7ef4c6958e20e"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
{file = "charset_normalizer-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:e62164b50f84e20601c1ff8eb55620d2ad25fb81b59e3cd776a1902527a788af"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
{file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eade758719add78ec36dc13201483f8e9b5d940329285edcd5f70c0a9edbd7f"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
{file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8499ca8f4502af841f68135133d8258f7b32a53a1d594aa98cc52013fff55678"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
{file = "charset_normalizer-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fc1c4a2ffd64890aebdb3f97e1278b0cc72579a08ca4de8cd2c04799a3a22be"}, {file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
{file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d3ffdaafe92a5dc603cb9bd5111aaa36dfa187c8285c543be562e61b755f6b"}, {file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
{file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2ac1b08635a8cd4e0cbeaf6f5e922085908d48eb05d44c5ae9eabab148512ca"}, {file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
{file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6f45710b4459401609ebebdbcfb34515da4fc2aa886f95107f556ac69a9147e"}, {file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
{file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae1de54a77dc0d6d5fcf623290af4266412a7c4be0b1ff7444394f03f5c54e3"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
{file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b590df687e3c5ee0deef9fc8c547d81986d9a1b56073d82de008744452d6541"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
{file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab5de034a886f616a5668aa5d098af2b5385ed70142090e2a31bcbd0af0fdb3d"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
{file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9cb3032517f1627cc012dbc80a8ec976ae76d93ea2b5feaa9d2a5b8882597579"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
{file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:608862a7bf6957f2333fc54ab4399e405baad0163dc9f8d99cb236816db169d4"}, {file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
{file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f438ae3532723fb6ead77e7c604be7c8374094ef4ee2c5e03a3a17f1fca256c"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
{file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:356541bf4381fa35856dafa6a965916e54bed415ad8a24ee6de6e37deccf2786"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
{file = "charset_normalizer-3.0.1-cp39-cp39-win32.whl", hash = "sha256:39cf9ed17fe3b1bc81f33c9ceb6ce67683ee7526e65fde1447c772afc54a1bb8"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
{file = "charset_normalizer-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a11e971ed097d24c534c037d298ad32c6ce81a45736d31e0ff0ad37ab437d59"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
{file = "charset_normalizer-3.0.1-py3-none-any.whl", hash = "sha256:7e189e2e1d3ed2f4aebabd2d5b0f931e883676e51c7624826e0a4e5fe8a0bf24"}, {file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
{file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
{file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
] ]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
{file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "idna"
-version = "3.7"
+version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
-python-versions = ">=3.5"
+python-versions = ">=3.6"
files = [
-    {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
-    {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
+    {file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
+    {file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]

+[package.extras]
+all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "iso8601"
version = "1.1.0"
@@ -150,13 +263,13 @@ files = [
[[package]]
name = "requests"
-version = "2.32.0"
+version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
-    {file = "requests-2.32.0-py3-none-any.whl", hash = "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5"},
-    {file = "requests-2.32.0.tar.gz", hash = "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"},
+    {file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
+    {file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]

[package.dependencies]
@@ -170,14 +283,25 @@ socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]

[[package]]
-name = "soupsieve"
-version = "2.4"
-description = "A modern CSS selector implementation for Beautiful Soup."
+name = "sniffio"
+version = "1.3.1"
+description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
-    {file = "soupsieve-2.4-py3-none-any.whl", hash = "sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955"},
-    {file = "soupsieve-2.4.tar.gz", hash = "sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a"},
+    {file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
+    {file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "soupsieve"
version = "2.6"
description = "A modern CSS selector implementation for Beautiful Soup."
optional = false
python-versions = ">=3.8"
files = [
{file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"},
{file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
] ]
[[package]]
@@ -194,15 +318,26 @@ files = [
[package.extras]
widechars = ["wcwidth"]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "urllib3"
-version = "1.26.19"
+version = "1.26.20"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
-    {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"},
-    {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"},
+    {file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
+    {file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"},
]

[package.extras]
@@ -213,4 +348,4 @@ socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
-content-hash = "0a1d5abd47a669c7a1f2dc7b43824a449e29ba94908a4338d2ea0f2dfb4f805e"
+content-hash = "b17c2cdd4b140f2ab8081bca7d94630e821fa2e882ac768b1bd8cf3ec58726ce"


@@ -1,21 +1,28 @@
[tool.poetry]
name = "nhentai"
-version = "0.5.12"
+version = "0.6.0-beta"
description = "nhentai doujinshi downloader"
authors = ["Ricter Z <ricterzheng@gmail.com>"]
license = "MIT"
readme = "README.rst"
+include = ["nhentai/viewer/**"]

[tool.poetry.dependencies]
python = "^3.8"
-requests = "^2.28.2"
-soupsieve = "^2.4"
-beautifulsoup4 = "^4.11.2"
+requests = "^2.32.3"
+soupsieve = "^2.6"
+beautifulsoup4 = "^4.12.3"
tabulate = "^0.9.0"
iso8601 = "^1.1.0"
-urllib3 = "^1.26.14"
+urllib3 = "^1.26.20"
+httpx = "^0.28.1"
+chardet = "^5.2.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

+[tool.poetry.scripts]
+nhentai = 'nhentai.command:main'
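The new [tool.poetry.scripts] table takes over the console-script entry point that previously lived in setup.py: installing the package maps the nhentai command to nhentai.command:main, roughly equivalent to this small launcher (a sketch of what the generated script amounts to, not project code):

# Sketch of what the generated `nhentai` console script boils down to
import sys
from nhentai.command import main

if __name__ == '__main__':
    sys.exit(main())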

qodana.yaml (new executable file)

@@ -0,0 +1,29 @@
#-------------------------------------------------------------------------------#
# Qodana analysis is configured by qodana.yaml file #
# https://www.jetbrains.com/help/qodana/qodana-yaml.html #
#-------------------------------------------------------------------------------#
version: "1.0"
#Specify inspection profile for code analysis
profile:
  name: qodana.starter
#Enable inspections
#include:
# - name: <SomeEnabledInspectionId>
#Disable inspections
#exclude:
# - name: <SomeDisabledInspectionId>
# paths:
# - <path/where/not/run/inspection>
#Execute shell command before Qodana execution (Applied in CI/CD pipeline)
#bootstrap: sh ./prepare-qodana.sh
#Install IDE plugins before Qodana execution (Applied in CI/CD pipeline)
#plugins:
# - id: <plugin.id> #(plugin id can be found at https://plugins.jetbrains.com)
#Specify Qodana linter for analysis (Applied in CI/CD pipeline)
linter: jetbrains/qodana-python:2024.3


@@ -1,8 +0,0 @@
-httpx
-requests
-soupsieve
-setuptools
-BeautifulSoup4
-tabulate
-iso8601
-urllib3


@@ -1,3 +0,0 @@
-[metadata]
-description_file = README.rst


@@ -1,38 +0,0 @@
-# coding: utf-8
-import codecs
-from setuptools import setup, find_packages
-from nhentai import __version__, __author__, __email__
-
-with open('requirements.txt') as f:
-    requirements = [l for l in f.read().splitlines() if l]
-
-def long_description():
-    with codecs.open('README.rst', 'rb') as readme:
-        return readme.read().decode('utf-8')
-
-setup(
-    name='nhentai',
-    version=__version__,
-    packages=find_packages(),
-    author=__author__,
-    author_email=__email__,
-    keywords=['nhentai', 'doujinshi', 'downloader'],
-    description='nhentai.net doujinshis downloader',
-    long_description=long_description(),
-    url='https://github.com/RicterZ/nhentai',
-    download_url='https://github.com/RicterZ/nhentai/tarball/master',
-    include_package_data=True,
-    zip_safe=False,
-    install_requires=requirements,
-    entry_points={
-        'console_scripts': [
-            'nhentai = nhentai.command:main',
-        ]
-    },
-    license='MIT',
-)


@@ -1,14 +1,27 @@
import unittest
import os
+import zipfile
import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import load_config
-from nhentai.downloader import Downloader
+from nhentai.downloader import Downloader, CompressedDownloader
from nhentai.parser import doujinshi_parser
from nhentai.doujinshi import Doujinshi
-from nhentai.utils import generate_html, generate_cbz
+from nhentai.utils import generate_html
+
+did = 440546
+
+def has_jepg_file(path):
+    with zipfile.ZipFile(path, 'r') as zf:
+        return '01.jpg' in zf.namelist()
+
+def is_zip_file(path):
+    try:
+        with zipfile.ZipFile(path, 'r') as _:
+            return True
+    except (zipfile.BadZipFile, FileNotFoundError):
+        return False
class TestDownload(unittest.TestCase):
    def setUp(self) -> None:
@@ -17,20 +30,27 @@ class TestDownload(unittest.TestCase):
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')

+        self.info = Doujinshi(**doujinshi_parser(did), name_format='%i')
+
    def test_download(self):
-        did = 440546
-        info = Doujinshi(**doujinshi_parser(did), name_format='%i')
+        info = self.info

        info.downloader = Downloader(path='/tmp', threads=5)
        info.download()

-        self.assertTrue(os.path.exists(f'/tmp/{did}/001.jpg'))
+        self.assertTrue(os.path.exists(f'/tmp/{did}/01.jpg'))

        generate_html('/tmp', info)
        self.assertTrue(os.path.exists(f'/tmp/{did}/index.html'))

-        generate_cbz('/tmp', info)
-        self.assertTrue(os.path.exists(f'/tmp/{did}.cbz'))
+    def test_zipfile_download(self):
+        info = self.info
+        info.downloader = CompressedDownloader(path='/tmp', threads=5)
+        info.download()
+
+        zipfile_path = f'/tmp/{did}.zip'
+        self.assertTrue(os.path.exists(zipfile_path))
+        self.assertTrue(is_zip_file(zipfile_path))
+        self.assertTrue(has_jepg_file(zipfile_path))

if __name__ == '__main__':
    unittest.main()
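As a quick illustration of what the two helper predicates at the top of this test module check, the following self-contained snippet builds a throwaway archive and runs them against it (purely illustrative; it is not part of the test suite):

# Illustrative check of has_jepg_file() and is_zip_file() defined above
import os
import tempfile
import zipfile

tmp_dir = tempfile.mkdtemp()
sample = os.path.join(tmp_dir, 'sample.zip')
with zipfile.ZipFile(sample, 'w') as zf:
    zf.writestr('01.jpg', b'placeholder bytes, not a real image')

print(is_zip_file(sample))               # True: the file opens as a zip archive
print(has_jepg_file(sample))             # True: '01.jpg' is in the archive's namelist
print(is_zip_file(sample + '.missing'))  # False: FileNotFoundError is caught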