Compare commits: 0.5.21 ... dev (3 commits)

SHA1  Message  Date
a0f6e3f857  Merge pull request #308 from myc1ou1d/dev (fix nhentai.net api sort option changed from popular to popular-all)  2024-04-01 00:53:02 +08:00
413657e076  Merge branch 'RicterZ:dev' into dev  2024-04-01 00:12:41 +08:00
0b80458c6f  fix nhentai.net api sort option changed from popular to popular-all  2024-03-11 16:34:53 +08:00
22 changed files with 491 additions and 760 deletions

@@ -1,5 +1,4 @@
.git .git
.github
.gitignore .gitignore
venv venv
*.egg-info *.egg-info

.gitignore (vendored, 2 changes)
@@ -8,5 +8,3 @@ dist/
output/ output/
venv/ venv/
.vscode/ .vscode/
test-output
*.whl

@@ -1,9 +1,11 @@
FROM python:3 FROM python:3
WORKDIR /usr/src/nhentai WORKDIR /usr/src/nhentai
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt
COPY . . COPY . .
RUN pip install --no-cache-dir . RUN python setup.py install
WORKDIR /output WORKDIR /output
ENTRYPOINT ["nhentai"] ENTRYPOINT ["nhentai"]

MANIFEST.in (new file, 5 additions)
@@ -0,0 +1,5 @@
include README.rst
include requirements.txt
include nhentai/viewer/*
include nhentai/viewer/default/*
include nhentai/viewer/minimal/*

@@ -11,8 +11,6 @@ nhentai
nhentai is a CLI tool for downloading doujinshi from `nhentai.net <https://nhentai.net>`_ nhentai is a CLI tool for downloading doujinshi from `nhentai.net <https://nhentai.net>`_
GUI version: `https://github.com/edgar1016/nhentai-GUI <https://github.com/edgar1016/nhentai-GUI>`_
=================== ===================
Manual Installation Manual Installation
=================== ===================
@@ -59,7 +57,7 @@ On Gentoo Linux:
.. code-block:: .. code-block::
layman -fa glibOne layman -fa glicOne
sudo emerge net-misc/nhentai sudo emerge net-misc/nhentai
On NixOS: On NixOS:
@@ -129,7 +127,7 @@ Download your favorites with delay:
.. code-block:: bash .. code-block:: bash
nhentai --favorites --download --delay 1 --page 3-5,7 nhentai --favorites --download --delay 1
Format output doujinshi folder name: Format output doujinshi folder name:
@@ -140,13 +138,10 @@ Format output doujinshi folder name:
Supported doujinshi folder formatter: Supported doujinshi folder formatter:
- %i: Doujinshi id - %i: Doujinshi id
- %f: Doujinshi favorite count
- %t: Doujinshi name - %t: Doujinshi name
- %s: Doujinshi subtitle (translated name) - %s: Doujinshi subtitle (translated name)
- %a: Doujinshi authors' name - %a: Doujinshi authors' name
- %g: Doujinshi groups name
- %p: Doujinshi pretty name - %p: Doujinshi pretty name
- %ag: Doujinshi authors name or groups name
Other options: Other options:
@@ -162,21 +157,25 @@ Other options:
NHENTAI nhentai mirror url NHENTAI nhentai mirror url
Options: Options:
# Operation options, control the program behaviors
-h, --help show this help message and exit -h, --help show this help message and exit
-D, --download download doujinshi (for search results) -D, --download download doujinshi (for search results)
-S, --show just show the doujinshi information -S, --show just show the doujinshi information
# Doujinshi options, specify id, keyword, etc.
--id doujinshi ids set, e.g. 167680 167681 167682 --id doujinshi ids set, e.g. 167680 167681 167682
-s KEYWORD, --search=KEYWORD -s KEYWORD, --search=KEYWORD
search doujinshi by keyword search doujinshi by keyword
-F, --favorites list or download your favorites -F, --favorites list or download your favorites
-a ARTIST, --artist=ARTIST
list doujinshi by artist name # Page options, control the page to fetch / download
--page-all all search results --page-all all search results
--page=PAGE, --page-range=PAGE --page=PAGE, --page-range=PAGE
page number of search results. e.g. 1,2-5,14 page number of search results. e.g. 1,2-5,14
--sorting=SORTING, --sort=SORTING --sorting=SORTING sorting of doujinshi (recent / popular /
sorting of doujinshi (recent / popular /
popular-[today|week]) popular-[today|week])
# Download options, the output directory, threads, timeout, delay, etc.
-o OUTPUT_DIR, --output=OUTPUT_DIR -o OUTPUT_DIR, --output=OUTPUT_DIR
output dir output dir
-t THREADS, --threads=THREADS -t THREADS, --threads=THREADS
@@ -185,12 +184,12 @@ Other options:
timeout for downloading doujinshi timeout for downloading doujinshi
-d DELAY, --delay=DELAY -d DELAY, --delay=DELAY
slow down between downloading every doujinshi slow down between downloading every doujinshi
--retry=RETRY retry times when downloading failed
--exit-on-fail exit on fail to prevent generating incomplete files
--proxy=PROXY store a proxy, for example: -p "http://127.0.0.1:1080" --proxy=PROXY store a proxy, for example: -p "http://127.0.0.1:1080"
-f FILE, --file=FILE read gallery IDs from file. -f FILE, --file=FILE read gallery IDs from file.
--format=NAME_FORMAT format the saved folder name --format=NAME_FORMAT format the saved folder name
--dry-run Dry run, skip file download --dry-run Dry run, skip file download
# Generate options, for generate html viewer, cbz file, pdf file, etc
--html generate a html viewer at current directory --html generate a html viewer at current directory
--no-html don't generate HTML after downloading --no-html don't generate HTML after downloading
--gen-main generate a main viewer contain all the doujin in the --gen-main generate a main viewer contain all the doujin in the
@@ -199,10 +198,10 @@ Other options:
-P, --pdf generate PDF file -P, --pdf generate PDF file
--rm-origin-dir remove downloaded doujinshi dir when generated CBZ or --rm-origin-dir remove downloaded doujinshi dir when generated CBZ or
PDF file PDF file
--move-to-folder remove files in doujinshi dir then move new file to
folder when generated CBZ or PDF file
--meta generate a metadata file in doujinshi format --meta generate a metadata file in doujinshi format
--regenerate regenerate the cbz or pdf file if exists --regenerate-cbz regenerate the cbz file if exists
# nhentai options, set cookie, user-agent, language, remove caches, histories, etc
--cookie=COOKIE set cookie of nhentai to bypass Cloudflare captcha --cookie=COOKIE set cookie of nhentai to bypass Cloudflare captcha
--useragent=USERAGENT, --user-agent=USERAGENT --useragent=USERAGENT, --user-agent=USERAGENT
set useragent to bypass Cloudflare captcha set useragent to bypass Cloudflare captcha
@@ -226,9 +225,6 @@ For example:
.. code-block:: .. code-block::
i.h.loli.club -> i.nhentai.net i.h.loli.club -> i.nhentai.net
i3.h.loli.club -> i3.nhentai.net
i5.h.loli.club -> i5.nhentai.net
i7.h.loli.club -> i7.nhentai.net
h.loli.club -> nhentai.net h.loli.club -> nhentai.net
Set `NHENTAI` env var to your nhentai mirror. Set `NHENTAI` env var to your nhentai mirror.

@@ -1,3 +1,3 @@
__version__ = '0.5.20' __version__ = '0.5.3'
__author__ = 'RicterZ' __author__ = 'RicterZ'
__email__ = 'ricterzheng@gmail.com' __email__ = 'ricterzheng@gmail.com'

@@ -11,7 +11,6 @@ from optparse import OptionParser
from nhentai import __version__ from nhentai import __version__
from nhentai.utils import generate_html, generate_main_html, DB from nhentai.utils import generate_html, generate_main_html, DB
from nhentai.logger import logger from nhentai.logger import logger
from nhentai.constant import PATH_SEPARATOR
def banner(): def banner():
@@ -38,7 +37,7 @@ def write_config():
f.write(json.dumps(constant.CONFIG)) f.write(json.dumps(constant.CONFIG))
def callback(option, _opt_str, _value, parser): def callback(option, opt_str, value, parser):
if option == '--id': if option == '--id':
pass pass
value = [] value = []
@@ -65,8 +64,7 @@ def cmd_parser():
# operation options # operation options
parser.add_option('--download', '-D', dest='is_download', action='store_true', parser.add_option('--download', '-D', dest='is_download', action='store_true',
help='download doujinshi (for search results)') help='download doujinshi (for search results)')
parser.add_option('--show', '-S', dest='is_show', action='store_true', parser.add_option('--show', '-S', dest='is_show', action='store_true', help='just show the doujinshi information')
help='just show the doujinshi information')
# doujinshi options # doujinshi options
parser.add_option('--id', dest='id', action='callback', callback=callback, parser.add_option('--id', dest='id', action='callback', callback=callback,
@@ -81,15 +79,14 @@ def cmd_parser():
# page options # page options
parser.add_option('--page-all', dest='page_all', action='store_true', default=False, parser.add_option('--page-all', dest='page_all', action='store_true', default=False,
help='all search results') help='all search results')
parser.add_option('--page', '--page-range', type='string', dest='page', action='store', parser.add_option('--page', '--page-range', type='string', dest='page', action='store', default='1',
help='page number of search results. e.g. 1,2-5,14') help='page number of search results. e.g. 1,2-5,14')
parser.add_option('--sorting', '--sort', dest='sorting', action='store', default='popular', parser.add_option('--sorting', '--sort', dest='sorting', action='store', default='popular-all',
help='sorting of doujinshi (recent / popular / popular-[today|week])', help='sorting of doujinshi (recent / popular-all / popular-[today|week])',
choices=['recent', 'popular', 'popular-today', 'popular-week', 'date']) choices=['recent', 'popular-all', 'popular-today', 'popular-week', 'date'])
# download options # download options
parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='./',
default=f'.{PATH_SEPARATOR}',
help='output dir') help='output dir')
parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5, parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
help='thread count for downloading doujinshi') help='thread count for downloading doujinshi')
@@ -97,21 +94,13 @@ def cmd_parser():
help='timeout for downloading doujinshi') help='timeout for downloading doujinshi')
parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0, parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0,
help='slow down between downloading every doujinshi') help='slow down between downloading every doujinshi')
parser.add_option('--retry', type='int', dest='retry', action='store', default=3,
help='retry times when downloading failed')
parser.add_option('--exit-on-fail', dest='exit_on_fail', action='store_true', default=False,
help='exit on fail to prevent generating incomplete files')
parser.add_option('--proxy', type='string', dest='proxy', action='store', parser.add_option('--proxy', type='string', dest='proxy', action='store',
help='store a proxy, for example: -p "http://127.0.0.1:1080"') help='store a proxy, for example: -p "http://127.0.0.1:1080"')
parser.add_option('--file', '-f', type='string', dest='file', action='store', parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
help='read gallery IDs from file.')
parser.add_option('--format', type='string', dest='name_format', action='store', parser.add_option('--format', type='string', dest='name_format', action='store',
help='format the saved folder name', default='[%i][%a][%t]') help='format the saved folder name', default='[%i][%a][%t]')
parser.add_option('--dry-run', action='store_true', dest='dryrun', help='Dry run, skip file download') parser.add_option('--dry-run', action='store_true', dest='dryrun', help='Dry run, skip file download')
parser.add_option('--no-filename-padding', action='store_true', dest='no_filename_padding',
default=False, help='no padding in the images filename, such as \'001.jpg\'')
# generate options # generate options
parser.add_option('--html', dest='html_viewer', action='store_true', parser.add_option('--html', dest='html_viewer', action='store_true',
help='generate a html viewer at current directory') help='generate a html viewer at current directory')
@@ -129,8 +118,8 @@ def cmd_parser():
help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file') help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file')
parser.add_option('--meta', dest='generate_metadata', action='store_true', parser.add_option('--meta', dest='generate_metadata', action='store_true',
help='generate a metadata file in doujinshi format') help='generate a metadata file in doujinshi format')
parser.add_option('--regenerate', dest='regenerate', action='store_true', default=False, parser.add_option('--regenerate-cbz', dest='regenerate_cbz', action='store_true', default=False,
help='regenerate the cbz or pdf file if exists') help='regenerate the cbz file if exists')
# nhentai options # nhentai options
parser.add_option('--cookie', type='str', dest='cookie', action='store', parser.add_option('--cookie', type='str', dest='cookie', action='store',
@@ -192,7 +181,10 @@ def cmd_parser():
logger.error(f'Invalid protocol "{proxy_url.scheme}" of proxy, ignored') logger.error(f'Invalid protocol "{proxy_url.scheme}" of proxy, ignored')
sys.exit(0) sys.exit(0)
else: else:
constant.CONFIG['proxy'] = args.proxy constant.CONFIG['proxy'] = {
'http': args.proxy,
'https': args.proxy,
}
logger.info(f'Proxy now set to "{args.proxy}"') logger.info(f'Proxy now set to "{args.proxy}"')
write_config() write_config()
sys.exit(0) sys.exit(0)
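Note on the proxy hunk above: the dev side persists the proxy as a per-scheme dict (the shape requests accepts for its proxies= argument), while 0.5.21 stores a single URL string and migrates old dict-style configs at startup (see the main() hunks below). A minimal sketch of the two shapes, with a hypothetical proxy URL:

    legacy = {'http': 'http://127.0.0.1:1080', 'https': 'http://127.0.0.1:1080'}  # dev-side shape
    flat = legacy.get('http', '')  # 0.5.21-side migration of an old config
    assert flat == 'http://127.0.0.1:1080'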

@@ -1,22 +1,18 @@
# coding: utf-8 # coding: utf-8
import os
import shutil
import sys import sys
import signal import signal
import platform import platform
import urllib
import urllib3.exceptions import urllib3.exceptions
from nhentai import constant from nhentai import constant
from nhentai.cmdline import cmd_parser, banner, write_config from nhentai.cmdline import cmd_parser, banner
from nhentai.parser import doujinshi_parser, search_parser, legacy_search_parser, print_doujinshi, favorites_parser from nhentai.parser import doujinshi_parser, search_parser, legacy_search_parser, print_doujinshi, favorites_parser
from nhentai.doujinshi import Doujinshi from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader from nhentai.downloader import Downloader
from nhentai.logger import logger from nhentai.logger import logger
from nhentai.constant import BASE_URL from nhentai.constant import BASE_URL
from nhentai.utils import generate_html, generate_doc, generate_main_html, generate_metadata_file, \ from nhentai.utils import generate_html, generate_cbz, generate_main_html, generate_pdf, generate_metadata_file, \
paging, check_cookie, signal_handler, DB, move_to_folder paging, check_cookie, signal_handler, DB
def main(): def main():
@@ -30,13 +26,8 @@ def main():
logger.info(f'Using mirror: {BASE_URL}') logger.info(f'Using mirror: {BASE_URL}')
# CONFIG['proxy'] will be changed after cmd_parser() # CONFIG['proxy'] will be changed after cmd_parser()
if constant.CONFIG['proxy']: if constant.CONFIG['proxy']['http']:
if isinstance(constant.CONFIG['proxy'], dict): logger.info(f'Using proxy: {constant.CONFIG["proxy"]["http"]}')
constant.CONFIG['proxy'] = constant.CONFIG['proxy'].get('http', '')
logger.warning(f'Update proxy config to: {constant.CONFIG["proxy"]}')
write_config()
logger.info(f'Using proxy: {constant.CONFIG["proxy"]}')
if not constant.CONFIG['template']: if not constant.CONFIG['template']:
constant.CONFIG['template'] = 'default' constant.CONFIG['template'] = 'default'
@@ -55,7 +46,7 @@ def main():
if not options.is_download: if not options.is_download:
logger.warning('You do not specify --download option') logger.warning('You do not specify --download option')
doujinshis = favorites_parser(page=page_list) if options.page else favorites_parser() doujinshis = favorites_parser(page=page_list)
elif options.keyword: elif options.keyword:
if constant.CONFIG['language']: if constant.CONFIG['language']:
@@ -66,10 +57,6 @@ def main():
doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list, doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list,
is_page_all=options.page_all) is_page_all=options.page_all)
elif options.artist:
doujinshis = legacy_search_parser(options.artist, sorting=options.sorting, page=page_list,
is_page_all=options.page_all, type_='ARTIST')
elif not doujinshi_ids: elif not doujinshi_ids:
doujinshi_ids = options.id doujinshi_ids = options.id
@@ -84,10 +71,8 @@ def main():
doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data)) doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))
if not options.is_show: if not options.is_show:
downloader = Downloader(path=options.output_dir, threads=options.threads, downloader = Downloader(path=options.output_dir, size=options.threads,
timeout=options.timeout, delay=options.delay, timeout=options.timeout, delay=options.delay)
retry=options.retry, exit_on_fail=options.exit_on_fail,
no_filename_padding=options.no_filename_padding)
for doujinshi_id in doujinshi_ids: for doujinshi_id in doujinshi_ids:
doujinshi_info = doujinshi_parser(doujinshi_id) doujinshi_info = doujinshi_parser(doujinshi_id)
@@ -98,40 +83,22 @@ def main():
if not options.dryrun: if not options.dryrun:
doujinshi.downloader = downloader doujinshi.downloader = downloader
doujinshi.download(regenerate_cbz=options.regenerate_cbz)
if doujinshi.check_if_need_download(options):
doujinshi.download()
else:
logger.info(f'Skip download doujinshi because a PDF/CBZ file exists of doujinshi {doujinshi.name}')
continue
if options.generate_metadata: if options.generate_metadata:
generate_metadata_file(options.output_dir, doujinshi) table = doujinshi.table
generate_metadata_file(options.output_dir, table, doujinshi)
if options.is_save_download_history: if options.is_save_download_history:
with DB() as db: with DB() as db:
db.add_one(doujinshi.id) db.add_one(doujinshi.id)
if not options.is_nohtml: if not options.is_nohtml and not options.is_cbz and not options.is_pdf:
generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template']) generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template'])
elif options.is_cbz:
if options.is_cbz: generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)
generate_doc('cbz', options.output_dir, doujinshi, options.regenerate) elif options.is_pdf:
generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir)
if options.is_pdf:
generate_doc('pdf', options.output_dir, doujinshi, options.regenerate)
if options.move_to_folder:
if options.is_cbz:
move_to_folder(options.output_dir, doujinshi, 'cbz')
if options.is_pdf:
move_to_folder(options.output_dir, doujinshi, 'pdf')
if options.rm_origin_dir:
if options.move_to_folder:
logger.critical('You specified both --move-to-folder and --rm-origin-dir options, '
'you will not get anything :(')
shutil.rmtree(os.path.join(options.output_dir, doujinshi.filename), ignore_errors=True)
if options.main_viewer: if options.main_viewer:
generate_main_html(options.output_dir) generate_main_html(options.output_dir)

@@ -3,23 +3,6 @@ import os
import tempfile import tempfile
from urllib.parse import urlparse from urllib.parse import urlparse
from platform import system
def get_nhentai_home() -> str:
home = os.getenv('HOME', tempfile.gettempdir())
if system() == 'Linux':
xdgdat = os.getenv('XDG_DATA_HOME')
if xdgdat and os.path.exists(os.path.join(xdgdat, 'nhentai')):
return os.path.join(xdgdat, 'nhentai')
if home and os.path.exists(os.path.join(home, '.nhentai')):
return os.path.join(home, '.nhentai')
if xdgdat:
return os.path.join(xdgdat, 'nhentai')
# Use old default path in other systems
return os.path.join(home, '.nhentai')
DEBUG = os.getenv('DEBUG', False) DEBUG = os.getenv('DEBUG', False)
@@ -28,39 +11,26 @@ BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
DETAIL_URL = f'{BASE_URL}/g' DETAIL_URL = f'{BASE_URL}/g'
LEGACY_SEARCH_URL = f'{BASE_URL}/search/' LEGACY_SEARCH_URL = f'{BASE_URL}/search/'
SEARCH_URL = f'{BASE_URL}/api/galleries/search' SEARCH_URL = f'{BASE_URL}/api/galleries/search'
ARTIST_URL = f'{BASE_URL}/artist/'
TAG_API_URL = f'{BASE_URL}/api/galleries/tagged' TAG_API_URL = f'{BASE_URL}/api/galleries/tagged'
LOGIN_URL = f'{BASE_URL}/login/' LOGIN_URL = f'{BASE_URL}/login/'
CHALLENGE_URL = f'{BASE_URL}/challenge' CHALLENGE_URL = f'{BASE_URL}/challenge'
FAV_URL = f'{BASE_URL}/favorites/' FAV_URL = f'{BASE_URL}/favorites/'
PATH_SEPARATOR = os.path.sep IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i.{urlparse(BASE_URL).hostname}/galleries'
NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i1.{urlparse(BASE_URL).hostname}/galleries'
IMAGE_URL_MIRRORS = [
f'{urlparse(BASE_URL).scheme}://i2.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i4.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i6.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}',
]
NHENTAI_HOME = get_nhentai_home()
NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3') NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json') NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')
__api_suspended_DETAIL_URL = f'{BASE_URL}/api/gallery' __api_suspended_DETAIL_URL = f'{BASE_URL}/api/gallery'
CONFIG = { CONFIG = {
'proxy': '', 'proxy': {'http': '', 'https': ''},
'cookie': '', 'cookie': '',
'language': '', 'language': '',
'template': '', 'template': '',
'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)', 'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)'
'max_filename': 85
} }
LANGUAGE_ISO = { LANGUAGE_ISO = {
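Note on the image-host hunks above: 0.5.21 pins the primary host to i1 and introduces IMAGE_URL_MIRRORS (i2 through i7) so a failed page download can be retried on another host. A minimal sketch of building the candidate URLs (the gallery URL is hypothetical):

    from urllib.parse import urlparse

    mirrors = [f'https://i{n}.nhentai.net' for n in range(2, 8)]  # i2 .. i7, as in the hunk above
    path = urlparse('https://i1.nhentai.net/galleries/1234567/1.jpg').path
    candidates = [f'{m}{path}' for m in mirrors]  # tried in order until one answers HTTP 200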

@@ -1,5 +1,4 @@
# coding: utf-8 # coding: utf-8
import os
from tabulate import tabulate from tabulate import tabulate
@@ -12,7 +11,6 @@ EXT_MAP = {
'j': 'jpg', 'j': 'jpg',
'p': 'png', 'p': 'png',
'g': 'gif', 'g': 'gif',
'w': 'webp',
} }
@@ -22,19 +20,17 @@ class DoujinshiInfo(dict):
def __getattr__(self, item): def __getattr__(self, item):
try: try:
ret = dict.__getitem__(self, item) return dict.__getitem__(self, item)
return ret if ret else 'Unknown'
except KeyError: except KeyError:
return 'Unknown' return ''
class Doujinshi(object): class Doujinshi(object):
def __init__(self, name=None, pretty_name=None, id=None, favorite_counts=0, img_id=None, def __init__(self, name=None, pretty_name=None, id=None, img_id=None,
ext='', pages=0, name_format='[%i][%a][%t]', **kwargs): ext='', pages=0, name_format='[%i][%a][%t]', **kwargs):
self.name = name self.name = name
self.pretty_name = pretty_name self.pretty_name = pretty_name
self.id = id self.id = id
self.favorite_counts = favorite_counts
self.img_id = img_id self.img_id = img_id
self.ext = ext self.ext = ext
self.pages = pages self.pages = pages
@@ -42,13 +38,8 @@ class Doujinshi(object):
self.url = f'{DETAIL_URL}/{self.id}' self.url = f'{DETAIL_URL}/{self.id}'
self.info = DoujinshiInfo(**kwargs) self.info = DoujinshiInfo(**kwargs)
ag_value = self.info.groups if self.info.artists == 'Unknown' else self.info.artists
name_format = name_format.replace('%ag', format_filename(ag_value))
name_format = name_format.replace('%i', format_filename(str(self.id))) name_format = name_format.replace('%i', format_filename(str(self.id)))
name_format = name_format.replace('%f', format_filename(str(self.favorite_counts)))
name_format = name_format.replace('%a', format_filename(self.info.artists)) name_format = name_format.replace('%a', format_filename(self.info.artists))
name_format = name_format.replace('%g', format_filename(self.info.groups))
name_format = name_format.replace('%t', format_filename(self.name)) name_format = name_format.replace('%t', format_filename(self.name))
name_format = name_format.replace('%p', format_filename(self.pretty_name)) name_format = name_format.replace('%p', format_filename(self.pretty_name))
@@ -56,18 +47,15 @@ class Doujinshi(object):
self.filename = format_filename(name_format, 255, True) self.filename = format_filename(name_format, 255, True)
self.table = [ self.table = [
['Parodies', self.info.parodies], ["Parodies", self.info.parodies],
['Title', self.name], ["Doujinshi", self.name],
['Subtitle', self.info.subtitle], ["Subtitle", self.info.subtitle],
['Date', self.info.date], ["Characters", self.info.characters],
['Characters', self.info.characters], ["Authors", self.info.artists],
['Authors', self.info.artists], ["Languages", self.info.languages],
['Groups', self.info.groups], ["Tags", self.info.tags],
['Languages', self.info.languages], ["URL", self.url],
['Tags', self.info.tags], ["Pages", self.pages],
['Favorite Counts', self.favorite_counts],
['URL', self.url],
['Pages', self.pages],
] ]
def __repr__(self): def __repr__(self):
@@ -76,35 +64,7 @@ class Doujinshi(object):
def show(self): def show(self):
logger.info(f'Print doujinshi information of {self.id}\n{tabulate(self.table)}') logger.info(f'Print doujinshi information of {self.id}\n{tabulate(self.table)}')
def check_if_need_download(self, options): def download(self, regenerate_cbz=False):
base_path = os.path.join(self.downloader.path, self.filename)
# regenerate, re-download
if options.regenerate:
return True
# pdf or cbz file exists, skip re-download
# doujinshi directory may not exist b/c of --rm-origin-dir option set.
# user should pass --regenerate option to get back origin dir.
ret_pdf = ret_cbz = None
if options.is_pdf:
ret_pdf = os.path.exists(f'{base_path}.pdf') or os.path.exists(f'{base_path}/{self.filename}.pdf')
if options.is_cbz:
ret_cbz = os.path.exists(f'{base_path}.cbz') or os.path.exists(f'{base_path}/{self.filename}.cbz')
ret = list(filter(lambda s: s is not None, [ret_cbz, ret_pdf]))
if ret and all(ret):
return False
# doujinshi directory doesn't exist, re-download
if not (os.path.exists(base_path) and os.path.isdir(base_path)):
return True
# fallback
return True
def download(self):
logger.info(f'Starting to download doujinshi: {self.name}') logger.info(f'Starting to download doujinshi: {self.name}')
if self.downloader: if self.downloader:
download_queue = [] download_queue = []
@@ -114,10 +74,9 @@ class Doujinshi(object):
for i in range(1, min(self.pages, len(self.ext)) + 1): for i in range(1, min(self.pages, len(self.ext)) + 1):
download_queue.append(f'{IMAGE_URL}/{self.img_id}/{i}.{self.ext[i-1]}') download_queue.append(f'{IMAGE_URL}/{self.img_id}/{i}.{self.ext[i-1]}')
return self.downloader.start_download(download_queue, self.filename) self.downloader.start_download(download_queue, self.filename, regenerate_cbz=regenerate_cbz)
else: else:
logger.critical('Downloader has not been loaded') logger.critical('Downloader has not been loaded')
return False
if __name__ == '__main__': if __name__ == '__main__':
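Note on the name_format hunks above: the folder formatter is a plain chain of str.replace() calls, which is why 0.5.21's extra placeholders (%f, %g, %ag) slot in beside the shared ones. A minimal sketch with hypothetical values:

    name_format = '[%i][%a][%t]'                        # the default --format
    name_format = name_format.replace('%i', '167680')   # doujinshi id
    name_format = name_format.replace('%a', 'Some Artist')
    name_format = name_format.replace('%t', 'Some Title')
    assert name_format == '[167680][Some Artist][Some Title]'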

@@ -1,17 +1,23 @@
# coding: utf- # coding: utf-
import multiprocessing
import signal
import sys
import os import os
import asyncio import requests
import httpx import time
import urllib3.exceptions import urllib3.exceptions
from urllib.parse import urlparse from urllib.parse import urlparse
from nhentai import constant from nhentai import constant
from nhentai.logger import logger from nhentai.logger import logger
from nhentai.utils import Singleton, async_request from nhentai.parser import request
from nhentai.utils import Singleton
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning) urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
semaphore = multiprocessing.Semaphore(1)
class NHentaiImageNotExistException(Exception): class NHentaiImageNotExistException(Exception):
@@ -34,145 +40,120 @@ def download_callback(result):
class Downloader(Singleton): class Downloader(Singleton):
def __init__(self, path='', threads=5, timeout=30, delay=0, retry=3, exit_on_fail=False,
no_filename_padding=False): def __init__(self, path='', size=5, timeout=30, delay=0):
self.threads = threads self.size = size
self.path = str(path) self.path = str(path)
self.timeout = timeout self.timeout = timeout
self.delay = delay self.delay = delay
self.retry = retry
self.exit_on_fail = exit_on_fail
self.folder = None
self.semaphore = None
self.no_filename_padding = no_filename_padding
async def fiber(self, tasks):
self.semaphore = asyncio.Semaphore(self.threads)
for completed_task in asyncio.as_completed(tasks):
try:
result = await completed_task
if result[0] > 0:
logger.info(f'{result[1]} download completed')
else:
raise Exception(f'{result[1]} download failed, return value {result[0]}')
except Exception as e:
logger.error(f'An error occurred: {e}')
if self.exit_on_fail:
raise Exception('User intends to exit on fail')
async def _semaphore_download(self, *args, **kwargs):
async with self.semaphore:
return await self.download(*args, **kwargs)
async def download(self, url, folder='', filename='', retried=0, proxy=None, length=0):
logger.info(f'Starting to download {url} ...')
def download(self, url, folder='', filename='', retried=0, proxy=None):
if self.delay: if self.delay:
await asyncio.sleep(self.delay) time.sleep(self.delay)
logger.info(f'Starting to download {url} ...')
filename = filename if filename else os.path.basename(urlparse(url).path) filename = filename if filename else os.path.basename(urlparse(url).path)
base_filename, extension = os.path.splitext(filename) base_filename, extension = os.path.splitext(filename)
if not self.no_filename_padding: save_file_path = os.path.join(folder, base_filename.zfill(3) + extension)
filename = base_filename.zfill(length) + extension
else:
filename = base_filename + extension
save_file_path = os.path.join(self.folder, filename)
try: try:
if os.path.exists(save_file_path): if os.path.exists(save_file_path):
logger.warning(f'Skipped download: {save_file_path} already exists') logger.warning(f'Ignored exists file: {save_file_path}')
return 1, url return 1, url
response = await async_request('GET', url, timeout=self.timeout, proxy=proxy) response = None
with open(save_file_path, "wb") as f:
i = 0
while i < 10:
try:
response = request('get', url, stream=True, timeout=self.timeout, proxies=proxy)
if response.status_code != 200: if response.status_code != 200:
path = urlparse(url).path raise NHentaiImageNotExistException
for mirror in constant.IMAGE_URL_MIRRORS:
logger.info(f"Try mirror: {mirror}{path}")
mirror_url = f'{mirror}{path}'
response = await async_request('GET', mirror_url, timeout=self.timeout, proxies=proxy)
if response.status_code == 200:
break
if not await self.save(filename, response):
logger.error(f'Can not download image {url}')
return -1, url
except (httpx.HTTPStatusError, httpx.TimeoutException, httpx.ConnectError) as e:
if retried < self.retry:
logger.warning(f'Download {filename} failed, retrying({retried + 1}) times...')
return await self.download(
url=url,
folder=folder,
filename=filename,
retried=retried + 1,
proxy=proxy,
)
else:
logger.warning(f'Download {filename} failed with {self.retry} times retried, skipped')
return -2, url
except NHentaiImageNotExistException as e: except NHentaiImageNotExistException as e:
os.remove(save_file_path) raise e
return -3, url
except Exception as e: except Exception as e:
import traceback i += 1
if not i < 10:
logger.error(f"Exception type: {type(e)}")
traceback.print_stack()
logger.critical(str(e)) logger.critical(str(e))
return -9, url return 0, None
continue
except KeyboardInterrupt: break
return -4, url
return 1, url
async def save(self, save_file_path, response) -> bool:
if response is None:
logger.error('Error: Response is None')
return False
save_file_path = os.path.join(self.folder, save_file_path)
with open(save_file_path, 'wb') as f:
if response is not None:
length = response.headers.get('content-length') length = response.headers.get('content-length')
if length is None: if length is None:
f.write(response.content) f.write(response.content)
else: else:
async for chunk in response.aiter_bytes(2048): for chunk in response.iter_content(2048):
f.write(chunk) f.write(chunk)
return True
def start_download(self, queue, folder='') -> bool: except (requests.HTTPError, requests.Timeout) as e:
if not isinstance(folder, (str,)): if retried < 3:
logger.warning(f'Warning: {e}, retrying({retried}) ...')
return 0, self.download(url=url, folder=folder, filename=filename,
retried=retried+1, proxy=proxy)
else:
return 0, None
except NHentaiImageNotExistException as e:
os.remove(save_file_path)
return -1, url
except Exception as e:
import traceback
traceback.print_stack()
logger.critical(str(e))
return 0, None
except KeyboardInterrupt:
return -3, None
return 1, url
def start_download(self, queue, folder='', regenerate_cbz=False):
if not isinstance(folder, (str, )):
folder = str(folder) folder = str(folder)
if self.path: if self.path:
folder = os.path.join(self.path, folder) folder = os.path.join(self.path, folder)
logger.info(f'Doujinshi will be saved at "{folder}"') if os.path.exists(folder + '.cbz'):
if not regenerate_cbz:
logger.warning(f'CBZ file "{folder}.cbz" exists, ignored download request')
return
if not os.path.exists(folder): if not os.path.exists(folder):
try: try:
os.makedirs(folder) os.makedirs(folder)
except EnvironmentError as e: except EnvironmentError as e:
logger.critical(str(e)) logger.critical(str(e))
self.folder = folder
if os.getenv('DEBUG', None) == 'NODOWNLOAD': else:
# Assuming we want to continue with rest of process. logger.warning(f'Path "{folder}" already exist.')
return True
digit_length = len(str(len(queue))) queue = [(self, url, folder, constant.CONFIG['proxy']) for url in queue]
logger.info(f'Total download pages: {len(queue)}')
coroutines = [
self._semaphore_download(url, filename=os.path.basename(urlparse(url).path), length=digit_length)
for url in queue
]
# Prevent coroutines infection pool = multiprocessing.Pool(self.size, init_worker)
asyncio.run(self.fiber(coroutines)) [pool.apply_async(download_wrapper, args=item) for item in queue]
return True pool.close()
pool.join()
def download_wrapper(obj, url, folder='', proxy=None):
if sys.platform == 'darwin' or semaphore.get_value():
return Downloader.download(obj, url=url, folder=folder, proxy=proxy)
else:
return -3, None
def init_worker():
signal.signal(signal.SIGINT, subprocess_signal)
def subprocess_signal(sig, frame):
if semaphore.acquire(timeout=1):
logger.warning('Ctrl-C pressed, exiting sub processes ...')
raise KeyboardInterrupt
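Note on the downloader rewrite above: 0.5.21 replaces the dev side's multiprocessing pool with asyncio, bounding concurrency with a semaphore and draining tasks via as_completed(). A self-contained sketch of that pattern (the sleep stands in for real network I/O):

    import asyncio

    async def fetch(url):
        await asyncio.sleep(0)           # a real downloader would await an HTTP request here
        return 1, url                    # (status, url), mirroring Downloader.download

    async def fiber(urls, threads=5):
        sem = asyncio.Semaphore(threads)             # at most `threads` downloads in flight
        async def bounded(url):
            async with sem:
                return await fetch(url)
        for task in asyncio.as_completed([bounded(u) for u in urls]):
            code, url = await task
            print(code, url)

    asyncio.run(fiber(['1.jpg', '2.jpg']))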

@@ -141,30 +141,23 @@ def doujinshi_parser(id_, counter=0):
title = doujinshi_info.find('h1').text title = doujinshi_info.find('h1').text
pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text
subtitle = doujinshi_info.find('h2') subtitle = doujinshi_info.find('h2')
favorite_counts = doujinshi_info.find('span', class_='nobold').find('span', class_='count')
doujinshi['name'] = title doujinshi['name'] = title
doujinshi['pretty_name'] = pretty_name doujinshi['pretty_name'] = pretty_name
doujinshi['subtitle'] = subtitle.text if subtitle else '' doujinshi['subtitle'] = subtitle.text if subtitle else ''
doujinshi['favorite_counts'] = int(favorite_counts.text.strip()) if favorite_counts else 0
doujinshi_cover = html.find('div', attrs={'id': 'cover'}) doujinshi_cover = html.find('div', attrs={'id': 'cover'})
# img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif|webp)$', img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif)$',
# doujinshi_cover.a.img.attrs['data-src']) doujinshi_cover.a.img.attrs['data-src'])
img_id = re.search(r'/galleries/(\d+)/cover\.\w+$', doujinshi_cover.a.img.attrs['data-src'])
ext = [] ext = []
for i in html.find_all('div', attrs={'class': 'thumb-container'}): for i in html.find_all('div', attrs={'class': 'thumb-container'}):
base_name = os.path.basename(i.img.attrs['data-src']) _, ext_name = os.path.basename(i.img.attrs['data-src']).rsplit('.', 1)
ext_name = base_name.split('.') ext.append(ext_name)
if len(ext_name) == 3:
ext.append(ext_name[1])
else:
ext.append(ext_name[-1])
if not img_id: if not img_id:
logger.critical(f'Tried yo get image id failed of id: {id_}') logger.critical('Tried yo get image id failed')
return None sys.exit(1)
doujinshi['img_id'] = img_id.group(1) doujinshi['img_id'] = img_id.group(1)
doujinshi['ext'] = ext doujinshi['ext'] = ext
@@ -191,6 +184,53 @@ def doujinshi_parser(id_, counter=0):
return doujinshi return doujinshi
def legacy_doujinshi_parser(id_):
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
raise Exception(f'Doujinshi id({id_}) is not valid')
id_ = int(id_)
logger.info(f'Fetching information of doujinshi id {id_}')
doujinshi = dict()
doujinshi['id'] = id_
url = f'{constant.DETAIL_URL}/{id_}'
i = 0
while 5 > i:
try:
response = request('get', url).json()
except Exception as e:
i += 1
if not i < 5:
logger.critical(str(e))
sys.exit(1)
continue
break
doujinshi['name'] = response['title']['english']
doujinshi['subtitle'] = response['title']['japanese']
doujinshi['img_id'] = response['media_id']
doujinshi['ext'] = ''.join([i['t'] for i in response['images']['pages']])
doujinshi['pages'] = len(response['images']['pages'])
# gain information of the doujinshi
needed_fields = ['character', 'artist', 'language', 'tag', 'parody', 'group', 'category']
for tag in response['tags']:
tag_type = tag['type']
if tag_type in needed_fields:
if tag_type == 'tag':
if tag_type not in doujinshi:
doujinshi[tag_type] = {}
tag['name'] = tag['name'].replace(' ', '-')
tag['name'] = tag['name'].lower()
doujinshi[tag_type][tag['name']] = tag['id']
elif tag_type not in doujinshi:
doujinshi[tag_type] = tag['name']
else:
doujinshi[tag_type] += ', ' + tag['name']
return doujinshi
def print_doujinshi(doujinshi_list): def print_doujinshi(doujinshi_list):
if not doujinshi_list: if not doujinshi_list:
return return
@@ -200,21 +240,13 @@ def print_doujinshi(doujinshi_list):
print(tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst')) print(tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
def legacy_search_parser(keyword, sorting, page, is_page_all=False, type_='SEARCH'): def legacy_search_parser(keyword, sorting, page, is_page_all=False):
logger.info(f'Searching doujinshis of keyword {keyword}') logger.info(f'Searching doujinshis of keyword {keyword}')
result = [] result = []
if type_ not in ('SEARCH', 'ARTIST', ):
raise ValueError('Invalid type')
if is_page_all: if is_page_all:
if type_ == 'SEARCH':
response = request('get', url=constant.LEGACY_SEARCH_URL, response = request('get', url=constant.LEGACY_SEARCH_URL,
params={'q': keyword, 'page': 1, 'sort': sorting}).content params={'q': keyword, 'page': 1, 'sort': sorting}).content
else:
url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
response = request('get', url=url, params={'page': 1}).content
html = BeautifulSoup(response, 'lxml') html = BeautifulSoup(response, 'lxml')
pagination = html.find(attrs={'class': 'pagination'}) pagination = html.find(attrs={'class': 'pagination'})
last_page = pagination.find(attrs={'class': 'last'}) last_page = pagination.find(attrs={'class': 'last'})
@@ -226,13 +258,8 @@ def legacy_search_parser(keyword, sorting, page, is_page_all=False, type_='SEARCH'):
for p in pages: for p in pages:
logger.info(f'Fetching page {p} ...') logger.info(f'Fetching page {p} ...')
if type_ == 'SEARCH':
response = request('get', url=constant.LEGACY_SEARCH_URL, response = request('get', url=constant.LEGACY_SEARCH_URL,
params={'q': keyword, 'page': p, 'sort': sorting}).content params={'q': keyword, 'page': p, 'sort': sorting}).content
else:
url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
response = request('get', url=url, params={'page': p}).content
if response is None: if response is None:
logger.warning(f'No result in response in page {p}') logger.warning(f'No result in response in page {p}')
continue continue
@@ -286,9 +313,7 @@ def search_parser(keyword, sorting, page, is_page_all=False):
for row in response['result']: for row in response['result']:
title = row['title']['english'] title = row['title']['english']
title = title[:constant.CONFIG['max_filename']] + '..' if \ title = title[:85] + '..' if len(title) > 85 else title
len(title) > constant.CONFIG['max_filename'] else title
result.append({'id': row['id'], 'title': title}) result.append({'id': row['id'], 'title': title})
not_exists_persist = False not_exists_persist = False
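Note on the cover-regex hunk above: the dev pattern only accepts jpg/png/gif covers, while 0.5.21's r'/galleries/(\d+)/cover\.\w+$' also matches newer formats such as webp. A quick check with a hypothetical data-src value:

    import re

    data_src = '/galleries/1234567/cover.webp'
    m = re.search(r'/galleries/(\d+)/cover\.\w+$', data_src)
    assert m and m.group(1) == '1234567'   # the old (jpg|png|gif) pattern finds no match here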

@@ -1,8 +1,6 @@
# coding: utf-8 # coding: utf-8
import json import json
import os import os
from nhentai.constant import PATH_SEPARATOR
from xml.sax.saxutils import escape from xml.sax.saxutils import escape
from nhentai.constant import LANGUAGE_ISO from nhentai.constant import LANGUAGE_ISO
@@ -10,8 +8,6 @@ from nhentai.constant import LANGUAGE_ISO
def serialize_json(doujinshi, output_dir): def serialize_json(doujinshi, output_dir):
metadata = {'title': doujinshi.name, metadata = {'title': doujinshi.name,
'subtitle': doujinshi.info.subtitle} 'subtitle': doujinshi.info.subtitle}
if doujinshi.info.favorite_counts:
metadata['favorite_counts'] = doujinshi.favorite_counts
if doujinshi.info.date: if doujinshi.info.date:
metadata['upload_date'] = doujinshi.info.date metadata['upload_date'] = doujinshi.info.date
if doujinshi.info.parodies: if doujinshi.info.parodies:
@@ -26,7 +22,7 @@ def serialize_json(doujinshi, output_dir):
metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')] metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
if doujinshi.info.languages: if doujinshi.info.languages:
metadata['language'] = [i.strip() for i in doujinshi.info.languages.split(',')] metadata['language'] = [i.strip() for i in doujinshi.info.languages.split(',')]
metadata['category'] = [i.strip() for i in doujinshi.info.categories.split(',')] metadata['category'] = doujinshi.info.categories
metadata['URL'] = doujinshi.url metadata['URL'] = doujinshi.url
metadata['Pages'] = doujinshi.pages metadata['Pages'] = doujinshi.pages
@@ -48,7 +44,6 @@ def serialize_comic_xml(doujinshi, output_dir):
xml_write_simple_tag(f, 'PageCount', doujinshi.pages) xml_write_simple_tag(f, 'PageCount', doujinshi.pages)
xml_write_simple_tag(f, 'URL', doujinshi.url) xml_write_simple_tag(f, 'URL', doujinshi.url)
xml_write_simple_tag(f, 'NhentaiId', doujinshi.id) xml_write_simple_tag(f, 'NhentaiId', doujinshi.id)
xml_write_simple_tag(f, 'Favorites', doujinshi.favorite_counts)
xml_write_simple_tag(f, 'Genre', doujinshi.info.categories) xml_write_simple_tag(f, 'Genre', doujinshi.info.categories)
xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and
@@ -84,7 +79,7 @@ def xml_write_simple_tag(f, name, val, indent=1):
def merge_json(): def merge_json():
lst = [] lst = []
output_dir = f".{PATH_SEPARATOR}" output_dir = "./"
os.chdir(output_dir) os.chdir(output_dir)
doujinshi_dirs = next(os.walk('.'))[1] doujinshi_dirs = next(os.walk('.'))[1]
for folder in doujinshi_dirs: for folder in doujinshi_dirs:
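Note on the serialize_json hunk above: dev wrote metadata['category'] as a raw comma-separated string, while 0.5.21 splits it into a list like the other multi-value fields. A sketch with a hypothetical value:

    categories = 'doujinshi, western'   # stand-in for doujinshi.info.categories
    assert [i.strip() for i in categories.split(',')] == ['doujinshi', 'western']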

@@ -5,21 +5,15 @@ import re
import os import os
import zipfile import zipfile
import shutil import shutil
import httpx
import requests import requests
import sqlite3 import sqlite3
import urllib.parse
from typing import Tuple
from requests.structures import CaseInsensitiveDict
from nhentai import constant from nhentai import constant
from nhentai.constant import PATH_SEPARATOR
from nhentai.logger import logger from nhentai.logger import logger
from nhentai.serializer import serialize_json, serialize_comic_xml, set_js_database from nhentai.serializer import serialize_json, serialize_comic_xml, set_js_database
MAX_FIELD_LENGTH = 100 MAX_FIELD_LENGTH = 100
EXTENSIONS = ('.png', '.jpg', '.jpeg', '.gif', '.webp')
def request(method, url, **kwargs): def request(method, url, **kwargs):
@@ -31,44 +25,20 @@
}) })
if not kwargs.get('proxies', None): if not kwargs.get('proxies', None):
kwargs['proxies'] = { kwargs['proxies'] = constant.CONFIG['proxy']
'https': constant.CONFIG['proxy'],
'http': constant.CONFIG['proxy'],
}
return getattr(session, method)(url, verify=False, **kwargs) return getattr(session, method)(url, verify=False, **kwargs)
async def async_request(method, url, proxy = None, **kwargs):
headers = {
'Referer': constant.LOGIN_URL,
'User-Agent': constant.CONFIG['useragent'],
'Cookie': constant.CONFIG['cookie'],
}
if proxy is None:
proxy = constant.CONFIG['proxy']
if isinstance(proxy, (str, )) and not proxy:
proxy = None
async with httpx.AsyncClient(headers=headers, verify=False, proxy=proxy, **kwargs) as client:
response = await client.request(method, url, **kwargs)
return response
def check_cookie(): def check_cookie():
response = request('get', constant.BASE_URL) response = request('get', constant.BASE_URL)
if response.status_code == 403 and 'Just a moment...' in response.text: if response.status_code == 403 and 'Just a moment...' in response.text:
logger.error('Blocked by Cloudflare captcha, please set your cookie and useragent') logger.error('Blocked by Cloudflare captcha, please set your cookie and useragent')
sys.exit(1) sys.exit(1)
username = re.findall('"/users/[0-9]+/(.*?)"', response.text) username = re.findall('"/users/[0-9]+/(.*?)"', response.text)
if not username: if not username:
logger.warning( logger.warning('Cannot get your username, please check your cookie or use `nhentai --cookie` to set your cookie')
'Cannot get your username, please check your cookie or use `nhentai --cookie` to set your cookie')
else: else:
logger.log(16, f'Login successfully! Your username: {username[0]}') logger.log(16, f'Login successfully! Your username: {username[0]}')
@@ -94,33 +64,13 @@ def readfile(path):
return file.read() return file.read()
def parse_doujinshi_obj( def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
output_dir: str, image_html = ''
doujinshi_obj=None,
file_type: str = ''
) -> Tuple[str, str]:
filename = f'.{PATH_SEPARATOR}doujinshi.{file_type}'
if doujinshi_obj is not None: if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename) doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
_filename = f'{doujinshi_obj.filename}.{file_type}'
if file_type == 'cbz':
serialize_comic_xml(doujinshi_obj, doujinshi_dir)
if file_type == 'pdf':
_filename = _filename.replace('/', '-')
filename = os.path.join(output_dir, _filename)
else: else:
doujinshi_dir = f'.{PATH_SEPARATOR}' doujinshi_dir = '.'
return doujinshi_dir, filename
def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, '.html')
image_html = ''
if not os.path.exists(doujinshi_dir): if not os.path.exists(doujinshi_dir):
logger.warning(f'Path "{doujinshi_dir}" does not exist, creating.') logger.warning(f'Path "{doujinshi_dir}" does not exist, creating.')
@@ -133,7 +83,7 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
file_list.sort() file_list.sort()
for image in file_list: for image in file_list:
if not os.path.splitext(image)[1] in EXTENSIONS: if not os.path.splitext(image)[1] in ('.jpg', '.png'):
continue continue
image_html += f'<img src="{image}" class="image-item"/>\n' image_html += f'<img src="{image}" class="image-item"/>\n'
@@ -157,28 +107,7 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
logger.warning(f'Writing HTML Viewer failed ({e})') logger.warning(f'Writing HTML Viewer failed ({e})')
def move_to_folder(output_dir='.', doujinshi_obj=None, file_type=None): def generate_main_html(output_dir='./'):
if not file_type:
raise RuntimeError('no file_type specified')
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)
for fn in os.listdir(doujinshi_dir):
file_path = os.path.join(doujinshi_dir, fn)
_, ext = os.path.splitext(file_path)
if ext in ['.pdf', '.cbz']:
continue
if os.path.isfile(file_path):
try:
os.remove(file_path)
except Exception as e:
print(f"Error deleting file: {e}")
shutil.move(filename, os.path.join(doujinshi_dir, os.path.basename(filename)))
def generate_main_html(output_dir=f'.{PATH_SEPARATOR}'):
""" """
Generate a main html to show all the contains doujinshi. Generate a main html to show all the contains doujinshi.
With a link to their `index.html`. With a link to their `index.html`.
@@ -219,7 +148,7 @@ def generate_main_html(output_dir=f'.{PATH_SEPARATOR}'):
else: else:
title = 'nHentai HTML Viewer' title = 'nHentai HTML Viewer'
image_html += element.format(FOLDER=urllib.parse.quote(folder), IMAGE=image, TITLE=title) image_html += element.format(FOLDER=folder, IMAGE=image, TITLE=title)
if image_html == '': if image_html == '':
logger.warning('No index.html found, --gen-main paused.') logger.warning('No index.html found, --gen-main paused.')
return return
@@ -229,47 +158,68 @@ def generate_main_html(output_dir=f'.{PATH_SEPARATOR}'):
f.write(data.encode('utf-8')) f.write(data.encode('utf-8'))
shutil.copy(os.path.dirname(__file__) + '/viewer/logo.png', './') shutil.copy(os.path.dirname(__file__) + '/viewer/logo.png', './')
set_js_database() set_js_database()
output_dir = output_dir[:-1] if output_dir.endswith('/') else output_dir logger.log(16, f'Main Viewer has been written to "{output_dir}main.html"')
logger.log(16, f'Main Viewer has been written to "{output_dir}/main.html"')
except Exception as e: except Exception as e:
logger.warning(f'Writing Main Viewer failed ({e})') logger.warning(f'Writing Main Viewer failed ({e})')
def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=False): def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True):
if doujinshi_obj is not None:
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type) doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
if os.path.exists(doujinshi_dir+".cbz"):
if os.path.exists(f'{doujinshi_dir}.{file_type}') and not regenerate: logger.warning(f'Comic Book CBZ file exists, skip "{doujinshi_dir}"')
logger.info(f'Skipped {file_type} file generation: {doujinshi_dir}.{file_type} already exists')
return return
if write_comic_info:
serialize_comic_xml(doujinshi_obj, doujinshi_dir)
cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), f'{doujinshi_obj.filename}.cbz')
else:
cbz_filename = './doujinshi.cbz'
doujinshi_dir = '.'
if file_type == 'cbz':
file_list = os.listdir(doujinshi_dir) file_list = os.listdir(doujinshi_dir)
file_list.sort() file_list.sort()
logger.info(f'Writing CBZ file to path: {filename}') logger.info(f'Writing CBZ file to path: {cbz_filename}')
with zipfile.ZipFile(filename, 'w') as cbz_pf: with zipfile.ZipFile(cbz_filename, 'w') as cbz_pf:
for image in file_list: for image in file_list:
image_path = os.path.join(doujinshi_dir, image) image_path = os.path.join(doujinshi_dir, image)
cbz_pf.write(image_path, image) cbz_pf.write(image_path, image)
logger.log(16, f'Comic Book CBZ file has been written to "{filename}"') if rm_origin_dir:
elif file_type == 'pdf': shutil.rmtree(doujinshi_dir, ignore_errors=True)
logger.log(16, f'Comic Book CBZ file has been written to "{doujinshi_dir}"')
def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
try: try:
import img2pdf import img2pdf
"""Write images to a PDF file using img2pdf.""" """Write images to a PDF file using img2pdf."""
file_list = [f for f in os.listdir(doujinshi_dir) if f.lower().endswith(EXTENSIONS)] if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
pdf_filename = os.path.join(
os.path.join(doujinshi_dir, '..'),
f'{doujinshi_obj.filename}.pdf'
)
else:
pdf_filename = './doujinshi.pdf'
doujinshi_dir = '.'
file_list = os.listdir(doujinshi_dir)
file_list.sort() file_list.sort()
logger.info(f'Writing PDF file to path: {filename}') logger.info(f'Writing PDF file to path: {pdf_filename}')
with open(filename, 'wb') as pdf_f: with open(pdf_filename, 'wb') as pdf_f:
full_path_list = ( full_path_list = (
[os.path.join(doujinshi_dir, image) for image in file_list] [os.path.join(doujinshi_dir, image) for image in file_list]
) )
pdf_f.write(img2pdf.convert(full_path_list, rotation=img2pdf.Rotation.ifvalid)) pdf_f.write(img2pdf.convert(full_path_list))
logger.log(16, f'PDF file has been written to "{filename}"') if rm_origin_dir:
shutil.rmtree(doujinshi_dir, ignore_errors=True)
logger.log(16, f'PDF file has been written to "{doujinshi_dir}"')
except ImportError: except ImportError:
logger.error("Please install img2pdf package by using pip.") logger.error("Please install img2pdf package by using pip.")
@@ -280,12 +230,12 @@ def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
It used to be a whitelist approach allowed only alphabet and a part of symbols. It used to be a whitelist approach allowed only alphabet and a part of symbols.
but most doujinshi's names include Japanese 2-byte characters and these was rejected. but most doujinshi's names include Japanese 2-byte characters and these was rejected.
so it is using blacklist approach now. so it is using blacklist approach now.
if filename include forbidden characters (\'/:,;*?"<>|) ,it replaces space character(" "). if filename include forbidden characters (\'/:,;*?"<>|) ,it replace space character(' ').
""" """
# maybe you can use `--format` to select a suitable filename # maybe you can use `--format` to select a suitable filename
if not _truncate_only: if not _truncate_only:
ban_chars = '\\\'/:,;*?"<>|\t\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b' ban_chars = '\\\'/:,;*?"<>|\t'
filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip() filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip()
filename = ' '.join(filename.split()) filename = ' '.join(filename.split())
@@ -303,7 +253,7 @@ def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
return filename return filename
def signal_handler(_signal, _frame): def signal_handler(signal, frame):
logger.error('Ctrl-C signal received. Stopping...') logger.error('Ctrl-C signal received. Stopping...')
sys.exit(1) sys.exit(1)
@@ -311,8 +261,7 @@ def signal_handler(_signal, _frame):
def paging(page_string): def paging(page_string):
# 1,3-5,14 -> [1, 3, 4, 5, 14] # 1,3-5,14 -> [1, 3, 4, 5, 14]
if not page_string: if not page_string:
# default, the first page return []
return [1]
page_list = [] page_list = []
for i in page_string.split(','): for i in page_string.split(','):
@@ -329,27 +278,32 @@
return page_list return page_list
def generate_metadata_file(output_dir, doujinshi_obj): def generate_metadata_file(output_dir, table, doujinshi_obj=None):
logger.info('Writing Metadata Info')
info_txt_path = os.path.join(output_dir, doujinshi_obj.filename, 'info.txt') if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
else:
doujinshi_dir = '.'
f = open(info_txt_path, 'w', encoding='utf-8') logger.info(doujinshi_dir)
fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'GROUPS', 'CIRCLE', 'SCANLATOR', f = open(os.path.join(doujinshi_dir, 'info.txt'), 'w', encoding='utf-8')
fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'CIRCLE', 'SCANLATOR',
'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES', 'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
'TAGS', 'FAVORITE COUNTS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS', 'TAGS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
'SERIES', 'PARODY', 'URL'] 'SERIES', 'PARODY', 'URL']
special_fields = ['PARODY', 'TITLE', 'ORIGINAL TITLE', 'CHARACTERS', 'AUTHOR',
'LANGUAGE', 'TAGS', 'URL', 'PAGES']
temp_dict = CaseInsensitiveDict(dict(doujinshi_obj.table)) for i in range(len(fields)):
for i in fields: f.write(f'{fields[i]}: ')
v = temp_dict.get(i) if fields[i] in special_fields:
v = temp_dict.get(f'{i}s') if v is None else v f.write(str(table[special_fields.index(fields[i])][1]))
v = doujinshi_obj.info.get(i.lower(), None) if v is None else v f.write('\n')
v = doujinshi_obj.info.get(f'{i.lower()}s', "Unknown") if v is None else v
f.write(f'{i}: {v}\n')
f.close() f.close()
logger.log(16, f'Metadata Info has been written to "{info_txt_path}"')
class DB(object): class DB(object):
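Note on the paging hunk above: both sides expand strings like '1,3-5,14' into page lists; the behavioral change is only the empty-input default ([1] on the 0.5.21 side versus [] on dev). A minimal re-implementation of the documented contract:

    def paging(page_string):
        if not page_string:
            return [1]                   # 0.5.21 default: just the first page
        pages = []
        for part in page_string.split(','):
            if '-' in part:
                start, end = part.split('-')
                pages.extend(range(int(start), int(end) + 1))
            else:
                pages.append(int(part))
        return pages

    assert paging('1,3-5,14') == [1, 3, 4, 5, 14]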

@@ -139,7 +139,7 @@ function filter_searcher(){
break break
} }
} }
if (verifier){doujinshi_id.push(data[i].Folder.replace("_", " "));} if (verifier){doujinshi_id.push(data[i].Folder);}
} }
var gallery = document.getElementsByClassName("gallery-favorite"); var gallery = document.getElementsByClassName("gallery-favorite");
for (var i = 0; i < gallery.length; i++){ for (var i = 0; i < gallery.length; i++){

poetry.lock (generated, 378 lines changed)

@ -1,259 +1,151 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand. # This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
[[package]]
name = "anyio"
version = "4.5.2"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
{file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
[[package]] [[package]]
name = "beautifulsoup4" name = "beautifulsoup4"
version = "4.12.3" version = "4.11.2"
description = "Screen-scraping library" description = "Screen-scraping library"
category = "main"
optional = false optional = false
python-versions = ">=3.6.0" python-versions = ">=3.6.0"
files = [ files = [
{file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"}, {file = "beautifulsoup4-4.11.2-py3-none-any.whl", hash = "sha256:0e79446b10b3ecb499c1556f7e228a53e64a2bfcebd455f370d8927cb5b59e39"},
{file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"}, {file = "beautifulsoup4-4.11.2.tar.gz", hash = "sha256:bc4bdda6717de5a2987436fb8d72f45dc90dd856bdfd512a1314ce90349a0106"},
] ]
[package.dependencies] [package.dependencies]
soupsieve = ">1.2" soupsieve = ">1.2"
[package.extras] [package.extras]
cchardet = ["cchardet"]
chardet = ["chardet"]
charset-normalizer = ["charset-normalizer"]
html5lib = ["html5lib"] html5lib = ["html5lib"]
lxml = ["lxml"] lxml = ["lxml"]
[[package]] [[package]]
name = "certifi" name = "certifi"
version = "2024.12.14" version = "2022.12.7"
description = "Python package for providing Mozilla's CA Bundle." description = "Python package for providing Mozilla's CA Bundle."
category = "main"
optional = false optional = false
python-versions = ">=3.6" python-versions = ">=3.6"
files = [ files = [
{file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"}, {file = "certifi-2022.12.7-py3-none-any.whl", hash = "sha256:4ad3232f5e926d6718ec31cfc1fcadfde020920e278684144551c91769c7bc18"},
{file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"}, {file = "certifi-2022.12.7.tar.gz", hash = "sha256:35824b4c3a97115964b408844d64aa14db1cc518f6562e8d7261699d1350a9e3"},
]
[[package]]
name = "chardet"
version = "5.2.0"
description = "Universal encoding detector for Python 3"
optional = false
python-versions = ">=3.7"
files = [
{file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"},
{file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"},
] ]
[[package]] [[package]]
name = "charset-normalizer" name = "charset-normalizer"
version = "3.4.1" version = "3.0.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
category = "main"
optional = false optional = false
python-versions = ">=3.7" python-versions = "*"
files = [ files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"}, {file = "charset-normalizer-3.0.1.tar.gz", hash = "sha256:ebea339af930f8ca5d7a699b921106c6e29c617fe9606fa7baa043c1cdae326f"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"}, {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88600c72ef7587fe1708fd242b385b6ed4b8904976d5da0893e31df8b3480cb6"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"}, {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c75ffc45f25324e68ab238cb4b5c0a38cd1c3d7f1fb1f72b5541de469e2247db"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"}, {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db72b07027db150f468fbada4d85b3b2729a3db39178abf5c543b784c1254539"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"}, {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62595ab75873d50d57323a91dd03e6966eb79c41fa834b7a1661ed043b2d404d"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"}, {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"}, {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:772b87914ff1152b92a197ef4ea40efe27a378606c39446ded52c8f80f79702e"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"}, {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70990b9c51340e4044cfc394a81f614f3f90d41397104d226f21e66de668730d"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"}, {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:292d5e8ba896bbfd6334b096e34bffb56161c81408d6d036a7dfa6929cff8783"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"}, {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2edb64ee7bf1ed524a1da60cdcd2e1f6e2b4f66ef7c077680739f1641f62f555"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"}, {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:31a9ddf4718d10ae04d9b18801bd776693487cbb57d74cc3458a7673f6f34639"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"}, {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:44ba614de5361b3e5278e1241fda3dc1838deed864b50a10d7ce92983797fa76"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"}, {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:12db3b2c533c23ab812c2b25934f60383361f8a376ae272665f8e48b88e8e1c6"},
{file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"}, {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c512accbd6ff0270939b9ac214b84fb5ada5f0409c44298361b2f5e13f9aed9e"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"}, {file = "charset_normalizer-3.0.1-cp310-cp310-win32.whl", hash = "sha256:502218f52498a36d6bf5ea77081844017bf7982cdbe521ad85e64cabee1b608b"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"}, {file = "charset_normalizer-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:601f36512f9e28f029d9481bdaf8e89e5148ac5d89cffd3b05cd533eeb423b59"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"}, {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0298eafff88c99982a4cf66ba2efa1128e4ddaca0b05eec4c456bbc7db691d8d"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"}, {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8d0fc946c784ff7f7c3742310cc8a57c5c6dc31631269876a88b809dbeff3d3"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"}, {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:87701167f2a5c930b403e9756fab1d31d4d4da52856143b609e30a1ce7160f3c"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"}, {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e76c0f23218b8f46c4d87018ca2e441535aed3632ca134b10239dfb6dadd6b"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"}, {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0a590235ccd933d9892c627dec5bc7511ce6ad6c1011fdf5b11363022746c1"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"}, {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c7fe7afa480e3e82eed58e0ca89f751cd14d767638e2550c77a92a9e749c317"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"}, {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79909e27e8e4fcc9db4addea88aa63f6423ebb171db091fb4373e3312cb6d603"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"}, {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ac7b6a045b814cf0c47f3623d21ebd88b3e8cf216a14790b455ea7ff0135d18"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"}, {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:72966d1b297c741541ca8cf1223ff262a6febe52481af742036a0b296e35fa5a"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"}, {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7"},
{file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"}, {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5995f0164fa7df59db4746112fec3f49c461dd6b31b841873443bdb077c13cfc"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"}, {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4a8fcf28c05c1f6d7e177a9a46a1c52798bfe2ad80681d275b10dcf317deaf0b"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"}, {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:761e8904c07ad053d285670f36dd94e1b6ab7f16ce62b9805c475b7aa1cffde6"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"}, {file = "charset_normalizer-3.0.1-cp311-cp311-win32.whl", hash = "sha256:71140351489970dfe5e60fc621ada3e0f41104a5eddaca47a7acb3c1b851d6d3"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"}, {file = "charset_normalizer-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ab77acb98eba3fd2a85cd160851816bfce6871d944d885febf012713f06659c"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:84c3990934bae40ea69a82034912ffe5a62c60bbf6ec5bc9691419641d7d5c9a"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74292fc76c905c0ef095fe11e188a32ebd03bc38f3f3e9bcb85e4e6db177b7ea"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c95a03c79bbe30eec3ec2b7f076074f4281526724c8685a42872974ef4d36b72"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c39b0e3eac288fedc2b43055cfc2ca7a60362d0e5e87a637beac5d801ef478"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2c707231459e8a4028eabcd3cfc827befd635b3ef72eada84ab13b52e1574d"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ad6d87ac18e2a90b0fe89df7c65263b9a99a0eb98f0a3d2e079f12a0735837"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:59e5686dd847347e55dffcc191a96622f016bc0ad89105e24c14e0d6305acbc6"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:cd6056167405314a4dc3c173943f11249fa0f1b204f8b51ed4bde1a9cd1834dc"},
{file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:083c8d17153ecb403e5e1eb76a7ef4babfc2c48d58899c98fcaa04833e7a2f9a"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:f5057856d21e7586765171eac8b9fc3f7d44ef39425f85dbcccb13b3ebea806c"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7eb33a30d75562222b64f569c642ff3dc6689e09adda43a082208397f016c39a"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:95dea361dd73757c6f1c0a1480ac499952c16ac83f7f5f4f84f0658a01b8ef41"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"}, {file = "charset_normalizer-3.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:eaa379fcd227ca235d04152ca6704c7cb55564116f8bc52545ff357628e10602"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e45867f1f2ab0711d60c6c71746ac53537f1684baa699f4f668d4c6f6ce8e14"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cadaeaba78750d58d3cc6ac4d1fd867da6fc73c88156b7a3212a3cd4819d679d"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:911d8a40b2bef5b8bbae2e36a0b103f142ac53557ab421dc16ac4aafee6f53dc"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:503e65837c71b875ecdd733877d852adbc465bd82c768a067badd953bf1bc5a3"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60332922359f920193b1d4826953c507a877b523b2395ad7bc716ddd386d866"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16a8663d6e281208d78806dbe14ee9903715361cf81f6d4309944e4d1e59ac5b"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a16418ecf1329f71df119e8a65f3aa68004a3f9383821edcb20f0702934d8087"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d9153257a3f70d5f69edf2325357251ed20f772b12e593f3b3377b5f78e7ef8"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:02a51034802cbf38db3f89c66fb5d2ec57e6fe7ef2f4a44d070a593c3688667b"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:2e396d70bc4ef5325b72b593a72c8979999aa52fb8bcf03f701c1b03e1166918"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:11b53acf2411c3b09e6af37e4b9005cba376c872503c8f28218c7243582df45d"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:0bf2dae5291758b6f84cf923bfaa285632816007db0330002fa1de38bfcb7154"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"}, {file = "charset_normalizer-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2c03cc56021a4bd59be889c2b9257dae13bf55041a3372d3295416f86b295fb5"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"}, {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:024e606be3ed92216e2b6952ed859d86b4cfa52cd5bc5f050e7dc28f9b43ec42"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"}, {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4b0d02d7102dd0f997580b51edc4cebcf2ab6397a7edf89f1c73b586c614272c"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"}, {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:358a7c4cb8ba9b46c453b1dd8d9e431452d5249072e4f56cfda3149f6ab1405e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"}, {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81d6741ab457d14fdedc215516665050f3822d3e56508921cc7239f8c8e66a58"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"}, {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8af03d2e37866d023ad0ddea594edefc31e827fee64f8de5611a1dbc373174"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"}, {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cf4e8ad252f7c38dd1f676b46514f92dc0ebeb0db5552f5f403509705e24753"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"}, {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e696f0dd336161fca9adbb846875d40752e6eba585843c768935ba5c9960722b"},
{file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"}, {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c22d3fe05ce11d3671297dc8973267daa0f938b93ec716e12e0f6dee81591dc1"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"}, {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109487860ef6a328f3eec66f2bf78b0b72400280d8f8ea05f69c51644ba6521a"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"}, {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:37f8febc8ec50c14f3ec9637505f28e58d4f66752207ea177c1d67df25da5aed"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"}, {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f97e83fa6c25693c7a35de154681fcc257c1c41b38beb0304b9c4d2d9e164479"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"}, {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a152f5f33d64a6be73f1d30c9cc82dfc73cec6477ec268e7c6e4c7d23c2d2291"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"}, {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39049da0ffb96c8cbb65cbf5c5f3ca3168990adf3551bd1dee10c48fce8ae820"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"}, {file = "charset_normalizer-3.0.1-cp38-cp38-win32.whl", hash = "sha256:4457ea6774b5611f4bed5eaa5df55f70abde42364d498c5134b7ef4c6958e20e"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"}, {file = "charset_normalizer-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:e62164b50f84e20601c1ff8eb55620d2ad25fb81b59e3cd776a1902527a788af"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"}, {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eade758719add78ec36dc13201483f8e9b5d940329285edcd5f70c0a9edbd7f"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"}, {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8499ca8f4502af841f68135133d8258f7b32a53a1d594aa98cc52013fff55678"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"}, {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fc1c4a2ffd64890aebdb3f97e1278b0cc72579a08ca4de8cd2c04799a3a22be"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"}, {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d3ffdaafe92a5dc603cb9bd5111aaa36dfa187c8285c543be562e61b755f6b"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"}, {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2ac1b08635a8cd4e0cbeaf6f5e922085908d48eb05d44c5ae9eabab148512ca"},
{file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"}, {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6f45710b4459401609ebebdbcfb34515da4fc2aa886f95107f556ac69a9147e"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"}, {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae1de54a77dc0d6d5fcf623290af4266412a7c4be0b1ff7444394f03f5c54e3"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"}, {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b590df687e3c5ee0deef9fc8c547d81986d9a1b56073d82de008744452d6541"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"}, {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab5de034a886f616a5668aa5d098af2b5385ed70142090e2a31bcbd0af0fdb3d"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"}, {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9cb3032517f1627cc012dbc80a8ec976ae76d93ea2b5feaa9d2a5b8882597579"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"}, {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:608862a7bf6957f2333fc54ab4399e405baad0163dc9f8d99cb236816db169d4"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"}, {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f438ae3532723fb6ead77e7c604be7c8374094ef4ee2c5e03a3a17f1fca256c"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"}, {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:356541bf4381fa35856dafa6a965916e54bed415ad8a24ee6de6e37deccf2786"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"}, {file = "charset_normalizer-3.0.1-cp39-cp39-win32.whl", hash = "sha256:39cf9ed17fe3b1bc81f33c9ceb6ce67683ee7526e65fde1447c772afc54a1bb8"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"}, {file = "charset_normalizer-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a11e971ed097d24c534c037d298ad32c6ce81a45736d31e0ff0ad37ab437d59"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"}, {file = "charset_normalizer-3.0.1-py3-none-any.whl", hash = "sha256:7e189e2e1d3ed2f4aebabd2d5b0f931e883676e51c7624826e0a4e5fe8a0bf24"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
{file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
{file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
] ]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
{file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]] [[package]]
name = "idna" name = "idna"
version = "3.10" version = "3.4"
description = "Internationalized Domain Names in Applications (IDNA)" description = "Internationalized Domain Names in Applications (IDNA)"
category = "main"
optional = false optional = false
python-versions = ">=3.6" python-versions = ">=3.5"
files = [ files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"}, {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"}, {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
] ]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]] [[package]]
name = "iso8601" name = "iso8601"
version = "1.1.0" version = "1.1.0"
description = "Simple module to parse ISO 8601 dates" description = "Simple module to parse ISO 8601 dates"
category = "main"
optional = false optional = false
python-versions = ">=3.6.2,<4.0" python-versions = ">=3.6.2,<4.0"
files = [ files = [
@ -263,51 +155,43 @@ files = [
[[package]] [[package]]
name = "requests" name = "requests"
version = "2.32.3" version = "2.28.2"
description = "Python HTTP for Humans." description = "Python HTTP for Humans."
category = "main"
optional = false optional = false
python-versions = ">=3.8" python-versions = ">=3.7, <4"
files = [ files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"}, {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"}, {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
] ]
[package.dependencies] [package.dependencies]
certifi = ">=2017.4.17" certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4" charset-normalizer = ">=2,<4"
idna = ">=2.5,<4" idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3" urllib3 = ">=1.21.1,<1.27"
[package.extras] [package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"] socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]] [[package]]
name = "sniffio" name = "soupsieve"
version = "1.3.1" version = "2.4"
description = "Sniff out which async library your code is running under" description = "A modern CSS selector implementation for Beautiful Soup."
category = "main"
optional = false optional = false
python-versions = ">=3.7" python-versions = ">=3.7"
files = [ files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"}, {file = "soupsieve-2.4-py3-none-any.whl", hash = "sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"}, {file = "soupsieve-2.4.tar.gz", hash = "sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a"},
]
[[package]]
name = "soupsieve"
version = "2.6"
description = "A modern CSS selector implementation for Beautiful Soup."
optional = false
python-versions = ">=3.8"
files = [
{file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"},
{file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
] ]
[[package]] [[package]]
name = "tabulate" name = "tabulate"
version = "0.9.0" version = "0.9.0"
description = "Pretty-print tabular data" description = "Pretty-print tabular data"
category = "main"
optional = false optional = false
python-versions = ">=3.7" python-versions = ">=3.7"
files = [ files = [
@ -318,34 +202,24 @@ files = [
[package.extras] [package.extras]
widechars = ["wcwidth"] widechars = ["wcwidth"]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]] [[package]]
name = "urllib3" name = "urllib3"
version = "1.26.20" version = "1.26.14"
description = "HTTP library with thread-safe connection pooling, file post, and more." description = "HTTP library with thread-safe connection pooling, file post, and more."
category = "main"
optional = false optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7" python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
files = [ files = [
{file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"}, {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"},
{file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"}, {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"},
] ]
[package.extras] [package.extras]
brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"] brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"] secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"] socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[metadata] [metadata]
lock-version = "2.0" lock-version = "2.0"
python-versions = "^3.8" python-versions = "^3.8"
content-hash = "b17c2cdd4b140f2ab8081bca7d94630e821fa2e882ac768b1bd8cf3ec58726ce" content-hash = "0a1d5abd47a669c7a1f2dc7b43824a449e29ba94908a4338d2ea0f2dfb4f805e"


@ -1,28 +1,21 @@
[tool.poetry] [tool.poetry]
name = "nhentai" name = "nhentai"
version = "0.5.20" version = "0.5.2"
description = "nhentai doujinshi downloader" description = "nhentai doujinshi downloader"
authors = ["Ricter Z <ricterzheng@gmail.com>"] authors = ["Ricter Z <ricterzheng@gmail.com>"]
license = "MIT" license = "MIT"
readme = "README.rst" readme = "README.rst"
include = ["nhentai/viewer/**"]
[tool.poetry.dependencies] [tool.poetry.dependencies]
python = "^3.8" python = "^3.8"
requests = "^2.32.3" requests = "^2.28.2"
soupsieve = "^2.6" soupsieve = "^2.4"
beautifulsoup4 = "^4.12.3" beautifulsoup4 = "^4.11.2"
tabulate = "^0.9.0" tabulate = "^0.9.0"
iso8601 = "^1.1.0" iso8601 = "^1.1.0"
urllib3 = "^1.26.20" urllib3 = "^1.26.14"
httpx = "^0.28.1"
chardet = "^5.2.0"
[build-system] [build-system]
requires = ["poetry-core"] requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api" build-backend = "poetry.core.masonry.api"
[tool.poetry.scripts]
nhentai = 'nhentai.command:main'


@ -1,29 +0,0 @@
#-------------------------------------------------------------------------------#
# Qodana analysis is configured by qodana.yaml file #
# https://www.jetbrains.com/help/qodana/qodana-yaml.html #
#-------------------------------------------------------------------------------#
version: "1.0"
#Specify inspection profile for code analysis
profile:
  name: qodana.starter
#Enable inspections
#include:
# - name: <SomeEnabledInspectionId>
#Disable inspections
#exclude:
# - name: <SomeDisabledInspectionId>
# paths:
# - <path/where/not/run/inspection>
#Execute shell command before Qodana execution (Applied in CI/CD pipeline)
#bootstrap: sh ./prepare-qodana.sh
#Install IDE plugins before Qodana execution (Applied in CI/CD pipeline)
#plugins:
# - id: <plugin.id> #(plugin id can be found at https://plugins.jetbrains.com)
#Specify Qodana linter for analysis (Applied in CI/CD pipeline)
linter: jetbrains/qodana-python:2024.3

requirements.txt (new file, 6 lines)

@ -0,0 +1,6 @@
requests
soupsieve
BeautifulSoup4
tabulate
iso8601
urllib3

setup.cfg (new file, 3 lines)

@ -0,0 +1,3 @@
[metadata]
description-file = README.rst

setup.py (new file, 38 lines)

@ -0,0 +1,38 @@
# coding: utf-8
import codecs
from setuptools import setup, find_packages
from nhentai import __version__, __author__, __email__
with open('requirements.txt') as f:
requirements = [l for l in f.read().splitlines() if l]
def long_description():
with codecs.open('README.rst', 'rb') as readme:
return readme.read().decode('utf-8')
setup(
name='nhentai',
version=__version__,
packages=find_packages(),
author=__author__,
author_email=__email__,
keywords=['nhentai', 'doujinshi', 'downloader'],
description='nhentai.net doujinshis downloader',
long_description=long_description(),
url='https://github.com/RicterZ/nhentai',
download_url='https://github.com/RicterZ/nhentai/tarball/master',
include_package_data=True,
zip_safe=False,
install_requires=requirements,
entry_points={
'console_scripts': [
'nhentai = nhentai.command:main',
]
},
license='MIT',
)


@ -7,7 +7,7 @@ from nhentai.cmdline import load_config
from nhentai.downloader import Downloader from nhentai.downloader import Downloader
from nhentai.parser import doujinshi_parser from nhentai.parser import doujinshi_parser
from nhentai.doujinshi import Doujinshi from nhentai.doujinshi import Doujinshi
from nhentai.utils import generate_html from nhentai.utils import generate_html, generate_cbz
class TestDownload(unittest.TestCase): class TestDownload(unittest.TestCase):
@ -20,7 +20,7 @@ class TestDownload(unittest.TestCase):
def test_download(self): def test_download(self):
did = 440546 did = 440546
info = Doujinshi(**doujinshi_parser(did), name_format='%i') info = Doujinshi(**doujinshi_parser(did), name_format='%i')
info.downloader = Downloader(path='/tmp', threads=5) info.downloader = Downloader(path='/tmp', size=5)
info.download() info.download()
self.assertTrue(os.path.exists(f'/tmp/{did}/001.jpg')) self.assertTrue(os.path.exists(f'/tmp/{did}/001.jpg'))
@ -28,6 +28,9 @@ class TestDownload(unittest.TestCase):
generate_html('/tmp', info) generate_html('/tmp', info)
self.assertTrue(os.path.exists(f'/tmp/{did}/index.html')) self.assertTrue(os.path.exists(f'/tmp/{did}/index.html'))
generate_cbz('/tmp', info)
self.assertTrue(os.path.exists(f'/tmp/{did}.cbz'))
if __name__ == '__main__': if __name__ == '__main__':
unittest.main() unittest.main()