Merge branch 'pull/221' into master

commit 1de7e1f998
Ricter Zheng, 2021-06-07 16:01:54 +08:00 (committed by GitHub)
6 changed files with 32 additions and 12 deletions

nhentai/__init__.py

@@ -1,3 +1,3 @@
-__version__ = '0.4.15'
+__version__ = '0.4.16'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -89,6 +89,7 @@ def cmd_parser():
     parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
     parser.add_option('--format', type='string', dest='name_format', action='store',
                       help='format the saved folder name', default='[%i][%a][%t]')
+    parser.add_option('--dry-run', '-r', action='store_true', dest='dryrun', help='Dry run, skip file download.')

     # generate options
     parser.add_option('--html', dest='html_viewer', action='store_true',
@@ -217,4 +218,8 @@ def cmd_parser():
         logger.critical('Maximum number of used threads is 15')
         exit(1)

+    if args.dryrun and (args.is_cbz or args.is_pdf):
+        logger.critical('Cannot generate PDF or CBZ during dry-run')
+        exit(1)
+
     return args
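
The --dry-run flag is declared with optparse and explicitly rejected alongside the archive options, since CBZ/PDF generation needs downloaded images and a dry run produces none. A minimal standalone sketch of that mutual-exclusion check (a hypothetical script, not nhentai's actual cmdline module):

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--dry-run', '-r', action='store_true', dest='dryrun',
                  help='Dry run, skip file download.')
parser.add_option('--cbz', action='store_true', dest='is_cbz')
parser.add_option('--pdf', action='store_true', dest='is_pdf')

args, _ = parser.parse_args(['--dry-run', '--cbz'])
if args.dryrun and (args.is_cbz or args.is_pdf):
    # Mirrors the check above: archives need downloaded images.
    raise SystemExit('Cannot generate PDF or CBZ during dry-run')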

nhentai/command.py

@@ -91,7 +91,9 @@ def main():
                             timeout=options.timeout, delay=options.delay)

     for doujinshi in doujinshi_list:
-        doujinshi.downloader = downloader
-        doujinshi.download()
+        if not options.dryrun:
+            doujinshi.downloader = downloader
+            doujinshi.download()
+
@@ -100,6 +102,7 @@ def main():
             table=doujinshi.table
             generate_metadatafile(options.output_dir,table,doujinshi)
+
         if options.is_save_download_history:
             with DB() as db:
                 db.add_one(doujinshi.id)
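
When --dry-run is set, main() never attaches the downloader and never calls download(), while metadata generation and download history still run, so a dry run records what a real run would have fetched. A minimal sketch of that control flow (the Doujinshi and DB stand-ins are hypothetical; only the guard mirrors the diff):

def process(doujinshi_list, options, downloader, db):
    for doujinshi in doujinshi_list:
        if not options.dryrun:
            doujinshi.downloader = downloader
            doujinshi.download()
        if options.is_save_download_history:
            db.add_one(doujinshi.id)  # history is kept even on a dry run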

nhentai/constant.py

@@ -29,7 +29,6 @@ NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
 NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
 NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')
-
 CONFIG = {
     'proxy': {'http': '', 'https': ''},
     'cookie': '',

nhentai/downloader.py

@@ -14,6 +14,7 @@ try:
 except ImportError:
     from urlparse import urlparse

+from nhentai import constant
 from nhentai.logger import logger
 from nhentai.parser import request
 from nhentai.utils import Singleton
@@ -34,7 +35,7 @@ class Downloader(Singleton):
         self.timeout = timeout
         self.delay = delay

-    def download_(self, url, folder='', filename='', retried=0):
+    def download_(self, url, folder='', filename='', retried=0, proxy=None):
         if self.delay:
             time.sleep(self.delay)
         logger.info('Starting to download {0} ...'.format(url))
@@ -51,7 +52,7 @@ class Downloader(Singleton):
            i = 0
            while i < 10:
                try:
-                    response = request('get', url, stream=True, timeout=self.timeout)
+                    response = request('get', url, stream=True, timeout=self.timeout, proxies=proxy)
                    if response.status_code != 200:
                        raise NHentaiImageNotExistException
@@ -77,7 +78,8 @@ class Downloader(Singleton):
                except (requests.HTTPError, requests.Timeout) as e:
                    if retried < 3:
                        logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), retried))
-                        return 0, self.download_(url=url, folder=folder, filename=filename, retried=retried+1)
+                        return 0, self.download_(url=url, folder=folder, filename=filename,
+                                                 retried=retried+1, proxy=proxy)
                    else:
                        return 0, None
@@ -128,7 +130,7 @@ class Downloader(Singleton):
         else:
             logger.warning('Path \'{0}\' already exist.'.format(folder))

-        queue = [(self, url, folder) for url in queue]
+        queue = [(self, url, folder, constant.CONFIG['proxy']) for url in queue]

         pool = multiprocessing.Pool(self.size, init_worker)
         [pool.apply_async(download_wrapper, args=item) for item in queue]
@@ -137,9 +139,9 @@ class Downloader(Singleton):
         pool.join()


-def download_wrapper(obj, url, folder=''):
+def download_wrapper(obj, url, folder='', proxy=None):
     if sys.platform == 'darwin' or semaphore.get_value():
-        return Downloader.download_(obj, url=url, folder=folder)
+        return Downloader.download_(obj, url=url, folder=folder, proxy=proxy)
     else:
         return -3, None
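
A pool worker cannot rely on the parent process's mutated constant.CONFIG: under the spawn start method (Windows, and macOS on recent Pythons) each worker re-imports the module and sees only the defaults. Passing the proxy inside every task tuple sidesteps that. A minimal sketch of the pattern, with hypothetical names:

import multiprocessing

def fetch(url, proxy=None):
    # A real worker would hand proxies=proxy to requests; here we just
    # show that per-task configuration arrives intact in the child.
    return url, proxy

if __name__ == '__main__':
    proxy = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}
    tasks = [(url, proxy) for url in ['https://example.com/1', 'https://example.com/2']]
    with multiprocessing.Pool(2) as pool:
        results = [pool.apply_async(fetch, args=item) for item in tasks]
        print([r.get() for r in results])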

nhentai/utils.py

@@ -20,7 +20,11 @@ def request(method, url, **kwargs):
         'User-Agent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
         'Cookie': constant.CONFIG['cookie']
     })
-    return getattr(session, method)(url, proxies=constant.CONFIG['proxy'], verify=False, **kwargs)
+
+    if not kwargs.get('proxies', None):
+        kwargs['proxies'] = constant.CONFIG['proxy']
+
+    return getattr(session, method)(url, verify=False, **kwargs)


 def check_cookie():
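
request() now lets a caller that already knows its proxy (such as a pool worker) pass proxies= explicitly, falling back to the global config otherwise, so every existing call site behaves as before. A minimal sketch of the fallback, with CONFIG as a local stand-in for nhentai.constant.CONFIG:

import requests

CONFIG = {'proxy': {'http': '', 'https': ''}}

def request(method, url, **kwargs):
    session = requests.Session()
    # An explicit proxies= kwarg wins; otherwise fall back to the global
    # configuration, exactly as before this change.
    if not kwargs.get('proxies', None):
        kwargs['proxies'] = CONFIG['proxy']
    return getattr(session, method)(url, **kwargs)

# e.g. a worker handed its proxy explicitly:
# request('get', 'https://example.com', proxies={'https': 'http://127.0.0.1:8080'})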
@@ -70,6 +74,13 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
     else:
         doujinshi_dir = '.'

+    if not os.path.exists(doujinshi_dir):
+        logger.warning('Path \'{0}\' does not exist, creating.'.format(doujinshi_dir))
+        try:
+            os.makedirs(doujinshi_dir)
+        except EnvironmentError as e:
+            logger.critical('{0}'.format(str(e)))
+
     file_list = os.listdir(doujinshi_dir)
     file_list.sort()
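
generate_html() previously assumed the target directory existed and would crash in os.listdir() when it did not; the new block creates it on demand. A minimal standalone sketch of the same guard (ensure_dir is a hypothetical helper; EnvironmentError is the Python 2/3-compatible alias of OSError used in the diff):

import os

def ensure_dir(path):
    # Create the directory tree on demand, surfacing OS errors instead of
    # letting a later os.listdir() fail with a bare traceback.
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except EnvironmentError as e:
            print('critical: {0}'.format(e))

ensure_dir('./output/example')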
@@ -194,7 +205,7 @@ def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_
 def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
     try:
         import img2pdf

         """Write images to a PDF file using img2pdf."""
         if doujinshi_obj is not None:
             doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
@@ -220,7 +231,7 @@ def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
             shutil.rmtree(doujinshi_dir, ignore_errors=True)

         logger.log(15, 'PDF file has been written to \'{0}\''.format(doujinshi_dir))
     except ImportError:
         logger.error("Please install img2pdf package by using pip.")
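
generate_pdf() keeps img2pdf optional by importing it inside the function and catching ImportError, so the rest of the tool works without the package installed. A minimal sketch of that guard (images_to_pdf is a hypothetical helper, not nhentai's full implementation):

def images_to_pdf(image_paths, pdf_path):
    try:
        import img2pdf
        with open(pdf_path, 'wb') as f:
            # img2pdf.convert accepts image file paths and returns PDF bytes
            f.write(img2pdf.convert(image_paths))
    except ImportError:
        print('Please install img2pdf package by using pip.')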