Compare commits


29 Commits

Author SHA1 Message Date
d9d2a6fb91 fix bug of proxy while downloading doujinshi 2021-06-02 23:20:56 +08:00
8cd4b948e7 0.4.15 2021-05-08 15:36:49 +08:00
f884384eb3 fix bug 2021-05-08 15:36:36 +08:00
87afab46c4 Merge pull request #211 from jwfiredragon/master 2021-04-25 09:56:49 +08:00
c7b1d7e6a8 Fix broken constant import 2021-04-24 16:39:54 -07:00
ad02371158 Update constant.py 2021-04-21 15:37:13 +08:00
7c9d55e0ee Merge pull request #208 from karamori77/master
Changed write_comic_info from False to True
2021-04-21 15:30:51 +08:00
00aad774ae Fixed potential re-download
Moved forward save-history check 1 indent so it works with download by id too
Mapped all ids to int since there are cases where its a string in the API
2021-04-20 11:04:52 +08:00
373086b459 Update serializer.py
changed Language to LanguageISO for ComicInfo.xml
Language will be displayed by the LanguageISO code, it also forgoes rare language tags like rewrite and speechless
2021-04-18 21:45:15 +08:00
3a83f99771 Update constant.py 2021-04-18 21:40:47 +08:00
00627ab36a Update utils.py 2021-04-03 23:11:33 +08:00
592e163891 Update requirements.txt 2021-03-26 22:25:49 +08:00
84523475b0 Merge pull request #206 from Un1Gfn/patch-1 2021-03-25 19:01:39 +08:00
5f5461c902 Instuctions on getting csrftoken & sessionid 2021-03-25 18:57:20 +08:00
05e6ceb3cd Merge pull request #205 from Nontre12/master 2021-03-25 09:22:13 +08:00
db59426503 FIX: Use of img2lib even if it is not installed 2021-03-24 21:49:45 +01:00
74197f8f90 0.4.14 released for fix issue #204 2021-02-11 15:42:53 +08:00
6d91a39533 Merge pull request #203 from jwfiredragon/master
Switching 'logger.warn' to 'logger.warning'
2021-02-11 15:41:15 +08:00
e181e0b9dd Switching 'logger.warn' to 'logger.warning' 2021-02-10 22:45:22 -08:00
6fed1f94cb 0.4.13 2021-01-18 16:26:39 +08:00
9cfb23c8ec Merge pull request #201 from mobrine1/patch-1
Fix #200
2021-01-18 16:25:42 +08:00
fc347cdadf Fix #200 2021-01-17 15:02:43 -05:00
1cdebaab61 Merge pull request #199 from RicterZ/dev
0.4.12
2021-01-17 12:16:56 +08:00
9513141ccf 0.4.12 2021-01-17 11:51:22 +08:00
bdc9fa113e fix #197 set proxy to null 2021-01-17 11:50:22 +08:00
36946111db fix #198 add notice 2021-01-17 11:42:06 +08:00
ce8ae54536 Merge pull request #195 from RicterZ/dev
0.4.11
2021-01-11 11:19:58 +08:00
7aedb905d6 Merge pull request #194 from RicterZ/dev
0.4.11
2021-01-11 11:16:09 +08:00
08bb8ffda4 Merge pull request #192 from RicterZ/dev
Dev
2021-01-10 14:41:02 +08:00
11 changed files with 81 additions and 61 deletions

README.rst

@@ -67,6 +67,15 @@ Set your nhentai cookie against captcha:
 **NOTE**: The format of the cookie is `"csrftoken=TOKEN; sessionid=ID"`
+
+| To get csrftoken and sessionid, first login to your nhentai account in web browser, then:
+| (Chrome) |ve| |ld| More tools |ld| Developer tools |ld| Application |ld| Storage |ld| Cookies |ld| https://nhentai.net
+| (Firefox) |hv| |ld| Web Developer |ld| Web Developer Tools |ld| Storage |ld| Cookies |ld| https://nhentai.net
+|
+
+.. |hv| unicode:: U+2630 .. https://www.compart.com/en/unicode/U+2630
+.. |ve| unicode:: U+22EE .. https://www.compart.com/en/unicode/U+22EE
+.. |ld| unicode:: U+2014 .. https://www.compart.com/en/unicode/U+2014
 Download specified doujinshi:
 .. code-block:: bash

nhentai/__init__.py

@@ -1,3 +1,3 @@
-__version__ = '0.4.11'
+__version__ = '0.4.15'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -84,7 +84,7 @@ def cmd_parser():
                       help='timeout for downloading doujinshi')
     parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0,
                       help='slow down between downloading every doujinshi')
-    parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
+    parser.add_option('--proxy', type='string', dest='proxy', action='store',
                       help='store a proxy, for example: -p \'http://127.0.0.1:1080\'')
     parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
     parser.add_option('--format', type='string', dest='name_format', action='store',
@@ -120,7 +120,6 @@ def cmd_parser():
     try:
         sys.argv = [unicode(i.decode(sys.stdin.encoding)) for i in sys.argv]
-        print()
     except (NameError, TypeError):
         pass
     except UnicodeDecodeError:
@@ -157,7 +156,7 @@ def cmd_parser():
         exit(0)
     # TODO: search without language
-    if args.proxy:
+    if args.proxy is not None:
         proxy_url = urlparse(args.proxy)
         if not args.proxy == '' and proxy_url.scheme not in ('http', 'https'):
             logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
@@ -171,7 +170,7 @@ def cmd_parser():
         write_config()
         exit(0)
-    if args.viewer_template:
+    if args.viewer_template is not None:
         if not args.viewer_template:
             args.viewer_template = 'default'
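
Note: the two `is not None` checks above only work together with dropping `default=''` on `--proxy`. An unset option now stays `None`, so the value loaded from config.json is kept unless the flag is actually passed, while an explicit empty string still clears the stored proxy (see "fix #197 set proxy to null"). A minimal sketch of that three-way distinction, assuming requests-style proxy dictionaries; `resolve_proxy` is a hypothetical helper, not part of nhentai:

    from urllib.parse import urlparse

    def resolve_proxy(arg_proxy, config):
        # --proxy not passed at all: keep whatever is already in config.json
        if arg_proxy is None:
            return config['proxy']
        # --proxy '': explicitly clear the stored proxy (the #197 behaviour)
        if arg_proxy == '':
            return {'http': '', 'https': ''}
        # anything else must be an http(s) URL
        if urlparse(arg_proxy).scheme not in ('http', 'https'):
            raise ValueError('Invalid proxy protocol: {0!r}'.format(arg_proxy))
        return {'http': arg_proxy, 'https': arg_proxy}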

nhentai/command.py

@@ -19,6 +19,11 @@ from nhentai.utils import generate_html, generate_cbz, generate_main_html, gener
 def main():
     banner()
+
+    if sys.version_info < (3, 0, 0):
+        logger.error('nhentai now only support Python 3.x')
+        exit(1)
+
     options = cmd_parser()
     logger.info('Using mirror: {0}'.format(BASE_URL))
@@ -60,11 +65,11 @@ def main():
     if options.is_download and doujinshis:
         doujinshi_ids = [i['id'] for i in doujinshis]
         if options.is_save_download_history:
             with DB() as db:
                 data = map(int, db.get_all())
-            doujinshi_ids = list(set(doujinshi_ids) - set(data))
+            doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))
         if doujinshi_ids:
             for i, id_ in enumerate(doujinshi_ids):
@@ -114,8 +119,4 @@ signal.signal(signal.SIGINT, signal_handler)
 if __name__ == '__main__':
-    if sys.version_info < (3, 0, 0):
-        logger.error('nhentai now only support Python 3.x')
-        exit(1)
-
     main()
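
Note: the `map(int, ...)` on both sides of the set difference is what makes the download-history check reliable. IDs coming back from the API are sometimes strings, and a string never equals an int, so without the normalisation previously-downloaded works were fetched again (the "Fixed potential re-download" commit). A tiny self-contained illustration; the IDs are made up:

    history = {177013, 265918}                 # ints read back from history.sqlite3
    incoming = ['177013', '265918', '228922']  # the API may return IDs as strings

    # Mixed types: nothing matches, so everything would be re-downloaded.
    print(set(incoming) - history)             # {'177013', '265918', '228922'}

    # Normalising both sides to int keeps only the genuinely new ID.
    print(set(map(int, incoming)) - history)   # {228922}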

nhentai/constant.py

@@ -29,10 +29,16 @@ NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai'
 NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
 NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')

 CONFIG = {
     'proxy': {'http': '', 'https': ''},
     'cookie': '',
     'language': '',
     'template': '',
 }
+
+LANGUAGEISO ={
+    'english' : 'en',
+    'chinese' : 'zh',
+    'japanese' : 'ja',
+    'translated' : 'translated'
+}

nhentai/downloader.py

@@ -14,6 +14,7 @@ try:
 except ImportError:
     from urlparse import urlparse

+from nhentai import constant
 from nhentai.logger import logger
 from nhentai.parser import request
 from nhentai.utils import Singleton
@@ -34,7 +35,7 @@ class Downloader(Singleton):
         self.timeout = timeout
         self.delay = delay

-    def download_(self, url, folder='', filename='', retried=0):
+    def download_(self, url, folder='', filename='', retried=0, proxy=None):
         if self.delay:
             time.sleep(self.delay)
         logger.info('Starting to download {0} ...'.format(url))
@@ -51,7 +52,7 @@
         i = 0
         while i < 10:
             try:
-                response = request('get', url, stream=True, timeout=self.timeout)
+                response = request('get', url, stream=True, timeout=self.timeout, proxies=proxy)
                 if response.status_code != 200:
                     raise NHentaiImageNotExistException
@@ -77,7 +78,8 @@
             except (requests.HTTPError, requests.Timeout) as e:
                 if retried < 3:
                     logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), retried))
-                    return 0, self.download_(url=url, folder=folder, filename=filename, retried=retried+1)
+                    return 0, self.download_(url=url, folder=folder, filename=filename,
+                                             retried=retried+1, proxy=proxy)
                 else:
                     return 0, None
@@ -119,16 +121,16 @@
         folder = os.path.join(self.path, folder)

         if not os.path.exists(folder):
-            logger.warn('Path \'{0}\' does not exist, creating.'.format(folder))
+            logger.warning('Path \'{0}\' does not exist, creating.'.format(folder))
             try:
                 os.makedirs(folder)
             except EnvironmentError as e:
                 logger.critical('{0}'.format(str(e)))
         else:
-            logger.warn('Path \'{0}\' already exist.'.format(folder))
+            logger.warning('Path \'{0}\' already exist.'.format(folder))

-        queue = [(self, url, folder) for url in queue]
+        queue = [(self, url, folder, constant.CONFIG['proxy']) for url in queue]

         pool = multiprocessing.Pool(self.size, init_worker)
         [pool.apply_async(download_wrapper, args=item) for item in queue]
@@ -137,9 +139,9 @@ class Downloader(Singleton):
         pool.join()

-def download_wrapper(obj, url, folder=''):
+def download_wrapper(obj, url, folder='', proxy=None):
     if sys.platform == 'darwin' or semaphore.get_value():
-        return Downloader.download_(obj, url=url, folder=folder)
+        return Downloader.download_(obj, url=url, folder=folder, proxy=proxy)
     else:
         return -3, None
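
Note: the proxy is now threaded through explicitly: `download()` packs `constant.CONFIG['proxy']` into each task tuple, the pool hands it to `download_wrapper()`, and it ends up as the `proxies=` argument of the request. Each worker process therefore receives the proxy with its task instead of relying on module-level config set in the parent after startup, which is not guaranteed to be visible under spawn-based multiprocessing. A stripped-down sketch of the same pattern, with hypothetical names (`fetch`, `fetch_wrapper`) rather than the project's code:

    import multiprocessing

    CONFIG = {'proxy': {'http': 'http://127.0.0.1:1080', 'https': 'http://127.0.0.1:1080'}}

    def fetch(url, proxy=None):
        # The worker gets the proxy as an argument instead of reading global config.
        return '{} via {}'.format(url, (proxy or {}).get('http') or 'no proxy')

    def fetch_wrapper(url, proxy=None):
        return fetch(url, proxy=proxy)

    if __name__ == '__main__':
        urls = ['https://example.org/1.jpg', 'https://example.org/2.jpg']
        tasks = [(u, CONFIG['proxy']) for u in urls]   # the proxy travels with each task
        with multiprocessing.Pool(2) as pool:
            results = [pool.apply_async(fetch_wrapper, args=t) for t in tasks]
            for r in results:
                print(r.get())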

nhentai/logger.py

@@ -1,7 +1,6 @@
 #
 # Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
 #
-from __future__ import print_function, unicode_literals
 import logging
 import re
 import platform
@@ -174,7 +173,7 @@ logger.setLevel(logging.DEBUG)
 if __name__ == '__main__':
     logger.log(15, 'nhentai')
     logger.info('info')
-    logger.warn('warn')
+    logger.warning('warning')
     logger.debug('debug')
     logger.error('error')
     logger.critical('critical')

nhentai/parser.py

@@ -126,7 +126,7 @@ def doujinshi_parser(id_):
             return doujinshi_parser(str(id_))
         except Exception as e:
-            logger.warn('Error: {}, ignored'.format(str(e)))
+            logger.warning('Error: {}, ignored'.format(str(e)))
             return None

     html = BeautifulSoup(response, 'html.parser')
@@ -180,7 +180,7 @@ def old_search_parser(keyword, sorting='date', page=1):
     result = _get_title_and_id(response)
     if not result:
-        logger.warn('Not found anything of keyword {}'.format(keyword))
+        logger.warning('Not found anything of keyword {}'.format(keyword))

     return result
@@ -221,7 +221,7 @@ def search_parser(keyword, sorting, page, is_page_all=False):
                 break

         if 'result' not in response:
-            logger.warn('No result in response in page {}'.format(p))
+            logger.warning('No result in response in page {}'.format(p))
             break

         for row in response['result']:
@@ -230,7 +230,7 @@ def search_parser(keyword, sorting, page, is_page_all=False):
             result.append({'id': row['id'], 'title': title})

     if not result:
-        logger.warn('No results for keywords {}'.format(keyword))
+        logger.warning('No results for keywords {}'.format(keyword))

     return result

nhentai/serializer.py

@@ -2,7 +2,7 @@
 import json
 import os
 from xml.sax.saxutils import escape
+from nhentai.constant import LANGUAGEISO


 def serialize_json(doujinshi, dir):
     metadata = {'title': doujinshi.name,
@@ -65,7 +65,8 @@ def serialize_comicxml(doujinshi, dir):
     if doujinshi.info.languages:
         languages = [i.strip() for i in doujinshi.info.languages.split(',')]
         xml_write_simple_tag(f, 'Translated', 'Yes' if 'translated' in languages else 'No')
-        [xml_write_simple_tag(f, 'Language', i) for i in languages if i != 'translated']
+        [xml_write_simple_tag(f, 'LanguageISO', LANGUAGEISO[i]) for i in languages \
+            if (i != 'translated' and i in LANGUAGEISO)]

     f.write('</ComicInfo>')
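
Note: ComicInfo.xml expects ISO codes in a `LanguageISO` element, so the serializer now routes nhentai's language tags through the new `LANGUAGEISO` table and drops anything it cannot map, such as the 'rewrite' or 'speechless' tags mentioned in the commit message. A small sketch of the resulting elements; `language_iso_tags` is an illustrative stand-in for the `xml_write_simple_tag` calls, not a function in the project:

    LANGUAGEISO = {'english': 'en', 'chinese': 'zh', 'japanese': 'ja', 'translated': 'translated'}

    def language_iso_tags(languages):
        # Mirror of the list comprehension above: skip 'translated' and unmapped tags.
        return ['<LanguageISO>{}</LanguageISO>'.format(LANGUAGEISO[i])
                for i in languages if i != 'translated' and i in LANGUAGEISO]

    print(language_iso_tags(['translated', 'chinese', 'rewrite']))
    # ['<LanguageISO>zh</LanguageISO>']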

nhentai/utils.py

@@ -20,7 +20,11 @@ def request(method, url, **kwargs):
         'User-Agent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
         'Cookie': constant.CONFIG['cookie']
     })
-    return getattr(session, method)(url, proxies=constant.CONFIG['proxy'], verify=False, **kwargs)
+
+    if not kwargs.get('proxies', None):
+        kwargs['proxies'] = constant.CONFIG['proxy']
+
+    return getattr(session, method)(url, verify=False, **kwargs)


 def check_cookie():
@@ -166,7 +170,7 @@ def generate_main_html(output_dir='./'):
         logger.warning('Writing Main Viewer failed ({})'.format(str(e)))


-def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=False):
+def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True):
     if doujinshi_obj is not None:
         doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
         if write_comic_info:
@@ -194,36 +198,36 @@ def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_
 def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
     try:
         import img2pdf
+
+        """Write images to a PDF file using img2pdf."""
+        if doujinshi_obj is not None:
+            doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
+            pdf_filename = os.path.join(
+                os.path.join(doujinshi_dir, '..'),
+                '{}.pdf'.format(doujinshi_obj.filename)
+            )
+        else:
+            pdf_filename = './doujinshi.pdf'
+            doujinshi_dir = '.'
+
+        file_list = os.listdir(doujinshi_dir)
+        file_list.sort()
+
+        logger.info('Writing PDF file to path: {}'.format(pdf_filename))
+        with open(pdf_filename, 'wb') as pdf_f:
+            full_path_list = (
+                [os.path.join(doujinshi_dir, image) for image in file_list]
+            )
+            pdf_f.write(img2pdf.convert(full_path_list))
+
+        if rm_origin_dir:
+            shutil.rmtree(doujinshi_dir, ignore_errors=True)
+
+        logger.log(15, 'PDF file has been written to \'{0}\''.format(doujinshi_dir))
     except ImportError:
         logger.error("Please install img2pdf package by using pip.")

-    """Write images to a PDF file using img2pdf."""
-    if doujinshi_obj is not None:
-        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
-        pdf_filename = os.path.join(
-            os.path.join(doujinshi_dir, '..'),
-            '{}.pdf'.format(doujinshi_obj.filename)
-        )
-    else:
-        pdf_filename = './doujinshi.pdf'
-        doujinshi_dir = '.'
-
-    file_list = os.listdir(doujinshi_dir)
-    file_list.sort()
-
-    logger.info('Writing PDF file to path: {}'.format(pdf_filename))
-    with open(pdf_filename, 'wb') as pdf_f:
-        full_path_list = (
-            [os.path.join(doujinshi_dir, image) for image in file_list]
-        )
-        pdf_f.write(img2pdf.convert(full_path_list))
-
-    if rm_origin_dir:
-        shutil.rmtree(doujinshi_dir, ignore_errors=True)
-
-    logger.log(15, 'PDF file has been written to \'{0}\''.format(doujinshi_dir))


 def unicode_truncate(s, length, encoding='utf-8'):
     """https://stackoverflow.com/questions/1809531/truncating-unicode-so-it-fits-a-maximum-size-when-encoded-for-wire-transfer
     """

requirements.txt

@@ -1,7 +1,6 @@
 requests>=2.5.0
-soupsieve<2.0
+soupsieve
 BeautifulSoup4>=4.0.0
-threadpool>=1.2.7
 tabulate>=0.7.5
 future>=0.15.2
 iso8601 >= 0.1