Compare commits


25 Commits

SHA1 Message Date
8cd4b948e7 0.4.15 2021-05-08 15:36:49 +08:00
f884384eb3 fix bug 2021-05-08 15:36:36 +08:00
87afab46c4 Merge pull request #211 from jwfiredragon/master 2021-04-25 09:56:49 +08:00
c7b1d7e6a8 Fix broken constant import 2021-04-24 16:39:54 -07:00
ad02371158 Update constant.py 2021-04-21 15:37:13 +08:00
7c9d55e0ee Merge pull request #208 from karamori77/master 2021-04-21 15:30:51 +08:00
           Changed write_comic_info from False to True
00aad774ae Fixed potential re-download 2021-04-20 11:04:52 +08:00
           Moved forward save-history check 1 indent so it works with download by id too
           Mapped all ids to int since there are cases where its a string in the API
373086b459 Update serializer.py 2021-04-18 21:45:15 +08:00
           changed Language to LanguageISO for ComicInfo.xml
           Language will be displayed by the LanguageISO code, it also forgoes rare language tags like rewrite and speechless
3a83f99771 Update constant.py 2021-04-18 21:40:47 +08:00
00627ab36a Update utils.py 2021-04-03 23:11:33 +08:00
592e163891 Update requirements.txt 2021-03-26 22:25:49 +08:00
84523475b0 Merge pull request #206 from Un1Gfn/patch-1 2021-03-25 19:01:39 +08:00
5f5461c902 Instuctions on getting csrftoken & sessionid 2021-03-25 18:57:20 +08:00
05e6ceb3cd Merge pull request #205 from Nontre12/master 2021-03-25 09:22:13 +08:00
db59426503 FIX: Use of img2lib even if it is not installed 2021-03-24 21:49:45 +01:00
74197f8f90 0.4.14 released for fix issue #204 2021-02-11 15:42:53 +08:00
6d91a39533 Merge pull request #203 from jwfiredragon/master 2021-02-11 15:41:15 +08:00
           Switching 'logger.warn' to 'logger.warning'
e181e0b9dd Switching 'logger.warn' to 'logger.warning' 2021-02-10 22:45:22 -08:00
6fed1f94cb 0.4.13 2021-01-18 16:26:39 +08:00
9cfb23c8ec Merge pull request #201 from mobrine1/patch-1 2021-01-18 16:25:42 +08:00
           Fix #200
fc347cdadf Fix #200 2021-01-17 15:02:43 -05:00
1cdebaab61 Merge pull request #199 from RicterZ/dev 2021-01-17 12:16:56 +08:00
           0.4.12
ce8ae54536 Merge pull request #195 from RicterZ/dev 2021-01-11 11:19:58 +08:00
           0.4.11
7aedb905d6 Merge pull request #194 from RicterZ/dev 2021-01-11 11:16:09 +08:00
           0.4.11
08bb8ffda4 Merge pull request #192 from RicterZ/dev 2021-01-10 14:41:02 +08:00
           Dev
11 changed files with 61 additions and 45 deletions

README.rst

@@ -67,6 +67,15 @@ Set your nhentai cookie against captcha:
 **NOTE**: The format of the cookie is `"csrftoken=TOKEN; sessionid=ID"`
+
+| To get csrftoken and sessionid, first login to your nhentai account in web browser, then:
+| (Chrome) |ve| |ld| More tools |ld| Developer tools |ld| Application |ld| Storage |ld| Cookies |ld| https://nhentai.net
+| (Firefox) |hv| |ld| Web Developer |ld| Web Developer Tools |ld| Storage |ld| Cookies |ld| https://nhentai.net
+|
+.. |hv| unicode:: U+2630 .. https://www.compart.com/en/unicode/U+2630
+.. |ve| unicode:: U+22EE .. https://www.compart.com/en/unicode/U+22EE
+.. |ld| unicode:: U+2014 .. https://www.compart.com/en/unicode/U+2014
+
 Download specified doujinshi:
 
 .. code-block:: bash
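
The cookie string these instructions recover is ultimately sent as an HTTP header. Purely as an illustration of what the string is (placeholder values, not the project's actual code; nhentai itself consumes it through its cookie option), sending it by hand might look like:

    import requests

    # "csrftoken=TOKEN; sessionid=ID" copied from the browser; TOKEN and ID
    # are placeholders for your own session values.
    cookie = 'csrftoken=TOKEN; sessionid=ID'

    response = requests.get(
        'https://nhentai.net/',
        headers={
            'Cookie': cookie,
            # a browser-like User-Agent is commonly needed alongside the cookie
            'User-Agent': 'Mozilla/5.0',
        },
    )
    print(response.status_code)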

nhentai/__init__.py

@@ -1,3 +1,3 @@
-__version__ = '0.4.12'
+__version__ = '0.4.15'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -84,7 +84,7 @@ def cmd_parser():
                      help='timeout for downloading doujinshi')
     parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0,
                      help='slow down between downloading every doujinshi')
-    parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
+    parser.add_option('--proxy', type='string', dest='proxy', action='store',
                      help='store a proxy, for example: -p \'http://127.0.0.1:1080\'')
     parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
     parser.add_option('--format', type='string', dest='name_format', action='store',
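Dropping default='' changes what an unset --proxy looks like downstream: optparse falls back to None when no default is given. A minimal sketch of the difference, using the same option as the diff:

    from optparse import OptionParser

    parser = OptionParser()
    parser.add_option('--proxy', type='string', dest='proxy', action='store')

    options, _ = parser.parse_args([])      # no --proxy given
    print(options.proxy)                    # None (was '' with default='')

    options, _ = parser.parse_args(['--proxy', 'http://127.0.0.1:1080'])
    print(options.proxy)                    # http://127.0.0.1:1080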

nhentai/command.py

@@ -65,11 +65,11 @@ def main():
     if options.is_download and doujinshis:
         doujinshi_ids = [i['id'] for i in doujinshis]
 
         if options.is_save_download_history:
             with DB() as db:
                 data = map(int, db.get_all())
 
-            doujinshi_ids = list(set(doujinshi_ids) - set(data))
+            doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))
 
         if doujinshi_ids:
             for i, id_ in enumerate(doujinshi_ids):
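This is the "mapped all ids to int" fix from commit 00aad774ae: the API sometimes returns IDs as strings, and Python's set difference treats '1234' and 1234 as distinct members, so already-downloaded galleries survived the filter. A standalone sketch with sample IDs:

    downloaded = set(map(int, ['1234', '5678']))    # history rows come back as strings
    api_ids = ['1234', 9999]                        # the API mixes str and int ids

    print(set(api_ids) - downloaded)                # '1234' survives: would re-download
    print(set(map(int, api_ids)) - downloaded)      # {9999}: the duplicate is filtered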

nhentai/constant.py

@@ -36,3 +36,10 @@ CONFIG = {
     'language': '',
     'template': '',
 }
+
+LANGUAGEISO = {
+    'english': 'en',
+    'chinese': 'zh',
+    'japanese': 'ja',
+    'translated': 'translated'
+}

nhentai/downloader.py

@@ -119,14 +119,14 @@ class Downloader(Singleton):
         folder = os.path.join(self.path, folder)
 
         if not os.path.exists(folder):
-            logger.warn('Path \'{0}\' does not exist, creating.'.format(folder))
+            logger.warning('Path \'{0}\' does not exist, creating.'.format(folder))
             try:
                 os.makedirs(folder)
             except EnvironmentError as e:
                 logger.critical('{0}'.format(str(e)))
         else:
-            logger.warn('Path \'{0}\' already exist.'.format(folder))
+            logger.warning('Path \'{0}\' already exist.'.format(folder))
 
         queue = [(self, url, folder) for url in queue]

nhentai/logger.py

@@ -173,7 +173,7 @@ logger.setLevel(logging.DEBUG)
 if __name__ == '__main__':
     logger.log(15, 'nhentai')
     logger.info('info')
-    logger.warn('warn')
+    logger.warning('warning')
     logger.debug('debug')
     logger.error('error')
     logger.critical('critical')
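The warn-to-warning sweep here and in downloader.py and parser.py tracks the standard library: Logger.warn is an undocumented alias of Logger.warning, deprecated since Python 3.3 and removed in Python 3.13, so the change matters beyond style. Sketch:

    import logging

    logging.basicConfig(level=logging.DEBUG)
    logger = logging.getLogger('nhentai')

    logger.warning('warning')  # the supported spelling
    # logger.warn('warn')      # deprecated alias: emits DeprecationWarning on
    #                          # older Pythons, AttributeError on 3.13+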

nhentai/parser.py

@@ -126,7 +126,7 @@ def doujinshi_parser(id_):
             return doujinshi_parser(str(id_))
         except Exception as e:
-            logger.warn('Error: {}, ignored'.format(str(e)))
+            logger.warning('Error: {}, ignored'.format(str(e)))
             return None
 
     html = BeautifulSoup(response, 'html.parser')
@@ -180,7 +180,7 @@ def old_search_parser(keyword, sorting='date', page=1):
     result = _get_title_and_id(response)
     if not result:
-        logger.warn('Not found anything of keyword {}'.format(keyword))
+        logger.warning('Not found anything of keyword {}'.format(keyword))
 
     return result
@@ -221,7 +221,7 @@ def search_parser(keyword, sorting, page, is_page_all=False):
             break
 
         if 'result' not in response:
-            logger.warn('No result in response in page {}'.format(p))
+            logger.warning('No result in response in page {}'.format(p))
             break
 
         for row in response['result']:
@@ -230,7 +230,7 @@ def search_parser(keyword, sorting, page, is_page_all=False):
             result.append({'id': row['id'], 'title': title})
 
     if not result:
-        logger.warn('No results for keywords {}'.format(keyword))
+        logger.warning('No results for keywords {}'.format(keyword))
 
     return result

nhentai/serializer.py

@@ -2,7 +2,8 @@
 import json
 import os
 from xml.sax.saxutils import escape
+from nhentai.constant import LANGUAGEISO
 
 
 def serialize_json(doujinshi, dir):
     metadata = {'title': doujinshi.name,
@@ -65,7 +65,8 @@ def serialize_comicxml(doujinshi, dir):
     if doujinshi.info.languages:
         languages = [i.strip() for i in doujinshi.info.languages.split(',')]
         xml_write_simple_tag(f, 'Translated', 'Yes' if 'translated' in languages else 'No')
-        [xml_write_simple_tag(f, 'Language', i) for i in languages if i != 'translated']
+        [xml_write_simple_tag(f, 'LanguageISO', LANGUAGEISO[i]) for i in languages \
+            if (i != 'translated' and i in LANGUAGEISO)]
 
     f.write('</ComicInfo>')
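The new comprehension only emits a LanguageISO tag for languages present in the LANGUAGEISO map, which is how rare tags such as 'rewrite' and 'speechless' are skipped, per commit 373086b459. A standalone sketch of the filtering, with the mapping copied from constant.py and sample input:

    LANGUAGEISO = {
        'english': 'en',
        'chinese': 'zh',
        'japanese': 'ja',
        'translated': 'translated',
    }

    languages = ['translated', 'japanese', 'rewrite']
    iso_codes = [LANGUAGEISO[i] for i in languages
                 if i != 'translated' and i in LANGUAGEISO]
    print(iso_codes)  # ['ja']: 'rewrite' has no ISO code, 'translated' is a flag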

nhentai/utils.py

@@ -166,7 +166,7 @@ def generate_main_html(output_dir='./'):
         logger.warning('Writing Main Viewer failed ({})'.format(str(e)))
 
 
-def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=False):
+def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True):
     if doujinshi_obj is not None:
         doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
         if write_comic_info:
@@ -194,36 +194,36 @@ def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_
 def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
     try:
         import img2pdf
-
-        """Write images to a PDF file using img2pdf."""
-        if doujinshi_obj is not None:
-            doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
-            pdf_filename = os.path.join(
-                os.path.join(doujinshi_dir, '..'),
-                '{}.pdf'.format(doujinshi_obj.filename)
-            )
-        else:
-            pdf_filename = './doujinshi.pdf'
-            doujinshi_dir = '.'
-
-        file_list = os.listdir(doujinshi_dir)
-        file_list.sort()
-
-        logger.info('Writing PDF file to path: {}'.format(pdf_filename))
-        with open(pdf_filename, 'wb') as pdf_f:
-            full_path_list = (
-                [os.path.join(doujinshi_dir, image) for image in file_list]
-            )
-            pdf_f.write(img2pdf.convert(full_path_list))
-
-        if rm_origin_dir:
-            shutil.rmtree(doujinshi_dir, ignore_errors=True)
-
-        logger.log(15, 'PDF file has been written to \'{0}\''.format(doujinshi_dir))
     except ImportError:
         logger.error("Please install img2pdf package by using pip.")
+
+    """Write images to a PDF file using img2pdf."""
+    if doujinshi_obj is not None:
+        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
+        pdf_filename = os.path.join(
+            os.path.join(doujinshi_dir, '..'),
+            '{}.pdf'.format(doujinshi_obj.filename)
+        )
+    else:
+        pdf_filename = './doujinshi.pdf'
+        doujinshi_dir = '.'
+
+    file_list = os.listdir(doujinshi_dir)
+    file_list.sort()
+
+    logger.info('Writing PDF file to path: {}'.format(pdf_filename))
+    with open(pdf_filename, 'wb') as pdf_f:
+        full_path_list = (
+            [os.path.join(doujinshi_dir, image) for image in file_list]
+        )
+        pdf_f.write(img2pdf.convert(full_path_list))
+
+    if rm_origin_dir:
+        shutil.rmtree(doujinshi_dir, ignore_errors=True)
+
+    logger.log(15, 'PDF file has been written to \'{0}\''.format(doujinshi_dir))
 
 
 def unicode_truncate(s, length, encoding='utf-8'):
     """https://stackoverflow.com/questions/1809531/truncating-unicode-so-it-fits-a-maximum-size-when-encoded-for-wire-transfer
     """

requirements.txt

@@ -1,7 +1,6 @@
 requests>=2.5.0
-soupsieve<2.0
+soupsieve
 BeautifulSoup4>=4.0.0
-threadpool>=1.2.7
 tabulate>=0.7.5
 future>=0.15.2
 iso8601 >= 0.1