reformat files #266

Ricter Z 2023-02-05 23:13:47 +08:00
parent 06fdf0dade
commit 2adf8ccc9d
9 changed files with 105 additions and 120 deletions

nhentai/command.py

@@ -1,10 +1,7 @@
-#!/usr/bin/env python2.7
 # coding: utf-8
 import sys
 import signal
 import platform
-import time

 from nhentai import constant
 from nhentai.cmdline import cmd_parser, banner
@@ -25,16 +22,16 @@ def main():
         exit(1)

     options = cmd_parser()
-    logger.info('Using mirror: {0}'.format(BASE_URL))
+    logger.info(f'Using mirror: {BASE_URL}')

     # CONFIG['proxy'] will be changed after cmd_parser()
     if constant.CONFIG['proxy']['http']:
-        logger.info('Using proxy: {0}'.format(constant.CONFIG['proxy']['http']))
+        logger.info(f'Using proxy: {constant.CONFIG["proxy"]["http"]}')

     if not constant.CONFIG['template']:
         constant.CONFIG['template'] = 'default'

-    logger.info('Using viewer template "{}"'.format(constant.CONFIG['template']))
+    logger.info(f'Using viewer template "{constant.CONFIG["template"]}"')

     # check your cookie
     check_cookie()
@@ -53,8 +50,8 @@ def main():
     elif options.keyword:
         if constant.CONFIG['language']:
-            logger.info('Using default language: {0}'.format(constant.CONFIG['language']))
-            options.keyword += ' language:{}'.format(constant.CONFIG['language'])
+            logger.info(f'Using default language: {constant.CONFIG["language"]}')
+            options.keyword += f' language:{constant.CONFIG["language"]}'

         _search_parser = legacy_search_parser if options.legacy else search_parser
         doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list,
@@ -121,7 +118,8 @@ def main():
         doujinshi.show()

-signal.signal(signal.SIGINT, signal_handler)

 if __name__ == '__main__':
+    import urllib3.exceptions
+    urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
+    signal.signal(signal.SIGINT, signal_handler)
     main()

nhentai/constant.py

@@ -1,30 +1,24 @@
 # coding: utf-8
 import os
 import tempfile

-try:
-    from urlparse import urlparse
-except ImportError:
-    from urllib.parse import urlparse
+from urllib.parse import urlparse

 BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')

-__api_suspended_DETAIL_URL = '%s/api/gallery' % BASE_URL
-DETAIL_URL = '%s/g' % BASE_URL
-LEGACY_SEARCH_URL = '%s/search/' % BASE_URL
-SEARCH_URL = '%s/api/galleries/search' % BASE_URL
-
-TAG_API_URL = '%s/api/galleries/tagged' % BASE_URL
-LOGIN_URL = '%s/login/' % BASE_URL
-CHALLENGE_URL = '%s/challenge' % BASE_URL
-FAV_URL = '%s/favorites/' % BASE_URL
+__api_suspended_DETAIL_URL = f'{BASE_URL}/api/gallery'
+DETAIL_URL = f'{BASE_URL}/g'
+LEGACY_SEARCH_URL = f'{BASE_URL}/search/'
+SEARCH_URL = f'{BASE_URL}/api/galleries/search'
+TAG_API_URL = f'{BASE_URL}/api/galleries/tagged'
+LOGIN_URL = f'{BASE_URL}/login/'
+CHALLENGE_URL = f'{BASE_URL}/challenge'
+FAV_URL = f'{BASE_URL}/favorites/'

 u = urlparse(BASE_URL)
-IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)
+IMAGE_URL = f'{u.scheme}://i.{u.hostname}/galleries'

 NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
 NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
@@ -38,9 +32,9 @@ CONFIG = {
     'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)'
 }

-LANGUAGEISO ={
-    'english' : 'en',
-    'chinese' : 'zh',
-    'japanese' : 'ja',
-    'translated' : 'translated'
+LANGUAGEISO = {
+    'english': 'en',
+    'chinese': 'zh',
+    'japanese': 'ja',
+    'translated': 'translated'
 }
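
For orientation only (not part of the commit): a minimal sketch of how the reformatted constants resolve at import time, assuming the module shown above; the mirror URL is hypothetical.

# Illustrative sketch, not part of this commit. Mirror URL is hypothetical.
import os
os.environ['NHENTAI'] = 'https://example-mirror.net'  # must be set before importing nhentai.constant

from nhentai import constant

print(constant.DETAIL_URL)   # https://example-mirror.net/g
print(constant.IMAGE_URL)    # https://i.example-mirror.net/galleries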

nhentai/doujinshi.py

@@ -35,7 +35,7 @@ class Doujinshi(object):
         self.ext = ext
         self.pages = pages
         self.downloader = None
-        self.url = '%s/%d' % (DETAIL_URL, self.id)
+        self.url = f'{DETAIL_URL}/{self.id}'
         self.info = DoujinshiInfo(**kwargs)

         name_format = name_format.replace('%i', format_filename(str(self.id)))
@@ -59,23 +59,22 @@
         ]

     def __repr__(self):
-        return '<Doujinshi: {0}>'.format(self.name)
+        return f'<Doujinshi: {self.name}>'

     def show(self):
-        logger.info(u'Print doujinshi information of {0}\n{1}'.format(self.id, tabulate(self.table)))
+        logger.info(f'Print doujinshi information of {self.id}\n{tabulate(self.table)}')

     def download(self, regenerate_cbz=False):
-        logger.info('Starting to download doujinshi: %s' % self.name)
+        logger.info(f'Starting to download doujinshi: {self.name}')
         if self.downloader:
             download_queue = []
             if len(self.ext) != self.pages:
                 logger.warning('Page count and ext count do not equal')

             for i in range(1, min(self.pages, len(self.ext)) + 1):
-                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i - 1]))
+                download_queue.append(f'{IMAGE_URL}/{self.img_id}/{i}.{self.ext[i-1]}')

-            self.downloader.download(download_queue, self.filename, regenerate_cbz=regenerate_cbz)
+            self.downloader.start_download(download_queue, self.filename, regenerate_cbz=regenerate_cbz)
         else:
             logger.critical('Downloader has not been loaded')
@@ -87,4 +86,4 @@ if __name__ == '__main__':
     try:
         test.download()
     except Exception as e:
-        print('Exception: %s' % str(e))
+        print(f'Exception: {e}')

nhentai/downloader.py

@@ -3,23 +3,18 @@
 import multiprocessing
 import signal

-from future.builtins import str as text
 import sys
 import os
 import requests
 import time

-try:
-    from urllib.parse import urlparse
-except ImportError:
-    from urlparse import urlparse
+from urllib.parse import urlparse

 from nhentai import constant
 from nhentai.logger import logger
 from nhentai.parser import request
 from nhentai.utils import Singleton

-requests.packages.urllib3.disable_warnings()

 semaphore = multiprocessing.Semaphore(1)
@@ -27,6 +22,22 @@ class NHentaiImageNotExistException(Exception):
     pass


+def download_callback(result):
+    result, data = result
+    if result == 0:
+        logger.warning('fatal errors occurred, ignored')
+        # exit(1)
+    elif result == -1:
+        logger.warning(f'url {data} return status code 404')
+    elif result == -2:
+        logger.warning('Ctrl-C pressed, exiting sub processes ...')
+    elif result == -3:
+        # workers won't be run, just pass
+        pass
+    else:
+        logger.log(15, f'{data} downloaded successfully')
+
+
 class Downloader(Singleton):

     def __init__(self, path='', size=5, timeout=30, delay=0):
@@ -35,20 +46,21 @@ class Downloader(Singleton):
         self.timeout = timeout
         self.delay = delay

-    def download_(self, url, folder='', filename='', retried=0, proxy=None):
+    def download(self, url, folder='', filename='', retried=0, proxy=None):
         if self.delay:
             time.sleep(self.delay)
-        logger.info('Starting to download {0} ...'.format(url))
+        logger.info(f'Starting to download {url} ...')
         filename = filename if filename else os.path.basename(urlparse(url).path)
         base_filename, extension = os.path.splitext(filename)

+        save_file_path = os.path.join(folder, base_filename.zfill(3) + extension)
         try:
-            if os.path.exists(os.path.join(folder, base_filename.zfill(3) + extension)):
-                logger.warning('File: {0} exists, ignoring'.format(os.path.join(folder, base_filename.zfill(3) +
-                                                                                extension)))
+            if os.path.exists(save_file_path):
+                logger.warning(f'Ignored exists file: {save_file_path}')
                 return 1, url

             response = None
-            with open(os.path.join(folder, base_filename.zfill(3) + extension), "wb") as f:
+            with open(save_file_path, "wb") as f:
                 i = 0
                 while i < 10:
                     try:
@@ -77,14 +89,14 @@ class Downloader(Singleton):
         except (requests.HTTPError, requests.Timeout) as e:
             if retried < 3:
-                logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), retried))
-                return 0, self.download_(url=url, folder=folder, filename=filename,
+                logger.warning(f'Warning: {e}, retrying({retried}) ...')
+                return 0, self.download(url=url, folder=folder, filename=filename,
                                         retried=retried+1, proxy=proxy)
             else:
                 return 0, None

         except NHentaiImageNotExistException as e:
-            os.remove(os.path.join(folder, base_filename.zfill(3) + extension))
+            os.remove(save_file_path)
             return -1, url

         except Exception as e:
@@ -98,23 +110,8 @@ class Downloader(Singleton):

         return 1, url

-    def _download_callback(self, result):
-        result, data = result
-        if result == 0:
-            logger.warning('fatal errors occurred, ignored')
-            # exit(1)
-        elif result == -1:
-            logger.warning('url {} return status code 404'.format(data))
-        elif result == -2:
-            logger.warning('Ctrl-C pressed, exiting sub processes ...')
-        elif result == -3:
-            # workers wont be run, just pass
-            pass
-        else:
-            logger.log(15, '{0} downloaded successfully'.format(data))
-
-    def download(self, queue, folder='', regenerate_cbz=False):
-        if not isinstance(folder, text):
+    def start_download(self, queue, folder='', regenerate_cbz=False):
+        if not isinstance(folder, (str, )):
             folder = str(folder)

         if self.path:
@@ -122,18 +119,17 @@ class Downloader(Singleton):
         if os.path.exists(folder + '.cbz'):
             if not regenerate_cbz:
-                logger.warning('CBZ file \'{}.cbz\' exists, ignored download request'.format(folder))
+                logger.warning(f'CBZ file "{folder}.cbz" exists, ignored download request')
                 return

         if not os.path.exists(folder):
-            logger.warning('Path \'{0}\' does not exist, creating.'.format(folder))
             try:
                 os.makedirs(folder)
             except EnvironmentError as e:
-                logger.critical('{0}'.format(str(e)))
+                logger.critical(str(e))
         else:
-            logger.warning('Path \'{0}\' already exist.'.format(folder))
+            logger.warning(f'Path "{folder}" already exist.')

         queue = [(self, url, folder, constant.CONFIG['proxy']) for url in queue]
@@ -146,7 +142,7 @@ class Downloader(Singleton):

 def download_wrapper(obj, url, folder='', proxy=None):
     if sys.platform == 'darwin' or semaphore.get_value():
-        return Downloader.download_(obj, url=url, folder=folder, proxy=proxy)
+        return Downloader.download(obj, url=url, folder=folder, proxy=proxy)
     else:
         return -3, None
@@ -155,7 +151,7 @@ def init_worker():
     signal.signal(signal.SIGINT, subprocess_signal)


-def subprocess_signal(signal, frame):
+def subprocess_signal(sig, frame):
     if semaphore.acquire(timeout=1):
         logger.warning('Ctrl-C pressed, exiting sub processes ...')
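
As a reading aid (not part of the commit): a minimal sketch of the renamed downloader entry point after this change, assuming the constructor and start_download signatures shown in the diff; the gallery id, page count, and extension are hypothetical.

# Illustrative sketch, not part of this commit. Gallery id, page count and extension are hypothetical.
from nhentai.constant import IMAGE_URL
from nhentai.downloader import Downloader

downloader = Downloader(path='./downloads', size=5, timeout=30, delay=0)

# Per-page URLs follow the same f-string pattern used in doujinshi.py above.
queue = [f'{IMAGE_URL}/123456/{i}.jpg' for i in range(1, 4)]

# The old download() queue method is now start_download(); download() is the per-URL worker.
downloader.start_download(queue, folder='123456', regenerate_cbz=False)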

nhentai/logger.py

@@ -146,12 +146,10 @@ class ColorizingStreamHandler(logging.StreamHandler):
         if params and message:
             if message.lstrip() != message:
                 prefix = re.search(r"\s+", message).group(0)
-                message = message[len(prefix):]
             else:
                 prefix = ""
-            message = "%s%s" % (prefix, ''.join((self.csi, ';'.join(params),
-                                                 'm', message, self.reset)))
+            message += prefix + ''.join((self.csi, ';'.join(params), 'm', message, self.reset))

         return message

nhentai/parser.py

@@ -26,7 +26,7 @@ def login(username, password):
     logger.info('Getting CSRF token ...')
     if os.getenv('DEBUG'):
-        logger.info('CSRF token is {}'.format(csrf_token))
+        logger.info(f'CSRF token is {csrf_token}')

     login_dict = {
         'csrfmiddlewaretoken': csrf_token,
@@ -56,7 +56,7 @@ def _get_title_and_id(response):
         doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
         title = doujinshi_container.text.strip()
         title = title if len(title) < 85 else title[:82] + '...'
-        id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
+        id_ = re.search('/g/([0-9]+)/', doujinshi.a['href']).group(1)
         result.append({'id': id_, 'title': title})

     return result
@@ -67,7 +67,7 @@ def favorites_parser(page=None):
     html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
     count = html.find('span', attrs={'class': 'count'})
     if not count:
-        logger.error("Can't get your number of favorited doujins. Did the login failed?")
+        logger.error("Can't get your number of favorite doujinshis. Did the login failed?")
         return []

     count = int(count.text.strip('(').strip(')').replace(',', ''))
@@ -84,7 +84,7 @@ def favorites_parser(page=None):
     else:
         pages = 1

-    logger.info('You have %d favorites in %d pages.' % (count, pages))
+    logger.info(f'You have {count} favorites in {pages} pages.')

     if os.getenv('DEBUG'):
         pages = 1
@@ -93,40 +93,40 @@ def favorites_parser(page=None):
     for page in page_range_list:
         try:
-            logger.info('Getting doujinshi ids of page %d' % page)
-            resp = request('get', constant.FAV_URL + '?page=%d' % page).content
+            logger.info(f'Getting doujinshi ids of page {page}')
+            resp = request('get', f'{constant.FAV_URL}?page={page}').content
             result.extend(_get_title_and_id(resp))
         except Exception as e:
-            logger.error('Error: %s, continue', str(e))
+            logger.error(f'Error: {e}, continue')

     return result


 def doujinshi_parser(id_):
     if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
-        raise Exception('Doujinshi id({0}) is not valid'.format(id_))
+        raise Exception(f'Doujinshi id({id_}) is not valid')

     id_ = int(id_)
-    logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
+    logger.log(15, f'Fetching doujinshi information of id {id_}')
     doujinshi = dict()
     doujinshi['id'] = id_
-    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
+    url = f'{constant.DETAIL_URL}/{id_}/'

     try:
         response = request('get', url)
         if response.status_code in (200, ):
             response = response.content
         elif response.status_code in (404,):
-            logger.error("Doujinshi with id {0} cannot be found".format(id_))
+            logger.error(f'Doujinshi with id {id_} cannot be found')
             return []
         else:
-            logger.debug('Slow down and retry ({}) ...'.format(id_))
+            logger.debug(f'Slow down and retry ({id_}) ...')
             time.sleep(1)
             return doujinshi_parser(str(id_))

     except Exception as e:
-        logger.warning('Error: {}, ignored'.format(str(e)))
+        logger.warning(f'Error: {e}, ignored')
         return None

     html = BeautifulSoup(response, 'html.parser')
@@ -179,7 +179,7 @@ def doujinshi_parser(id_):

 def legacy_search_parser(keyword, sorting, page, is_page_all=False):
-    logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
+    logger.debug(f'Searching doujinshis of keyword {keyword}')
     response = None
     result = []
@@ -189,13 +189,13 @@ def legacy_search_parser(keyword, sorting, page, is_page_all=False):
         page = [1]

     for p in page:
-        logger.debug('Fetching page {} ...'.format(p))
+        logger.debug(f'Fetching page {p} ...')
         response = request('get', url=constant.LEGACY_SEARCH_URL,
                            params={'q': keyword, 'page': p, 'sort': sorting}).content
         result.extend(_get_title_and_id(response))

     if not result:
-        logger.warning('Not found anything of keyword {} on page {}'.format(keyword, page[0]))
+        logger.warning(f'Not found anything of keyword {keyword} on page {page[0]}')
         return result

     if is_page_all:
@@ -219,12 +219,11 @@ def print_doujinshi(doujinshi_list):
         return
     doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
     headers = ['id', 'doujinshi']
-    logger.info('Search Result || Found %i doujinshis \n' % doujinshi_list.__len__() +
+    logger.info(f'Search Result || Found {doujinshi_list.__len__()} doujinshis \n' +
                 tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))


 def search_parser(keyword, sorting, page, is_page_all=False):
-    # keyword = '+'.join([i.strip().replace(' ', '-').lower() for i in keyword.split(',')])
     result = []
     response = None
     if not page:
@@ -235,12 +234,12 @@ def search_parser(keyword, sorting, page, is_page_all=False):
         init_response = request('get', url.replace('%2B', '+')).json()
         page = range(1, init_response['num_pages']+1)

-    total = '/{0}'.format(page[-1]) if is_page_all else ''
+    total = f'/{page[-1]}' if is_page_all else ''
     not_exists_persist = False
     for p in page:
         i = 0

-        logger.info('Searching doujinshis using keywords "{0}" on page {1}{2}'.format(keyword, p, total))
+        logger.info(f'Searching doujinshis using keywords "{keyword}" on page {p}{total}')
         while i < 3:
             try:
                 url = request('get', url=constant.SEARCH_URL, params={'query': keyword,
@@ -252,7 +251,7 @@ def search_parser(keyword, sorting, page, is_page_all=False):
                 break

         if response is None or 'result' not in response:
-            logger.warning('No result in response in page {}'.format(p))
+            logger.warning(f'No result in response in page {p}')
             if not_exists_persist is True:
                 break
             continue
@@ -264,20 +263,20 @@ def search_parser(keyword, sorting, page, is_page_all=False):
         not_exists_persist = False

     if not result:
-        logger.warning('No results for keywords {}'.format(keyword))
+        logger.warning(f'No results for keywords {keyword}')

     return result


 def __api_suspended_doujinshi_parser(id_):
     if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
-        raise Exception('Doujinshi id({0}) is not valid'.format(id_))
+        raise Exception(f'Doujinshi id({id_}) is not valid')

     id_ = int(id_)
-    logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
+    logger.log(15, f'Fetching information of doujinshi id {id_}')
     doujinshi = dict()
     doujinshi['id'] = id_
-    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
+    url = f'{constant.DETAIL_URL}/{id_}'
     i = 0
     while 5 > i:
         try:
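
For context (not part of the commit): a minimal sketch of the parser entry points touched above, assuming the signatures shown in the diff; the gallery id, keyword, and sorting value are hypothetical.

# Illustrative sketch, not part of this commit. Id, keyword and sorting value are hypothetical.
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi

info = doujinshi_parser(123456)       # dict of gallery metadata; [] on 404, None on other errors
results = search_parser('full color', sorting='popular', page=[1])
print_doujinshi(results)              # logs the tabulated "Search Result || Found N doujinshis" listing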

nhentai/serializer.py

@@ -5,7 +5,7 @@ from xml.sax.saxutils import escape
 from nhentai.constant import LANGUAGEISO


-def serialize_json(doujinshi, dir):
+def serialize_json(doujinshi, output_dir):
     metadata = {'title': doujinshi.name,
                 'subtitle': doujinshi.info.subtitle}
     if doujinshi.info.date:
@@ -26,13 +26,13 @@ def serialize_json(doujinshi, dir):
     metadata['URL'] = doujinshi.url
     metadata['Pages'] = doujinshi.pages

-    with open(os.path.join(dir, 'metadata.json'), 'w') as f:
+    with open(os.path.join(output_dir, 'metadata.json'), 'w') as f:
         json.dump(metadata, f, separators=(',', ':'))


-def serialize_comic_xml(doujinshi, dir):
+def serialize_comic_xml(doujinshi, output_dir):
     from iso8601 import parse_date
-    with open(os.path.join(dir, 'ComicInfo.xml'), 'w', encoding="utf-8") as f:
+    with open(os.path.join(output_dir, 'ComicInfo.xml'), 'w', encoding="utf-8") as f:
         f.write('<?xml version="1.0" encoding="utf-8"?>\n')
         f.write('<ComicInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" '
                 'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n')
@@ -74,7 +74,7 @@ def serialize_comic_xml(doujinshi, dir):

 def xml_write_simple_tag(f, name, val, indent=1):
-    f.write('{}<{}>{}</{}>\n'.format(' ' * indent, name, escape(str(val)), name))
+    f.write(f'{" "*indent}<{name}>{escape(str(val))}</{name}>\n')


 def merge_json():

nhentai/utils.py

@@ -86,7 +86,7 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
         try:
             os.makedirs(doujinshi_dir)
         except EnvironmentError as e:
-            logger.critical('{0}'.format(str(e)))
+            logger.critical(e)

     file_list = os.listdir(doujinshi_dir)
     file_list.sort()
@@ -96,15 +96,13 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
             continue
         image_html += f'<img src="{image}" class="image-item"/>\n'

-    html = readfile('viewer/{}/index.html'.format(template))
-    css = readfile('viewer/{}/styles.css'.format(template))
-    js = readfile('viewer/{}/scripts.js'.format(template))
+    html = readfile(f'viewer/{template}/index.html')
+    css = readfile(f'viewer/{template}/styles.css')
+    js = readfile(f'viewer/{template}/scripts.js')

     if doujinshi_obj is not None:
         serialize_json(doujinshi_obj, doujinshi_dir)
         name = doujinshi_obj.name
-        if sys.version_info < (3, 0):
-            name = doujinshi_obj.name.encode('utf-8')
     else:
         name = {'title': 'nHentai HTML Viewer'}
@@ -187,7 +185,7 @@ def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_
         doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
         if write_comic_info:
             serialize_comic_xml(doujinshi_obj, doujinshi_dir)
-        cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), '{}.cbz'.format(doujinshi_obj.filename))
+        cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), f'{doujinshi_obj.filename}.cbz')
     else:
         cbz_filename = './doujinshi.cbz'
         doujinshi_dir = '.'
@@ -195,7 +193,7 @@ def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_
     file_list = os.listdir(doujinshi_dir)
     file_list.sort()

-    logger.info('Writing CBZ file to path: {}'.format(cbz_filename))
+    logger.info(f'Writing CBZ file to path: {cbz_filename}')
     with zipfile.ZipFile(cbz_filename, 'w') as cbz_pf:
         for image in file_list:
             image_path = os.path.join(doujinshi_dir, image)

requirements.txt

@@ -4,3 +4,6 @@ BeautifulSoup4>=4.0.0
 tabulate>=0.7.5
 future>=0.15.2
 iso8601 >= 0.1
+urllib3~=1.26.9
+setuptools~=60.2.0