mirror of https://github.com/RicterZ/nhentai.git
synced 2025-07-01 16:09:28 +02:00

Compare commits (15 commits)
SHA1:
35c55503fa
29aac84d53
4ed4523782
4223326c13
a248ff98c4
021f17d229
4162eabe93
dc54a43610
4ecffaff55
457f12d40d
499081a9cd
53aa04af1e
473f948565
f701485840
d8e4f50609
nhentai/__init__.py
@@ -1,3 +1,3 @@
-__version__ = '0.5.4'
+__version__ = '0.5.6'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'
nhentai/cmdline.py
@@ -73,6 +73,8 @@ def cmd_parser():
                       help='search doujinshi by keyword')
     parser.add_option('--favorites', '-F', action='store_true', dest='favorites',
                       help='list or download your favorites')
+    parser.add_option('--artist', '-a', action='store', dest='artist',
+                      help='list doujinshi by artist name')
 
     # page options
     parser.add_option('--page-all', dest='page_all', action='store_true', default=False,
@@ -211,12 +213,12 @@ def cmd_parser():
             _ = [i.strip() for i in f.readlines()]
             args.id = set(int(i) for i in _ if i.isdigit())
 
-    if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites:
+    if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites and not args.artist:
         logger.critical('Doujinshi id(s) are required for downloading')
         parser.print_help()
         sys.exit(1)
 
-    if not args.keyword and not args.id and not args.favorites:
+    if not args.keyword and not args.id and not args.favorites and not args.artist:
         parser.print_help()
         sys.exit(1)
 
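Taken together, the two cmdline.py hunks register the new flag and let an artist-only invocation pass the guard clauses that previously forced an exit. A minimal optparse sketch of the same wiring (the artist name and the simplified guard variables are illustrative, not part of the diff):

from optparse import OptionParser

parser = OptionParser()
parser.add_option('--artist', '-a', action='store', dest='artist',
                  help='list doujinshi by artist name')
options, _ = parser.parse_args(['--artist', 'some-artist'])  # hypothetical name

# Stand-ins for args.keyword / args.id / args.favorites from the real parser.
keyword, ids, favorites = None, set(), False
if not keyword and not ids and not favorites and not options.artist:
    raise SystemExit(1)  # before this change, artist-only runs exited here
print(options.artist)  # -> some-artist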
nhentai/command.py
@@ -57,6 +57,10 @@ def main():
         doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list,
                                     is_page_all=options.page_all)
 
+    elif options.artist:
+        doujinshis = legacy_search_parser(options.artist, sorting=options.sorting, page=page_list,
+                                          is_page_all=options.page_all, type_='ARTIST')
+
     elif not doujinshi_ids:
         doujinshi_ids = options.id
 
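Worth noting: the artist flow reuses legacy_search_parser (the HTML scraper) with type_='ARTIST' rather than the JSON search API used for keyword queries, presumably because artist listings are only exposed as HTML pages; the parser.py hunks below add the corresponding type_ switch.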
nhentai/constant.py
@@ -3,6 +3,23 @@ import os
 import tempfile
 
 from urllib.parse import urlparse
+from platform import system
+
+
+def get_nhentai_home() -> str:
+    home = os.getenv('HOME', tempfile.gettempdir())
+
+    if system() == 'Linux':
+        xdgdat = os.getenv('XDG_DATA_HOME')
+        if xdgdat and os.path.exists(os.path.join(xdgdat, 'nhentai')):
+            return os.path.join(xdgdat, 'nhentai')
+        if home and os.path.exists(os.path.join(home, '.nhentai')):
+            return os.path.join(home, '.nhentai')
+        if xdgdat:
+            return os.path.join(xdgdat, 'nhentai')
+
+    # Use old default path in other systems
+    return os.path.join(home, '.nhentai')
+
 
 DEBUG = os.getenv('DEBUG', False)
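The lookup order added here prefers an existing XDG data directory, then an existing legacy ~/.nhentai, then falls back to XDG_DATA_HOME even if nothing exists there yet; non-Linux systems keep the old default. A small sketch of that behavior (env values and paths are illustrative, and this assumes the nhentai package is importable):

import os
from unittest import mock
from nhentai.constant import get_nhentai_home

with mock.patch.dict(os.environ, {'XDG_DATA_HOME': '/tmp/xdg', 'HOME': '/tmp/home'}):
    # On Linux, with neither /tmp/xdg/nhentai nor /tmp/home/.nhentai existing,
    # the function falls through to the bare XDG_DATA_HOME branch and returns
    # '/tmp/xdg/nhentai'; on other systems it returns '/tmp/home/.nhentai'.
    print(get_nhentai_home())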
@@ -11,15 +28,22 @@ BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
 DETAIL_URL = f'{BASE_URL}/g'
 LEGACY_SEARCH_URL = f'{BASE_URL}/search/'
 SEARCH_URL = f'{BASE_URL}/api/galleries/search'
+ARTIST_URL = f'{BASE_URL}/artist/'
+
 TAG_API_URL = f'{BASE_URL}/api/galleries/tagged'
 LOGIN_URL = f'{BASE_URL}/login/'
 CHALLENGE_URL = f'{BASE_URL}/challenge'
 FAV_URL = f'{BASE_URL}/favorites/'
 
 IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i.{urlparse(BASE_URL).hostname}/galleries'
+IMAGE_URL_MIRRORS = [
+    f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}'
+    f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}'
+    f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}'
+]
 
-NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
+NHENTAI_HOME = get_nhentai_home()
 NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
 NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')
 
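One thing to flag in this hunk: the three f-strings inside IMAGE_URL_MIRRORS are not separated by commas, so Python's implicit string concatenation collapses them into a single malformed entry. A sketch of what the list presumably intends (standalone, with BASE_URL inlined for illustration):

from urllib.parse import urlparse

BASE_URL = 'https://nhentai.net'

# With commas, each mirror is its own element; without them (as in the diff),
# len(IMAGE_URL_MIRRORS) would be 1 and the single string would be unusable.
IMAGE_URL_MIRRORS = [
    f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}',
    f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}',
    f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}',
]
print(IMAGE_URL_MIRRORS[0])  # -> https://i3.nhentai.net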
@@ -30,7 +54,8 @@ CONFIG = {
     'cookie': '',
     'language': '',
     'template': '',
-    'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)'
+    'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
+    'max_filename': 85
 }
 
 LANGUAGE_ISO = {
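The new max_filename key defaults to 85, which matches the limit previously hardcoded in search_parser (see the parser.py hunk below), so default behavior is unchanged; users can now raise or lower it via config.json.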
nhentai/downloader.py
@@ -67,10 +67,14 @@ class Downloader(Singleton):
         try:
             response = request('get', url, stream=True, timeout=self.timeout, proxies=proxy)
             if response.status_code != 200:
-                raise NHentaiImageNotExistException
-
-        except NHentaiImageNotExistException as e:
-            raise e
+                path = urlparse(url).path
+                for mirror in constant.IMAGE_URL_MIRRORS:
+                    print(f'{mirror}{path}')
+                    mirror_url = f'{mirror}{path}'
+                    response = request('get', mirror_url, stream=True,
+                                       timeout=self.timeout, proxies=proxy)
+                    if response.status_code == 200:
+                        break
 
         except Exception as e:
             i += 1
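Two small observations on this hunk: the bare print(f'{mirror}{path}') looks like leftover debug output (logger would be the idiomatic choice), and if every mirror also fails, execution continues with the last non-200 response instead of raising as the old code did. A standalone sketch of the fallback pattern, using requests directly since the project's request() wrapper is not shown in this diff:

import requests
from urllib.parse import urlparse

def fetch_with_mirrors(url, mirrors, timeout=30):
    """Try the primary URL, then each mirror host with the same path."""
    response = requests.get(url, stream=True, timeout=timeout)
    if response.status_code != 200:
        path = urlparse(url).path
        for mirror in mirrors:
            response = requests.get(f'{mirror}{path}', stream=True, timeout=timeout)
            if response.status_code == 200:
                break  # first healthy mirror wins
    return response  # may still be non-200 if all mirrors failed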
nhentai/parser.py
@@ -135,6 +135,7 @@ def doujinshi_parser(id_, counter=0):
         logger.warning(f'Error: {e}, ignored')
         return None
 
+    # print(response)
     html = BeautifulSoup(response, 'html.parser')
     doujinshi_info = html.find('div', attrs={'id': 'info'})
 
@@ -240,13 +241,21 @@ def print_doujinshi(doujinshi_list):
     print(tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
 
 
-def legacy_search_parser(keyword, sorting, page, is_page_all=False):
+def legacy_search_parser(keyword, sorting, page, is_page_all=False, type_='SEARCH'):
     logger.info(f'Searching doujinshis of keyword {keyword}')
     result = []
 
+    if type_ not in ('SEARCH', 'ARTIST', ):
+        raise ValueError('Invalid type')
+
     if is_page_all:
-        response = request('get', url=constant.LEGACY_SEARCH_URL,
-                           params={'q': keyword, 'page': 1, 'sort': sorting}).content
+        if type_ == 'SEARCH':
+            response = request('get', url=constant.LEGACY_SEARCH_URL,
+                               params={'q': keyword, 'page': 1, 'sort': sorting}).content
+        else:
+            url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
+            response = request('get', url=url, params={'page': 1}).content
+
         html = BeautifulSoup(response, 'lxml')
         pagination = html.find(attrs={'class': 'pagination'})
         last_page = pagination.find(attrs={'class': 'last'})
@@ -258,8 +267,13 @@ def legacy_search_parser(keyword, sorting, page, is_page_all=False):
 
     for p in pages:
         logger.info(f'Fetching page {p} ...')
-        response = request('get', url=constant.LEGACY_SEARCH_URL,
-                           params={'q': keyword, 'page': p, 'sort': sorting}).content
+        if type_ == 'SEARCH':
+            response = request('get', url=constant.LEGACY_SEARCH_URL,
+                               params={'q': keyword, 'page': p, 'sort': sorting}).content
+        else:
+            url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
+            response = request('get', url=url, params={'page': p}).content
+
         if response is None:
             logger.warning(f'No result in response in page {p}')
             continue
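The artist branch builds a path-style URL rather than query parameters, and 'recent' maps to the bare artist page with no extra segment. For illustration (the artist name is a placeholder):

ARTIST_URL = 'https://nhentai.net/artist/'  # from nhentai/constant.py above

def artist_page_url(keyword, sorting):
    # 'recent' is the default listing, so it adds no path segment.
    return ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)

print(artist_page_url('some-artist', 'recent'))   # -> .../artist/some-artist/
print(artist_page_url('some-artist', 'popular'))  # -> .../artist/some-artist/popular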
@@ -313,7 +327,9 @@ def search_parser(keyword, sorting, page, is_page_all=False):
 
     for row in response['result']:
         title = row['title']['english']
-        title = title[:85] + '..' if len(title) > 85 else title
+        title = title[:constant.CONFIG['max_filename']] + '..' if \
+            len(title) > constant.CONFIG['max_filename'] else title
+
         result.append({'id': row['id'], 'title': title})
 
     not_exists_persist = False
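The truncation itself is unchanged apart from the source of the limit; a self-contained sketch of the behavior with the default value:

CONFIG = {'max_filename': 85}  # default from nhentai/constant.py above

title = 'x' * 100
title = title[:CONFIG['max_filename']] + '..' if \
    len(title) > CONFIG['max_filename'] else title
print(len(title))  # -> 87 (85 characters plus the '..' marker)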
nhentai/utils.py
@@ -166,6 +166,9 @@ def generate_main_html(output_dir='./'):
 def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True, move_to_folder=False):
     if doujinshi_obj is not None:
         doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
+        if os.path.exists(doujinshi_dir+".cbz"):
+            logger.warning(f'Comic Book CBZ file exists, skip "{doujinshi_dir}"')
+            return
         if write_comic_info:
             serialize_comic_xml(doujinshi_obj, doujinshi_dir)
         cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), f'{doujinshi_obj.filename}.cbz')
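This guard makes repeated runs idempotent: an existing .cbz short-circuits regeneration. Note that the early return also skips the rm_origin_dir and move_to_folder handling later in generate_cbz, so a pre-existing archive leaves the original image directory untouched.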
nhentai/viewer/main.js
@@ -139,7 +139,7 @@ function filter_searcher(){
                 break
             }
         }
-        if (verifier){doujinshi_id.push(data[i].Folder);}
+        if (verifier){doujinshi_id.push(data[i].Folder.replace("_", " "));}
     }
     var gallery = document.getElementsByClassName("gallery-favorite");
     for (var i = 0; i < gallery.length; i++){
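A JavaScript subtlety in this change: String.prototype.replace with a string pattern replaces only the first occurrence, so a folder named a_b_c becomes "a b_c". Replacing every underscore would require a regex, i.e. data[i].Folder.replace(/_/g, " "); whether first-only replacement is intended here is not clear from the diff.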
nhentai/viewer/main.js
@@ -174,4 +174,4 @@ function tag_maker(data){
             document.getElementById("tags").appendChild(node);
         }
     }
 }