fix #18 #19 use nhentai api

Ricter Z 2018-04-19 17:21:43 +08:00
parent 22cf2592dd
commit 967e0b4ff5
4 changed files with 41 additions and 50 deletions
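The commit drops the BeautifulSoup HTML scraping and reads the same data from nhentai's JSON API. As a rough sketch (not part of the commit; the gallery id is a placeholder and only the endpoint and field names visible in the diffs below are used), the new detail flow boils down to a single GET against the gallery endpoint:

    import requests

    BASE_URL = 'https://nhentai.net'
    DETAIL_URL = '%s/api/gallery' % BASE_URL

    def fetch_gallery(gallery_id):
        # One JSON request replaces the old HTML page scrape.
        response = requests.get('{0}/{1}'.format(DETAIL_URL, gallery_id), timeout=30).json()
        return {
            'name': response['title']['english'],
            'subtitle': response['title']['japanese'],
            'img_id': response['media_id'],
            # one single-letter type code per page: 'j' -> jpg, 'p' -> png
            'ext': ''.join(page['t'] for page in response['images']['pages']),
            'pages': len(response['images']['pages']),
        }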

nhentai/constant.py

@@ -5,8 +5,8 @@ from nhentai.utils import urlparse
 BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
-DETAIL_URL = '%s/g' % BASE_URL
-SEARCH_URL = '%s/search/' % BASE_URL
+DETAIL_URL = '%s/api/gallery' % BASE_URL
+SEARCH_URL = '%s/api/galleries/search' % BASE_URL
 LOGIN_URL = '%s/login/' % BASE_URL
 FAV_URL = '%s/favorites/' % BASE_URL

nhentai/doujinshi.py

@@ -8,6 +8,12 @@ from nhentai.logger import logger
 from nhentai.utils import format_filename
 
+EXT_MAP = {
+    'j': 'jpg',
+    'p': 'png',
+}
+
 class DoujinshiInfo(dict):
     def __init__(self, **kwargs):
         super(DoujinshiInfo, self).__init__(**kwargs)
@@ -20,7 +26,7 @@ class DoujinshiInfo(dict):
 class Doujinshi(object):
-    def __init__(self, name=None, id=None, img_id=None, ext='jpg', pages=0, **kwargs):
+    def __init__(self, name=None, id=None, img_id=None, ext='', pages=0, **kwargs):
         self.name = name
         self.id = id
         self.img_id = img_id
@@ -50,8 +56,9 @@ class Doujinshi(object):
         logger.info('Start download doujinshi: %s' % self.name)
         if self.downloader:
             download_queue = []
-            for i in range(1, self.pages + 1):
-                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext))
+            for i in range(len(self.ext)):
+                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i+1, EXT_MAP[self.ext[i]]))
             self.downloader.download(download_queue, format_filename('%s-%s' % (self.id, self.name[:200])))
         else:
             logger.critical('Downloader has not be loaded')
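Page count and per-page extension now come from the API's type string rather than from a single cover extension. A standalone sketch of the queue construction, where the IMAGE_URL value is an assumption (the real constant lives in constant.py) and the media id is a placeholder:

    IMAGE_URL = 'https://i.nhentai.net/galleries'  # assumed value, defined in constant.py
    EXT_MAP = {'j': 'jpg', 'p': 'png'}

    def build_download_queue(img_id, ext):
        # 'ext' holds one type code per page, so its length doubles as the page count.
        return ['%s/%d/%d.%s' % (IMAGE_URL, int(img_id), i + 1, EXT_MAP[ext[i]])
                for i in range(len(ext))]

    # build_download_queue('123456', 'jjp') ->
    #   ['https://i.nhentai.net/galleries/123456/1.jpg',
    #    'https://i.nhentai.net/galleries/123456/2.jpg',
    #    'https://i.nhentai.net/galleries/123456/3.png']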

nhentai/downloader.py

@@ -36,6 +36,11 @@ class Downloader(Singleton):
         filename = filename if filename else os.path.basename(urlparse(url).path)
         base_filename, extension = os.path.splitext(filename)
         try:
+            if os.path.exists(os.path.join(folder, base_filename.zfill(3) + extension)):
+                logger.warning('File: {0} existed, ignore.'.format(os.path.join(folder, base_filename.zfill(3) +
+                                                                                extension)))
+                return 1, url
+
             with open(os.path.join(folder, base_filename.zfill(3) + extension), "wb") as f:
                 response = request('get', url, stream=True, timeout=self.timeout)
                 if response.status_code != 200:
@@ -75,7 +80,7 @@ class Downloader(Singleton):
         logger.log(15, '{0} download successfully'.format(data))
 
     def download(self, queue, folder=''):
-        if not isinstance(folder, (text)):
+        if not isinstance(folder, text):
            folder = str(folder)
 
        if self.path:
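With the pre-download existence check added above, an interrupted run can be restarted without re-fetching pages that are already on disk. A minimal sketch of the naming scheme the check relies on:

    import os

    def page_already_saved(folder, filename):
        # Pages are stored zero-padded as 001.jpg, 002.png, ... so the check is a plain stat.
        base_filename, extension = os.path.splitext(filename)
        return os.path.exists(os.path.join(folder, base_filename.zfill(3) + extension))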

nhentai/parser.py

@@ -87,49 +87,29 @@ def doujinshi_parser(id_):
     logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
     doujinshi = dict()
     doujinshi['id'] = id_
-    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
+    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
     try:
-        response = request('get', url).content
+        response = request('get', url).json()
     except Exception as e:
         logger.critical(str(e))
         exit(1)
 
-    html = BeautifulSoup(response, 'html.parser')
-    doujinshi_info = html.find('div', attrs={'id': 'info'})
-
-    title = doujinshi_info.find('h1').text
-    subtitle = doujinshi_info.find('h2')
-
-    doujinshi['name'] = title
-    doujinshi['subtitle'] = subtitle.text if subtitle else ''
-
-    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
-    img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png)$', doujinshi_cover.a.img.attrs['data-src'])
-    if not img_id:
-        logger.critical('Tried yo get image id failed')
-        exit(1)
-
-    doujinshi['img_id'] = img_id.group(1)
-    doujinshi['ext'] = img_id.group(2)
-
-    pages = 0
-    for _ in doujinshi_info.find_all('div', class_=''):
-        pages = re.search('([\d]+) pages', _.text)
-        if pages:
-            pages = pages.group(1)
-            break
-    doujinshi['pages'] = int(pages)
+    doujinshi['name'] = response['title']['english']
+    doujinshi['subtitle'] = response['title']['japanese']
+    doujinshi['img_id'] = response['media_id']
+    doujinshi['ext'] = ''.join(map(lambda s: s['t'], response['images']['pages']))
+    doujinshi['pages'] = len(response['images']['pages'])
 
     # gain information of the doujinshi
-    information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
-    needed_fields = ['Characters', 'Artists', 'Language', 'Tags']
-    for field in information_fields:
-        field_name = field.contents[0].strip().strip(':')
-        if field_name in needed_fields:
-            data = [sub_field.contents[0].strip() for sub_field in
-                    field.find_all('a', attrs={'class': 'tag'})]
-            doujinshi[field_name.lower()] = ', '.join(data)
+    needed_fields = ['character', 'artist', 'language']
+    for tag in response['tags']:
+        tag_type = tag['type']
+        if tag_type in needed_fields:
+            if tag_type not in doujinshi:
+                doujinshi[tag_type] = tag['name']
+            else:
+                doujinshi[tag_type] += tag['name']
 
     return doujinshi
 
@@ -138,20 +118,19 @@ def search_parser(keyword, page):
     logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
     result = []
     try:
-        response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
+        response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
+        if 'result' not in response:
+            raise Exception('No result in response')
     except requests.ConnectionError as e:
         logger.critical(e)
         logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
         exit(1)
 
-    html = BeautifulSoup(response, 'html.parser')
-    doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
-    for doujinshi in doujinshi_search_result:
-        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
-        title = doujinshi_container.text.strip()
-        title = (title[:85] + '..') if len(title) > 85 else title
-        id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
-        result.append({'id': id_, 'title': title})
+    for row in response['result']:
+        title = row['title']['english']
+        title = title[:85] + '..' if len(title) > 85 else title
+        result.append({'id': row['id'], 'title': title})
 
     if not result:
         logger.warn('Not found anything of keyword {}'.format(keyword))
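The search side follows the same pattern: the keyword goes to the galleries search endpoint as a 'query' parameter and the JSON 'result' list is consumed directly. A condensed sketch of that path, assuming the endpoint from the constant.py change above:

    import requests

    SEARCH_URL = 'https://nhentai.net/api/galleries/search'

    def search(keyword, page=1):
        response = requests.get(SEARCH_URL, params={'query': keyword, 'page': page}, timeout=30).json()
        if 'result' not in response:
            raise Exception('No result in response')
        result = []
        for row in response['result']:
            title = row['title']['english']
            title = title[:85] + '..' if len(title) > 85 else title
            result.append({'id': row['id'], 'title': title})
        return result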