fix tag download issue #40

This commit is contained in:
RicterZ 2019-01-15 21:09:24 +08:00
parent 0c8264bcc6
commit b2d622f11a
2 changed files with 25 additions and 59 deletions

View File

@@ -5,7 +5,7 @@ import signal
import platform import platform
from nhentai.cmdline import cmd_parser, banner from nhentai.cmdline import cmd_parser, banner
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser, tag_guessing, tag_parser from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser, tag_parser
from nhentai.doujinshi import Doujinshi from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader from nhentai.downloader import Downloader
from nhentai.logger import logger from nhentai.logger import logger
@@ -28,12 +28,10 @@ def main():
doujinshi_list.append(Doujinshi(**doujinshi_info)) doujinshi_list.append(Doujinshi(**doujinshi_info))
if options.tag: if options.tag:
tag_id = tag_guessing(options.tag) doujinshis = tag_parser(options.tag, max_page=options.max_page)
if tag_id: print_doujinshi(doujinshis)
doujinshis = tag_parser(tag_id, max_page=options.max_page) if options.is_download:
print_doujinshi(doujinshis) doujinshi_ids = map(lambda d: d['id'], doujinshis)
if options.is_download:
doujinshi_ids = map(lambda d: d['id'], doujinshis)
if options.keyword: if options.keyword:
doujinshis = search_parser(options.keyword, options.page) doujinshis = search_parser(options.keyword, options.page)

View File

@@ -158,7 +158,7 @@ def search_parser(keyword, page):
for doujinshi in doujinshi_search_result: for doujinshi in doujinshi_search_result:
doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'}) doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
title = doujinshi_container.text.strip() title = doujinshi_container.text.strip()
title = (title[:85] + '..') if len(title) > 85 else title title = title if len(title) < 85 else title[:82] + '...'
id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1) id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
result.append({'id': id_, 'title': title}) result.append({'id': id_, 'title': title})
if not result: if not result:
@@ -253,38 +253,18 @@ def print_doujinshi(doujinshi_list):
tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst')) tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
def tag_parser(tag_id, max_page=1): def __api_suspended_tag_parser(tag_id, max_page=1):
logger.info('Searching for doujinshi with tag id {0}'.format(tag_id)) logger.info('Searching for doujinshi with tag id {0}'.format(tag_id))
result = [] result = []
i = 0 response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
while i < 5:
try:
response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
except Exception as e:
i += 1
if not i < 5:
logger.critical(str(e))
exit(1)
continue
break
page = max_page if max_page <= response['num_pages'] else int(response['num_pages']) page = max_page if max_page <= response['num_pages'] else int(response['num_pages'])
for i in range(1, page + 1): for i in range(1, page + 1):
logger.info('Getting page {} ...'.format(i)) logger.info('Getting page {} ...'.format(i))
if page != 1: if page != 1:
i = 0 response = request('get', url=constant.TAG_API_URL,
while i < 5: params={'sort': 'popular', 'tag_id': tag_id}).json()
try:
response = request('get', url=constant.TAG_API_URL,
params={'sort': 'popular', 'tag_id': tag_id}).json()
except Exception as e:
i += 1
if not i < 5:
logger.critical(str(e))
exit(1)
continue
break
for row in response['result']: for row in response['result']:
title = row['title']['english'] title = row['title']['english']
title = title[:85] + '..' if len(title) > 85 else title title = title[:85] + '..' if len(title) > 85 else title
@@ -296,42 +296,30 @@ def tag_parser(tag_id, max_page=1):
return result return result
def tag_guessing(tag_name): def tag_parser(tag_name, max_page=1):
result = []
tag_name = tag_name.lower() tag_name = tag_name.lower()
tag_name = tag_name.replace(' ', '-') tag_name = tag_name.replace(' ', '-')
logger.info('Trying to get tag_id of tag \'{0}\''.format(tag_name))
i = 0 logger.info('Searching for doujinshi with tag \'{0}\''.format(tag_name))
while i < 5: response = request('get', url='%s/%s' % (constant.TAG_URL, tag_name)).content
try:
response = request('get', url='%s/%s' % (constant.TAG_URL, tag_name)).content
except Exception as e:
i += 1
if not i < 5:
logger.critical(str(e))
exit(1)
continue
break
html = BeautifulSoup(response, 'html.parser') html = BeautifulSoup(response, 'html.parser')
first_item = html.find('div', attrs={'class': 'gallery'}) doujinshi_items = html.find_all('div', attrs={'class': 'gallery'})
if not first_item: if not doujinshi_items:
logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name)) logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
return return
doujinshi_id = re.findall('(\d+)', first_item.a.attrs['href']) for i in doujinshi_items[:2]:
if not doujinshi_id: doujinshi_id = i.a.attrs['href'].strip('/g')
logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name)) doujinshi_title = i.a.text.strip()
return doujinshi_title = doujinshi_title if len(doujinshi_title) < 85 else doujinshi_title[:82] + '...'
result.append({'title': doujinshi_title, 'id': doujinshi_id})
ret = doujinshi_parser(doujinshi_id[0]) if not result:
if 'tag' in ret and tag_name in ret['tag']: logger.warn('No results for tag \'{}\''.format(tag_name))
tag_id = ret['tag'][tag_name]
logger.info('Tag id of tag \'{0}\' is {1}'.format(tag_name, tag_id))
else:
logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
return
return tag_id return result
if __name__ == '__main__': if __name__ == '__main__':