Merge pull request #79 from Waiifu/added-sorting

sorting option
This commit is contained in:
Ricter Zheng 2019-07-30 22:53:40 +08:00 committed by GitHub
commit 7eeed17ea5
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
4 changed files with 17 additions and 12 deletions

View File

@@ -1,3 +1,3 @@
__version__ = '0.3.6' __version__ = '0.3.7'
__author__ = 'RicterZ' __author__ = 'RicterZ'
__email__ = 'ricterzheng@gmail.com' __email__ = 'ricterzheng@gmail.com'

View File

@@ -58,6 +58,8 @@ def cmd_parser():
help='page number of search results') help='page number of search results')
parser.add_option('--max-page', type='int', dest='max_page', action='store', default=1, parser.add_option('--max-page', type='int', dest='max_page', action='store', default=1,
help='The max page when recursive download tagged doujinshi') help='The max page when recursive download tagged doujinshi')
parser.add_option('--sorting', type='string', dest='sorting', action='store', default='date',
help='sorting of doujinshi, e.g. date/popular')
# download options # download options
parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='', parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='',

View File

@@ -40,13 +40,13 @@ def main():
doujinshi_ids = map(lambda d: d['id'], doujinshis) doujinshi_ids = map(lambda d: d['id'], doujinshis)
elif options.tag: elif options.tag:
doujinshis = tag_parser(options.tag, max_page=options.max_page) doujinshis = tag_parser(options.tag, options.sorting, max_page=options.max_page)
print_doujinshi(doujinshis) print_doujinshi(doujinshis)
if options.is_download and doujinshis: if options.is_download and doujinshis:
doujinshi_ids = map(lambda d: d['id'], doujinshis) doujinshi_ids = map(lambda d: d['id'], doujinshis)
elif options.keyword: elif options.keyword:
doujinshis = search_parser(options.keyword, options.page) doujinshis = search_parser(options.keyword, options.sorting, options.page)
print_doujinshi(doujinshis) print_doujinshi(doujinshis)
if options.is_download: if options.is_download:
doujinshi_ids = map(lambda d: d['id'], doujinshis) doujinshi_ids = map(lambda d: d['id'], doujinshis)

View File

@@ -169,10 +169,10 @@ def doujinshi_parser(id_):
return doujinshi return doujinshi
def search_parser(keyword, page): def search_parser(keyword, sorting, page):
logger.debug('Searching doujinshis of keyword {0}'.format(keyword)) logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
try: try:
response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page, 'sort': sorting}).content
except requests.ConnectionError as e: except requests.ConnectionError as e:
logger.critical(e) logger.critical(e)
logger.warn('If you are in China, please configure the proxy to fu*k GFW.') logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
@@ -194,14 +194,17 @@ def print_doujinshi(doujinshi_list):
tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst')) tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
def tag_parser(tag_name, max_page=1): def tag_parser(tag_name, sorting, max_page=1):
result = [] result = []
tag_name = tag_name.lower() tag_name = tag_name.lower()
tag_name = tag_name.replace(' ', '-') tag_name = tag_name.replace(' ', '-')
if sorting == 'date':
sorting = ''
for p in range(1, max_page + 1): for p in range(1, max_page + 1):
logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name)) logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name))
response = request('get', url='%s/%s/?page=%d' % (constant.TAG_URL, tag_name, p)).content response = request('get', url='%s/%s/%s?page=%d' % (constant.TAG_URL, tag_name, sorting, p)).content
result += _get_title_and_id(response) result += _get_title_and_id(response)
if not result: if not result:
@@ -214,13 +217,13 @@ def tag_parser(tag_name, max_page=1):
return result return result
def __api_suspended_search_parser(keyword, page): def __api_suspended_search_parser(keyword, sorting, page):
logger.debug('Searching doujinshis using keywords {0}'.format(keyword)) logger.debug('Searching doujinshis using keywords {0}'.format(keyword))
result = [] result = []
i = 0 i = 0
while i < 5: while i < 5:
try: try:
response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json() response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page, 'sort': sorting}).json()
except Exception as e: except Exception as e:
i += 1 i += 1
if not i < 5: if not i < 5:
@@ -244,10 +247,10 @@ def __api_suspended_search_parser(keyword, page):
return result return result
def __api_suspended_tag_parser(tag_id, max_page=1): def __api_suspended_tag_parser(tag_id, sorting, max_page=1):
logger.info('Searching for doujinshi with tag id {0}'.format(tag_id)) logger.info('Searching for doujinshi with tag id {0}'.format(tag_id))
result = [] result = []
response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json() response = request('get', url=constant.TAG_API_URL, params={'sort': sorting, 'tag_id': tag_id}).json()
page = max_page if max_page <= response['num_pages'] else int(response['num_pages']) page = max_page if max_page <= response['num_pages'] else int(response['num_pages'])
for i in range(1, page + 1): for i in range(1, page + 1):
@@ -255,7 +258,7 @@ def __api_suspended_tag_parser(tag_id, max_page=1):
if page != 1: if page != 1:
response = request('get', url=constant.TAG_API_URL, response = request('get', url=constant.TAG_API_URL,
params={'sort': 'popular', 'tag_id': tag_id}).json() params={'sort': sorting, 'tag_id': tag_id}).json()
for row in response['result']: for row in response['result']:
title = row['title']['english'] title = row['title']['english']
title = title[:85] + '..' if len(title) > 85 else title title = title[:85] + '..' if len(title) > 85 else title