Compare commits


29 Commits

SHA1 Message Date
0660cb0fed update user-agent 2019-04-11 22:48:18 +08:00
680b004c24 update README 2019-04-11 22:47:49 +08:00
6709af2a20 0.3.1 - add login session 2019-04-11 22:44:26 +08:00
a3fead2852 pep-8 2019-04-11 22:43:42 +08:00
0728dd8c6d use text rather than content 2019-04-11 22:41:37 +08:00
9160b38c3f bypass the challenge 2019-04-11 22:39:20 +08:00
f74be0c665 add new tests 2019-04-11 22:10:16 +08:00
c30f562a83 Merge pull request #48 from onlymyflower/master (download ids from file) 2019-04-11 22:09:30 +08:00
37547cc97f global login session #49 #46 2019-04-11 22:08:19 +08:00
f6fb90aab5 download ids from file 2019-03-06 16:46:47 +08:00
50be89db44 fix extension issue #44 2019-01-27 10:06:12 +08:00
fc0be35b2c 0.3.0 #40 2019-01-15 21:16:14 +08:00
5c3dace937 tag page download #40 2019-01-15 21:12:20 +08:00
b2d622f11a fix tag download issue #40 2019-01-15 21:09:24 +08:00
0c8264bcc6 fix download issues 2019-01-15 20:43:00 +08:00
a6074242fb nhentai suspended api #40 2019-01-15 20:29:10 +08:00
eb6df28fba 0.2.19 2018-12-30 14:13:27 +08:00
1091ea3e0a remove debug 2018-12-30 14:12:38 +08:00
0df51c83e5 change output filename 2018-12-30 14:06:15 +08:00
c5fa98ebd1 Update .travis.yml 2018-11-04 21:44:59 +08:00
3154a94c3d 0.2.18 2018-10-24 22:21:29 +08:00
c47018251f fix #27 2018-10-24 22:20:33 +08:00
74d0499092 add test 2018-10-24 22:07:43 +08:00
7e56d9b901 fix #33 2018-10-24 22:06:49 +08:00
8cbb334d36 fix #31 2018-10-24 21:56:21 +08:00
db6d45efe0 fix bug #34 2018-10-19 10:55:21 +08:00
d412794bce Merge pull request #32 from violetdarkness/patch-1 (requirement.txt missing new line) 2018-10-08 23:36:38 +08:00
8eedbf077b requirement.txt missing new line ("I got error when installing and find this requirement.txt missing newline") 2018-10-08 21:13:52 +07:00
c95ecdded4 remove gdb 2018-10-01 15:04:32 +08:00
12 changed files with 254 additions and 130 deletions

.travis.yml

@@ -12,7 +12,10 @@ install:
   - python setup.py install
 script:
+  - echo 268642 > /tmp/test.txt
   - NHENTAI=https://nhentai.net nhentai --search umaru
   - NHENTAI=https://nhentai.net nhentai --id=152503,146134 -t 10 --output=/tmp/
-  - NHENTAI=https://nhentai.net nhentai -l nhentai_test:nhentai --output=/tmp/
+  - NHENTAI=https://nhentai.net nhentai -l nhentai_test:nhentai --download --output=/tmp/
   - NHENTAI=https://nhentai.net nhentai --tag lolicon
+  - NHENTAI=https://nhentai.net nhentai --id 92066 --output=/tmp/ --cbz
+  - NHENTAI=https://nhentai.net nhentai --file /tmp/test.txt

README.md

@@ -18,17 +18,24 @@ nHentai is a CLI tool for downloading doujinshi from [nhentai.net](http://nhentai.net)
 cd nhentai
 python setup.py install
 
-### Gentoo
+### Installation (Gentoo)
 
 layman -fa glicOne
 sudo emerge net-misc/nhentai
 
 ### Usage
+**IMPORTANT**: To bypass the nhentai frequency limit, you should use `--login` option to log into nhentai.net.
+
 Download specified doujinshi:
 ```bash
 nhentai --id=123855,123866
 ```
+
+Download doujinshi with ids specified in a file:
+```bash
+nhentai --file=doujinshi.txt
+```
+
 Search a keyword and download the first page:
 ```bash
 nhentai --search="tomori" --page=1 --download

doujinshi.txt (new file)

@@ -0,0 +1,5 @@
+184212
+204944
+222460
+244502
+261909

nhentai/__init__.py

@@ -1,3 +1,3 @@
-__version__ = '0.2.16'
+__version__ = '0.3.1'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -37,6 +37,7 @@ def banner():
 def cmd_parser():
     parser = OptionParser('\n  nhentai --search [keyword] --download'
                           '\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
+                          '\n nhentai --file [filename]'
                           '\n\nEnvironment Variable:\n'
                           '  NHENTAI          nhentai mirror url')
     parser.add_option('--download', dest='is_download', action='store_true',
@@ -67,8 +68,11 @@ def cmd_parser():
                       help='Don\'t generate HTML')
     parser.add_option('--cbz', dest='is_cbz', action='store_true',
                       help='Generate Comic Book CBZ File')
+    parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
+                      help='Remove downloaded doujinshi dir when generated CBZ file.')
+    parser.add_option('--file', '-f', type='string', dest='file', action='store', help='Read gallery IDs from file.')
 
     try:
         sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
     except (NameError, TypeError):
@@ -96,6 +100,11 @@ def cmd_parser():
         _ = map(lambda id: id.strip(), args.id.split(','))
         args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))
 
+    if args.file:
+        with open(args.file, 'r') as f:
+            _ = map(lambda id: id.strip(), f.readlines())
+            args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))
+
     if (args.is_download or args.is_show) and not args.id and not args.keyword and \
             not args.login and not args.tag:
         logger.critical('Doujinshi id(s) are required for downloading')
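
For context, the new `--file` branch amounts to: read one gallery ID per line, strip whitespace, keep only numeric entries, and deduplicate into a set. A minimal standalone sketch of that parsing (the `read_ids_from_file` name is hypothetical, not part of the patch):

```python
def read_ids_from_file(path):
    """Collect numeric gallery IDs from a file, one ID per line (hypothetical helper)."""
    with open(path, 'r') as f:
        stripped = (line.strip() for line in f)               # drop whitespace/newlines
        return set(int(s) for s in stripped if s.isdigit())   # skip blanks and junk lines

# With the doujinshi.txt added in this compare:
# read_ids_from_file('doujinshi.txt') == {184212, 204944, 222460, 244502, 261909}
```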

nhentai/command.py

@@ -5,7 +5,7 @@ import signal
 import platform
 
 from nhentai.cmdline import cmd_parser, banner
-from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser, tag_guessing, tag_parser
+from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser, tag_parser, login
 from nhentai.doujinshi import Doujinshi
 from nhentai.downloader import Downloader
 from nhentai.logger import logger
@@ -21,21 +21,20 @@ def main():
     doujinshi_ids = []
     doujinshi_list = []
 
-    import pdb; pdb.set_trace()
     if options.login:
         username, password = options.login.split(':', 1)
         logger.info('Logging in to nhentai using credential pair \'%s:%s\'' % (username, '*' * len(password)))
-        for doujinshi_info in login_parser(username=username, password=password):
-            doujinshi_list.append(Doujinshi(**doujinshi_info))
+        login(username, password)
+
+        if options.is_download:
+            for doujinshi_info in login_parser():
+                doujinshi_list.append(Doujinshi(**doujinshi_info))
 
     if options.tag:
-        tag_id = tag_guessing(options.tag)
-        if tag_id:
-            doujinshis = tag_parser(tag_id, max_page=options.max_page)
-            print_doujinshi(doujinshis)
-            if options.is_download:
-                doujinshi_ids = map(lambda d: d['id'], doujinshis)
+        doujinshis = tag_parser(options.tag, max_page=options.max_page)
+        print_doujinshi(doujinshis)
+        if options.is_download:
+            doujinshi_ids = map(lambda d: d['id'], doujinshis)
 
     if options.keyword:
         doujinshis = search_parser(options.keyword, options.page)
@@ -61,7 +60,7 @@ def main():
             if not options.is_nohtml and not options.is_cbz:
                 generate_html(options.output_dir, doujinshi)
             elif options.is_cbz:
-                generate_cbz(options.output_dir, doujinshi)
+                generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)
 
     if not platform.system() == 'Windows':
         logger.log(15, '🍻 All done.')

nhentai/constant.py

@@ -5,11 +5,16 @@ from nhentai.utils import urlparse
 BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
 
-DETAIL_URL = '%s/api/gallery' % BASE_URL
-SEARCH_URL = '%s/api/galleries/search' % BASE_URL
+__api_suspended_DETAIL_URL = '%s/api/gallery' % BASE_URL
+__api_suspended_SEARCH_URL = '%s/api/galleries/search' % BASE_URL
+
+DETAIL_URL = '%s/g' % BASE_URL
+SEARCH_URL = '%s/search/' % BASE_URL
 
 TAG_URL = '%s/tag' % BASE_URL
 TAG_API_URL = '%s/api/galleries/tagged' % BASE_URL
 LOGIN_URL = '%s/login/' % BASE_URL
+CHALLENGE_URL = '%s/challenge' % BASE_URL
 FAV_URL = '%s/favorites/' % BASE_URL
 
 u = urlparse(BASE_URL)
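
As a quick illustration of the rebuilt constants: every endpoint is derived from BASE_URL at import time, so the NHENTAI environment variable (the mirror override from cmdline.py's usage text) re-points scraping from the suspended API to any mirror's HTML pages wholesale. A minimal sketch:

```python
import os

BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
DETAIL_URL = '%s/g' % BASE_URL        # HTML gallery page, replacing the suspended /api/gallery
SEARCH_URL = '%s/search/' % BASE_URL  # HTML search page, replacing /api/galleries/search

# $ NHENTAI=http://h.loli.club nhentai ...   =>  DETAIL_URL == 'http://h.loli.club/g'
```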

nhentai/doujinshi.py

@@ -36,6 +36,7 @@ class Doujinshi(object):
         self.downloader = None
         self.url = '%s/%d' % (DETAIL_URL, self.id)
         self.info = DoujinshiInfo(**kwargs)
+        self.filename = format_filename('[%s][%s][%s]' % (self.id, self.info.artist, self.name))
 
     def __repr__(self):
         return '<Doujinshi: {0}>'.format(self.name)
@@ -44,10 +45,10 @@ class Doujinshi(object):
         table = [
             ["Doujinshi", self.name],
             ["Subtitle", self.info.subtitle],
-            ["Characters", self.info.characters],
-            ["Authors", self.info.artists],
+            ["Characters", self.info.character],
+            ["Authors", self.info.artist],
             ["Language", self.info.language],
-            ["Tags", self.info.tags],
+            ["Tags", ', '.join(self.info.tag.keys())],
             ["URL", self.url],
             ["Pages", self.pages],
         ]
@@ -57,10 +58,20 @@ class Doujinshi(object):
         logger.info('Starting to download doujinshi: %s' % self.name)
         if self.downloader:
             download_queue = []
+            if len(self.ext) != self.pages:
+                logger.warning('Page count and ext count do not equal')
+
+            for i in range(1, min(self.pages, len(self.ext)) + 1):
+                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i-1]))
+
+            self.downloader.download(download_queue, self.filename)
+
+            '''
             for i in range(len(self.ext)):
                 download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i+1, EXT_MAP[self.ext[i]]))
-            self.downloader.download(download_queue, format_filename('%s-%s' % (self.id, self.name[:200])))
+            '''
         else:
             logger.critical('Downloader has not been loaded')
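
To make the new queue construction concrete: page URLs are now built from a 1-based page index and that page's own extension (scraped per thumbnail), instead of one extension for the whole gallery, and the count is clamped to whichever of pages/ext is shorter. A sketch with hypothetical values (IMAGE_URL assumed to mirror nhentai/constant.py):

```python
IMAGE_URL = 'https://i.nhentai.net/galleries'  # assumption: matches constant.py's IMAGE_URL

img_id = 987654                 # hypothetical media id for the gallery
ext = ['jpg', 'jpg', 'png']     # per-page extensions from the thumb-containers
pages = 3

download_queue = ['%s/%d/%d.%s' % (IMAGE_URL, img_id, i, ext[i - 1])
                  for i in range(1, min(pages, len(ext)) + 1)]
# ['.../987654/1.jpg', '.../987654/2.jpg', '.../987654/3.png']
```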

nhentai/downloader.py

@@ -29,6 +29,7 @@ class Downloader(Singleton):
         self.path = str(path)
         self.thread_count = thread
         self.threads = []
+        self.thread_pool = None
         self.timeout = timeout
 
     def _download(self, url, folder='', filename='', retried=0):
@@ -41,20 +42,27 @@ class Downloader(Singleton):
                                                  extension)))
                 return 1, url
 
+            response = None
             with open(os.path.join(folder, base_filename.zfill(3) + extension), "wb") as f:
-                i=0
-                while i<10:
+                i = 0
+                while i < 10:
                     try:
                         response = request('get', url, stream=True, timeout=self.timeout)
+                        if response.status_code != 200:
+                            raise NhentaiImageNotExistException
+
+                    except NhentaiImageNotExistException as e:
+                        raise e
+
                     except Exception as e:
-                        i+=1
-                        if not i<10:
+                        i += 1
+                        if not i < 10:
                             logger.critical(str(e))
                             return 0, None
                         continue
+
                     break
 
-                if response.status_code != 200:
-                    raise NhentaiImageNotExistException
-
                 length = response.headers.get('content-length')
                 if length is None:
                     f.write(response.content)
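
The reshuffled retry loop is worth spelling out: a non-200 status now raises NhentaiImageNotExistException inside the try and is re-raised immediately, so only transport errors burn one of the ten attempts. A stripped-down sketch of the pattern (generic names, not the actual Downloader class):

```python
class ImageNotExistError(Exception):
    """Stand-in for NhentaiImageNotExistException."""

def fetch_with_retries(get, url, max_tries=10):
    response = None
    attempts = 0
    while attempts < max_tries:
        try:
            response = get(url)
            if response.status_code != 200:
                raise ImageNotExistError            # permanent: a missing image never succeeds
        except ImageNotExistError:
            raise                                   # surface immediately, no retry
        except Exception as exc:                    # transient: timeout, connection reset, ...
            attempts += 1
            if attempts >= max_tries:
                raise RuntimeError('giving up on %s: %s' % (url, exc))
            continue
        break
    return response
```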

nhentai/parser.py

@@ -13,44 +13,66 @@ import nhentai.constant as constant
 from nhentai.logger import logger
 
+session = requests.Session()
+session.headers.update({
+    'Referer': constant.LOGIN_URL,
+    'User-Agent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
+})
+
 
 def request(method, url, **kwargs):
-    if not hasattr(requests, method):
-        raise AttributeError('\'requests\' object has no attribute \'{0}\''.format(method))
+    global session
+    if not hasattr(session, method):
+        raise AttributeError('\'requests.Session\' object has no attribute \'{0}\''.format(method))
 
-    return requests.__dict__[method](url, proxies=constant.PROXY, verify=False, **kwargs)
+    return getattr(session, method)(url, proxies=constant.PROXY, verify=False, **kwargs)
 
 
-def login_parser(username, password):
-    s = requests.Session()
-    s.proxies = constant.PROXY
-    s.verify = False
-    s.headers.update({'Referer': constant.LOGIN_URL})
-
-    s.get(constant.LOGIN_URL)
-    content = s.get(constant.LOGIN_URL).content
+def _get_csrf_token(content):
     html = BeautifulSoup(content, 'html.parser')
     csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})
     if not csrf_token_elem:
         raise Exception('Cannot find csrf token to login')
-    csrf_token = csrf_token_elem.attrs['value']
+    return csrf_token_elem.attrs['value']
+
+
+def login(username, password):
+    csrf_token = _get_csrf_token(request('get', url=constant.LOGIN_URL).text)
+    if os.getenv('DEBUG'):
+        logger.info('Getting CSRF token ...')
+    if os.getenv('DEBUG'):
+        logger.info('CSRF token is {}'.format(csrf_token))
 
     login_dict = {
         'csrfmiddlewaretoken': csrf_token,
         'username_or_email': username,
         'password': password,
     }
-    resp = s.post(constant.LOGIN_URL, data=login_dict)
+    resp = request('post', url=constant.LOGIN_URL, data=login_dict)
+
+    if 'You\'re loading pages way too quickly.' in resp.text:
+        csrf_token = _get_csrf_token(resp.text)
+        resp = request('post', url=resp.url, data={'csrfmiddlewaretoken': csrf_token, 'next': '/'})
+
     if 'Invalid username/email or password' in resp.text:
         logger.error('Login failed, please check your username and password')
         exit(1)
 
-    html = BeautifulSoup(s.get(constant.FAV_URL).content, 'html.parser')
+    if 'You\'re loading pages way too quickly.' in resp.text:
+        logger.error('You meet challenge insistently, please submit a issue'
+                     ' at https://github.com/RicterZ/nhentai/issues')
+        exit(2)
+
+
+def login_parser():
+    html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
     count = html.find('span', attrs={'class': 'count'})
     if not count:
         logger.error("Can't get your number of favorited doujins. Did the login failed?")
+        return
 
-    count = int(count.text.strip('(').strip(')'))
+    count = int(count.text.strip('(').strip(')').replace(',', ''))
     if count == 0:
         logger.warning('No favorites found')
         return []
@@ -74,10 +96,10 @@ def login_parser():
     thread_pool = threadpool.ThreadPool(5)
 
-    for page in range(1, pages+1):
+    for page in range(1, pages + 1):
         try:
             logger.info('Getting doujinshi ids of page %d' % page)
-            resp = s.get(constant.FAV_URL + '?page=%d' % page).text
+            resp = request('get', constant.FAV_URL + '?page=%d' % page).text
             ids = doujinshi_id.findall(resp)
             requests_ = threadpool.makeRequests(doujinshi_parser, ids, _callback)
             [thread_pool.putRequest(req) for req in requests_]
@@ -92,18 +114,103 @@ def doujinshi_parser(id_):
     if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
         raise Exception('Doujinshi id({0}) is not valid'.format(id_))
 
+    id_ = int(id_)
+    logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
+    doujinshi = dict()
+    doujinshi['id'] = id_
+    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
+
+    try:
+        response = request('get', url).content
+    except Exception as e:
+        logger.critical(str(e))
+        raise SystemExit
+
+    html = BeautifulSoup(response, 'html.parser')
+    doujinshi_info = html.find('div', attrs={'id': 'info'})
+
+    title = doujinshi_info.find('h1').text
+    subtitle = doujinshi_info.find('h2')
+
+    doujinshi['name'] = title
+    doujinshi['subtitle'] = subtitle.text if subtitle else ''
+
+    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
+    img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png)$', doujinshi_cover.a.img.attrs['data-src'])
+
+    ext = []
+    for i in html.find_all('div', attrs={'class': 'thumb-container'}):
+        _, ext_name = os.path.basename(i.img.attrs['data-src']).rsplit('.', 1)
+        ext.append(ext_name)
+
+    if not img_id:
+        logger.critical('Tried yo get image id failed')
+        exit(1)
+
+    doujinshi['img_id'] = img_id.group(1)
+    doujinshi['ext'] = ext
+
+    pages = 0
+    for _ in doujinshi_info.find_all('div', class_=''):
+        pages = re.search('([\d]+) pages', _.text)
+        if pages:
+            pages = pages.group(1)
+            break
+    doujinshi['pages'] = int(pages)
+
+    # gain information of the doujinshi
+    information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
+    needed_fields = ['Characters', 'Artists', 'Language', 'Tags']
+    for field in information_fields:
+        field_name = field.contents[0].strip().strip(':')
+        if field_name in needed_fields:
+            data = [sub_field.contents[0].strip() for sub_field in
+                    field.find_all('a', attrs={'class': 'tag'})]
+            doujinshi[field_name.lower()] = ', '.join(data)
+
+    return doujinshi
+
+
+def search_parser(keyword, page):
+    logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
+    result = []
+    try:
+        response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
+    except requests.ConnectionError as e:
+        logger.critical(e)
+        logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
+        raise SystemExit
+
+    html = BeautifulSoup(response, 'html.parser')
+    doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
+    for doujinshi in doujinshi_search_result:
+        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
+        title = doujinshi_container.text.strip()
+        title = title if len(title) < 85 else title[:82] + '...'
+        id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
+        result.append({'id': id_, 'title': title})
+    if not result:
+        logger.warn('Not found anything of keyword {}'.format(keyword))
+
+    return result
+
+
+def __api_suspended_doujinshi_parser(id_):
+    if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
+        raise Exception('Doujinshi id({0}) is not valid'.format(id_))
+
     id_ = int(id_)
     logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
     doujinshi = dict()
     doujinshi['id'] = id_
     url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
-    i=0
-    while i<5:
+    i = 0
+    while 5 > i:
         try:
             response = request('get', url).json()
         except Exception as e:
-            i+=1
-            if not i<5:
+            i += 1
+            if not i < 5:
                 logger.critical(str(e))
                 exit(1)
             continue
@@ -130,21 +237,21 @@ def __api_suspended_doujinshi_parser(id_):
         elif tag_type not in doujinshi:
             doujinshi[tag_type] = tag['name']
         else:
-            doujinshi[tag_type] += tag['name']
+            doujinshi[tag_type] += ', ' + tag['name']
 
     return doujinshi
 
 
-def search_parser(keyword, page):
+def __api_suspended_search_parser(keyword, page):
     logger.debug('Searching doujinshis using keywords {0}'.format(keyword))
     result = []
-    i=0
-    while i<5:
+    i = 0
+    while i < 5:
         try:
             response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
         except Exception as e:
-            i+=1
-            if not i<5:
+            i += 1
+            if not i < 5:
                 logger.critical(str(e))
                 logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
                 exit(1)
@@ -174,37 +281,18 @@ def print_doujinshi(doujinshi_list):
         tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
 
 
-def tag_parser(tag_id, max_page=1):
+def __api_suspended_tag_parser(tag_id, max_page=1):
     logger.info('Searching for doujinshi with tag id {0}'.format(tag_id))
     result = []
-    i=0
-    while i<5:
-        try:
-            response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
-        except Exception as e:
-            i+=1
-            if not i<5:
-                logger.critical(str(e))
-                exit(1)
-            continue
-        break
+    response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
     page = max_page if max_page <= response['num_pages'] else int(response['num_pages'])
 
-    for i in range(1, page+1):
+    for i in range(1, page + 1):
         logger.info('Getting page {} ...'.format(i))
 
         if page != 1:
-            i=0
-            while i<5:
-                try:
-                    response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
-                except Exception as e:
-                    i+=1
-                    if not i<5:
-                        logger.critical(str(e))
-                        exit(1)
-                    continue
-                break
+            response = request('get', url=constant.TAG_API_URL,
+                               params={'sort': 'popular', 'tag_id': tag_id}).json()
 
         for row in response['result']:
             title = row['title']['english']
            title = title[:85] + '..' if len(title) > 85 else title
@@ -212,46 +300,35 @@ def __api_suspended_tag_parser(tag_id, max_page=1):
     if not result:
         logger.warn('No results for tag id {}'.format(tag_id))
 
     return result
 
 
-def tag_guessing(tag_name):
+def tag_parser(tag_name, max_page=1):
+    result = []
     tag_name = tag_name.lower()
     tag_name = tag_name.replace(' ', '-')
-    logger.info('Trying to get tag_id of tag \'{0}\''.format(tag_name))
-    i=0
-    while i<5:
-        try:
-            response = request('get', url='%s/%s' % (constant.TAG_URL, tag_name)).content
-        except Exception as e:
-            i+=1
-            if not i<5:
-                logger.critical(str(e))
-                exit(1)
-            continue
-        break
 
-    html = BeautifulSoup(response, 'html.parser')
-    first_item = html.find('div', attrs={'class': 'gallery'})
-    if not first_item:
-        logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
-        return
+    for p in range(1, max_page + 1):
+        logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name))
+        response = request('get', url='%s/%s?page=%d' % (constant.TAG_URL, tag_name, p)).content
 
-    doujinshi_id = re.findall('(\d+)', first_item.a.attrs['href'])
-    if not doujinshi_id:
-        logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
-        return
+        html = BeautifulSoup(response, 'html.parser')
+        doujinshi_items = html.find_all('div', attrs={'class': 'gallery'})
+        if not doujinshi_items:
+            logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
+            return
 
-    ret = doujinshi_parser(doujinshi_id[0])
-    if 'tag' in ret and tag_name in ret['tag']:
-        tag_id = ret['tag'][tag_name]
-        logger.info('Tag id of tag \'{0}\' is {1}'.format(tag_name, tag_id))
-    else:
-        logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
-        return
+        for i in doujinshi_items:
+            doujinshi_id = i.a.attrs['href'].strip('/g')
+            doujinshi_title = i.a.text.strip()
+            doujinshi_title = doujinshi_title if len(doujinshi_title) < 85 else doujinshi_title[:82] + '...'
+            result.append({'title': doujinshi_title, 'id': doujinshi_id})
 
-    return tag_id
+    if not result:
+        logger.warn('No results for tag \'{}\''.format(tag_name))
+
+    return result
 
 
 if __name__ == '__main__':
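
The thread running through this file is the global login session (#49/#46): every helper now funnels through a single module-level requests.Session, so the cookies set by login() are reused by login_parser(), doujinshi_parser(), and the rest. Boiled down to a standalone sketch (example URLs only, not the real endpoints):

```python
import requests

session = requests.Session()
session.headers.update({'User-Agent': 'nhentai command line client (https://github.com/RicterZ/nhentai)'})

def request(method, url, **kwargs):
    # Dispatch 'get'/'post'/... onto the one shared session so that cookies
    # (i.e. the login) persist across every subsequent call.
    if not hasattr(session, method):
        raise AttributeError('\'requests.Session\' object has no attribute \'{0}\''.format(method))
    return getattr(session, method)(url, **kwargs)

# request('post', 'https://example.com/login/', data=...) stores the session cookie;
# request('get', 'https://example.com/favorites/') is then authenticated automatically.
```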

nhentai/utils.py

@@ -43,8 +43,7 @@ def generate_html(output_dir='.', doujinshi_obj=None):
     image_html = ''
 
     if doujinshi_obj is not None:
-        doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
-                                                                            doujinshi_obj.name)))
+        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
     else:
         doujinshi_dir = '.'
 
@@ -83,33 +82,29 @@ def generate_html(output_dir='.', doujinshi_obj=None):
         logger.warning('Writen HTML Viewer failed ({})'.format(str(e)))
 
 
-def generate_cbz(output_dir='.', doujinshi_obj=None):
+def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
     if doujinshi_obj is not None:
-        doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
-                                                                            str(doujinshi_obj.name[:200]))))
-        cbz_filename = os.path.join(output_dir, format_filename('%s-%s.cbz' % (doujinshi_obj.id,
-                                                                               str(doujinshi_obj.name[:200]))))
+        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
+        cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), '%s.cbz' % doujinshi_obj.id)
     else:
         cbz_filename = './doujinshi.cbz'
         doujinshi_dir = '.'
 
     file_list = os.listdir(doujinshi_dir)
     file_list.sort()
 
+    logger.info('Writing CBZ file to path: {}'.format(cbz_filename))
     with zipfile.ZipFile(cbz_filename, 'w') as cbz_pf:
         for image in file_list:
             image_path = os.path.join(doujinshi_dir, image)
             cbz_pf.write(image_path, image)
 
-    shutil.rmtree(doujinshi_dir, ignore_errors=True)
+    if rm_origin_dir:
+        shutil.rmtree(doujinshi_dir, ignore_errors=True)
 
     logger.log(15, 'Comic Book CBZ file has been write to \'{0}\''.format(doujinshi_dir))
 
 
@@ -121,7 +116,12 @@ and append a file extension like '.txt', so I avoid the potential of using
 an invalid filename.
 """
-    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
+    valid_chars = "-_.()[] %s%s" % (string.ascii_letters, string.digits)
     filename = ''.join(c for c in s if c in valid_chars)
     filename = filename.replace(' ', '_')  # I don't like spaces in filenames.
+    if len(filename) > 100:
+        filename = filename[:100] + '...]'
+
+    # Remove [] from filename
+    filename = filename.replace('[]', '')
+
     return filename
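
To illustrate the new naming scheme end to end: directories now come from Doujinshi.filename, i.e. an [id][artist][name] pattern, so format_filename() whitelists square brackets, truncates overlong names at 100 characters with a '...]' tail, and scrubs empty bracket pairs. A self-contained restatement with a hypothetical input:

```python
import string

def format_filename(s):
    # Whitelist now includes [] so the '[id][artist][name]' pattern survives.
    valid_chars = "-_.()[] %s%s" % (string.ascii_letters, string.digits)
    filename = ''.join(c for c in s if c in valid_chars)
    filename = filename.replace(' ', '_')
    if len(filename) > 100:
        filename = filename[:100] + '...]'  # truncate but keep a closing bracket
    return filename.replace('[]', '')       # drop empty pairs, e.g. a missing artist

print(format_filename('[152503][][Example Title?]'))  # -> '[152503][Example_Title]'
```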

requirements.txt

@@ -2,4 +2,4 @@ requests>=2.5.0
 BeautifulSoup4>=4.0.0
 threadpool>=1.2.7
 tabulate>=0.7.5
-future>=0.15.2threadpool==1.3.2
+future>=0.15.2