using cookie rather than login #54

RicterZ 2019-05-18 19:34:54 +08:00
parent b173a6c28f
commit 049ab4d9ad
4 changed files with 71 additions and 32 deletions

nhentai/cmdline.py

@@ -1,14 +1,15 @@
# coding: utf-8
from __future__ import print_function
import os
import sys
from optparse import OptionParser
from nhentai import __version__
try:
from itertools import ifilter as filter
except ImportError:
pass
import nhentai.constant as constant
from nhentai import __version__
from nhentai.utils import urlparse, generate_html
from nhentai.logger import logger
@@ -40,16 +41,25 @@ def cmd_parser():
'\n nhentai --file [filename]'
'\n\nEnvironment Variable:\n'
' NHENTAI nhentai mirror url')
# operation options
parser.add_option('--download', dest='is_download', action='store_true',
help='download doujinshi (for search results)')
parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information')
parser.add_option('--show', dest='is_show', action='store_true', help='just show the doujinshi information')
# doujinshi options
parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
parser.add_option('--search', type='string', dest='keyword', action='store', help='search doujinshi by keyword')
parser.add_option('--tag', type='string', dest='tag', action='store', help='download doujinshi by tag')
parser.add_option('--favorites', '-F', action='store_true', dest='favorites',
help='list or download your favorites.')
# page options
parser.add_option('--page', type='int', dest='page', action='store', default=1,
help='page number of search results')
parser.add_option('--tag', type='string', dest='tag', action='store', help='download doujinshi by tag')
parser.add_option('--max-page', type='int', dest='max_page', action='store', default=1,
help='The max page when recursive download tagged doujinshi')
# download options
parser.add_option('--output', type='string', dest='output_dir', action='store', default='',
help='output dir')
parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
@@ -58,20 +68,21 @@ def cmd_parser():
help='timeout for downloading doujinshi')
parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
help='uses a proxy, for example: http://127.0.0.1:1080')
parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
# generate options
parser.add_option('--html', dest='html_viewer', action='store_true',
help='generate a html viewer at current directory')
parser.add_option('--login', '-l', type='str', dest='login', action='store',
help='username:password pair of nhentai account')
parser.add_option('--nohtml', dest='is_nohtml', action='store_true',
help='Don\'t generate HTML')
help='don\'t generate HTML')
parser.add_option('--cbz', dest='is_cbz', action='store_true',
help='Generate Comic Book CBZ File')
help='generate Comic Book CBZ File')
parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
help='Remove downloaded doujinshi dir when generated CBZ file.')
parser.add_option('--file', '-f', type='string', dest='file', action='store', help='Read gallery IDs from file.')
help='remove downloaded doujinshi dir when generated CBZ file.')
# nhentai options
parser.add_option('--cookie', type='str', dest='cookie', action='store',
help='set cookie of nhentai to bypass Google recaptcha')
try:
sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
@@ -86,6 +97,25 @@ def cmd_parser():
generate_html()
exit(0)
if os.path.exists(os.path.join(constant.NHENTAI_HOME, 'cookie')):
with open(os.path.join(constant.NHENTAI_HOME, 'cookie'), 'r') as f:
constant.COOKIE = f.read()
if args.cookie:
try:
if not os.path.exists(constant.NHENTAI_HOME):
os.mkdir(constant.NHENTAI_HOME)
with open(os.path.join(constant.NHENTAI_HOME, 'cookie'), 'w') as f:
f.write(args.cookie)
except Exception as e:
logger.error('Cannot create NHENTAI_HOME: {}'.format(str(e)))
exit(1)
logger.info('Cookie saved.')
exit(0)
'''
if args.login:
try:
_, _ = args.login.split(':', 1)
@@ -95,6 +125,12 @@ def cmd_parser():
if not args.is_download:
logger.warning('YOU DO NOT SPECIFY `--download` OPTION !!!')
'''
if args.favorites:
if not constant.COOKIE:
logger.warning('Cookie has not been set, please use `nhentai --cookie \'COOKIE\'` to set it.')
exit(1)
if args.id:
_ = map(lambda id_: id_.strip(), args.id.split(','))
@@ -106,12 +142,12 @@ def cmd_parser():
args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))
if (args.is_download or args.is_show) and not args.id and not args.keyword and \
not args.login and not args.tag:
not args.tag and not args.favorites:
logger.critical('Doujinshi id(s) are required for downloading')
parser.print_help()
exit(1)
if not args.keyword and not args.id and not args.login and not args.tag:
if not args.keyword and not args.id and not args.tag and not args.favorites:
parser.print_help()
exit(1)
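
The net effect of this hunk is a save-once, read-on-startup flow: nhentai --cookie 'YOUR_COOKIE' writes the raw cookie string to a file named cookie under NHENTAI_HOME (see constant.py below), and every later run loads it back into constant.COOKIE. A minimal standalone sketch of that round-trip, with save_cookie/load_cookie as illustrative names that do not appear in the commit:

    # illustrative sketch only; the helper names are not part of the commit
    import os
    import tempfile

    NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
    COOKIE_FILE = os.path.join(NHENTAI_HOME, 'cookie')

    def save_cookie(raw_cookie):
        # mirrors the --cookie branch: create the dir if needed, then persist the string as-is
        if not os.path.exists(NHENTAI_HOME):
            os.mkdir(NHENTAI_HOME)
        with open(COOKIE_FILE, 'w') as f:
            f.write(raw_cookie)

    def load_cookie():
        # mirrors the startup branch: return the saved cookie, or '' when nothing was saved yet
        if os.path.exists(COOKIE_FILE):
            with open(COOKIE_FILE, 'r') as f:
                return f.read()
        return ''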

nhentai/command.py

@@ -5,7 +5,7 @@ import signal
import platform
from nhentai.cmdline import cmd_parser, banner
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser, tag_parser, login
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, favorites_parser, tag_parser, login
from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader
from nhentai.logger import logger
@@ -21,13 +21,15 @@ def main():
doujinshi_ids = []
doujinshi_list = []
if options.login:
if options.favorites:
'''
username, password = options.login.split(':', 1)
logger.info('Logging in to nhentai using credential pair \'%s:%s\'' % (username, '*' * len(password)))
login(username, password)
'''
if options.is_download or options.is_show:
for doujinshi_info in login_parser():
for doujinshi_info in favorites_parser():
doujinshi_list.append(Doujinshi(**doujinshi_info))
if options.is_show and not options.is_download:
@@ -37,7 +39,7 @@ def main():
if options.tag:
doujinshis = tag_parser(options.tag, max_page=options.max_page)
print_doujinshi(doujinshis)
if options.is_download:
if options.is_download and doujinshis:
doujinshi_ids = map(lambda d: d['id'], doujinshis)
if options.keyword:
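
With login() commented out, --favorites now goes straight from the stored cookie to the parser. A condensed sketch of that path in main(), assuming (as the diff shows) that favorites_parser() yields dicts accepted by Doujinshi(**info):

    # condensed sketch of the --favorites path; not a verbatim copy of main()
    from nhentai.parser import favorites_parser
    from nhentai.doujinshi import Doujinshi

    def collect_favorites():
        # constant.COOKIE must already be set, otherwise cmd_parser() bails out earlier
        doujinshi_list = []
        for doujinshi_info in favorites_parser():
            doujinshi_list.append(Doujinshi(**doujinshi_info))
        return doujinshi_list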

nhentai/constant.py

@@ -1,6 +1,7 @@
# coding: utf-8
from __future__ import unicode_literals, print_function
import os
import tempfile
from nhentai.utils import urlparse
BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
@@ -20,4 +21,8 @@ FAV_URL = '%s/favorites/' % BASE_URL
u = urlparse(BASE_URL)
IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)
NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
PROXY = {}
COOKIE = ''

nhentai/parser.py

@@ -25,6 +25,7 @@ def request(method, url, **kwargs):
if not hasattr(session, method):
raise AttributeError('\'requests.Session\' object has no attribute \'{0}\''.format(method))
session.headers.update({'Cookie': constant.COOKIE})
return getattr(session, method)(url, proxies=constant.PROXY, verify=False, **kwargs)
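
Because every HTTP call in the project funnels through this request() helper, updating the session headers here is enough to send the saved cookie with all traffic. A rough, self-contained equivalent of the patched helper (a module-level requests.Session is assumed here; this is a sketch, not the exact source):

    # rough equivalent of the patched helper, for illustration only
    import requests
    import nhentai.constant as constant

    session = requests.Session()

    def request(method, url, **kwargs):
        # every outgoing request now carries the saved nhentai cookie verbatim
        session.headers.update({'Cookie': constant.COOKIE})
        return getattr(session, method)(url, proxies=constant.PROXY, verify=False, **kwargs)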
@@ -37,6 +38,7 @@ def _get_csrf_token(content):
def login(username, password):
logger.warning('This feature is deprecated, please use --cookie to set your cookie.')
csrf_token = _get_csrf_token(request('get', url=constant.LOGIN_URL).text)
if os.getenv('DEBUG'):
logger.info('Getting CSRF token ...')
@@ -51,7 +53,7 @@ def login(username, password):
}
resp = request('post', url=constant.LOGIN_URL, data=login_dict)
if 'You\'re loading pages way too quickly.' in resp.text:
if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
csrf_token = _get_csrf_token(resp.text)
resp = request('post', url=resp.url, data={'csrfmiddlewaretoken': csrf_token, 'next': '/'})
@@ -59,13 +61,12 @@ def login(username, password):
logger.error('Login failed, please check your username and password')
exit(1)
if 'You\'re loading pages way too quickly.' in resp.text:
logger.error('You meet challenge insistently, please submit a issue'
' at https://github.com/RicterZ/nhentai/issues')
if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
logger.error('Using nhentai --cookie \'YOUR_COOKIE_HERE\' to save your Cookie.')
exit(2)
def login_parser():
def favorites_parser():
html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
count = html.find('span', attrs={'class': 'count'})
if not count:
@@ -91,20 +92,15 @@ def login_parser():
ret = []
doujinshi_id = re.compile('data-id="([\d]+)"')
def _callback(request, result):
ret.append(result)
# TODO: reduce threads number ...
thread_pool = threadpool.ThreadPool(1)
for page in range(1, pages + 1):
try:
logger.info('Getting doujinshi ids of page %d' % page)
resp = request('get', constant.FAV_URL + '?page=%d' % page).text
ids = doujinshi_id.findall(resp)
requests_ = threadpool.makeRequests(doujinshi_parser, ids, _callback)
[thread_pool.putRequest(req) for req in requests_]
thread_pool.wait()
for i in ids:
ret.append(doujinshi_parser(i))
except Exception as e:
logger.error('Error: %s, continue', str(e))
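
Put together, the commit replaces the username/password login with two steps: save the cookie once, then pull favorites with it on any later run. A hedged end-to-end sketch using the functions touched here (the cookie value is a placeholder, and the printed field names are assumed from Doujinshi(**info)):

    # end-to-end sketch; the cookie value below is a placeholder, not a working session
    import nhentai.constant as constant
    from nhentai.parser import favorites_parser

    constant.COOKIE = 'sessionid=PLACEHOLDER; csrftoken=PLACEHOLDER'  # normally loaded from the cookie file
    for info in favorites_parser():   # pages through FAV_URL and runs doujinshi_parser() per id
        print(info.get('id'), info.get('name'))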