Mirror of https://github.com/RicterZ/nhentai.git (synced 2025-07-01 16:09:28 +02:00)

Compare commits

29 Commits
| Author | SHA1 | Date |
|---|---|---|
| | 263dba51f3 | |
| | 049ab4d9ad | |
| | b173a6c28f | |
| | b64b718c88 | |
| | 8317662664 | |
| | 13e60a69e9 | |
| | b5acbc76fd | |
| | 1eb1b5c04c | |
| | 2acb6a1249 | |
| | 0660cb0fed | |
| | 680b004c24 | |
| | 6709af2a20 | |
| | a3fead2852 | |
| | 0728dd8c6d | |
| | 9160b38c3f | |
| | f74be0c665 | |
| | c30f562a83 | |
| | 37547cc97f | |
| | f6fb90aab5 | |
| | 50be89db44 | |
| | fc0be35b2c | |
| | 5c3dace937 | |
| | b2d622f11a | |
| | 0c8264bcc6 | |
| | a6074242fb | |
| | eb6df28fba | |
| | 1091ea3e0a | |
| | 0df51c83e5 | |
| | c5fa98ebd1 | |
.gitignore (vendored): 1 line changed
@@ -5,3 +5,4 @@ dist/
*.egg-info
.python-version
.DS_Store
output/
@@ -12,8 +12,10 @@ install:
  - python setup.py install

script:
  - echo 268642 > /tmp/test.txt
  - NHENTAI=https://nhentai.net nhentai --cookie '__cfduid=da09f237ceb0f51c75980b0b3fda3ce571558179357; _ga=GA1.2.2000087053.1558179358; _gid=GA1.2.717818542.1558179358; csrftoken=iSxrTFOjrujJqauhAqWvTTI9dl3sfWnxdEFoMuqgmlBrbMin5Gj9wJW4r61cmH1X; sessionid=ewuaayfewbzpiukrarx9d52oxwlz2esd'
  - NHENTAI=https://nhentai.net nhentai --search umaru
  - NHENTAI=https://nhentai.net nhentai --id=152503,146134 -t 10 --output=/tmp/
  - NHENTAI=https://nhentai.net nhentai -l nhentai_test:nhentai --output=/tmp/
  - NHENTAI=https://nhentai.net nhentai --id=152503,146134 -t 10 --output=/tmp/ --cbz
  - NHENTAI=https://nhentai.net nhentai --tag lolicon
  - NHENTAI=https://nhentai.net nhentai --id 92066 --output=/tmp/ --cbz
  - NHENTAI=https://nhentai.net nhentai -F
  - NHENTAI=https://nhentai.net nhentai --file /tmp/test.txt
README.md: 16 lines changed
@@ -7,7 +7,7 @@ nhentai
|_| |_|_| |_|\___|_| |_|\__\__,_|_|

あなたも変態。 いいね?

[](https://travis-ci.org/RicterZ/nhentai) 
[](https://travis-ci.org/RicterZ/nhentai)  [](https://github.com/RicterZ/nhentai/blob/master/LICENSE)


nHentai is a CLI tool for downloading doujinshi from [nhentai.net](http://nhentai.net).
@@ -18,17 +18,26 @@ nHentai is a CLI tool for downloading doujinshi from [nhentai.net](http://nhenta
    cd nhentai
    python setup.py install

### Gentoo
### Installation (Gentoo)

    layman -fa glicOne
    sudo emerge net-misc/nhentai

### Usage
**IMPORTANT**: To bypass the nhentai frequency limit, you should use the `--login` option to log in to nhentai.net.

*The default download folder will be the path where you run the command (CLI path).*
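Both authentication paths in brief, with placeholder values only (`--login` takes a `username:password` pair and `--cookie` stores the raw cookie string used to bypass Google recaptcha, per the `cmd_parser()` options shown later in this change):

```bash
# log in with an nhentai account (username:password pair)
nhentai --login "username:password"

# or save a raw cookie string once; it is written to ~/.nhentai/cookie and reused on later runs
nhentai --cookie "csrftoken=...; sessionid=..."
```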

Download specified doujinshi:
```bash
nhentai --id=123855,123866
```

Download doujinshi with ids specified in a file:
```bash
nhentai --file=doujinshi.txt
```

Search a keyword and download the first page:
```bash
nhentai --search="tomori" --page=1 --download
```
@@ -71,8 +80,5 @@ NHENTAI=http://h.loli.club nhentai --id 123456

### License
MIT

### あなたも変態
doujinshi.txt (new file): 5 lines added
@@ -0,0 +1,5 @@
184212
204944
222460
244502
261909
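This ID file pairs with the `--file` option documented in the README; an illustrative invocation (the output path is only an example):

```bash
nhentai --file doujinshi.txt --output=/tmp/
```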
@@ -1,3 +1,3 @@
__version__ = '0.2.18'
__version__ = '0.3.1'
__author__ = 'RicterZ'
__email__ = 'ricterzheng@gmail.com'
@@ -1,14 +1,15 @@
# coding: utf-8
from __future__ import print_function
import os
import sys
from optparse import OptionParser
from nhentai import __version__
try:
    from itertools import ifilter as filter
except ImportError:
    pass

import nhentai.constant as constant
from nhentai import __version__
from nhentai.utils import urlparse, generate_html
from nhentai.logger import logger

@@ -37,18 +38,28 @@ def banner():
def cmd_parser():
    parser = OptionParser('\n nhentai --search [keyword] --download'
                          '\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
                          '\n nhentai --file [filename]'
                          '\n\nEnvironment Variable:\n'
                          ' NHENTAI nhentai mirror url')
    # operation options
    parser.add_option('--download', dest='is_download', action='store_true',
                      help='download doujinshi (for search results)')
    parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information')
    parser.add_option('--show', dest='is_show', action='store_true', help='just show the doujinshi information')

    # doujinshi options
    parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
    parser.add_option('--search', type='string', dest='keyword', action='store', help='search doujinshi by keyword')
    parser.add_option('--tag', type='string', dest='tag', action='store', help='download doujinshi by tag')
    parser.add_option('--favorites', '-F', action='store_true', dest='favorites',
                      help='list or download your favorites.')

    # page options
    parser.add_option('--page', type='int', dest='page', action='store', default=1,
                      help='page number of search results')
    parser.add_option('--tag', type='string', dest='tag', action='store', help='download doujinshi by tag')
    parser.add_option('--max-page', type='int', dest='max_page', action='store', default=1,
                      help='The max page when recursive download tagged doujinshi')

    # download options
    parser.add_option('--output', type='string', dest='output_dir', action='store', default='',
                      help='output dir')
    parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
@@ -57,19 +68,21 @@ def cmd_parser():
                      help='timeout for downloading doujinshi')
    parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
                      help='uses a proxy, for example: http://127.0.0.1:1080')
    parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')

    # generate options
    parser.add_option('--html', dest='html_viewer', action='store_true',
                      help='generate a html viewer at current directory')

    parser.add_option('--login', '-l', type='str', dest='login', action='store',
                      help='username:password pair of nhentai account')

    parser.add_option('--nohtml', dest='is_nohtml', action='store_true',
                      help='Don\'t generate HTML')

                      help='don\'t generate HTML')
    parser.add_option('--cbz', dest='is_cbz', action='store_true',
                      help='Generate Comic Book CBZ File')
                      help='generate Comic Book CBZ File')
    parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
                      help='Remove downloaded doujinshi dir when generated CBZ file.')
                      help='remove downloaded doujinshi dir when generated CBZ file.')

    # nhentai options
    parser.add_option('--cookie', type='str', dest='cookie', action='store',
                      help='set cookie of nhentai to bypass Google recaptcha')

    try:
        sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
@@ -84,6 +97,25 @@ def cmd_parser():
        generate_html()
        exit(0)

    if os.path.exists(os.path.join(constant.NHENTAI_HOME, 'cookie')):
        with open(os.path.join(constant.NHENTAI_HOME, 'cookie'), 'r') as f:
            constant.COOKIE = f.read()

    if args.cookie:
        try:
            if not os.path.exists(constant.NHENTAI_HOME):
                os.mkdir(constant.NHENTAI_HOME)

            with open(os.path.join(constant.NHENTAI_HOME, 'cookie'), 'w') as f:
                f.write(args.cookie)
        except Exception as e:
            logger.error('Cannot create NHENTAI_HOME: {}'.format(str(e)))
            exit(1)

        logger.info('Cookie saved.')
        exit(0)

    '''
    if args.login:
        try:
            _, _ = args.login.split(':', 1)
@@ -93,18 +125,29 @@ def cmd_parser():

    if not args.is_download:
        logger.warning('YOU DO NOT SPECIFY `--download` OPTION !!!')
    '''

    if args.favorites:
        if not constant.COOKIE:
            logger.warning('Cookie has not been set, please use `nhentai --cookie \'COOKIE\'` to set it.')
            exit(1)

    if args.id:
        _ = map(lambda id: id.strip(), args.id.split(','))
        _ = map(lambda id_: id_.strip(), args.id.split(','))
        args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))

    if args.file:
        with open(args.file, 'r') as f:
            _ = map(lambda id: id.strip(), f.readlines())
            args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))

    if (args.is_download or args.is_show) and not args.id and not args.keyword and \
            not args.login and not args.tag:
            not args.tag and not args.favorites:
        logger.critical('Doujinshi id(s) are required for downloading')
        parser.print_help()
        exit(1)

    if not args.keyword and not args.id and not args.login and not args.tag:
    if not args.keyword and not args.id and not args.tag and not args.favorites:
        parser.print_help()
        exit(1)

@@ -5,7 +5,7 @@ import signal
import platform

from nhentai.cmdline import cmd_parser, banner
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser, tag_guessing, tag_parser
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, favorites_parser, tag_parser, login
from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader
from nhentai.logger import logger
@@ -21,19 +21,22 @@ def main():
    doujinshi_ids = []
    doujinshi_list = []

    if options.login:
        username, password = options.login.split(':', 1)
        logger.info('Logging in to nhentai using credential pair \'%s:%s\'' % (username, '*' * len(password)))
        for doujinshi_info in login_parser(username=username, password=password):
    if options.favorites:
        if not options.is_download:
            logger.warning('You do not specify --download option')

        for doujinshi_info in favorites_parser():
            doujinshi_list.append(Doujinshi(**doujinshi_info))

        if not options.is_download:
            print_doujinshi([{'id': i.id, 'title': i.name} for i in doujinshi_list])
            exit(0)

    if options.tag:
        tag_id = tag_guessing(options.tag)
        if tag_id:
            doujinshis = tag_parser(tag_id, max_page=options.max_page)
            print_doujinshi(doujinshis)
            if options.is_download:
                doujinshi_ids = map(lambda d: d['id'], doujinshis)
        doujinshis = tag_parser(options.tag, max_page=options.max_page)
        print_doujinshi(doujinshis)
        if options.is_download and doujinshis:
            doujinshi_ids = map(lambda d: d['id'], doujinshis)

    if options.keyword:
        doujinshis = search_parser(options.keyword, options.page)
@@ -1,18 +1,28 @@
# coding: utf-8
from __future__ import unicode_literals, print_function
import os
import tempfile
from nhentai.utils import urlparse

BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')

DETAIL_URL = '%s/api/gallery' % BASE_URL
SEARCH_URL = '%s/api/galleries/search' % BASE_URL
__api_suspended_DETAIL_URL = '%s/api/gallery' % BASE_URL
__api_suspended_SEARCH_URL = '%s/api/galleries/search' % BASE_URL

DETAIL_URL = '%s/g' % BASE_URL
SEARCH_URL = '%s/search/' % BASE_URL

TAG_URL = '%s/tag' % BASE_URL
TAG_API_URL = '%s/api/galleries/tagged' % BASE_URL
LOGIN_URL = '%s/login/' % BASE_URL
CHALLENGE_URL = '%s/challenge' % BASE_URL
FAV_URL = '%s/favorites/' % BASE_URL

u = urlparse(BASE_URL)
IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)

NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')

PROXY = {}

COOKIE = ''
@@ -36,6 +36,7 @@ class Doujinshi(object):
        self.downloader = None
        self.url = '%s/%d' % (DETAIL_URL, self.id)
        self.info = DoujinshiInfo(**kwargs)
        self.filename = format_filename('[%s][%s][%s]' % (self.id, self.info.artist, self.name))

    def __repr__(self):
        return '<Doujinshi: {0}>'.format(self.name)
@@ -44,8 +45,8 @@ class Doujinshi(object):
        table = [
            ["Doujinshi", self.name],
            ["Subtitle", self.info.subtitle],
            ["Characters", self.info.characters],
            ["Authors", self.info.artists],
            ["Characters", self.info.character],
            ["Authors", self.info.artist],
            ["Language", self.info.language],
            ["Tags", self.info.tags],
            ["URL", self.url],
@@ -57,10 +58,20 @@ class Doujinshi(object):
        logger.info('Starting to download doujinshi: %s' % self.name)
        if self.downloader:
            download_queue = []

            if len(self.ext) != self.pages:
                logger.warning('Page count and ext count do not equal')

            for i in range(1, min(self.pages, len(self.ext)) + 1):
                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i-1]))

            self.downloader.download(download_queue, self.filename)

            '''
            for i in range(len(self.ext)):
                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i+1, EXT_MAP[self.ext[i]]))
            '''

            self.downloader.download(download_queue, format_filename('%s-%s' % (self.id, self.name[:200])))
        else:
            logger.critical('Downloader has not been loaded')

@@ -29,6 +29,7 @@ class Downloader(Singleton):
        self.path = str(path)
        self.thread_count = thread
        self.threads = []
        self.thread_pool = None
        self.timeout = timeout

    def _download(self, url, folder='', filename='', retried=0):
@@ -13,42 +13,65 @@ import nhentai.constant as constant
from nhentai.logger import logger


session = requests.Session()
session.headers.update({
    'Referer': constant.LOGIN_URL,
    'User-Agent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
})


def request(method, url, **kwargs):
    if not hasattr(requests, method):
        raise AttributeError('\'requests\' object has no attribute \'{0}\''.format(method))
    global session
    if not hasattr(session, method):
        raise AttributeError('\'requests.Session\' object has no attribute \'{0}\''.format(method))

    return requests.__dict__[method](url, proxies=constant.PROXY, verify=False, **kwargs)
    session.headers.update({'Cookie': constant.COOKIE})
    return getattr(session, method)(url, proxies=constant.PROXY, verify=False, **kwargs)


def login_parser(username, password):
    s = requests.Session()
    s.proxies = constant.PROXY
    s.verify = False
    s.headers.update({'Referer': constant.LOGIN_URL})

    s.get(constant.LOGIN_URL)
    content = s.get(constant.LOGIN_URL).content
def _get_csrf_token(content):
    html = BeautifulSoup(content, 'html.parser')
    csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})

    if not csrf_token_elem:
        raise Exception('Cannot find csrf token to login')
    csrf_token = csrf_token_elem.attrs['value']
    return csrf_token_elem.attrs['value']


def login(username, password):
    logger.warning('This feature is deprecated, please use --cookie to set your cookie.')
    csrf_token = _get_csrf_token(request('get', url=constant.LOGIN_URL).text)
    if os.getenv('DEBUG'):
        logger.info('Getting CSRF token ...')

    if os.getenv('DEBUG'):
        logger.info('CSRF token is {}'.format(csrf_token))

    login_dict = {
        'csrfmiddlewaretoken': csrf_token,
        'username_or_email': username,
        'password': password,
    }
    resp = s.post(constant.LOGIN_URL, data=login_dict)
    resp = request('post', url=constant.LOGIN_URL, data=login_dict)

    if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
        csrf_token = _get_csrf_token(resp.text)
        resp = request('post', url=resp.url, data={'csrfmiddlewaretoken': csrf_token, 'next': '/'})

    if 'Invalid username/email or password' in resp.text:
        logger.error('Login failed, please check your username and password')
        exit(1)

    html = BeautifulSoup(s.get(constant.FAV_URL).content, 'html.parser')
    if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
        logger.error('Using nhentai --cookie \'YOUR_COOKIE_HERE\' to save your Cookie.')
        exit(2)


def favorites_parser():
    html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
    count = html.find('span', attrs={'class': 'count'})
    if not count:
        logger.error("Can't get your number of favorited doujins. Did the login failed?")
        return []

    count = int(count.text.strip('(').strip(')').replace(',', ''))
    if count == 0:
@@ -69,19 +92,15 @@ def login_parser(username, password):
    ret = []
    doujinshi_id = re.compile('data-id="([\d]+)"')

    def _callback(request, result):
        ret.append(result)

    thread_pool = threadpool.ThreadPool(5)

    for page in range(1, pages+1):
    for page in range(1, pages + 1):
        try:
            logger.info('Getting doujinshi ids of page %d' % page)
            resp = s.get(constant.FAV_URL + '?page=%d' % page).text
            resp = request('get', constant.FAV_URL + '?page=%d' % page).text
            ids = doujinshi_id.findall(resp)
            requests_ = threadpool.makeRequests(doujinshi_parser, ids, _callback)
            [thread_pool.putRequest(req) for req in requests_]
            thread_pool.wait()

            for i in ids:
                ret.append(doujinshi_parser(i))

        except Exception as e:
            logger.error('Error: %s, continue', str(e))

@@ -92,18 +111,110 @@ def doujinshi_parser(id_):
    if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)

    try:
        response = request('get', url)
        if response.status_code in (200, ):
            response = response.content
        else:
            logger.debug('Slow down and retry ({}) ...'.format(id_))
            time.sleep(1)
            return doujinshi_parser(str(id_))

    except Exception as e:
        logger.critical(str(e))
        raise SystemExit

    html = BeautifulSoup(response, 'html.parser')
    doujinshi_info = html.find('div', attrs={'id': 'info'})

    title = doujinshi_info.find('h1').text
    subtitle = doujinshi_info.find('h2')

    doujinshi['name'] = title
    doujinshi['subtitle'] = subtitle.text if subtitle else ''

    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
    img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png)$', doujinshi_cover.a.img.attrs['data-src'])

    ext = []
    for i in html.find_all('div', attrs={'class': 'thumb-container'}):
        _, ext_name = os.path.basename(i.img.attrs['data-src']).rsplit('.', 1)
        ext.append(ext_name)

    if not img_id:
        logger.critical('Tried yo get image id failed')
        exit(1)

    doujinshi['img_id'] = img_id.group(1)
    doujinshi['ext'] = ext

    pages = 0
    for _ in doujinshi_info.find_all('div', class_=''):
        pages = re.search('([\d]+) pages', _.text)
        if pages:
            pages = pages.group(1)
            break
    doujinshi['pages'] = int(pages)

    # gain information of the doujinshi
    information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
    needed_fields = ['Characters', 'Artists', 'Language', 'Tags']
    for field in information_fields:
        field_name = field.contents[0].strip().strip(':')
        if field_name in needed_fields:
            data = [sub_field.contents[0].strip() for sub_field in
                    field.find_all('a', attrs={'class': 'tag'})]
            doujinshi[field_name.lower()] = ', '.join(data)

    return doujinshi


def search_parser(keyword, page):
    logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
    result = []
    try:
        response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
    except requests.ConnectionError as e:
        logger.critical(e)
        logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
        raise SystemExit

    html = BeautifulSoup(response, 'html.parser')
    doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
    for doujinshi in doujinshi_search_result:
        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
        title = doujinshi_container.text.strip()
        title = title if len(title) < 85 else title[:82] + '...'
        id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
        result.append({'id': id_, 'title': title})
    if not result:
        logger.warn('Not found anything of keyword {}'.format(keyword))

    return result


def __api_suspended_doujinshi_parser(id_):
    if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))

    id_ = int(id_)
    logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
    i=0
    while i<5:
    i = 0
    while 5 > i:
        try:
            response = request('get', url).json()
        except Exception as e:
            i+=1
            if not i<5:
            i += 1
            if not i < 5:
                logger.critical(str(e))
                exit(1)
            continue
@@ -130,21 +241,21 @@ def doujinshi_parser(id_):
        elif tag_type not in doujinshi:
            doujinshi[tag_type] = tag['name']
        else:
            doujinshi[tag_type] += tag['name']
            doujinshi[tag_type] += ', ' + tag['name']

    return doujinshi


def search_parser(keyword, page):
def __api_suspended_search_parser(keyword, page):
    logger.debug('Searching doujinshis using keywords {0}'.format(keyword))
    result = []
    i=0
    while i<5:
    i = 0
    while i < 5:
        try:
            response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
        except Exception as e:
            i+=1
            if not i<5:
            i += 1
            if not i < 5:
                logger.critical(str(e))
                logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
                exit(1)
@@ -174,37 +285,18 @@ def print_doujinshi(doujinshi_list):
        tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))


def tag_parser(tag_id, max_page=1):
def __api_suspended_tag_parser(tag_id, max_page=1):
    logger.info('Searching for doujinshi with tag id {0}'.format(tag_id))
    result = []
    i=0
    while i<5:
        try:
            response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
        except Exception as e:
            i+=1
            if not i<5:
                logger.critical(str(e))
                exit(1)
            continue
        break
    response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
    page = max_page if max_page <= response['num_pages'] else int(response['num_pages'])

    for i in range(1, page+1):
    for i in range(1, page + 1):
        logger.info('Getting page {} ...'.format(i))

        if page != 1:
            i=0
            while i<5:
                try:
                    response = request('get', url=constant.TAG_API_URL, params={'sort': 'popular', 'tag_id': tag_id}).json()
                except Exception as e:
                    i+=1
                    if not i<5:
                        logger.critical(str(e))
                        exit(1)
                    continue
                break
            response = request('get', url=constant.TAG_API_URL,
                               params={'sort': 'popular', 'tag_id': tag_id}).json()
        for row in response['result']:
            title = row['title']['english']
            title = title[:85] + '..' if len(title) > 85 else title
@@ -216,42 +308,31 @@ def tag_parser(tag_id, max_page=1):
    return result


def tag_guessing(tag_name):
def tag_parser(tag_name, max_page=1):
    result = []
    tag_name = tag_name.lower()
    tag_name = tag_name.replace(' ', '-')
    logger.info('Trying to get tag_id of tag \'{0}\''.format(tag_name))
    i=0
    while i<5:
        try:
            response = request('get', url='%s/%s' % (constant.TAG_URL, tag_name)).content
        except Exception as e:
            i+=1
            if not i<5:
                logger.critical(str(e))
                exit(1)
            continue
        break

    html = BeautifulSoup(response, 'html.parser')
    first_item = html.find('div', attrs={'class': 'gallery'})
    if not first_item:
        logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
        return
    for p in range(1, max_page + 1):
        logger.debug('Fetching page {0} for doujinshi with tag \'{1}\''.format(p, tag_name))
        response = request('get', url='%s/%s?page=%d' % (constant.TAG_URL, tag_name, p)).content

    doujinshi_id = re.findall('(\d+)', first_item.a.attrs['href'])
    if not doujinshi_id:
        logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
        return
        html = BeautifulSoup(response, 'html.parser')
        doujinshi_items = html.find_all('div', attrs={'class': 'gallery'})
        if not doujinshi_items:
            logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
            return

    ret = doujinshi_parser(doujinshi_id[0])
    if 'tag' in ret and tag_name in ret['tag']:
        tag_id = ret['tag'][tag_name]
        logger.info('Tag id of tag \'{0}\' is {1}'.format(tag_name, tag_id))
    else:
        logger.error('Cannot find doujinshi id of tag \'{0}\''.format(tag_name))
        return
        for i in doujinshi_items:
            doujinshi_id = i.a.attrs['href'].strip('/g')
            doujinshi_title = i.a.text.strip()
            doujinshi_title = doujinshi_title if len(doujinshi_title) < 85 else doujinshi_title[:82] + '...'
            result.append({'title': doujinshi_title, 'id': doujinshi_id})

    return tag_id
    if not result:
        logger.warn('No results for tag \'{}\''.format(tag_name))

    return result


if __name__ == '__main__':
@@ -43,8 +43,7 @@ def generate_html(output_dir='.', doujinshi_obj=None):
    image_html = ''

    if doujinshi_obj is not None:
        doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
                                                                             doujinshi_obj.name)))
        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
    else:
        doujinshi_dir = '.'

@@ -85,8 +84,7 @@ def generate_html(output_dir='.', doujinshi_obj=None):

def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
    if doujinshi_obj is not None:
        doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
                                                                             doujinshi_obj.name)))
        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
        cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), '%s.cbz' % doujinshi_obj.id)
    else:
        cbz_filename = './doujinshi.cbz'
@@ -118,9 +116,12 @@ and append a file extension like '.txt', so I avoid the potential of using
an invalid filename.

    """
    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
    valid_chars = "-_.()[] %s%s" % (string.ascii_letters, string.digits)
    filename = ''.join(c for c in s if c in valid_chars)
    filename = filename.replace(' ', '_')  # I don't like spaces in filenames.
    if len(filename) > 100:
        filename = filename[:100]
        filename = filename[:100] + '...]'

    # Remove [] from filename
    filename = filename.replace('[]', '')
    return filename
@@ -46,17 +46,33 @@ document.getElementById('image-container').onclick = event => {
document.onkeypress = event => {
    switch (event.key.toLowerCase()) {
        // Previous Image
        case 'arrowleft':
        case 'a':
            changePage(currentPage - 1);
            break;

        // Next Image
        case ' ':
        case 'esc': // future close page function
        case 'enter':
        case 'arrowright':
        case 'd':
            changePage(currentPage + 1);
            break;
    }// remove arrow cause it won't work
};

document.onkeydown = event => {
    switch (event.keyCode) {
        case 37: //left
            changePage(currentPage - 1);
            break;
        case 38: //up
            changePage(currentPage - 1);
            break;
        case 39: //right
            changePage(currentPage + 1);
            break;
        case 40: //down
            changePage(currentPage + 1);
            break;
    }
};
@@ -3,4 +3,3 @@ BeautifulSoup4>=4.0.0
threadpool>=1.2.7
tabulate>=0.7.5
future>=0.15.2
threadpool==1.3.2