Compare commits

...

13 Commits

SHA1 Message Date
f316c3243b 0.2.12 2018-04-19 17:29:23 +08:00
967e0b4ff5 fix #18 #19 use nhentai api 2018-04-19 17:21:43 +08:00
22cf2592dd 0.2.11 2018-03-16 23:48:58 +08:00
caa0753adb fix bug #13 2018-03-16 23:45:05 +08:00
0e14dd62d5 fix bug #13 2018-03-16 23:42:24 +08:00
7c9693785e fix #14 2018-03-16 23:39:04 +08:00
08ad73b683 fix bug #13 2018-03-16 23:33:16 +08:00
a56d3ca18c fix bug #13 2018-03-16 23:23:25 +08:00
c1975897d2 save downloaded doujinshi as doujinshi name #13 2018-03-16 23:16:26 +08:00
4ed596ff98 download user fav 2018-03-05 21:47:27 +08:00
debf287fb0 download user fav 2018-03-05 21:45:56 +08:00
308c5277b8 Merge pull request #12 from RomaniukVadim/master (Add install for Gentoo) 2018-03-03 19:33:23 +08:00
b425c883c7 Add install for Gentoo 2018-03-02 17:18:22 +02:00
10 changed files with 194 additions and 85 deletions

README.md

@@ -12,38 +12,50 @@ nhentai
 🎉🎉 nhentai now supports Windows!

 Since torrents downloaded from [http://nhentai.net](http://nhentai.net) are very slow and the site also supports reading doujinshi online, this script can be used to download them instead.

-### 安装
+### Installation
 git clone https://github.com/RicterZ/nhentai
 cd nhentai
 python setup.py install

+### Gentoo
+layman -fa glicOne
+sudo emerge net-misc/nhentai

-### 用法
-+ Download doujinshi by a list of ids:
-nhentai --id=123855,123866
-+ Download the first page of doujinshi matching a keyword (not recommended):
-nhentai --search="tomori" --page=1 --download
+### Usage
+Download doujinshi by a list of ids:
+```bash
+nhentai --id=123855,123866
+```
+Download the first page of doujinshi matching a keyword:
+```bash
+nhentai --search="tomori" --page=1 --download
+```
+Download your favorites:
+```bash
+nhentai --login "username:password" --download
+```

 ### Options
 `-t, --thread`: number of download threads, at most 10.
 `--path`: output path for downloaded files, defaults to the current directory.
 `--timeout`: image download timeout, defaults to 30 seconds.
 `--proxy`: proxy to download through, e.g. http://127.0.0.1:8080/
+`--login`: the "username:password" pair of an nhentai account

-### 自建 nhentai 镜像
+### nHentai Mirror
 To download through a self-hosted mirror, set up reverse proxies for both nhentai.net and i.nhentai.net.
 For example, with h.loli.club as the mirror: h.loli.club reverse-proxies nhentai.net, and i.h.loli.club reverse-proxies i.nhentai.net.
 Then download via the environment variable:
-NHENTAI=http://h.loli.club nhentai --id 123456
+```bash
+NHENTAI=http://h.loli.club nhentai --id 123456
+```

 ![](./images/search.png)
 ![](./images/download.png)

nhentai/__init__.py

@@ -1,3 +1,3 @@
-__version__ = '0.2.10'
+__version__ = '0.2.12'
 __author__ = 'Ricter'
 __email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -34,7 +34,8 @@ def cmd_parser():
                                '\n  NHENTAI=http://h.loli.club nhentai --id [ID ...]'
                                '\n\nEnvironment Variable:\n'
                                '  NHENTAI                 nhentai mirror url')
-    parser.add_option('--download', dest='is_download', action='store_true', help='download doujinshi (for search result)')
+    parser.add_option('--download', dest='is_download', action='store_true',
+                      help='download doujinshi (for search result)')
     parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information')
     parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
     parser.add_option('--search', type='string', dest='keyword', action='store', help='search doujinshi by keyword')
@@ -49,7 +50,11 @@ def cmd_parser():
                       help='timeout of download doujinshi')
     parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
                       help='use proxy, example: http://127.0.0.1:1080')
-    parser.add_option('--html', dest='html_viewer', action='store_true', help='generate a html viewer at current directory')
+    parser.add_option('--html', dest='html_viewer', action='store_true',
+                      help='generate a html viewer at current directory')
+    parser.add_option('--login', '-l', type='str', dest='login', action='store',
+                      help='username:password pair of nhentai account')

     try:
         sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
@@ -64,35 +69,45 @@ def cmd_parser():
         generate_html()
         exit(0)

+    if args.login:
+        try:
+            _, _ = args.login.split(':', 1)
+        except ValueError:
+            logger.error('Invalid `username:password` pair.')
+            exit(1)
+
+        if not args.is_download:
+            logger.warning('YOU DO NOT SPECIFY `--download` OPTION !!!')
+
     if args.tags:
         logger.warning('`--tags` is under construction')
-        exit(0)
+        exit(1)

     if args.id:
         _ = map(lambda id: id.strip(), args.id.split(','))
-        args.id = set(map(int, filter(lambda id: id.isdigit(), _)))
+        args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))

-    if (args.is_download or args.is_show) and not args.id and not args.keyword:
+    if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.login:
         logger.critical('Doujinshi id(s) are required for downloading')
         parser.print_help()
-        exit(0)
+        exit(1)

-    if not args.keyword and not args.id:
+    if not args.keyword and not args.id and not args.login:
         parser.print_help()
-        exit(0)
+        exit(1)

     if args.threads <= 0:
         args.threads = 1
     elif args.threads > 15:
         logger.critical('Maximum number of used threads is 15')
-        exit(0)
+        exit(1)

     if args.proxy:
         proxy_url = urlparse(args.proxy)
         if proxy_url.scheme not in ('http', 'https'):
             logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
         else:
-            constant.PROXY = {proxy_url.scheme: args.proxy}
+            constant.PROXY = {'http': args.proxy, 'https': args.proxy}

     return args
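
The last change above is worth pausing on: the proxy dict now maps both schemes to the single configured proxy, so HTTPS requests are proxied as well. A minimal sketch of how `requests` consumes that mapping (the proxy address is a placeholder):

```python
import requests

# Both http:// and https:// URLs are routed through the one configured proxy,
# matching the shape of the new constant.PROXY assignment above.
PROXY = {'http': 'http://127.0.0.1:1080', 'https': 'http://127.0.0.1:1080'}

response = requests.get('https://nhentai.net', proxies=PROXY, verify=False)
print(response.status_code)
```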

nhentai/__main__.py

@@ -1,12 +1,11 @@
-#!/usr/bin/env python2.7
 # coding: utf-8
 from __future__ import unicode_literals, print_function
 import os
 import signal
 import platform

 from nhentai.cmdline import cmd_parser, banner
-from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi
+from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser
 from nhentai.doujinshi import Doujinshi
 from nhentai.downloader import Downloader
 from nhentai.logger import logger
@@ -22,6 +21,12 @@ def main():
     doujinshi_ids = []
     doujinshi_list = []

+    if options.login:
+        username, password = options.login.split(':', 1)
+        logger.info('Login to nhentai using credential \'%s:%s\'' % (username, '*' * len(password)))
+        for doujinshi_info in login_parser(username=username, password=password):
+            doujinshi_list.append(Doujinshi(**doujinshi_info))
+
     if options.keyword:
         doujinshis = search_parser(options.keyword, options.page)
         print_doujinshi(doujinshis)
@@ -31,11 +36,9 @@ def main():
         doujinshi_ids = options.id

     if doujinshi_ids:
-        for id in doujinshi_ids:
-            doujinshi_info = doujinshi_parser(id)
+        for id_ in doujinshi_ids:
+            doujinshi_info = doujinshi_parser(id_)
             doujinshi_list.append(Doujinshi(**doujinshi_info))
-    else:
-        exit(0)

     if not options.is_show:
         downloader = Downloader(path=options.output_dir,
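
The credential parsing above relies on `split(':', 1)` splitting on the first colon only, so passwords that themselves contain colons survive intact. A quick illustration (values hypothetical):

```python
# maxsplit=1 keeps any further colons inside the password part
username, password = 'alice:s3c:ret'.split(':', 1)
assert (username, password) == ('alice', 's3c:ret')
```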

nhentai/constant.py

@@ -5,8 +5,10 @@ from nhentai.utils import urlparse

 BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')

-DETAIL_URL = '%s/g' % BASE_URL
-SEARCH_URL = '%s/search/' % BASE_URL
+DETAIL_URL = '%s/api/gallery' % BASE_URL
+SEARCH_URL = '%s/api/galleries/search' % BASE_URL
+LOGIN_URL = '%s/login/' % BASE_URL
+FAV_URL = '%s/favorites/' % BASE_URL

 u = urlparse(BASE_URL)
 IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)
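
Because every URL is derived from `BASE_URL`, pointing the `NHENTAI` environment variable at a mirror rewrites the API, login, favorites, and image hosts in one place. A sketch of the derivation, assuming a hypothetical mirror host and using the standard-library `urlparse` in place of the `nhentai.utils` shim:

```python
import os
from urllib.parse import urlparse  # Python 3 stand-in for nhentai.utils.urlparse

BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')  # e.g. NHENTAI=http://h.loli.club
u = urlparse(BASE_URL)
print('%s/api/gallery' % BASE_URL)                     # detail endpoint follows the mirror
print('%s://i.%s/galleries' % (u.scheme, u.hostname))  # image host is i.<mirror> by convention
```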

nhentai/doujinshi.py

@@ -5,6 +5,13 @@ from future.builtins import range
 from nhentai.constant import DETAIL_URL, IMAGE_URL
 from nhentai.logger import logger
+from nhentai.utils import format_filename
+
+EXT_MAP = {
+    'j': 'jpg',
+    'p': 'png',
+}


 class DoujinshiInfo(dict):
@@ -19,7 +26,7 @@ class DoujinshiInfo(dict):

 class Doujinshi(object):
-    def __init__(self, name=None, id=None, img_id=None, ext='jpg', pages=0, **kwargs):
+    def __init__(self, name=None, id=None, img_id=None, ext='', pages=0, **kwargs):
         self.name = name
         self.id = id
         self.img_id = img_id
@@ -49,9 +56,10 @@ class Doujinshi(object):
         logger.info('Start download doujinshi: %s' % self.name)
         if self.downloader:
             download_queue = []
-            for i in range(1, self.pages + 1):
-                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext))
-            self.downloader.download(download_queue, self.id)
+            for i in range(len(self.ext)):
+                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i + 1, EXT_MAP[self.ext[i]]))
+            self.downloader.download(download_queue, format_filename('%s-%s' % (self.id, self.name[:200])))
         else:
             logger.critical('Downloader has not been loaded')
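
The API change explains the new loop: `ext` is no longer a single extension but one `'j'`/`'p'` character per page (joined together by the parser below), so the download queue is built page by page through `EXT_MAP`. A sketch with hypothetical values:

```python
EXT_MAP = {'j': 'jpg', 'p': 'png'}
IMAGE_URL = 'https://i.nhentai.net/galleries'  # assumption: mirrors constant.IMAGE_URL
img_id, ext = 177013, 'jjp'  # hypothetical: three pages, the third one a PNG

download_queue = [
    '%s/%d/%d.%s' % (IMAGE_URL, img_id, i + 1, EXT_MAP[ext[i]])
    for i in range(len(ext))
]
# ['https://i.nhentai.net/galleries/177013/1.jpg', '.../2.jpg', '.../3.png']
```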

nhentai/downloader.py

@@ -36,6 +36,11 @@ class Downloader(Singleton):
         filename = filename if filename else os.path.basename(urlparse(url).path)
         base_filename, extension = os.path.splitext(filename)
         try:
+            if os.path.exists(os.path.join(folder, base_filename.zfill(3) + extension)):
+                logger.warning('File: {0} existed, ignore.'.format(os.path.join(folder, base_filename.zfill(3) +
+                                                                                extension)))
+                return 1, url
+
             with open(os.path.join(folder, base_filename.zfill(3) + extension), "wb") as f:
                 response = request('get', url, stream=True, timeout=self.timeout)
                 if response.status_code != 200:
@@ -75,7 +80,7 @@ class Downloader(Singleton):
             logger.log(15, '{0} download successfully'.format(data))

     def download(self, queue, folder=''):
-        if not isinstance(folder, (text)):
+        if not isinstance(folder, text):
             folder = str(folder)

         if self.path:
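
The new existence check reuses the same `zfill(3)` naming as the write below it, so pages already on disk are skipped on re-runs; zero-padding also keeps pages in lexicographic order. A small sketch of that naming rule (folder name hypothetical):

```python
import os

def page_path(folder, filename):
    # same naming rule as Downloader: '7.jpg' -> '007.jpg'
    base, ext = os.path.splitext(filename)
    return os.path.join(folder, base.zfill(3) + ext)

print(page_path('doujinshi_dir', '7.jpg'))   # doujinshi_dir/007.jpg
print(page_path('doujinshi_dir', '12.png'))  # doujinshi_dir/012.png
```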

nhentai/parser.py

@@ -1,9 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals, print_function

-from bs4 import BeautifulSoup
+import os
 import re
+import threadpool
 import requests
+from bs4 import BeautifulSoup
 from tabulate import tabulate

 import nhentai.constant as constant
@@ -17,6 +19,66 @@ def request(method, url, **kwargs):
     return requests.__dict__[method](url, proxies=constant.PROXY, verify=False, **kwargs)


+def login_parser(username, password):
+    s = requests.Session()
+    s.proxies = constant.PROXY
+    s.verify = False
+    s.headers.update({'Referer': constant.LOGIN_URL})
+
+    s.get(constant.LOGIN_URL)
+    content = s.get(constant.LOGIN_URL).content
+    html = BeautifulSoup(content, 'html.parser')
+    csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})
+
+    if not csrf_token_elem:
+        raise Exception('Cannot find csrf token to login')
+    csrf_token = csrf_token_elem.attrs['value']
+
+    login_dict = {
+        'csrfmiddlewaretoken': csrf_token,
+        'username_or_email': username,
+        'password': password,
+    }
+    resp = s.post(constant.LOGIN_URL, data=login_dict)
+    if 'Invalid username (or email) or password' in resp.text:
+        logger.error('Login failed, please check your username and password')
+        exit(1)
+
+    html = BeautifulSoup(s.get(constant.FAV_URL).content, 'html.parser')
+    count = html.find('span', attrs={'class': 'count'})
+    if not count:
+        logger.error('Cannot get count of your favorites, maybe login failed.')
+
+    count = int(count.text.strip('(').strip(')'))
+    pages = count / 25
+    pages += 1 if count % (25 * pages) else 0
+    logger.info('You have %d favorites in %d pages.' % (count, pages))
+
+    if os.getenv('DEBUG'):
+        pages = 1
+
+    ret = []
+    doujinshi_id = re.compile('data-id="([\d]+)"')
+
+    def _callback(request, result):
+        ret.append(result)
+
+    thread_pool = threadpool.ThreadPool(5)
+
+    for page in range(1, pages + 1):
+        try:
+            logger.info('Getting doujinshi id of page %d' % page)
+            resp = s.get(constant.FAV_URL + '?page=%d' % page).content
+            ids = doujinshi_id.findall(resp)
+            requests_ = threadpool.makeRequests(doujinshi_parser, ids, _callback)
+            [thread_pool.putRequest(req) for req in requests_]
+            thread_pool.wait()
+        except Exception as e:
+            logger.error('Error: %s, continue', str(e))
+
+    return ret
+
+
 def doujinshi_parser(id_):
     if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
         raise Exception('Doujinshi id({0}) is not valid'.format(id_))
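
One thing to watch in the `login_parser` page arithmetic above: under Python 2 integer division, `count / 25` is 0 when there are fewer than 25 favorites, and `count % (25 * pages)` then divides by zero. A plain ceiling computation avoids both problems; a defensive sketch (25 favorites per page, taken from the code above):

```python
# Equivalent ceiling computation that also handles count < 25.
def favorite_pages(count, per_page=25):
    return (count + per_page - 1) // per_page

assert favorite_pages(24) == 1
assert favorite_pages(25) == 1
assert favorite_pages(26) == 2
```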
@@ -25,49 +87,29 @@ def doujinshi_parser(id_):
     logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
     doujinshi = dict()
     doujinshi['id'] = id_
-    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
+    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)

     try:
-        response = request('get', url).content
+        response = request('get', url).json()
     except Exception as e:
         logger.critical(str(e))
         exit(1)

-    html = BeautifulSoup(response, 'html.parser')
-    doujinshi_info = html.find('div', attrs={'id': 'info'})
-
-    title = doujinshi_info.find('h1').text
-    subtitle = doujinshi_info.find('h2')
-
-    doujinshi['name'] = title
-    doujinshi['subtitle'] = subtitle.text if subtitle else ''
-
-    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
-    img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png)$', doujinshi_cover.a.img.attrs['data-src'])
-    if not img_id:
-        logger.critical('Tried yo get image id failed')
-        exit(1)
-
-    doujinshi['img_id'] = img_id.group(1)
-    doujinshi['ext'] = img_id.group(2)
-
-    pages = 0
-    for _ in doujinshi_info.find_all('div', class_=''):
-        pages = re.search('([\d]+) pages', _.text)
-        if pages:
-            pages = pages.group(1)
-            break
-    doujinshi['pages'] = int(pages)
+    doujinshi['name'] = response['title']['english']
+    doujinshi['subtitle'] = response['title']['japanese']
+    doujinshi['img_id'] = response['media_id']
+    doujinshi['ext'] = ''.join(map(lambda s: s['t'], response['images']['pages']))
+    doujinshi['pages'] = len(response['images']['pages'])

     # gain information of the doujinshi
-    information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
-    needed_fields = ['Characters', 'Artists', 'Language', 'Tags']
-    for field in information_fields:
-        field_name = field.contents[0].strip().strip(':')
-        if field_name in needed_fields:
-            data = [sub_field.contents[0].strip() for sub_field in
-                    field.find_all('a', attrs={'class': 'tag'})]
-            doujinshi[field_name.lower()] = ', '.join(data)
+    needed_fields = ['character', 'artist', 'language']
+    for tag in response['tags']:
+        tag_type = tag['type']
+        if tag_type in needed_fields:
+            if tag_type not in doujinshi:
+                doujinshi[tag_type] = tag['name']
+            else:
+                doujinshi[tag_type] += tag['name']

     return doujinshi
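
The rewritten `doujinshi_parser` consumes the gallery API JSON instead of scraping HTML. A sketch of the payload shape it assumes, reconstructed purely from the fields accessed above, with hypothetical values:

```python
# Hypothetical gallery payload; only the fields used by doujinshi_parser are shown.
response = {
    'media_id': '987560',
    'title': {'english': 'Some Title', 'japanese': 'なんとか'},
    'images': {'pages': [{'t': 'j'}, {'t': 'j'}, {'t': 'p'}]},
    'tags': [{'type': 'artist', 'name': 'someone'},
             {'type': 'language', 'name': 'japanese'}],
}

ext = ''.join(s['t'] for s in response['images']['pages'])  # 'jjp'
pages = len(response['images']['pages'])                    # 3
```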
@@ -76,20 +118,19 @@ def search_parser(keyword, page):
     logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
     result = []
     try:
-        response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
+        response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
+        if 'result' not in response:
+            raise Exception('No result in response')
     except requests.ConnectionError as e:
         logger.critical(e)
         logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
         exit(1)

-    html = BeautifulSoup(response, 'html.parser')
-    doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
-    for doujinshi in doujinshi_search_result:
-        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
-        title = doujinshi_container.text.strip()
-        title = (title[:85] + '..') if len(title) > 85 else title
-        id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
-        result.append({'id': id_, 'title': title})
+    for row in response['result']:
+        title = row['title']['english']
+        title = title[:85] + '..' if len(title) > 85 else title
+        result.append({'id': row['id'], 'title': title})

     if not result:
         logger.warn('Not found anything of keyword {}'.format(keyword))
@@ -104,5 +145,6 @@ def print_doujinshi(doujinshi_list):
     logger.info('Search Result\n' +
                 tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))

+
 if __name__ == '__main__':
     print(doujinshi_parser("32271"))

nhentai/utils.py

@@ -2,6 +2,7 @@
 from __future__ import unicode_literals, print_function
 import os
+import string

 from nhentai.logger import logger
@@ -33,7 +34,8 @@ def generate_html(output_dir='.', doujinshi_obj=None):
     previous = ''

     if doujinshi_obj is not None:
-        doujinshi_dir = os.path.join(output_dir, str(doujinshi_obj.id))
+        doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
+                                                                            doujinshi_obj.name[:200])))
     else:
         doujinshi_dir = '.'
@@ -66,3 +68,20 @@ def generate_html(output_dir='.', doujinshi_obj=None):
         f.write(data)

     logger.log(15, 'HTML Viewer has been write to \'{0}\''.format(os.path.join(doujinshi_dir, 'index.html')))
+
+
+def format_filename(s):
+    """Take a string and return a valid filename constructed from the string.
+    Uses a whitelist approach: any characters not present in valid_chars are
+    removed. Also spaces are replaced with underscores.
+
+    Note: this method may produce invalid filenames such as ``, `.` or `..`
+    When I use this method I prepend a date string like '2009_01_15_19_46_32_'
+    and append a file extension like '.txt', so I avoid the potential of using
+    an invalid filename.
+    """
+    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
+    filename = ''.join(c for c in s if c in valid_chars)
+    filename = filename.replace(' ', '_')  # I don't like spaces in filenames.
+    return filename
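
`format_filename` is what turns `'<id>-<name>'` into a safe directory name in `__main__` and `generate_html` above: characters outside the whitelist are dropped and spaces become underscores. A usage example (title hypothetical):

```python
from nhentai.utils import format_filename  # the function added above

# '[', ']' and '!' are not in valid_chars, so they are dropped; spaces become '_'.
print(format_filename('177013-[Artist] Some Title!'))
# -> '177013-Artist_Some_Title'
```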

setup.py

@@ -1,15 +1,18 @@
 # coding: utf-8
 from __future__ import print_function, unicode_literals
+import sys
 import codecs
 from setuptools import setup, find_packages

 from nhentai import __version__, __author__, __email__

 with open('requirements.txt') as f:
     requirements = [l for l in f.read().splitlines() if l]


 def long_description():
-    with codecs.open('README.md', 'r') as f:
+    with codecs.open('README.md', 'rb') as f:
+        if sys.version_info >= (3, 0, 0):
+            return str(f.read())

 setup(