Compare commits

..

28 Commits

Author SHA1 Message Date
f316c3243b 0.2.12 2018-04-19 17:29:23 +08:00
967e0b4ff5 fix #18 #19 use nhentai api 2018-04-19 17:21:43 +08:00
22cf2592dd 0.2.11 2018-03-16 23:48:58 +08:00
caa0753adb fix bug #13 2018-03-16 23:45:05 +08:00
0e14dd62d5 fix bug #13 2018-03-16 23:42:24 +08:00
7c9693785e fix #14 2018-03-16 23:39:04 +08:00
08ad73b683 fix bug #13 2018-03-16 23:33:16 +08:00
a56d3ca18c fix bug #13 2018-03-16 23:23:25 +08:00
c1975897d2 save downloaded doujinshi as doujinshi name #13 2018-03-16 23:16:26 +08:00
4ed596ff98 download user fav 2018-03-05 21:47:27 +08:00
debf287fb0 download user fav 2018-03-05 21:45:56 +08:00
308c5277b8 Merge pull request #12 from RomaniukVadim/master (Add install for Gentoo) 2018-03-03 19:33:23 +08:00
b425c883c7 Add install for Gentoo 2018-03-02 17:18:22 +02:00
7bf9507bd2 0.2.10 2018-01-09 16:05:52 +08:00
5f5245f70f fix bug 2018-01-09 16:02:16 +08:00
45fb35b950 fix bug and add --html 2018-01-01 17:44:55 +08:00
2271b83d93 0.2.8 2017-08-19 00:50:38 +08:00
0ee000edeb sort #10 2017-08-19 00:48:53 +08:00
a47359f411 tiny bug 2017-07-06 15:41:33 +08:00
48c6fadc98 add viewer image 2017-06-18 16:48:54 +08:00
dbc834ea2e 0.2.7 2017-06-18 14:25:00 +08:00
71177ff94e 0.2.6 2017-06-18 14:19:28 +08:00
d1ed9b6980 add html doujinshi viewer 2017-06-18 14:19:07 +08:00
42a09e2c1e fix timeout 2017-03-17 20:19:40 +08:00
e306d50b7e fix bug 2017-03-17 20:14:42 +08:00
043f391d04 fix https error 2016-11-23 22:45:03 +08:00
9549c5f5a2 fix bug 2016-11-23 22:35:56 +08:00
5592b30be4 do not download 404 2016-11-23 22:11:47 +08:00
15 changed files with 413 additions and 102 deletions

.gitignore (vendored)

@@ -4,4 +4,4 @@ build
 dist/
 *.egg-info
 .python-version
+.DS_Store

MANIFEST.in

@@ -1,2 +1,3 @@
 include README.md
 include requirements.txt
+include nhentai/doujinshi.html

README.md

@@ -12,41 +12,54 @@ nhentai
 🎉🎉 nhentai 现在支持 Windows 啦! 🎉🎉
 由于 [http://nhentai.net](http://nhentai.net) 下载下来的种子速度很慢,而且官方也提供在线观看本子的功能,所以可以利用本脚本下载本子。
-### 安装
+### Installation
 git clone https://github.com/RicterZ/nhentai
 cd nhentai
 python setup.py install
-### 用法
-+ 下载指定 id 列表的本子:
-nhentai --id=123855,123866
-+ 下载某关键词第一页的本子(不推荐):
-nhentai --search="tomori" --page=1 --download
+### Gentoo
+layman -fa glicOne
+sudo emerge net-misc/nhentai
+### Usage
+下载指定 id 列表的本子:
+```bash
+nhentai --id=123855,123866
+```
+下载某关键词第一页的本子:
+```bash
+nhentai --search="tomori" --page=1 --download
+```
+下载用户 favorites 内容:
+```bash
+nhentai --login "username:password" --download
+```
+### Options
 `-t, --thread`:指定下载的线程数,最多为 10 线程。
 `--path`:指定下载文件的输出路径,默认为当前目录。
 `--timeout`:指定下载图片的超时时间,默认为 30 秒。
 `--proxy`:指定下载的代理,例如: http://127.0.0.1:8080/
+`--login`:nhentai 账号的“用户名:密码”组合
-### 自建 nhentai 镜像
+### nHentai Mirror
 如果想用自建镜像下载 nhentai 的本子,需要搭建 nhentai.net 和 i.nhentai.net 的反向代理。
 例如用 h.loli.club 来做反向代理的话,需要 h.loli.club 反代 nhentai.net,i.h.loli.club 反带 i.nhentai.net。
 然后利用环境变量来下载:
-NHENTAI=http://h.loli.club nhentai --id 123456
+```bash
+NHENTAI=http://h.loli.club nhentai --id 123456
+```
 ![](./images/search.png)
 ![](./images/download.png)
+![](./images/viewer.png)
 ### License
 MIT

images/image.jpg: mode changed Executable file → Normal file (content unchanged, 34 KiB)

images/viewer.png: new binary file, 311 KiB (binary content not shown)

nhentai/__init__.py

@@ -1,3 +1,3 @@
-__version__ = '0.2.2'
+__version__ = '0.2.12'
 __author__ = 'Ricter'
 __email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -8,7 +8,7 @@ except ImportError:
     pass
 import nhentai.constant as constant
-from nhentai.utils import urlparse
+from nhentai.utils import urlparse, generate_html
 from nhentai.logger import logger
 try:

@@ -30,9 +30,13 @@ def banner():
 def cmd_parser():
-    parser = OptionParser()
-    parser.add_option('--download', dest='is_download', action='store_true', help='download doujinshi or not')
-    parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information.')
+    parser = OptionParser('\n nhentai --search [keyword] --download'
+                          '\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
+                          '\n\nEnvironment Variable:\n'
+                          ' NHENTAI nhentai mirror url')
+    parser.add_option('--download', dest='is_download', action='store_true',
+                      help='download doujinshi (for search result)')
+    parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information')
     parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
     parser.add_option('--search', type='string', dest='keyword', action='store', help='search doujinshi by keyword')
     parser.add_option('--page', type='int', dest='page', action='store', default=1,

@@ -46,6 +50,11 @@ def cmd_parser():
                       help='timeout of download doujinshi')
     parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
                       help='use proxy, example: http://127.0.0.1:1080')
+    parser.add_option('--html', dest='html_viewer', action='store_true',
+                      help='generate a html viewer at current directory')
+    parser.add_option('--login', '-l', type='str', dest='login', action='store',
+                      help='username:password pair of nhentai account')
     try:
         sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))

@@ -56,35 +65,49 @@ def cmd_parser():
     args, _ = parser.parse_args(sys.argv[1:])
+    if args.html_viewer:
+        generate_html()
+        exit(0)
+    if args.login:
+        try:
+            _, _ = args.login.split(':', 1)
+        except ValueError:
+            logger.error('Invalid `username:password` pair.')
+            exit(1)
+        if not args.is_download:
+            logger.warning('YOU DO NOT SPECIFY `--download` OPTION !!!')
     if args.tags:
         logger.warning('`--tags` is under construction')
-        exit(0)
+        exit(1)
     if args.id:
         _ = map(lambda id: id.strip(), args.id.split(','))
-        args.id = set(map(int, filter(lambda id: id.isdigit(), _)))
+        args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))
-    if (args.is_download or args.is_show) and not args.id and not args.keyword:
+    if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.login:
         logger.critical('Doujinshi id(s) are required for downloading')
         parser.print_help()
-        exit(0)
+        exit(1)
-    if not args.keyword and not args.id:
+    if not args.keyword and not args.id and not args.login:
         parser.print_help()
-        exit(0)
+        exit(1)
     if args.threads <= 0:
         args.threads = 1
-    elif args.threads > 10:
-        logger.critical('Maximum number of used threads is 10')
-        exit(0)
+    elif args.threads > 15:
+        logger.critical('Maximum number of used threads is 15')
+        exit(1)
     if args.proxy:
         proxy_url = urlparse(args.proxy)
         if proxy_url.scheme not in ('http', 'https'):
             logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
         else:
-            constant.PROXY = {proxy_url.scheme: args.proxy}
+            constant.PROXY = {'http': args.proxy, 'https': args.proxy}
     return args

nhentai/command.py

@@ -5,11 +5,12 @@ import signal
 import platform
 from nhentai.cmdline import cmd_parser, banner
-from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi
+from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser
 from nhentai.doujinshi import Doujinshi
 from nhentai.downloader import Downloader
 from nhentai.logger import logger
 from nhentai.constant import BASE_URL
+from nhentai.utils import generate_html
 def main():

@@ -20,6 +21,12 @@ def main():
     doujinshi_ids = []
     doujinshi_list = []
+    if options.login:
+        username, password = options.login.split(':', 1)
+        logger.info('Login to nhentai use credential \'%s:%s\'' % (username, '*' * len(password)))
+        for doujinshi_info in login_parser(username=username, password=password):
+            doujinshi_list.append(Doujinshi(**doujinshi_info))
     if options.keyword:
         doujinshis = search_parser(options.keyword, options.page)
         print_doujinshi(doujinshis)

@@ -29,11 +36,9 @@ def main():
         doujinshi_ids = options.id
     if doujinshi_ids:
-        for id in doujinshi_ids:
-            doujinshi_info = doujinshi_parser(id)
+        for id_ in doujinshi_ids:
+            doujinshi_info = doujinshi_parser(id_)
             doujinshi_list.append(Doujinshi(**doujinshi_info))
-    else:
-        exit(0)
     if not options.is_show:
         downloader = Downloader(path=options.output_dir,

@@ -42,6 +47,7 @@ def main():
         for doujinshi in doujinshi_list:
             doujinshi.downloader = downloader
             doujinshi.download()
+            generate_html(options.output_dir, doujinshi)
     if not platform.system() == 'Windows':
         logger.log(15, '🍺 All done.')

nhentai/constant.py

@@ -5,8 +5,10 @@ from nhentai.utils import urlparse
 BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
-DETAIL_URL = '%s/g' % BASE_URL
-SEARCH_URL = '%s/search/' % BASE_URL
+DETAIL_URL = '%s/api/gallery' % BASE_URL
+SEARCH_URL = '%s/api/galleries/search' % BASE_URL
+LOGIN_URL = '%s/login/' % BASE_URL
+FAV_URL = '%s/favorites/' % BASE_URL
 u = urlparse(BASE_URL)
 IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)
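For orientation, the sketch below (not part of the diff) shows how these constants interact with the NHENTAI mirror variable described in the README; the mirror hostname is only the README's example, not a required value.

```python
import os
from urllib.parse import urlparse  # Python 3; the project itself falls back to urlparse on Python 2

# Same derivation as constant.py: NHENTAI overrides the base site, and the
# image host is built by prefixing "i." to whatever hostname is configured.
BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
DETAIL_URL = '%s/api/gallery' % BASE_URL
IMAGE_URL = '%s://i.%s/galleries' % (urlparse(BASE_URL).scheme, urlparse(BASE_URL).hostname)

# With NHENTAI=http://h.loli.club (the README's example mirror), DETAIL_URL becomes
# http://h.loli.club/api/gallery and IMAGE_URL becomes http://i.h.loli.club/galleries.
print(DETAIL_URL, IMAGE_URL)
```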

nhentai/doujinshi.html (new file, 126 lines)

@@ -0,0 +1,126 @@
<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="UTF-8">
<title>{TITLE}</title>
<style>
html, body {{
background-color: #e8e6e6;
height: 100%;
padding: 0;
margin: 0;
overflow: hidden;
}}
.container img {{
display: block;
width: 100%;
margin: 30px 0;
padding: 10px;
cursor: pointer;
}}
.container {{
height: 100%;
overflow: scroll;
background: #e8e6e6;
width: 200px;
padding: 30px;
float: left;
}}
.image {{
margin-left: 260px;
height: 100%;
background: #222;
text-align: center;
}}
.image img {{
height: 100%;
}}
.i a {{
display: block;
position: absolute;
top: 0;
width: 50%;
height: 100%;
}}
.i {{
position: relative;
height: 100%;
}}
.current {{
background: #BBB;
border-radius: 10px;
}}
</style>
<script>
function cursorfocus(elem) {{
var container = document.getElementsByClassName('container')[0];
container.scrollTop = elem.offsetTop - 500;
}}
function getImage(type) {{
var current = document.getElementsByClassName("current")[0];
current.className = "image-item";
var img_src = type == 1 ? current.getAttribute('attr-next') : current.getAttribute('attr-prev');
if (img_src === "") {{
img_src = current.src;
}}
var img_list = document.getElementsByClassName("image-item");
for (i=0; i<img_list.length; i++) {{
if (img_list[i].src.endsWith(img_src)) {{
img_list[i].className = "image-item current";
cursorfocus(img_list[i]);
break;
}}
}}
var display = document.getElementById("dest");
display.src = img_src;
display.focus();
}}
</script>
</head>
<body>
<div class="container">
{IMAGES}</div>
<div class="image">
<div class="i">
<img src="" id="dest">
<a href="javascript:getImage(-1)" style="left: 0;"></a>
<a href="javascript:getImage(1)" style="left: 50%;"></a>
</div>
</div>
</body>
<script>
var img_list = document.getElementsByClassName("image-item");
var display = document.getElementById("dest");
display.src = img_list[0].src;
for (var i = 0; i < img_list.length; i++) {{
img_list[i].addEventListener('click', function() {{
var current = document.getElementsByClassName("current")[0];
current.className = "image-item";
this.className = "image-item current";
var display = document.getElementById("dest");
display.src = this.src;
display.focus();
}}, false);
}}
document.onkeypress = function(e) {{
if (e.keyCode == 32) {{
getImage(1);
}}
}}
</script>
</html>
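The doubled braces throughout this template are literal CSS/JS braces escaped for Python's str.format; generate_html in nhentai/utils.py (further down in this compare) fills {TITLE} and {IMAGES} through that mechanism. A minimal sketch of the substitution, with a made-up one-image list:

```python
# Sketch of how the template placeholders are filled; "{{"/"}}" survive
# str.format() as literal "{"/"}" so the CSS and JS stay intact.
template = '<title>{TITLE}</title><style>body {{ margin: 0; }}</style>\n{IMAGES}'

images = '<img src="001.jpg" class="image-item current" attr-prev="" attr-next="">\n'  # hypothetical
print(template.format(TITLE='Example Doujinshi', IMAGES=images))
# <title>Example Doujinshi</title><style>body { margin: 0; }</style>
# <img src="001.jpg" class="image-item current" attr-prev="" attr-next="">
```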

nhentai/doujinshi.py

@@ -5,6 +5,13 @@ from future.builtins import range
 from nhentai.constant import DETAIL_URL, IMAGE_URL
 from nhentai.logger import logger
+from nhentai.utils import format_filename
+EXT_MAP = {
+    'j': 'jpg',
+    'p': 'png',
+}
 class DoujinshiInfo(dict):

@@ -19,7 +26,7 @@ class DoujinshiInfo(dict):
 class Doujinshi(object):
-    def __init__(self, name=None, id=None, img_id=None, ext='jpg', pages=0, **kwargs):
+    def __init__(self, name=None, id=None, img_id=None, ext='', pages=0, **kwargs):
         self.name = name
         self.id = id
         self.img_id = img_id

@@ -49,9 +56,10 @@ class Doujinshi(object):
         logger.info('Start download doujinshi: %s' % self.name)
         if self.downloader:
             download_queue = []
-            for i in range(1, self.pages + 1):
-                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext))
-            self.downloader.download(download_queue, self.id)
+            for i in range(len(self.ext)):
+                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i+1, EXT_MAP[self.ext[i]]))
+            self.downloader.download(download_queue, format_filename('%s-%s' % (self.id, self.name[:200])))
         else:
             logger.critical('Downloader has not be loaded')
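To make the new extension handling concrete: the gallery API (see nhentai/parser.py below) reports one type code per page, 'j' or 'p', and the loop above turns each code into a page URL. A small sketch with hypothetical values:

```python
# Hypothetical media id and per-page type string; the URL pattern matches the diff above.
IMAGE_URL = 'https://i.nhentai.net/galleries'
EXT_MAP = {'j': 'jpg', 'p': 'png'}

img_id = 987654   # media_id returned by the gallery API (made up here)
ext = 'jjp'       # pages 1-2 are jpg, page 3 is png

download_queue = ['%s/%d/%d.%s' % (IMAGE_URL, img_id, i + 1, EXT_MAP[ext[i]])
                  for i in range(len(ext))]
# ['https://i.nhentai.net/galleries/987654/1.jpg',
#  'https://i.nhentai.net/galleries/987654/2.jpg',
#  'https://i.nhentai.net/galleries/987654/3.png']
```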

nhentai/downloader.py

@@ -14,48 +14,73 @@ from nhentai.parser import request
 from nhentai.utils import Singleton
+requests.packages.urllib3.disable_warnings()
+class NhentaiImageNotExistException(Exception):
+    pass
 class Downloader(Singleton):
     def __init__(self, path='', thread=1, timeout=30):
-        if not isinstance(thread, (int, )) or thread < 1 or thread > 10:
+        if not isinstance(thread, (int, )) or thread < 1 or thread > 15:
             raise ValueError('Invalid threads count')
         self.path = str(path)
         self.thread_count = thread
         self.threads = []
         self.timeout = timeout
-    def _download(self, url, folder='', filename='', retried=False):
+    def _download(self, url, folder='', filename='', retried=0):
         logger.info('Start downloading: {0} ...'.format(url))
         filename = filename if filename else os.path.basename(urlparse(url).path)
         base_filename, extension = os.path.splitext(filename)
         try:
+            if os.path.exists(os.path.join(folder, base_filename.zfill(3) + extension)):
+                logger.warning('File: {0} existed, ignore.'.format(os.path.join(folder, base_filename.zfill(3) +
+                                                                                extension)))
+                return 1, url
             with open(os.path.join(folder, base_filename.zfill(3) + extension), "wb") as f:
                 response = request('get', url, stream=True, timeout=self.timeout)
+                if response.status_code != 200:
+                    raise NhentaiImageNotExistException
                 length = response.headers.get('content-length')
                 if length is None:
                     f.write(response.content)
                 else:
                     for chunk in response.iter_content(2048):
                         f.write(chunk)
-        except requests.HTTPError as e:
-            if not retried:
-                logger.error('Error: {0}, retrying'.format(str(e)))
-                return self._download(url=url, folder=folder, filename=filename, retried=True)
+        except (requests.HTTPError, requests.Timeout) as e:
+            if retried < 3:
+                logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), retried))
+                return 0, self._download(url=url, folder=folder, filename=filename, retried=retried+1)
             else:
-                return None
+                return 0, None
+        except NhentaiImageNotExistException as e:
+            os.remove(os.path.join(folder, base_filename.zfill(3) + extension))
+            return -1, url
         except Exception as e:
             logger.critical(str(e))
-            return None
-        return url
+            return 0, None
+        return 1, url
     def _download_callback(self, request, result):
-        if not result:
-            logger.critical('Too many errors occurred, quit.')
-            exit(1)
-        logger.log(15, '{0} download successfully'.format(result))
+        result, data = result
+        if result == 0:
+            logger.warning('fatal errors occurred, ignored')
+            # exit(1)
+        elif result == -1:
+            logger.warning('url {} return status code 404'.format(data))
+        else:
+            logger.log(15, '{0} download successfully'.format(data))
     def download(self, queue, folder=''):
-        if not isinstance(folder, (text)):
+        if not isinstance(folder, text):
             folder = str(folder)
         if self.path:

@@ -66,7 +91,7 @@ class Downloader(Singleton):
             try:
                 os.makedirs(folder)
             except EnvironmentError as e:
-                logger.critical('Error: {0}'.format(str(e)))
+                logger.critical('{0}'.format(str(e)))
                 exit(1)
             else:
                 logger.warn('Path \'{0}\' already exist.'.format(folder))
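The retry rework above swaps the one-shot boolean for a counter and (status, data) return values. A simplified standalone sketch of that pattern follows; the plain requests.get call and the flattened return convention are stand-ins, not the class's actual method.

```python
import requests

def fetch_with_retry(url, retried=0, timeout=30):
    """Counter-based retry in the spirit of Downloader._download:
    (1, url) on success, (0, None) once three attempts have failed."""
    try:
        response = requests.get(url, stream=True, timeout=timeout)
        response.raise_for_status()  # turns 4xx/5xx into requests.HTTPError
    except (requests.HTTPError, requests.Timeout):
        if retried < 3:
            return fetch_with_retry(url, retried=retried + 1, timeout=timeout)
        return 0, None
    return 1, url
```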

nhentai/parser.py

@@ -1,9 +1,11 @@
 # coding: utf-8
 from __future__ import unicode_literals, print_function
-from bs4 import BeautifulSoup
+import os
 import re
+import threadpool
 import requests
+from bs4 import BeautifulSoup
 from tabulate import tabulate
 import nhentai.constant as constant

@@ -14,7 +16,67 @@ def request(method, url, **kwargs):
     if not hasattr(requests, method):
         raise AttributeError('\'requests\' object has no attribute \'{0}\''.format(method))
-    return requests.__dict__[method](url, proxies=constant.PROXY, **kwargs)
+    return requests.__dict__[method](url, proxies=constant.PROXY, verify=False, **kwargs)
+def login_parser(username, password):
+    s = requests.Session()
+    s.proxies = constant.PROXY
+    s.verify = False
+    s.headers.update({'Referer': constant.LOGIN_URL})
+    s.get(constant.LOGIN_URL)
+    content = s.get(constant.LOGIN_URL).content
+    html = BeautifulSoup(content, 'html.parser')
+    csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})
+    if not csrf_token_elem:
+        raise Exception('Cannot find csrf token to login')
+    csrf_token = csrf_token_elem.attrs['value']
+    login_dict = {
+        'csrfmiddlewaretoken': csrf_token,
+        'username_or_email': username,
+        'password': password,
+    }
+    resp = s.post(constant.LOGIN_URL, data=login_dict)
+    if 'Invalid username (or email) or password' in resp.text:
+        logger.error('Login failed, please check your username and password')
+        exit(1)
+    html = BeautifulSoup(s.get(constant.FAV_URL).content, 'html.parser')
+    count = html.find('span', attrs={'class': 'count'})
+    if not count:
+        logger.error('Cannot get count of your favorites, maybe login failed.')
+    count = int(count.text.strip('(').strip(')'))
+    pages = count / 25
+    pages += 1 if count % (25 * pages) else 0
+    logger.info('Your have %d favorites in %d pages.' % (count, pages))
+    if os.getenv('DEBUG'):
+        pages = 1
+    ret = []
+    doujinshi_id = re.compile('data-id="([\d]+)"')
+    def _callback(request, result):
+        ret.append(result)
+    thread_pool = threadpool.ThreadPool(5)
+    for page in range(1, pages+1):
+        try:
+            logger.info('Getting doujinshi id of page %d' % page)
+            resp = s.get(constant.FAV_URL + '?page=%d' % page).content
+            ids = doujinshi_id.findall(resp)
+            requests_ = threadpool.makeRequests(doujinshi_parser, ids, _callback)
+            [thread_pool.putRequest(req) for req in requests_]
+            thread_pool.wait()
+        except Exception as e:
+            logger.error('Error: %s, continue', str(e))
+    return ret
 def doujinshi_parser(id_):

@@ -25,49 +87,29 @@ def doujinshi_parser(id_):
     logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
     doujinshi = dict()
     doujinshi['id'] = id_
-    url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
+    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
     try:
-        response = request('get', url).content
+        response = request('get', url).json()
     except Exception as e:
         logger.critical(str(e))
         exit(1)
-    html = BeautifulSoup(response, 'html.parser')
-    doujinshi_info = html.find('div', attrs={'id': 'info'})
-    title = doujinshi_info.find('h1').text
-    subtitle = doujinshi_info.find('h2')
-    doujinshi['name'] = title
-    doujinshi['subtitle'] = subtitle.text if subtitle else ''
-    doujinshi_cover = html.find('div', attrs={'id': 'cover'})
-    img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png)$', doujinshi_cover.a.img['src'])
-    if not img_id:
-        logger.critical('Tried yo get image id failed')
-        exit(1)
-    doujinshi['img_id'] = img_id.group(1)
-    doujinshi['ext'] = img_id.group(2)
-    pages = 0
-    for _ in doujinshi_info.find_all('div', class_=''):
-        pages = re.search('([\d]+) pages', _.text)
-        if pages:
-            pages = pages.group(1)
-            break
-    doujinshi['pages'] = int(pages)
+    doujinshi['name'] = response['title']['english']
+    doujinshi['subtitle'] = response['title']['japanese']
+    doujinshi['img_id'] = response['media_id']
+    doujinshi['ext'] = ''.join(map(lambda s: s['t'], response['images']['pages']))
+    doujinshi['pages'] = len(response['images']['pages'])
     # gain information of the doujinshi
-    information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
-    needed_fields = ['Characters', 'Artists', 'Language', 'Tags']
-    for field in information_fields:
-        field_name = field.contents[0].strip().strip(':')
-        if field_name in needed_fields:
-            data = [sub_field.contents[0].strip() for sub_field in
-                    field.find_all('a', attrs={'class': 'tag'})]
-            doujinshi[field_name.lower()] = ', '.join(data)
+    needed_fields = ['character', 'artist', 'language']
+    for tag in response['tags']:
+        tag_type = tag['type']
+        if tag_type in needed_fields:
+            if tag_type not in doujinshi:
+                doujinshi[tag_type] = tag['name']
+            else:
+                doujinshi[tag_type] += tag['name']
     return doujinshi

@@ -76,20 +118,19 @@ def search_parser(keyword, page):
     logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
     result = []
     try:
-        response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
+        response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
+        if 'result' not in response:
+            raise Exception('No result in response')
     except requests.ConnectionError as e:
         logger.critical(e)
         logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
         exit(1)
-    html = BeautifulSoup(response, 'html.parser')
-    doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
-    for doujinshi in doujinshi_search_result:
-        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
-        title = doujinshi_container.text.strip()
-        title = (title[:85] + '..') if len(title) > 85 else title
-        id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
-        result.append({'id': id_, 'title': title})
+    for row in response['result']:
+        title = row['title']['english']
+        title = title[:85] + '..' if len(title) > 85 else title
+        result.append({'id': row['id'], 'title': title})
     if not result:
         logger.warn('Not found anything of keyword {}'.format(keyword))

@@ -104,5 +145,6 @@ def print_doujinshi(doujinshi_list):
     logger.info('Search Result\n' +
                 tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
 if __name__ == '__main__':
     print(doujinshi_parser("32271"))
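Since doujinshi_parser now reads the JSON gallery API instead of scraping HTML, a rough sketch of the request/response shape it relies on is shown below; the field names are taken from the diff and the id is the same sample id used at the bottom of the file.

```python
import requests

# Rough shape of what doujinshi_parser consumes after the API switch.
resp = requests.get('https://nhentai.net/api/gallery/32271').json()

info = {
    'name': resp['title']['english'],
    'subtitle': resp['title']['japanese'],
    'img_id': resp['media_id'],
    # one type code per page: 'j' -> jpg, 'p' -> png (see EXT_MAP in doujinshi.py)
    'ext': ''.join(page['t'] for page in resp['images']['pages']),
    'pages': len(resp['images']['pages']),
}
print(info['name'], info['pages'])
```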

nhentai/utils.py

@@ -1,6 +1,10 @@
 # coding: utf-8
 from __future__ import unicode_literals, print_function
+import os
+import string
+from nhentai.logger import logger
 class _Singleton(type):
     """ A metaclass that creates a Singleton base class when called. """

@@ -23,3 +27,61 @@ def urlparse(url):
         from urllib.parse import urlparse
     return urlparse(url)
+def generate_html(output_dir='.', doujinshi_obj=None):
+    image_html = ''
+    previous = ''
+    if doujinshi_obj is not None:
+        doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
+                                                                            doujinshi_obj.name[:200])))
+    else:
+        doujinshi_dir = '.'
+    file_list = os.listdir(doujinshi_dir)
+    file_list.sort()
+    for index, image in enumerate(file_list):
+        if not os.path.splitext(image)[1] in ('.jpg', '.png'):
+            continue
+        try:
+            next_ = file_list[file_list.index(image) + 1]
+        except IndexError:
+            next_ = ''
+        image_html += '<img src="{0}" class="image-item {1}" attr-prev="{2}" attr-next="{3}">\n'\
+            .format(image, 'current' if index == 0 else '', previous, next_)
+        previous = image
+    with open(os.path.join(os.path.dirname(__file__), 'doujinshi.html'), 'r') as template:
+        html = template.read()
+    if doujinshi_obj is not None:
+        title = doujinshi_obj.name
+    else:
+        title = 'nHentai HTML Viewer'
+    data = html.format(TITLE=title, IMAGES=image_html)
+    with open(os.path.join(doujinshi_dir, 'index.html'), 'w') as f:
+        f.write(data)
+    logger.log(15, 'HTML Viewer has been write to \'{0}\''.format(os.path.join(doujinshi_dir, 'index.html')))
+def format_filename(s):
+    """Take a string and return a valid filename constructed from the string.
+    Uses a whitelist approach: any characters not present in valid_chars are
+    removed. Also spaces are replaced with underscores.
+    Note: this method may produce invalid filenames such as ``, `.` or `..`
+    When I use this method I prepend a date string like '2009_01_15_19_46_32_'
+    and append a file extension like '.txt', so I avoid the potential of using
+    an invalid filename.
+    """
+    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
+    filename = ''.join(c for c in s if c in valid_chars)
+    filename = filename.replace(' ', '_')  # I don't like spaces in filenames.
+    return filename
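A quick usage example of the whitelist approach format_filename implements; the title is invented.

```python
import string

def format_filename(s):
    # same whitelist filtering and space replacement as the function above
    valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
    return ''.join(c for c in s if c in valid_chars).replace(' ', '_')

print(format_filename('123456-[Artist] Some Doujinshi! (Chapter 1)'))
# -> 123456-Artist_Some_Doujinshi_(Chapter_1)
```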

setup.py

@@ -1,15 +1,18 @@
 # coding: utf-8
 from __future__ import print_function, unicode_literals
+import sys
 import codecs
 from setuptools import setup, find_packages
 from nhentai import __version__, __author__, __email__
 with open('requirements.txt') as f:
     requirements = [l for l in f.read().splitlines() if l]
 def long_description():
     with codecs.open('README.md', 'rb') as f:
+        if sys.version_info >= (3, 0, 0):
             return str(f.read())
 setup(