Mirror of https://github.com/RicterZ/nhentai.git, synced 2025-04-20 11:01:17 +02:00
Merge branch 'master' of github.com:RicterZ/nhentai
commit de157ccb7f
@@ -1,3 +1,5 @@
 include README.md
 include requirements.txt
-include nhentai/doujinshi.html
+include nhentai/viewer/index.html
+include nhentai/viewer/styles.css
+include nhentai/viewer/scripts.js

README.md (136 changed lines)
@@ -1,68 +1,70 @@
 nhentai
 =======
 _ _ _ _
 _ __ | | | | ___ _ __ | |_ __ _(_)
 | '_ \| |_| |/ _ \ '_ \| __/ _` | |
 | | | | _ | __/ | | | || (_| | |
 |_| |_|_| |_|\___|_| |_|\__\__,_|_|
 
 あなたも変態。 いいね?
 [](https://travis-ci.org/RicterZ/nhentai)
 
 🎉🎉 nhentai now supports Windows!
 
 Torrents downloaded from [http://nhentai.net](http://nhentai.net) are very slow, and the site already offers online reading, so this script can be used to download doujinshi directly instead.
 
 ### Installation
 
     git clone https://github.com/RicterZ/nhentai
     cd nhentai
     python setup.py install
 
 ### Gentoo
 
     layman -fa glicOne
     sudo emerge net-misc/nhentai
 
 ### Usage
 Download doujinshi by a list of ids:
 ```bash
 nhentai --id=123855,123866
 ```
 
 Download the first page of search results for a keyword:
 ```bash
 nhentai --search="tomori" --page=1 --download
 ```
 
 Download the contents of your favorites:
 ```bash
 nhentai --login "username:password" --download
 ```
 
 ### Options
 
 `-t, --thread`: number of download threads, at most 10.
 `--path`: output path for downloaded files, defaults to the current directory.
 `--timeout`: timeout for downloading each image, defaults to 30 seconds.
 `--proxy`: proxy to download through, e.g. http://127.0.0.1:8080/
 `--login`: `username:password` pair of your nhentai account
+`--nohtml`: do not generate the HTML viewer
+`--cbz`: generate a Comic Book (CBZ) file
 
 ### nHentai Mirror
 To download doujinshi through a self-hosted mirror, set up reverse proxies for both nhentai.net and i.nhentai.net.
 For example, with h.loli.club as the mirror, h.loli.club should reverse-proxy nhentai.net and i.h.loli.club should reverse-proxy i.nhentai.net.
 Then download through the mirror via an environment variable:
 
 ```bash
 NHENTAI=http://h.loli.club nhentai --id 123456
 ```
 
 (screenshot images)
 
 ### License
 MIT
 
 ### あなたも変態
 (image)

@@ -1,3 +1,3 @@
-__version__ = '0.2.12'
-__author__ = 'Ricter'
+__version__ = '0.2.14'
+__author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

@@ -1,113 +1,120 @@
 # coding: utf-8
 from __future__ import print_function
 import sys
 from optparse import OptionParser
+from nhentai import __version__
 try:
     from itertools import ifilter as filter
 except ImportError:
     pass
 
 import nhentai.constant as constant
 from nhentai.utils import urlparse, generate_html
 from nhentai.logger import logger
 
 try:
     reload(sys)
     sys.setdefaultencoding(sys.stdin.encoding)
 except NameError:
     # python3
     pass
 
 
 def banner():
-    logger.info(u'''nHentai: あなたも変態。 いいね?
+    logger.info(u'''nHentai ver %s: あなたも変態。 いいね?
 _ _ _ _
 _ __ | | | | ___ _ __ | |_ __ _(_)
 | '_ \| |_| |/ _ \ '_ \| __/ _` | |
 | | | | _ | __/ | | | || (_| | |
 |_| |_|_| |_|\___|_| |_|\__\__,_|_|
-''')
+''' % __version__)
 
 
 def cmd_parser():
     parser = OptionParser('\n nhentai --search [keyword] --download'
                           '\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
                           '\n\nEnvironment Variable:\n'
                           ' NHENTAI nhentai mirror url')
     parser.add_option('--download', dest='is_download', action='store_true',
                       help='download doujinshi (for search result)')
     parser.add_option('--show-info', dest='is_show', action='store_true', help='just show the doujinshi information')
     parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
     parser.add_option('--search', type='string', dest='keyword', action='store', help='search doujinshi by keyword')
     parser.add_option('--page', type='int', dest='page', action='store', default=1,
                       help='page number of search result')
     parser.add_option('--tags', type='string', dest='tags', action='store', help='download doujinshi by tags')
     parser.add_option('--output', type='string', dest='output_dir', action='store', default='',
                       help='output dir')
     parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
                       help='thread count of download doujinshi')
     parser.add_option('--timeout', type='int', dest='timeout', action='store', default=30,
                       help='timeout of download doujinshi')
     parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
                       help='use proxy, example: http://127.0.0.1:1080')
     parser.add_option('--html', dest='html_viewer', action='store_true',
                       help='generate a html viewer at current directory')
 
     parser.add_option('--login', '-l', type='str', dest='login', action='store',
                       help='username:password pair of nhentai account')
 
+    parser.add_option('--nohtml', dest='is_nohtml', action='store_true',
+                      help='Don\'t generate HTML')
+
+    parser.add_option('--cbz', dest='is_cbz', action='store_true',
+                      help='Generate Comic Book CBZ File')
+
     try:
         sys.argv = list(map(lambda x: unicode(x.decode(sys.stdin.encoding)), sys.argv))
     except (NameError, TypeError):
         pass
     except UnicodeDecodeError:
         exit(0)
 
     args, _ = parser.parse_args(sys.argv[1:])
 
     if args.html_viewer:
         generate_html()
         exit(0)
 
     if args.login:
         try:
             _, _ = args.login.split(':', 1)
         except ValueError:
             logger.error('Invalid `username:password` pair.')
             exit(1)
 
         if not args.is_download:
             logger.warning('YOU DO NOT SPECIFY `--download` OPTION !!!')
 
     if args.tags:
         logger.warning('`--tags` is under construction')
         exit(1)
 
     if args.id:
         _ = map(lambda id: id.strip(), args.id.split(','))
         args.id = set(map(int, filter(lambda id_: id_.isdigit(), _)))
 
     if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.login:
         logger.critical('Doujinshi id(s) are required for downloading')
         parser.print_help()
         exit(1)
 
     if not args.keyword and not args.id and not args.login:
         parser.print_help()
         exit(1)
 
     if args.threads <= 0:
         args.threads = 1
 
     elif args.threads > 15:
         logger.critical('Maximum number of used threads is 15')
         exit(1)
 
     if args.proxy:
         proxy_url = urlparse(args.proxy)
         if proxy_url.scheme not in ('http', 'https'):
             logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
         else:
             constant.PROXY = {'http': args.proxy, 'https': args.proxy}
 
     return args

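A minimal sketch (not part of the commit) of how the new `--nohtml` and `--cbz` flags surface on the object returned by `cmd_parser()` above, assuming the nhentai package from this commit is importable:

```python
import sys

from nhentai.cmdline import cmd_parser

# cmd_parser() reads sys.argv[1:], so fake a command line for illustration.
sys.argv = ['nhentai', '--id', '123855', '--cbz']
options = cmd_parser()

print(options.id)         # {123855}  -- ids are parsed into a set of ints
print(options.is_cbz)     # True      -- main() below will call generate_cbz()
print(options.is_nohtml)  # None      -- store_true options default to None
```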
@@ -1,69 +1,72 @@
 #!/usr/bin/env python2.7
 # coding: utf-8
 from __future__ import unicode_literals, print_function
 import signal
 import platform
 
 from nhentai.cmdline import cmd_parser, banner
 from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, login_parser
 from nhentai.doujinshi import Doujinshi
 from nhentai.downloader import Downloader
 from nhentai.logger import logger
 from nhentai.constant import BASE_URL
-from nhentai.utils import generate_html
+from nhentai.utils import generate_html, generate_cbz
 
 
 def main():
     banner()
     logger.info('Using mirror: {0}'.format(BASE_URL))
     options = cmd_parser()
 
     doujinshi_ids = []
     doujinshi_list = []
 
     if options.login:
         username, password = options.login.split(':', 1)
         logger.info('Login to nhentai use credential \'%s:%s\'' % (username, '*' * len(password)))
         for doujinshi_info in login_parser(username=username, password=password):
             doujinshi_list.append(Doujinshi(**doujinshi_info))
 
     if options.keyword:
         doujinshis = search_parser(options.keyword, options.page)
         print_doujinshi(doujinshis)
         if options.is_download:
             doujinshi_ids = map(lambda d: d['id'], doujinshis)
     else:
         doujinshi_ids = options.id
 
     if doujinshi_ids:
         for id_ in doujinshi_ids:
             doujinshi_info = doujinshi_parser(id_)
             doujinshi_list.append(Doujinshi(**doujinshi_info))
 
     if not options.is_show:
         downloader = Downloader(path=options.output_dir,
                                 thread=options.threads, timeout=options.timeout)
 
         for doujinshi in doujinshi_list:
             doujinshi.downloader = downloader
             doujinshi.download()
-            generate_html(options.output_dir, doujinshi)
+            if not options.is_nohtml and not options.is_cbz:
+                generate_html(options.output_dir, doujinshi)
+            elif options.is_cbz:
+                generate_cbz(options.output_dir, doujinshi)
 
         if not platform.system() == 'Windows':
-            logger.log(15, '🍺 All done.')
+            logger.log(15, '🍻 All done.')
         else:
             logger.log(15, 'All done.')
 
     else:
         [doujinshi.show() for doujinshi in doujinshi_list]
 
 
 def signal_handler(signal, frame):
     logger.error('Ctrl-C signal received. Quit.')
     exit(1)
 
 
 signal.signal(signal.SIGINT, signal_handler)
 
 if __name__ == '__main__':
     main()

@@ -1,126 +0,0 @@
-<!DOCTYPE html>
-<html lang="en">
-<head>
-    <meta charset="UTF-8">
-    <title>{TITLE}</title>
-    <style>
-        html, body {{
-            background-color: #e8e6e6;
-            height: 100%;
-            padding: 0;
-            margin: 0;
-            overflow: hidden;
-        }}
-
-        .container img {{
-            display: block;
-            width: 100%;
-            margin: 30px 0;
-            padding: 10px;
-            cursor: pointer;
-        }}
-
-        .container {{
-            height: 100%;
-            overflow: scroll;
-            background: #e8e6e6;
-            width: 200px;
-            padding: 30px;
-            float: left;
-        }}
-
-        .image {{
-            margin-left: 260px;
-            height: 100%;
-            background: #222;
-            text-align: center;
-        }}
-
-        .image img {{
-            height: 100%;
-        }}
-
-        .i a {{
-            display: block;
-            position: absolute;
-            top: 0;
-            width: 50%;
-            height: 100%;
-        }}
-
-        .i {{
-            position: relative;
-            height: 100%;
-        }}
-
-        .current {{
-            background: #BBB;
-            border-radius: 10px;
-        }}
-    </style>
-
-    <script>
-        function cursorfocus(elem) {{
-            var container = document.getElementsByClassName('container')[0];
-            container.scrollTop = elem.offsetTop - 500;
-        }}
-
-        function getImage(type) {{
-            var current = document.getElementsByClassName("current")[0];
-            current.className = "image-item";
-            var img_src = type == 1 ? current.getAttribute('attr-next') : current.getAttribute('attr-prev');
-            if (img_src === "") {{
-                img_src = current.src;
-            }}
-
-            var img_list = document.getElementsByClassName("image-item");
-            for (i=0; i<img_list.length; i++) {{
-                if (img_list[i].src.endsWith(img_src)) {{
-                    img_list[i].className = "image-item current";
-                    cursorfocus(img_list[i]);
-                    break;
-                }}
-            }}
-            var display = document.getElementById("dest");
-            display.src = img_src;
-            display.focus();
-        }}
-    </script>
-</head>
-<body>
-
-<div class="container">
-    {IMAGES}</div>
-<div class="image">
-    <div class="i">
-        <img src="" id="dest">
-        <a href="javascript:getImage(-1)" style="left: 0;"></a>
-        <a href="javascript:getImage(1)" style="left: 50%;"></a>
-    </div>
-</div>
-</body>
-
-<script>
-    var img_list = document.getElementsByClassName("image-item");
-
-    var display = document.getElementById("dest");
-    display.src = img_list[0].src;
-
-    for (var i = 0; i < img_list.length; i++) {{
-        img_list[i].addEventListener('click', function() {{
-            var current = document.getElementsByClassName("current")[0];
-            current.className = "image-item";
-            this.className = "image-item current";
-            var display = document.getElementById("dest");
-            display.src = this.src;
-            display.focus();
-        }}, false);
-    }}
-
-    document.onkeypress = function(e) {{
-        if (e.keyCode == 32) {{
-            getImage(1);
-        }}
-    }}
-</script>
-</html>

@@ -1,158 +1,158 @@
 # coding: utf-8
 from __future__ import unicode_literals, print_function
 
 import os
 import re
 import threadpool
 import requests
 from bs4 import BeautifulSoup
 from tabulate import tabulate
 
 import nhentai.constant as constant
 from nhentai.logger import logger
 
 
 def request(method, url, **kwargs):
     if not hasattr(requests, method):
         raise AttributeError('\'requests\' object has no attribute \'{0}\''.format(method))
 
     return requests.__dict__[method](url, proxies=constant.PROXY, verify=False, **kwargs)
 
 
 def login_parser(username, password):
     s = requests.Session()
     s.proxies = constant.PROXY
     s.verify = False
     s.headers.update({'Referer': constant.LOGIN_URL})
 
     s.get(constant.LOGIN_URL)
     content = s.get(constant.LOGIN_URL).content
-    html = BeautifulSoup(content, 'html.parser')
+    html = BeautifulSoup(content, 'html.parser').encode("ascii")
     csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})
 
     if not csrf_token_elem:
         raise Exception('Cannot find csrf token to login')
     csrf_token = csrf_token_elem.attrs['value']
 
     login_dict = {
         'csrfmiddlewaretoken': csrf_token,
         'username_or_email': username,
         'password': password,
     }
     resp = s.post(constant.LOGIN_URL, data=login_dict)
     if 'Invalid username (or email) or password' in resp.text:
         logger.error('Login failed, please check your username and password')
         exit(1)
 
-    html = BeautifulSoup(s.get(constant.FAV_URL).content, 'html.parser')
+    html = BeautifulSoup(s.get(constant.FAV_URL).content, 'html.parser').encode("ascii")
     count = html.find('span', attrs={'class': 'count'})
     if not count:
         logger.error('Cannot get count of your favorites, maybe login failed.')
 
     count = int(count.text.strip('(').strip(')'))
     if count == 0:
         logger.warning('No favorites found')
         return []
     pages = int(count / 25)
 
     if pages:
         pages += 1 if count % (25 * pages) else 0
     else:
         pages = 1
 
     logger.info('Your have %d favorites in %d pages.' % (count, pages))
 
     if os.getenv('DEBUG'):
         pages = 1
 
     ret = []
     doujinshi_id = re.compile('data-id="([\d]+)"')
 
     def _callback(request, result):
         ret.append(result)
 
     thread_pool = threadpool.ThreadPool(5)
 
     for page in range(1, pages+1):
         try:
             logger.info('Getting doujinshi id of page %d' % page)
             resp = s.get(constant.FAV_URL + '?page=%d' % page).text
             ids = doujinshi_id.findall(resp)
             requests_ = threadpool.makeRequests(doujinshi_parser, ids, _callback)
             [thread_pool.putRequest(req) for req in requests_]
             thread_pool.wait()
         except Exception as e:
             logger.error('Error: %s, continue', str(e))
 
     return ret
 
 
 def doujinshi_parser(id_):
     if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
         raise Exception('Doujinshi id({0}) is not valid'.format(id_))
 
     id_ = int(id_)
     logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
     doujinshi = dict()
     doujinshi['id'] = id_
     url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
 
     try:
         response = request('get', url).json()
     except Exception as e:
         logger.critical(str(e))
         exit(1)
 
     doujinshi['name'] = response['title']['english']
     doujinshi['subtitle'] = response['title']['japanese']
     doujinshi['img_id'] = response['media_id']
     doujinshi['ext'] = ''.join(map(lambda s: s['t'], response['images']['pages']))
     doujinshi['pages'] = len(response['images']['pages'])
 
     # gain information of the doujinshi
     needed_fields = ['character', 'artist', 'language']
     for tag in response['tags']:
         tag_type = tag['type']
         if tag_type in needed_fields:
             if tag_type not in doujinshi:
                 doujinshi[tag_type] = tag['name']
             else:
                 doujinshi[tag_type] += tag['name']
 
     return doujinshi
 
 
 def search_parser(keyword, page):
     logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
     result = []
     try:
         response = request('get', url=constant.SEARCH_URL, params={'query': keyword, 'page': page}).json()
         if 'result' not in response:
             raise Exception('No result in response')
     except requests.ConnectionError as e:
         logger.critical(e)
         logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
         exit(1)
 
     for row in response['result']:
         title = row['title']['english']
         title = title[:85] + '..' if len(title) > 85 else title
         result.append({'id': row['id'], 'title': title})
 
     if not result:
         logger.warn('Not found anything of keyword {}'.format(keyword))
 
     return result
 
 
 def print_doujinshi(doujinshi_list):
     if not doujinshi_list:
         return
     doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
     headers = ['id', 'doujinshi']
     logger.info('Search Result\n' +
                 tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
 
 
 if __name__ == '__main__':
     print(doujinshi_parser("32271"))

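For reference, and not part of the commit: `print_doujinshi()` above renders search results with `tabulate`. A standalone call with made-up rows, assuming the same `(id, title)` data shape, shows the same `rst` table layout:

```python
from tabulate import tabulate

# Hypothetical rows in the shape print_doujinshi() builds from search results.
doujinshi_list = [(123855, 'Some Title'), (123866, 'Another Title')]
print(tabulate(tabular_data=doujinshi_list, headers=['id', 'doujinshi'], tablefmt='rst'))
```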
nhentai/utils.py (202 changed lines)
@@ -1,87 +1,115 @@
 # coding: utf-8
 from __future__ import unicode_literals, print_function
 
 import os
 import string
+import zipfile
+import shutil
 from nhentai.logger import logger
 
 
 class _Singleton(type):
     """ A metaclass that creates a Singleton base class when called. """
     _instances = {}
 
     def __call__(cls, *args, **kwargs):
         if cls not in cls._instances:
             cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
         return cls._instances[cls]
 
 
 class Singleton(_Singleton(str('SingletonMeta'), (object,), {})):
     pass
 
 
 def urlparse(url):
     try:
         from urlparse import urlparse
     except ImportError:
         from urllib.parse import urlparse
 
     return urlparse(url)
 
 
+def readfile(path):
+    loc = os.path.dirname(__file__)
+
+    with open(os.path.join(loc, path), 'r') as file:
+        return file.read()
+
+
 def generate_html(output_dir='.', doujinshi_obj=None):
     image_html = ''
-    previous = ''
 
     if doujinshi_obj is not None:
         doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
-                                                                            doujinshi_obj.name[:200])))
+                                                                            str(doujinshi_obj.name[:200]))))
     else:
         doujinshi_dir = '.'
 
     file_list = os.listdir(doujinshi_dir)
     file_list.sort()
 
-    for index, image in enumerate(file_list):
+    for image in file_list:
         if not os.path.splitext(image)[1] in ('.jpg', '.png'):
             continue
 
-        try:
-            next_ = file_list[file_list.index(image) + 1]
-        except IndexError:
-            next_ = ''
-
-        image_html += '<img src="{0}" class="image-item {1}" attr-prev="{2}" attr-next="{3}">\n'\
-            .format(image, 'current' if index == 0 else '', previous, next_)
-        previous = image
+        image_html += '<img src="{0}" class="image-item"/>\n'\
+            .format(image)
 
-    with open(os.path.join(os.path.dirname(__file__), 'doujinshi.html'), 'r') as template:
-        html = template.read()
+    html = readfile('viewer/index.html')
+    css = readfile('viewer/styles.css')
+    js = readfile('viewer/scripts.js')
 
     if doujinshi_obj is not None:
         title = doujinshi_obj.name
     else:
         title = 'nHentai HTML Viewer'
 
-    data = html.format(TITLE=title, IMAGES=image_html)
+    data = html.format(TITLE=title, IMAGES=image_html, SCRIPTS=js, STYLES=css)
     with open(os.path.join(doujinshi_dir, 'index.html'), 'w') as f:
         f.write(data)
 
     logger.log(15, 'HTML Viewer has been write to \'{0}\''.format(os.path.join(doujinshi_dir, 'index.html')))
 
 
+def generate_cbz(output_dir='.', doujinshi_obj=None):
+    if doujinshi_obj is not None:
+        doujinshi_dir = os.path.join(output_dir, format_filename('%s-%s' % (doujinshi_obj.id,
+                                                                            str(doujinshi_obj.name[:200]))))
+        cbz_filename = os.path.join(output_dir, format_filename('%s-%s.cbz' % (doujinshi_obj.id,
+                                                                               str(doujinshi_obj.name[:200]))))
+    else:
+        cbz_filename = './doujinshi.cbz'
+        doujinshi_dir = '.'
+
+    file_list = os.listdir(doujinshi_dir)
+    file_list.sort()
+
+    with zipfile.ZipFile(cbz_filename, 'w') as cbz_pf:
+        for image in file_list:
+            image_path = os.path.join(doujinshi_dir, image)
+            cbz_pf.write(image_path, image)
+
+    shutil.rmtree(doujinshi_dir, ignore_errors=True)
+    logger.log(15, 'Comic Book CBZ file has been write to \'{0}\''.format(doujinshi_dir))
+
+
 def format_filename(s):
     """Take a string and return a valid filename constructed from the string.
     Uses a whitelist approach: any characters not present in valid_chars are
     removed. Also spaces are replaced with underscores.
 
     Note: this method may produce invalid filenames such as ``, `.` or `..`
     When I use this method I prepend a date string like '2009_01_15_19_46_32_'
     and append a file extension like '.txt', so I avoid the potential of using
     an invalid filename.
 
     """
     valid_chars = "-_.() %s%s" % (string.ascii_letters, string.digits)
     filename = ''.join(c for c in s if c in valid_chars)
     filename = filename.replace(' ', '_')  # I don't like spaces in filenames.
     return filename

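Not part of the commit, but a note on the new `generate_cbz()`: a CBZ is simply a ZIP archive of the page images, so its output can be sanity-checked with the standard `zipfile` module. A minimal sketch (the `.cbz` filename below is hypothetical):

```python
import zipfile

# Hypothetical archive written by generate_cbz() for doujinshi id 123855.
with zipfile.ZipFile('123855-Some_Title.cbz') as cbz:
    pages = sorted(cbz.namelist())
    print('%d pages, first page: %s' % (len(pages), pages[0]))
```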
nhentai/viewer/index.html (new file, 24 lines)
@@ -0,0 +1,24 @@
+<!DOCTYPE html>
+<html>
+<head>
+    <meta charset="UTF-8">
+    <title>{TITLE}</title>
+    <style>
+{STYLES}
+    </style>
+</head>
+<body>
+
+<nav id="list">
+{IMAGES}</nav>
+
+<div id="image-container">
+    <span id="page-num"></span>
+    <div id="dest"></div>
+</div>
+
+<script>
+{SCRIPTS}
+</script>
+</body>
+</html>

nhentai/viewer/scripts.js (new file, 62 lines)
@@ -0,0 +1,62 @@
+const pages = Array.from(document.querySelectorAll('img.image-item'));
+let currentPage = 0;
+
+function changePage(pageNum) {
+    const previous = pages[currentPage];
+    const current = pages[pageNum];
+
+    if (current == null) {
+        return;
+    }
+
+    previous.classList.remove('current');
+    current.classList.add('current');
+
+    currentPage = pageNum;
+
+    const display = document.getElementById('dest');
+    display.style.backgroundImage = `url("${current.src}")`;
+
+    document.getElementById('page-num')
+        .innerText = [
+            (pageNum + 1).toLocaleString(),
+            pages.length.toLocaleString()
+        ].join('\u200a/\u200a');
+}
+
+changePage(0);
+
+document.getElementById('list').onclick = event => {
+    if (pages.includes(event.target)) {
+        changePage(pages.indexOf(event.target));
+    }
+};
+
+document.getElementById('image-container').onclick = event => {
+    const width = document.getElementById('image-container').clientWidth;
+    const clickPos = event.clientX / width;
+
+    if (clickPos < 0.5) {
+        changePage(currentPage - 1);
+    } else {
+        changePage(currentPage + 1);
+    }
+};
+
+document.onkeypress = event => {
+    switch (event.key.toLowerCase()) {
+        // Previous Image
+        case 'arrowleft':
+        case 'a':
+            changePage(currentPage - 1);
+            break;
+
+        // Next Image
+        case ' ':
+        case 'enter':
+        case 'arrowright':
+        case 'd':
+            changePage(currentPage + 1);
+            break;
+    }
+};

nhentai/viewer/styles.css (new file, 69 lines)
@@ -0,0 +1,69 @@
+*, *::after, *::before {
+    box-sizing: border-box;
+}
+
+img {
+    vertical-align: middle;
+}
+
+html, body {
+    display: flex;
+    background-color: #e8e6e6;
+    height: 100%;
+    width: 100%;
+    padding: 0;
+    margin: 0;
+    font-family: sans-serif;
+}
+
+#list {
+    height: 100%;
+    overflow: auto;
+    width: 260px;
+    text-align: center;
+}
+
+#list img {
+    width: 200px;
+    padding: 10px;
+    border-radius: 10px;
+    margin: 15px 0;
+    cursor: pointer;
+}
+
+#list img.current {
+    background: #0003;
+}
+
+#image-container {
+    flex: auto;
+    height: 100vh;
+    background: #222;
+    color: #fff;
+    text-align: center;
+    cursor: pointer;
+    -webkit-user-select: none;
+    user-select: none;
+    position: relative;
+}
+
+#image-container #dest {
+    height: 100%;
+    width: 100%;
+    background-size: contain;
+    background-repeat: no-repeat;
+    background-position: center;
+}
+
+#image-container #page-num {
+    position: absolute;
+    font-size: 18pt;
+    left: 10px;
+    bottom: 5px;
+    font-weight: bold;
+    opacity: 0.75;
+    text-shadow: /* Duplicate the same shadow to make it very strong */
+        0 0 2px #222,
+        0 0 2px #222,
+        0 0 2px #222;
+}