Compare commits

195 commits, listed by abbreviated SHA:

744a9e4418  c3e9fff491  a84e2c5714  c814c35c50  e2f71437e2
2fa45ae4df  17bc33c6cb  09bb8460f6  eb5b93d654  cb6cf6df1a
98a66a3cb0  02d47632cf  f932b1fbbe  fd9e92f9d4  a8a48c6ce7
f6e9d08fc7  9c1c2ea069  984ae4262c  cbf9448ed9  16bac45f02
7fa9193112  a05a308e71  5a29eaf775  497eb6fe50  4bfe104714
12364e980c  b51e812449  0ed5fa1931  7f655b0f10  dec3f44542
40072a8483  f97469259d  ec608cc741  30e2814fe2  da298e1fe7
51d43ddde0  c734881fc7  8d5803a45e  b441085b45  132b26f8c4
a0dc952fd3  2bd862777b  35c55503fa  29aac84d53  4ed4523782
4223326c13  a248ff98c4  021f17d229  4162eabe93  c75e9efb21
f2dec5c2a3  845a0d5659  03d85c4e5d  dc54a43610  4ecffaff55
457f12d40d  499081a9cd  53aa04af1e  473f948565  f701485840
d8e4f50609  a893f54da1  4e307911ce  f9b7f828a5  092df9e539
8d74866abf  bc5b7f982d  e54f3cbd06  a31c615259  cf0b76204d
17402623c4  a1a310f06b  57673da762  dab61291cb  67cb88dbbd
9ed4e04241  0b0f9bd7e8  f1cc63a591  aa77cb1c7c  f9878d080b
f534b0b47f  6b675fd9ba  458c68d5e6  2eed0a7463  fc507d246a
3ed84c5a67  61f4a43081  4179947f16  9f55223e28  b56e5b63a9
6dc1e0ef5a  fefdd3858a  f66653c55e  179852a343  8972026456
cbff6496c3  5a08981e89  6c5b83d5be  3de4159a39  c66fa5f816
66d0d91eae  0aa8e1d358  0f54762229  93c3a77a57  f411b7cfea
ed1686bb9c  f44b9e9911  1d20a82e3d  e3a6d67560  c7c3572811
421e8bce64  25e0d80024  a10510b12d  2c20d19621  c4313e59f1
c06f3225a3  1fac55137a  22412eb904  8ccfedbfc8  483bef2207
730daec1ab  5778d7a6e5  c48a25bd4e  f5c4bf4dd1  9f17ee3f6e
290f03d05e  fe443a4229  2fe5536950  7a7f2559ff  444efcbee5
08d812c614  cb691c782c  927d5b1b39  a8566482aa  8c900a833d
466fa4c094  2adf8ccc9d  06fdf0dade  a609243794  e89c2c0860
e08b0659e5  221ff6b32c  bc6ef0cf5d  c8c63cbc11  a63856d076
aa4986189f  0fb81599dc  e9f9651d07  1860b5f0cf  eff4f3bf9b
501840172e  e5ed6d098a  98606202fb  5a3f1009c9  61945a6e97
443fcdc7da  31b95fe2dd  be8c97f8d4  348e51676e  ea356a1ca2
5a4dfb8a76  4b15744ceb  b05fa16286  0879486881  c66ba730d3
606c5e0ffd  ba04f81a6f  6519e6f221  7594625d72  4948c8f0c5
e22a99fa8c  19a1d5c404  ad1e876611  1de7e1f998  b97e707817
6ef2189bfe  24be2d37d4  d9d2a6fb91  bd38294bb7  2cf4e6718e
8cd4b948e7  f884384eb3  87afab46c4  c7b1d7e6a8  ad02371158
7c9d55e0ee  00aad774ae  373086b459  3a83f99771  00627ab36a
592e163891  84523475b0  5f5461c902  05e6ceb3cd  db59426503
.dockerignore (new file, 10 lines)
@@ -0,0 +1,10 @@
+.git
+.gitignore
+venv
+*.egg-info
+build
+dist
+images
+LICENSE
+.travis.yml
+.idea
.github/workflows/docker-image.yml (new file, vendored, 27 lines)
@@ -0,0 +1,27 @@
+name: Docker Image CI
+
+on:
+  push:
+    branches: [ "master" ]
+
+jobs:
+
+  build:
+
+    runs-on: ubuntu-latest
+
+    steps:
+    - uses: actions/checkout@v2
+    -
+      name: Login to Docker Hub
+      uses: docker/login-action@v2
+      with:
+        username: ${{ secrets.DOCKERHUB_USERNAME }}
+        password: ${{ secrets.DOCKERHUB_TOKEN }}
+    -
+      name: Build the Docker image
+      uses: docker/build-push-action@v4
+      with:
+        context: .
+        push: true
+        tags: ricterz/nhentai:latest
.gitignore (vendored, +2)
@@ -7,3 +7,5 @@ dist/
 .DS_Store
 output/
 venv/
+.vscode/
+test-output
.travis.yml (deleted, 19 lines)
@@ -1,19 +0,0 @@
-os:
-  - linux
-
-language: python
-python:
-  - 3.7
-  - 3.8
-
-install:
-  - python setup.py install
-
-script:
-  - echo 268642 > /tmp/test.txt
-  - nhentai --cookie "_ga=GA1.2.1651446371.1545407218; __cfduid=d0ed34dfb81167d2a51a1d6392c1768a81601380350; csrftoken=KRN0GR1ft86m3HTefpQA99pp6R1Bo7hUs5QxNGOAIuwB5g4EcJj04fwMB8QKgLaB; sessionid=7hzoowox78c90wi5ud5ibphm4axcck7c"
-  - nhentai --search umaru
-  - nhentai --id=152503,146134 -t 10 --output=/tmp/ --cbz
-  - nhentai -F
-  - nhentai --file /tmp/test.txt
-  - nhentai --id=152503,146134 --gen-main --output=/tmp/
Dockerfile (new file, 11 lines)
@@ -0,0 +1,11 @@
+FROM python:3
+
+WORKDIR /usr/src/nhentai
+COPY requirements.txt ./
+RUN pip install --no-cache-dir -r requirements.txt
+
+COPY . .
+RUN python setup.py install
+
+WORKDIR /output
+ENTRYPOINT ["nhentai"]
MANIFEST.in (file header not captured; inferred from the include directives)
@@ -1,4 +1,5 @@
include README.md
include README.rst
include requirements.txt
include nhentai/viewer/*
include nhentai/viewer/default/*
include nhentai/viewer/minimal/*
README.rst (170 changed lines)
@@ -1,77 +1,108 @@
nhentai
=======

.. code-block::

      _   _            _        _
    _ __ | | | | ___ _ __ | |_ __ _(_)
   | '_ \| |_| |/ _ \ '_ \| __/ _` | |
   | | | |  _  |  __/ | | | || (_| | |
   |_| |_|_| |_|\___|_| |_|\__\__,_|_|

あなたも変態。 いいね?

|travis|
|pypi|
|version|
|license|

nHentai is a CLI tool for downloading doujinshi from <http://nhentai.net>
nhentai is a CLI tool for downloading doujinshi from `nhentai.net <https://nhentai.net>`_

GUI version: `https://github.com/edgar1016/nhentai-GUI <https://github.com/edgar1016/nhentai-GUI>`_

===================
Manual Installation
===================
From Github:

.. code-block::

   git clone https://github.com/RicterZ/nhentai
   cd nhentai
   python setup.py install

Build Docker container:

.. code-block::

   git clone https://github.com/RicterZ/nhentai
   cd nhentai
   docker build -t nhentai:latest .
   docker run --rm -it -v ~/Downloads/doujinshi:/output -v ~/.nhentai/:/root/.nhentai nhentai --id 123855

==================
Installation (pip)
Installation
==================
Alternatively, install from PyPI with pip:
From PyPI with pip:

.. code-block::

   pip install nhentai

For a self-contained installation, use `Pipx <https://github.com/pipxproject/pipx/>`_:
For a self-contained installation, use `pipx <https://github.com/pipxproject/pipx/>`_:

.. code-block::

   pipx install nhentai

=====================
Installation (Gentoo)
=====================
Pull from Dockerhub:

.. code-block::

   docker pull ricterz/nhentai
   docker run --rm -it -v ~/Downloads/doujinshi:/output -v ~/.nhentai/:/root/.nhentai ricterz/nhentai --id 123855

On Gentoo Linux:

.. code-block::

   layman -fa glicOne
   sudo emerge net-misc/nhentai

On NixOS:

.. code-block::

   nix-env -iA nixos.nhentai

=====
Usage
=====
**IMPORTANT**: To bypass the nhentai frequency limit, you should use `--cookie` option to store your cookie.

*The default download folder will be the path where you run the command (CLI path).*

Set your nhentai cookie against captcha:
**⚠️IMPORTANT⚠️**: To bypass the nhentai frequency limit, you should use `--cookie` and `--useragent` options to store your cookie and your user-agent.

.. code-block:: bash

   nhentai --useragent "USER AGENT of YOUR BROWSER"
   nhentai --cookie "YOUR COOKIE FROM nhentai.net"

**NOTE**: The format of the cookie is `"csrftoken=TOKEN; sessionid=ID"`
**NOTE:**

- The format of the cookie is `"csrftoken=TOKEN; sessionid=ID; cf_clearance=CLOUDFLARE"`
- `cf_clearance` cookie and useragent must be set if you encounter "blocked by cloudflare captcha" error. Make sure you use the same IP and useragent as when you got it

| To get csrftoken and sessionid, first login to your nhentai account in web browser, then:
| (Chrome) |ve| |ld| More tools |ld| Developer tools |ld| Application |ld| Storage |ld| Cookies |ld| https://nhentai.net
| (Firefox) |hv| |ld| Web Developer |ld| Web Developer Tools |ld| Storage |ld| Cookies |ld| https://nhentai.net

.. |hv| unicode:: U+2630 .. https://www.compart.com/en/unicode/U+2630
.. |ve| unicode:: U+22EE .. https://www.compart.com/en/unicode/U+22EE
.. |ld| unicode:: U+2014 .. https://www.compart.com/en/unicode/U+2014

.. image:: https://github.com/RicterZ/nhentai/raw/master/images/usage.png
   :alt: nhentai
   :align: center

*The default download folder will be the path where you run the command (%cd% or $PWD).*

Download specified doujinshi:

.. code-block:: bash

   nhentai --id=123855,123866
   nhentai --id 123855 123866 123877

Download doujinshi with ids specified in a file (doujinshi ids split by line):
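The id file is consumed by the ``--file`` handling in ``nhentai/cmdline.py`` further down this diff, which keeps only numeric lines. A minimal sketch of the same parsing; ``ids.txt`` is an illustrative name, not one the tool requires:

.. code-block:: python

   # One doujinshi id per line; blank or non-numeric lines are dropped.
   ids = set()
   with open('ids.txt') as f:
       for line in f:
           line = line.strip()
           if line.isdigit():
               ids.add(int(line))
   print(sorted(ids))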
@@ -112,30 +143,39 @@ Supported doujinshi folder formatter:
- %t: Doujinshi name
- %s: Doujinshi subtitle (translated name)
- %a: Doujinshi authors' name
- %g: Doujinshi groups name
- %p: Doujinshi pretty name
- %ag: Doujinshi authors name or groups name
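A sketch of how these placeholders expand — the ``Doujinshi`` constructor in ``nhentai/doujinshi.py`` later in this diff does this with a chain of ``str.replace`` calls; the field values here are made up:

.. code-block:: python

   name_format = '[%i][%a][%t]'   # the default --format value
   fields = {'%i': '123855', '%a': 'alice', '%t': 'Example Title'}
   for placeholder, value in fields.items():
       name_format = name_format.replace(placeholder, value)
   print(name_format)  # [123855][alice][Example Title]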
Other options:

.. code-block::

   Usage:
       nhentai --search [keyword] --download
       NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]
       nhentai --file [filename]

   Environment Variable:
       NHENTAI                 nhentai mirror url

   Options:
     # Operation options
     -h, --help            show this help message and exit
     -D, --download        download doujinshi (for search results)
     -S, --show            just show the doujinshi information

     # Doujinshi options
     --id=ID               doujinshi ids set, e.g. 1,2,3
     --id                  doujinshi ids set, e.g. 167680 167681 167682
     -s KEYWORD, --search=KEYWORD
                           search doujinshi by keyword
     --tag=TAG             download doujinshi by tag
     -F, --favorites       list or download your favorites.

     # Multi-page options
     --page=PAGE           page number of search results
     --max-page=MAX_PAGE   The max page when recursive download tagged doujinshi

     # Download options
     -F, --favorites       list or download your favorites
     -a ARTIST, --artist=ARTIST
                           list doujinshi by artist name
     --page-all            all search results
     --page=PAGE, --page-range=PAGE
                           page number of search results. e.g. 1,2-5,14
     --sorting=SORTING, --sort=SORTING
                           sorting of doujinshi (recent / popular /
                           popular-[today|week])
     -o OUTPUT_DIR, --output=OUTPUT_DIR
                           output dir
     -t THREADS, --threads=THREADS
@@ -144,23 +184,35 @@ Other options:
                           timeout for downloading doujinshi
     -d DELAY, --delay=DELAY
                           slow down between downloading every doujinshi
     -p PROXY, --proxy=PROXY
                           uses a proxy, for example: http://127.0.0.1:1080
     --proxy=PROXY         store a proxy, for example: -p "http://127.0.0.1:1080"
     -f FILE, --file=FILE  read gallery IDs from file.
     --format=NAME_FORMAT  format the saved folder name

     # Generating options
     --dry-run             Dry run, skip file download
     --html                generate a html viewer at current directory
     --no-html             don't generate HTML after downloading
     --gen-main            generate a main viewer contain all the doujin in the folder
     --gen-main            generate a main viewer contain all the doujin in the
                           folder
     -C, --cbz             generate Comic Book CBZ File
     -P --pdf              generate PDF file
     --rm-origin-dir       remove downloaded doujinshi dir when generated CBZ
                           or PDF file.

     # nHentai options
     --cookie=COOKIE       set cookie of nhentai to bypass Google recaptcha

     -P, --pdf             generate PDF file
     --rm-origin-dir       remove downloaded doujinshi dir when generated CBZ or
                           PDF file
     --move-to-folder      remove files in doujinshi dir then move new file to
                           folder when generated CBZ or PDF file
     --meta                generate a metadata file in doujinshi format
     --regenerate          regenerate the cbz or pdf file if exists
     --cookie=COOKIE       set cookie of nhentai to bypass Cloudflare captcha
     --useragent=USERAGENT, --user-agent=USERAGENT
                           set useragent to bypass Cloudflare captcha
     --language=LANGUAGE   set default language to parse doujinshis
     --clean-language      set DEFAULT as language to parse doujinshis
     --save-download-history
                           save downloaded doujinshis, whose will be skipped if
                           you re-download them
     --clean-download-history
                           clean download history
     --template=VIEWER_TEMPLATE
                           set viewer template
     --legacy              use legacy searching method

==============
nHentai Mirror
==============
@@ -171,33 +223,28 @@ For example:

.. code-block::

   i.h.loli.club -> i.nhentai.net
   i3.h.loli.club -> i3.nhentai.net
   i5.h.loli.club -> i5.nhentai.net
   i7.h.loli.club -> i7.nhentai.net
   h.loli.club -> nhentai.net

Set `NHENTAI` env var to your nhentai mirror.

.. code-block:: bash

   NHENTAI=http://h.loli.club nhentai --id 123456
   NHENTAI=https://h.loli.club nhentai --id 123456


.. image:: ./images/search.png?raw=true
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/search.png
   :alt: nhentai
   :align: center
.. image:: ./images/download.png?raw=true
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/download.png
   :alt: nhentai
   :align: center
.. image:: ./images/viewer.png?raw=true
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/viewer.png
   :alt: nhentai
   :align: center

============
あなたも変態
============
.. image:: ./images/image.jpg?raw=true
   :alt: nhentai
   :align: center


.. |travis| image:: https://travis-ci.org/RicterZ/nhentai.svg?branch=master
   :target: https://travis-ci.org/RicterZ/nhentai
@@ -205,5 +252,8 @@ Set `NHENTAI` env var to your nhentai mirror.
.. |pypi| image:: https://img.shields.io/pypi/dm/nhentai.svg
   :target: https://pypi.org/project/nhentai/

.. |version| image:: https://img.shields.io/pypi/v/nhentai
   :target: https://pypi.org/project/nhentai/

.. |license| image:: https://img.shields.io/github/license/ricterz/nhentai.svg
   :target: https://github.com/RicterZ/nhentai/blob/master/LICENSE
(deleted file; name not captured — a five-line list of doujinshi ids)
@@ -1,5 +0,0 @@
-184212
-204944
-222460
-244502
-261909
BIN  (image, name not captured)   Before: 189 KiB   After: 1.0 MiB
BIN  images/image.jpg (deleted)   Before: 34 KiB
BIN  (image, name not captured)   Before: 173 KiB   After: 991 KiB
BIN  images/usage.png (new file)  After: 679 KiB
BIN  (image, name not captured)   Before: 311 KiB   After: 1.9 MiB
nhentai/__init__.py
@@ -1,3 +1,3 @@
-__version__ = '0.4.14'
+__version__ = '0.5.10'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'
nhentai/cmdline.py
@@ -3,27 +3,18 @@
import os
import sys
import json
import nhentai.constant as constant

from urllib.parse import urlparse
from optparse import OptionParser

try:
    from itertools import ifilter as filter
except ImportError:
    pass

import nhentai.constant as constant
from nhentai import __version__
from nhentai.utils import urlparse, generate_html, generate_main_html, DB
from nhentai.utils import generate_html, generate_main_html, DB
from nhentai.logger import logger


def banner():
    logger.info(u'''nHentai ver %s: あなたも変態。 いいね?
   _   _            _        _
 _ __ | | | | ___ _ __ | |_ __ _(_)
| '_ \| |_| |/ _ \ '_ \| __/ _` | |
| | | |  _  |  __/ | | | || (_| | |
|_| |_|_| |_|\___|_| |_|\__\__,_|_|
''' % __version__)
    logger.debug(f'nHentai ver {__version__}: あなたも変態。 いいね?')


def load_config():
@@ -46,11 +37,27 @@ def write_config():
        f.write(json.dumps(constant.CONFIG))


def callback(option, opt_str, value, parser):
    if option == '--id':
        pass
    value = []

    for arg in parser.rargs:
        if arg.isdigit():
            value.append(int(arg))
        elif arg.startswith('-'):
            break
        else:
            logger.warning(f'Ignore invalid id {arg}')

    setattr(parser.values, option.dest, value)


def cmd_parser():
    load_config()

    parser = OptionParser('\n  nhentai --search [keyword] --download'
                          '\n  NHENTAI=http://h.loli.club nhentai --id [ID ...]'
                          '\n  NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]'
                          '\n  nhentai --file [filename]'
                          '\n\nEnvironment Variable:\n'
                          '  NHENTAI                 nhentai mirror url')
@@ -60,20 +67,23 @@ def cmd_parser():
    parser.add_option('--show', '-S', dest='is_show', action='store_true', help='just show the doujinshi information')

    # doujinshi options
    parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
    parser.add_option('--id', dest='id', action='callback', callback=callback,
                      help='doujinshi ids set, e.g. 167680 167681 167682')
    parser.add_option('--search', '-s', type='string', dest='keyword', action='store',
                      help='search doujinshi by keyword')
    parser.add_option('--favorites', '-F', action='store_true', dest='favorites',
                      help='list or download your favorites.')
                      help='list or download your favorites')
    parser.add_option('--artist', '-a', action='store', dest='artist',
                      help='list doujinshi by artist name')

    # page options
    parser.add_option('--page-all', dest='page_all', action='store_true', default=False,
                      help='all search results')
    parser.add_option('--page', '--page-range', type='string', dest='page', action='store', default='',
    parser.add_option('--page', '--page-range', type='string', dest='page', action='store', default='1',
                      help='page number of search results. e.g. 1,2-5,14')
    parser.add_option('--sorting', dest='sorting', action='store', default='recent',
    parser.add_option('--sorting', '--sort', dest='sorting', action='store', default='popular',
                      help='sorting of doujinshi (recent / popular / popular-[today|week])',
                      choices=['recent', 'popular', 'popular-today', 'popular-week'])
                      choices=['recent', 'popular', 'popular-today', 'popular-week', 'date'])

    # download options
    parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='./',
@@ -85,10 +95,11 @@ def cmd_parser():
    parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0,
                      help='slow down between downloading every doujinshi')
    parser.add_option('--proxy', type='string', dest='proxy', action='store',
                      help='store a proxy, for example: -p \'http://127.0.0.1:1080\'')
                      help='store a proxy, for example: -p "http://127.0.0.1:1080"')
    parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
    parser.add_option('--format', type='string', dest='name_format', action='store',
                      help='format the saved folder name', default='[%i][%a][%t]')
    parser.add_option('--dry-run', action='store_true', dest='dryrun', help='Dry run, skip file download')

    # generate options
    parser.add_option('--html', dest='html_viewer', action='store_true',
@@ -102,11 +113,19 @@ def cmd_parser():
    parser.add_option('--pdf', '-P', dest='is_pdf', action='store_true',
                      help='generate PDF file')
    parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
                      help='remove downloaded doujinshi dir when generated CBZ or PDF file.')
                      help='remove downloaded doujinshi dir when generated CBZ or PDF file')
    parser.add_option('--move-to-folder', dest='move_to_folder', action='store_true', default=False,
                      help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file')
    parser.add_option('--meta', dest='generate_metadata', action='store_true',
                      help='generate a metadata file in doujinshi format')
    parser.add_option('--regenerate', dest='regenerate', action='store_true', default=False,
                      help='regenerate the cbz or pdf file if exists')

    # nhentai options
    parser.add_option('--cookie', type='str', dest='cookie', action='store',
                      help='set cookie of nhentai to bypass Google recaptcha')
                      help='set cookie of nhentai to bypass Cloudflare captcha')
    parser.add_option('--useragent', '--user-agent', type='str', dest='useragent', action='store',
                      help='set useragent to bypass Cloudflare captcha')
    parser.add_option('--language', type='str', dest='language', action='store',
                      help='set default language to parse doujinshis')
    parser.add_option('--clean-language', dest='clean_language', action='store_true', default=False,
@@ -117,67 +136,67 @@ def cmd_parser():
                      help='clean download history')
    parser.add_option('--template', dest='viewer_template', action='store',
                      help='set viewer template', default='')

    try:
        sys.argv = [unicode(i.decode(sys.stdin.encoding)) for i in sys.argv]
    except (NameError, TypeError):
        pass
    except UnicodeDecodeError:
        exit(0)
    parser.add_option('--legacy', dest='legacy', action='store_true', default=False,
                      help='use legacy searching method')

    args, _ = parser.parse_args(sys.argv[1:])

    if args.html_viewer:
        generate_html()
        exit(0)
        generate_html(template=constant.CONFIG['template'])
        sys.exit(0)

    if args.main_viewer and not args.id and not args.keyword and not args.favorites:
        generate_main_html()
        exit(0)
        sys.exit(0)

    if args.clean_download_history:
        with DB() as db:
            db.clean_all()

        logger.info('Download history cleaned.')
        exit(0)
        sys.exit(0)

    # --- set config ---
    if args.cookie is not None:
        constant.CONFIG['cookie'] = args.cookie
        write_config()
        logger.info('Cookie saved.')
        sys.exit(0)
    elif args.useragent is not None:
        constant.CONFIG['useragent'] = args.useragent
        write_config()
        exit(0)

    if args.language is not None:
        logger.info('User-Agent saved.')
        sys.exit(0)
    elif args.language is not None:
        constant.CONFIG['language'] = args.language
        logger.info('Default language now set to \'{0}\''.format(args.language))
        write_config()
        exit(0)
        logger.info(f'Default language now set to "{args.language}"')
        sys.exit(0)
        # TODO: search without language

    if args.proxy is not None:
        proxy_url = urlparse(args.proxy)
        if not args.proxy == '' and proxy_url.scheme not in ('http', 'https'):
            logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
            exit(0)
        if not args.proxy == '' and proxy_url.scheme not in ('http', 'https', 'socks5', 'socks5h',
                                                             'socks4', 'socks4a'):
            logger.error(f'Invalid protocol "{proxy_url.scheme}" of proxy, ignored')
            sys.exit(0)
        else:
            constant.CONFIG['proxy'] = {
                'http': args.proxy,
                'https': args.proxy,
            }
            logger.info('Proxy now set to \'{0}\'.'.format(args.proxy))
            logger.info(f'Proxy now set to "{args.proxy}"')
            write_config()
            exit(0)
            sys.exit(0)

    if args.viewer_template is not None:
        if not args.viewer_template:
            args.viewer_template = 'default'

        if not os.path.exists(os.path.join(os.path.dirname(__file__),
                                           'viewer/{}/index.html'.format(args.viewer_template))):
            logger.error('Template \'{}\' does not exists'.format(args.viewer_template))
            exit(1)
                                           f'viewer/{args.viewer_template}/index.html')):
            logger.error(f'Template "{args.viewer_template}" does not exists')
            sys.exit(1)
        else:
            constant.CONFIG['template'] = args.viewer_template
            write_config()
@@ -187,31 +206,31 @@ def cmd_parser():
    if args.favorites:
        if not constant.CONFIG['cookie']:
            logger.warning('Cookie has not been set, please use `nhentai --cookie \'COOKIE\'` to set it.')
            exit(1)

    if args.id:
        _ = [i.strip() for i in args.id.split(',')]
        args.id = set(int(i) for i in _ if i.isdigit())
            sys.exit(1)

    if args.file:
        with open(args.file, 'r') as f:
            _ = [i.strip() for i in f.readlines()]
            args.id = set(int(i) for i in _ if i.isdigit())

    if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites:
    if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites and not args.artist:
        logger.critical('Doujinshi id(s) are required for downloading')
        parser.print_help()
        exit(1)
        sys.exit(1)

    if not args.keyword and not args.id and not args.favorites:
    if not args.keyword and not args.id and not args.favorites and not args.artist:
        parser.print_help()
        exit(1)
        sys.exit(1)

    if args.threads <= 0:
        args.threads = 1

    elif args.threads > 15:
        logger.critical('Maximum number of used threads is 15')
        exit(1)
        sys.exit(1)

    if args.dryrun and (args.is_cbz or args.is_pdf):
        logger.critical('Cannot generate PDF or CBZ during dry-run')
        sys.exit(1)

    return args
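The new `--id` option above replaces a comma-separated string with an optparse callback that greedily consumes the following bare numeric arguments. A minimal runnable sketch of the same pattern; the names here are illustrative:

    from optparse import OptionParser

    def collect_ids(option, opt_str, value, parser):
        # Consume subsequent bare numeric arguments: `--id 167680 167681 167682`.
        value = []
        for arg in parser.rargs:
            if arg.isdigit():
                value.append(int(arg))
            elif arg.startswith('-'):
                break  # stop at the next option
        setattr(parser.values, option.dest, value)

    parser = OptionParser()
    parser.add_option('--id', dest='id', action='callback', callback=collect_ids)
    # The consumed tokens also come back as leftover positionals, which the
    # real cmd_parser() discards via `args, _ = parser.parse_args(...)`.
    options, leftover = parser.parse_args(['--id', '167680', '167681', '167682'])
    print(options.id)  # [167680, 167681, 167682]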
nhentai/command.py
@@ -1,20 +1,20 @@
#!/usr/bin/env python2.7
# coding: utf-8

import os
import shutil
import sys
import signal
import platform
import time
import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import cmd_parser, banner
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, favorites_parser
from nhentai.parser import doujinshi_parser, search_parser, legacy_search_parser, print_doujinshi, favorites_parser
from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader
from nhentai.logger import logger
from nhentai.constant import BASE_URL
from nhentai.utils import generate_html, generate_cbz, generate_main_html, generate_pdf, \
    paging, check_cookie, signal_handler, DB
from nhentai.utils import generate_html, generate_doc, generate_main_html, generate_metadata_file, \
    paging, check_cookie, signal_handler, DB, move_to_folder


def main():
@@ -22,26 +22,25 @@ def main():

    if sys.version_info < (3, 0, 0):
        logger.error('nhentai now only support Python 3.x')
        exit(1)
        sys.exit(1)

    options = cmd_parser()
    logger.info('Using mirror: {0}'.format(BASE_URL))
    logger.info(f'Using mirror: {BASE_URL}')

    # CONFIG['proxy'] will be changed after cmd_parser()
    if constant.CONFIG['proxy']['http']:
        logger.info('Using proxy: {0}'.format(constant.CONFIG['proxy']['http']))
        logger.info(f'Using proxy: {constant.CONFIG["proxy"]["http"]}')

    if not constant.CONFIG['template']:
        constant.CONFIG['template'] = 'default'

    logger.info('Using viewer template "{}"'.format(constant.CONFIG['template']))
    logger.info(f'Using viewer template "{constant.CONFIG["template"]}"')

    # check your cookie
    check_cookie()

    doujinshis = []
    doujinshi_ids = []
    doujinshi_list = []

    page_list = paging(options.page)

@@ -49,15 +48,21 @@ def main():
        if not options.is_download:
            logger.warning('You do not specify --download option')

        doujinshis = favorites_parser(page=page_list)
        doujinshis = favorites_parser() if options.page_all else favorites_parser(page=page_list)

    elif options.keyword:
        if constant.CONFIG['language']:
            logger.info('Using default language: {0}'.format(constant.CONFIG['language']))
            options.keyword += ' language:{}'.format(constant.CONFIG['language'])
        doujinshis = search_parser(options.keyword, sorting=options.sorting, page=page_list,
            logger.info(f'Using default language: {constant.CONFIG["language"]}')
            options.keyword += f' language:{constant.CONFIG["language"]}'

        _search_parser = legacy_search_parser if options.legacy else search_parser
        doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list,
                                    is_page_all=options.page_all)

    elif options.artist:
        doujinshis = legacy_search_parser(options.artist, sorting=options.sorting, page=page_list,
                                          is_page_all=options.page_all, type_='ARTIST')

    elif not doujinshi_ids:
        doujinshi_ids = options.id

@@ -69,54 +74,76 @@ def main():
        with DB() as db:
            data = map(int, db.get_all())

        doujinshi_ids = list(set(doujinshi_ids) - set(data))

    if doujinshi_ids:
        for i, id_ in enumerate(doujinshi_ids):
            if options.delay:
                time.sleep(options.delay)

            doujinshi_info = doujinshi_parser(id_)

            if doujinshi_info:
                doujinshi_list.append(Doujinshi(name_format=options.name_format, **doujinshi_info))

            if (i + 1) % 10 == 0:
                logger.info('Progress: %d / %d' % (i + 1, len(doujinshi_ids)))
        doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))

    if not options.is_show:
        downloader = Downloader(path=options.output_dir, size=options.threads,
                                timeout=options.timeout, delay=options.delay)

        for doujinshi in doujinshi_list:
        for doujinshi_id in doujinshi_ids:
            doujinshi_info = doujinshi_parser(doujinshi_id)
            if doujinshi_info:
                doujinshi = Doujinshi(name_format=options.name_format, **doujinshi_info)
            else:
                continue

            if not options.dryrun:
                doujinshi.downloader = downloader

                if doujinshi.check_if_need_download(options):
                    doujinshi.download()
                else:
                    logger.info(f'Skip download doujinshi because a PDF/CBZ file exists of doujinshi {doujinshi.name}')
                    continue

            if options.generate_metadata:
                generate_metadata_file(options.output_dir, doujinshi)

            if options.is_save_download_history:
                with DB() as db:
                    db.add_one(doujinshi.id)

            if not options.is_nohtml and not options.is_cbz and not options.is_pdf:
            if not options.is_nohtml:
                generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template'])
            elif options.is_cbz:
                generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)
            elif options.is_pdf:
                generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir)

            if options.is_cbz:
                generate_doc('cbz', options.output_dir, doujinshi, options.regenerate)

            if options.is_pdf:
                generate_doc('pdf', options.output_dir, doujinshi, options.regenerate)

            if options.move_to_folder:
                if options.is_cbz:
                    move_to_folder(options.output_dir, doujinshi, 'cbz')
                if options.is_pdf:
                    move_to_folder(options.output_dir, doujinshi, 'pdf')

            if options.rm_origin_dir:
                if options.move_to_folder:
                    logger.critical('You specified both --move-to-folder and --rm-origin-dir options, '
                                    'you will not get anything :(')
                shutil.rmtree(os.path.join(options.output_dir, doujinshi.filename), ignore_errors=True)

        if options.main_viewer:
            generate_main_html(options.output_dir)

        if not platform.system() == 'Windows':
            logger.log(15, '🍻 All done.')
            logger.log(16, '🍻 All done.')
        else:
            logger.log(15, 'All done.')
            logger.log(16, 'All done.')

    else:
        [doujinshi.show() for doujinshi in doujinshi_list]
        for doujinshi_id in doujinshi_ids:
            doujinshi_info = doujinshi_parser(doujinshi_id)
            if doujinshi_info:
                doujinshi = Doujinshi(name_format=options.name_format, **doujinshi_info)
            else:
                continue
            doujinshi.show()


urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
signal.signal(signal.SIGINT, signal_handler)


if __name__ == '__main__':
    main()
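`check_if_need_download` (defined in nhentai/doujinshi.py below) drives the new skip/`--regenerate` behaviour in this loop. A condensed sketch of the decision; `need_download` is an illustrative helper name, not part of the codebase:

    import os

    def need_download(base_path, want_pdf, want_cbz, regenerate):
        if regenerate:                 # --regenerate always re-downloads
            return True
        checks = []
        if want_pdf:
            checks.append(os.path.exists(f'{base_path}.pdf'))
        if want_cbz:
            checks.append(os.path.exists(f'{base_path}.cbz'))
        if checks and all(checks):     # every requested artifact already exists
            return False
        return True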
nhentai/constant.py
@@ -1,38 +1,66 @@
# coding: utf-8

import os
import tempfile

try:
    from urlparse import urlparse
except ImportError:
    from urllib.parse import urlparse
from urllib.parse import urlparse
from platform import system


def get_nhentai_home() -> str:
    home = os.getenv('HOME', tempfile.gettempdir())

    if system() == 'Linux':
        xdgdat = os.getenv('XDG_DATA_HOME')
        if xdgdat and os.path.exists(os.path.join(xdgdat, 'nhentai')):
            return os.path.join(xdgdat, 'nhentai')
        if home and os.path.exists(os.path.join(home, '.nhentai')):
            return os.path.join(home, '.nhentai')
        if xdgdat:
            return os.path.join(xdgdat, 'nhentai')

    # Use old default path in other systems
    return os.path.join(home, '.nhentai')


DEBUG = os.getenv('DEBUG', False)
BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')

__api_suspended_DETAIL_URL = '%s/api/gallery' % BASE_URL
DETAIL_URL = f'{BASE_URL}/g'
LEGACY_SEARCH_URL = f'{BASE_URL}/search/'
SEARCH_URL = f'{BASE_URL}/api/galleries/search'
ARTIST_URL = f'{BASE_URL}/artist/'

DETAIL_URL = '%s/g' % BASE_URL
SEARCH_URL = '%s/api/galleries/search' % BASE_URL
TAG_API_URL = f'{BASE_URL}/api/galleries/tagged'
LOGIN_URL = f'{BASE_URL}/login/'
CHALLENGE_URL = f'{BASE_URL}/challenge'
FAV_URL = f'{BASE_URL}/favorites/'


TAG_API_URL = '%s/api/galleries/tagged' % BASE_URL
LOGIN_URL = '%s/login/' % BASE_URL
CHALLENGE_URL = '%s/challenge' % BASE_URL
FAV_URL = '%s/favorites/' % BASE_URL
IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i.{urlparse(BASE_URL).hostname}/galleries'
IMAGE_URL_MIRRORS = [
    f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}',
    f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}',
    f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}',
]

u = urlparse(BASE_URL)
IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)

NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
NHENTAI_HOME = get_nhentai_home()
NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')

__api_suspended_DETAIL_URL = f'{BASE_URL}/api/gallery'

CONFIG = {
    'proxy': {'http': '', 'https': ''},
    'cookie': '',
    'language': '',
    'template': '',
    'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
    'max_filename': 85
}

LANGUAGE_ISO = {
    'english': 'en',
    'chinese': 'zh',
    'japanese': 'ja',
    'translated': 'translated'
}
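The new `IMAGE_URL_MIRRORS` list is derived from whatever `NHENTAI` mirror is configured, matching the i3/i5/i7 mirror scheme described in the README. A small sketch of the same derivation:

    import os
    from urllib.parse import urlparse

    base = urlparse(os.getenv('NHENTAI', 'https://nhentai.net'))
    mirrors = [f'{base.scheme}://i{n}.{base.hostname}' for n in (3, 5, 7)]
    print(mirrors)
    # ['https://i3.nhentai.net', 'https://i5.nhentai.net', 'https://i7.nhentai.net']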
nhentai/doujinshi.py
@@ -1,4 +1,5 @@
# coding: utf-8
import os

from tabulate import tabulate

@@ -20,65 +21,99 @@ class DoujinshiInfo(dict):

    def __getattr__(self, item):
        try:
            return dict.__getitem__(self, item)
            ret = dict.__getitem__(self, item)
            return ret if ret else 'Unknown'
        except KeyError:
            return ''
            return 'Unknown'


class Doujinshi(object):
    def __init__(self, name=None, id=None, img_id=None, ext='', pages=0, name_format='[%i][%a][%t]', **kwargs):
    def __init__(self, name=None, pretty_name=None, id=None, img_id=None,
                 ext='', pages=0, name_format='[%i][%a][%t]', **kwargs):
        self.name = name
        self.pretty_name = pretty_name
        self.id = id
        self.img_id = img_id
        self.ext = ext
        self.pages = pages
        self.downloader = None
        self.url = '%s/%d' % (DETAIL_URL, self.id)
        self.url = f'{DETAIL_URL}/{self.id}'
        self.info = DoujinshiInfo(**kwargs)

        name_format = name_format.replace('%i', str(self.id))
        name_format = name_format.replace('%a', self.info.artists)
        name_format = name_format.replace('%t', self.name)
        name_format = name_format.replace('%s', self.info.subtitle)
        self.filename = format_filename(name_format)
        ag_value = self.info.groups if self.info.artists == 'Unknown' else self.info.artists
        name_format = name_format.replace('%ag', format_filename(ag_value))

        name_format = name_format.replace('%i', format_filename(str(self.id)))
        name_format = name_format.replace('%a', format_filename(self.info.artists))
        name_format = name_format.replace('%g', format_filename(self.info.groups))

        name_format = name_format.replace('%t', format_filename(self.name))
        name_format = name_format.replace('%p', format_filename(self.pretty_name))
        name_format = name_format.replace('%s', format_filename(self.info.subtitle))
        self.filename = format_filename(name_format, 255, True)

        self.table = [
            ['Parodies', self.info.parodies],
            ['Doujinshi', self.name],
            ['Subtitle', self.info.subtitle],
            ['Date', self.info.date],
            ['Characters', self.info.characters],
            ['Authors', self.info.artists],
            ['Groups', self.info.groups],
            ['Languages', self.info.languages],
            ['Tags', self.info.tags],
            ['URL', self.url],
            ['Pages', self.pages],
        ]

    def __repr__(self):
        return '<Doujinshi: {0}>'.format(self.name)
        return f'<Doujinshi: {self.name}>'

    def show(self):
        table = [
            ["Parodies", self.info.parodies],
            ["Doujinshi", self.name],
            ["Subtitle", self.info.subtitle],
            ["Characters", self.info.characters],
            ["Authors", self.info.artists],
            ["Languages", self.info.languages],
            ["Tags", self.info.tags],
            ["URL", self.url],
            ["Pages", self.pages],
        ]
        logger.info(u'Print doujinshi information of {0}\n{1}'.format(self.id, tabulate(table)))
        logger.info(f'Print doujinshi information of {self.id}\n{tabulate(self.table)}')

    def check_if_need_download(self, options):
        base_path = os.path.join(self.downloader.path, self.filename)

        # regenerate, re-download
        if options.regenerate:
            return True

        # pdf or cbz file exists, skip re-download
        # doujinshi directory may not exist b/c of --rm-origin-dir option set.
        # user should pass --regenerate option to get back origin dir.
        ret_pdf = ret_cbz = None
        if options.is_pdf:
            ret_pdf = os.path.exists(f'{base_path}.pdf') or os.path.exists(f'{base_path}/{self.filename}.pdf')

        if options.is_cbz:
            ret_cbz = os.path.exists(f'{base_path}.cbz') or os.path.exists(f'{base_path}/{self.filename}.cbz')

        ret = list(filter(lambda s: s is not None, [ret_cbz, ret_pdf]))
        if ret and all(ret):
            return False

        # doujinshi directory doesn't exist, re-download
        if not (os.path.exists(base_path) and os.path.isdir(base_path)):
            return True

        # fallback
        return True

    def download(self):
        logger.info('Starting to download doujinshi: %s' % self.name)
        logger.info(f'Starting to download doujinshi: {self.name}')
        if self.downloader:
            download_queue = []

            if len(self.ext) != self.pages:
                logger.warning('Page count and ext count do not equal')

            for i in range(1, min(self.pages, len(self.ext)) + 1):
                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i-1]))

            self.downloader.download(download_queue, self.filename)

            '''
            for i in range(len(self.ext)):
                download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i+1, EXT_MAP[self.ext[i]]))
            '''
                download_queue.append(f'{IMAGE_URL}/{self.img_id}/{i}.{self.ext[i-1]}')

            return self.downloader.start_download(download_queue, self.filename)
        else:
            logger.critical('Downloader has not been loaded')
            return False


if __name__ == '__main__':
@@ -88,4 +123,4 @@ if __name__ == '__main__':
    try:
        test.download()
    except Exception as e:
        print('Exception: %s' % str(e))
        print(f'Exception: {e}')
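The `DoujinshiInfo.__getattr__` change above makes empty or missing metadata read as 'Unknown', which the new `%ag` formatter relies on when choosing between artists and groups. A standalone sketch of the pattern; `Info` and the field values are illustrative:

    class Info(dict):
        def __getattr__(self, item):
            try:
                ret = dict.__getitem__(self, item)
                return ret if ret else 'Unknown'   # empty value also reads as 'Unknown'
            except KeyError:
                return 'Unknown'                   # missing key reads as 'Unknown'

    info = Info(artists='alice', groups='')
    print(info.artists, info.groups, info.parodies)  # alice Unknown Unknown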
nhentai/downloader.py
@@ -3,22 +3,20 @@
import multiprocessing
import signal

from future.builtins import str as text
import sys
import os
import requests
import time
import urllib3.exceptions

try:
    from urllib.parse import urlparse
except ImportError:
    from urlparse import urlparse

from urllib.parse import urlparse
from nhentai import constant
from nhentai.logger import logger
from nhentai.parser import request
from nhentai.utils import Singleton

requests.packages.urllib3.disable_warnings()

urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
semaphore = multiprocessing.Semaphore(1)

@@ -26,6 +24,21 @@ class NHentaiImageNotExistException(Exception):
    pass


def download_callback(result):
    result, data = result
    if result == 0:
        logger.warning('fatal errors occurred, ignored')
    elif result == -1:
        logger.warning(f'url {data} return status code 404')
    elif result == -2:
        logger.warning('Ctrl-C pressed, exiting sub processes ...')
    elif result == -3:
        # workers won't be run, just pass
        pass
    else:
        logger.log(16, f'{data} downloaded successfully')


class Downloader(Singleton):

    def __init__(self, path='', size=5, timeout=30, delay=0):
@@ -34,29 +47,34 @@ class Downloader(Singleton):
        self.timeout = timeout
        self.delay = delay

    def download_(self, url, folder='', filename='', retried=0):
    def download(self, url, folder='', filename='', retried=0, proxy=None):
        if self.delay:
            time.sleep(self.delay)
        logger.info('Starting to download {0} ...'.format(url))
        logger.info(f'Starting to download {url} ...')
        filename = filename if filename else os.path.basename(urlparse(url).path)
        base_filename, extension = os.path.splitext(filename)

        save_file_path = os.path.join(folder, base_filename.zfill(3) + extension)
        try:
            if os.path.exists(os.path.join(folder, base_filename.zfill(3) + extension)):
                logger.warning('File: {0} exists, ignoring'.format(os.path.join(folder, base_filename.zfill(3) +
                                                                                extension)))
            if os.path.exists(save_file_path):
                logger.warning(f'Skipped download: {save_file_path} already exists')
                return 1, url

            response = None
            with open(os.path.join(folder, base_filename.zfill(3) + extension), "wb") as f:
            with open(save_file_path, "wb") as f:
                i = 0
                while i < 10:
                    try:
                        response = request('get', url, stream=True, timeout=self.timeout)
                        response = request('get', url, stream=True, timeout=self.timeout, proxies=proxy)
                        if response.status_code != 200:
                            raise NHentaiImageNotExistException

                    except NHentaiImageNotExistException as e:
                        raise e
                            path = urlparse(url).path
                            for mirror in constant.IMAGE_URL_MIRRORS:
                                print(f'{mirror}{path}')
                                mirror_url = f'{mirror}{path}'
                                response = request('get', mirror_url, stream=True,
                                                   timeout=self.timeout, proxies=proxy)
                                if response.status_code == 200:
                                    break

                    except Exception as e:
                        i += 1
@@ -76,13 +94,14 @@ class Downloader(Singleton):

        except (requests.HTTPError, requests.Timeout) as e:
            if retried < 3:
                logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), retried))
                return 0, self.download_(url=url, folder=folder, filename=filename, retried=retried+1)
                logger.warning(f'Warning: {e}, retrying({retried}) ...')
                return 0, self.download(url=url, folder=folder, filename=filename,
                                        retried=retried+1, proxy=proxy)
            else:
                return 0, None

        except NHentaiImageNotExistException as e:
            os.remove(os.path.join(folder, base_filename.zfill(3) + extension))
            os.remove(save_file_path)
            return -1, url

        except Exception as e:
@@ -96,39 +115,24 @@ class Downloader(Singleton):

            return 1, url

    def _download_callback(self, result):
        result, data = result
        if result == 0:
            logger.warning('fatal errors occurred, ignored')
            # exit(1)
        elif result == -1:
            logger.warning('url {} return status code 404'.format(data))
        elif result == -2:
            logger.warning('Ctrl-C pressed, exiting sub processes ...')
        elif result == -3:
            # workers wont be run, just pass
            pass
        else:
            logger.log(15, '{0} downloaded successfully'.format(data))

    def download(self, queue, folder=''):
        if not isinstance(folder, text):
    def start_download(self, queue, folder='') -> bool:
        if not isinstance(folder, (str, )):
            folder = str(folder)

        if self.path:
            folder = os.path.join(self.path, folder)

        logger.info(f'Doujinshi will be saved at "{folder}"')
        if not os.path.exists(folder):
            logger.warning('Path \'{0}\' does not exist, creating.'.format(folder))
            try:
                os.makedirs(folder)
            except EnvironmentError as e:
                logger.critical('{0}'.format(str(e)))
                logger.critical(str(e))

        else:
            logger.warning('Path \'{0}\' already exist.'.format(folder))

        queue = [(self, url, folder) for url in queue]
        if os.getenv('DEBUG', None) == 'NODOWNLOAD':
            # Assuming we want to continue with rest of process.
            return True
        queue = [(self, url, folder, constant.CONFIG['proxy']) for url in queue]

        pool = multiprocessing.Pool(self.size, init_worker)
        [pool.apply_async(download_wrapper, args=item) for item in queue]
@@ -136,10 +140,12 @@ class Downloader(Singleton):
        pool.close()
        pool.join()

        return True


def download_wrapper(obj, url, folder=''):

def download_wrapper(obj, url, folder='', proxy=None):
    if sys.platform == 'darwin' or semaphore.get_value():
        return Downloader.download_(obj, url=url, folder=folder)
        return Downloader.download(obj, url=url, folder=folder, proxy=proxy)
    else:
        return -3, None

@@ -148,7 +154,7 @@ def init_worker():
    signal.signal(signal.SIGINT, subprocess_signal)


def subprocess_signal(signal, frame):
def subprocess_signal(sig, frame):
    if semaphore.acquire(timeout=1):
        logger.warning('Ctrl-C pressed, exiting sub processes ...')

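The rewritten `download` method falls back to the image mirrors when the primary host does not return 200. A simplified sketch of that fallback loop, using plain `requests` in place of nhentai's `request` wrapper and an illustrative function name:

    import requests
    from urllib.parse import urlparse

    def fetch_with_mirrors(url, mirrors, timeout=30):
        # Try the primary host first.
        response = requests.get(url, stream=True, timeout=timeout)
        if response.status_code == 200:
            return response
        # Re-request the same path from each mirror until one succeeds.
        path = urlparse(url).path  # e.g. /galleries/987654/1.jpg
        for mirror in mirrors:
            response = requests.get(f'{mirror}{path}', stream=True, timeout=timeout)
            if response.status_code == 200:
                return response
        raise RuntimeError(f'no host could serve {path}')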
nhentai/logger.py
@@ -34,7 +34,7 @@ class ColorizingStreamHandler(logging.StreamHandler):
    # levels to (background, foreground, bold/intense)
    level_map = {
        logging.DEBUG: (None, 'blue', False),
        logging.INFO: (None, 'green', False),
        logging.INFO: (None, 'white', False),
        logging.WARNING: (None, 'yellow', False),
        logging.ERROR: (None, 'red', False),
        logging.CRITICAL: ('red', 'white', False)
@@ -160,18 +160,18 @@ class ColorizingStreamHandler(logging.StreamHandler):
        return self.colorize(message, record)


logging.addLevelName(15, "INFO")
logging.addLevelName(16, "SUCCESS")
logger = logging.getLogger('nhentai')
LOGGER_HANDLER = ColorizingStreamHandler(sys.stdout)
FORMATTER = logging.Formatter("\r[%(asctime)s] [%(levelname)s] %(message)s", "%H:%M:%S")
FORMATTER = logging.Formatter("\r[%(asctime)s] %(funcName)s: %(message)s", "%H:%M:%S")
LOGGER_HANDLER.setFormatter(FORMATTER)
LOGGER_HANDLER.level_map[logging.getLevelName("INFO")] = (None, "cyan", False)
LOGGER_HANDLER.level_map[logging.getLevelName("SUCCESS")] = (None, "green", False)
logger.addHandler(LOGGER_HANDLER)
logger.setLevel(logging.DEBUG)


if __name__ == '__main__':
    logger.log(15, 'nhentai')
    logger.log(16, 'nhentai')
    logger.info('info')
    logger.warning('warning')
    logger.debug('debug')
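The custom level moves from 15 ("INFO") to 16 ("SUCCESS") here, which is why the `logger.log(15, ...)` calls elsewhere in this diff become `logger.log(16, ...)`. Registering and using such a level is standard `logging` usage; a minimal sketch:

    import logging

    logging.addLevelName(16, 'SUCCESS')
    logging.basicConfig(level=logging.DEBUG)
    log = logging.getLogger('demo')
    log.log(16, 'downloaded successfully')  # emitted with level name SUCCESS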
nhentai/parser.py
@@ -1,5 +1,5 @@
# coding: utf-8

import sys
import os
import re
import time
@@ -26,7 +26,7 @@ def login(username, password):
    logger.info('Getting CSRF token ...')

    if os.getenv('DEBUG'):
        logger.info('CSRF token is {}'.format(csrf_token))
        logger.info(f'CSRF token is {csrf_token}')

    login_dict = {
        'csrfmiddlewaretoken': csrf_token,
@@ -41,11 +41,11 @@ def login(username, password):

    if 'Invalid username/email or password' in resp.text:
        logger.error('Login failed, please check your username and password')
        exit(1)
        sys.exit(1)

    if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
        logger.error('Using nhentai --cookie \'YOUR_COOKIE_HERE\' to save your Cookie.')
        exit(2)
        sys.exit(2)


def _get_title_and_id(response):
@@ -56,7 +56,7 @@ def _get_title_and_id(response):
        doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
        title = doujinshi_container.text.strip()
        title = title if len(title) < 85 else title[:82] + '...'
        id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
        id_ = re.search('/g/([0-9]+)/', doujinshi.a['href']).group(1)
        result.append({'id': id_, 'title': title})

    return result
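The id pattern tightens from `\d+` to `[0-9]+`; in Python 3, `\d` matches any Unicode decimal digit, while `[0-9]` restricts the match to ASCII. A quick sketch of the extraction with an illustrative href:

    import re

    href = '/g/177013/'  # example gallery link
    match = re.search(r'/g/([0-9]+)/', href)
    print(match.group(1))  # 177013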
@ -67,7 +67,7 @@ def favorites_parser(page=None):
|
||||
html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
|
||||
count = html.find('span', attrs={'class': 'count'})
|
||||
if not count:
|
||||
logger.error("Can't get your number of favorited doujins. Did the login failed?")
|
||||
logger.error("Can't get your number of favorite doujinshis. Did the login failed?")
|
||||
return []
|
||||
|
||||
count = int(count.text.strip('(').strip(')').replace(',', ''))
|
||||
@ -84,7 +84,7 @@ def favorites_parser(page=None):
|
||||
else:
|
||||
pages = 1
|
||||
|
||||
logger.info('You have %d favorites in %d pages.' % (count, pages))
|
||||
logger.info(f'You have {count} favorites in {pages} pages.')
|
||||
|
||||
if os.getenv('DEBUG'):
|
||||
pages = 1
|
||||
@ -93,53 +93,63 @@ def favorites_parser(page=None):
|
||||
|
||||
for page in page_range_list:
|
||||
try:
|
||||
logger.info('Getting doujinshi ids of page %d' % page)
|
||||
resp = request('get', constant.FAV_URL + '?page=%d' % page).content
|
||||
logger.info(f'Getting doujinshi ids of page {page}')
|
||||
resp = request('get', f'{constant.FAV_URL}?page={page}').content
|
||||
|
||||
result.extend(_get_title_and_id(resp))
|
||||
except Exception as e:
|
||||
logger.error('Error: %s, continue', str(e))
|
||||
logger.error(f'Error: {e}, continue')
|
||||
|
||||
return result
|
||||
|
||||
|
||||
def doujinshi_parser(id_):
|
||||
def doujinshi_parser(id_, counter=0):
|
||||
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
|
||||
raise Exception('Doujinshi id({0}) is not valid'.format(id_))
|
||||
raise Exception(f'Doujinshi id({id_}) is not valid')
|
||||
|
||||
id_ = int(id_)
|
||||
logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
|
||||
logger.info(f'Fetching doujinshi information of id {id_}')
|
||||
doujinshi = dict()
|
||||
doujinshi['id'] = id_
|
||||
url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
|
||||
url = f'{constant.DETAIL_URL}/{id_}/'
|
||||
|
||||
try:
|
||||
response = request('get', url)
|
||||
if response.status_code in (200, ):
|
||||
response = response.content
|
||||
elif response.status_code in (404,):
|
||||
logger.error("Doujinshi with id {0} cannot be found".format(id_))
|
||||
logger.error(f'Doujinshi with id {id_} cannot be found')
|
||||
return []
|
||||
else:
|
||||
logger.debug('Slow down and retry ({}) ...'.format(id_))
|
||||
time.sleep(1)
|
||||
return doujinshi_parser(str(id_))
|
||||
counter += 1
|
||||
|
||||
except Exception as e:
|
||||
logger.warning('Error: {}, ignored'.format(str(e)))
|
||||
if counter == 10:
|
||||
logger.critical(f'Failed to fetch doujinshi information of id {id_}')
|
||||
return None
|
||||
|
||||
logger.debug(f'Slow down and retry ({id_}) ...')
|
||||
time.sleep(1)
|
||||
return doujinshi_parser(str(id_), counter)
|
||||
|
||||
except Exception as e:
|
||||
logger.warning(f'Error: {e}, ignored')
|
||||
return None
|
||||
|
||||
# print(response)
|
||||
html = BeautifulSoup(response, 'html.parser')
|
||||
doujinshi_info = html.find('div', attrs={'id': 'info'})
|
||||
|
||||
title = doujinshi_info.find('h1').text
|
||||
pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text
|
||||
subtitle = doujinshi_info.find('h2')
|
||||
|
||||
doujinshi['name'] = title
|
||||
doujinshi['pretty_name'] = pretty_name
|
||||
doujinshi['subtitle'] = subtitle.text if subtitle else ''
|
||||
|
||||
doujinshi_cover = html.find('div', attrs={'id': 'cover'})
|
||||
img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png|gif)$', doujinshi_cover.a.img.attrs['data-src'])
|
||||
img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif)$',
|
||||
doujinshi_cover.a.img.attrs['data-src'])
|
||||
|
||||
ext = []
|
||||
for i in html.find_all('div', attrs={'class': 'thumb-container'}):
|
||||
@ -148,11 +158,12 @@ def doujinshi_parser(id_):
|
||||
|
||||
if not img_id:
|
||||
logger.critical('Tried yo get image id failed')
|
||||
exit(1)
|
||||
sys.exit(1)
|
||||
|
||||
doujinshi['img_id'] = img_id.group(1)
|
||||
doujinshi['ext'] = ext
|
||||
|
||||
pages = 0
|
||||
for _ in doujinshi_info.find_all('div', class_='tag-container field-name'):
|
||||
if re.search('Pages:', _.text):
|
||||
pages = _.find('span', class_='name').string
|
||||
@ -174,76 +185,15 @@ def doujinshi_parser(id_):
|
||||
return doujinshi
|
||||
|
||||
|
||||
def old_search_parser(keyword, sorting='date', page=1):
    logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
    response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page, 'sort': sorting}).content

    result = _get_title_and_id(response)
    if not result:
        logger.warning('Not found anything of keyword {}'.format(keyword))

    return result


def print_doujinshi(doujinshi_list):
    if not doujinshi_list:
        return
    doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
    headers = ['id', 'doujinshi']
    logger.info('Search Result || Found %i doujinshis \n' % len(doujinshi_list) +
                tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))


def search_parser(keyword, sorting, page, is_page_all=False):
    # keyword = '+'.join([i.strip().replace(' ', '-').lower() for i in keyword.split(',')])
    result = []
    if not page:
        page = [1]

    if is_page_all:
        url = request('get', url=constant.SEARCH_URL, params={'query': keyword}).url
        init_response = request('get', url.replace('%2B', '+')).json()
        page = range(1, init_response['num_pages']+1)

    total = '/{0}'.format(page[-1]) if is_page_all else ''
    for p in page:
        i = 0

        logger.info('Searching doujinshis using keywords "{0}" on page {1}{2}'.format(keyword, p, total))
        while i < 3:
            try:
                url = request('get', url=constant.SEARCH_URL, params={'query': keyword,
                                                                      'page': p, 'sort': sorting}).url
                response = request('get', url.replace('%2B', '+')).json()
            except Exception as e:
                logger.critical(str(e))

            break

        if 'result' not in response:
            logger.warning('No result in response in page {}'.format(p))
            break

        for row in response['result']:
            title = row['title']['english']
            title = title[:85] + '..' if len(title) > 85 else title
            result.append({'id': row['id'], 'title': title})

    if not result:
        logger.warning('No results for keywords {}'.format(keyword))

    return result


def __api_suspended_doujinshi_parser(id_):
def legacy_doujinshi_parser(id_):
    if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
        raise Exception('Doujinshi id({0}) is not valid'.format(id_))
        raise Exception(f'Doujinshi id({id_}) is not valid')

    id_ = int(id_)
    logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
    logger.info(f'Fetching information of doujinshi id {id_}')
    doujinshi = dict()
    doujinshi['id'] = id_
    url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
    url = f'{constant.DETAIL_URL}/{id_}'
    i = 0
    while 5 > i:
        try:
@ -252,7 +202,7 @@ def __api_suspended_doujinshi_parser(id_):
            i += 1
            if not i < 5:
                logger.critical(str(e))
                exit(1)
                sys.exit(1)
            continue
        break
@ -282,5 +232,112 @@ def __api_suspended_doujinshi_parser(id_):
    return doujinshi


def print_doujinshi(doujinshi_list):
    if not doujinshi_list:
        return
    doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
    headers = ['id', 'doujinshi']
    logger.info(f'Search Result || Found {len(doujinshi_list)} doujinshis')
    print(tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))


def legacy_search_parser(keyword, sorting, page, is_page_all=False, type_='SEARCH'):
    logger.info(f'Searching doujinshis of keyword {keyword}')
    result = []

    if type_ not in ('SEARCH', 'ARTIST', ):
        raise ValueError('Invalid type')

    if is_page_all:
        if type_ == 'SEARCH':
            response = request('get', url=constant.LEGACY_SEARCH_URL,
                               params={'q': keyword, 'page': 1, 'sort': sorting}).content
        else:
            url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
            response = request('get', url=url, params={'page': 1}).content

        html = BeautifulSoup(response, 'lxml')
        pagination = html.find(attrs={'class': 'pagination'})
        last_page = pagination.find(attrs={'class': 'last'})
        last_page = re.findall('page=([0-9]+)', last_page.attrs['href'])[0]
        logger.info(f'Getting doujinshi ids of {last_page} pages')
        pages = range(1, int(last_page))
    else:
        pages = page

    for p in pages:
        logger.info(f'Fetching page {p} ...')
        if type_ == 'SEARCH':
            response = request('get', url=constant.LEGACY_SEARCH_URL,
                               params={'q': keyword, 'page': p, 'sort': sorting}).content
        else:
            url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
            response = request('get', url=url, params={'page': p}).content

        if response is None:
            logger.warning(f'No result in response in page {p}')
            continue
        result.extend(_get_title_and_id(response))

    if not result:
        logger.warning(f'No results for keywords {keyword}')

    return result


def search_parser(keyword, sorting, page, is_page_all=False):
    result = []
    response = None
    if not page:
        page = [1]

    if is_page_all:
        url = request('get', url=constant.SEARCH_URL, params={'query': keyword}).url
        init_response = request('get', url.replace('%2B', '+')).json()
        page = range(1, init_response['num_pages']+1)

    total = f'/{page[-1]}' if is_page_all else ''
    not_exists_persist = False
    for p in page:
        i = 0

        logger.info(f'Searching doujinshis using keywords "{keyword}" on page {p}{total}')
        while i < 3:
            try:
                url = request('get', url=constant.SEARCH_URL, params={'query': keyword,
                                                                      'page': p, 'sort': sorting}).url

                if constant.DEBUG:
                    logger.debug(f'Request URL: {url}')

                response = request('get', url.replace('%2B', '+')).json()
            except Exception as e:
                logger.critical(str(e))
                response = None
            break

        if constant.DEBUG:
            logger.debug(f'Response: {response}')

        if response is None or 'result' not in response:
            logger.warning(f'No result in response in page {p}')
            if not_exists_persist is True:
                break
            continue

        for row in response['result']:
            title = row['title']['english']
            title = title[:constant.CONFIG['max_filename']] + '..' if \
                len(title) > constant.CONFIG['max_filename'] else title

            result.append({'id': row['id'], 'title': title})

        not_exists_persist = False

    if not result:
        logger.warning(f'No results for keywords {keyword}')

    return result


if __name__ == '__main__':
    print(doujinshi_parser("32271"))

nhentai/serializer.py
@ -2,9 +2,10 @@
import json
import os
from xml.sax.saxutils import escape
from nhentai.constant import LANGUAGE_ISO


def serialize_json(doujinshi, dir):
def serialize_json(doujinshi, output_dir):
    metadata = {'title': doujinshi.name,
                'subtitle': doujinshi.info.subtitle}
    if doujinshi.info.date:
@ -21,17 +22,17 @@ def serialize_json(doujinshi, dir):
        metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
    if doujinshi.info.languages:
        metadata['language'] = [i.strip() for i in doujinshi.info.languages.split(',')]
    metadata['category'] = doujinshi.info.categories
    metadata['category'] = [i.strip() for i in doujinshi.info.categories.split(',')]
    metadata['URL'] = doujinshi.url
    metadata['Pages'] = doujinshi.pages

    with open(os.path.join(dir, 'metadata.json'), 'w') as f:
        json.dump(metadata, f, separators=','':')
    with open(os.path.join(output_dir, 'metadata.json'), 'w') as f:
        json.dump(metadata, f, separators=(',', ':'))


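The `separators` fix above passes a proper two-tuple: `(',', ':')` is the compact JSON form with no whitespace after delimiters. A quick illustration (metadata values hypothetical):

import json

metadata = {'title': 'example', 'Pages': 42}
print(json.dumps(metadata))                         # {"title": "example", "Pages": 42}
print(json.dumps(metadata, separators=(',', ':')))  # {"title":"example","Pages":42}
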
def serialize_comicxml(doujinshi, dir):
def serialize_comic_xml(doujinshi, output_dir):
    from iso8601 import parse_date
    with open(os.path.join(dir, 'ComicInfo.xml'), 'w') as f:
    with open(os.path.join(output_dir, 'ComicInfo.xml'), 'w', encoding="utf-8") as f:
        f.write('<?xml version="1.0" encoding="utf-8"?>\n')
        f.write('<ComicInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" '
                'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n')
@ -45,7 +46,8 @@ def serialize_comicxml(doujinshi, dir):
        xml_write_simple_tag(f, 'NhentaiId', doujinshi.id)
        xml_write_simple_tag(f, 'Genre', doujinshi.info.categories)

        xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and 'full color' in doujinshi.info.tags else 'Yes')
        xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and
                             'full color' in doujinshi.info.tags else 'Yes')

        if doujinshi.info.date:
            dt = parse_date(doujinshi.info.date)
@ -59,19 +61,20 @@ def serialize_comicxml(doujinshi, dir):
        if doujinshi.info.tags:
            xml_write_simple_tag(f, 'Tags', doujinshi.info.tags)
        if doujinshi.info.artists:
            xml_write_simple_tag(f, 'Writer', ' & '.join([i.strip() for i in doujinshi.info.artists.split(',')]))
            # if doujinshi.info.groups:
            #     metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
            xml_write_simple_tag(f, 'Writer', ' & '.join([i.strip() for i in
                                                          doujinshi.info.artists.split(',')]))

        if doujinshi.info.languages:
            languages = [i.strip() for i in doujinshi.info.languages.split(',')]
            xml_write_simple_tag(f, 'Translated', 'Yes' if 'translated' in languages else 'No')
            [xml_write_simple_tag(f, 'Language', i) for i in languages if i != 'translated']
            [xml_write_simple_tag(f, 'LanguageISO', LANGUAGE_ISO[i]) for i in languages
             if (i != 'translated' and i in LANGUAGE_ISO)]

        f.write('</ComicInfo>')


def xml_write_simple_tag(f, name, val, indent=1):
    f.write('{}<{}>{}</{}>\n'.format(' ' * indent, name, escape(str(val)), name))
    f.write(f'{" "*indent}<{name}>{escape(str(val))}</{name}>\n')


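Either form of the helper writes one indented, escaped XML element per call; a small usage sketch against an in-memory buffer:

import io
from xml.sax.saxutils import escape

def xml_write_simple_tag(f, name, val, indent=1):
    f.write(f'{" "*indent}<{name}>{escape(str(val))}</{name}>\n')

buf = io.StringIO()
xml_write_simple_tag(buf, 'Series', 'A & B')
print(buf.getvalue())  # ' <Series>A &amp; B</Series>' -- escape() handles the ampersand
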
def merge_json():
@ -120,7 +123,7 @@ def serialize_unique(lst):
def set_js_database():
    with open('data.js', 'w') as f:
        indexed_json = merge_json()
        unique_json = json.dumps(serialize_unique(indexed_json), separators=','':')
        indexed_json = json.dumps(indexed_json, separators=','':')
        unique_json = json.dumps(serialize_unique(indexed_json), separators=(',', ':'))
        indexed_json = json.dumps(indexed_json, separators=(',', ':'))
        f.write('var data = ' + indexed_json)
        f.write(';\nvar tags = ' + unique_json)
234
nhentai/utils.py
@ -5,31 +5,45 @@ import re
import os
import zipfile
import shutil

import requests
import sqlite3
import urllib.parse
from typing import Optional, Tuple

from nhentai import constant
from nhentai.logger import logger
from nhentai.serializer import serialize_json, serialize_comicxml, set_js_database
from nhentai.serializer import serialize_json, serialize_comic_xml, set_js_database

MAX_FIELD_LENGTH = 100


def request(method, url, **kwargs):
    session = requests.Session()
    session.headers.update({
        'Referer': constant.LOGIN_URL,
        'User-Agent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
        'User-Agent': constant.CONFIG['useragent'],
        'Cookie': constant.CONFIG['cookie']
    })
    return getattr(session, method)(url, proxies=constant.CONFIG['proxy'], verify=False, **kwargs)

    if not kwargs.get('proxies', None):
        kwargs['proxies'] = constant.CONFIG['proxy']

    return getattr(session, method)(url, verify=False, **kwargs)


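The reworked `request()` only falls back to `constant.CONFIG['proxy']` when the caller did not pass `proxies` itself, so a single call can override the configured proxy. A hedged usage sketch (the proxy address is hypothetical):

# Default: the proxy from constant.CONFIG['proxy'] is used.
resp = request('get', constant.BASE_URL)

# Per-call override: this request bypasses the configured proxy.
resp = request('get', constant.BASE_URL, proxies={'https': 'http://127.0.0.1:8080'})
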
def check_cookie():
    response = request('get', constant.BASE_URL).text
    username = re.findall('"/users/\d+/(.*?)"', response)
    response = request('get', constant.BASE_URL)
    if response.status_code == 403 and 'Just a moment...' in response.text:
        logger.error('Blocked by Cloudflare captcha, please set your cookie and useragent')
        sys.exit(1)

    username = re.findall('"/users/[0-9]+/(.*?)"', response.text)
    if not username:
        logger.error('Cannot get your username, please check your cookie or use `nhentai --cookie` to set your cookie')
        logger.warning(
            'Cannot get your username, please check your cookie or use `nhentai --cookie` to set your cookie')
    else:
        logger.info('Login successfully! Your username: {}'.format(username[0]))
        logger.log(16, f'Login successfully! Your username: {username[0]}')
class _Singleton(type):
@ -46,15 +60,6 @@ class Singleton(_Singleton(str('SingletonMeta'), (object,), {})):
    pass


def urlparse(url):
    try:
        from urlparse import urlparse
    except ImportError:
        from urllib.parse import urlparse

    return urlparse(url)


def readfile(path):
    loc = os.path.dirname(__file__)
@ -62,13 +67,38 @@ def readfile(path):
        return file.read()


def parse_doujinshi_obj(
        output_dir: str,
        doujinshi_obj=None,
        file_type: str = ''
) -> Tuple[str, str]:

    filename = f'./doujinshi.{file_type}'
    doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
    if doujinshi_obj is not None:
        _filename = f'{doujinshi_obj.filename}.{file_type}'

        if file_type == 'cbz':
            serialize_comic_xml(doujinshi_obj, doujinshi_dir)

        if file_type == 'pdf':
            _filename = _filename.replace('/', '-')

        filename = os.path.join(output_dir, _filename)

    return doujinshi_dir, filename


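A short usage sketch of `parse_doujinshi_obj` (assuming a doujinshi object whose `filename` attribute is 'my-title'):

# doujinshi_dir -> 'output/my-title'
# filename      -> 'output/my-title.cbz', with ComicInfo.xml serialized as a side effect
doujinshi_dir, filename = parse_doujinshi_obj('output', doujinshi_obj, 'cbz')
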
def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
    doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, '.html')
    image_html = ''

    if doujinshi_obj is not None:
        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
    else:
        doujinshi_dir = '.'
    if not os.path.exists(doujinshi_dir):
        logger.warning(f'Path "{doujinshi_dir}" does not exist, creating.')
        try:
            os.makedirs(doujinshi_dir)
        except EnvironmentError as e:
            logger.critical(e)

    file_list = os.listdir(doujinshi_dir)
    file_list.sort()
@ -76,38 +106,52 @@ def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
    for image in file_list:
        if not os.path.splitext(image)[1] in ('.jpg', '.png'):
            continue

        image_html += '<img src="{0}" class="image-item"/>\n'\
            .format(image)
        image_html += f'<img src="{image}" class="image-item"/>\n'
    html = readfile('viewer/{}/index.html'.format(template))
    css = readfile('viewer/{}/styles.css'.format(template))
    js = readfile('viewer/{}/scripts.js'.format(template))
    html = readfile(f'viewer/{template}/index.html')
    css = readfile(f'viewer/{template}/styles.css')
    js = readfile(f'viewer/{template}/scripts.js')

    if doujinshi_obj is not None:
        serialize_json(doujinshi_obj, doujinshi_dir)
        name = doujinshi_obj.name
        if sys.version_info < (3, 0):
            name = doujinshi_obj.name.encode('utf-8')
    else:
        name = {'title': 'nHentai HTML Viewer'}

    data = html.format(TITLE=name, IMAGES=image_html, SCRIPTS=js, STYLES=css)
    try:
        if sys.version_info < (3, 0):
            with open(os.path.join(doujinshi_dir, 'index.html'), 'w') as f:
                f.write(data)
        else:
            with open(os.path.join(doujinshi_dir, 'index.html'), 'wb') as f:
                f.write(data.encode('utf-8'))

        logger.log(15, 'HTML Viewer has been written to \'{0}\''.format(os.path.join(doujinshi_dir, 'index.html')))
        logger.log(16, f'HTML Viewer has been written to "{os.path.join(doujinshi_dir, "index.html")}"')
    except Exception as e:
        logger.warning('Writing HTML Viewer failed ({})'.format(str(e)))
        logger.warning(f'Writing HTML Viewer failed ({e})')


def move_to_folder(output_dir='.', doujinshi_obj=None, file_type=None):
    if not file_type:
        raise RuntimeError('no file_type specified')

    doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)

    for fn in os.listdir(doujinshi_dir):
        file_path = os.path.join(doujinshi_dir, fn)
        _, ext = os.path.splitext(file_path)
        if ext in ['.pdf', '.cbz']:
            continue

        if os.path.isfile(file_path):
            try:
                os.remove(file_path)
            except Exception as e:
                print(f"Error deleting file: {e}")

    shutil.move(filename, os.path.join(doujinshi_dir, os.path.basename(filename)))


def generate_main_html(output_dir='./'):
    """
    Generate a main html to show all the contain doujinshi.
    Generate a main html to show all the contained doujinshi.
    With a link to their `index.html`.
    Default output folder will be the CLI path.
    """
@ -136,7 +180,7 @@ def generate_main_html(output_dir='./'):
        files.sort()

        if 'index.html' in files:
            logger.info('Add doujinshi \'{}\''.format(folder))
            logger.info(f'Add doujinshi "{folder}"')
        else:
            continue
@ -146,92 +190,63 @@ def generate_main_html(output_dir='./'):
    else:
        title = 'nHentai HTML Viewer'

    image_html += element.format(FOLDER=folder, IMAGE=image, TITLE=title)
    image_html += element.format(FOLDER=urllib.parse.quote(folder), IMAGE=image, TITLE=title)
    if image_html == '':
        logger.warning('No index.html found, --gen-main paused.')
        return
    try:
        data = main.format(STYLES=css, SCRIPTS=js, PICTURE=image_html)
        if sys.version_info < (3, 0):
            with open('./main.html', 'w') as f:
                f.write(data)
        else:
            with open('./main.html', 'wb') as f:
                f.write(data.encode('utf-8'))
        shutil.copy(os.path.dirname(__file__)+'/viewer/logo.png', './')
        shutil.copy(os.path.dirname(__file__) + '/viewer/logo.png', './')
        set_js_database()
        logger.log(
            15, 'Main Viewer has been written to \'{0}main.html\''.format(output_dir))
        output_dir = output_dir[:-1] if output_dir.endswith('/') else output_dir
        logger.log(16, f'Main Viewer has been written to "{output_dir}/main.html"')
    except Exception as e:
        logger.warning('Writing Main Viewer failed ({})'.format(str(e)))
        logger.warning(f'Writing Main Viewer failed ({e})')


def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=False):
    if doujinshi_obj is not None:
        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
        if write_comic_info:
            serialize_comicxml(doujinshi_obj, doujinshi_dir)
        cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), '{}.cbz'.format(doujinshi_obj.filename))
    else:
        cbz_filename = './doujinshi.cbz'
        doujinshi_dir = '.'
def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=False):

    doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)

    if os.path.exists(f'{doujinshi_dir}.{file_type}') and not regenerate:
        logger.info(f'Skipped {file_type} file generation: {doujinshi_dir}.{file_type} already exists')
        return

    if file_type == 'cbz':
        file_list = os.listdir(doujinshi_dir)
        file_list.sort()

        logger.info('Writing CBZ file to path: {}'.format(cbz_filename))
        with zipfile.ZipFile(cbz_filename, 'w') as cbz_pf:
        logger.info(f'Writing CBZ file to path: {filename}')
        with zipfile.ZipFile(filename, 'w') as cbz_pf:
            for image in file_list:
                image_path = os.path.join(doujinshi_dir, image)
                cbz_pf.write(image_path, image)

        if rm_origin_dir:
            shutil.rmtree(doujinshi_dir, ignore_errors=True)

        logger.log(15, 'Comic Book CBZ file has been written to \'{0}\''.format(doujinshi_dir))


def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
        logger.log(16, f'Comic Book CBZ file has been written to "{filename}"')
    elif file_type == 'pdf':
        try:
            import img2pdf
        except ImportError:
            logger.error("Please install img2pdf package by using pip.")

    """Write images to a PDF file using img2pdf."""
    if doujinshi_obj is not None:
        doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
        pdf_filename = os.path.join(
            os.path.join(doujinshi_dir, '..'),
            '{}.pdf'.format(doujinshi_obj.filename)
        )
    else:
        pdf_filename = './doujinshi.pdf'
        doujinshi_dir = '.'

            file_list = os.listdir(doujinshi_dir)
            file_list = [f for f in os.listdir(doujinshi_dir) if f.lower().endswith(('.png', '.jpg', '.jpeg', '.gif'))]
            file_list.sort()

            logger.info('Writing PDF file to path: {}'.format(pdf_filename))
            with open(pdf_filename, 'wb') as pdf_f:
            logger.info(f'Writing PDF file to path: {filename}')
            with open(filename, 'wb') as pdf_f:
                full_path_list = (
                    [os.path.join(doujinshi_dir, image) for image in file_list]
                )
                pdf_f.write(img2pdf.convert(full_path_list))
                pdf_f.write(img2pdf.convert(full_path_list, rotation=img2pdf.Rotation.ifvalid))

            if rm_origin_dir:
                shutil.rmtree(doujinshi_dir, ignore_errors=True)
            logger.log(16, f'PDF file has been written to "{filename}"')

    logger.log(15, 'PDF file has been written to \'{0}\''.format(doujinshi_dir))
        except ImportError:
            logger.error("Please install img2pdf package by using pip.")


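The CBZ branch above amounts to zipping the page images in sorted order; a minimal standalone sketch of the same idea (paths hypothetical):

import os
import zipfile

def write_cbz(image_dir, cbz_path):
    # A .cbz is just a zip archive of the page images, stored in reading order.
    with zipfile.ZipFile(cbz_path, 'w') as cbz:
        for image in sorted(os.listdir(image_dir)):
            cbz.write(os.path.join(image_dir, image), image)
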
def unicode_truncate(s, length, encoding='utf-8'):
    """https://stackoverflow.com/questions/1809531/truncating-unicode-so-it-fits-a-maximum-size-when-encoded-for-wire-transfer
    """
    encoded = s.encode(encoding)[:length]
    return encoded.decode(encoding, 'ignore')


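`unicode_truncate` cuts by encoded byte length rather than character count, and `decode(..., 'ignore')` silently drops a multi-byte character that the slice cut in half. For example:

print(unicode_truncate('héllo', 2))  # 'h'  -- the second byte is half of 'é' and is dropped
print(unicode_truncate('héllo', 3))  # 'hé' -- 'é' is 2 bytes in UTF-8, so 3 bytes fit 'h' + 'é'
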
def format_filename(s):
def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
    """
    It used to be a whitelist approach that allowed only alphabet characters and some symbols,
    but most doujinshi names include Japanese multi-byte characters, and those were rejected.
@ -239,16 +254,20 @@ def format_filename(s):
    If the filename includes forbidden characters (\'/:,;*?"<>|), they are replaced with a space character (' ').
    """
    # maybe you can use `--format` to select a suitable filename
    ban_chars = '\\\'/:,;*?"<>|\t'
    filename = s.translate(str.maketrans(ban_chars, ' '*len(ban_chars))).strip()

    if not _truncate_only:
        ban_chars = '\\\'/:,;*?"<>|\t\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b'
        filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip()
        filename = ' '.join(filename.split())

        while filename.endswith('.'):
            filename = filename[:-1]
    else:
        filename = s

    if len(filename) > 100:
        filename = filename[:100] + u'…'
    # limit `length` chars
    if len(filename) >= length:
        filename = filename[:length - 1] + u'…'

    # Remove [] from filename
    filename = filename.replace('[]', '').strip()
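Together the new parameters make the sanitizer reusable: `length` caps the filename and `_truncate_only` skips character replacement entirely. A usage sketch (inputs hypothetical):

print(format_filename('fate/stay night: a "doujin"'))
# -> 'fate stay night a doujin'  (forbidden characters become spaces, runs collapsed)
print(format_filename('a' * 300, length=10))
# -> 'aaaaaaaaa…'  (truncated to length - 1 characters plus an ellipsis)
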
@ -257,7 +276,7 @@ def format_filename(s):

def signal_handler(signal, frame):
    logger.error('Ctrl-C signal received. Stopping...')
    exit(1)
    sys.exit(1)


def paging(page_string):
@ -271,7 +290,7 @@ def paging(page_string):
            start, end = i.split('-')
            if not (start.isdigit() and end.isdigit()):
                raise Exception('Invalid page number')
            page_list.extend(list(range(int(start), int(end)+1)))
            page_list.extend(list(range(int(start), int(end) + 1)))
        else:
            if not i.isdigit():
                raise Exception('Invalid page number')
@ -280,6 +299,29 @@ def paging(page_string):
    return page_list


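`paging` expands a page expression such as '1-3,5' into an explicit list of page numbers:

print(paging('1-3,5'))  # [1, 2, 3, 5]
print(paging('7'))      # [7]
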
def generate_metadata_file(output_dir, doujinshi_obj):
    info_txt_path = os.path.join(output_dir, doujinshi_obj.filename, 'info.txt')

    f = open(info_txt_path, 'w', encoding='utf-8')

    fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'GROUPS', 'CIRCLE', 'SCANLATOR',
              'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
              'TAGS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
              'SERIES', 'PARODY', 'URL']
    special_fields = ['PARODY', 'TITLE', 'ORIGINAL TITLE', 'CHARACTERS', 'AUTHOR', 'GROUPS',
                      'LANGUAGE', 'TAGS', 'URL', 'PAGES']

    for i in range(len(fields)):
        f.write(f'{fields[i]}: ')
        if fields[i] in special_fields:
            f.write(str(doujinshi_obj.table[special_fields.index(fields[i])][1]))
        f.write('\n')

    f.close()
    logger.log(16, f'Metadata Info has been written to "{info_txt_path}"')


class DB(object):
    conn = None
    cur = None

nhentai/viewer/main.js
@ -139,7 +139,7 @@ function filter_searcher(){
                break
            }
        }
        if (verifier){doujinshi_id.push(data[i].Folder);}
        if (verifier){doujinshi_id.push(data[i].Folder.replace("_", " "));}
    }
    var gallery = document.getElementsByClassName("gallery-favorite");
    for (var i = 0; i < gallery.length; i++){
25
nhentai/viewer/minimal/index.html
Normal file
@ -0,0 +1,25 @@
<!DOCTYPE html>
<html>
<head>
    <meta charset="UTF-8">
    <meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=yes, viewport-fit=cover" />
    <title>{TITLE}</title>
    <style>
        {STYLES}
    </style>
</head>
<body>

<nav id="list" hidden=true>
{IMAGES}</nav>

<div id="image-container">
    <div id="dest"></div>
    <span id="page-num"></span>
</div>

<script>
{SCRIPTS}
</script>
</body>
</html>
79
nhentai/viewer/minimal/scripts.js
Normal file
@ -0,0 +1,79 @@
const pages = Array.from(document.querySelectorAll('img.image-item'));
let currentPage = 0;

function changePage(pageNum) {
    const previous = pages[currentPage];
    const current = pages[pageNum];

    if (current == null) {
        return;
    }

    previous.classList.remove('current');
    current.classList.add('current');

    currentPage = pageNum;

    const display = document.getElementById('dest');
    display.style.backgroundImage = `url("${current.src}")`;

    scroll(0,0)

    document.getElementById('page-num')
        .innerText = [
            (pageNum + 1).toLocaleString(),
            pages.length.toLocaleString()
        ].join('\u200a/\u200a');
}

changePage(0);

document.getElementById('image-container').onclick = event => {
    const width = document.getElementById('image-container').clientWidth;
    const clickPos = event.clientX / width;

    if (clickPos < 0.5) {
        changePage(currentPage - 1);
    } else {
        changePage(currentPage + 1);
    }
};

document.onkeypress = event => {
    switch (event.key.toLowerCase()) {
        // Previous Image
        case 'w':
            scrollBy(0, -40);
            break;
        case 'a':
            changePage(currentPage - 1);
            break;
        // Return to previous page
        case 'q':
            window.history.go(-1);
            break;
        // Next Image
        case ' ':
        case 's':
            scrollBy(0, 40);
            break;
        case 'd':
            changePage(currentPage + 1);
            break;
    } // arrow keys do not fire keypress; they are handled in onkeydown below
};

document.onkeydown = event =>{
    switch (event.keyCode) {
        case 37: //left
            changePage(currentPage - 1);
            break;
        case 38: //up
            break;
        case 39: //right
            changePage(currentPage + 1);
            break;
        case 40: //down
            break;
    }
};
75
nhentai/viewer/minimal/styles.css
Normal file
@ -0,0 +1,75 @@
*, *::after, *::before {
    box-sizing: border-box;
}

img {
    vertical-align: middle;
}

html, body {
    display: flex;
    background-color: #e8e6e6;
    height: 100%;
    width: 100%;
    padding: 0;
    margin: 0;
    font-family: sans-serif;
}

#list {
    height: 2000px;
    overflow: scroll;
    width: 260px;
    text-align: center;
}

#list img {
    width: 200px;
    padding: 10px;
    border-radius: 10px;
    margin: 15px 0;
    cursor: pointer;
}

#list img.current {
    background: #0003;
}

#image-container {
    flex: auto;
    height: 100%;
    background: rgb(0, 0, 0);
    color: rgb(100, 100, 100);
    text-align: center;
    cursor: pointer;
    -webkit-user-select: none;
    user-select: none;
    position: relative;
}

#image-container #dest {
    height: 2000px;
    width: 100%;
    background-size: contain;
    background-repeat: no-repeat;
    background-position: top;
    margin-left: auto;
    margin-right: auto;
    max-width: 100%;
    max-height: 100vh;
    margin: auto;
}

#image-container #page-num {
    position: static;
    font-size: 9pt;
    left: 10px;
    bottom: 5px;
    font-weight: bold;
    opacity: 0.9;
    text-shadow: /* Duplicate the same shadow to make it very strong */
        0 0 2px #222,
        0 0 2px #222,
        0 0 2px #222;
}
216
poetry.lock
generated
Normal file
@ -0,0 +1,216 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.

[[package]]
name = "beautifulsoup4"
version = "4.11.2"
description = "Screen-scraping library"
optional = false
python-versions = ">=3.6.0"
files = [
    {file = "beautifulsoup4-4.11.2-py3-none-any.whl", hash = "sha256:0e79446b10b3ecb499c1556f7e228a53e64a2bfcebd455f370d8927cb5b59e39"},
    {file = "beautifulsoup4-4.11.2.tar.gz", hash = "sha256:bc4bdda6717de5a2987436fb8d72f45dc90dd856bdfd512a1314ce90349a0106"},
]

[package.dependencies]
soupsieve = ">1.2"

[package.extras]
html5lib = ["html5lib"]
lxml = ["lxml"]

[[package]]
name = "certifi"
version = "2024.7.4"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
    {file = "certifi-2024.7.4-py3-none-any.whl", hash = "sha256:c198e21b1289c2ab85ee4e67bb4b4ef3ead0892059901a8d5b622f24a1101e90"},
    {file = "certifi-2024.7.4.tar.gz", hash = "sha256:5a1e7645bc0ec61a09e26c36f6106dd4cf40c6db3a1fb6352b0244e7fb057c7b"},
]

[[package]]
name = "charset-normalizer"
version = "3.0.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = "*"
files = [
    {file = "charset-normalizer-3.0.1.tar.gz", hash = "sha256:ebea339af930f8ca5d7a699b921106c6e29c617fe9606fa7baa043c1cdae326f"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:88600c72ef7587fe1708fd242b385b6ed4b8904976d5da0893e31df8b3480cb6"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:c75ffc45f25324e68ab238cb4b5c0a38cd1c3d7f1fb1f72b5541de469e2247db"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:db72b07027db150f468fbada4d85b3b2729a3db39178abf5c543b784c1254539"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:62595ab75873d50d57323a91dd03e6966eb79c41fa834b7a1661ed043b2d404d"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ff6f3db31555657f3163b15a6b7c6938d08df7adbfc9dd13d9d19edad678f1e8"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:772b87914ff1152b92a197ef4ea40efe27a378606c39446ded52c8f80f79702e"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:70990b9c51340e4044cfc394a81f614f3f90d41397104d226f21e66de668730d"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:292d5e8ba896bbfd6334b096e34bffb56161c81408d6d036a7dfa6929cff8783"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:2edb64ee7bf1ed524a1da60cdcd2e1f6e2b4f66ef7c077680739f1641f62f555"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:31a9ddf4718d10ae04d9b18801bd776693487cbb57d74cc3458a7673f6f34639"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:44ba614de5361b3e5278e1241fda3dc1838deed864b50a10d7ce92983797fa76"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:12db3b2c533c23ab812c2b25934f60383361f8a376ae272665f8e48b88e8e1c6"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c512accbd6ff0270939b9ac214b84fb5ada5f0409c44298361b2f5e13f9aed9e"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-win32.whl", hash = "sha256:502218f52498a36d6bf5ea77081844017bf7982cdbe521ad85e64cabee1b608b"},
    {file = "charset_normalizer-3.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:601f36512f9e28f029d9481bdaf8e89e5148ac5d89cffd3b05cd533eeb423b59"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0298eafff88c99982a4cf66ba2efa1128e4ddaca0b05eec4c456bbc7db691d8d"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:a8d0fc946c784ff7f7c3742310cc8a57c5c6dc31631269876a88b809dbeff3d3"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:87701167f2a5c930b403e9756fab1d31d4d4da52856143b609e30a1ce7160f3c"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:14e76c0f23218b8f46c4d87018ca2e441535aed3632ca134b10239dfb6dadd6b"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:0c0a590235ccd933d9892c627dec5bc7511ce6ad6c1011fdf5b11363022746c1"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8c7fe7afa480e3e82eed58e0ca89f751cd14d767638e2550c77a92a9e749c317"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:79909e27e8e4fcc9db4addea88aa63f6423ebb171db091fb4373e3312cb6d603"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8ac7b6a045b814cf0c47f3623d21ebd88b3e8cf216a14790b455ea7ff0135d18"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:72966d1b297c741541ca8cf1223ff262a6febe52481af742036a0b296e35fa5a"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:f9d0c5c045a3ca9bedfc35dca8526798eb91a07aa7a2c0fee134c6c6f321cbd7"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5995f0164fa7df59db4746112fec3f49c461dd6b31b841873443bdb077c13cfc"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:4a8fcf28c05c1f6d7e177a9a46a1c52798bfe2ad80681d275b10dcf317deaf0b"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:761e8904c07ad053d285670f36dd94e1b6ab7f16ce62b9805c475b7aa1cffde6"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-win32.whl", hash = "sha256:71140351489970dfe5e60fc621ada3e0f41104a5eddaca47a7acb3c1b851d6d3"},
    {file = "charset_normalizer-3.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:9ab77acb98eba3fd2a85cd160851816bfce6871d944d885febf012713f06659c"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:84c3990934bae40ea69a82034912ffe5a62c60bbf6ec5bc9691419641d7d5c9a"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:74292fc76c905c0ef095fe11e188a32ebd03bc38f3f3e9bcb85e4e6db177b7ea"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c95a03c79bbe30eec3ec2b7f076074f4281526724c8685a42872974ef4d36b72"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f4c39b0e3eac288fedc2b43055cfc2ca7a60362d0e5e87a637beac5d801ef478"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df2c707231459e8a4028eabcd3cfc827befd635b3ef72eada84ab13b52e1574d"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:93ad6d87ac18e2a90b0fe89df7c65263b9a99a0eb98f0a3d2e079f12a0735837"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:59e5686dd847347e55dffcc191a96622f016bc0ad89105e24c14e0d6305acbc6"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:cd6056167405314a4dc3c173943f11249fa0f1b204f8b51ed4bde1a9cd1834dc"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:083c8d17153ecb403e5e1eb76a7ef4babfc2c48d58899c98fcaa04833e7a2f9a"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:f5057856d21e7586765171eac8b9fc3f7d44ef39425f85dbcccb13b3ebea806c"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:7eb33a30d75562222b64f569c642ff3dc6689e09adda43a082208397f016c39a"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-win32.whl", hash = "sha256:95dea361dd73757c6f1c0a1480ac499952c16ac83f7f5f4f84f0658a01b8ef41"},
    {file = "charset_normalizer-3.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:eaa379fcd227ca235d04152ca6704c7cb55564116f8bc52545ff357628e10602"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:3e45867f1f2ab0711d60c6c71746ac53537f1684baa699f4f668d4c6f6ce8e14"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cadaeaba78750d58d3cc6ac4d1fd867da6fc73c88156b7a3212a3cd4819d679d"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:911d8a40b2bef5b8bbae2e36a0b103f142ac53557ab421dc16ac4aafee6f53dc"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:503e65837c71b875ecdd733877d852adbc465bd82c768a067badd953bf1bc5a3"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a60332922359f920193b1d4826953c507a877b523b2395ad7bc716ddd386d866"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16a8663d6e281208d78806dbe14ee9903715361cf81f6d4309944e4d1e59ac5b"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:a16418ecf1329f71df119e8a65f3aa68004a3f9383821edcb20f0702934d8087"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:9d9153257a3f70d5f69edf2325357251ed20f772b12e593f3b3377b5f78e7ef8"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:02a51034802cbf38db3f89c66fb5d2ec57e6fe7ef2f4a44d070a593c3688667b"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:2e396d70bc4ef5325b72b593a72c8979999aa52fb8bcf03f701c1b03e1166918"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:11b53acf2411c3b09e6af37e4b9005cba376c872503c8f28218c7243582df45d"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-win32.whl", hash = "sha256:0bf2dae5291758b6f84cf923bfaa285632816007db0330002fa1de38bfcb7154"},
    {file = "charset_normalizer-3.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:2c03cc56021a4bd59be889c2b9257dae13bf55041a3372d3295416f86b295fb5"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:024e606be3ed92216e2b6952ed859d86b4cfa52cd5bc5f050e7dc28f9b43ec42"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:4b0d02d7102dd0f997580b51edc4cebcf2ab6397a7edf89f1c73b586c614272c"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:358a7c4cb8ba9b46c453b1dd8d9e431452d5249072e4f56cfda3149f6ab1405e"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:81d6741ab457d14fdedc215516665050f3822d3e56508921cc7239f8c8e66a58"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8b8af03d2e37866d023ad0ddea594edefc31e827fee64f8de5611a1dbc373174"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9cf4e8ad252f7c38dd1f676b46514f92dc0ebeb0db5552f5f403509705e24753"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e696f0dd336161fca9adbb846875d40752e6eba585843c768935ba5c9960722b"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c22d3fe05ce11d3671297dc8973267daa0f938b93ec716e12e0f6dee81591dc1"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:109487860ef6a328f3eec66f2bf78b0b72400280d8f8ea05f69c51644ba6521a"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:37f8febc8ec50c14f3ec9637505f28e58d4f66752207ea177c1d67df25da5aed"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:f97e83fa6c25693c7a35de154681fcc257c1c41b38beb0304b9c4d2d9e164479"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:a152f5f33d64a6be73f1d30c9cc82dfc73cec6477ec268e7c6e4c7d23c2d2291"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:39049da0ffb96c8cbb65cbf5c5f3ca3168990adf3551bd1dee10c48fce8ae820"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-win32.whl", hash = "sha256:4457ea6774b5611f4bed5eaa5df55f70abde42364d498c5134b7ef4c6958e20e"},
    {file = "charset_normalizer-3.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:e62164b50f84e20601c1ff8eb55620d2ad25fb81b59e3cd776a1902527a788af"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8eade758719add78ec36dc13201483f8e9b5d940329285edcd5f70c0a9edbd7f"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8499ca8f4502af841f68135133d8258f7b32a53a1d594aa98cc52013fff55678"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3fc1c4a2ffd64890aebdb3f97e1278b0cc72579a08ca4de8cd2c04799a3a22be"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d3ffdaafe92a5dc603cb9bd5111aaa36dfa187c8285c543be562e61b755f6b"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2ac1b08635a8cd4e0cbeaf6f5e922085908d48eb05d44c5ae9eabab148512ca"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f6f45710b4459401609ebebdbcfb34515da4fc2aa886f95107f556ac69a9147e"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3ae1de54a77dc0d6d5fcf623290af4266412a7c4be0b1ff7444394f03f5c54e3"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3b590df687e3c5ee0deef9fc8c547d81986d9a1b56073d82de008744452d6541"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab5de034a886f616a5668aa5d098af2b5385ed70142090e2a31bcbd0af0fdb3d"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:9cb3032517f1627cc012dbc80a8ec976ae76d93ea2b5feaa9d2a5b8882597579"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:608862a7bf6957f2333fc54ab4399e405baad0163dc9f8d99cb236816db169d4"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0f438ae3532723fb6ead77e7c604be7c8374094ef4ee2c5e03a3a17f1fca256c"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:356541bf4381fa35856dafa6a965916e54bed415ad8a24ee6de6e37deccf2786"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-win32.whl", hash = "sha256:39cf9ed17fe3b1bc81f33c9ceb6ce67683ee7526e65fde1447c772afc54a1bb8"},
    {file = "charset_normalizer-3.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:0a11e971ed097d24c534c037d298ad32c6ce81a45736d31e0ff0ad37ab437d59"},
    {file = "charset_normalizer-3.0.1-py3-none-any.whl", hash = "sha256:7e189e2e1d3ed2f4aebabd2d5b0f931e883676e51c7624826e0a4e5fe8a0bf24"},
]

[[package]]
name = "idna"
version = "3.7"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.5"
files = [
    {file = "idna-3.7-py3-none-any.whl", hash = "sha256:82fee1fc78add43492d3a1898bfa6d8a904cc97d8427f683ed8e798d07761aa0"},
    {file = "idna-3.7.tar.gz", hash = "sha256:028ff3aadf0609c1fd278d8ea3089299412a7a8b9bd005dd08b9f8285bcb5cfc"},
]

[[package]]
name = "iso8601"
version = "1.1.0"
description = "Simple module to parse ISO 8601 dates"
optional = false
python-versions = ">=3.6.2,<4.0"
files = [
    {file = "iso8601-1.1.0-py3-none-any.whl", hash = "sha256:8400e90141bf792bce2634df533dc57e3bee19ea120a87bebcd3da89a58ad73f"},
    {file = "iso8601-1.1.0.tar.gz", hash = "sha256:32811e7b81deee2063ea6d2e94f8819a86d1f3811e49d23623a41fa832bef03f"},
]

[[package]]
name = "requests"
version = "2.32.0"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
    {file = "requests-2.32.0-py3-none-any.whl", hash = "sha256:f2c3881dddb70d056c5bd7600a4fae312b2a300e39be6a118d30b90bd27262b5"},
    {file = "requests-2.32.0.tar.gz", hash = "sha256:fa5490319474c82ef1d2c9bc459d3652e3ae4ef4c4ebdd18a21145a47ca4b6b8"},
]

[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"

[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]

[[package]]
name = "soupsieve"
version = "2.4"
description = "A modern CSS selector implementation for Beautiful Soup."
optional = false
python-versions = ">=3.7"
files = [
    {file = "soupsieve-2.4-py3-none-any.whl", hash = "sha256:49e5368c2cda80ee7e84da9dbe3e110b70a4575f196efb74e51b94549d921955"},
    {file = "soupsieve-2.4.tar.gz", hash = "sha256:e28dba9ca6c7c00173e34e4ba57448f0688bb681b7c5e8bf4971daafc093d69a"},
]

[[package]]
name = "tabulate"
version = "0.9.0"
description = "Pretty-print tabular data"
optional = false
python-versions = ">=3.7"
files = [
    {file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
    {file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
]

[package.extras]
widechars = ["wcwidth"]

[[package]]
name = "urllib3"
version = "1.26.19"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
    {file = "urllib3-1.26.19-py2.py3-none-any.whl", hash = "sha256:37a0344459b199fce0e80b0d3569837ec6b6937435c5244e7fd73fa6006830f3"},
    {file = "urllib3-1.26.19.tar.gz", hash = "sha256:3e3d753a8618b86d7de333b4223005f68720bcd6a7d2bcb9fbd2229ec7c1e429"},
]

[package.extras]
brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]

[metadata]
lock-version = "2.0"
python-versions = "^3.8"
content-hash = "0a1d5abd47a669c7a1f2dc7b43824a449e29ba94908a4338d2ea0f2dfb4f805e"
21
pyproject.toml
Normal file
@ -0,0 +1,21 @@
[tool.poetry]
name = "nhentai"
version = "0.5.10"
description = "nhentai doujinshi downloader"
authors = ["Ricter Z <ricterzheng@gmail.com>"]
license = "MIT"
readme = "README.rst"

[tool.poetry.dependencies]
python = "^3.8"
requests = "^2.28.2"
soupsieve = "^2.4"
beautifulsoup4 = "^4.11.2"
tabulate = "^0.9.0"
iso8601 = "^1.1.0"
urllib3 = "^1.26.14"


[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

requirements.txt
@ -1,7 +1,7 @@
requests>=2.5.0
soupsieve<2.0
BeautifulSoup4>=4.0.0
threadpool>=1.2.7
tabulate>=0.7.5
future>=0.15.2
iso8601 >= 0.1
requests
soupsieve
setuptools
BeautifulSoup4
tabulate
iso8601
urllib3

setup.cfg
@ -1,3 +1,3 @@
[metadata]
description-file = README.md
description_file = README.rst
3
setup.py
@ -1,6 +1,4 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import sys
import codecs
from setuptools import setup, find_packages
from nhentai import __version__, __author__, __email__
@ -12,7 +10,6 @@ with open('requirements.txt') as f:
def long_description():
    with codecs.open('README.rst', 'rb') as readme:
        if not sys.version_info < (3, 0, 0):
            return readme.read().decode('utf-8')
0
tests/__init__.py
Normal file
36
tests/test_download.py
Normal file
@ -0,0 +1,36 @@
import unittest
import os
import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.downloader import Downloader
from nhentai.parser import doujinshi_parser
from nhentai.doujinshi import Doujinshi
from nhentai.utils import generate_html, generate_cbz


class TestDownload(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')

    def test_download(self):
        did = 440546
        info = Doujinshi(**doujinshi_parser(did), name_format='%i')
        info.downloader = Downloader(path='/tmp', size=5)
        info.download()

        self.assertTrue(os.path.exists(f'/tmp/{did}/001.jpg'))

        generate_html('/tmp', info)
        self.assertTrue(os.path.exists(f'/tmp/{did}/index.html'))

        generate_cbz('/tmp', info)
        self.assertTrue(os.path.exists(f'/tmp/{did}.cbz'))


if __name__ == '__main__':
    unittest.main()
26
tests/test_login.py
Normal file
@ -0,0 +1,26 @@
import os
import unittest
import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.utils import check_cookie


class TestLogin(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')

    def test_cookie(self):
        try:
            check_cookie()
            self.assertTrue(True)
        except Exception as e:
            self.assertIsNone(e)


if __name__ == '__main__':
    unittest.main()
27
tests/test_parser.py
Normal file
@ -0,0 +1,27 @@
import unittest
import os
import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.parser import search_parser, doujinshi_parser, favorites_parser


class TestParser(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')

    def test_search(self):
        result = search_parser('umaru', 'recent', [1], False)
        self.assertTrue(len(result) > 0)

    def test_doujinshi_parser(self):
        result = doujinshi_parser(123456)
        self.assertTrue(result['pages'] == 84)

    def test_favorites_parser(self):
        result = favorites_parser(page=[1])
        self.assertTrue(len(result) > 0)