Compare commits


337 Commits

Author SHA1 Message Date
ee3de81931 Bump requests from 2.32.3 to 2.32.4
Bumps [requests](https://github.com/psf/requests) from 2.32.3 to 2.32.4.
- [Release notes](https://github.com/psf/requests/releases)
- [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md)
- [Commits](https://github.com/psf/requests/compare/v2.32.3...v2.32.4)

---
updated-dependencies:
- dependency-name: requests
  dependency-version: 2.32.4
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-10 10:49:11 +00:00
3be7c02458 Merge pull request #405 from luolili233/master
Fix parsing bug in ComicInfo.xml, fixes #404
2025-05-29 21:36:52 +08:00
fd1a40867e Update serializer.py 2025-05-28 14:58:37 +08:00
6752edfc9d Merge pull request #402 from hzxjy1/zipTest
Close zipfile handler manually and add a test
2025-03-26 22:57:29 +08:00
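A minimal sketch of the pattern these commits describe, assuming a plain zipfile.ZipFile wrapper (the helper name and signature are illustrative, not the project's API): close the archive deterministically with a context manager instead of relying on garbage collection to flush and release the handle.

import zipfile

def write_archive(path, files):
    # files: mapping of archive member name -> bytes payload (illustrative)
    with zipfile.ZipFile(path, 'w', zipfile.ZIP_DEFLATED) as zf:
        for name, data in files.items():
            zf.writestr(name, data)
    # the handle is closed here, so the zip is flushed and valid on disk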
9a5fcd7d23 Merge pull request #401 from hzxjy1/NoneType
Fix none attributes in session headers
2025-03-26 22:56:11 +08:00
b4cc498a5f add a test for zipfile download 2025-03-26 15:14:15 +08:00
a4eb7f3b5f fix the uncontrollable zipfile closing function 2025-03-26 15:11:26 +08:00
36aa321ade Fix none attributes in session headers 2025-03-24 10:13:42 +08:00
aa84b57a43 use argparse, fix #396 2025-03-12 02:50:22 +08:00
a3c70a0c30 fix #396 2025-03-11 22:23:17 +08:00
86060ae0a6 Merge pull request #398 from hzxjy1/zipfile
feat: add compress option
2025-03-11 22:04:09 +08:00
9648c21b32 tiny fix of #397 2025-03-11 22:02:37 +08:00
625feb5d21 Remove unused option 2025-03-08 17:37:42 +08:00
6efbc73c10 feat: add compress option 2025-03-08 17:31:56 +08:00
34c1ea8952 new feature #396 2025-02-28 18:59:32 +08:00
2e895d8d0f fix title #396 2025-02-28 18:24:56 +08:00
0c9b92ce10 0.6.0-beta #394 2025-02-28 00:17:05 +08:00
ca71a72747 fix #395 2025-02-27 22:07:40 +08:00
1b7f19ee18 0.5.25, fix #393 2025-02-26 00:13:41 +08:00
132f4c83da Merge branch 'master' of github.com:RicterZ/nhentai 2025-02-26 00:12:49 +08:00
6789b2b363 fix bug of cover.webp.webp 2025-02-25 23:51:13 +08:00
a6ac725ca7 Merge pull request #392 from akakishi/master
Update installation instructions in README.rst
2025-02-23 20:29:15 +08:00
b32962bca4 Update README.rst
File `setup.py` was removed in a previous commit; updated README to reflect the new installation process.
2025-02-23 01:18:54 -03:00
8a7be0e33d 0.5.24 2025-02-09 20:16:44 +08:00
0a47527461 optimize logger output #390 2025-02-09 20:15:17 +08:00
023c8969eb add global retry for search, download, fetch favorites 2025-02-09 20:02:52 +08:00
29c3abbe5c Merge branch 'master' of github.com:RicterZ/nhentai 2025-02-08 16:21:08 +08:00
057fae8a83 0.5.23 2025-02-03 15:47:51 +08:00
248d31edf0 get favorite count #386 even if not login 2025-02-03 15:45:39 +08:00
4bfe0de078 0.5.22 2025-02-03 15:29:34 +08:00
780a6c82b2 split metadata.json out from html generate function #386 2025-02-03 15:26:14 +08:00
8791e7af55 update README to fix #367 2025-02-03 14:53:09 +08:00
b434c4d58d 0.5.21 2025-02-03 14:34:14 +08:00
fc69f94505 add --no-filename-padding options to fix #381 2025-01-29 22:59:28 +08:00
571fba2259 fix RequestsDependencyWarning 2025-01-29 22:46:11 +08:00
fa977fee04 0.5.20 2025-01-29 00:31:40 +08:00
58b5ec4211 fix #382 2025-01-28 17:43:50 +08:00
5ad416efa6 Merge pull request #380 from sgqy/master 2025-01-27 06:58:36 +08:00
d90fd871ef fix: failure chain 2025-01-26 22:38:50 +09:00
c7ff5c2c5c build: switch to pyproject 2025-01-26 21:45:55 +09:00
4ab43dae24 Merge pull request #378 from bill88t/master 2025-01-24 04:36:21 +08:00
04bd88a1f7 fix: python-httpx 0.28 2025-01-23 21:16:07 +02:00
ba59dcf4db add up/down arrow 2025-01-16 22:40:53 +08:00
a83c571ec4 0.5.19 2025-01-15 19:47:24 +08:00
e7ff5dab3d Merge pull request #373 from nicojust/fix-favorite-metadata-output
fix favorite_counts output in metadata
2025-01-15 12:26:24 +08:00
a166898b60 fix #374 2025-01-15 12:26:01 +08:00
ce25051fa3 fix: output favorite_counts as an int 2025-01-13 19:51:40 +01:00
41fba6b5ac fix: add missing favorite_counts in metadata file 2025-01-13 19:51:04 +01:00
8944ece4a8 use os.path.sep as path separator 2025-01-11 08:48:43 +08:00
6b4c4bdc70 0.5.18 2025-01-11 08:35:40 +08:00
d1d0c22af8 fix #349 2025-01-11 08:34:30 +08:00
803957ba88 fix #349 2025-01-11 08:33:59 +08:00
13b584a820 fix #371 and #324 2025-01-11 08:02:36 +08:00
be08fcf4cb fix #368 2025-01-11 07:54:28 +08:00
b585225308 fix #370 2025-01-11 07:52:51 +08:00
54af682848 fix #369 2025-01-11 07:50:41 +08:00
d74fd103f0 remove setup.py 2025-01-08 09:35:44 +08:00
0cb2411955 Merge branch 'master' of github.com:RicterZ/nhentai 2025-01-08 09:17:01 +08:00
de08d3daaa 0.5.17.1 2025-01-07 14:26:38 +08:00
946b85ace9 tiny fix 2024-12-21 09:32:33 +08:00
5bde24f159 remove debug print 2024-12-21 09:18:34 +08:00
3cae13e76f fix #363 2024-12-18 23:37:00 +08:00
7483b8f923 workaround of #359 2024-12-11 23:58:48 +08:00
eae42c8eb5 fix #356 2024-12-11 23:57:01 +08:00
b841747761 fix #356 2024-12-11 23:47:48 +08:00
1f3528afad try to fix #361 2024-12-09 14:36:44 +08:00
bb41e502c1 0.5.17 for fix #360 2024-12-09 09:26:33 +08:00
7089144ac6 fix #360 #359 2024-12-09 09:25:40 +08:00
0a9f7c3d3e 0.5.15 fix some bugs 2024-12-04 11:04:04 +08:00
40536ad456 Merge branch 'master' of github.com:RicterZ/nhentai 2024-12-04 11:03:48 +08:00
edb571c9dd fix #358 2024-12-04 11:00:50 +08:00
b2befd3473 Merge pull request #357 from FelixJS123/favorite_metadata
add favorites count metadata
2024-12-04 10:47:32 +08:00
c2e880f172 fix asyncio proxies settings and update httpx version 2024-12-04 10:46:45 +08:00
841988bc29 Updated README 2024-11-30 22:58:54 -08:00
390948e252 add favorites count metadata 2024-11-30 22:53:45 -08:00
b9b8468bfe 0.5.14 2024-12-01 10:37:59 +08:00
3d6263cf11 Merge pull request #354 from normalizedwater546/master
asyncio: fix downloader being run sequentially + httpx: fix proxy and missing headers
2024-11-24 13:50:22 +08:00
e3410f5a9a fix: add headers, proxy to async_request 2024-11-23 13:11:25 +00:00
feb7f45533 fix: semaphore bound to different event loop 2024-11-23 12:19:36 +00:00
0754caaeb7 fix: update threads argument 2024-11-23 11:20:58 +00:00
49e5a3094a fix: recent asyncio change resulting in sequential downloads
This was due to AsyncIO completely ignoring the thread (size) argument, and not updating sleep to be non-blocking.
2024-11-23 11:17:09 +00:00
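A minimal sketch of the fix described above, using illustrative names only (the project's real downloader differs): create the asyncio.Semaphore inside the running event loop, bound concurrency with it, and delay with the non-blocking asyncio.sleep so downloads overlap instead of running one by one.

import asyncio

async def download_one(url, delay=0):
    # placeholder for the real HTTP fetch
    if delay:
        await asyncio.sleep(delay)  # non-blocking, unlike time.sleep()
    return url

async def download_all(urls, threads=5, delay=0):
    semaphore = asyncio.Semaphore(threads)  # created in the current loop

    async def bounded(url):
        async with semaphore:  # at most `threads` downloads run at once
            return await download_one(url, delay)

    return await asyncio.gather(*(bounded(u) for u in urls))

# asyncio.run(download_all(['page-1', 'page-2', 'page-3'], threads=2))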
c044b64beb Merge pull request #353 from hzxjy1/master
Fix issue #7
2024-11-19 02:10:34 +08:00
f8334c09b5 Add dependence httpx 2024-11-19 01:16:51 +08:00
c90c486fb4 Add a fix fatch for downloader 2024-11-19 01:13:16 +08:00
90b17832cc Merge pull request #351 from hzxjy1/master
Use coroutine in url download
2024-11-17 10:10:54 +08:00
14c6db9cc3 Use coroutine in url download and improve the extensibility of class Downloader 2024-11-16 15:57:59 +08:00
f30ff59b2b Merge pull request #348 from JustAHumanBean/webp
add webp support
2024-11-08 16:33:21 +08:00
1504ee779f Update utils.py 2024-11-08 07:49:20 +00:00
98d9eecf6d Update parser.py 2024-11-08 07:47:50 +00:00
e16e623b9d Update doujinshi.py 2024-11-08 07:46:53 +00:00
c3f3182df3 0.5.12 2024-10-01 22:55:01 +09:00
12aad842f8 fix #347 2024-10-01 22:42:26 +09:00
f9f76ab0f5 0.5.11 2024-10-01 12:48:28 +09:00
744a9e4418 Merge branch 'master' of github.com:RicterZ/nhentai 2024-10-01 12:47:48 +09:00
c3e9fff491 fix bug #345 2024-10-01 12:47:13 +09:00
a84e2c5714 fix bug #341 2024-10-01 12:47:10 +09:00
c814c35c50 fix bug #341 2024-10-01 12:39:28 +09:00
e2f71437e2 fix setuptools warning 2024-09-22 16:37:49 +08:00
2fa45ae4df 0.5.10 2024-09-22 16:36:50 +08:00
17bc33c6cb fix arguments pass issue #344 2024-09-22 16:34:53 +08:00
09bb8460f6 fix overwrite issue #344 2024-09-22 16:32:01 +08:00
eb5b93d654 fix: pdf/cbz file already exists, but download process continues 2024-09-22 07:33:52 +00:00
cb6cf6df1a regression: pdf/cbz file already exists, but origin files are downloaded anyways.
- call download with `--cbz --rm-origin-dir`, and run command twice.
- user should pass `--regenerate` option to get back origin dir.
2024-09-22 07:24:16 +00:00
98a66a3cb0 0.5.9 2024-09-22 15:09:36 +08:00
02d47632cf fix bug of move-to-dir 2024-09-22 15:07:53 +08:00
f932b1fbbe update README: mirror setup 2024-09-22 14:45:07 +08:00
fd9e92f9d4 update README 2024-09-22 14:44:42 +08:00
a8a48c6ce7 Merge pull request #343 from RicterZ/pull-342
improve #342
2024-09-22 14:42:32 +08:00
f6e9d08fc7 0.5.8 #343 2024-09-22 14:42:02 +08:00
9c1c2ea069 improve download logic #343 2024-09-22 14:39:32 +08:00
984ae4262c generate_metadata_file no need to use parse_doujinshi_obj 2024-09-22 14:11:55 +08:00
cbf9448ed9 improve #342 2024-09-22 13:35:07 +08:00
16bac45f02 generate html viewer automatically after download #342 2024-09-22 12:30:55 +08:00
7fa9193112 fix: non-image files in pdf conversion causing crash 2024-09-22 02:05:32 +00:00
a05a308e71 fix: check if metadata file is downloaded before skipping 2024-09-22 01:39:40 +00:00
5a29eaf775 fix: add file_type check to downloader
If you wanted to generate both .cbz and .pdf, the .pdf will be skipped if .cbz was generated first.
2024-09-22 01:38:54 +00:00
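A hedged sketch of the check this commit adds, with made-up names (the real downloader API differs): decide whether to skip based on the specific output type requested, so a missing .pdf is still generated even if the .cbz already exists.

import os

def already_generated(output_dir, title, file_type):
    # True only if the requested output (.cbz or .pdf) itself exists
    return os.path.exists(os.path.join(output_dir, f'{title}.{file_type}'))

# Requesting both formats checks each one independently:
# already_generated('downloads', '12345', 'cbz') and
# already_generated('downloads', '12345', 'pdf') no longer share one answer.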
497eb6fe50 fix: remove warning for folder already exists in downloader
Nothing is wrong with the folder already existing -- silently ignore and move on. Might still have other files inside that haven't been downloaded yet.
2024-09-22 01:00:06 +00:00
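The behaviour described above amounts to one call, sketched here with os.makedirs rather than the project's actual code:

import os

def ensure_dir(path):
    # an existing folder is a normal case, not a warning
    os.makedirs(path, exist_ok=True)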
4bfe104714 refactor: de-dupe doujinshi_obj parsers 2024-09-22 00:44:06 +00:00
12364e980c fix process continuing despite cbz download request skipped 2024-09-22 00:43:10 +00:00
b51e812449 fix #330 2024-09-21 11:49:22 +08:00
0ed5fa1931 fix #320 2024-09-21 00:43:14 +08:00
7f655b0f10 fix #295 2024-09-21 00:32:10 +08:00
dec3f44542 add some debug hack 2024-09-21 00:21:01 +08:00
40072a8483 0.5.7 2024-09-21 00:00:04 +08:00
f97469259d fix #331 2024-09-20 23:59:34 +08:00
ec608cc741 fix workflow docker issue 2024-09-20 23:58:25 +08:00
30e2814fe2 update version number in pyproject.toml 2024-09-20 23:57:10 +08:00
da298e1fe7 Merge pull request #312 from RicterZ/dependabot/pip/idna-3.7
Bump idna from 3.4 to 3.7
2024-09-20 23:56:25 +08:00
51d43ddde0 Merge branch 'master' into dependabot/pip/idna-3.7 2024-09-20 23:56:18 +08:00
c734881fc7 Merge pull request #316 from RicterZ/dependabot/pip/requests-2.32.0
Bump requests from 2.31.0 to 2.32.0
2024-09-20 23:55:33 +08:00
8d5803a45e Merge branch 'master' into dependabot/pip/requests-2.32.0 2024-09-20 23:55:28 +08:00
b441085b45 Merge pull request #318 from RicterZ/dependabot/pip/urllib3-1.26.19
Bump urllib3 from 1.26.18 to 1.26.19
2024-09-20 23:55:08 +08:00
132b26f8c4 Merge branch 'master' into dependabot/pip/urllib3-1.26.19 2024-09-20 23:54:57 +08:00
a0dc952fd3 Merge pull request #319 from RicterZ/dependabot/pip/certifi-2024.7.4
Bump certifi from 2022.12.7 to 2024.7.4
2024-09-20 23:54:18 +08:00
2bd862777b fix #333 2024-09-20 23:53:26 +08:00
35c55503fa 0.5.6 2024-09-20 23:39:38 +08:00
29aac84d53 fix #336 2024-09-20 23:34:26 +08:00
4ed4523782 fix #341 2024-09-20 23:27:37 +08:00
4223326c13 Merge pull request #340 from vglint/patch-3
Fix gallery search for folders with underscore
2024-09-14 10:17:57 +08:00
a248ff98c4 Fix gallery search for folders with underscore
generate_main_html() builds gallery title names by replacing '_' in the folder name with ' '. To match against these titles when searching, we must also replace '_' with ' ' in each folder name we add to the list of titles to unhide.
2024-09-13 15:56:01 -07:00
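An illustrative Python rendering of the matching rule described above (the viewer's actual search runs in the generated HTML/JavaScript): apply the same underscore-to-space replacement to folder names before comparing them with the displayed titles.

def normalize(name):
    return name.replace('_', ' ').lower()

def matches(folder_name, query):
    return normalize(query) in normalize(folder_name)

assert matches('my_gallery_title', 'gallery title')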
021f17d229 Merge pull request #321 from PenitentMonke/xdg-base-dir
Adhere to XDG base dir spec on Linux
2024-07-08 22:03:38 +08:00
4162eabe93 Adhere to XDG base dir spec on Linux
Change how NHENTAI_HOME is set to follow the XDG Base Directory
Specification where possible, when running on Linux.

ISSUE: 299
2024-07-07 02:40:33 -03:00
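A rough sketch of the lookup order this change implies, with illustrative names (the real logic lives in the project's constant module and may differ): honour an explicit NHENTAI_HOME, otherwise use $XDG_CONFIG_HOME (defaulting to ~/.config) on Linux, and fall back to ~/.nhentai elsewhere.

import os
import sys

def nhentai_home():
    if os.environ.get('NHENTAI_HOME'):
        return os.environ['NHENTAI_HOME']
    if sys.platform.startswith('linux'):
        xdg = os.environ.get('XDG_CONFIG_HOME',
                             os.path.join(os.path.expanduser('~'), '.config'))
        return os.path.join(xdg, 'nhentai')
    return os.path.join(os.path.expanduser('~'), '.nhentai')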
c75e9efb21 Bump certifi from 2022.12.7 to 2024.7.4
Bumps [certifi](https://github.com/certifi/python-certifi) from 2022.12.7 to 2024.7.4.
- [Commits](https://github.com/certifi/python-certifi/compare/2022.12.07...2024.07.04)

---
updated-dependencies:
- dependency-name: certifi
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-07-05 21:52:23 +00:00
f2dec5c2a3 Bump urllib3 from 1.26.18 to 1.26.19
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.18 to 1.26.19.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/1.26.19/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.18...1.26.19)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-06-18 01:35:13 +00:00
845a0d5659 ---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-05-21 05:39:26 +00:00
03d85c4e5d Bump idna from 3.4 to 3.7
Bumps [idna](https://github.com/kjd/idna) from 3.4 to 3.7.
- [Release notes](https://github.com/kjd/idna/releases)
- [Changelog](https://github.com/kjd/idna/blob/master/HISTORY.rst)
- [Commits](https://github.com/kjd/idna/compare/v3.4...v3.7)

---
updated-dependencies:
- dependency-name: idna
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-12 02:06:40 +00:00
dc54a43610 Merge pull request #311 from RicterZ/dev
Dev merge to master
2024-03-28 17:56:28 +08:00
4ecffaff55 Merge pull request #310 from Spyridion/dev
Changed parser option checks to allow artist search
2024-03-28 17:42:42 +08:00
457f12d40d Changed parser option checks to allow artist search 2024-03-28 02:40:14 -07:00
499081a9cd Merge pull request #306 from myc1ou1d/dev
fix file not found error when cbz file exists.
2024-02-25 00:37:32 +08:00
53aa04af1e fix file not found error when cbz file exists. 2024-02-24 23:27:52 +08:00
473f948565 update 2024-02-20 10:28:54 +08:00
f701485840 remove print 2024-02-20 10:27:34 +08:00
d8e4f50609 support #291 2024-02-20 10:25:44 +08:00
a893f54da1 0.5.4 2023-12-28 17:46:40 +08:00
4e307911ce Merge pull request #297 from RicterZ/dependabot/pip/urllib3-1.26.18
Bump urllib3 from 1.26.14 to 1.26.18
2023-12-28 17:46:07 +08:00
f9b7f828a5 fix #298 2023-12-28 17:45:37 +08:00
092df9e539 Bump urllib3 from 1.26.14 to 1.26.18
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.14 to 1.26.18.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.14...1.26.18)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-17 23:59:22 +00:00
8d74866abf Update README.rst 2023-08-21 21:47:07 +08:00
bc5b7f982d Merge pull request #294 from edgar1016/master
Added --move-to-folder
2023-08-19 19:13:38 +08:00
e54f3cbd06 Added --move-to-folder 2023-08-18 18:30:14 -07:00
a31c615259 Merge pull request #284 from RicterZ/dependabot/pip/requests-2.31.0
Bump requests from 2.28.2 to 2.31.0
2023-05-25 20:40:59 +08:00
cf0b76204d Bump requests from 2.28.2 to 2.31.0
Bumps [requests](https://github.com/psf/requests) from 2.28.2 to 2.31.0.
- [Release notes](https://github.com/psf/requests/releases)
- [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md)
- [Commits](https://github.com/psf/requests/compare/v2.28.2...v2.31.0)

---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-05-23 06:19:34 +00:00
17402623c4 Merge pull request #282 from edgar1016/master
--page-all works with favorites
2023-04-22 13:06:40 +08:00
a1a310f06b --page-all works with favorites 2023-04-21 22:00:00 -07:00
57673da762 update version 2023-03-28 21:02:47 +08:00
dab61291cb Merge pull request #280 from RicterZ/dev
0.5.3
2023-03-28 20:58:08 +08:00
67cb88dbbd 0.5.3 2023-03-28 20:57:36 +08:00
9ed4e04241 Merge pull request #279 from RicterZ/dev
update setup information
2023-03-28 20:56:53 +08:00
0b0f9bd7e8 update setup information 2023-03-28 20:55:40 +08:00
f1cc63a591 Merge pull request #278 from RicterZ/dev
fix #277
2023-03-28 20:54:49 +08:00
aa77cb1c7c fix some bugs #277 2023-03-28 20:54:02 +08:00
f9878d080b add debug information 2023-03-04 18:49:28 +08:00
f534b0b47f Merge pull request #275 from RicterZ/dev
remove tests
2023-03-04 18:40:45 +08:00
6b675fd9ba remove tests 2023-03-04 18:40:10 +08:00
458c68d5e6 Merge pull request #274 from RicterZ/dev
Dev
2023-03-04 18:39:07 +08:00
2eed0a7463 add poetry 2023-03-04 18:33:51 +08:00
fc507d246a Merge pull request #271 from edgar1016/master
Fixed info.txt
2023-02-20 23:58:26 +08:00
3ed84c5a67 Fixed info.txt 2023-02-20 01:54:32 -07:00
61f4a43081 remove test 2023-02-20 12:58:28 +08:00
4179947f16 add %ag %g formatter #269 2023-02-20 12:55:18 +08:00
9f55223e28 use Unknown as field value if it is null #269 2023-02-20 12:47:00 +08:00
b56e5b63a9 Merge pull request #268 from RicterZ/dev
enhancement of legacy search parser
2023-02-07 19:46:09 +08:00
6dc1e0ef5a update test 2023-02-07 19:43:55 +08:00
fefdd3858a update test 2023-02-07 19:42:27 +08:00
f66653c55e legacy search by @gayspacegems of issue #265 2023-02-07 19:40:52 +08:00
179852a343 Merge pull request #267 from RicterZ/dev
add counter
2023-02-06 17:51:54 +08:00
8972026456 update tests 2023-02-06 17:50:51 +08:00
cbff6496c3 update 2023-02-06 17:49:42 +08:00
5a08981e89 update 2023-02-06 17:47:23 +08:00
6c5b83d5be update tests 2023-02-06 17:46:03 +08:00
3de4159a39 update tests 2023-02-06 17:44:28 +08:00
c66fa5f816 rename 2023-02-06 17:43:00 +08:00
66d0d91eae fix env 2023-02-06 17:40:11 +08:00
0aa8e1d358 update tests 2023-02-06 17:27:42 +08:00
0f54762229 print cookie 2023-02-06 17:25:34 +08:00
93c3a77a57 add counter 2023-02-06 17:22:31 +08:00
f411b7cfea update 2023-02-06 17:15:48 +08:00
ed1686bb9c Merge branch 'master' of github.com:RicterZ/nhentai 2023-02-06 17:12:22 +08:00
f44b9e9911 add counter 2023-02-06 17:12:10 +08:00
1d20a82e3d Create python-app.yml 2023-02-06 17:07:54 +08:00
e3a6d67560 Merge branch 'master' of github.com:RicterZ/nhentai 2023-02-06 17:03:14 +08:00
c7c3572811 add tests 2023-02-06 17:02:02 +08:00
421e8bce64 Update docker-image.yml 2023-02-06 16:14:04 +08:00
25e0d80024 Update docker-image.yml 2023-02-06 16:12:46 +08:00
a10510b12d Update docker-image.yml 2023-02-06 16:09:38 +08:00
2c20d19621 Update docker-image.yml 2023-02-06 07:19:46 +08:00
c4313e59f1 Create docker-image.yml 2023-02-06 07:16:42 +08:00
c06f3225a3 remove travis-ci 2023-02-06 07:14:19 +08:00
1fac55137a update travis-ci 2023-02-06 00:58:51 +08:00
22412eb904 add docker ignore 2023-02-06 00:49:29 +08:00
8ccfedbfc8 add dockerignore 2023-02-06 00:48:53 +08:00
483bef2207 update docker usage 2023-02-06 00:45:43 +08:00
730daec1ab update README 2023-02-06 00:44:04 +08:00
5778d7a6e5 update README 2023-02-06 00:42:53 +08:00
c48a25bd4e fix typo 2023-02-06 00:37:10 +08:00
f5c4bf4dd1 update README 2023-02-06 00:36:56 +08:00
9f17ee3f6e update README 2023-02-06 00:34:44 +08:00
290f03d05e rm trash files 2023-02-06 00:22:43 +08:00
fe443a4229 add Dockerfile 2023-02-06 00:22:23 +08:00
2fe5536950 0.5.2 2023-02-06 00:03:54 +08:00
7a7f2559ff update broken images on pypi 2023-02-06 00:02:48 +08:00
444efcbee5 0.5.1 2023-02-05 23:55:21 +08:00
08d812c614 fix UnicodeDecodeError on windows 2023-02-05 23:55:05 +08:00
cb691c782c update README 2023-02-05 23:51:11 +08:00
927d5b1b39 update requirements 2023-02-05 23:45:33 +08:00
a8566482aa change log color and update images 2023-02-05 23:44:15 +08:00
8c900a833d update README 2023-02-05 23:25:41 +08:00
466fa4c094 rename some constants 2023-02-05 23:17:23 +08:00
2adf8ccc9d reformat files #266 2023-02-05 23:13:47 +08:00
06fdf0dade reformat files #266 2023-02-05 22:44:37 +08:00
a609243794 change logger 2023-02-05 07:07:19 +08:00
e89c2c0860 fix bug #265 2023-02-05 07:02:45 +08:00
e08b0659e5 improve #265 2023-02-05 06:55:03 +08:00
221ff6b32c 0.4.18 bugs fix 2023-02-04 20:24:53 +08:00
bc6ef0cf5d solve #251 2023-02-04 20:22:57 +08:00
c8c63cbc11 add usage images 2023-02-04 20:09:51 +08:00
a63856d076 update usage 2023-02-04 20:09:46 +08:00
aa4986189f resolve issue #264 2023-02-04 19:55:51 +08:00
0fb81599dc resolve #265 2023-02-04 19:47:24 +08:00
e9f9651d07 change the default sort method 2023-02-04 19:38:29 +08:00
1860b5f0cf resolved issue #249 2022-05-03 16:54:38 +08:00
eff4f3bf9b remove debug print 2022-05-03 16:51:49 +08:00
501840172e change sorting from recent to date 2022-05-03 16:49:26 +08:00
e5ed6d098a update README 2022-05-02 18:53:40 +08:00
98606202fb remove some unused images 2022-05-02 18:49:34 +08:00
5a3f1009c9 update README for issue #237 2022-05-02 18:48:02 +08:00
61945a6e97 fix for issue #236 2022-05-02 17:01:30 +08:00
443fcdc7da fix for issue #232 2022-05-02 16:53:23 +08:00
31b95fe2dd 0.4.17 releases, for #246 2022-05-02 16:24:04 +08:00
be8c97f8d4 Merge pull request #247 from krrr/master 2022-05-02 13:21:53 +08:00
348e51676e Update README.rst 2022-05-02 12:13:19 +08:00
ea356a1ca2 Merge pull request #244 from krrr/master 2022-04-30 13:47:57 +08:00
5a4dfb8a76 Add new option to avoid cloudflare captcha 2022-04-30 11:22:41 +08:00
4b15744ceb Merge pull request #235 from TravisDavis-ops/nixpkg 2021-12-24 03:27:07 +08:00
b05fa16286 Update README.rst 2021-12-23 12:43:20 -06:00
0879486881 Merge pull request #228 from culturecloud/master 2021-08-23 20:27:38 +08:00
c66ba730d3 Fix UnicodeEncodeError 2021-07-28 18:43:45 +06:00
606c5e0ffd Merge pull request #226 from nanaih/minimal_viewer 2021-06-23 18:14:47 +08:00
ba04f81a6f add minimal viewer, fix not using config's template on --html only option 2021-06-22 23:17:03 -04:00
6519e6f221 Merge pull request #224 from RicterZ/pull/221
Pull/221
2021-06-07 17:21:00 +08:00
7594625d72 fix format 2021-06-07 17:17:54 +08:00
4948c8f0c5 update README 2021-06-07 16:50:03 +08:00
e22a99fa8c Merge branch 'master' of github.com:RicterZ/nhentai 2021-06-07 16:48:36 +08:00
19a1d5c404 fix #220 add pretty name of doujinshi format 2021-06-07 16:47:54 +08:00
ad1e876611 Merge pull request #221 from SomeRandomDude870/master
HDoujin-format Metadata file
2021-06-07 16:02:43 +08:00
1de7e1f998 Merge branch 'pull/221' into master 2021-06-07 16:01:54 +08:00
b97e707817 HDoujin-format Metadata file 2021-06-05 17:13:18 +02:00
6ef2189bfe Merge pull request #214 from lleene/master
Add dryrun option to command line interface
2021-06-03 08:00:18 +08:00
24be2d37d4 0.4.16 2021-06-02 23:22:23 +08:00
d9d2a6fb91 fix bug of proxy while downloading doujinshi 2021-06-02 23:20:56 +08:00
bd38294bb7 undo whitespace edits 2021-05-16 19:49:26 +02:00
2cf4e6718e Add the option to perform a dry-run and only download meta-data / generate file structure 2021-05-16 19:44:01 +02:00
8cd4b948e7 0.4.15 2021-05-08 15:36:49 +08:00
f884384eb3 fix bug 2021-05-08 15:36:36 +08:00
87afab46c4 Merge pull request #211 from jwfiredragon/master 2021-04-25 09:56:49 +08:00
c7b1d7e6a8 Fix broken constant import 2021-04-24 16:39:54 -07:00
ad02371158 Update constant.py 2021-04-21 15:37:13 +08:00
7c9d55e0ee Merge pull request #208 from karamori77/master
Changed write_comic_info from False to True
2021-04-21 15:30:51 +08:00
00aad774ae Fixed potential re-download
Moved forward save-history check 1 indent so it works with download by id too
Mapped all ids to int since there are cases where it's a string in the API
2021-04-20 11:04:52 +08:00
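A minimal sketch of the normalization, with illustrative names: cast every id to int before comparing against the saved download history, so string ids returned by the API do not slip past the duplicate check.

def filter_new(ids, history):
    seen = set(map(int, history))
    return [int(i) for i in ids if int(i) not in seen]

# filter_new(['167680', 167681], history=[167680]) -> [167681]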
373086b459 Update serializer.py
changed Language to LanguageISO for ComicInfo.xml
Language will be displayed by the LanguageISO code, it also forgoes rare language tags like rewrite and speechless
2021-04-18 21:45:15 +08:00
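An illustrative fragment (not the project's serializer code) of the renamed ComicInfo.xml element: the ISO code goes into <LanguageISO> rather than a spelled-out <Language> value.

from xml.etree import ElementTree as ET

info = ET.Element('ComicInfo')
ET.SubElement(info, 'LanguageISO').text = 'ja'  # e.g. Japanese
print(ET.tostring(info, encoding='unicode'))
# <ComicInfo><LanguageISO>ja</LanguageISO></ComicInfo>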
3a83f99771 Update constant.py 2021-04-18 21:40:47 +08:00
00627ab36a Update utils.py 2021-04-03 23:11:33 +08:00
592e163891 Update requirements.txt 2021-03-26 22:25:49 +08:00
84523475b0 Merge pull request #206 from Un1Gfn/patch-1 2021-03-25 19:01:39 +08:00
5f5461c902 Instructions on getting csrftoken & sessionid 2021-03-25 18:57:20 +08:00
05e6ceb3cd Merge pull request #205 from Nontre12/master 2021-03-25 09:22:13 +08:00
db59426503 FIX: Use of img2lib even if it is not installed 2021-03-24 21:49:45 +01:00
74197f8f90 0.4.14 released for fix issue #204 2021-02-11 15:42:53 +08:00
6d91a39533 Merge pull request #203 from jwfiredragon/master
Switching 'logger.warn' to 'logger.warning'
2021-02-11 15:41:15 +08:00
e181e0b9dd Switching 'logger.warn' to 'logger.warning' 2021-02-10 22:45:22 -08:00
6fed1f94cb 0.4.13 2021-01-18 16:26:39 +08:00
9cfb23c8ec Merge pull request #201 from mobrine1/patch-1
Fix #200
2021-01-18 16:25:42 +08:00
fc347cdadf Fix #200 2021-01-17 15:02:43 -05:00
1cdebaab61 Merge pull request #199 from RicterZ/dev
0.4.12
2021-01-17 12:16:56 +08:00
9513141ccf 0.4.12 2021-01-17 11:51:22 +08:00
bdc9fa113e fix #197 set proxy to null 2021-01-17 11:50:22 +08:00
36946111db fix #198 add notice 2021-01-17 11:42:06 +08:00
ce8ae54536 Merge pull request #195 from RicterZ/dev
0.4.11
2021-01-11 11:19:58 +08:00
7aedb905d6 Merge pull request #194 from RicterZ/dev
0.4.11
2021-01-11 11:16:09 +08:00
8b8b5f193e 0.4.11 2021-01-11 11:15:21 +08:00
fc99d91ac1 fix #193 2021-01-11 11:14:35 +08:00
ba141efba7 remove repeated spaces 2021-01-11 11:04:29 +08:00
f78d8750f3 remove __future__ 2021-01-11 11:03:45 +08:00
08bb8ffda4 Merge pull request #192 from RicterZ/dev
Dev
2021-01-10 14:41:02 +08:00
af379c825c Merge branch 'master' into dev 2021-01-10 14:40:09 +08:00
2f9386f22c fix #188 2021-01-10 11:44:04 +08:00
3667bc34b7 0.4.10 2021-01-10 11:41:38 +08:00
84749c56bd fix #191 2021-01-10 11:40:46 +08:00
24f79e0945 Merge pull request #190 from RicterZ/dev
fix bugs
2021-01-07 20:42:26 +08:00
edc46a9531 Merge pull request #189 from mobrine1/mobrine1-patch-1
Fixing loop when id not found, issue #188
2021-01-07 20:39:44 +08:00
72035a14e6 Fixing loop when id not found, issue #188 2021-01-07 07:32:29 -05:00
472528e464 Merge pull request #187 from atsushi-hirako/patch-1
fix issue #186
2021-01-02 02:16:50 +08:00
3f5915fd2a fix issue #186
change to blacklist approach (allow 2-bytes character)
2021-01-01 20:11:09 +09:00
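A hedged sketch of the blacklist approach mentioned above (regex and function name are illustrative): strip only the characters that are invalid in file names, so 2-byte characters such as Japanese survive in folder and file names.

import re

def sanitize_filename(name):
    # remove characters that file systems reject; keep everything else
    return re.sub(r'[\\/:*?"<>|]', '', name).strip()

# sanitize_filename('Title: あなたも変態?') -> 'Title あなたも変態'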
0cd2576dab 0.4.9 2020-12-02 07:45:31 +08:00
445a8c052e Merge pull request #180 from RicterZ/dev
0.4.8
2020-12-01 21:01:00 +08:00
7a75afef0a 0.4.8 2020-12-01 20:58:28 +08:00
a5813e19b1 fix bug on first start 2020-12-01 20:56:27 +08:00
8462d2f2aa use dict.update to update config values 2020-11-26 17:52:10 +08:00
51074ee948 support multi viewers 2020-11-26 17:22:23 +08:00
9c7354be32 0.4.6 2020-11-07 12:04:42 +08:00
7f48b3edd1 Merge pull request #175 from RicterZ/dev
add default value of output dir
2020-10-15 02:10:06 +08:00
d84b827241 add default value of output dir 2020-10-15 02:09:09 +08:00
4ac161a38c Merge pull request #174 from Nontre12/fix-gen-main
Fix change directory output_dir option on gen-main
2020-10-15 01:47:51 +08:00
648b6f87bf Added logo.png to the installation 2020-10-14 12:09:39 +02:00
2ec1283ba8 Fix change directory output_dir option on gen-main 2020-10-14 12:02:57 +02:00
a9bd46b426 Merge pull request #173 from Nontre12/db-ignored
Fix db ignored
2020-10-14 02:44:03 +08:00
c52bc271fc Fix db ignored 2020-10-13 13:39:24 +02:00
f2d22f8e7d Merge pull request #169 from Nontre12/master
Fix running without parameters
2020-10-11 03:48:39 +08:00
ea6089ff31 Fix 2020-10-10 21:15:20 +02:00
670d14c3f3 Merge pull request #4 from RicterZ/master
Update master branch
2020-10-10 20:50:01 +02:00
b46106a5bc Merge pull request #167 from RicterZ/0.4.5
0.4.5
2020-10-11 02:00:02 +08:00
f04359e486 0.4.5 2020-10-11 01:57:37 +08:00
6861cbcbc1 Merge pull request #166 from RicterZ/dev
0.4.4
2020-10-11 01:45:53 +08:00
e0938c5a0e Merge pull request #165 from RicterZ/dev
0.4.4
2020-10-11 01:43:41 +08:00
4aa34c668a Merge pull request #3 from RicterZ/master
Update master branch from origin
2020-10-10 19:11:56 +02:00
8ad60d9838 Merge pull request #1 from RicterZ/master
Merge pull request #162 from Nontre12/master
2020-10-10 18:31:47 +02:00
40 changed files with 1851 additions and 737 deletions

.dockerignore (new file, 11 lines)

@@ -0,0 +1,11 @@
.git
.github
.gitignore
venv
*.egg-info
build
dist
images
LICENSE
.travis.yml
.idea

.github/workflows/docker-image.yml (new file, 27 lines)

@@ -0,0 +1,27 @@
name: Docker Image CI
on:
  push:
    branches: [ "master" ]
jobs:
  build:
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build the Docker image
        uses: docker/build-push-action@v4
        with:
          context: .
          push: true
          tags: ricterz/nhentai:latest

.gitignore (3 lines changed)

@@ -7,3 +7,6 @@ dist/
.DS_Store
output/
venv/
.vscode/
test-output
*.whl

.travis.yml (deleted)

@@ -1,19 +0,0 @@
os:
- linux
language: python
python:
- 3.7
- 3.8
install:
- python setup.py install
script:
- echo 268642 > /tmp/test.txt
- nhentai --cookie "_ga=GA1.2.1651446371.1545407218; __cfduid=d0ed34dfb81167d2a51a1d6392c1768a81601380350; csrftoken=KRN0GR1ft86m3HTefpQA99pp6R1Bo7hUs5QxNGOAIuwB5g4EcJj04fwMB8QKgLaB; sessionid=7hzoowox78c90wi5ud5ibphm4axcck7c"
- nhentai --search umaru
- nhentai --id=152503,146134 -t 10 --output=/tmp/ --cbz
- nhentai -F
- nhentai --file /tmp/test.txt
- nhentai --id=152503,146134 --gen-main --output=/tmp/

Dockerfile (new file, 9 lines)

@@ -0,0 +1,9 @@
FROM python:3
WORKDIR /usr/src/nhentai
COPY . .
RUN pip install --no-cache-dir .
WORKDIR /output
ENTRYPOINT ["nhentai"]

MANIFEST.in (deleted)

@@ -1,8 +0,0 @@
include README.md
include requirements.txt
include nhentai/viewer/index.html
include nhentai/viewer/styles.css
include nhentai/viewer/scripts.js
include nhentai/viewer/main.html
include nhentai/viewer/main.css
include nhentai/viewer/main.js

README.rst

@@ -1,77 +1,108 @@
nhentai
=======
.. code-block::
_ _ _ _
_ __ | | | | ___ _ __ | |_ __ _(_)
| '_ \| |_| |/ _ \ '_ \| __/ _` | |
| | | | _ | __/ | | | || (_| | |
|_| |_|_| |_|\___|_| |_|\__\__,_|_|
あなたも変態。 いいね?
|travis|
|pypi|
|version|
|license|
nHentai is a CLI tool for downloading doujinshi from <http://nhentai.net>
nhentai is a CLI tool for downloading doujinshi from `nhentai.net <https://nhentai.net>`_
GUI version: `https://github.com/edgar1016/nhentai-GUI <https://github.com/edgar1016/nhentai-GUI>`_
===================
Manual Installation
===================
From Github:
.. code-block::
git clone https://github.com/RicterZ/nhentai
cd nhentai
python setup.py install
pip install --no-cache-dir .
Build Docker container:
.. code-block::
git clone https://github.com/RicterZ/nhentai
cd nhentai
docker build -t nhentai:latest .
docker run --rm -it -v ~/Downloads/doujinshi:/output -v ~/.nhentai/:/root/.nhentai nhentai --id 123855
==================
Installation (pip)
Installation
==================
Alternatively, install from PyPI with pip:
From PyPI with pip:
.. code-block::
pip install nhentai
pip install nhentai
For a self-contained installation, use `Pipx <https://github.com/pipxproject/pipx/>`_:
For a self-contained installation, use `pipx <https://github.com/pipxproject/pipx/>`_:
.. code-block::
pipx install nhentai
pipx install nhentai
Pull from Dockerhub:
=====================
Installation (Gentoo)
=====================
.. code-block::
layman -fa glicOne
docker pull ricterz/nhentai
docker run --rm -it -v ~/Downloads/doujinshi:/output -v ~/.nhentai/:/root/.nhentai ricterz/nhentai --id 123855
On Gentoo Linux:
.. code-block::
layman -fa glibOne
sudo emerge net-misc/nhentai
On NixOS:
.. code-block::
nix-env -iA nixos.nhentai
=====
Usage
=====
**IMPORTANT**: To bypass the nhentai frequency limit, you should use `--cookie` option to store your cookie.
*The default download folder will be the path where you run the command (CLI path).*
Set your nhentai cookie against captcha:
**⚠️IMPORTANT⚠️**: To bypass the nhentai frequency limit, you should use `--cookie` and `--useragent` options to store your cookie and your user-agent.
.. code-block:: bash
nhentai --useragent "USER AGENT of YOUR BROWSER"
nhentai --cookie "YOUR COOKIE FROM nhentai.net"
**NOTE**: The format of the cookie is `"csrftoken=TOKEN; sessionid=ID"`
**NOTE:**
- The format of the cookie is `"csrftoken=TOKEN; sessionid=ID; cf_clearance=CLOUDFLARE"`
- `cf_clearance` cookie and useragent must be set if you encounter "blocked by cloudflare captcha" error. Make sure you use the same IP and useragent as when you got it
| To get csrftoken and sessionid, first login to your nhentai account in web browser, then:
| (Chrome) |ve| |ld| More tools |ld| Developer tools |ld| Application |ld| Storage |ld| Cookies |ld| https://nhentai.net
| (Firefox) |hv| |ld| Web Developer |ld| Web Developer Tools |ld| Storage |ld| Cookies |ld| https://nhentai.net
|
.. |hv| unicode:: U+2630 .. https://www.compart.com/en/unicode/U+2630
.. |ve| unicode:: U+22EE .. https://www.compart.com/en/unicode/U+22EE
.. |ld| unicode:: U+2014 .. https://www.compart.com/en/unicode/U+2014
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/usage.png
:alt: nhentai
:align: center
*The default download folder will be the path where you run the command (%cd% or $PWD).*
Download specified doujinshi:
.. code-block:: bash
nhentai --id=123855,123866
nhentai --id 123855 123866 123877
Download doujinshi with ids specified in a file (doujinshi ids split by line):
@@ -98,44 +129,57 @@ Download your favorites with delay:
.. code-block:: bash
nhentai --favorites --download --delay 1
nhentai --favorites --download --delay 1 --page 3-5,7
Format output doujinshi folder name:
.. code-block:: bash
nhentai --id 261100 --format '[%i]%s'
# for Windows
nhentai --id 261100 --format "[%%i]%%s"
Supported doujinshi folder formatter:
- %i: Doujinshi id
- %f: Doujinshi favorite count
- %t: Doujinshi name
- %s: Doujinshi subtitle (translated name)
- %a: Doujinshi authors' name
- %g: Doujinshi groups name
- %p: Doujinshi pretty name
- %ag: Doujinshi authors name or groups name
Note: for Windows operation system, please use double "%", such as "%%i".
Other options:
.. code-block::
Usage:
nhentai --search [keyword] --download
NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]
nhentai --file [filename]
Environment Variable:
NHENTAI nhentai mirror url
Options:
# Operation options
-h, --help show this help message and exit
-D, --download download doujinshi (for search results)
-S, --show just show the doujinshi information
# Doujinshi options
--id=ID doujinshi ids set, e.g. 1,2,3
--id doujinshi ids set, e.g. 167680 167681 167682
-s KEYWORD, --search=KEYWORD
search doujinshi by keyword
--tag=TAG download doujinshi by tag
-F, --favorites list or download your favorites.
# Multi-page options
--page=PAGE page number of search results
--max-page=MAX_PAGE The max page when recursive download tagged doujinshi
# Download options
-F, --favorites list or download your favorites
-a ARTIST, --artist=ARTIST
list doujinshi by artist name
--page-all all search results
--page=PAGE, --page-range=PAGE
page number of search results. e.g. 1,2-5,14
--sorting=SORTING, --sort=SORTING
sorting of doujinshi (recent / popular /
popular-[today|week])
-o OUTPUT_DIR, --output=OUTPUT_DIR
output dir
-t THREADS, --threads=THREADS
@@ -144,23 +188,37 @@ Other options:
timeout for downloading doujinshi
-d DELAY, --delay=DELAY
slow down between downloading every doujinshi
-p PROXY, --proxy=PROXY
uses a proxy, for example: http://127.0.0.1:1080
--retry=RETRY retry times when downloading failed
--exit-on-fail exit on fail to prevent generating incomplete files
--proxy=PROXY store a proxy, for example: -p "http://127.0.0.1:1080"
-f FILE, --file=FILE read gallery IDs from file.
--format=NAME_FORMAT format the saved folder name
# Generating options
--dry-run Dry run, skip file download
--html generate a html viewer at current directory
--no-html don't generate HTML after downloading
--gen-main generate a main viewer contain all the doujin in the folder
--gen-main generate a main viewer contain all the doujin in the
folder
-C, --cbz generate Comic Book CBZ File
-P --pdf generate PDF file
--rm-origin-dir remove downloaded doujinshi dir when generated CBZ
or PDF file.
# nHentai options
--cookie=COOKIE set cookie of nhentai to bypass Google recaptcha
-P, --pdf generate PDF file
--rm-origin-dir remove downloaded doujinshi dir when generated CBZ or
PDF file
--move-to-folder remove files in doujinshi dir then move new file to
folder when generated CBZ or PDF file
--meta generate a metadata file in doujinshi format
--regenerate regenerate the cbz or pdf file if exists
--cookie=COOKIE set cookie of nhentai to bypass Cloudflare captcha
--useragent=USERAGENT, --user-agent=USERAGENT
set useragent to bypass Cloudflare captcha
--language=LANGUAGE set default language to parse doujinshis
--clean-language set DEFAULT as language to parse doujinshis
--save-download-history
save downloaded doujinshis, whose will be skipped if
you re-download them
--clean-download-history
clean download history
--template=VIEWER_TEMPLATE
set viewer template
--legacy use legacy searching method
==============
nHentai Mirror
@@ -171,33 +229,28 @@ For example:
.. code-block::
i.h.loli.club -> i.nhentai.net
i3.h.loli.club -> i3.nhentai.net
i5.h.loli.club -> i5.nhentai.net
i7.h.loli.club -> i7.nhentai.net
h.loli.club -> nhentai.net
Set `NHENTAI` env var to your nhentai mirror.
.. code-block:: bash
NHENTAI=http://h.loli.club nhentai --id 123456
NHENTAI=https://h.loli.club nhentai --id 123456
.. image:: ./images/search.png?raw=true
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/search.png
:alt: nhentai
:align: center
.. image:: ./images/download.png?raw=true
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/download.png
:alt: nhentai
:align: center
.. image:: ./images/viewer.png?raw=true
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/viewer.png
:alt: nhentai
:align: center
============
あなたも変態
============
.. image:: ./images/image.jpg?raw=true
:alt: nhentai
:align: center
.. |travis| image:: https://travis-ci.org/RicterZ/nhentai.svg?branch=master
:target: https://travis-ci.org/RicterZ/nhentai
@@ -205,5 +258,8 @@ Set `NHENTAI` env var to your nhentai mirror.
.. |pypi| image:: https://img.shields.io/pypi/dm/nhentai.svg
:target: https://pypi.org/project/nhentai/
.. |version| image:: https://img.shields.io/pypi/v/nhentai
:target: https://pypi.org/project/nhentai/
.. |license| image:: https://img.shields.io/github/license/ricterz/nhentai.svg
:target: https://github.com/RicterZ/nhentai/blob/master/LICENSE

(deleted file)

@@ -1,5 +0,0 @@
184212
204944
222460
244502
261909

(Binary image diffs not shown: three existing images updated (189 KiB -> 1.0 MiB, 173 KiB -> 991 KiB, 311 KiB -> 1.9 MiB), one 34 KiB image removed, and images/usage.png added as a new 679 KiB file.)

nhentai/__init__.py

@@ -1,3 +1,3 @@
__version__ = '0.4.4'
__version__ = '0.6.0-beta'
__author__ = 'RicterZ'
__email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -1,40 +1,21 @@
# coding: utf-8
from __future__ import print_function
import os
import sys
import json
from optparse import OptionParser
try:
from itertools import ifilter as filter
except ImportError:
pass
import nhentai.constant as constant
from urllib.parse import urlparse
from argparse import ArgumentParser
from nhentai import __version__
from nhentai.utils import urlparse, generate_html, generate_main_html, DB
from nhentai.utils import generate_html, generate_main_html, DB, EXTENSIONS
from nhentai.logger import logger
try:
if sys.version_info < (3, 0, 0):
import codecs
import locale
sys.stdout = codecs.getwriter(locale.getpreferredencoding())(sys.stdout)
sys.stderr = codecs.getwriter(locale.getpreferredencoding())(sys.stderr)
except NameError:
# python3
pass
from nhentai.constant import PATH_SEPARATOR
def banner():
logger.info(u'''nHentai ver %s: あなたも変態。 いいね?
_ _ _ _
_ __ | | | | ___ _ __ | |_ __ _(_)
| '_ \| |_| |/ _ \ '_ \| __/ _` | |
| | | | _ | __/ | | | || (_| | |
|_| |_|_| |_|\___|_| |_|\__\__,_|_|
''' % __version__)
logger.debug(f'nHentai ver {__version__}: あなたも変態。 いいね?')
def load_config():
@@ -43,7 +24,7 @@ def load_config():
try:
with open(constant.NHENTAI_CONFIG_FILE, 'r') as f:
constant.CONFIG = json.load(f)
constant.CONFIG.update(json.load(f))
except json.JSONDecodeError:
logger.error('Failed to load config file.')
write_config()
@@ -57,156 +38,235 @@
f.write(json.dumps(constant.CONFIG))
def callback(option, _opt_str, _value, parser):
if option == '--id':
pass
value = []
for arg in parser.rargs:
if arg.isdigit():
value.append(int(arg))
elif arg.startswith('-'):
break
else:
logger.warning(f'Ignore invalid id {arg}')
setattr(parser.values, option.dest, value)
def cmd_parser():
load_config()
parser = OptionParser('\n nhentai --search [keyword] --download'
'\n NHENTAI=http://h.loli.club nhentai --id [ID ...]'
'\n nhentai --file [filename]'
'\n\nEnvironment Variable:\n'
' NHENTAI nhentai mirror url')
parser = ArgumentParser(
description='\n nhentai --search [keyword] --download'
'\n NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]'
'\n nhentai --file [filename]'
'\n\nEnvironment Variable:\n'
' NHENTAI nhentai mirror url'
)
# operation options
parser.add_option('--download', '-D', dest='is_download', action='store_true',
help='download doujinshi (for search results)')
parser.add_option('--show', '-S', dest='is_show', action='store_true', help='just show the doujinshi information')
parser.add_argument('--download', '-D', dest='is_download', action='store_true',
help='download doujinshi (for search results)')
parser.add_argument('--no-download', dest='no_download', action='store_true', default=False,
help='download doujinshi (for search results)')
parser.add_argument('--show', '-S', dest='is_show', action='store_true',
help='just show the doujinshi information')
# doujinshi options
parser.add_option('--id', type='string', dest='id', action='store', help='doujinshi ids set, e.g. 1,2,3')
parser.add_option('--search', '-s', type='string', dest='keyword', action='store',
help='search doujinshi by keyword')
parser.add_option('--favorites', '-F', action='store_true', dest='favorites',
help='list or download your favorites.')
parser.add_argument('--id', dest='id', nargs='+', type=int,
help='doujinshi ids set, e.g. 167680 167681 167682')
parser.add_argument('--search', '-s', type=str, dest='keyword',
help='search doujinshi by keyword')
parser.add_argument('--favorites', '-F', action='store_true', dest='favorites',
help='list or download your favorites')
parser.add_argument('--artist', '-a', type=str, dest='artist',
help='list doujinshi by artist name')
# page options
parser.add_option('--page-all', dest='page_all', action='store_true', default=False,
help='all search results')
parser.add_option('--page', '--page-range', type='string', dest='page', action='store', default='',
help='page number of search results. e.g. 1,2-5,14')
parser.add_option('--sorting', dest='sorting', action='store', default='recent',
help='sorting of doujinshi (recent / popular / popular-[today|week])',
choices=['recent', 'popular', 'popular-today', 'popular-week'])
parser.add_argument('--page-all', dest='page_all', action='store_true', default=False,
help='all search results')
parser.add_argument('--page', '--page-range', type=str, dest='page',
help='page number of search results. e.g. 1,2-5,14')
parser.add_argument('--sorting', '--sort', dest='sorting', type=str, default='popular',
help='sorting of doujinshi (recent / popular / popular-[today|week])',
choices=['recent', 'popular', 'popular-today', 'popular-week', 'date'])
# download options
parser.add_option('--output', '-o', type='string', dest='output_dir', action='store', default='',
help='output dir')
parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
help='thread count for downloading doujinshi')
parser.add_option('--timeout', '-T', type='int', dest='timeout', action='store', default=30,
help='timeout for downloading doujinshi')
parser.add_option('--delay', '-d', type='int', dest='delay', action='store', default=0,
help='slow down between downloading every doujinshi')
parser.add_option('--proxy', '-p', type='string', dest='proxy', action='store', default='',
help='store a proxy, for example: -p \'http://127.0.0.1:1080\'')
parser.add_option('--file', '-f', type='string', dest='file', action='store', help='read gallery IDs from file.')
parser.add_option('--format', type='string', dest='name_format', action='store',
help='format the saved folder name', default='[%i][%a][%t]')
parser.add_argument('--output', '-o', type=str, dest='output_dir', default='.',
help='output dir')
parser.add_argument('--threads', '-t', type=int, dest='threads', default=5,
help='thread count for downloading doujinshi')
parser.add_argument('--timeout', '-T', type=int, dest='timeout', default=30,
help='timeout for downloading doujinshi')
parser.add_argument('--delay', '-d', type=int, dest='delay', default=0,
help='slow down between downloading every doujinshi')
parser.add_argument('--retry', type=int, dest='retry', default=3,
help='retry times when downloading failed')
parser.add_argument('--exit-on-fail', dest='exit_on_fail', action='store_true', default=False,
help='exit on fail to prevent generating incomplete files')
parser.add_argument('--proxy', type=str, dest='proxy',
help='store a proxy, for example: -p "http://127.0.0.1:1080"')
parser.add_argument('--file', '-f', type=str, dest='file',
help='read gallery IDs from file.')
parser.add_argument('--format', type=str, dest='name_format', default='[%i][%a][%t]',
help='format the saved folder name')
parser.add_argument('--no-filename-padding', action='store_true', dest='no_filename_padding',
default=False, help='no padding in the images filename, such as \'001.jpg\'')
# generate options
parser.add_option('--html', dest='html_viewer', action='store_true',
help='generate a html viewer at current directory')
parser.add_option('--no-html', dest='is_nohtml', action='store_true',
help='don\'t generate HTML after downloading')
parser.add_option('--gen-main', dest='main_viewer', action='store_true',
help='generate a main viewer contain all the doujin in the folder')
parser.add_option('--cbz', '-C', dest='is_cbz', action='store_true',
help='generate Comic Book CBZ File')
parser.add_option('--pdf', '-P', dest='is_pdf', action='store_true',
help='generate PDF file')
parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
help='remove downloaded doujinshi dir when generated CBZ or PDF file.')
parser.add_argument('--html', dest='html_viewer', type=str, nargs='?', const='.',
help='generate an HTML viewer in the specified directory, or scan all subfolders '
'within the entire directory to generate the HTML viewer. By default, current '
'working directory is used.')
parser.add_argument('--no-html', dest='is_nohtml', action='store_true',
help='don\'t generate HTML after downloading')
parser.add_argument('--gen-main', dest='main_viewer', action='store_true',
help='generate a main viewer contain all the doujin in the folder')
parser.add_argument('--cbz', '-C', dest='is_cbz', action='store_true',
help='generate Comic Book CBZ File')
parser.add_argument('--pdf', '-P', dest='is_pdf', action='store_true',
help='generate PDF file')
parser.add_argument('--meta', dest='generate_metadata', action='store_true', default=False,
help='generate a metadata file in doujinshi format')
parser.add_argument('--update-meta', dest='update_metadata', action='store_true', default=False,
help='update the metadata file of a doujinshi, update CBZ metadata if exists')
parser.add_argument('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
help='remove downloaded doujinshi dir when generated CBZ or PDF file')
parser.add_argument('--move-to-folder', dest='move_to_folder', action='store_true', default=False,
help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file')
parser.add_argument('--regenerate', dest='regenerate', action='store_true', default=False,
help='regenerate the cbz or pdf file if exists')
parser.add_argument('--zip', action='store_true', help='Package into a single zip file')
# nhentai options
parser.add_option('--cookie', type='str', dest='cookie', action='store',
help='set cookie of nhentai to bypass Google recaptcha')
parser.add_option('--language', type='str', dest='language', action='store',
help='set default language to parse doujinshis')
parser.add_option('--clean-language', dest='clean_language', action='store_true', default=False,
help='set DEFAULT as language to parse doujinshis')
parser.add_option('--save-download-history', dest='is_save_download_history', action='store_true',
default=False, help='save downloaded doujinshis, whose will be skipped if you re-download them')
parser.add_option('--clean-download-history', action='store_true', default=False, dest='clean_download_history',
help='clean download history')
parser.add_argument('--cookie', type=str, dest='cookie',
help='set cookie of nhentai to bypass Cloudflare captcha')
parser.add_argument('--useragent', '--user-agent', type=str, dest='useragent',
help='set useragent to bypass Cloudflare captcha')
parser.add_argument('--language', type=str, dest='language',
help='set default language to parse doujinshis')
parser.add_argument('--clean-language', dest='clean_language', action='store_true', default=False,
help='set DEFAULT as language to parse doujinshis')
parser.add_argument('--save-download-history', dest='is_save_download_history', action='store_true',
default=False, help='save downloaded doujinshis, whose will be skipped if you re-download them')
parser.add_argument('--clean-download-history', action='store_true', default=False, dest='clean_download_history',
help='clean download history')
parser.add_argument('--template', dest='viewer_template', type=str, default='',
help='set viewer template')
parser.add_argument('--legacy', dest='legacy', action='store_true', default=False,
help='use legacy searching method')
try:
sys.argv = [unicode(i.decode(sys.stdin.encoding)) for i in sys.argv]
print()
except (NameError, TypeError):
pass
except UnicodeDecodeError:
exit(0)
args, _ = parser.parse_args(sys.argv[1:])
args = parser.parse_args()
if args.html_viewer:
generate_html()
exit(0)
if not os.path.exists(args.html_viewer):
logger.error(f'Path \'{args.html_viewer}\' not exists')
sys.exit(1)
for root, dirs, files in os.walk(args.html_viewer):
if not dirs:
generate_html(output_dir=args.html_viewer, template=constant.CONFIG['template'])
sys.exit(0)
for dir_name in dirs:
# it will scan the entire subdirectories
doujinshi_dir = os.path.join(root, dir_name)
items = set(map(lambda s: os.path.splitext(s)[1], os.listdir(doujinshi_dir)))
# skip directory without any images
if items & set(EXTENSIONS):
generate_html(output_dir=doujinshi_dir, template=constant.CONFIG['template'])
sys.exit(0)
sys.exit(0)
if args.main_viewer and not args.id and not args.keyword and not args.favorites:
generate_main_html()
exit(0)
sys.exit(0)
if args.clean_download_history:
with DB() as db:
db.clean_all()
logger.info('Download history cleaned.')
exit(0)
sys.exit(0)
# --- set config ---
if args.cookie:
constant.CONFIG['cookie'] = args.cookie
if args.cookie is not None:
constant.CONFIG['cookie'] = args.cookie.strip()
write_config()
logger.info('Cookie saved.')
write_config()
exit(0)
if args.language:
constant.CONFIG['language'] = args.language
logger.info('LANGUAGE now set to \'{0}\''.format(args.language))
if args.useragent is not None:
constant.CONFIG['useragent'] = args.useragent.strip()
write_config()
exit(0)
logger.info('User-Agent saved.')
if args.language is not None:
constant.CONFIG['language'] = args.language
write_config()
logger.info(f'Default language now set to "{args.language}"')
# TODO: search without language
if args.proxy:
if any([args.cookie, args.useragent, args.language]):
sys.exit(0)
if args.proxy is not None:
proxy_url = urlparse(args.proxy)
if proxy_url.scheme not in ('http', 'https'):
logger.error('Invalid protocol \'{0}\' of proxy, ignored'.format(proxy_url.scheme))
constant.CONFIG['proxy'] = {
'http': args.proxy,
'https': args.proxy,
}
logger.info('Proxy \'{0}\' saved.'.format(args.proxy))
write_config()
exit(0)
if not args.proxy == '' and proxy_url.scheme not in ('http', 'https', 'socks5', 'socks5h',
'socks4', 'socks4a'):
logger.error(f'Invalid protocol "{proxy_url.scheme}" of proxy, ignored')
sys.exit(0)
else:
constant.CONFIG['proxy'] = args.proxy
logger.info(f'Proxy now set to "{args.proxy}"')
write_config()
sys.exit(0)
if args.viewer_template is not None:
if not args.viewer_template:
args.viewer_template = 'default'
if not os.path.exists(os.path.join(os.path.dirname(__file__),
f'viewer/{args.viewer_template}/index.html')):
logger.error(f'Template "{args.viewer_template}" does not exists')
sys.exit(1)
else:
constant.CONFIG['template'] = args.viewer_template
write_config()
# --- end set config ---
if args.favorites:
if not constant.CONFIG['cookie']:
logger.warning('Cookie has not been set, please use `nhentai --cookie \'COOKIE\'` to set it.')
exit(1)
if args.id:
_ = [i.strip() for i in args.id.split(',')]
args.id = set(int(i) for i in _ if i.isdigit())
sys.exit(1)
if args.file:
with open(args.file, 'r') as f:
_ = [i.strip() for i in f.readlines()]
args.id = set(int(i) for i in _ if i.isdigit())
if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites:
if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites and not args.artist:
logger.critical('Doujinshi id(s) are required for downloading')
parser.print_help()
exit(1)
sys.exit(1)
if not args.keyword and not args.id and not args.favorites:
if not args.keyword and not args.id and not args.favorites and not args.artist:
parser.print_help()
exit(1)
sys.exit(1)
if args.threads <= 0:
args.threads = 1
elif args.threads > 15:
logger.critical('Maximum number of used threads is 15')
exit(1)
sys.exit(1)
return args

nhentai/command.py

@@ -1,53 +1,75 @@
#!/usr/bin/env python2.7
# coding: utf-8
from __future__ import unicode_literals, print_function
import json
import os
import shutil
import sys
import signal
import platform
import time
import urllib3.exceptions
from nhentai import constant
from nhentai.cmdline import cmd_parser, banner
from nhentai.parser import doujinshi_parser, search_parser, print_doujinshi, favorites_parser
from nhentai.cmdline import cmd_parser, banner, write_config
from nhentai.parser import doujinshi_parser, search_parser, legacy_search_parser, print_doujinshi, favorites_parser
from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader
from nhentai.downloader import Downloader, CompressedDownloader
from nhentai.logger import logger
from nhentai.constant import NHENTAI_CONFIG_FILE, BASE_URL
from nhentai.utils import generate_html, generate_cbz, generate_main_html, generate_pdf, \
paging, check_cookie, signal_handler, DB
from nhentai.constant import BASE_URL
from nhentai.utils import generate_html, generate_doc, generate_main_html, generate_metadata, \
paging, check_cookie, signal_handler, DB, move_to_folder
def main():
banner()
if sys.version_info < (3, 0, 0):
logger.error('nhentai now only support Python 3.x')
sys.exit(1)
options = cmd_parser()
logger.info('Using mirror: {0}'.format(BASE_URL))
logger.info(f'Using mirror: {BASE_URL}')
# CONFIG['proxy'] will be changed after cmd_parser()
if constant.CONFIG['proxy']:
logger.info('Using proxy: {0}'.format(constant.CONFIG['proxy']))
if isinstance(constant.CONFIG['proxy'], dict):
constant.CONFIG['proxy'] = constant.CONFIG['proxy'].get('http', '')
logger.warning(f'Update proxy config to: {constant.CONFIG["proxy"]}')
write_config()
logger.info(f'Using proxy: {constant.CONFIG["proxy"]}')
if not constant.CONFIG['template']:
constant.CONFIG['template'] = 'default'
logger.info(f'Using viewer template "{constant.CONFIG["template"]}"')
# check your cookie
check_cookie()
doujinshis = []
doujinshi_ids = []
doujinshi_list = []
page_list = paging(options.page)
if options.retry:
constant.RETRY_TIMES = int(options.retry)
if options.favorites:
if not options.is_download:
logger.warning('You do not specify --download option')
doujinshis = favorites_parser(page=page_list)
doujinshis = favorites_parser(page=page_list) if options.page else favorites_parser()
elif options.keyword:
if constant.CONFIG['language']:
logger.info('Using default language: {0}'.format(constant.CONFIG['language']))
options.keyword += ' language:{}'.format(constant.CONFIG['language'])
doujinshis = search_parser(options.keyword, sorting=options.sorting, page=page_list,
is_page_all=options.page_all)
logger.info(f'Using default language: {constant.CONFIG["language"]}')
options.keyword += f' language:{constant.CONFIG["language"]}'
_search_parser = legacy_search_parser if options.legacy else search_parser
doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list,
is_page_all=options.page_all)
elif options.artist:
doujinshis = legacy_search_parser(options.artist, sorting=options.sorting, page=page_list,
is_page_all=options.page_all, type_='ARTIST')
elif not doujinshi_ids:
doujinshi_ids = options.id
@ -56,56 +78,83 @@ def main():
if options.is_download and doujinshis:
doujinshi_ids = [i['id'] for i in doujinshis]
if options.is_save_download_history:
with DB() as db:
data = set(db.get_all())
if options.is_save_download_history:
with DB() as db:
data = set(map(int, db.get_all()))
doujinshi_ids = list(set(doujinshi_ids) - data)
doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))
logger.info(f'New doujinshis count: {len(doujinshi_ids)}')
if doujinshi_ids:
for i, id_ in enumerate(doujinshi_ids):
if options.delay:
time.sleep(options.delay)
doujinshi_info = doujinshi_parser(id_)
if doujinshi_info:
doujinshi_list.append(Doujinshi(name_format=options.name_format, **doujinshi_info))
if (i + 1) % 10 == 0:
logger.info('Progress: %d / %d' % (i + 1, len(doujinshi_ids)))
if options.zip:
options.is_nohtml = True
if not options.is_show:
downloader = Downloader(path=options.output_dir, size=options.threads,
timeout=options.timeout, delay=options.delay)
downloader = (CompressedDownloader if options.zip else Downloader)(path=options.output_dir, threads=options.threads,
timeout=options.timeout, delay=options.delay,
exit_on_fail=options.exit_on_fail,
no_filename_padding=options.no_filename_padding)
for doujinshi in doujinshi_list:
for doujinshi_id in doujinshi_ids:
doujinshi_info = doujinshi_parser(doujinshi_id)
if doujinshi_info:
doujinshi = Doujinshi(name_format=options.name_format, **doujinshi_info)
else:
continue
doujinshi.downloader = downloader
doujinshi.download()
if doujinshi.check_if_need_download(options):
doujinshi.download()
else:
logger.info(f'Skip downloading doujinshi {doujinshi.name} because a PDF/CBZ file already exists')
if options.generate_metadata:
generate_metadata(options.output_dir, doujinshi)
if options.is_save_download_history:
with DB() as db:
db.add_one(doujinshi.id)
if not options.is_nohtml and not options.is_cbz and not options.is_pdf:
generate_html(options.output_dir, doujinshi)
elif options.is_cbz:
generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)
elif options.is_pdf:
generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir)
if not options.is_nohtml:
generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template'])
if options.is_cbz:
generate_doc('cbz', options.output_dir, doujinshi, options.regenerate)
if options.is_pdf:
generate_doc('pdf', options.output_dir, doujinshi, options.regenerate)
if options.move_to_folder:
if options.is_cbz:
move_to_folder(options.output_dir, doujinshi, 'cbz')
if options.is_pdf:
move_to_folder(options.output_dir, doujinshi, 'pdf')
if options.rm_origin_dir:
if options.move_to_folder:
logger.critical('You specified both --move-to-folder and --rm-origin-dir options, '
'you will not get anything :(')
shutil.rmtree(os.path.join(options.output_dir, doujinshi.filename), ignore_errors=True)
if options.main_viewer:
generate_main_html(options.output_dir)
if not platform.system() == 'Windows':
logger.log(15, '🍻 All done.')
logger.log(16, '🍻 All done.')
else:
logger.log(15, 'All done.')
logger.log(16, 'All done.')
else:
[doujinshi.show() for doujinshi in doujinshi_list]
for doujinshi_id in doujinshi_ids:
doujinshi_info = doujinshi_parser(doujinshi_id)
if doujinshi_info:
doujinshi = Doujinshi(name_format=options.name_format, **doujinshi_info)
else:
continue
doujinshi.show()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
signal.signal(signal.SIGINT, signal_handler)
if __name__ == '__main__':

View File

@ -1,37 +1,73 @@
# coding: utf-8
from __future__ import unicode_literals, print_function
import os
import tempfile
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
from urllib.parse import urlparse
from platform import system
def get_nhentai_home() -> str:
home = os.getenv('HOME', tempfile.gettempdir())
if system() == 'Linux':
xdgdat = os.getenv('XDG_DATA_HOME')
if xdgdat and os.path.exists(os.path.join(xdgdat, 'nhentai')):
return os.path.join(xdgdat, 'nhentai')
if home and os.path.exists(os.path.join(home, '.nhentai')):
return os.path.join(home, '.nhentai')
if xdgdat:
return os.path.join(xdgdat, 'nhentai')
# Use old default path in other systems
return os.path.join(home, '.nhentai')
DEBUG = os.getenv('DEBUG', False)
BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
__api_suspended_DETAIL_URL = '%s/api/gallery' % BASE_URL
DETAIL_URL = f'{BASE_URL}/g'
LEGACY_SEARCH_URL = f'{BASE_URL}/search/'
SEARCH_URL = f'{BASE_URL}/api/galleries/search'
ARTIST_URL = f'{BASE_URL}/artist/'
DETAIL_URL = '%s/g' % BASE_URL
SEARCH_URL = '%s/api/galleries/search' % BASE_URL
TAG_API_URL = f'{BASE_URL}/api/galleries/tagged'
LOGIN_URL = f'{BASE_URL}/login/'
CHALLENGE_URL = f'{BASE_URL}/challenge'
FAV_URL = f'{BASE_URL}/favorites/'
PATH_SEPARATOR = os.path.sep
RETRY_TIMES = 3
TAG_API_URL = '%s/api/galleries/tagged' % BASE_URL
LOGIN_URL = '%s/login/' % BASE_URL
CHALLENGE_URL = '%s/challenge' % BASE_URL
FAV_URL = '%s/favorites/' % BASE_URL
IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i1.{urlparse(BASE_URL).hostname}/galleries'
IMAGE_URL_MIRRORS = [
f'{urlparse(BASE_URL).scheme}://i2.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i4.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i6.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}',
]
u = urlparse(BASE_URL)
IMAGE_URL = '%s://i.%s/galleries' % (u.scheme, u.hostname)
NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
NHENTAI_HOME = get_nhentai_home()
NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')
__api_suspended_DETAIL_URL = f'{BASE_URL}/api/gallery'
CONFIG = {
'proxy': {},
'proxy': '',
'cookie': '',
'language': '',
'template': '',
'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
'max_filename': 85
}
LANGUAGE_ISO = {
'english': 'en',
'chinese': 'zh',
'japanese': 'ja',
'translated': 'translated'
}
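As a quick illustration of what the image URLs above evaluate to (assuming BASE_URL keeps its default value; the NHENTAI environment variable can override it, as shown at the top of the file):
from urllib.parse import urlparse

base = 'https://nhentai.net'   # default BASE_URL
u = urlparse(base)
print(f'{u.scheme}://i1.{u.hostname}/galleries')  # -> https://i1.nhentai.net/galleries (IMAGE_URL)
print(f'{u.scheme}://i2.{u.hostname}')            # -> https://i2.nhentai.net (first entry of IMAGE_URL_MIRRORS)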

View File

@ -1,7 +1,7 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import os
from tabulate import tabulate
from future.builtins import range
from nhentai.constant import DETAIL_URL, IMAGE_URL
from nhentai.logger import logger
@ -12,6 +12,7 @@ EXT_MAP = {
'j': 'jpg',
'p': 'png',
'g': 'gif',
'w': 'webp',
}
@ -21,65 +22,105 @@ class DoujinshiInfo(dict):
def __getattr__(self, item):
try:
return dict.__getitem__(self, item)
ret = dict.__getitem__(self, item)
return ret if ret else 'Unknown'
except KeyError:
return ''
return 'Unknown'
class Doujinshi(object):
def __init__(self, name=None, id=None, img_id=None, ext='', pages=0, name_format='[%i][%a][%t]', **kwargs):
def __init__(self, name=None, pretty_name=None, id=None, favorite_counts=0, img_id=None,
ext='', pages=0, name_format='[%i][%a][%t]', **kwargs):
self.name = name
self.pretty_name = pretty_name
self.id = id
self.favorite_counts = favorite_counts
self.img_id = img_id
self.ext = ext
self.pages = pages
self.downloader = None
self.url = '%s/%d' % (DETAIL_URL, self.id)
self.url = f'{DETAIL_URL}/{self.id}'
self.info = DoujinshiInfo(**kwargs)
name_format = name_format.replace('%i', str(self.id))
name_format = name_format.replace('%a', self.info.artists)
name_format = name_format.replace('%t', self.name)
name_format = name_format.replace('%s', self.info.subtitle)
self.filename = format_filename(name_format)
ag_value = self.info.groups if self.info.artists == 'Unknown' else self.info.artists
name_format = name_format.replace('%ag', format_filename(ag_value))
name_format = name_format.replace('%i', format_filename(str(self.id)))
name_format = name_format.replace('%f', format_filename(str(self.favorite_counts)))
name_format = name_format.replace('%a', format_filename(self.info.artists))
name_format = name_format.replace('%g', format_filename(self.info.groups))
name_format = name_format.replace('%t', format_filename(self.name))
name_format = name_format.replace('%p', format_filename(self.pretty_name))
name_format = name_format.replace('%s', format_filename(self.info.subtitle))
self.filename = format_filename(name_format, 255, True)
self.table = [
['Parodies', self.info.parodies],
['Title', self.name],
['Subtitle', self.info.subtitle],
['Date', self.info.date],
['Characters', self.info.characters],
['Authors', self.info.artists],
['Groups', self.info.groups],
['Languages', self.info.languages],
['Tags', self.info.tags],
['Favorite Counts', self.favorite_counts],
['URL', self.url],
['Pages', self.pages],
]
def __repr__(self):
return '<Doujinshi: {0}>'.format(self.name)
return f'<Doujinshi: {self.name}>'
def show(self):
table = [
["Parodies", self.info.parodies],
["Doujinshi", self.name],
["Subtitle", self.info.subtitle],
["Characters", self.info.characters],
["Authors", self.info.artists],
["Languages", self.info.languages],
["Tags", self.info.tags],
["URL", self.url],
["Pages", self.pages],
]
logger.info(u'Print doujinshi information of {0}\n{1}'.format(self.id, tabulate(table)))
logger.info(f'Print doujinshi information of {self.id}\n{tabulate(self.table)}')
def check_if_need_download(self, options):
if options.no_download:
return False
base_path = os.path.join(self.downloader.path, self.filename)
# regenerate, re-download
if options.regenerate:
return True
# pdf or cbz file exists, skip re-download
# doujinshi directory may not exist b/c of --rm-origin-dir option set.
# user should pass --regenerate option to get back origin dir.
ret_pdf = ret_cbz = None
if options.is_pdf:
ret_pdf = os.path.exists(f'{base_path}.pdf') or os.path.exists(f'{base_path}/{self.filename}.pdf')
if options.is_cbz:
ret_cbz = os.path.exists(f'{base_path}.cbz') or os.path.exists(f'{base_path}/{self.filename}.cbz')
ret = list(filter(lambda s: s is not None, [ret_cbz, ret_pdf]))
if ret and all(ret):
return False
# doujinshi directory doesn't exist, re-download
if not (os.path.exists(base_path) and os.path.isdir(base_path)):
return True
# fallback
return True
def download(self):
logger.info('Starting to download doujinshi: %s' % self.name)
logger.info(f'Starting to download doujinshi: {self.name}')
if self.downloader:
download_queue = []
if len(self.ext) != self.pages:
logger.warning('Page count and ext count do not match')
for i in range(1, min(self.pages, len(self.ext)) + 1):
download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext[i-1]))
self.downloader.download(download_queue, self.filename)
'''
for i in range(len(self.ext)):
download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i+1, EXT_MAP[self.ext[i]]))
'''
download_queue.append(f'{IMAGE_URL}/{self.img_id}/{i}.{self.ext[i-1]}')
return self.downloader.start_download(download_queue, self.filename)
else:
logger.critical('Downloader has not been loaded')
return False
if __name__ == '__main__':
@ -89,4 +130,4 @@ if __name__ == '__main__':
try:
test.download()
except Exception as e:
print('Exception: %s' % str(e))
print(f'Exception: {e}')
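For reference, the %-placeholders substituted in __init__ above map to doujinshi fields as follows; the sample values in this sketch are invented:
# Hypothetical walk-through of the default name_format '[%i][%a][%t]':
name_format = '[%i][%a][%t]'
name_format = name_format.replace('%i', '123456')       # doujinshi id
name_format = name_format.replace('%a', 'Some Artist')  # info.artists
name_format = name_format.replace('%t', 'Some Title')   # doujinshi name
print(name_format)  # -> [123456][Some Artist][Some Title]
# Other tokens handled above: %f favorite_counts, %g groups, %p pretty_name,
# %s subtitle, and %ag (artists, falling back to groups when artists is 'Unknown').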

View File

@ -1,156 +1,195 @@
# coding: utf-8
from __future__ import unicode_literals, print_function
import multiprocessing
import signal
from future.builtins import str as text
import sys
import os
import requests
import time
try:
from urllib.parse import urlparse
except ImportError:
from urlparse import urlparse
import asyncio
import httpx
import urllib3.exceptions
import zipfile
import io
from urllib.parse import urlparse
from nhentai import constant
from nhentai.logger import logger
from nhentai.parser import request
from nhentai.utils import Singleton
requests.packages.urllib3.disable_warnings()
semaphore = multiprocessing.Semaphore(1)
from nhentai.utils import Singleton, async_request
class NHentaiImageNotExistException(Exception):
pass
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def download_callback(result):
result, data = result
if result == 0:
logger.warning('fatal errors occurred, ignored')
elif result == -1:
logger.warning(f'url {data} return status code 404')
elif result == -2:
logger.warning('Ctrl-C pressed, exiting sub processes ...')
elif result == -3:
# workers won't be run, just pass
pass
else:
logger.log(16, f'{data} downloaded successfully')
class Downloader(Singleton):
def __init__(self, path='', size=5, timeout=30, delay=0):
self.size = size
def __init__(self, path='', threads=5, timeout=30, delay=0, exit_on_fail=False,
no_filename_padding=False):
self.threads = threads
self.path = str(path)
self.timeout = timeout
self.delay = delay
self.exit_on_fail = exit_on_fail
self.folder = None
self.semaphore = None
self.no_filename_padding = no_filename_padding
async def fiber(self, tasks):
self.semaphore = asyncio.Semaphore(self.threads)
for completed_task in asyncio.as_completed(tasks):
try:
result = await completed_task
if result[0] > 0:
logger.info(f'{result[1]} download completed')
else:
raise Exception(f'{result[1]} download failed, return value {result[0]}')
except Exception as e:
logger.error(f'An error occurred: {e}')
if self.exit_on_fail:
raise Exception('User intends to exit on fail')
async def _semaphore_download(self, *args, **kwargs):
async with self.semaphore:
return await self.download(*args, **kwargs)
async def download(self, url, folder='', filename='', retried=0, proxy=None, length=0):
logger.info(f'Starting to download {url} ...')
def download_(self, url, folder='', filename='', retried=0):
if self.delay:
time.sleep(self.delay)
logger.info('Starting to download {0} ...'.format(url))
await asyncio.sleep(self.delay)
filename = filename if filename else os.path.basename(urlparse(url).path)
base_filename, extension = os.path.splitext(filename)
if not self.no_filename_padding:
filename = base_filename.zfill(length) + extension
else:
filename = base_filename + extension
try:
if os.path.exists(os.path.join(folder, base_filename.zfill(3) + extension)):
logger.warning('File: {0} exists, ignoring'.format(os.path.join(folder, base_filename.zfill(3) +
extension)))
return 1, url
response = await async_request('GET', url, timeout=self.timeout, proxy=proxy)
response = None
with open(os.path.join(folder, base_filename.zfill(3) + extension), "wb") as f:
i = 0
while i < 10:
try:
response = request('get', url, stream=True, timeout=self.timeout)
if response.status_code != 200:
raise NHentaiImageNotExistException
if response.status_code != 200:
path = urlparse(url).path
for mirror in constant.IMAGE_URL_MIRRORS:
logger.info(f"Try mirror: {mirror}{path}")
mirror_url = f'{mirror}{path}'
response = await async_request('GET', mirror_url, timeout=self.timeout, proxies=proxy)
if response.status_code == 200:
break
except NHentaiImageNotExistException as e:
raise e
if not await self.save(filename, response):
logger.error(f'Can not download image {url}')
return -1, url
except Exception as e:
i += 1
if not i < 10:
logger.critical(str(e))
return 0, None
continue
except (httpx.HTTPStatusError, httpx.TimeoutException, httpx.ConnectError) as e:
if retried < constant.RETRY_TIMES:
logger.warning(f'Download {filename} failed, retrying ({retried + 1}/{constant.RETRY_TIMES}) ...')
return await self.download(
url=url,
folder=folder,
filename=filename,
retried=retried + 1,
proxy=proxy,
)
else:
logger.warning(f'Download {filename} failed after {constant.RETRY_TIMES} retries, skipped')
return -2, url
break
except Exception as e:
import traceback
logger.error(f"Exception type: {type(e)}")
traceback.print_stack()
logger.critical(str(e))
return -9, url
except KeyboardInterrupt:
return -4, url
return 1, url
async def save(self, filename, response) -> bool:
if response is None:
logger.error('Error: Response is None')
return False
save_file_path = os.path.join(self.folder, filename)
with open(save_file_path, 'wb') as f:
if response is not None:
length = response.headers.get('content-length')
if length is None:
f.write(response.content)
else:
for chunk in response.iter_content(2048):
async for chunk in response.aiter_bytes(2048):
f.write(chunk)
return True
except (requests.HTTPError, requests.Timeout) as e:
if retried < 3:
logger.warning('Warning: {0}, retrying({1}) ...'.format(str(e), retried))
return 0, self.download_(url=url, folder=folder, filename=filename, retried=retried+1)
else:
return 0, None
def create_storage_object(self, folder:str):
if not os.path.exists(folder):
try:
os.makedirs(folder)
except EnvironmentError as e:
logger.critical(str(e))
self.folder:str = folder
self.close = lambda: None # Only available in class CompressedDownloader
except NHentaiImageNotExistException as e:
os.remove(os.path.join(folder, base_filename.zfill(3) + extension))
return -1, url
except Exception as e:
import traceback
traceback.print_stack()
logger.critical(str(e))
return 0, None
except KeyboardInterrupt:
return -3, None
return 1, url
def _download_callback(self, result):
result, data = result
if result == 0:
logger.warning('fatal errors occurred, ignored')
# exit(1)
elif result == -1:
logger.warning('url {} return status code 404'.format(data))
elif result == -2:
logger.warning('Ctrl-C pressed, exiting sub processes ...')
elif result == -3:
# workers won't be run, just pass
pass
else:
logger.log(15, '{0} downloaded successfully'.format(data))
def download(self, queue, folder=''):
if not isinstance(folder, text):
def start_download(self, queue, folder='') -> bool:
if not isinstance(folder, (str,)):
folder = str(folder)
if self.path:
folder = os.path.join(self.path, folder)
if not os.path.exists(folder):
logger.warn('Path \'{0}\' does not exist, creating.'.format(folder))
try:
os.makedirs(folder)
except EnvironmentError as e:
logger.critical('{0}'.format(str(e)))
logger.info(f'Doujinshi will be saved at "{folder}"')
self.create_storage_object(folder)
if os.getenv('DEBUG', None) == 'NODOWNLOAD':
# Assuming we want to continue with rest of process.
return True
digit_length = len(str(len(queue)))
logger.info(f'Total download pages: {len(queue)}')
coroutines = [
self._semaphore_download(url, filename=os.path.basename(urlparse(url).path), length=digit_length)
for url in queue
]
# Prevent coroutines infection
asyncio.run(self.fiber(coroutines))
self.close()
return True
class CompressedDownloader(Downloader):
def create_storage_object(self, folder):
filename = f'{folder}.zip'
print(filename)
self.zipfile = zipfile.ZipFile(filename,'w')
self.close = lambda: self.zipfile.close()
async def save(self, filename, response) -> bool:
if response is None:
logger.error('Error: Response is None')
return False
image_data = io.BytesIO()
length = response.headers.get('content-length')
if length is None:
content = await response.read()
image_data.write(content)
else:
logger.warn('Path \'{0}\' already exist.'.format(folder))
async for chunk in response.aiter_bytes(2048):
image_data.write(chunk)
queue = [(self, url, folder) for url in queue]
pool = multiprocessing.Pool(self.size, init_worker)
[pool.apply_async(download_wrapper, args=item) for item in queue]
pool.close()
pool.join()
def download_wrapper(obj, url, folder=''):
if sys.platform == 'darwin' or semaphore.get_value():
return Downloader.download_(obj, url=url, folder=folder)
else:
return -3, None
def init_worker():
signal.signal(signal.SIGINT, subprocess_signal)
def subprocess_signal(signal, frame):
if semaphore.acquire(timeout=1):
logger.warning('Ctrl-C pressed, exiting sub processes ...')
raise KeyboardInterrupt
image_data.seek(0)
self.zipfile.writestr(filename, image_data.read())
return True
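The fiber()/_semaphore_download() pair above implements the usual asyncio bounded-concurrency pattern; here is a self-contained sketch of the same idea, with illustrative names rather than the Downloader API itself:
import asyncio

async def fetch_one(semaphore, page):
    async with semaphore:            # at most `threads` downloads run at once
        await asyncio.sleep(0.1)     # stand-in for one image request
        return 1, f'{page}.jpg'

async def fetch_all(threads=5, pages=10):
    semaphore = asyncio.Semaphore(threads)
    tasks = [fetch_one(semaphore, p) for p in range(1, pages + 1)]
    for completed in asyncio.as_completed(tasks):
        status, name = await completed
        print(status, name)

asyncio.run(fetch_all())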

View File

@ -1,7 +1,6 @@
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
#
from __future__ import print_function, unicode_literals
import logging
import re
import platform
@ -35,7 +34,7 @@ class ColorizingStreamHandler(logging.StreamHandler):
# levels to (background, foreground, bold/intense)
level_map = {
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'green', False),
logging.INFO: (None, 'white', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', False)
@ -161,20 +160,20 @@ class ColorizingStreamHandler(logging.StreamHandler):
return self.colorize(message, record)
logging.addLevelName(15, "INFO")
logging.addLevelName(16, "SUCCESS")
logger = logging.getLogger('nhentai')
LOGGER_HANDLER = ColorizingStreamHandler(sys.stdout)
FORMATTER = logging.Formatter("\r[%(asctime)s] [%(levelname)s] %(message)s", "%H:%M:%S")
FORMATTER = logging.Formatter("\r[%(asctime)s] %(funcName)s: %(message)s", "%H:%M:%S")
LOGGER_HANDLER.setFormatter(FORMATTER)
LOGGER_HANDLER.level_map[logging.getLevelName("INFO")] = (None, "cyan", False)
LOGGER_HANDLER.level_map[logging.getLevelName("SUCCESS")] = (None, "green", False)
logger.addHandler(LOGGER_HANDLER)
logger.setLevel(logging.DEBUG)
if __name__ == '__main__':
logger.log(15, 'nhentai')
logger.log(16, 'nhentai')
logger.info('info')
logger.warn('warn')
logger.warning('warning')
logger.debug('debug')
logger.error('error')
logger.critical('critical')
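For context, the custom level 16 registered above behaves like any other numeric logging level; a minimal standalone sketch, independent of the ColorizingStreamHandler:
import logging

logging.addLevelName(16, 'SUCCESS')
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger('example')      # arbitrary logger name for this sketch
log.log(16, 'downloaded successfully')  # emitted at the SUCCESS level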

View File

@ -1,6 +1,5 @@
# coding: utf-8
from __future__ import unicode_literals, print_function
import sys
import os
import re
import time
@ -27,7 +26,7 @@ def login(username, password):
logger.info('Getting CSRF token ...')
if os.getenv('DEBUG'):
logger.info('CSRF token is {}'.format(csrf_token))
logger.info(f'CSRF token is {csrf_token}')
login_dict = {
'csrfmiddlewaretoken': csrf_token,
@ -42,11 +41,11 @@ def login(username, password):
if 'Invalid username/email or password' in resp.text:
logger.error('Login failed, please check your username and password')
exit(1)
sys.exit(1)
if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
logger.error('Using nhentai --cookie \'YOUR_COOKIE_HERE\' to save your Cookie.')
exit(2)
sys.exit(2)
def _get_title_and_id(response):
@ -57,7 +56,7 @@ def _get_title_and_id(response):
doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
title = doujinshi_container.text.strip()
title = title if len(title) < 85 else title[:82] + '...'
id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
id_ = re.search('/g/([0-9]+)/', doujinshi.a['href']).group(1)
result.append({'id': id_, 'title': title})
return result
@ -68,7 +67,7 @@ def favorites_parser(page=None):
html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
count = html.find('span', attrs={'class': 'count'})
if not count:
logger.error("Can't get your number of favorited doujins. Did the login failed?")
logger.error("Can't get your number of favorite doujinshis. Did the login failed?")
return []
count = int(count.text.strip('(').strip(')').replace(',', ''))
@ -85,7 +84,7 @@ def favorites_parser(page=None):
else:
pages = 1
logger.info('You have %d favorites in %d pages.' % (count, pages))
logger.info(f'You have {count} favorites in {pages} pages.')
if os.getenv('DEBUG'):
pages = 1
@ -93,64 +92,100 @@ def favorites_parser(page=None):
page_range_list = range(1, pages + 1)
for page in page_range_list:
try:
logger.info('Getting doujinshi ids of page %d' % page)
resp = request('get', constant.FAV_URL + '?page=%d' % page).content
logger.info(f'Getting doujinshi ids of page {page}')
result.extend(_get_title_and_id(resp))
except Exception as e:
logger.error('Error: %s, continue', str(e))
i = 0
while i <= constant.RETRY_TIMES + 1:
i += 1
if i > 3:
logger.error(f'Failed to get favorites at page {page} after 3 retries, skipped')
break
try:
resp = request('get', f'{constant.FAV_URL}?page={page}').content
temp_result = _get_title_and_id(resp)
if not temp_result:
logger.warning(f'Failed to get favorites at page {page}, retrying (attempt {i}) ...')
continue
else:
result.extend(temp_result)
break
except Exception as e:
logger.warning(f'Error: {e}, retrying (attempt {i}) ...')
return result
def doujinshi_parser(id_):
def doujinshi_parser(id_, counter=0):
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
raise Exception('Doujinshi id({0}) is not valid'.format(id_))
raise Exception(f'Doujinshi id({id_}) is not valid')
id_ = int(id_)
logger.log(15, 'Fetching doujinshi information of id {0}'.format(id_))
logger.info(f'Fetching doujinshi information of id {id_}')
doujinshi = dict()
doujinshi['id'] = id_
url = '{0}/{1}/'.format(constant.DETAIL_URL, id_)
url = f'{constant.DETAIL_URL}/{id_}/'
try:
response = request('get', url)
if response.status_code in (200,):
if response.status_code in (200, ):
response = response.content
elif response.status_code in (404,):
logger.error(f'Doujinshi with id {id_} cannot be found')
return []
else:
logger.debug('Slow down and retry ({}) ...'.format(id_))
counter += 1
if counter == 10:
logger.critical(f'Failed to fetch doujinshi information of id {id_}')
return None
logger.debug(f'Slow down and retry ({id_}) ...')
time.sleep(1)
return doujinshi_parser(str(id_))
return doujinshi_parser(str(id_), counter)
except Exception as e:
logger.warn('Error: {}, ignored'.format(str(e)))
logger.warning(f'Error: {e}, ignored')
return None
html = BeautifulSoup(response, 'html.parser')
doujinshi_info = html.find('div', attrs={'id': 'info'})
title = doujinshi_info.find('h1').text
pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text
subtitle = doujinshi_info.find('h2')
favorite_counts = doujinshi_info.find('span', class_='nobold').text.strip('(').strip(')')
doujinshi['name'] = title
doujinshi['pretty_name'] = pretty_name
doujinshi['subtitle'] = subtitle.text if subtitle else ''
doujinshi['favorite_counts'] = int(favorite_counts) if favorite_counts and favorite_counts.isdigit() else 0
doujinshi_cover = html.find('div', attrs={'id': 'cover'})
img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png|gif)$', doujinshi_cover.a.img.attrs['data-src'])
# img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif|webp)$',
# doujinshi_cover.a.img.attrs['data-src'])
# fix cover.webp.webp
img_id = re.search(r'/galleries/(\d+)/cover(\.webp|\.jpg|\.png)?\.\w+$', doujinshi_cover.a.img.attrs['data-src'])
ext = []
for i in html.find_all('div', attrs={'class': 'thumb-container'}):
_, ext_name = os.path.basename(i.img.attrs['data-src']).rsplit('.', 1)
ext.append(ext_name)
base_name = os.path.basename(i.img.attrs['data-src'])
ext_name = base_name.split('.')
if len(ext_name) == 3:
ext.append(ext_name[1])
else:
ext.append(ext_name[-1])
if not img_id:
logger.critical('Tried to get image id but failed')
exit(1)
logger.critical(f'Failed to get image id of doujinshi: {id_}')
return None
doujinshi['img_id'] = img_id.group(1)
doujinshi['ext'] = ext
pages = 0
for _ in doujinshi_info.find_all('div', class_='tag-container field-name'):
if re.search('Pages:', _.text):
pages = _.find('span', class_='name').string
@ -172,29 +207,62 @@ def doujinshi_parser(id_):
return doujinshi
def old_search_parser(keyword, sorting='date', page=1):
logger.debug('Searching doujinshis of keyword {0}'.format(keyword))
response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page, 'sort': sorting}).content
result = _get_title_and_id(response)
if not result:
logger.warn('Not found anything of keyword {}'.format(keyword))
return result
def print_doujinshi(doujinshi_list):
if not doujinshi_list:
return
doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
headers = ['id', 'doujinshi']
logger.info('Search Result || Found %i doujinshis \n' % doujinshi_list.__len__() +
tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
logger.info(f'Search Result || Found {doujinshi_list.__len__()} doujinshis')
print(tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
def legacy_search_parser(keyword, sorting, page, is_page_all=False, type_='SEARCH'):
logger.info(f'Searching doujinshis of keyword {keyword}')
result = []
if type_ not in ('SEARCH', 'ARTIST', ):
raise ValueError('Invalid type')
if is_page_all:
if type_ == 'SEARCH':
response = request('get', url=constant.LEGACY_SEARCH_URL,
params={'q': keyword, 'page': 1, 'sort': sorting}).content
else:
url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
response = request('get', url=url, params={'page': 1}).content
html = BeautifulSoup(response, 'lxml')
pagination = html.find(attrs={'class': 'pagination'})
last_page = pagination.find(attrs={'class': 'last'})
last_page = re.findall('page=([0-9]+)', last_page.attrs['href'])[0]
logger.info(f'Getting doujinshi ids of {last_page} pages')
pages = range(1, int(last_page))
else:
pages = page
for p in pages:
logger.info(f'Fetching page {p} ...')
if type_ == 'SEARCH':
response = request('get', url=constant.LEGACY_SEARCH_URL,
params={'q': keyword, 'page': p, 'sort': sorting}).content
else:
url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
response = request('get', url=url, params={'page': p}).content
if response is None:
logger.warning(f'No result in response in page {p}')
continue
result.extend(_get_title_and_id(response))
if not result:
logger.warning(f'No results for keywords {keyword}')
return result
def search_parser(keyword, sorting, page, is_page_all=False):
# keyword = '+'.join([i.strip().replace(' ', '-').lower() for i in keyword.split(',')])
result = []
response = None
if not page:
page = [1]
@ -203,82 +271,48 @@ def search_parser(keyword, sorting, page, is_page_all=False):
init_response = request('get', url.replace('%2B', '+')).json()
page = range(1, init_response['num_pages']+1)
total = '/{0}'.format(page[-1]) if is_page_all else ''
total = f'/{page[-1]}' if is_page_all else ''
not_exists_persist = False
for p in page:
i = 0
logger.info('Searching doujinshis using keywords "{0}" on page {1}{2}'.format(keyword, p, total))
while i < 3:
logger.info(f'Searching doujinshis using keywords "{keyword}" on page {p}{total}')
while i < constant.RETRY_TIMES:
try:
url = request('get', url=constant.SEARCH_URL, params={'query': keyword,
'page': p, 'sort': sorting}).url
if constant.DEBUG:
logger.debug(f'Request URL: {url}')
response = request('get', url.replace('%2B', '+')).json()
except Exception as e:
logger.critical(str(e))
response = None
break
if 'result' not in response:
logger.warn('No result in response in page {}'.format(p))
break
if constant.DEBUG:
logger.debug(f'Response: {response}')
if response is None or 'result' not in response:
logger.warning(f'No result in response in page {p}')
if not_exists_persist is True:
break
continue
for row in response['result']:
title = row['title']['english']
title = title[:85] + '..' if len(title) > 85 else title
title = title[:constant.CONFIG['max_filename']] + '..' if \
len(title) > constant.CONFIG['max_filename'] else title
result.append({'id': row['id'], 'title': title})
not_exists_persist = False
if not result:
logger.warn('No results for keywords {}'.format(keyword))
logger.warning(f'No results for keywords {keyword}')
return result
def __api_suspended_doujinshi_parser(id_):
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
raise Exception('Doujinshi id({0}) is not valid'.format(id_))
id_ = int(id_)
logger.log(15, 'Fetching information of doujinshi id {0}'.format(id_))
doujinshi = dict()
doujinshi['id'] = id_
url = '{0}/{1}'.format(constant.DETAIL_URL, id_)
i = 0
while 5 > i:
try:
response = request('get', url).json()
except Exception as e:
i += 1
if not i < 5:
logger.critical(str(e))
exit(1)
continue
break
doujinshi['name'] = response['title']['english']
doujinshi['subtitle'] = response['title']['japanese']
doujinshi['img_id'] = response['media_id']
doujinshi['ext'] = ''.join([i['t'] for i in response['images']['pages']])
doujinshi['pages'] = len(response['images']['pages'])
# gain information of the doujinshi
needed_fields = ['character', 'artist', 'language', 'tag', 'parody', 'group', 'category']
for tag in response['tags']:
tag_type = tag['type']
if tag_type in needed_fields:
if tag_type == 'tag':
if tag_type not in doujinshi:
doujinshi[tag_type] = {}
tag['name'] = tag['name'].replace(' ', '-')
tag['name'] = tag['name'].lower()
doujinshi[tag_type][tag['name']] = tag['id']
elif tag_type not in doujinshi:
doujinshi[tag_type] = tag['name']
else:
doujinshi[tag_type] += ', ' + tag['name']
return doujinshi
if __name__ == '__main__':
print(doujinshi_parser("32271"))
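For orientation, the dict built by doujinshi_parser() above (and consumed by Doujinshi(**doujinshi_info) in command.py) carries roughly these keys; the values below are invented for illustration, and the tag/artist fields from the elided part of the hunk are omitted:
doujinshi_info = {
    'id': 123456,                   # numeric gallery id
    'name': 'Some Title',
    'pretty_name': 'Some Title',
    'subtitle': '',
    'favorite_counts': 0,
    'img_id': '654321',             # media id extracted from the cover URL
    'ext': ['jpg', 'jpg', 'webp'],  # one extension per page thumbnail
    'pages': 3,
}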

View File

@ -1,12 +1,17 @@
# coding: utf-8
import json
import os
from nhentai.constant import PATH_SEPARATOR, LANGUAGE_ISO
from xml.sax.saxutils import escape
from requests.structures import CaseInsensitiveDict
def serialize_json(doujinshi, dir):
def serialize_json(doujinshi, output_dir: str):
metadata = {'title': doujinshi.name,
'subtitle': doujinshi.info.subtitle}
if doujinshi.favorite_counts:
metadata['favorite_counts'] = doujinshi.favorite_counts
if doujinshi.info.date:
metadata['upload_date'] = doujinshi.info.date
if doujinshi.info.parodies:
@ -21,17 +26,17 @@ def serialize_json(doujinshi, dir):
metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
if doujinshi.info.languages:
metadata['language'] = [i.strip() for i in doujinshi.info.languages.split(',')]
metadata['category'] = doujinshi.info.categories
metadata['category'] = [i.strip() for i in doujinshi.info.categories.split(',')]
metadata['URL'] = doujinshi.url
metadata['Pages'] = doujinshi.pages
with open(os.path.join(dir, 'metadata.json'), 'w') as f:
json.dump(metadata, f, separators=','':')
with open(os.path.join(output_dir, 'metadata.json'), 'w') as f:
json.dump(metadata, f, separators=(',', ':'))
def serialize_comicxml(doujinshi, dir):
def serialize_comic_xml(doujinshi, output_dir):
from iso8601 import parse_date
with open(os.path.join(dir, 'ComicInfo.xml'), 'w') as f:
with open(os.path.join(output_dir, 'ComicInfo.xml'), 'w', encoding="utf-8") as f:
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<ComicInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n')
@ -43,9 +48,11 @@ def serialize_comicxml(doujinshi, dir):
xml_write_simple_tag(f, 'PageCount', doujinshi.pages)
xml_write_simple_tag(f, 'URL', doujinshi.url)
xml_write_simple_tag(f, 'NhentaiId', doujinshi.id)
xml_write_simple_tag(f, 'Favorites', doujinshi.favorite_counts)
xml_write_simple_tag(f, 'Genre', doujinshi.info.categories)
xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and 'full color' in doujinshi.info.tags else 'Yes')
xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and
'full color' in doujinshi.info.tags else 'Yes')
if doujinshi.info.date:
dt = parse_date(doujinshi.info.date)
@ -54,29 +61,52 @@ def serialize_comicxml(doujinshi, dir):
xml_write_simple_tag(f, 'Day', dt.day)
if doujinshi.info.parodies:
xml_write_simple_tag(f, 'Series', doujinshi.info.parodies)
if doujinshi.info.groups:
xml_write_simple_tag(f, 'Groups', doujinshi.info.groups)
if doujinshi.info.characters:
xml_write_simple_tag(f, 'Characters', doujinshi.info.characters)
if doujinshi.info.tags:
xml_write_simple_tag(f, 'Tags', doujinshi.info.tags)
if doujinshi.info.artists:
xml_write_simple_tag(f, 'Writer', ' & '.join([i.strip() for i in doujinshi.info.artists.split(',')]))
# if doujinshi.info.groups:
# metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
xml_write_simple_tag(f, 'Writer', ' & '.join([i.strip() for i in
doujinshi.info.artists.split(',')]))
if doujinshi.info.languages:
languages = [i.strip() for i in doujinshi.info.languages.split(',')]
xml_write_simple_tag(f, 'Translated', 'Yes' if 'translated' in languages else 'No')
[xml_write_simple_tag(f, 'Language', i) for i in languages if i != 'translated']
[xml_write_simple_tag(f, 'LanguageISO', LANGUAGE_ISO[i]) for i in languages
if (i != 'translated' and i in LANGUAGE_ISO)]
f.write('</ComicInfo>')
def serialize_info_txt(doujinshi, output_dir: str):
info_txt_path = os.path.join(output_dir, 'info.txt')
f = open(info_txt_path, 'w', encoding='utf-8')
fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'GROUPS', 'CIRCLE', 'SCANLATOR',
'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
'TAGS', 'FAVORITE COUNTS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
'SERIES', 'PARODY', 'URL']
temp_dict = CaseInsensitiveDict(dict(doujinshi.table))
for i in fields:
v = temp_dict.get(i)
v = temp_dict.get(f'{i}s') if v is None else v
v = doujinshi.info.get(i.lower(), None) if v is None else v
v = doujinshi.info.get(f'{i.lower()}s', "Unknown") if v is None else v
f.write(f'{i}: {v}\n')
f.close()
def xml_write_simple_tag(f, name, val, indent=1):
f.write('{}<{}>{}</{}>\n'.format(' ' * indent, name, escape(str(val)), name))
f.write(f'{" "*indent}<{name}>{escape(str(val))}</{name}>\n')
def merge_json():
lst = []
output_dir = "./"
output_dir = f".{PATH_SEPARATOR}"
os.chdir(output_dir)
doujinshi_dirs = next(os.walk('.'))[1]
for folder in doujinshi_dirs:
@ -120,7 +150,8 @@ def serialize_unique(lst):
def set_js_database():
with open('data.js', 'w') as f:
indexed_json = merge_json()
unique_json = json.dumps(serialize_unique(indexed_json), separators=','':')
indexed_json = json.dumps(indexed_json, separators=','':')
unique_json = json.dumps(serialize_unique(indexed_json), separators=(',', ':'))
indexed_json = json.dumps(indexed_json, separators=(',', ':'))
f.write('var data = ' + indexed_json)
f.write(';\nvar tags = ' + unique_json)
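Put together, serialize_json() above produces a compact metadata.json shaped roughly like the dict below (values are placeholders; fields from the elided middle of the hunk, such as parodies and artists, are left out, and optional fields only appear when the corresponding info is set):
metadata = {
    'title': 'Some Title',
    'subtitle': '',
    'favorite_counts': 12,
    'upload_date': '2024-01-01T00:00:00+00:00',
    'group': ['some group'],
    'language': ['japanese', 'translated'],
    'category': ['doujinshi'],
    'URL': 'https://nhentai.net/g/123456',
    'Pages': 3,
}
# json.dump(metadata, f, separators=(',', ':')) then writes it without any extra whitespace.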

View File

@ -1,37 +1,81 @@
# coding: utf-8
from __future__ import unicode_literals, print_function
import json
import sys
import re
import os
import string
import zipfile
import shutil
import httpx
import requests
import sqlite3
import urllib.parse
from typing import Tuple
from nhentai import constant
from nhentai.constant import PATH_SEPARATOR
from nhentai.logger import logger
from nhentai.serializer import serialize_json, serialize_comicxml, set_js_database
from nhentai.serializer import serialize_comic_xml, serialize_json, serialize_info_txt, set_js_database
MAX_FIELD_LENGTH = 100
EXTENSIONS = ('.png', '.jpg', '.jpeg', '.gif', '.webp')
def get_headers():
headers = {
'Referer': constant.LOGIN_URL
}
user_agent = constant.CONFIG.get('useragent')
if user_agent and user_agent.strip():
headers['User-Agent'] = user_agent
cookie = constant.CONFIG.get('cookie')
if cookie and cookie.strip():
headers['Cookie'] = cookie
return headers
def request(method, url, **kwargs):
session = requests.Session()
session.headers.update({
'Referer': constant.LOGIN_URL,
'User-Agent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
'Cookie': constant.CONFIG['cookie']
})
return getattr(session, method)(url, proxies=constant.CONFIG['proxy'], verify=False, **kwargs)
session.headers.update(get_headers())
if not kwargs.get('proxies', None):
kwargs['proxies'] = {
'https': constant.CONFIG['proxy'],
'http': constant.CONFIG['proxy'],
}
return getattr(session, method)(url, verify=False, **kwargs)
async def async_request(method, url, proxy = None, **kwargs):
headers=get_headers()
if proxy is None:
proxy = constant.CONFIG['proxy']
if isinstance(proxy, (str, )) and not proxy:
proxy = None
async with httpx.AsyncClient(headers=headers, verify=False, proxy=proxy, **kwargs) as client:
response = await client.request(method, url, **kwargs)
return response
def check_cookie():
response = request('get', constant.BASE_URL).text
username = re.findall('"/users/\d+/(.*?)"', response)
response = request('get', constant.BASE_URL)
if response.status_code == 403 and 'Just a moment...' in response.text:
logger.error('Blocked by Cloudflare captcha, please set your cookie and useragent')
sys.exit(1)
username = re.findall('"/users/[0-9]+/(.*?)"', response.text)
if not username:
logger.error('Cannot get your username, please check your cookie or use `nhentai --cookie` to set your cookie')
logger.warning(
'Cannot get your username, please check your cookie or use `nhentai --cookie` to set your cookie')
else:
logger.info('Login successfully! Your username: {}'.format(username[0]))
logger.log(16, f'Login successfully! Your username: {username[0]}')
class _Singleton(type):
@ -48,15 +92,6 @@ class Singleton(_Singleton(str('SingletonMeta'), (object,), {})):
pass
def urlparse(url):
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
return urlparse(url)
def readfile(path):
loc = os.path.dirname(__file__)
@ -64,52 +99,102 @@ def readfile(path):
return file.read()
def generate_html(output_dir='.', doujinshi_obj=None):
image_html = ''
def parse_doujinshi_obj(
output_dir: str,
doujinshi_obj=None,
file_type: str = ''
) -> Tuple[str, str]:
filename = f'.{PATH_SEPARATOR}doujinshi.{file_type}'
if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
_filename = f'{doujinshi_obj.filename}.{file_type}'
if file_type == 'pdf':
_filename = _filename.replace('/', '-')
filename = os.path.join(output_dir, _filename)
else:
doujinshi_dir = '.'
if file_type == 'html':
return output_dir, 'index.html'
doujinshi_dir = f'.{PATH_SEPARATOR}'
if not os.path.exists(doujinshi_dir):
os.makedirs(doujinshi_dir)
return doujinshi_dir, filename
def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, 'html')
image_html = ''
if not os.path.exists(doujinshi_dir):
logger.warning(f'Path "{doujinshi_dir}" does not exist, creating.')
try:
os.makedirs(doujinshi_dir)
except EnvironmentError as e:
logger.critical(e)
file_list = os.listdir(doujinshi_dir)
file_list.sort()
for image in file_list:
if not os.path.splitext(image)[1] in ('.jpg', '.png'):
if not os.path.splitext(image)[1] in EXTENSIONS:
continue
image_html += f'<img src="{image}" class="image-item"/>\n'
image_html += '<img src="{0}" class="image-item"/>\n'\
.format(image)
html = readfile('viewer/index.html')
css = readfile('viewer/styles.css')
js = readfile('viewer/scripts.js')
html = readfile(f'viewer/{template}/index.html')
css = readfile(f'viewer/{template}/styles.css')
js = readfile(f'viewer/{template}/scripts.js')
if doujinshi_obj is not None:
serialize_json(doujinshi_obj, doujinshi_dir)
# serialize_json(doujinshi_obj, doujinshi_dir)
name = doujinshi_obj.name
if sys.version_info < (3, 0):
name = doujinshi_obj.name.encode('utf-8')
else:
name = {'title': 'nHentai HTML Viewer'}
metadata_path = os.path.join(doujinshi_dir, "metadata.json")
if os.path.exists(metadata_path):
with open(metadata_path, 'r') as file:
doujinshi_info = json.loads(file.read())
name = doujinshi_info.get("title")
else:
name = 'nHentai HTML Viewer'
data = html.format(TITLE=name, IMAGES=image_html, SCRIPTS=js, STYLES=css)
try:
if sys.version_info < (3, 0):
with open(os.path.join(doujinshi_dir, 'index.html'), 'w') as f:
f.write(data)
else:
with open(os.path.join(doujinshi_dir, 'index.html'), 'wb') as f:
f.write(data.encode('utf-8'))
with open(os.path.join(doujinshi_dir, 'index.html'), 'wb') as f:
f.write(data.encode('utf-8'))
logger.log(15, 'HTML Viewer has been written to \'{0}\''.format(os.path.join(doujinshi_dir, 'index.html')))
logger.log(16, f'HTML Viewer has been written to "{os.path.join(doujinshi_dir, "index.html")}"')
except Exception as e:
logger.warning('Writing HTML Viewer failed ({})'.format(str(e)))
logger.warning(f'Writing HTML Viewer failed ({e})')
def generate_main_html(output_dir='./'):
def move_to_folder(output_dir='.', doujinshi_obj=None, file_type=None):
if not file_type:
raise RuntimeError('no file_type specified')
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)
for fn in os.listdir(doujinshi_dir):
file_path = os.path.join(doujinshi_dir, fn)
_, ext = os.path.splitext(file_path)
if ext in ['.pdf', '.cbz']:
continue
if os.path.isfile(file_path):
try:
os.remove(file_path)
except Exception as e:
print(f"Error deleting file: {e}")
shutil.move(filename, os.path.join(doujinshi_dir, os.path.basename(filename)))
def generate_main_html(output_dir=f'.{PATH_SEPARATOR}'):
"""
Generate a main html to show all the contain doujinshi.
Generate a main html to show all the contained doujinshi.
With a link to their `index.html`.
Default output folder will be the CLI path.
"""
@ -138,7 +223,7 @@ def generate_main_html(output_dir='./'):
files.sort()
if 'index.html' in files:
logger.info('Add doujinshi \'{}\''.format(folder))
logger.info(f'Add doujinshi "{folder}"')
else:
continue
@ -148,115 +233,115 @@ def generate_main_html(output_dir='./'):
else:
title = 'nHentai HTML Viewer'
image_html += element.format(FOLDER=folder, IMAGE=image, TITLE=title)
image_html += element.format(FOLDER=urllib.parse.quote(folder), IMAGE=image, TITLE=title)
if image_html == '':
logger.warning('No index.html found, --gen-main paused.')
return
try:
data = main.format(STYLES=css, SCRIPTS=js, PICTURE=image_html)
if sys.version_info < (3, 0):
with open('./main.html', 'w') as f:
f.write(data)
else:
with open('./main.html', 'wb') as f:
f.write(data.encode('utf-8'))
shutil.copy(os.path.dirname(__file__)+'/viewer/logo.png', './')
with open('./main.html', 'wb') as f:
f.write(data.encode('utf-8'))
shutil.copy(os.path.dirname(__file__) + '/viewer/logo.png', './')
set_js_database()
logger.log(
15, 'Main Viewer has been written to \'{0}main.html\''.format(output_dir))
output_dir = output_dir[:-1] if output_dir.endswith('/') else output_dir
logger.log(16, f'Main Viewer has been written to "{output_dir}/main.html"')
except Exception as e:
logger.warning('Writing Main Viewer failed ({})'.format(str(e)))
logger.warning(f'Writing Main Viewer failed ({e})')
def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=False):
if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
if write_comic_info:
serialize_comicxml(doujinshi_obj, doujinshi_dir)
cbz_filename = os.path.join(os.path.join(doujinshi_dir, '..'), '{}.cbz'.format(doujinshi_obj.filename))
else:
cbz_filename = './doujinshi.cbz'
doujinshi_dir = '.'
def generate_cbz(doujinshi_dir, filename):
file_list = os.listdir(doujinshi_dir)
file_list.sort()
logger.info('Writing CBZ file to path: {}'.format(cbz_filename))
with zipfile.ZipFile(cbz_filename, 'w') as cbz_pf:
logger.info(f'Writing CBZ file to path: {filename}')
with zipfile.ZipFile(filename, 'w') as cbz_pf:
for image in file_list:
image_path = os.path.join(doujinshi_dir, image)
cbz_pf.write(image_path, image)
if rm_origin_dir:
shutil.rmtree(doujinshi_dir, ignore_errors=True)
logger.log(15, 'Comic Book CBZ file has been written to \'{0}\''.format(doujinshi_dir))
logger.log(16, f'Comic Book CBZ file has been written to "{filename}"')
def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
try:
import img2pdf
except ImportError:
logger.error("Please install img2pdf package by using pip.")
def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=False):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)
"""Write images to a PDF file using img2pdf."""
if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
pdf_filename = os.path.join(
os.path.join(doujinshi_dir, '..'),
'{}.pdf'.format(doujinshi_obj.filename)
)
if os.path.exists(f'{doujinshi_dir}.{file_type}') and not regenerate:
logger.info(f'Skipped {file_type} file generation: {doujinshi_dir}.{file_type} already exists')
return
if file_type == 'cbz':
serialize_comic_xml(doujinshi_obj, doujinshi_dir)
generate_cbz(doujinshi_dir, filename)
elif file_type == 'pdf':
try:
import img2pdf
"""Write images to a PDF file using img2pdf."""
file_list = [f for f in os.listdir(doujinshi_dir) if f.lower().endswith(EXTENSIONS)]
file_list.sort()
logger.info(f'Writing PDF file to path: {filename}')
with open(filename, 'wb') as pdf_f:
full_path_list = (
[os.path.join(doujinshi_dir, image) for image in file_list]
)
pdf_f.write(img2pdf.convert(full_path_list, rotation=img2pdf.Rotation.ifvalid))
logger.log(16, f'PDF file has been written to "{filename}"')
except ImportError:
logger.error("Please install img2pdf package by using pip.")
else:
pdf_filename = './doujinshi.pdf'
doujinshi_dir = '.'
file_list = os.listdir(doujinshi_dir)
file_list.sort()
logger.info('Writing PDF file to path: {}'.format(pdf_filename))
with open(pdf_filename, 'wb') as pdf_f:
full_path_list = (
[os.path.join(doujinshi_dir, image) for image in file_list]
)
pdf_f.write(img2pdf.convert(full_path_list))
if rm_origin_dir:
shutil.rmtree(doujinshi_dir, ignore_errors=True)
logger.log(15, 'PDF file has been written to \'{0}\''.format(doujinshi_dir))
raise ValueError('invalid file type')
def format_filename(s):
"""Take a string and return a valid filename constructed from the string.
Uses a whitelist approach: any characters not present in valid_chars are
removed. Also spaces are replaced with underscores.
def generate_metadata(output_dir, doujinshi_obj=None):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, '')
serialize_json(doujinshi_obj, doujinshi_dir)
serialize_comic_xml(doujinshi_obj, doujinshi_dir)
serialize_info_txt(doujinshi_obj, doujinshi_dir)
logger.log(16, f'Metadata files have been written to "{doujinshi_dir}"')
Note: this method may produce invalid filenames such as ``, `.` or `..`
When I use this method I prepend a date string like '2009_01_15_19_46_32_'
and append a file extension like '.txt', so I avoid the potential of using
an invalid filename.
"""
def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
"""
It used to be a whitelist approach that allowed only ASCII letters and a few symbols,
but most doujinshi names include Japanese multi-byte characters, which were rejected,
so it now uses a blacklist approach instead.
If the filename includes any forbidden character (\'/:,;*?"<>|), it is replaced with a space (" ").
"""
# maybe you can use `--format` to select a suitable filename
valid_chars = "-_.()[] %s%s" % (string.ascii_letters, string.digits)
filename = ''.join(c for c in s if c in valid_chars)
if len(filename) > 100:
filename = filename[:100] + '...]'
if not _truncate_only:
ban_chars = '\\\'/:,;*?"<>|\t\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b'
filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip()
filename = ' '.join(filename.split())
while filename.endswith('.'):
filename = filename[:-1]
else:
filename = s
# limit `length` chars
if len(filename) >= length:
filename = filename[:length - 1] + u'…'
# Remove [] from filename
filename = filename.replace('[]', '').strip()
return filename
def signal_handler(signal, frame):
def signal_handler(_signal, _frame):
logger.error('Ctrl-C signal received. Stopping...')
exit(1)
sys.exit(1)
def paging(page_string):
# 1,3-5,14 -> [1, 3, 4, 5, 14]
if not page_string:
return []
# default, the first page
return [1]
page_list = []
for i in page_string.split(','):
@ -264,7 +349,7 @@ def paging(page_string):
start, end = i.split('-')
if not (start.isdigit() and end.isdigit()):
raise Exception('Invalid page number')
page_list.extend(list(range(int(start), int(end)+1)))
page_list.extend(list(range(int(start), int(end) + 1)))
else:
if not i.isdigit():
raise Exception('Invalid page number')
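The paging() helper above expands the comma/range page syntax passed on the command line (options.page in command.py); two concrete expansions, assuming the package is installed and the helper is importable as it is in command.py:
from nhentai.utils import paging

print(paging('1,3-5,14'))  # -> [1, 3, 4, 5, 14]
print(paging(''))          # -> [1]  (defaults to the first page)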

View File

@ -0,0 +1,87 @@
const pages = Array.from(document.querySelectorAll('img.image-item'));
let currentPage = 0;
function changePage(pageNum) {
const previous = pages[currentPage];
const current = pages[pageNum];
if (current == null) {
return;
}
previous.classList.remove('current');
current.classList.add('current');
currentPage = pageNum;
const display = document.getElementById('dest');
display.style.backgroundImage = `url("${current.src}")`;
scroll(0,0)
document.getElementById('page-num')
.innerText = [
(pageNum + 1).toLocaleString(),
pages.length.toLocaleString()
].join('\u200a/\u200a');
}
changePage(0);
document.getElementById('list').onclick = event => {
if (pages.includes(event.target)) {
changePage(pages.indexOf(event.target));
}
};
document.getElementById('image-container').onclick = event => {
const width = document.getElementById('image-container').clientWidth;
const clickPos = event.clientX / width;
if (clickPos < 0.5) {
changePage(currentPage - 1);
} else {
changePage(currentPage + 1);
}
};
document.onkeypress = event => {
switch (event.key.toLowerCase()) {
// Previous Image
case 'w':
scrollBy(0, -40);
break;
case 'a':
changePage(currentPage - 1);
break;
// Return to previous page
case 'q':
window.history.go(-1);
break;
// Next Image
case ' ':
case 's':
scrollBy(0, 40);
break;
case 'd':
changePage(currentPage + 1);
break;
}  // arrow keys don't fire keypress events; they are handled in onkeydown below
};
document.onkeydown = event =>{
switch (event.keyCode) {
case 37: //left
changePage(currentPage - 1);
break;
case 38: //up
changePage(currentPage - 1);
break;
case 39: //right
changePage(currentPage + 1);
break;
case 40: //down
changePage(currentPage + 1);
break;
}
};

View File

@ -139,7 +139,7 @@ function filter_searcher(){
break
}
}
if (verifier){doujinshi_id.push(data[i].Folder);}
if (verifier){doujinshi_id.push(data[i].Folder.replace("_", " "));}
}
var gallery = document.getElementsByClassName("gallery-favorite");
for (var i = 0; i < gallery.length; i++){
@ -174,4 +174,4 @@ function tag_maker(data){
document.getElementById("tags").appendChild(node);
}
}
}
}

View File

@ -0,0 +1,25 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=yes, viewport-fit=cover" />
<title>{TITLE}</title>
<style>
{STYLES}
</style>
</head>
<body>
<nav id="list" hidden=true>
{IMAGES}</nav>
<div id="image-container">
<div id="dest"></div>
<span id="page-num"></span>
</div>
<script>
{SCRIPTS}
</script>
</body>
</html>

View File

@ -28,12 +28,6 @@ function changePage(pageNum) {
changePage(0);
document.getElementById('list').onclick = event => {
if (pages.includes(event.target)) {
changePage(pages.indexOf(event.target));
}
};
document.getElementById('image-container').onclick = event => {
const width = document.getElementById('image-container').clientWidth;
const clickPos = event.clientX / width;

View File

@ -0,0 +1,75 @@
*, *::after, *::before {
box-sizing: border-box;
}
img {
vertical-align: middle;
}
html, body {
display: flex;
background-color: #e8e6e6;
height: 100%;
width: 100%;
padding: 0;
margin: 0;
font-family: sans-serif;
}
#list {
height: 2000px;
overflow: scroll;
width: 260px;
text-align: center;
}
#list img {
width: 200px;
padding: 10px;
border-radius: 10px;
margin: 15px 0;
cursor: pointer;
}
#list img.current {
background: #0003;
}
#image-container {
flex: auto;
height: 100%;
background: rgb(0, 0, 0);
color: rgb(100, 100, 100);
text-align: center;
cursor: pointer;
-webkit-user-select: none;
user-select: none;
position: relative;
}
#image-container #dest {
height: 2000px;
width: 100%;
background-size: contain;
background-repeat: no-repeat;
background-position: top;
margin-left: auto;
margin-right: auto;
max-width: 100%;
max-height: 100vh;
margin: auto;
}
#image-container #page-num {
position: static;
font-size: 9pt;
left: 10px;
bottom: 5px;
font-weight: bold;
opacity: 0.9;
text-shadow: /* Duplicate the same shadow to make it very strong */
0 0 2px #222,
0 0 2px #222,
0 0 2px #222;
}

poetry.lock (generated, 370 lines added)
View File

@ -0,0 +1,370 @@
# This file is automatically @generated by Poetry 2.1.1 and should not be changed by hand.
[[package]]
name = "anyio"
version = "4.5.2"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
{file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1) ; python_version >= \"3.10\"", "uvloop (>=0.21.0b1) ; platform_python_implementation == \"CPython\" and platform_system != \"Windows\""]
trio = ["trio (>=0.26.1)"]
[[package]]
name = "beautifulsoup4"
version = "4.12.3"
description = "Screen-scraping library"
optional = false
python-versions = ">=3.6.0"
groups = ["main"]
files = [
{file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"},
{file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"},
]
[package.dependencies]
soupsieve = ">1.2"
[package.extras]
cchardet = ["cchardet"]
chardet = ["chardet"]
charset-normalizer = ["charset-normalizer"]
html5lib = ["html5lib"]
lxml = ["lxml"]
[[package]]
name = "certifi"
version = "2024.12.14"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
groups = ["main"]
files = [
{file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
{file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
]
[[package]]
name = "chardet"
version = "5.2.0"
description = "Universal encoding detector for Python 3"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"},
{file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"},
]
[[package]]
name = "charset-normalizer"
version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
{file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
{file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
{file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
{file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
{file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
{file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
{file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
groups = ["main"]
markers = "python_version < \"3.11\""
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
{file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
[package.extras]
brotli = ["brotli ; platform_python_implementation == \"CPython\"", "brotlicffi ; platform_python_implementation != \"CPython\""]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
groups = ["main"]
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "iso8601"
version = "1.1.0"
description = "Simple module to parse ISO 8601 dates"
optional = false
python-versions = ">=3.6.2,<4.0"
groups = ["main"]
files = [
{file = "iso8601-1.1.0-py3-none-any.whl", hash = "sha256:8400e90141bf792bce2634df533dc57e3bee19ea120a87bebcd3da89a58ad73f"},
{file = "iso8601-1.1.0.tar.gz", hash = "sha256:32811e7b81deee2063ea6d2e94f8819a86d1f3811e49d23623a41fa832bef03f"},
]
[[package]]
name = "requests"
version = "2.32.4"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "requests-2.32.4-py3-none-any.whl", hash = "sha256:27babd3cda2a6d50b30443204ee89830707d396671944c998b5975b031ac2b2c"},
{file = "requests-2.32.4.tar.gz", hash = "sha256:27d0316682c8a29834d3264820024b62a36942083d52caf2f14c0591336d3422"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset_normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "soupsieve"
version = "2.6"
description = "A modern CSS selector implementation for Beautiful Soup."
optional = false
python-versions = ">=3.8"
groups = ["main"]
files = [
{file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"},
{file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
]
[[package]]
name = "tabulate"
version = "0.9.0"
description = "Pretty-print tabular data"
optional = false
python-versions = ">=3.7"
groups = ["main"]
files = [
{file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
{file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
]
[package.extras]
widechars = ["wcwidth"]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
groups = ["main"]
markers = "python_version < \"3.11\""
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "urllib3"
version = "1.26.20"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
groups = ["main"]
files = [
{file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
{file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"},
]
[package.extras]
brotli = ["brotli (==1.0.9) ; os_name != \"nt\" and python_version < \"3\" and platform_python_implementation == \"CPython\"", "brotli (>=1.0.9) ; python_version >= \"3\" and platform_python_implementation == \"CPython\"", "brotlicffi (>=0.8.0) ; (os_name != \"nt\" or python_version >= \"3\") and platform_python_implementation != \"CPython\"", "brotlipy (>=0.6.0) ; os_name == \"nt\" and python_version < \"3\""]
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress ; python_version == \"2.7\"", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[metadata]
lock-version = "2.1"
python-versions = "^3.8"
content-hash = "b17c2cdd4b140f2ab8081bca7d94630e821fa2e882ac768b1bd8cf3ec58726ce"

pyproject.toml Normal file (28 lines added)

@@ -0,0 +1,28 @@
[tool.poetry]
name = "nhentai"
version = "0.6.0-beta"
description = "nhentai doujinshi downloader"
authors = ["Ricter Z <ricterzheng@gmail.com>"]
license = "MIT"
readme = "README.rst"
include = ["nhentai/viewer/**"]
[tool.poetry.dependencies]
python = "^3.8"
requests = "^2.32.3"
soupsieve = "^2.6"
beautifulsoup4 = "^4.12.3"
tabulate = "^0.9.0"
iso8601 = "^1.1.0"
urllib3 = "^1.26.20"
httpx = "^0.28.1"
chardet = "^5.2.0"
[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"
[tool.poetry.scripts]
nhentai = 'nhentai.command:main'
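The [tool.poetry.scripts] entry replaces the console_scripts declaration from the removed setup.py shown further down: installing the package generates an `nhentai` executable that imports and calls nhentai.command:main. A rough sketch of what that generated wrapper amounts to (illustrative only; the actual script is produced by the installer):

# Rough equivalent of the console script generated from the entry point above.
import sys

from nhentai.command import main

if __name__ == '__main__':
    sys.exit(main())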

qodana.yaml Executable file (29 lines added)

@@ -0,0 +1,29 @@
#-------------------------------------------------------------------------------#
# Qodana analysis is configured by qodana.yaml file #
# https://www.jetbrains.com/help/qodana/qodana-yaml.html #
#-------------------------------------------------------------------------------#
version: "1.0"
#Specify inspection profile for code analysis
profile:
  name: qodana.starter
#Enable inspections
#include:
# - name: <SomeEnabledInspectionId>
#Disable inspections
#exclude:
# - name: <SomeDisabledInspectionId>
# paths:
# - <path/where/not/run/inspection>
#Execute shell command before Qodana execution (Applied in CI/CD pipeline)
#bootstrap: sh ./prepare-qodana.sh
#Install IDE plugins before Qodana execution (Applied in CI/CD pipeline)
#plugins:
# - id: <plugin.id> #(plugin id can be found at https://plugins.jetbrains.com)
#Specify Qodana linter for analysis (Applied in CI/CD pipeline)
linter: jetbrains/qodana-python:2024.3


@@ -1,7 +0,0 @@
requests>=2.5.0
soupsieve<2.0
BeautifulSoup4>=4.0.0
threadpool>=1.2.7
tabulate>=0.7.5
future>=0.15.2
iso8601 >= 0.1


@@ -1,3 +0,0 @@
[metadata]
description-file = README.md


@ -1,41 +0,0 @@
# coding: utf-8
from __future__ import print_function, unicode_literals
import sys
import codecs
from setuptools import setup, find_packages
from nhentai import __version__, __author__, __email__
with open('requirements.txt') as f:
requirements = [l for l in f.read().splitlines() if l]
def long_description():
with codecs.open('README.rst', 'rb') as readme:
if not sys.version_info < (3, 0, 0):
return readme.read().decode('utf-8')
setup(
name='nhentai',
version=__version__,
packages=find_packages(),
author=__author__,
author_email=__email__,
keywords=['nhentai', 'doujinshi', 'downloader'],
description='nhentai.net doujinshis downloader',
long_description=long_description(),
url='https://github.com/RicterZ/nhentai',
download_url='https://github.com/RicterZ/nhentai/tarball/master',
include_package_data=True,
zip_safe=False,
install_requires=requirements,
entry_points={
'console_scripts': [
'nhentai = nhentai.command:main',
]
},
license='MIT',
)

tests/__init__.py Normal file (empty, 0 lines)

tests/test_download.py Normal file (56 lines added)

@@ -0,0 +1,56 @@
import unittest
import os
import zipfile
import urllib3.exceptions
from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.downloader import Downloader, CompressedDownloader
from nhentai.parser import doujinshi_parser
from nhentai.doujinshi import Doujinshi
from nhentai.utils import generate_html
did = 440546
def has_jepg_file(path):
    with zipfile.ZipFile(path, 'r') as zf:
        return '01.jpg' in zf.namelist()
def is_zip_file(path):
    try:
        with zipfile.ZipFile(path, 'r') as _:
            return True
    except (zipfile.BadZipFile, FileNotFoundError):
        return False
class TestDownload(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')
        self.info = Doujinshi(**doujinshi_parser(did), name_format='%i')
    def test_download(self):
        info = self.info
        info.downloader = Downloader(path='/tmp', threads=5)
        info.download()
        self.assertTrue(os.path.exists(f'/tmp/{did}/01.jpg'))
        generate_html('/tmp', info)
        self.assertTrue(os.path.exists(f'/tmp/{did}/index.html'))
    def test_zipfile_download(self):
        info = self.info
        info.downloader = CompressedDownloader(path='/tmp', threads=5)
        info.download()
        zipfile_path = f'/tmp/{did}.zip'
        self.assertTrue(os.path.exists(zipfile_path))
        self.assertTrue(is_zip_file(zipfile_path))
        self.assertTrue(has_jepg_file(zipfile_path))
if __name__ == '__main__':
    unittest.main()
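Read together, the test above also documents the public download flow: configure cookie and user agent, parse the gallery, attach a downloader, and call download(). A hedged usage sketch assembled only from the calls the test exercises (nothing here beyond what the test itself imports):

# Usage sketch based on the calls exercised by tests/test_download.py above.
import os

from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.doujinshi import Doujinshi
from nhentai.downloader import CompressedDownloader
from nhentai.parser import doujinshi_parser

def fetch_as_zip(doujinshi_id, output_dir='/tmp'):
    load_config()
    constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
    constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')
    info = Doujinshi(**doujinshi_parser(doujinshi_id), name_format='%i')
    info.downloader = CompressedDownloader(path=output_dir, threads=5)
    info.download()
    return f'{output_dir}/{doujinshi_id}.zip'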

tests/test_login.py Normal file (26 lines added)

@@ -0,0 +1,26 @@
import os
import unittest
import urllib3.exceptions
from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.utils import check_cookie
class TestLogin(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')
    def test_cookie(self):
        try:
            check_cookie()
            self.assertTrue(True)
        except Exception as e:
            self.assertIsNone(e)
if __name__ == '__main__':
    unittest.main()

tests/test_parser.py Normal file (27 lines added)

@@ -0,0 +1,27 @@
import unittest
import os
import urllib3.exceptions
from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.parser import search_parser, doujinshi_parser, favorites_parser
class TestParser(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')
    def test_search(self):
        result = search_parser('umaru', 'recent', [1], False)
        self.assertTrue(len(result) > 0)
    def test_doujinshi_parser(self):
        result = doujinshi_parser(123456)
        self.assertTrue(result['pages'] == 84)
    def test_favorites_parser(self):
        result = favorites_parser(page=[1])
        self.assertTrue(len(result) > 0)