Compare commits

...

674 Commits

Author SHA1 Message Date
Ricter Zheng
6752edfc9d
Merge pull request #402 from hzxjy1/zipTest
Close zipfile handler manually and add a test
2025-03-26 22:57:29 +08:00
Ricter Zheng
9a5fcd7d23
Merge pull request #401 from hzxjy1/NoneType
Fix none attributes in session headers
2025-03-26 22:56:11 +08:00
Hellagur4225
b4cc498a5f add a test for zipfile download 2025-03-26 15:14:15 +08:00
Hellagur4225
a4eb7f3b5f fix the uncontrollable zipfile closing function 2025-03-26 15:11:26 +08:00
Hellagur4225
36aa321ade Fix none attributes in session headers 2025-03-24 10:13:42 +08:00
Ricter Zheng
aa84b57a43 use argparse, fix #396 2025-03-12 02:50:22 +08:00
ricterz
a3c70a0c30 fix #396 2025-03-11 22:23:17 +08:00
Ricter Zheng
86060ae0a6
Merge pull request #398 from hzxjy1/zipfile
feat: add compress option
2025-03-11 22:04:09 +08:00
ricterz
9648c21b32 tiny fix of #397 2025-03-11 22:02:37 +08:00
Hellagur4225
625feb5d21 Remove unused option 2025-03-08 17:37:42 +08:00
Hellagur4225
6efbc73c10 feat: add compress option 2025-03-08 17:31:56 +08:00
ricterz
34c1ea8952 new feature #396 2025-02-28 18:59:32 +08:00
ricterz
2e895d8d0f fix title #396 2025-02-28 18:24:56 +08:00
ricterz
0c9b92ce10 0.6.0-beta #394 2025-02-28 00:17:05 +08:00
ricterz
ca71a72747 fix #395 2025-02-27 22:07:40 +08:00
ricterz
1b7f19ee18 0.5.25, fix #393 2025-02-26 00:13:41 +08:00
ricterz
132f4c83da Merge branch 'master' of github.com:RicterZ/nhentai 2025-02-26 00:12:49 +08:00
ricterz
6789b2b363 fix bug of cover.webp.webp 2025-02-25 23:51:13 +08:00
Ricter Zheng
a6ac725ca7
Merge pull request #392 from akakishi/master
Update installation instructions in README.rst
2025-02-23 20:29:15 +08:00
akakishi
b32962bca4
Update README.rst
File `setup.py` was removed in a previous commit; updated README to reflect the new installation process.
2025-02-23 01:18:54 -03:00
ricterz
8a7be0e33d 0.5.24 2025-02-09 20:16:44 +08:00
ricterz
0a47527461 optimize logger output #390 2025-02-09 20:15:17 +08:00
ricterz
023c8969eb add global retry for search, download, fetch favorites 2025-02-09 20:02:52 +08:00
ricterz
29c3abbe5c Merge branch 'master' of github.com:RicterZ/nhentai 2025-02-08 16:21:08 +08:00
ricterzheng
057fae8a83 0.5.23 2025-02-03 15:47:51 +08:00
ricterzheng
248d31edf0 get favorite count #386 even if not logged in 2025-02-03 15:45:39 +08:00
ricterzheng
4bfe0de078 0.5.22 2025-02-03 15:29:34 +08:00
ricterzheng
780a6c82b2 split metadata.json out from html generate function #386 2025-02-03 15:26:14 +08:00
ricterzheng
8791e7af55 update README to fix #367 2025-02-03 14:53:09 +08:00
ricterzheng
b434c4d58d 0.5.21 2025-02-03 14:34:14 +08:00
ricterzheng
fc69f94505 add --no-filename-padding options to fix #381 2025-01-29 22:59:28 +08:00
ricterzheng
571fba2259 fix RequestsDependencyWarning 2025-01-29 22:46:11 +08:00
ricterzheng
fa977fee04 0.5.20 2025-01-29 00:31:40 +08:00
ricterzheng
58b5ec4211 fix #382 2025-01-28 17:43:50 +08:00
Ricter Zheng
5ad416efa6
Merge pull request #380 from sgqy/master 2025-01-27 06:58:36 +08:00
sgqy
d90fd871ef fix: failure chain 2025-01-26 22:38:50 +09:00
sgqy
c7ff5c2c5c build: switch to pyproject 2025-01-26 21:45:55 +09:00
Ricter Zheng
4ab43dae24
Merge pull request #378 from bill88t/master 2025-01-24 04:36:21 +08:00
Bill Sideris
04bd88a1f7
fix: python-httpx 0.28 2025-01-23 21:16:07 +02:00
ricterz
ba59dcf4db add up/down arrow 2025-01-16 22:40:53 +08:00
ricterz
a83c571ec4 0.5.19 2025-01-15 19:47:24 +08:00
Ricter Zheng
e7ff5dab3d
Merge pull request #373 from nicojust/fix-favorite-metadata-output
fix favorite_counts output in metadata
2025-01-15 12:26:24 +08:00
Ricter Zheng
a166898b60
fix #374 2025-01-15 12:26:01 +08:00
Nekwo
ce25051fa3
fix: output favorite_counts as an int 2025-01-13 19:51:40 +01:00
Nekwo
41fba6b5ac
fix: add missing favorite_counts in metadata file 2025-01-13 19:51:04 +01:00
ricterz
8944ece4a8 use os.path.sep as path separator 2025-01-11 08:48:43 +08:00
ricterz
6b4c4bdc70 0.5.18 2025-01-11 08:35:40 +08:00
ricterz
d1d0c22af8 fix #349 2025-01-11 08:34:30 +08:00
ricterz
803957ba88 fix #349 2025-01-11 08:33:59 +08:00
ricterz
13b584a820 fix #371 and #324 2025-01-11 08:02:36 +08:00
ricterz
be08fcf4cb fix #368 2025-01-11 07:54:28 +08:00
ricterz
b585225308 fix #370 2025-01-11 07:52:51 +08:00
ricterz
54af682848 fix #369 2025-01-11 07:50:41 +08:00
ricterz
d74fd103f0 remove setup.py 2025-01-08 09:35:44 +08:00
ricterz
0cb2411955 Merge branch 'master' of github.com:RicterZ/nhentai 2025-01-08 09:17:01 +08:00
Ricter Zheng
de08d3daaa 0.5.17.1 2025-01-07 14:26:38 +08:00
ricterz
946b85ace9 tiny fix 2024-12-21 09:32:33 +08:00
ricterz
5bde24f159 remove debug print 2024-12-21 09:18:34 +08:00
ricterz
3cae13e76f fix #363 2024-12-18 23:37:00 +08:00
ricterz
7483b8f923 workaround of #359 2024-12-11 23:58:48 +08:00
ricterz
eae42c8eb5 fix #356 2024-12-11 23:57:01 +08:00
ricterz
b841747761 fix #356 2024-12-11 23:47:48 +08:00
Ricter Zheng
1f3528afad try to fix #361 2024-12-09 14:36:44 +08:00
Ricter Zheng
bb41e502c1 0.5.17 for fix #360 2024-12-09 09:26:33 +08:00
Ricter Zheng
7089144ac6 fix #360 #359 2024-12-09 09:25:40 +08:00
Ricter Zheng
0a9f7c3d3e 0.5.15 fix some bugs 2024-12-04 11:04:04 +08:00
Ricter Zheng
40536ad456 Merge branch 'master' of github.com:RicterZ/nhentai 2024-12-04 11:03:48 +08:00
Ricter Zheng
edb571c9dd fix #358 2024-12-04 11:00:50 +08:00
Ricter Zheng
b2befd3473
Merge pull request #357 from FelixJS123/favorite_metadata
add favorites count metadata
2024-12-04 10:47:32 +08:00
Ricter Zheng
c2e880f172 fix asyncio proxies settings and update httpx version 2024-12-04 10:46:45 +08:00
FelixJS
841988bc29 Updated README 2024-11-30 22:58:54 -08:00
FelixJS
390948e252 add favorites count metadata 2024-11-30 22:53:45 -08:00
Ricter Zheng
b9b8468bfe 0.5.14 2024-12-01 10:37:59 +08:00
Ricter Zheng
3d6263cf11
Merge pull request #354 from normalizedwater546/master
asyncio: fix downloader being run sequentially + httpx: fix proxy and missing headers
2024-11-24 13:50:22 +08:00
normalizedwater546
e3410f5a9a fix: add headers, proxy to async_request 2024-11-23 13:11:25 +00:00
normalizedwater546
feb7f45533 fix: semaphore bound to different event loop 2024-11-23 12:19:36 +00:00
normalizedwater546
0754caaeb7 fix: update threads argument 2024-11-23 11:20:58 +00:00
normalizedwater546
49e5a3094a fix: recent asyncio change resulting in sequential downloads
This was due to AsyncIO completely ignoring the thread (size) argument, and not updating sleep to be non-blocking.
2024-11-23 11:17:09 +00:00
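The two fixes above describe a common asyncio pitfall: a semaphore created outside the running event loop, and a blocking sleep that stalls the loop so downloads run one at a time. A minimal sketch of the corrected pattern (illustrative only, not the project's actual downloader code):

    import asyncio

    async def fetch(url, semaphore):
        # the semaphore caps concurrency at the requested number of "threads"
        async with semaphore:
            await asyncio.sleep(0.1)  # stand-in for an awaited HTTP request; never time.sleep()
            return url

    async def main(urls, threads=5):
        # create the semaphore inside the running loop so it is bound to that loop
        semaphore = asyncio.Semaphore(threads)
        return await asyncio.gather(*(fetch(u, semaphore) for u in urls))

    print(asyncio.run(main([f'page-{i}' for i in range(20)])))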
Ricter Zheng
c044b64beb
Merge pull request #353 from hzxjy1/master
Fix issue #7
2024-11-19 02:10:34 +08:00
Hellagur4225
f8334c09b5 Add dependency httpx 2024-11-19 01:16:51 +08:00
Hellagur4225
c90c486fb4 Add a fix patch for downloader 2024-11-19 01:13:16 +08:00
Ricter Zheng
90b17832cc
Merge pull request #351 from hzxjy1/master
Use coroutine in url download
2024-11-17 10:10:54 +08:00
Hellagur4225
14c6db9cc3 Use coroutine in url download and improve the extensibility of class Downloader 2024-11-16 15:57:59 +08:00
Ricter Zheng
f30ff59b2b
Merge pull request #348 from JustAHumanBean/webp
add webp support
2024-11-08 16:33:21 +08:00
JustAHumanBean
1504ee779f
Update utils.py 2024-11-08 07:49:20 +00:00
JustAHumanBean
98d9eecf6d
Update parser.py 2024-11-08 07:47:50 +00:00
JustAHumanBean
e16e623b9d
Update doujinshi.py 2024-11-08 07:46:53 +00:00
ricterzheng
c3f3182df3 0.5.12 2024-10-01 22:55:01 +09:00
ricterzheng
12aad842f8 fix #347 2024-10-01 22:42:26 +09:00
ricterzheng
f9f76ab0f5 0.5.11 2024-10-01 12:48:28 +09:00
ricterzheng
744a9e4418 Merge branch 'master' of github.com:RicterZ/nhentai 2024-10-01 12:47:48 +09:00
ricterzheng
c3e9fff491 fix bug #345 2024-10-01 12:47:13 +09:00
ricterzheng
a84e2c5714 fix bug #341 2024-10-01 12:47:10 +09:00
ricterzheng
c814c35c50 fix bug #341 2024-10-01 12:39:28 +09:00
ricterz
e2f71437e2 fix setuptools warning 2024-09-22 16:37:49 +08:00
ricterz
2fa45ae4df 0.5.10 2024-09-22 16:36:50 +08:00
ricterz
17bc33c6cb fix arguments pass issue #344 2024-09-22 16:34:53 +08:00
ricterz
09bb8460f6 fix overwrite issue #344 2024-09-22 16:32:01 +08:00
normalizedwater546
eb5b93d654 fix: pdf/cbz file already exists, but download process continues 2024-09-22 07:33:52 +00:00
normalizedwater546
cb6cf6df1a regression: pdf/cbz file already exists, but origin files are downloaded anyway.
- to reproduce: call download with `--cbz --rm-origin-dir`, and run the command twice.
- the user should pass the `--regenerate` option to get back the origin dir.
2024-09-22 07:24:16 +00:00
ricterz
98a66a3cb0 0.5.9 2024-09-22 15:09:36 +08:00
ricterz
02d47632cf fix bug of move-to-dir 2024-09-22 15:07:53 +08:00
ricterz
f932b1fbbe update README: mirror setup 2024-09-22 14:45:07 +08:00
ricterz
fd9e92f9d4 update README 2024-09-22 14:44:42 +08:00
Ricter Zheng
a8a48c6ce7
Merge pull request #343 from RicterZ/pull-342
improve #342
2024-09-22 14:42:32 +08:00
ricterz
f6e9d08fc7 0.5.8 #343 2024-09-22 14:42:02 +08:00
ricterz
9c1c2ea069 improve download logic #343 2024-09-22 14:39:32 +08:00
ricterz
984ae4262c generate_metadata_file no need to use parse_doujinshi_obj 2024-09-22 14:11:55 +08:00
ricterz
cbf9448ed9 improve #342 2024-09-22 13:35:07 +08:00
ricterz
16bac45f02 generate html viewer automatically after download #342 2024-09-22 12:30:55 +08:00
normalizedwater546
7fa9193112 fix: non-image files in pdf conversion causing crash 2024-09-22 02:05:32 +00:00
normalizedwater546
a05a308e71 fix: check if metadata file is downloaded before skipping 2024-09-22 01:39:40 +00:00
normalizedwater546
5a29eaf775 fix: add file_type check to downloader
If you wanted to generate both .cbz and .pdf, the .pdf will be skipped if .cbz was generated first.
2024-09-22 01:38:54 +00:00
normalizedwater546
497eb6fe50 fix: remove warning for folder already exists in downloader
Nothing is wrong with the folder already existing -- silently ignore and move on. Might still have other files inside that haven't been downloaded yet.
2024-09-22 01:00:06 +00:00
normalizedwater546
4bfe104714 refactor: de-dupe doujinshi_obj parsers 2024-09-22 00:44:06 +00:00
normalizedwater546
12364e980c fix process continuing despite cbz download request skipped 2024-09-22 00:43:10 +00:00
ricterz
b51e812449 fix #330 2024-09-21 11:49:22 +08:00
ricterz
0ed5fa1931 fix #320 2024-09-21 00:43:14 +08:00
ricterz
7f655b0f10 fix #295 2024-09-21 00:32:10 +08:00
ricterz
dec3f44542 add some debug hack 2024-09-21 00:21:01 +08:00
ricterz
40072a8483 0.5.7 2024-09-21 00:00:04 +08:00
ricterz
f97469259d fix #331 2024-09-20 23:59:34 +08:00
ricterz
ec608cc741 fix workflow docker issue 2024-09-20 23:58:25 +08:00
ricterz
30e2814fe2 update version number in pyproject.toml 2024-09-20 23:57:10 +08:00
Ricter Zheng
da298e1fe7
Merge pull request #312 from RicterZ/dependabot/pip/idna-3.7
Bump idna from 3.4 to 3.7
2024-09-20 23:56:25 +08:00
Ricter Zheng
51d43ddde0
Merge branch 'master' into dependabot/pip/idna-3.7 2024-09-20 23:56:18 +08:00
Ricter Zheng
c734881fc7
Merge pull request #316 from RicterZ/dependabot/pip/requests-2.32.0
Bump requests from 2.31.0 to 2.32.0
2024-09-20 23:55:33 +08:00
Ricter Zheng
8d5803a45e
Merge branch 'master' into dependabot/pip/requests-2.32.0 2024-09-20 23:55:28 +08:00
Ricter Zheng
b441085b45
Merge pull request #318 from RicterZ/dependabot/pip/urllib3-1.26.19
Bump urllib3 from 1.26.18 to 1.26.19
2024-09-20 23:55:08 +08:00
Ricter Zheng
132b26f8c4
Merge branch 'master' into dependabot/pip/urllib3-1.26.19 2024-09-20 23:54:57 +08:00
Ricter Zheng
a0dc952fd3
Merge pull request #319 from RicterZ/dependabot/pip/certifi-2024.7.4
Bump certifi from 2022.12.7 to 2024.7.4
2024-09-20 23:54:18 +08:00
ricterz
2bd862777b fix #333 2024-09-20 23:53:26 +08:00
ricterz
35c55503fa 0.5.6 2024-09-20 23:39:38 +08:00
ricterz
29aac84d53 fix #336 2024-09-20 23:34:26 +08:00
ricterz
4ed4523782 fix #341 2024-09-20 23:27:37 +08:00
Ricter Zheng
4223326c13
Merge pull request #340 from vglint/patch-3
Fix gallery search for folders with underscore
2024-09-14 10:17:57 +08:00
vglint
a248ff98c4
Fix gallery search for folders with underscore
Gallery title names replace '_' in the folder name with ' ' (generate_main_html()). To match against these title names when searching, we must also replace '_' with ' ' for each folder name we add to the list of titles to unhide.
2024-09-13 15:56:01 -07:00
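The change amounts to a one-line normalization; a sketch of the idea (variable names are hypothetical):

    folder_names = ['some_gallery_title', 'another_title']
    # generate_main_html() displays '_' as ' ', so normalize folder names the same way before matching
    titles_to_unhide = [name.replace('_', ' ') for name in folder_names]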
Ricter Zheng
021f17d229
Merge pull request #321 from PenitentMonke/xdg-base-dir
Adhere to XDG base dir spec on Linux
2024-07-08 22:03:38 +08:00
PenitentMonke
4162eabe93 Adhere to XDG base dir spec on Linux
Change how NHENTAI_HOME is set to follow the XDG Base Directory
Specification where possible, when running on Linux.

ISSUE: 299
2024-07-07 02:40:33 -03:00
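A sketch of the behavior described, assuming the usual XDG fallback rules (the function name and the choice of XDG_DATA_HOME are assumptions, not the project's verbatim code):

    import os
    import sys

    def get_nhentai_home():
        # an explicit $NHENTAI_HOME always wins
        if os.getenv('NHENTAI_HOME'):
            return os.environ['NHENTAI_HOME']
        home = os.path.expanduser('~')
        if sys.platform.startswith('linux'):
            # XDG base dir spec: honor $XDG_DATA_HOME, falling back to ~/.local/share
            xdg_data = os.getenv('XDG_DATA_HOME') or os.path.join(home, '.local', 'share')
            return os.path.join(xdg_data, 'nhentai')
        return os.path.join(home, '.nhentai')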
dependabot[bot]
c75e9efb21
Bump certifi from 2022.12.7 to 2024.7.4
Bumps [certifi](https://github.com/certifi/python-certifi) from 2022.12.7 to 2024.7.4.
- [Commits](https://github.com/certifi/python-certifi/compare/2022.12.07...2024.07.04)

---
updated-dependencies:
- dependency-name: certifi
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-07-05 21:52:23 +00:00
dependabot[bot]
f2dec5c2a3
Bump urllib3 from 1.26.18 to 1.26.19
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.18 to 1.26.19.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/1.26.19/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.18...1.26.19)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-06-18 01:35:13 +00:00
dependabot[bot]
845a0d5659
Bump requests from 2.31.0 to 2.32.0

---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-05-21 05:39:26 +00:00
dependabot[bot]
03d85c4e5d
Bump idna from 3.4 to 3.7
Bumps [idna](https://github.com/kjd/idna) from 3.4 to 3.7.
- [Release notes](https://github.com/kjd/idna/releases)
- [Changelog](https://github.com/kjd/idna/blob/master/HISTORY.rst)
- [Commits](https://github.com/kjd/idna/compare/v3.4...v3.7)

---
updated-dependencies:
- dependency-name: idna
  dependency-type: indirect
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-04-12 02:06:40 +00:00
Ricter Zheng
dc54a43610
Merge pull request #311 from RicterZ/dev
Dev merge to master
2024-03-28 17:56:28 +08:00
Ricter Zheng
4ecffaff55
Merge pull request #310 from Spyridion/dev
Changed parser option checks to allow artist search
2024-03-28 17:42:42 +08:00
Spyridion
457f12d40d Changed parser option checks to allow artist search 2024-03-28 02:40:14 -07:00
Ricter Zheng
499081a9cd
Merge pull request #306 from myc1ou1d/dev
fix file not found error when cbz file exists.
2024-02-25 00:37:32 +08:00
myc1ou1d
53aa04af1e
fix file not found error when cbz file exists. 2024-02-24 23:27:52 +08:00
Ricter Zheng
473f948565 update 2024-02-20 10:28:54 +08:00
Ricter Zheng
f701485840 remove print 2024-02-20 10:27:34 +08:00
Ricter Zheng
d8e4f50609 support #291 2024-02-20 10:25:44 +08:00
Ricter Zheng
a893f54da1 0.5.4 2023-12-28 17:46:40 +08:00
Ricter Zheng
4e307911ce
Merge pull request #297 from RicterZ/dependabot/pip/urllib3-1.26.18
Bump urllib3 from 1.26.14 to 1.26.18
2023-12-28 17:46:07 +08:00
Ricter Zheng
f9b7f828a5 fix #298 2023-12-28 17:45:37 +08:00
dependabot[bot]
092df9e539
Bump urllib3 from 1.26.14 to 1.26.18
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.14 to 1.26.18.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.14...1.26.18)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-17 23:59:22 +00:00
Ricter Zheng
8d74866abf
Update README.rst 2023-08-21 21:47:07 +08:00
Ricter Zheng
bc5b7f982d
Merge pull request #294 from edgar1016/master
Added --move-to-folder
2023-08-19 19:13:38 +08:00
KUROSHIORI\edgar
e54f3cbd06 Added --move-to-folder 2023-08-18 18:30:14 -07:00
Ricter Zheng
a31c615259
Merge pull request #284 from RicterZ/dependabot/pip/requests-2.31.0
Bump requests from 2.28.2 to 2.31.0
2023-05-25 20:40:59 +08:00
dependabot[bot]
cf0b76204d
Bump requests from 2.28.2 to 2.31.0
Bumps [requests](https://github.com/psf/requests) from 2.28.2 to 2.31.0.
- [Release notes](https://github.com/psf/requests/releases)
- [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md)
- [Commits](https://github.com/psf/requests/compare/v2.28.2...v2.31.0)

---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-05-23 06:19:34 +00:00
Ricter Zheng
17402623c4
Merge pull request #282 from edgar1016/master
--page-all works with favorites
2023-04-22 13:06:40 +08:00
KUROSHIORI\edgar
a1a310f06b --page-all works with favorites 2023-04-21 22:00:00 -07:00
Ricter Z
57673da762 update version 2023-03-28 21:02:47 +08:00
Ricter Zheng
dab61291cb
Merge pull request #280 from RicterZ/dev
0.5.3
2023-03-28 20:58:08 +08:00
Ricter Z
67cb88dbbd 0.5.3 2023-03-28 20:57:36 +08:00
Ricter Zheng
9ed4e04241
Merge pull request #279 from RicterZ/dev
update setup information
2023-03-28 20:56:53 +08:00
Ricter Z
0b0f9bd7e8 update setup information 2023-03-28 20:55:40 +08:00
Ricter Zheng
f1cc63a591
Merge pull request #278 from RicterZ/dev
fix #277
2023-03-28 20:54:49 +08:00
Ricter Z
aa77cb1c7c fix some bugs #277 2023-03-28 20:54:02 +08:00
Ricter Z
f9878d080b add debug information 2023-03-04 18:49:28 +08:00
Ricter Zheng
f534b0b47f
Merge pull request #275 from RicterZ/dev
remove tests
2023-03-04 18:40:45 +08:00
Ricter Z
6b675fd9ba remove tests 2023-03-04 18:40:10 +08:00
Ricter Zheng
458c68d5e6
Merge pull request #274 from RicterZ/dev
Dev
2023-03-04 18:39:07 +08:00
Ricter Z
2eed0a7463 add poetry 2023-03-04 18:33:51 +08:00
Ricter Zheng
fc507d246a
Merge pull request #271 from edgar1016/master
Fixed info.txt
2023-02-20 23:58:26 +08:00
KUROSHIORI\edgar
3ed84c5a67 Fixed info.txt 2023-02-20 01:54:32 -07:00
Ricter Z
61f4a43081 remove test 2023-02-20 12:58:28 +08:00
Ricter Z
4179947f16 add %ag %g formatter #269 2023-02-20 12:55:18 +08:00
Ricter Z
9f55223e28 use Unknown as field value if it is null #269 2023-02-20 12:47:00 +08:00
Ricter Zheng
b56e5b63a9
Merge pull request #268 from RicterZ/dev
enhancement of legacy search parser
2023-02-07 19:46:09 +08:00
Ricter Z
6dc1e0ef5a update test 2023-02-07 19:43:55 +08:00
Ricter Z
fefdd3858a update test 2023-02-07 19:42:27 +08:00
Ricter Z
f66653c55e legacy search by @gayspacegems of issue #265 2023-02-07 19:40:52 +08:00
Ricter Zheng
179852a343
Merge pull request #267 from RicterZ/dev
add counter
2023-02-06 17:51:54 +08:00
Ricter Z
8972026456 update tests 2023-02-06 17:50:51 +08:00
Ricter Z
cbff6496c3 update 2023-02-06 17:49:42 +08:00
Ricter Z
5a08981e89 update 2023-02-06 17:47:23 +08:00
Ricter Z
6c5b83d5be update tests 2023-02-06 17:46:03 +08:00
Ricter Z
3de4159a39 update tests 2023-02-06 17:44:28 +08:00
Ricter Z
c66fa5f816 rename 2023-02-06 17:43:00 +08:00
Ricter Z
66d0d91eae fix env 2023-02-06 17:40:11 +08:00
Ricter Z
0aa8e1d358 update tests 2023-02-06 17:27:42 +08:00
Ricter Z
0f54762229 print cookie 2023-02-06 17:25:34 +08:00
Ricter Z
93c3a77a57 add counter 2023-02-06 17:22:31 +08:00
Ricter Z
f411b7cfea update 2023-02-06 17:15:48 +08:00
Ricter Z
ed1686bb9c Merge branch 'master' of github.com:RicterZ/nhentai 2023-02-06 17:12:22 +08:00
Ricter Z
f44b9e9911 add counter 2023-02-06 17:12:10 +08:00
Ricter Zheng
1d20a82e3d
Create python-app.yml 2023-02-06 17:07:54 +08:00
Ricter Z
e3a6d67560 Merge branch 'master' of github.com:RicterZ/nhentai 2023-02-06 17:03:14 +08:00
Ricter Z
c7c3572811 add tests 2023-02-06 17:02:02 +08:00
Ricter Zheng
421e8bce64
Update docker-image.yml 2023-02-06 16:14:04 +08:00
Ricter Zheng
25e0d80024
Update docker-image.yml 2023-02-06 16:12:46 +08:00
Ricter Zheng
a10510b12d
Update docker-image.yml 2023-02-06 16:09:38 +08:00
Ricter Zheng
2c20d19621
Update docker-image.yml 2023-02-06 07:19:46 +08:00
Ricter Zheng
c4313e59f1
Create docker-image.yml 2023-02-06 07:16:42 +08:00
Ricter Z
c06f3225a3 remove travis-ci 2023-02-06 07:14:19 +08:00
Ricter Z
1fac55137a update travis-ci 2023-02-06 00:58:51 +08:00
Ricter Z
22412eb904 add docker ignore 2023-02-06 00:49:29 +08:00
Ricter Z
8ccfedbfc8 add dockerignore 2023-02-06 00:48:53 +08:00
Ricter Z
483bef2207 update docker usage 2023-02-06 00:45:43 +08:00
Ricter Z
730daec1ab update README 2023-02-06 00:44:04 +08:00
Ricter Z
5778d7a6e5 update README 2023-02-06 00:42:53 +08:00
Ricter Z
c48a25bd4e fix typo 2023-02-06 00:37:10 +08:00
Ricter Z
f5c4bf4dd1 update README 2023-02-06 00:36:56 +08:00
Ricter Z
9f17ee3f6e update README 2023-02-06 00:34:44 +08:00
Ricter Z
290f03d05e rm trash files 2023-02-06 00:22:43 +08:00
Ricter Z
fe443a4229 add Dockerfile 2023-02-06 00:22:23 +08:00
Ricter Z
2fe5536950 0.5.2 2023-02-06 00:03:54 +08:00
Ricter Z
7a7f2559ff update broken images on pypi 2023-02-06 00:02:48 +08:00
Ricter Z
444efcbee5 0.5.1 2023-02-05 23:55:21 +08:00
Ricter Z
08d812c614 fix UnicodeDecodeError on windows 2023-02-05 23:55:05 +08:00
Ricter Z
cb691c782c update README 2023-02-05 23:51:11 +08:00
Ricter Z
927d5b1b39 update requirements 2023-02-05 23:45:33 +08:00
Ricter Z
a8566482aa change log color and update images 2023-02-05 23:44:15 +08:00
Ricter Z
8c900a833d update README 2023-02-05 23:25:41 +08:00
Ricter Z
466fa4c094 rename some constants 2023-02-05 23:17:23 +08:00
Ricter Z
2adf8ccc9d reformat files #266 2023-02-05 23:13:47 +08:00
Ricter Z
06fdf0dade reformat files #266 2023-02-05 22:44:37 +08:00
Ricter Z
a609243794 change logger 2023-02-05 07:07:19 +08:00
Ricter Z
e89c2c0860 fix bug #265 2023-02-05 07:02:45 +08:00
Ricter Z
e08b0659e5 improve #265 2023-02-05 06:55:03 +08:00
Ricter Z
221ff6b32c 0.4.18 bugs fix 2023-02-04 20:24:53 +08:00
Ricter Z
bc6ef0cf5d solve #251 2023-02-04 20:22:57 +08:00
Ricter Z
c8c63cbc11 add usage images 2023-02-04 20:09:51 +08:00
Ricter Z
a63856d076 update usage 2023-02-04 20:09:46 +08:00
Ricter Z
aa4986189f resolve issue #264 2023-02-04 19:55:51 +08:00
Ricter Z
0fb81599dc resolve #265 2023-02-04 19:47:24 +08:00
Ricter Z
e9f9651d07 change the default sort method 2023-02-04 19:38:29 +08:00
Ricter Z
1860b5f0cf resolved issue #249 2022-05-03 16:54:38 +08:00
Ricter Z
eff4f3bf9b remove debug print 2022-05-03 16:51:49 +08:00
Ricter Z
501840172e change sorting from recent to date 2022-05-03 16:49:26 +08:00
Ricter Z
e5ed6d098a update README 2022-05-02 18:53:40 +08:00
Ricter Z
98606202fb remove some unused images 2022-05-02 18:49:34 +08:00
Ricter Z
5a3f1009c9 update README for issue #237 2022-05-02 18:48:02 +08:00
Ricter Z
61945a6e97 fix for issue #236 2022-05-02 17:01:30 +08:00
Ricter Z
443fcdc7da fix for issue #232 2022-05-02 16:53:23 +08:00
Ricter Z
31b95fe2dd 0.4.17 releases, for #246 2022-05-02 16:24:04 +08:00
Ricter Zheng
be8c97f8d4
Merge pull request #247 from krrr/master 2022-05-02 13:21:53 +08:00
krrr
348e51676e
Update README.rst 2022-05-02 12:13:19 +08:00
Ricter Zheng
ea356a1ca2
Merge pull request #244 from krrr/master 2022-04-30 13:47:57 +08:00
krrr
5a4dfb8a76 Add new option to avoid cloudflare captcha 2022-04-30 11:22:41 +08:00
Ricter Zheng
4b15744ceb
Merge pull request #235 from TravisDavis-ops/nixpkg 2021-12-24 03:27:07 +08:00
Travis Davis
b05fa16286
Update README.rst 2021-12-23 12:43:20 -06:00
Ricter Zheng
0879486881
Merge pull request #228 from culturecloud/master 2021-08-23 20:27:38 +08:00
RedoX
c66ba730d3 Fix UnicodeEncodeError 2021-07-28 18:43:45 +06:00
Ricter Zheng
606c5e0ffd
Merge pull request #226 from nanaih/minimal_viewer 2021-06-23 18:14:47 +08:00
rodrigo_qwertyuiop
ba04f81a6f add minimal viewer, fix not using config's template on --html only option 2021-06-22 23:17:03 -04:00
Ricter Zheng
6519e6f221
Merge pull request #224 from RicterZ/pull/221
Pull/221
2021-06-07 17:21:00 +08:00
RicterZ
7594625d72 fix format 2021-06-07 17:17:54 +08:00
RicterZ
4948c8f0c5 update README 2021-06-07 16:50:03 +08:00
RicterZ
e22a99fa8c Merge branch 'master' of github.com:RicterZ/nhentai 2021-06-07 16:48:36 +08:00
RicterZ
19a1d5c404 fix #220 add pretty name of doujinshi format 2021-06-07 16:47:54 +08:00
Ricter Zheng
ad1e876611
Merge pull request #221 from SomeRandomDude870/master
HDoujin-format Metadata file
2021-06-07 16:02:43 +08:00
Ricter Zheng
1de7e1f998
Merge branch 'pull/221' into master 2021-06-07 16:01:54 +08:00
DESKTOP-58CH9VE\Michael
b97e707817 HDoujin-format Metadata file 2021-06-05 17:13:18 +02:00
Ricter Zheng
6ef2189bfe
Merge pull request #214 from lleene/master
Add dryrun option to command line interface
2021-06-03 08:00:18 +08:00
RicterZ
24be2d37d4 0.4.16 2021-06-02 23:22:23 +08:00
RicterZ
d9d2a6fb91 fix bug of proxy while downloading doujinshi 2021-06-02 23:20:56 +08:00
Lieuwe Leene
bd38294bb7
undo whitespace edits 2021-05-16 19:49:26 +02:00
Lieuwe Leene
2cf4e6718e
Add the option to perform a dry-run and only download meta-data / generate file structure 2021-05-16 19:44:01 +02:00
RicterZ
8cd4b948e7 0.4.15 2021-05-08 15:36:49 +08:00
RicterZ
f884384eb3 fix bug 2021-05-08 15:36:36 +08:00
Ricter Zheng
87afab46c4
Merge pull request #211 from jwfiredragon/master 2021-04-25 09:56:49 +08:00
Johnny Wei
c7b1d7e6a8 Fix broken constant import 2021-04-24 16:39:54 -07:00
Ricter Zheng
ad02371158
Update constant.py 2021-04-21 15:37:13 +08:00
Ricter Zheng
7c9d55e0ee
Merge pull request #208 from karamori77/master
Changed write_comic_info from False to True
2021-04-21 15:30:51 +08:00
karamori77
00aad774ae
Fixed potential re-download
Moved the save-history check forward one indent so it works with download by id too
Mapped all ids to int since there are cases where it's a string in the API
2021-04-20 11:04:52 +08:00
karamori77
373086b459
Update serializer.py
changed Language to LanguageISO for ComicInfo.xml
Language will be displayed by the LanguageISO code; it also forgoes rare language tags like rewrite and speechless
2021-04-18 21:45:15 +08:00
karamori77
3a83f99771
Update constant.py 2021-04-18 21:40:47 +08:00
karamori77
00627ab36a
Update utils.py 2021-04-03 23:11:33 +08:00
Ricter Zheng
592e163891
Update requirements.txt 2021-03-26 22:25:49 +08:00
Ricter Zheng
84523475b0
Merge pull request #206 from Un1Gfn/patch-1 2021-03-25 19:01:39 +08:00
Darren Ng
5f5461c902
Instructions on getting csrftoken & sessionid
Ricter Zheng
05e6ceb3cd
Merge pull request #205 from Nontre12/master 2021-03-25 09:22:13 +08:00
Nontre
db59426503
FIX: Use of img2lib even if it is not installed 2021-03-24 21:49:45 +01:00
Ricter Z
74197f8f90 0.4.14 released for fix issue #204 2021-02-11 15:42:53 +08:00
Ricter Zheng
6d91a39533
Merge pull request #203 from jwfiredragon/master
Switching 'logger.warn' to 'logger.warning'
2021-02-11 15:41:15 +08:00
Johnny Wei
e181e0b9dd Switching 'logger.warn' to 'logger.warning' 2021-02-10 22:45:22 -08:00
Ricter Z
6fed1f94cb 0.4.13 2021-01-18 16:26:39 +08:00
Ricter Zheng
9cfb23c8ec
Merge pull request #201 from mobrine1/patch-1
Fix #200
2021-01-18 16:25:42 +08:00
mobrine1
fc347cdadf
Fix #200 2021-01-17 15:02:43 -05:00
Ricter Zheng
1cdebaab61
Merge pull request #199 from RicterZ/dev
0.4.12
2021-01-17 12:16:56 +08:00
Ricter Z
9513141ccf 0.4.12 2021-01-17 11:51:22 +08:00
Ricter Z
bdc9fa113e fix #197 set proxy to null 2021-01-17 11:50:22 +08:00
Ricter Z
36946111db fix #198 add notice 2021-01-17 11:42:06 +08:00
Ricter Zheng
ce8ae54536
Merge pull request #195 from RicterZ/dev
0.4.11
2021-01-11 11:19:58 +08:00
Ricter Zheng
7aedb905d6
Merge pull request #194 from RicterZ/dev
0.4.11
2021-01-11 11:16:09 +08:00
RicterZ
8b8b5f193e 0.4.11 2021-01-11 11:15:21 +08:00
RicterZ
fc99d91ac1 fix #193 2021-01-11 11:14:35 +08:00
RicterZ
ba141efba7 remove repeated spaces 2021-01-11 11:04:29 +08:00
RicterZ
f78d8750f3 remove __future__ 2021-01-11 11:03:45 +08:00
Ricter Zheng
08bb8ffda4
Merge pull request #192 from RicterZ/dev
Dev
2021-01-10 14:41:02 +08:00
Ricter Zheng
af379c825c
Merge branch 'master' into dev 2021-01-10 14:40:09 +08:00
RicterZ
2f9386f22c fix #188 2021-01-10 11:44:04 +08:00
RicterZ
3667bc34b7 0.4.10 2021-01-10 11:41:38 +08:00
RicterZ
84749c56bd fix #191 2021-01-10 11:40:46 +08:00
Ricter Zheng
24f79e0945
Merge pull request #190 from RicterZ/dev
fix bugs
2021-01-07 20:42:26 +08:00
Ricter Zheng
edc46a9531
Merge pull request #189 from mobrine1/mobrine1-patch-1
Fixing loop when id not found, issue #188
2021-01-07 20:39:44 +08:00
mobrine1
72035a14e6
Fixing loop when id not found, issue #188 2021-01-07 07:32:29 -05:00
Ricter Zheng
472528e464
Merge pull request #187 from atsushi-hirako/patch-1
fix issue #186
2021-01-02 02:16:50 +08:00
atsushi-hirako
3f5915fd2a
fix issue #186
change to a blacklist approach (allow 2-byte characters)
2021-01-01 20:11:09 +09:00
Ricter Z
0cd2576dab 0.4.9 2020-12-02 07:45:31 +08:00
Ricter Zheng
445a8c052e
Merge pull request #180 from RicterZ/dev
0.4.8
2020-12-01 21:01:00 +08:00
Ricter Z
7a75afef0a 0.4.8 2020-12-01 20:58:28 +08:00
Ricter Z
a5813e19b1 fix bug on first start 2020-12-01 20:56:27 +08:00
RicterZ
8462d2f2aa use dict.update to update config values 2020-11-26 17:52:10 +08:00
RicterZ
51074ee948 support multi viewers 2020-11-26 17:22:23 +08:00
RicterZ
9c7354be32 0.4.6 2020-11-07 12:04:42 +08:00
Ricter Zheng
7f48b3edd1
Merge pull request #175 from RicterZ/dev
add default value of output dir
2020-10-15 02:10:06 +08:00
RicterZ
d84b827241 add default value of output dir 2020-10-15 02:09:09 +08:00
Ricter Zheng
4ac161a38c
Merge pull request #174 from Nontre12/fix-gen-main
Fix change directory output_dir option on gen-main
2020-10-15 01:47:51 +08:00
Nontre12
648b6f87bf Added logo.png to the installation 2020-10-14 12:09:39 +02:00
Nontre12
2ec1283ba8 Fix change directory output_dir option on gen-main 2020-10-14 12:02:57 +02:00
Ricter Zheng
a9bd46b426
Merge pull request #173 from Nontre12/db-ignored
Fix db ignored
2020-10-14 02:44:03 +08:00
Nontre12
c52bc271fc Fix db ignored 2020-10-13 13:39:24 +02:00
Ricter Zheng
f2d22f8e7d
Merge pull request #169 from Nontre12/master
Fix running without parameters
2020-10-11 03:48:39 +08:00
Nontre12
ea6089ff31 Fix 2020-10-10 21:15:20 +02:00
Nontre
670d14c3f3
Merge pull request #4 from RicterZ/master
Update master branch
2020-10-10 20:50:01 +02:00
Ricter Zheng
b46106a5bc
Merge pull request #167 from RicterZ/0.4.5
0.4.5
2020-10-11 02:00:02 +08:00
RicterZ
f04359e486 0.4.5 2020-10-11 01:57:37 +08:00
Ricter Zheng
6861cbcbc1
Merge pull request #166 from RicterZ/dev
0.4.4
2020-10-11 01:45:53 +08:00
Ricter Zheng
e0938c5a0e
Merge pull request #165 from RicterZ/dev
0.4.4
2020-10-11 01:43:41 +08:00
RicterZ
641f8e4c51 0.4.4 2020-10-11 01:42:02 +08:00
RicterZ
b2fae226f9 use config.json 2020-10-11 01:38:08 +08:00
Nontre
4aa34c668a
Merge pull request #3 from RicterZ/master
Update master branch from origin
2020-10-10 19:11:56 +02:00
RicterZ
f157ac3246 merge to functions 2020-10-11 01:09:13 +08:00
Ricter Zheng
139e01d3ca
Merge pull request #163 from Nontre12/dev-page-range
Added --page-all option to download all search results
2020-10-11 00:58:57 +08:00
Ricter Zheng
4d870e36a1
Merge branch 'master' into dev-page-range 2020-10-11 00:53:27 +08:00
Ricter Zheng
74b0df26a9
Merge pull request #164 from RicterZ/fix-page-range
fix page range issue #158
2020-10-11 00:51:58 +08:00
RicterZ
1746e731ec fix page range issue #158 2020-10-11 00:48:36 +08:00
Nontre
8ad60d9838
Merge pull request #1 from RicterZ/master
Merge pull request #162 from Nontre12/master
2020-10-10 18:31:47 +02:00
Nontre12
be05b9c0eb Added --page-all option to download all search results 2020-10-10 18:29:00 +02:00
Ricter Zheng
9054b98934
Merge pull request #162 from Nontre12/master
Added 'Parodies' output and Updated package version
2020-10-11 00:10:27 +08:00
Nontre12
b82201ff27 Added the "Parodies" output to the -S --show option 2020-10-10 12:33:14 +02:00
Nontre12
532c74e075 Update __version__ 2020-10-10 12:31:54 +02:00
Ricter Zheng
5a50a5b1ba
Merge pull request #159 from Nontre12/dev
Added --clean-language option
2020-10-10 04:56:51 +08:00
Nontre12
b5fe48746e Added --clean-language option 2020-10-09 17:34:03 +02:00
Nontre12
94d8da655a Fix misspelling 2020-10-09 17:30:11 +02:00
Ricter Zheng
6ff2816d95
Merge pull request #157 from RicterZ/dev
0.4.3
2020-10-02 01:59:50 +08:00
Ricter Z
4d89b80e67 Merge branch 'dev' of github.com:RicterZ/nhentai into dev 2020-10-02 01:56:31 +08:00
Ricter Zheng
0a94ef9cf1
Merge pull request #156 from RicterZ/dev
0.4.2
2020-10-02 01:56:04 +08:00
Ricter Z
4cc4f35a0d fix bug in search 2020-10-02 01:55:03 +08:00
Ricter Zheng
ad86c49de9
Merge branch 'master' into dev 2020-10-02 01:47:35 +08:00
Ricter Z
5a538fe82f add tests and new python version 2020-10-02 01:43:44 +08:00
Ricter Z
eb35ba9848 0.4.2 2020-10-02 01:41:02 +08:00
Ricter Z
14a53a0953 fix 2020-10-02 01:39:42 +08:00
Ricter Z
c5e4b5ffa8 update 2020-10-02 01:39:14 +08:00
Ricter Z
b3f25875d0 fix bug on mac #126 2020-10-02 01:32:18 +08:00
Ricter Z
91053b98af 0.4.1 2020-10-02 01:02:41 +08:00
Ricter Z
7570b6ae7d remove img2pdf in requirements 2020-10-02 00:55:26 +08:00
Ricter Z
d2e68c6c45 fix #146 #142 #146 2020-10-02 00:51:37 +08:00
Ricter Zheng
b0902c2d58
Merge pull request #147 from fuchs2711/fix-win32-filename
Fix invalid filenames on Windows
2020-07-19 11:12:25 +08:00
Fuzi Fuz
320f36c264 Fix invalid filenames on Windows 2020-07-18 15:19:41 +02:00
Ricter Zheng
1dae63be39
Merge pull request #141 from RicterZ/dev
update tests
2020-06-26 13:32:35 +08:00
RicterZ
78429423d9 fix bug 2020-06-26 13:29:44 +08:00
RicterZ
38ff69d99d add sort options 2020-06-26 13:28:10 +08:00
RicterZ
2ce36204fe update tests 2020-06-26 13:18:08 +08:00
Ricter Zheng
8ed1b89277
Merge pull request #140 from RicterZ/dev
0.4.0
2020-06-26 13:16:55 +08:00
RicterZ
e9864d158f update tests 2020-06-26 13:15:57 +08:00
RicterZ
43013badd4 update .gitignore 2020-06-26 13:12:49 +08:00
RicterZ
7508a2010d 0.4.0 2020-06-26 13:12:37 +08:00
Ricter Zheng
946761477d
Merge pull request #139 from RicterZ/master
Merge into dev branch
2020-06-26 12:48:51 +08:00
Ricter Zheng
db80408024
Merge pull request #138 from RicterZ/revert-134-master
Revert "Fix fatal error and keep index of id which from file"
2020-06-26 12:47:25 +08:00
Ricter Zheng
4c85cebb78
Revert "Fix fatal error and keep index of id which from file" 2020-06-26 12:47:10 +08:00
Ricter Zheng
e982a8170c
Merge pull request #134 from ODtian/master
Fix fatal error and keep index of id which from file
2020-06-26 12:46:08 +08:00
Ricter Zheng
0b62f0ebd9
Merge pull request #137 from jwfiredragon/patch-1
Fixing typos
2020-06-26 12:45:55 +08:00
jwfiredragon
37b4ee7d00
Fixing typos
ms-user-select should be -ms-user-select. #0d0d0d9 isn't a valid hex code - I assume it's supposed to be #0d0d0d?
2020-06-23 23:04:09 -07:00
ODtian
84cad0d475 Update cmdline.py 2020-06-24 12:00:17 +08:00
ODtian
bf03881ed6 Fix fatal error and keep index of id which from file 2020-06-23 20:39:41 +08:00
Ricter Zheng
f97b814b45
Merge pull request #131 from myzWILLmake/dev
remove args.tag since no tag option in parser
2020-06-22 18:11:18 +08:00
Ma Yunzhe
7323eae99b remove args.tag since no tag option in parser 2020-06-15 10:00:23 +08:00
Ricter Zheng
6e07f0426b
Merge pull request #130 from jwfiredragon/patch-1
Fixing parser for nhentai site update
2020-06-12 10:32:34 +08:00
jwfiredragon
44c424a321
Fixing parser for nhentai site update
nhentai's recent site update broke the parser, this fixes it. Based off the work on [my fork here](8c4a4f02bc).
2020-06-10 22:39:35 -07:00
Ricter Zheng
3db77e0ce3
Merge pull request #127 from Tsuribori/dev
Add PDF support
2020-06-08 11:11:42 +08:00
user
22dbb4dd0d Add PDF support 2020-06-07 19:07:40 +03:00
Ricter Zheng
2be4bd71ce
Merge pull request #123 from Alocks/dev
--search fix, removed --tag commands
2020-05-06 19:16:27 +08:00
Alocks
fc39aeb49e
stupid fix 2020-05-02 14:52:24 -03:00
Alocks
be2ec3f452 updated documentation 2020-05-02 14:35:22 -03:00
Alocks
0c23f64356 removed all --tag commands since --search API is working again, now --language is a setting, cleaned some code 2020-05-02 14:23:31 -03:00
Ricter Z
7e4dff8fec move import statement to function 2020-05-01 22:20:55 +08:00
Ricter Z
e2a1d79b1b fix #117 2020-05-01 22:18:03 +08:00
Ricter Zheng
8183f3a7a9
Merge pull request #119 from BachoSeven/master
Updated README
2020-04-26 09:57:39 +08:00
Francesco Minnocci
80713d2e00 updated README.rst 2020-04-25 18:19:44 +02:00
Francesco Minnocci
a2cd025027 updated README.rst 2020-04-25 18:18:48 +02:00
Francesco
2f7bb59e58
Update README.rst 2020-04-25 18:04:50 +02:00
Ricter Zheng
e94685d9c5
Merge pull request #116 from AnhNhan/master
write ComicInfo.xml for CBZ files
2020-04-22 12:52:17 +08:00
Anh Nhan Nguyen
07d804b047 move ComicInfo.xml behind the --comic-info flag 2020-04-22 06:19:12 +02:00
Anh Nhan Nguyen
5552d39337 fix --artist, --character, --parody, --group 2020-04-21 14:54:04 +02:00
Anh Nhan Nguyen
d35190f9d0 write ComicInfo.xml for CBZ files 2020-04-21 13:23:50 +02:00
Ricter Zheng
c8bca4240a
Merge pull request #115 from RicterZ/dev
fix bug #114
2020-04-20 20:17:09 +08:00
RicterZ
130386054f 0.3.9 2020-04-20 20:16:48 +08:00
RicterZ
df16109788 fix install script on python2 2020-04-20 20:15:06 +08:00
Ricter Zheng
c18cd2aaa5
Merge pull request #112 from RicterZ/dev
0.3.8
2020-04-20 20:07:02 +08:00
RicterZ
197b5e4923 update 2020-04-09 22:04:45 +08:00
RicterZ
9f747dad7e 0.3.8 2020-04-09 21:12:24 +08:00
RicterZ
ca713197cc add sqlite3 db to save download history 2020-04-09 21:07:20 +08:00
RicterZ
49f07de95d remove repeat code 2020-04-09 20:37:13 +08:00
RicterZ
5c7bdae0d7 add a new option #111 2020-04-09 20:32:20 +08:00
RicterZ
d5f41bf37c fix bug of --tag in python2.7 2020-03-15 00:41:40 +08:00
RicterZ
56153015b1 update cookie 2020-03-15 00:25:02 +08:00
RicterZ
140249217a fix 2020-03-15 00:24:12 +08:00
RicterZ
9e537e60f2 reformat file 2020-03-15 00:03:48 +08:00
RicterZ
4df8e1bae0 update tests 2020-03-14 23:59:18 +08:00
RicterZ
c250d9c787 fix #106 2020-03-14 23:56:22 +08:00
Ricter Zheng
a5547696eb
Merge pull request #108 from RicterZ/dev
Merge dev to master
2020-03-14 23:35:02 +08:00
Ricter Zheng
49ac1d035d
Merge branch 'master' into dev 2020-03-14 23:34:49 +08:00
Ricter Zheng
f234b7234e
Merge pull request #104 from myzWILLmake/master
add page_range option for favorites
2020-02-08 16:12:25 +08:00
myzWILLmake
43a9b981dd add page_range option for favorites 2020-02-07 01:32:51 +08:00
Ricter Zheng
bc29869a8b
Merge pull request #101 from reynog/patch-1
Suggested change to viewer
2020-01-18 19:50:04 +08:00
reynog
53e1923e67
Changed keyboard nav
In conjunction with the styles.css change, changed the W and S keys to scroll the image vertically and removed page change from Up and Down, leaving A, D, Left, and Right as the keys for changing page. The page returns to the top when changing page. W and S scroll behavior is not smooth. Up and Down scrolling relies on the browser's built-in keyboard scrolling.
2020-01-16 20:20:42 +01:00
reynog
ba6d4047e2
Larger image display
Bodged file edit. Changed image to extend off the screen, and be scrollable. Easier to read speech and other text on smaller displays. Moved page counter to top center. Not quite as nice looking.
2020-01-16 20:12:27 +01:00
Ricter Zheng
dcf22b30a5
Merge pull request #96 from symant233/dev
Add @media to html_viewer (mobile friendly)
2019-12-16 10:41:53 +08:00
symant233
0208d9b9e6 remove... 2019-12-13 11:57:42 +08:00
symant233
0115285e10 trying to fix conflict 2019-12-13 11:56:36 +08:00
symant233
ea8a576f7e remove webkit tap color and outline 2019-12-11 18:52:27 +08:00
symant233
05eaa9eebc fix 'max-width' not working 2019-12-11 18:35:53 +08:00
symant233
ab2dff4859 Merge remote-tracking branch 'upstream/master' into dev 2019-12-11 11:02:43 +08:00
symant233
9592870d85 add html viewer @media 2019-12-11 10:55:50 +08:00
Ricter Zheng
c1a82635bd
Merge pull request #94 from Alocks/dev
added filter for main.html and #95 fix
2019-12-09 11:27:32 +08:00
Alocks
1974644513 download gif images 2019-12-08 20:59:37 -03:00
Alocks
fe4fb46e30 fixed language tag 2019-12-07 17:50:23 -03:00
Alocks
6156cf5914 added zoom in index.html and some increments in main.html 2019-12-07 14:36:19 -03:00
Alocks
75b00fc523 Merge remote-tracking branch 'origin/dev' into dev 2019-12-07 12:58:59 -03:00
Alocks
ff8af8279f fixed html and removed unused .css properties 2019-12-07 12:58:19 -03:00
Alocks
e1556b09cc
fixed unicode issues with japanese characters 2019-12-07 11:19:49 -03:00
Alocks
110a2acb7c main page filter fixes 2019-12-06 13:08:16 -03:00
Alocks
c60f1f34d5 main page filter(2/2) 2019-12-05 18:02:03 -03:00
Alocks
4f2db83a13 almost gave up 2019-12-04 18:54:40 -03:00
Alocks
bd8bb42ecd main page filter(1/2) 2019-12-04 00:45:14 -03:00
Alocks
0abcb048b4 filter for main page(1/2) 2019-12-02 16:46:22 -03:00
Ricter Zheng
411d6c2f30
Merge pull request #93 from Alocks/dev
Added language option and metadata serializer
2019-12-02 11:38:09 +08:00
Alocks
88c0c1e021 Added language option and metadata serializer 2019-12-01 21:23:41 -03:00
Ricter Zheng
86c43e5d8c
Merge pull request #92 from RicterZ/dev
merge & update
2019-11-22 10:49:13 +08:00
Ricter Zheng
39f8729d51
Merge pull request #91 from jwfiredragon/patch-1
Documenting --gen-main
2019-11-22 10:48:14 +08:00
jwfiredragon
d6461335f8
Adding --gen-main to documentation
--gen-main exists as an option in cmdline.py but is not documented in README
2019-11-21 08:40:57 -08:00
Ricter Zheng
c0c7b33909
Merge pull request #88 from Alocks/dev
changed all map(lambda) to listcomp
2019-11-12 14:47:49 +08:00
Alocks
893a8c194e
removed list(). stupid mistake 2019-11-05 10:41:20 -03:00
Alocks
e6d2eb554d Merge remote-tracking branch 'Alocks/dev' into dev 2019-11-04 16:17:20 -03:00
Alocks
25e5acf671 changed every map(lambda) to listcomp 2019-11-04 16:14:52 -03:00
Ricter Zheng
4f33228cec
Merge pull request #86 from Alocks/dev
Fixed parser to work with new options, and updated readme
2019-10-23 10:16:09 +08:00
Alocks
f227c9e897
Update README.rst 2019-10-22 14:18:38 -03:00
Alocks
9f2f57248b Added commands in README and fixed parser 2019-10-22 14:14:50 -03:00
Ricter Zheng
024f08ca97
Merge pull request #84 from Alocks/master
new options added [--artist, --character, --parody, --group]
2019-10-10 12:42:33 +08:00
Alocks
3017fff823
Merge branch 'dev' into master 2019-10-08 15:42:35 -03:00
Alocks
070e8917f4
Fixed whitespaces when using comma² 2019-10-05 15:07:49 -03:00
Alocks
01caa8d4e5
Fixed if user add white spaces 2019-10-05 15:00:33 -03:00
Alocks
35e724e206 xablau
Signed-off-by: Alocks <alocksmasao@gmail.com>
2019-10-03 18:26:28 -03:00
RicterZ
d045adfd6a 0.3.6 2019-08-04 22:39:31 +08:00
RicterZ
62e3552c84 update cookie 2019-08-04 22:39:31 +08:00
RicterZ
6e2a25cf55 fix bug in tag parser #70 2019-08-04 22:39:31 +08:00
RicterZ
44178a8cfb remove comment 2019-08-04 22:39:31 +08:00
RicterZ
4ca582c104 fix #74 2019-08-04 22:39:31 +08:00
RicterZ
97857b8dc6 "" :) 2019-08-04 22:39:31 +08:00
RicterZ
23774d9526 fix bugs 2019-08-01 21:06:40 +08:00
RicterZ
8dc7a1f40b singleton pool 2019-08-01 18:52:30 +08:00
RicterZ
349e21193b remove print 2019-07-31 19:04:25 +08:00
RicterZ
7e826c5255 use multiprocess instead of threadpool #78 2019-07-31 01:22:54 +08:00
RicterZ
bc70a2071b add test for sorting 2019-07-30 23:04:23 +08:00
RicterZ
1b49911166 code style 2019-07-30 23:03:29 +08:00
Ricter Zheng
7eeed17ea5
Merge pull request #79 from Waiifu/added-sorting
sorting option
2019-07-30 22:53:40 +08:00
Waifu
f4afcd549e Added sorting option 2019-07-29 09:11:45 +02:00
Ricter Zheng
4fc6303db2
Merge pull request #76 from RicterZ/dev
0.3.6
2019-07-28 12:00:54 +08:00
RicterZ
f2aa65b64b 0.3.6 2019-07-28 11:58:00 +08:00
RicterZ
0a343935ab update cookie 2019-07-28 11:55:12 +08:00
RicterZ
03f1aeada7 fix bug in tag parser #70 2019-07-28 11:48:47 +08:00
RicterZ
94395d9165 remove comment 2019-07-28 11:46:48 +08:00
RicterZ
bacaa096e0 fix #74 2019-07-28 11:46:06 +08:00
RicterZ
3e420f05fa "" :) 2019-07-28 11:40:19 +08:00
Ricter Zheng
158b15bda8
Merge pull request #66 from RicterZ/dev
0.3.5
2019-06-12 23:04:08 +08:00
RicterZ
92640d9767 0.3.5 2019-06-12 22:54:22 +08:00
RicterZ
6b97777b7d fix bug 2019-06-12 22:48:41 +08:00
RicterZ
1af195d727 add cookie check 2019-06-12 22:45:44 +08:00
RicterZ
58b2b644c1 fix #64 2019-06-12 22:37:25 +08:00
RicterZ
0cfec34e9e modify cookie 2019-06-12 22:08:32 +08:00
RicterZ
1172282362 fix #50 2019-06-04 08:38:42 +08:00
RicterZ
a909ad6d92 fix --gen-main bugs 2019-06-04 08:35:13 +08:00
Ricter Zheng
440bb0dc38
Merge pull request #58 from symant233/master
fix show info
2019-06-03 17:53:27 +08:00
symant233
f5b7d89fb0 fix show info 2019-06-01 11:31:53 +08:00
Ricter Zheng
535b804ef6
Merge pull request #53 from symant233/master
Create a main viewer contains all the sub index.html and thumb pic
2019-05-30 20:10:22 +08:00
symant233
9b65544942 add travis-ci test 2019-05-30 20:05:46 +08:00
symant233
0935d609c3 fix --gen-main action 2019-05-29 13:43:47 +08:00
symant233
f10ae3cf58 store proxy config 2019-05-28 19:47:48 +08:00
symant233
86b3a092c7 ignore other folders 2019-05-26 15:57:50 +08:00
symant233
710cc86eaf fix codec error for py2 2019-05-21 17:06:42 +08:00
symant233
2d327359aa small fix 2019-05-21 16:16:58 +08:00
symant233
f78b8bc2cd fix conflict 2019-05-21 15:53:43 +08:00
Ricter Zheng
a95396033b
Update README.rst 2019-05-18 22:36:03 +08:00
RicterZ
01c0e73849 fix bug while installing on windows / python3 2019-05-18 22:30:20 +08:00
RicterZ
57e9305849 0.3.3 2019-05-18 22:15:42 +08:00
RicterZ
6bd37f384c fix 2019-05-18 22:14:08 +08:00
RicterZ
2c61fd3a3f add doujinshi folder formatter 2019-05-18 22:13:23 +08:00
RicterZ
cf4291d3c2 new line 2019-05-18 22:01:29 +08:00
RicterZ
450e3689a0 fix 2019-05-18 22:00:33 +08:00
RicterZ
b5deca2704 fix 2019-05-18 21:57:43 +08:00
RicterZ
57dc4a58b9 remove Options block 2019-05-18 21:56:59 +08:00
RicterZ
1e1d03064b readme 2019-05-18 21:56:35 +08:00
RicterZ
40a98881c6 add some shortcut options 2019-05-18 21:53:40 +08:00
RicterZ
a7848c3cd0 fix bug 2019-05-18 21:52:36 +08:00
RicterZ
5df58780d9 add delay #55 2019-05-18 21:51:38 +08:00
RicterZ
56dace81f1 remove readme.md 2019-05-18 20:31:18 +08:00
Ricter Zheng
086e469275
Update README.rst 2019-05-18 20:27:08 +08:00
Ricter Zheng
1f76a8a70e
Update README.rst 2019-05-18 20:24:49 +08:00
Ricter Zheng
5d294212e6
Update README.rst 2019-05-18 20:24:15 +08:00
Ricter Zheng
ef274a672b
Update README.rst 2019-05-18 20:23:19 +08:00
Ricter Zheng
795f80752f
Update README.rst 2019-05-18 20:22:55 +08:00
Ricter Zheng
53c23bb6dc
Update README.rst 2019-05-18 20:07:45 +08:00
RicterZ
8d5f12292c update rst 2019-05-18 20:06:10 +08:00
RicterZ
f3141d5726 add rst 2019-05-18 20:04:16 +08:00
RicterZ
475e4db9af 0.3.2 #54 2019-05-18 19:47:04 +08:00
RicterZ
263dba51f3 modify tests #54 2019-05-18 19:40:09 +08:00
RicterZ
049ab4d9ad using cookie rather than login #54 2019-05-18 19:34:54 +08:00
symant233
a5eba94064 clean unused style for main.css 2019-05-06 15:41:26 +08:00
symant233
6053e302ee fix output_dir make gen-main error 2019-05-05 22:02:24 +08:00
symant233
c32b516575 js return to prev page press 'q' 2019-05-05 21:47:23 +08:00
symant233
0150e79c49 Add main viewer sources 2019-05-05 21:10:24 +08:00
symant233
0cda30385b Main viewer generator 2019-05-05 21:01:49 +08:00
symant233
18bdab1962 add main viewer 2019-05-05 21:01:49 +08:00
symant233
8e8f935a9b set alias for local:1080 proxy 2019-05-05 21:01:49 +08:00
RicterZ
b173a6c28f slow down #50 2019-05-04 12:12:57 +08:00
RicterZ
b64b718c88 remove eval 2019-05-04 11:31:41 +08:00
RicterZ
8317662664 fix #50 2019-05-04 11:29:01 +08:00
Ricter Zheng
13e60a69e9
Merge pull request #51 from symant233/master
Add viewer arrow support, add README license badge.
2019-05-04 11:11:34 +08:00
symant233
b5acbc76fd Update README license badge 2019-05-04 11:07:15 +08:00
symant233
1eb1b5c04c Add viewer arrow support & Readme license badge 2019-05-04 11:04:43 +08:00
Ricter Zheng
2acb6a1249
Update README.md 2019-04-25 03:36:31 +08:00
RicterZ
0660cb0fed update user-agent 2019-04-11 22:48:18 +08:00
RicterZ
680b004c24 update README 2019-04-11 22:47:49 +08:00
RicterZ
6709af2a20 0.3.1 - add login session 2019-04-11 22:44:26 +08:00
RicterZ
a3fead2852 pep-8 2019-04-11 22:43:42 +08:00
RicterZ
0728dd8c6d use text rather than content 2019-04-11 22:41:37 +08:00
RicterZ
9160b38c3f bypass the challenge 2019-04-11 22:39:20 +08:00
RicterZ
f74be0c665 add new tests 2019-04-11 22:10:16 +08:00
Ricter Zheng
c30f562a83
Merge pull request #48 from onlymyflower/master
download ids from file
2019-04-11 22:09:30 +08:00
RicterZ
37547cc97f global login session #49 #46 2019-04-11 22:08:19 +08:00
onlymyflower
f6fb90aab5 download ids from file 2019-03-06 16:46:47 +08:00
RicterZ
50be89db44 fix extension issue #44 2019-01-27 10:06:12 +08:00
RicterZ
fc0be35b2c 0.3.0 #40 2019-01-15 21:16:14 +08:00
RicterZ
5c3dace937 tag page download #40 2019-01-15 21:12:20 +08:00
RicterZ
b2d622f11a fix tag download issue #40 2019-01-15 21:09:24 +08:00
RicterZ
0c8264bcc6 fix download issues 2019-01-15 20:43:00 +08:00
RicterZ
a6074242fb nhentai suspended api #40 2019-01-15 20:29:10 +08:00
RicterZ
eb6df28fba 0.2.19 2018-12-30 14:13:27 +08:00
RicterZ
1091ea3e0a remove debug 2018-12-30 14:12:38 +08:00
root
0df51c83e5 change output filename 2018-12-30 14:06:15 +08:00
Ricter Zheng
c5fa98ebd1
Update .travis.yml 2018-11-04 21:44:59 +08:00
Ricter Z
3154a94c3d 0.2.18 2018-10-24 22:21:29 +08:00
Ricter Z
c47018251f fix #27 2018-10-24 22:20:33 +08:00
Ricter Z
74d0499092 add test 2018-10-24 22:07:43 +08:00
Ricter Z
7e56d9b901 fix #33 2018-10-24 22:06:49 +08:00
Ricter Z
8cbb334d36 fix #31 2018-10-24 21:56:21 +08:00
Ricter Zheng
db6d45efe0
fix bug #34 2018-10-19 10:55:21 +08:00
Ricter Zheng
d412794bce
Merge pull request #32 from violetdarkness/patch-1
requirement.txt missing new line
2018-10-08 23:36:38 +08:00
violetdarkness
8eedbf077b
requirement.txt missing new line
I got an error when installing and found that requirement.txt was missing a newline
2018-10-08 21:13:52 +07:00
Ricter Z
c95ecdded4 remove gdb 2018-10-01 15:04:32 +08:00
Ricter Z
489e8bf0f4 fix #29 0.2.16 2018-10-01 15:02:04 +08:00
Ricter Zheng
86c31f9b5e
Merge pull request #28 from tbinavsl/master
Max retries + misc. language fixes
2018-09-28 13:28:44 +08:00
tbinavsl
6f20405f47 adding gif support and fixing yet another English typo 2018-09-09 23:38:30 +02:00
tbinavsl
c0143548d1 partially reverted the max_page commit by mistake; also added retries on other features 2018-09-09 22:24:34 +02:00
tbinavsl
114c364f03 oops 2018-09-09 21:42:03 +02:00
tbinavsl
af26482b6d Max retries + misc. language fixes 2018-09-09 21:33:50 +02:00
Ricter Z
b8ea917db2 max page #26 2018-08-24 23:55:34 +08:00
Ricter Z
963f4d9ddf fix 2018-08-12 23:22:30 +08:00
Ricter Z
ef36e012ce fix unicode error on windows / python2 2018-08-12 23:11:01 +08:00
Ricter Z
16e8ce6f45 0.2.15 2018-08-12 22:48:26 +08:00
Ricter Z
0632826827 download by tagname #15 2018-08-12 22:43:36 +08:00
Ricter Z
8d2cd1974b fix unicodeerror on python3 2018-08-12 18:04:36 +08:00
Ricter Zheng
8c176cd2ad
Update README.md 2018-08-11 09:47:32 +08:00
Ricter Zheng
f2c88e8ade
Update README.md 2018-08-11 09:46:46 +08:00
Ricter Zheng
2300744c5c
Update README.md 2018-08-11 09:46:04 +08:00
Ricter Zheng
7f30c84eff
Update README.md 2018-08-11 09:45:04 +08:00
Ricter Z
dda849b770 remove python3.7 2018-08-11 09:32:35 +08:00
Ricter Z
14b3c82248 remove \r 2018-08-11 09:28:39 +08:00
Ricter Z
4577e9df9a fix 2018-08-11 09:24:16 +08:00
Ricter Z
de157ccb7f Merge branch 'master' of github.com:RicterZ/nhentai 2018-08-11 09:19:31 +08:00
Ricter Z
126bbe8d49 add a test 2018-08-11 09:18:00 +08:00
Ricter Z
8546b9e759 fix bug #24 2018-08-11 09:17:05 +08:00
Ricter Z
6ff9751c30 fix 2018-07-01 12:50:37 +08:00
Ricter Z
ddc4a20251 0.2.12 2018-07-01 12:48:30 +08:00
Ricter Z
206aa3710a fix bug 2018-07-01 12:48:05 +08:00
Ricter Z
b5b201f61c 🍻 2018-07-01 02:15:26 +08:00
Ricter Zheng
eb8b41cd1d
Merge pull request #22 from Pizzacus/master
Rework the HTML Viewer
2018-06-03 22:53:00 +08:00
Pizzacus
98bf88d638
Actually use MANIFEST.in to specify the package data
*considers suicide*
2018-06-03 11:32:06 +02:00
Pizzacus
0bc83982e4
Add the viewer to the package_data entry 2018-06-03 11:09:46 +02:00
Pizzacus
99edcef9ac
Rework the HTML Viewer
* More modern and efficient code, particularly for the JS
* Also the layout is better, with flexboxes and all
* The CSS and JS have their own files
* The sidebar has proper margins around the images
* You can use A + D and the arrow keys to navigate images, like on nhentai
* Images with a lot of width are properly sized
* There is a page counter on the bottom left
2018-06-02 23:22:37 +02:00
Ricter Zheng
3ddd474aab
Merge pull request #21 from mentaterasmus/master
fixing issue 16 and adding functionalities
2018-05-15 23:17:10 +08:00
Mentat Erasmus
f2573d5f10 fixing indentation 2018-05-14 01:52:38 -03:00
Mentat Erasmus
147eec57cf fixing issue 16 and adding functionalities 2018-05-09 15:42:12 -03:00
Ricter Z
f316c3243b 0.2.12 2018-04-19 17:29:23 +08:00
Ricter Z
967e0b4ff5 fix #18 #19 use nhentai api 2018-04-19 17:21:43 +08:00
Ricter Z
22cf2592dd 0.2.11 2018-03-16 23:48:58 +08:00
Ricter Z
caa0753adb fix bug #13 2018-03-16 23:45:05 +08:00
Ricter Z
0e14dd62d5 fix bug #13 2018-03-16 23:42:24 +08:00
Ricter Z
7c9693785e fix #14 2018-03-16 23:39:04 +08:00
Ricter Z
08ad73b683 fix bug #13 2018-03-16 23:33:16 +08:00
Ricter Z
a56d3ca18c fix bug #13 2018-03-16 23:23:25 +08:00
Ricter Z
c1975897d2 save downloaded doujinshi as doujinshi name #13 2018-03-16 23:16:26 +08:00
Ricter Z
4ed596ff98 download user fav 2018-03-05 21:47:27 +08:00
Ricter Z
debf287fb0 download user fav 2018-03-05 21:45:56 +08:00
Ricter Zheng
308c5277b8
Merge pull request #12 from RomaniukVadim/master
Add install for Gentoo
2018-03-03 19:33:23 +08:00
Romaniuk Vadim
b425c883c7 Add install for Gentoo 2018-03-02 17:18:22 +02:00
Ricter Z
7bf9507bd2 0.2.10 2018-01-09 16:05:52 +08:00
Ricter Z
5f5245f70f fix bug 2018-01-09 16:02:16 +08:00
Ricter Z
45fb35b950 fix bug and add --html 2018-01-01 17:44:55 +08:00
Ricter Z
2271b83d93 0.2.8 2017-08-19 00:50:38 +08:00
Ricter Z
0ee000edeb sort #10 2017-08-19 00:48:53 +08:00
Ricter Z
a47359f411 tiny bug 2017-07-06 15:41:33 +08:00
Ricter Z
48c6fadc98 add viewer image 2017-06-18 16:48:54 +08:00
Ricter Z
dbc834ea2e 0.2.7 2017-06-18 14:25:00 +08:00
Ricter Z
71177ff94e 0.2.6 2017-06-18 14:19:28 +08:00
Ricter Z
d1ed9b6980 add html doujinshi viewer 2017-06-18 14:19:07 +08:00
Ricter Z
42a09e2c1e fix timeout 2017-03-17 20:19:40 +08:00
Ricter Z
e306d50b7e fix bug 2017-03-17 20:14:42 +08:00
Ricter Z
043f391d04 fix https error 2016-11-23 22:45:03 +08:00
Ricter Z
9549c5f5a2 fix bug 2016-11-23 22:35:56 +08:00
Ricter Z
5592b30be4 do not download 404 2016-11-23 22:11:47 +08:00
Ricter Z
12f7b2225b 0.2.2 2016-10-19 22:23:02 +08:00
Ricter Z
b0e71c9a6c remove windows 2016-10-19 22:16:56 +08:00
Ricter Z
ad64a5685a try ... except for reload 2016-10-19 22:09:43 +08:00
Ricter Z
6bd0a6b96a add unicode prefix 2016-10-19 22:08:18 +08:00
Ricter Z
3a80c233d5 remove darwin 2016-10-19 22:03:38 +08:00
Ricter Z
69e0d1d6f1 fix bug of unicode in optparse 2016-10-19 21:21:03 +08:00
Ricter Zheng
c300a2777f Update README.md 2016-10-19 13:14:21 +08:00
Ricter Z
d0d7fb7015 0.2.1 2016-10-19 13:06:27 +08:00
Ricter Z
4ed91db60a remove emoji windows 2016-10-19 13:04:36 +08:00
Ricter Z
4c11288d63 add test 2016-10-19 13:01:46 +08:00
Ricter Z
de476aac46 qwq 2016-10-19 13:00:59 +08:00
Ricter Z
a3fb75eb11 fix bug in logger 2016-10-19 12:55:09 +08:00
Ricter Z
bb5024f1d7 logger on windows 2016-10-19 12:50:30 +08:00
Ricter Z
da5b860e5f fix bug in python3 2016-10-19 12:20:14 +08:00
Ricter Z
8b63d41cbb fix unicodecodeerror on windows 2016-10-19 12:18:42 +08:00
Ricter Z
55d24883be update 2016-10-19 12:07:39 +08:00
Ricter Z
8f3bdc73bf unicode literals 2016-10-19 11:07:48 +08:00
Ricter Z
cc2f0521b3 urlparse for python3 (if I get this wrong again I'll livestream myself eating shit) 2016-10-17 21:56:53 +08:00
Ricter Z
795e8b2bb8 urlparse for python3 2016-10-17 21:53:44 +08:00
Ricter Z
97b2ba8fd2 urlparse for python3 2016-10-17 21:52:06 +08:00
Ricter Z
6858bacd41 urlparse for python3 2016-10-17 21:50:07 +08:00
Ricter Z
148b4a1a08 urlparse for python3 2016-10-17 21:48:21 +08:00
Ricter Z
3ba8f62fe2 update test 2016-10-17 21:44:53 +08:00
Ricter Z
16d3b555c9 update usages 2016-10-17 21:43:40 +08:00
Ricter Z
0d185f465d forgot what this was for.. 2016-10-17 21:26:58 +08:00
Ricter Z
3eacd118ed change the way of download 2016-10-17 21:00:28 +08:00
Ricter Z
e42f42d7db use https 2016-08-13 20:03:40 +08:00
Ricter Z
fd0b53ee36 🐶 2016-08-11 22:37:06 +08:00
Ricter Z
35fec2e1f4 take the warning of bs4 off 2016-08-11 22:32:34 +08:00
Ricter Z
40e880cf77 modify test case 2016-08-11 22:26:03 +08:00
Ricter Z
2f756ecb5b get nhentai url from env 2016-08-11 22:25:10 +08:00
Ricter Z
441317c28c use nhentai mirror 2016-08-11 21:10:11 +08:00
Ricter Zheng
8442f00c6c Merge pull request #5 from navrudh/master
Python3 compatibility and new Singleton implementation
2016-08-11 21:07:02 +08:00
Dhruvan Ganesh
43e59b724a Update .travis.yml 2016-08-10 20:05:15 +05:30
Dhruvan Ganesh
5d6a773460 bumping major version due to dependency changes 2016-08-10 16:12:28 +05:30
Dhruvan Ganesh
9fe43dc219 project is now Py3 and Py2 compatible 2016-08-10 16:11:52 +05:30
Dhruvan Ganesh
0f89ff4d63 implemented Py2 & Py3 compatible Singleton 2016-08-10 16:11:10 +05:30
Ricter Zheng
5bb98aa007 Merge pull request #4 from navrudh/patch-1
Padding filenames with width 3
2016-08-10 15:21:43 +08:00
Dhruvan Ganesh
a4ac1c9720 Padding filenames with width 3
Pad the filenames with a width of 3 characters so that image viewers display files in the expected order.
2016-08-10 12:16:15 +05:30
Ricter Z
8d25673180 nhentai origin 2016-07-03 17:06:22 +08:00
Ricter Z
aab92bbc8e nhentai mirror 2016-07-03 17:02:47 +08:00
Ricter Z
2b52e300d4 use nhentai.net as default domain 2016-05-20 09:54:23 +08:00
Ricter Z
6e3299a08d pep8 2016-05-18 21:23:07 +08:00
Ricter Z
e598c8686a modify constant.py 2016-05-18 21:22:27 +08:00
Ricter Z
dd7b2d493e fix bug of return value 2016-05-02 16:03:28 +08:00
Ricter Z
3d481dbf13 fix bug of setup 2016-05-02 15:57:17 +08:00
Ricter Z
3a52e8a8bc support python2.6 2016-05-02 15:55:14 +08:00
Ricter Z
bf8205efbe update README.md 2016-05-02 15:47:44 +08:00
Ricter Z
e3980b0696 0.1.4 2016-05-02 15:44:23 +08:00
42 changed files with 3377 additions and 380 deletions

11
.dockerignore Normal file

@ -0,0 +1,11 @@
.git
.github
.gitignore
venv
*.egg-info
build
dist
images
LICENSE
.travis.yml
.idea

27
.github/workflows/docker-image.yml vendored Normal file

@ -0,0 +1,27 @@
name: Docker Image CI
on:
push:
branches: [ "master" ]
jobs:
build:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
-
name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
-
name: Build the Docker image
uses: docker/build-push-action@v4
with:
context: .
push: true
tags: ricterz/nhentai:latest

7
.gitignore vendored

@ -4,4 +4,9 @@ build
dist/
*.egg-info
.python-version
.DS_Store
output/
venv/
.vscode/
test-output
*.whl

.travis.yml

@ -1,15 +0,0 @@
os:
- linux
- os x
language: python
python:
- 2.7
- 2.6
install:
- python setup.py install
script:
- nhentai --search umaru
- nhentai --ids=152503,146134 -t 10 --download --path=/tmp/

9
Dockerfile Normal file

@ -0,0 +1,9 @@
FROM python:3
WORKDIR /usr/src/nhentai
COPY . .
RUN pip install --no-cache-dir .
WORKDIR /output
ENTRYPOINT ["nhentai"]

MANIFEST.in

@ -1,2 +0,0 @@
include README.md
include requirements.txt

README.md

@ -1,51 +0,0 @@
nhentai
=======
_ _ _ _
_ __ | | | | ___ _ __ | |_ __ _(_)
| '_ \| |_| |/ _ \ '_ \| __/ _` | |
| | | | _ | __/ | | | || (_| | |
|_| |_|_| |_|\___|_| |_|\__\__,_|_|
あなたも変態。 いいね?
Since torrents from [http://nhentai.net](http://nhentai.net) download very slowly, and the site also offers online reading, this script can be used to download doujinshi instead.
### Installation
git clone https://github.com/RicterZ/nhentai
cd nhentai
python setup.py install
### Usage
+ Download the doujinshi with a given id:
nhentai --id=123855 --download
+ Download the doujinshi with a list of ids:
nhentai --ids=123855,123866 --download
+ Download the first page of results for a keyword (not recommended):
nhentai --search="tomori" --page=1 --download
`-t, --thread` sets the number of download threads, up to 10.
`--path` sets the output path for downloaded files; defaults to the current directory.
`--timeout` sets the timeout for downloading images, in seconds; defaults to 30.
`--proxy` sets a proxy for downloading, e.g.: http://127.0.0.1:8080/
![](./images/search.png)
![](./images/download.png)
### License
MIT
### あなたも変態
![](./images/image.jpg)

265
README.rst Normal file

@ -0,0 +1,265 @@
nhentai
=======
あなたも変態。 いいね?
|travis|
|pypi|
|version|
|license|
nhentai is a CLI tool for downloading doujinshi from `nhentai.net <https://nhentai.net>`_
GUI version: `https://github.com/edgar1016/nhentai-GUI <https://github.com/edgar1016/nhentai-GUI>`_
===================
Manual Installation
===================
From Github:
.. code-block::
git clone https://github.com/RicterZ/nhentai
cd nhentai
pip install --no-cache-dir .
Build Docker container:
.. code-block::
git clone https://github.com/RicterZ/nhentai
cd nhentai
docker build -t nhentai:latest .
docker run --rm -it -v ~/Downloads/doujinshi:/output -v ~/.nhentai/:/root/.nhentai nhentai --id 123855
==================
Installation
==================
From PyPI with pip:
.. code-block::
pip install nhentai
For a self-contained installation, use `pipx <https://github.com/pipxproject/pipx/>`_:
.. code-block::
pipx install nhentai
Pull from Dockerhub:
.. code-block::
docker pull ricterz/nhentai
docker run --rm -it -v ~/Downloads/doujinshi:/output -v ~/.nhentai/:/root/.nhentai ricterz/nhentai --id 123855
On Gentoo Linux:
.. code-block::
layman -fa glibOne
sudo emerge net-misc/nhentai
On NixOS:
.. code-block::
nix-env -iA nixos.nhentai
=====
Usage
=====
**⚠IMPORTANT⚠**: To bypass the nhentai frequency limit, you should use the `--cookie` and `--useragent` options to store your cookie and user agent.
.. code-block:: bash
nhentai --useragent "USER AGENT of YOUR BROWSER"
nhentai --cookie "YOUR COOKIE FROM nhentai.net"
**NOTE:**
- The format of the cookie is `"csrftoken=TOKEN; sessionid=ID; cf_clearance=CLOUDFLARE"`
- The `cf_clearance` cookie and useragent must be set if you encounter a "blocked by Cloudflare captcha" error. Make sure you use the same IP address and useragent as when you obtained them
| To get csrftoken and sessionid, first login to your nhentai account in web browser, then:
| (Chrome) |ve| |ld| More tools |ld| Developer tools |ld| Application |ld| Storage |ld| Cookies |ld| https://nhentai.net
| (Firefox) |hv| |ld| Web Developer |ld| Web Developer Tools |ld| Storage |ld| Cookies |ld| https://nhentai.net
|
.. |hv| unicode:: U+2630 .. https://www.compart.com/en/unicode/U+2630
.. |ve| unicode:: U+22EE .. https://www.compart.com/en/unicode/U+22EE
.. |ld| unicode:: U+2014 .. https://www.compart.com/en/unicode/U+2014
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/usage.png
:alt: nhentai
:align: center
*The default download folder will be the path where you run the command (%cd% or $PWD).*
Download specified doujinshi:
.. code-block:: bash
nhentai --id 123855 123866 123877
Download doujinshi with ids specified in a file (doujinshi ids split by line):
.. code-block:: bash
nhentai --file=doujinshi.txt
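The file is plain text with one numeric doujinshi id per line (non-numeric lines are ignored, per the `--file` handling in the `nhentai/cmdline.py` diff below). A hypothetical ``doujinshi.txt``:
.. code-block::
123855
123866
123877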
Set search default language
.. code-block:: bash
nhentai --language=english
Search a keyword and download the first page:
.. code-block:: bash
nhentai --search="tomori" --page=1 --download
# you also can download by tags and multiple keywords
nhentai --search="tag:lolicon, artist:henreader, tag:full color"
nhentai --search="lolicon, henreader, full color"
Download your favorites with delay:
.. code-block:: bash
nhentai --favorites --download --delay 1 --page 3-5,7
Format output doujinshi folder name:
.. code-block:: bash
nhentai --id 261100 --format '[%i]%s'
# for Windows
nhentai --id 261100 --format "[%%i]%%s"
Supported doujinshi folder formatter:
- %i: Doujinshi id
- %f: Doujinshi favorite count
- %t: Doujinshi name
- %s: Doujinshi subtitle (translated name)
- %a: Doujinshi authors' name
- %g: Doujinshi groups name
- %p: Doujinshi pretty name
- %ag: Doujinshi authors name or groups name
Note: on the Windows operating system, please use a double "%", such as "%%i".
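For illustration, with the default format ``[%i][%a][%t]`` and a hypothetical doujinshi whose id is 261100, artist is "foo" and title is "bar", the saved folder would be named:
.. code-block::
[261100][foo][bar]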
Other options:
.. code-block::
Usage:
nhentai --search [keyword] --download
NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]
nhentai --file [filename]
Environment Variable:
NHENTAI nhentai mirror url
Options:
-h, --help show this help message and exit
-D, --download download doujinshi (for search results)
-S, --show just show the doujinshi information
--id doujinshi ids set, e.g. 167680 167681 167682
-s KEYWORD, --search=KEYWORD
search doujinshi by keyword
-F, --favorites list or download your favorites
-a ARTIST, --artist=ARTIST
list doujinshi by artist name
--page-all all search results
--page=PAGE, --page-range=PAGE
page number of search results. e.g. 1,2-5,14
--sorting=SORTING, --sort=SORTING
sorting of doujinshi (recent / popular /
popular-[today|week])
-o OUTPUT_DIR, --output=OUTPUT_DIR
output dir
-t THREADS, --threads=THREADS
thread count for downloading doujinshi
-T TIMEOUT, --timeout=TIMEOUT
timeout for downloading doujinshi
-d DELAY, --delay=DELAY
delay, in seconds, between downloading each doujinshi
--retry=RETRY retry times when downloading failed
--exit-on-fail exit on fail to prevent generating incomplete files
--proxy=PROXY store a proxy, for example: -p "http://127.0.0.1:1080"
-f FILE, --file=FILE read gallery IDs from file.
--format=NAME_FORMAT format the saved folder name
--dry-run Dry run, skip file download
--html generate a html viewer at current directory
--no-html don't generate HTML after downloading
--gen-main generate a main viewer containing all the doujinshi
in the folder
-C, --cbz generate Comic Book CBZ File
-P, --pdf generate PDF file
--rm-origin-dir remove downloaded doujinshi dir when generated CBZ or
PDF file
--move-to-folder remove files in doujinshi dir then move new file to
folder when generated CBZ or PDF file
--meta generate a metadata file in doujinshi format
--regenerate regenerate the cbz or pdf file if exists
--cookie=COOKIE set cookie of nhentai to bypass Cloudflare captcha
--useragent=USERAGENT, --user-agent=USERAGENT
set useragent to bypass Cloudflare captcha
--language=LANGUAGE set default language to parse doujinshis
--clean-language set DEFAULT as language to parse doujinshis
--save-download-history
save downloaded doujinshi, which will be skipped if
you re-download them
--clean-download-history
clean download history
--template=VIEWER_TEMPLATE
set viewer template
--legacy use legacy searching method
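The ``--page``/``--page-range`` syntax above (e.g. ``1,2-5,14``) expands comma-separated entries and dashed ranges into a flat list of page numbers. The real parser is the ``paging`` helper imported from ``nhentai.utils``, which this diff does not show; a minimal sketch of the documented syntax:
.. code-block:: python
def paging(page_string):
    # '1,2-5,14' -> [1, 2, 3, 4, 5, 14]
    pages = []
    for part in page_string.split(','):
        if '-' in part:
            start, end = part.split('-')
            pages.extend(range(int(start), int(end) + 1))
        else:
            pages.append(int(part))
    return sorted(set(pages))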
==============
nHentai Mirror
==============
If you want to use a mirror, you should set up a reverse proxy of `nhentai.net` and `i.nhentai.net`.
For example:
.. code-block::
i.h.loli.club -> i.nhentai.net
i3.h.loli.club -> i3.nhentai.net
i5.h.loli.club -> i5.nhentai.net
i7.h.loli.club -> i7.nhentai.net
h.loli.club -> nhentai.net
Set `NHENTAI` env var to your nhentai mirror.
.. code-block:: bash
NHENTAI=https://h.loli.club nhentai --id 123456
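As the ``nhentai/constant.py`` diff below shows, the image hosts are derived from the scheme and hostname of the mirror URL, so a mirror set via ``NHENTAI`` automatically applies to the ``i1``..``i7`` image subdomains as well. A minimal sketch of that derivation:
.. code-block:: python
from urllib.parse import urlparse

base_url = 'https://h.loli.club'  # taken from the NHENTAI environment variable
parsed = urlparse(base_url)

# primary image host plus numbered mirrors, mirroring nhentai/constant.py
image_url = f'{parsed.scheme}://i1.{parsed.hostname}/galleries'
image_url_mirrors = [f'{parsed.scheme}://i{n}.{parsed.hostname}' for n in range(2, 8)]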
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/search.png
:alt: nhentai
:align: center
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/download.png
:alt: nhentai
:align: center
.. image:: https://github.com/RicterZ/nhentai/raw/master/images/viewer.png
:alt: nhentai
:align: center
.. |travis| image:: https://travis-ci.org/RicterZ/nhentai.svg?branch=master
:target: https://travis-ci.org/RicterZ/nhentai
.. |pypi| image:: https://img.shields.io/pypi/dm/nhentai.svg
:target: https://pypi.org/project/nhentai/
.. |version| image:: https://img.shields.io/pypi/v/nhentai
:target: https://pypi.org/project/nhentai/
.. |license| image:: https://img.shields.io/github/license/ricterz/nhentai.svg
:target: https://github.com/RicterZ/nhentai/blob/master/LICENSE

images/download.png (binary, not shown): 541 KiB before, 1.0 MiB after
images/image.jpg (binary, deleted): 34 KiB before
images/search.png (binary, not shown): 658 KiB before, 991 KiB after
images/usage.png (binary, new file): 679 KiB
images/viewer.png (binary, new file): 1.9 MiB

nhentai/__init__.py

@ -1,3 +1,3 @@
__version__ = '0.1.3'
__author__ = 'Ricter'
__version__ = '0.6.0-beta'
__author__ = 'RicterZ'
__email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@ -1,72 +1,272 @@
# coding: utf-8
from __future__ import print_function
from optparse import OptionParser
from logger import logger
try:
from itertools import ifilter as filter
except ImportError:
pass
import os
import sys
import json
import nhentai.constant as constant
import constant
from urllib.parse import urlparse
from argparse import ArgumentParser
from nhentai import __version__
from nhentai.utils import generate_html, generate_main_html, DB, EXTENSIONS
from nhentai.logger import logger
from nhentai.constant import PATH_SEPARATOR
def banner():
logger.info('''nHentai: あなたも変態。 いいね?
_ _ _ _
_ __ | | | | ___ _ __ | |_ __ _(_)
| '_ \| |_| |/ _ \ '_ \| __/ _` | |
| | | | _ | __/ | | | || (_| | |
|_| |_|_| |_|\___|_| |_|\__\__,_|_|
''')
logger.debug(f'nHentai ver {__version__}: あなたも変態。 いいね?')
def load_config():
if not os.path.exists(constant.NHENTAI_CONFIG_FILE):
return
try:
with open(constant.NHENTAI_CONFIG_FILE, 'r') as f:
constant.CONFIG.update(json.load(f))
except json.JSONDecodeError:
logger.error('Failed to load config file.')
write_config()
def write_config():
if not os.path.exists(constant.NHENTAI_HOME):
os.mkdir(constant.NHENTAI_HOME)
with open(constant.NHENTAI_CONFIG_FILE, 'w') as f:
f.write(json.dumps(constant.CONFIG))
def callback(option, _opt_str, _value, parser):
if option == '--id':
pass
value = []
for arg in parser.rargs:
if arg.isdigit():
value.append(int(arg))
elif arg.startswith('-'):
break
else:
logger.warning(f'Ignore invalid id {arg}')
setattr(parser.values, option.dest, value)
def cmd_parser():
parser = OptionParser()
parser.add_option('--download', dest='is_download', action='store_true', help='download doujinshi or not')
parser.add_option('--id', type='int', dest='id', action='store', help='doujinshi id of nhentai')
parser.add_option('--ids', type='str', dest='ids', action='store', help='doujinshi id set, e.g. 1,2,3')
parser.add_option('--search', type='string', dest='keyword', action='store', help='keyword searched')
parser.add_option('--page', type='int', dest='page', action='store', default=1,
help='page number of search result')
parser.add_option('--path', type='string', dest='saved_path', action='store', default='',
help='path which save the doujinshi')
parser.add_option('--threads', '-t', type='int', dest='threads', action='store', default=5,
help='thread count of download doujinshi')
parser.add_option('--timeout', type='int', dest='timeout', action='store', default=30,
help='timeout of download doujinshi')
parser.add_option('--proxy', type='string', dest='proxy', action='store', default='',
help='use proxy, example: http://127.0.0.1:1080')
args, _ = parser.parse_args()
load_config()
if args.ids:
_ = map(lambda id: id.strip(), args.ids.split(','))
args.ids = set(map(int, filter(lambda id: id.isdigit(), _)))
parser = ArgumentParser(
description='\n nhentai --search [keyword] --download'
'\n NHENTAI=https://nhentai-mirror-url/ nhentai --id [ID ...]'
'\n nhentai --file [filename]'
'\n\nEnvironment Variable:\n'
' NHENTAI nhentai mirror url'
)
if args.is_download and not args.id and not args.ids and not args.keyword:
logger.critical('Doujinshi id/ids is required for downloading')
# operation options
parser.add_argument('--download', '-D', dest='is_download', action='store_true',
help='download doujinshi (for search results)')
parser.add_argument('--no-download', dest='no_download', action='store_true', default=False,
help='do not download doujinshi (for search results)')
parser.add_argument('--show', '-S', dest='is_show', action='store_true',
help='just show the doujinshi information')
# doujinshi options
parser.add_argument('--id', dest='id', nargs='+', type=int,
help='doujinshi ids set, e.g. 167680 167681 167682')
parser.add_argument('--search', '-s', type=str, dest='keyword',
help='search doujinshi by keyword')
parser.add_argument('--favorites', '-F', action='store_true', dest='favorites',
help='list or download your favorites')
parser.add_argument('--artist', '-a', type=str, dest='artist',
help='list doujinshi by artist name')
# page options
parser.add_argument('--page-all', dest='page_all', action='store_true', default=False,
help='all search results')
parser.add_argument('--page', '--page-range', type=str, dest='page',
help='page number of search results. e.g. 1,2-5,14')
parser.add_argument('--sorting', '--sort', dest='sorting', type=str, default='popular',
help='sorting of doujinshi (recent / popular / popular-[today|week])',
choices=['recent', 'popular', 'popular-today', 'popular-week', 'date'])
# download options
parser.add_argument('--output', '-o', type=str, dest='output_dir', default='.',
help='output dir')
parser.add_argument('--threads', '-t', type=int, dest='threads', default=5,
help='thread count for downloading doujinshi')
parser.add_argument('--timeout', '-T', type=int, dest='timeout', default=30,
help='timeout for downloading doujinshi')
parser.add_argument('--delay', '-d', type=int, dest='delay', default=0,
help='delay, in seconds, between downloading each doujinshi')
parser.add_argument('--retry', type=int, dest='retry', default=3,
help='retry times when downloading failed')
parser.add_argument('--exit-on-fail', dest='exit_on_fail', action='store_true', default=False,
help='exit on fail to prevent generating incomplete files')
parser.add_argument('--proxy', type=str, dest='proxy',
help='store a proxy, for example: -p "http://127.0.0.1:1080"')
parser.add_argument('--file', '-f', type=str, dest='file',
help='read gallery IDs from file.')
parser.add_argument('--format', type=str, dest='name_format', default='[%i][%a][%t]',
help='format the saved folder name')
parser.add_argument('--no-filename-padding', action='store_true', dest='no_filename_padding',
default=False, help='no padding in the images filename, such as \'001.jpg\'')
# generate options
parser.add_argument('--html', dest='html_viewer', type=str, nargs='?', const='.',
help='generate an HTML viewer in the specified directory, or scan all subfolders '
'within the entire directory to generate the HTML viewer. By default, current '
'working directory is used.')
parser.add_argument('--no-html', dest='is_nohtml', action='store_true',
help='don\'t generate HTML after downloading')
parser.add_argument('--gen-main', dest='main_viewer', action='store_true',
help='generate a main viewer containing all the doujinshi in the folder')
parser.add_argument('--cbz', '-C', dest='is_cbz', action='store_true',
help='generate Comic Book CBZ File')
parser.add_argument('--pdf', '-P', dest='is_pdf', action='store_true',
help='generate PDF file')
parser.add_argument('--meta', dest='generate_metadata', action='store_true', default=False,
help='generate a metadata file in doujinshi format')
parser.add_argument('--update-meta', dest='update_metadata', action='store_true', default=False,
help='update the metadata file of a doujinshi, update CBZ metadata if exists')
parser.add_argument('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
help='remove downloaded doujinshi dir when generated CBZ or PDF file')
parser.add_argument('--move-to-folder', dest='move_to_folder', action='store_true', default=False,
help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file')
parser.add_argument('--regenerate', dest='regenerate', action='store_true', default=False,
help='regenerate the cbz or pdf file if exists')
parser.add_argument('--zip', action='store_true', help='Package into a single zip file')
# nhentai options
parser.add_argument('--cookie', type=str, dest='cookie',
help='set cookie of nhentai to bypass Cloudflare captcha')
parser.add_argument('--useragent', '--user-agent', type=str, dest='useragent',
help='set useragent to bypass Cloudflare captcha')
parser.add_argument('--language', type=str, dest='language',
help='set default language to parse doujinshis')
parser.add_argument('--clean-language', dest='clean_language', action='store_true', default=False,
help='set DEFAULT as language to parse doujinshis')
parser.add_argument('--save-download-history', dest='is_save_download_history', action='store_true',
default=False, help='save downloaded doujinshi, which will be skipped if you re-download them')
parser.add_argument('--clean-download-history', action='store_true', default=False, dest='clean_download_history',
help='clean download history')
parser.add_argument('--template', dest='viewer_template', type=str, default='',
help='set viewer template')
parser.add_argument('--legacy', dest='legacy', action='store_true', default=False,
help='use legacy searching method')
args = parser.parse_args()
if args.html_viewer:
if not os.path.exists(args.html_viewer):
logger.error(f'Path \'{args.html_viewer}\' does not exist')
sys.exit(1)
for root, dirs, files in os.walk(args.html_viewer):
if not dirs:
generate_html(output_dir=args.html_viewer, template=constant.CONFIG['template'])
sys.exit(0)
for dir_name in dirs:
# it will scan the entire subdirectories
doujinshi_dir = os.path.join(root, dir_name)
items = set(map(lambda s: os.path.splitext(s)[1], os.listdir(doujinshi_dir)))
# skip directory without any images
if items & set(EXTENSIONS):
generate_html(output_dir=doujinshi_dir, template=constant.CONFIG['template'])
sys.exit(0)
sys.exit(0)
if args.main_viewer and not args.id and not args.keyword and not args.favorites:
generate_main_html()
sys.exit(0)
if args.clean_download_history:
with DB() as db:
db.clean_all()
logger.info('Download history cleaned.')
sys.exit(0)
# --- set config ---
if args.cookie is not None:
constant.CONFIG['cookie'] = args.cookie.strip()
write_config()
logger.info('Cookie saved.')
if args.useragent is not None:
constant.CONFIG['useragent'] = args.useragent.strip()
write_config()
logger.info('User-Agent saved.')
if args.language is not None:
constant.CONFIG['language'] = args.language
write_config()
logger.info(f'Default language now set to "{args.language}"')
# TODO: search without language
if any([args.cookie, args.useragent, args.language]):
sys.exit(0)
if args.proxy is not None:
proxy_url = urlparse(args.proxy)
if not args.proxy == '' and proxy_url.scheme not in ('http', 'https', 'socks5', 'socks5h',
'socks4', 'socks4a'):
logger.error(f'Invalid protocol "{proxy_url.scheme}" of proxy, ignored')
sys.exit(0)
else:
constant.CONFIG['proxy'] = args.proxy
logger.info(f'Proxy now set to "{args.proxy}"')
write_config()
sys.exit(0)
if args.viewer_template is not None:
if not args.viewer_template:
args.viewer_template = 'default'
if not os.path.exists(os.path.join(os.path.dirname(__file__),
f'viewer/{args.viewer_template}/index.html')):
logger.error(f'Template "{args.viewer_template}" does not exist')
sys.exit(1)
else:
constant.CONFIG['template'] = args.viewer_template
write_config()
# --- end set config ---
if args.favorites:
if not constant.CONFIG['cookie']:
logger.warning('Cookie has not been set, please use `nhentai --cookie \'COOKIE\'` to set it.')
sys.exit(1)
if args.file:
with open(args.file, 'r') as f:
_ = [i.strip() for i in f.readlines()]
args.id = set(int(i) for i in _ if i.isdigit())
if (args.is_download or args.is_show) and not args.id and not args.keyword and not args.favorites and not args.artist:
logger.critical('Doujinshi id(s) are required for downloading')
parser.print_help()
raise SystemExit
sys.exit(1)
if args.id:
args.ids = (args.id, ) if not args.ids else args.ids
if not args.keyword and not args.ids:
if not args.keyword and not args.id and not args.favorites and not args.artist:
parser.print_help()
raise SystemExit
sys.exit(1)
if args.threads <= 0:
args.threads = 1
elif args.threads > 10:
logger.critical('Maximum number of used threads is 10')
raise SystemExit
if args.proxy:
import urlparse
proxy_url = urlparse.urlparse(args.proxy)
if proxy_url.scheme not in ('http', 'https'):
logger.error('Invalid protocol \'{}\' of proxy, ignored'.format(proxy_url.scheme))
else:
constant.PROXY = {proxy_url.scheme: args.proxy}
elif args.threads > 15:
logger.critical('Maximum number of used threads is 15')
sys.exit(1)
return args

nhentai/command.py

@ -1,52 +1,160 @@
#!/usr/bin/env python2.7
# coding: utf-8
import os
import shutil
import sys
import signal
from cmdline import cmd_parser, banner
from parser import doujinshi_parser, search_parser, print_doujinshi
from doujinshi import Doujinshi
from downloader import Downloader
from logger import logger
import platform
import urllib3.exceptions
from nhentai import constant
from nhentai.cmdline import cmd_parser, banner, write_config
from nhentai.parser import doujinshi_parser, search_parser, legacy_search_parser, print_doujinshi, favorites_parser
from nhentai.doujinshi import Doujinshi
from nhentai.downloader import Downloader, CompressedDownloader
from nhentai.logger import logger
from nhentai.constant import BASE_URL
from nhentai.utils import generate_html, generate_doc, generate_main_html, generate_metadata, \
paging, check_cookie, signal_handler, DB, move_to_folder
def main():
banner()
if sys.version_info < (3, 0, 0):
logger.error('nhentai now only support Python 3.x')
sys.exit(1)
options = cmd_parser()
logger.info(f'Using mirror: {BASE_URL}')
# CONFIG['proxy'] will be changed after cmd_parser()
if constant.CONFIG['proxy']:
if isinstance(constant.CONFIG['proxy'], dict):
constant.CONFIG['proxy'] = constant.CONFIG['proxy'].get('http', '')
logger.warning(f'Update proxy config to: {constant.CONFIG["proxy"]}')
write_config()
logger.info(f'Using proxy: {constant.CONFIG["proxy"]}')
if not constant.CONFIG['template']:
constant.CONFIG['template'] = 'default'
logger.info(f'Using viewer template "{constant.CONFIG["template"]}"')
# check your cookie
check_cookie()
doujinshis = []
doujinshi_ids = []
doujinshi_list = []
if options.keyword:
doujinshis = search_parser(options.keyword, options.page)
page_list = paging(options.page)
if options.retry:
constant.RETRY_TIMES = int(options.retry)
if options.favorites:
if not options.is_download:
logger.warning('You do not specify --download option')
doujinshis = favorites_parser(page=page_list) if options.page else favorites_parser()
elif options.keyword:
if constant.CONFIG['language']:
logger.info(f'Using default language: {constant.CONFIG["language"]}')
options.keyword += f' language:{constant.CONFIG["language"]}'
_search_parser = legacy_search_parser if options.legacy else search_parser
doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list,
is_page_all=options.page_all)
elif options.artist:
doujinshis = legacy_search_parser(options.artist, sorting=options.sorting, page=page_list,
is_page_all=options.page_all, type_='ARTIST')
elif not doujinshi_ids:
doujinshi_ids = options.id
print_doujinshi(doujinshis)
if options.is_download:
doujinshi_ids = map(lambda d: d['id'], doujinshis)
else:
doujinshi_ids = options.ids
if options.is_download and doujinshis:
doujinshi_ids = [i['id'] for i in doujinshis]
if doujinshi_ids:
for id in doujinshi_ids:
doujinshi_info = doujinshi_parser(id)
doujinshi_list.append(Doujinshi(**doujinshi_info))
else:
raise SystemExit
if options.is_save_download_history:
with DB() as db:
data = set(map(int, db.get_all()))
doujinshi_ids = list(set(map(int, doujinshi_ids)) - set(data))
logger.info(f'New doujinshi count: {len(doujinshi_ids)}')
if options.zip:
options.is_nohtml = True
if not options.is_show:
downloader = (CompressedDownloader if options.zip else Downloader)(path=options.output_dir, threads=options.threads,
timeout=options.timeout, delay=options.delay,
exit_on_fail=options.exit_on_fail,
no_filename_padding=options.no_filename_padding)
for doujinshi_id in doujinshi_ids:
doujinshi_info = doujinshi_parser(doujinshi_id)
if doujinshi_info:
doujinshi = Doujinshi(name_format=options.name_format, **doujinshi_info)
else:
continue
if options.is_download:
downloader = Downloader(path=options.saved_path,
thread=options.threads, timeout=options.timeout)
for doujinshi in doujinshi_list:
doujinshi.downloader = downloader
if doujinshi.check_if_need_download(options):
doujinshi.download()
else:
map(lambda doujinshi: doujinshi.show(), doujinshi_list)
logger.info(f'Skipping download because a PDF/CBZ file already exists for doujinshi {doujinshi.name}')
logger.log(15, u'🍺 All done.')
def signal_handler(signal, frame):
logger.error('Ctrl-C signal received. Quit.')
raise SystemExit
if options.generate_metadata:
generate_metadata(options.output_dir, doujinshi)
if options.is_save_download_history:
with DB() as db:
db.add_one(doujinshi.id)
if not options.is_nohtml:
generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template'])
if options.is_cbz:
generate_doc('cbz', options.output_dir, doujinshi, options.regenerate)
if options.is_pdf:
generate_doc('pdf', options.output_dir, doujinshi, options.regenerate)
if options.move_to_folder:
if options.is_cbz:
move_to_folder(options.output_dir, doujinshi, 'cbz')
if options.is_pdf:
move_to_folder(options.output_dir, doujinshi, 'pdf')
if options.rm_origin_dir:
if options.move_to_folder:
logger.critical('You specified both --move-to-folder and --rm-origin-dir options, '
'you will not get anything :(')
shutil.rmtree(os.path.join(options.output_dir, doujinshi.filename), ignore_errors=True)
if options.main_viewer:
generate_main_html(options.output_dir)
if not platform.system() == 'Windows':
logger.log(16, '🍻 All done.')
else:
logger.log(16, 'All done.')
else:
for doujinshi_id in doujinshi_ids:
doujinshi_info = doujinshi_parser(doujinshi_id)
if doujinshi_info:
doujinshi = Doujinshi(name_format=options.name_format, **doujinshi_info)
else:
continue
doujinshi.show()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
signal.signal(signal.SIGINT, signal_handler)
if __name__ == '__main__':

nhentai/constant.py

@ -1,6 +1,73 @@
SCHEMA = 'http://'
URL = '%snhentai.net' % SCHEMA
DETAIL_URL = '%s/g' % URL
SEARCH_URL = '%s/search/' % URL
IMAGE_URL = '%si.nhentai.net/galleries' % SCHEMA
PROXY = {}
# coding: utf-8
import os
import tempfile
from urllib.parse import urlparse
from platform import system
def get_nhentai_home() -> str:
home = os.getenv('HOME', tempfile.gettempdir())
if system() == 'Linux':
xdgdat = os.getenv('XDG_DATA_HOME')
if xdgdat and os.path.exists(os.path.join(xdgdat, 'nhentai')):
return os.path.join(xdgdat, 'nhentai')
if home and os.path.exists(os.path.join(home, '.nhentai')):
return os.path.join(home, '.nhentai')
if xdgdat:
return os.path.join(xdgdat, 'nhentai')
# Use old default path in other systems
return os.path.join(home, '.nhentai')
DEBUG = os.getenv('DEBUG', False)
BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
DETAIL_URL = f'{BASE_URL}/g'
LEGACY_SEARCH_URL = f'{BASE_URL}/search/'
SEARCH_URL = f'{BASE_URL}/api/galleries/search'
ARTIST_URL = f'{BASE_URL}/artist/'
TAG_API_URL = f'{BASE_URL}/api/galleries/tagged'
LOGIN_URL = f'{BASE_URL}/login/'
CHALLENGE_URL = f'{BASE_URL}/challenge'
FAV_URL = f'{BASE_URL}/favorites/'
PATH_SEPARATOR = os.path.sep
RETRY_TIMES = 3
IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i1.{urlparse(BASE_URL).hostname}/galleries'
IMAGE_URL_MIRRORS = [
f'{urlparse(BASE_URL).scheme}://i2.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i4.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i6.{urlparse(BASE_URL).hostname}',
f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}',
]
NHENTAI_HOME = get_nhentai_home()
NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')
__api_suspended_DETAIL_URL = f'{BASE_URL}/api/gallery'
CONFIG = {
'proxy': '',
'cookie': '',
'language': '',
'template': '',
'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
'max_filename': 85
}
LANGUAGE_ISO = {
'english': 'en',
'chinese': 'zh',
'japanese': 'ja',
'translated': 'translated'
}

nhentai/doujinshi.py

@ -1,8 +1,19 @@
# coding: utf-8
from __future__ import print_function
import os
from tabulate import tabulate
from constant import DETAIL_URL, IMAGE_URL
from logger import logger
from nhentai.constant import DETAIL_URL, IMAGE_URL
from nhentai.logger import logger
from nhentai.utils import format_filename
EXT_MAP = {
'j': 'jpg',
'p': 'png',
'g': 'gif',
'w': 'webp',
}
class DoujinshiInfo(dict):
@ -11,47 +22,105 @@ class DoujinshiInfo(dict):
def __getattr__(self, item):
try:
return dict.__getitem__(self, item)
ret = dict.__getitem__(self, item)
return ret if ret else 'Unknown'
except KeyError:
return ''
return 'Unknown'
class Doujinshi(object):
def __init__(self, name=None, id=None, img_id=None, ext='jpg', pages=0, **kwargs):
def __init__(self, name=None, pretty_name=None, id=None, favorite_counts=0, img_id=None,
ext='', pages=0, name_format='[%i][%a][%t]', **kwargs):
self.name = name
self.pretty_name = pretty_name
self.id = id
self.favorite_counts = favorite_counts
self.img_id = img_id
self.ext = ext
self.pages = pages
self.downloader = None
self.url = '%s/%d' % (DETAIL_URL, self.id)
self.url = f'{DETAIL_URL}/{self.id}'
self.info = DoujinshiInfo(**kwargs)
ag_value = self.info.groups if self.info.artists == 'Unknown' else self.info.artists
name_format = name_format.replace('%ag', format_filename(ag_value))
name_format = name_format.replace('%i', format_filename(str(self.id)))
name_format = name_format.replace('%f', format_filename(str(self.favorite_counts)))
name_format = name_format.replace('%a', format_filename(self.info.artists))
name_format = name_format.replace('%g', format_filename(self.info.groups))
name_format = name_format.replace('%t', format_filename(self.name))
name_format = name_format.replace('%p', format_filename(self.pretty_name))
name_format = name_format.replace('%s', format_filename(self.info.subtitle))
self.filename = format_filename(name_format, 255, True)
self.table = [
['Parodies', self.info.parodies],
['Title', self.name],
['Subtitle', self.info.subtitle],
['Date', self.info.date],
['Characters', self.info.characters],
['Authors', self.info.artists],
['Groups', self.info.groups],
['Languages', self.info.languages],
['Tags', self.info.tags],
['Favorite Counts', self.favorite_counts],
['URL', self.url],
['Pages', self.pages],
]
def __repr__(self):
return '<Doujinshi: {}>'.format(self.name)
return f'<Doujinshi: {self.name}>'
def show(self):
table = [
["Doujinshi", self.name],
["Subtitle", self.info.subtitle],
["Characters", self.info.characters],
["Authors", self.info.artists],
["Language", self.info.language],
["Tags", self.info.tags],
["URL", self.url],
["Pages", self.pages],
]
logger.info(u'Print doujinshi information\n{}'.format(tabulate(table)))
logger.info(f'Print doujinshi information of {self.id}\n{tabulate(self.table)}')
def check_if_need_download(self, options):
if options.no_download:
return False
base_path = os.path.join(self.downloader.path, self.filename)
# regenerate, re-download
if options.regenerate:
return True
# pdf or cbz file exists, skip re-download
# doujinshi directory may not exist b/c of --rm-origin-dir option set.
# user should pass --regenerate option to get back origin dir.
ret_pdf = ret_cbz = None
if options.is_pdf:
ret_pdf = os.path.exists(f'{base_path}.pdf') or os.path.exists(f'{base_path}/{self.filename}.pdf')
if options.is_cbz:
ret_cbz = os.path.exists(f'{base_path}.cbz') or os.path.exists(f'{base_path}/{self.filename}.cbz')
ret = list(filter(lambda s: s is not None, [ret_cbz, ret_pdf]))
if ret and all(ret):
return False
# doujinshi directory doesn't exist, re-download
if not (os.path.exists(base_path) and os.path.isdir(base_path)):
return True
# fallback
return True
def download(self):
logger.info('Start download doujinshi: %s' % self.name)
logger.info(f'Starting to download doujinshi: {self.name}')
if self.downloader:
download_queue = []
for i in xrange(1, self.pages + 1):
download_queue.append('%s/%d/%d.%s' % (IMAGE_URL, int(self.img_id), i, self.ext))
self.downloader.download(download_queue, self.id)
if len(self.ext) != self.pages:
logger.warning('Page count and ext count are not equal')
for i in range(1, min(self.pages, len(self.ext)) + 1):
download_queue.append(f'{IMAGE_URL}/{self.img_id}/{i}.{self.ext[i-1]}')
return self.downloader.start_download(download_queue, self.filename)
else:
logger.critical('Downloader has not be loaded')
logger.critical('Downloader has not been loaded')
return False
if __name__ == '__main__':
@ -61,4 +130,4 @@ if __name__ == '__main__':
try:
test.download()
except Exception as e:
print('Exception: %s' % str(e))
print(f'Exception: {e}')

nhentai/downloader.py

@ -1,78 +1,195 @@
# coding: utf-8
import os
import requests
import threadpool
from urlparse import urlparse
from logger import logger
from parser import request
import asyncio
import httpx
import urllib3.exceptions
import zipfile
import io
from urllib.parse import urlparse
from nhentai import constant
from nhentai.logger import logger
from nhentai.utils import Singleton, async_request
class Downloader(object):
_instance = None
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
def __new__(cls, *args, **kwargs):
if not cls._instance:
cls._instance = super(Downloader, cls).__new__(cls, *args, **kwargs)
return cls._instance
def download_callback(result):
result, data = result
if result == 0:
logger.warning('fatal errors occurred, ignored')
elif result == -1:
logger.warning(f'url {data} return status code 404')
elif result == -2:
logger.warning('Ctrl-C pressed, exiting sub processes ...')
elif result == -3:
# workers won't be run, just pass
pass
else:
logger.log(16, f'{data} downloaded successfully')
def __init__(self, path='', thread=1, timeout=30):
if not isinstance(thread, (int, )) or thread < 1 or thread > 10:
raise ValueError('Invalid threads count')
class Downloader(Singleton):
def __init__(self, path='', threads=5, timeout=30, delay=0, exit_on_fail=False,
no_filename_padding=False):
self.threads = threads
self.path = str(path)
self.thread_count = thread
self.threads = []
self.timeout = timeout
self.delay = delay
self.exit_on_fail = exit_on_fail
self.folder = None
self.semaphore = None
self.no_filename_padding = no_filename_padding
def _download(self, url, folder='', filename='', retried=False):
logger.info('Start downloading: {} ...'.format(url))
filename = filename if filename else os.path.basename(urlparse(url).path)
async def fiber(self, tasks):
self.semaphore = asyncio.Semaphore(self.threads)
for completed_task in asyncio.as_completed(tasks):
try:
with open(os.path.join(folder, filename), "wb") as f:
response = request('get', url, stream=True, timeout=self.timeout)
result = await completed_task
if result[0] > 0:
logger.info(f'{result[1]} download completed')
else:
raise Exception(f'{result[1]} download failed, return value {result[0]}')
except Exception as e:
logger.error(f'An error occurred: {e}')
if self.exit_on_fail:
raise Exception('User intends to exit on fail')
async def _semaphore_download(self, *args, **kwargs):
async with self.semaphore:
return await self.download(*args, **kwargs)
async def download(self, url, folder='', filename='', retried=0, proxy=None, length=0):
logger.info(f'Starting to download {url} ...')
if self.delay:
await asyncio.sleep(self.delay)
filename = filename if filename else os.path.basename(urlparse(url).path)
base_filename, extension = os.path.splitext(filename)
if not self.no_filename_padding:
filename = base_filename.zfill(length) + extension
else:
filename = base_filename + extension
try:
response = await async_request('GET', url, timeout=self.timeout, proxy=proxy)
if response.status_code != 200:
path = urlparse(url).path
for mirror in constant.IMAGE_URL_MIRRORS:
logger.info(f"Try mirror: {mirror}{path}")
mirror_url = f'{mirror}{path}'
response = await async_request('GET', mirror_url, timeout=self.timeout, proxies=proxy)
if response.status_code == 200:
break
if not await self.save(filename, response):
logger.error(f'Can not download image {url}')
return -1, url
except (httpx.HTTPStatusError, httpx.TimeoutException, httpx.ConnectError) as e:
if retried < constant.RETRY_TIMES:
logger.warning(f'Download {filename} failed, retrying({retried + 1}) times...')
return await self.download(
url=url,
folder=folder,
filename=filename,
retried=retried + 1,
proxy=proxy,
)
else:
logger.warning(f'Download {filename} failed with {constant.RETRY_TIMES} times retried, skipped')
return -2, url
except Exception as e:
import traceback
logger.error(f"Exception type: {type(e)}")
traceback.print_stack()
logger.critical(str(e))
return -9, url
except KeyboardInterrupt:
return -4, url
return 1, url
async def save(self, filename, response) -> bool:
if response is None:
logger.error('Error: Response is None')
return False
save_file_path = os.path.join(self.folder, filename)
with open(save_file_path, 'wb') as f:
if response is not None:
length = response.headers.get('content-length')
if length is None:
f.write(response.content)
else:
for chunk in response.iter_content(2048):
async for chunk in response.aiter_bytes(2048):
f.write(chunk)
except requests.HTTPError as e:
if not retried:
logger.error('Error: {}, retrying'.format(str(e)))
return self._download(url=url, folder=folder, filename=filename, retried=True)
else:
return None
except Exception as e:
return True
def create_storage_object(self, folder:str):
if not os.path.exists(folder):
try:
os.makedirs(folder)
except EnvironmentError as e:
logger.critical(str(e))
return None
return url
self.folder:str = folder
self.close = lambda: None # Only available in class CompressedDownloader
def _download_callback(self, request, result):
if not result:
logger.critical('Too many errors occurred, quit.')
raise SystemExit
logger.log(15, '{} download successfully'.format(result))
def download(self, queue, folder=''):
if not isinstance(folder, (str, unicode)):
def start_download(self, queue, folder='') -> bool:
if not isinstance(folder, (str,)):
folder = str(folder)
if self.path:
folder = os.path.join(self.path, folder)
if not os.path.exists(folder):
logger.warn('Path \'{}\' not exist.'.format(folder))
try:
os.makedirs(folder)
except EnvironmentError as e:
logger.critical('Error: {}'.format(str(e)))
raise SystemExit
logger.info(f'Doujinshi will be saved at "{folder}"')
self.create_storage_object(folder)
if os.getenv('DEBUG', None) == 'NODOWNLOAD':
# Assuming we want to continue with the rest of the process.
return True
digit_length = len(str(len(queue)))
logger.info(f'Total download pages: {len(queue)}')
coroutines = [
self._semaphore_download(url, filename=os.path.basename(urlparse(url).path), length=digit_length)
for url in queue
]
# Prevent coroutines infection
asyncio.run(self.fiber(coroutines))
self.close()
return True
class CompressedDownloader(Downloader):
def create_storage_object(self, folder):
filename = f'{folder}.zip'
print(filename)
self.zipfile = zipfile.ZipFile(filename,'w')
self.close = lambda: self.zipfile.close()
async def save(self, filename, response) -> bool:
if response is None:
logger.error('Error: Response is None')
return False
image_data = io.BytesIO()
length = response.headers.get('content-length')
if length is None:
content = await response.read()
image_data.write(content)
else:
logger.warn('Path \'{}\' already exist.'.format(folder))
async for chunk in response.aiter_bytes(2048):
image_data.write(chunk)
queue = [([url], {'folder': folder}) for url in queue]
self.thread_pool = threadpool.ThreadPool(self.thread_count)
requests_ = threadpool.makeRequests(self._download, queue, self._download_callback)
[self.thread_pool.putRequest(req) for req in requests_]
self.thread_pool.wait()
image_data.seek(0)
self.zipfile.writestr(filename, image_data.read())
return True

nhentai/logger.py

@ -1,13 +1,23 @@
import logging
#
# Copyright (C) 2010-2012 Vinay Sajip. All rights reserved. Licensed under the new BSD license.
#
import logging
import os
import re
import platform
import sys
if platform.system() == 'Windows':
import ctypes
import ctypes.wintypes
# Reference: https://gist.github.com/vsajip/758430
# https://github.com/ipython/ipython/issues/4252
# https://msdn.microsoft.com/en-us/library/windows/desktop/ms686047%28v=vs.85%29.aspx
ctypes.windll.kernel32.SetConsoleTextAttribute.argtypes = [ctypes.wintypes.HANDLE, ctypes.wintypes.WORD]
ctypes.windll.kernel32.SetConsoleTextAttribute.restype = ctypes.wintypes.BOOL
class ColorizingStreamHandler(logging.StreamHandler):
# color names to indices
color_map = {
@ -22,18 +32,9 @@ class ColorizingStreamHandler(logging.StreamHandler):
}
# levels to (background, foreground, bold/intense)
if os.name == 'nt':
level_map = {
logging.DEBUG: (None, 'white', False),
logging.INFO: (None, 'green', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', False)
}
else:
level_map = {
logging.DEBUG: (None, 'white', False),
logging.INFO: (None, 'green', False),
logging.DEBUG: (None, 'blue', False),
logging.INFO: (None, 'white', False),
logging.WARNING: (None, 'yellow', False),
logging.ERROR: (None, 'red', False),
logging.CRITICAL: ('red', 'white', False)
@ -47,7 +48,29 @@ class ColorizingStreamHandler(logging.StreamHandler):
isatty = getattr(self.stream, 'isatty', None)
return isatty and isatty() and not self.disable_coloring
if os.name != 'nt':
def emit(self, record):
try:
message = self.format(record)
stream = self.stream
if not self.is_tty:
if message and message[0] == "\r":
message = message[1:]
stream.write(message)
else:
self.output_colorized(message)
stream.write(getattr(self, 'terminator', '\n'))
self.flush()
except (KeyboardInterrupt, SystemExit):
raise
except IOError:
pass
except:
self.handleError(record)
if not platform.system() == 'Windows':
def output_colorized(self, message):
self.stream.write(message)
else:
@ -65,8 +88,6 @@ class ColorizingStreamHandler(logging.StreamHandler):
}
def output_colorized(self, message):
import ctypes
parts = self.ansi_esc.split(message)
write = self.stream.write
h = None
@ -82,6 +103,9 @@ class ColorizingStreamHandler(logging.StreamHandler):
text = parts.pop(0)
if text:
if sys.version_info < (3, 0, 0):
write(text.encode('utf-8'))
else:
write(text)
if parts:
@ -135,20 +159,21 @@ class ColorizingStreamHandler(logging.StreamHandler):
message = logging.StreamHandler.format(self, record)
return self.colorize(message, record)
logging.addLevelName(15, "INFO")
logging.addLevelName(16, "SUCCESS")
logger = logging.getLogger('nhentai')
LOGGER_HANDLER = ColorizingStreamHandler(sys.stdout)
FORMATTER = logging.Formatter("\r[%(asctime)s] [%(levelname)s] %(message)s", "%H:%M:%S")
FORMATTER = logging.Formatter("\r[%(asctime)s] %(funcName)s: %(message)s", "%H:%M:%S")
LOGGER_HANDLER.setFormatter(FORMATTER)
LOGGER_HANDLER.level_map[logging.getLevelName("INFO")] = (None, "cyan", False)
LOGGER_HANDLER.level_map[logging.getLevelName("SUCCESS")] = (None, "green", False)
logger.addHandler(LOGGER_HANDLER)
logger.setLevel(logging.DEBUG)
if __name__ == '__main__':
logger.log(15, 'nhentai')
logger.log(16, 'nhentai')
logger.info('info')
logger.warn('warn')
logger.warning('warning')
logger.debug('debug')
logger.error('error')
logger.critical('critical')

nhentai/parser.py

@ -1,103 +1,318 @@
# coding: utf-8
from __future__ import print_function
import sys
import os
import re
import requests
import time
from bs4 import BeautifulSoup
import constant
from logger import logger
from tabulate import tabulate
def request(method, url, **kwargs):
if not hasattr(requests, method):
raise AttributeError('\'requests\' object has no attribute \'{}\''.format(method))
return requests.__dict__[method](url, proxies=constant.PROXY, **kwargs)
import nhentai.constant as constant
from nhentai.utils import request
from nhentai.logger import logger
def doujinshi_parser(id_):
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
raise Exception('Doujinshi id({}) is not valid'.format(id_))
id_ = int(id_)
logger.log(15, 'Fetching doujinshi information of id {}'.format(id_))
doujinshi = dict()
doujinshi['id'] = id_
url = '{}/{}/'.format(constant.DETAIL_URL, id_)
try:
response = request('get', url).content
except Exception as e:
logger.critical(str(e))
sys.exit()
html = BeautifulSoup(response)
doujinshi_info = html.find('div', attrs={'id': 'info'})
title = doujinshi_info.find('h1').text
subtitle = doujinshi_info.find('h2')
doujinshi['name'] = title
doujinshi['subtitle'] = subtitle.text if subtitle else ''
doujinshi_cover = html.find('div', attrs={'id': 'cover'})
img_id = re.search('/galleries/([\d]+)/cover\.(jpg|png)$', doujinshi_cover.a.img['src'])
if not img_id:
logger.critical('Failed to get image id')
sys.exit()
doujinshi['img_id'] = img_id.group(1)
doujinshi['ext'] = img_id.group(2)
pages = 0
for _ in doujinshi_info.find_all('div', class_=''):
pages = re.search('([\d]+) pages', _.text)
if pages:
pages = pages.group(1)
break
doujinshi['pages'] = int(pages)
# gain information of the doujinshi
information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
needed_fields = ['Characters', 'Artists', 'Language', 'Tags']
for field in information_fields:
field_name = field.contents[0].strip().strip(':')
if field_name in needed_fields:
data = [sub_field.contents[0].strip() for sub_field in
field.find_all('a', attrs={'class': 'tag'})]
doujinshi[field_name.lower()] = ', '.join(data)
return doujinshi
def _get_csrf_token(content):
html = BeautifulSoup(content, 'html.parser')
csrf_token_elem = html.find('input', attrs={'name': 'csrfmiddlewaretoken'})
if not csrf_token_elem:
raise Exception('Cannot find csrf token to login')
return csrf_token_elem.attrs['value']
def search_parser(keyword, page):
logger.debug('Searching doujinshis of keyword {}'.format(keyword))
def login(username, password):
logger.warning('This feature is deprecated, please use --cookie to set your cookie.')
csrf_token = _get_csrf_token(request('get', url=constant.LOGIN_URL).text)
if os.getenv('DEBUG'):
logger.info('Getting CSRF token ...')
if os.getenv('DEBUG'):
logger.info(f'CSRF token is {csrf_token}')
login_dict = {
'csrfmiddlewaretoken': csrf_token,
'username_or_email': username,
'password': password,
}
resp = request('post', url=constant.LOGIN_URL, data=login_dict)
if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
csrf_token = _get_csrf_token(resp.text)
resp = request('post', url=resp.url, data={'csrfmiddlewaretoken': csrf_token, 'next': '/'})
if 'Invalid username/email or password' in resp.text:
logger.error('Login failed, please check your username and password')
sys.exit(1)
if 'You\'re loading pages way too quickly.' in resp.text or 'Really, slow down' in resp.text:
logger.error('Using nhentai --cookie \'YOUR_COOKIE_HERE\' to save your Cookie.')
sys.exit(2)
def _get_title_and_id(response):
result = []
try:
response = request('get', url=constant.SEARCH_URL, params={'q': keyword, 'page': page}).content
except requests.ConnectionError as e:
logger.critical(e)
logger.warn('If you are in China, please configure the proxy to fu*k GFW.')
raise SystemExit
html = BeautifulSoup(response)
html = BeautifulSoup(response, 'html.parser')
doujinshi_search_result = html.find_all('div', attrs={'class': 'gallery'})
for doujinshi in doujinshi_search_result:
doujinshi_container = doujinshi.find('div', attrs={'class': 'caption'})
title = doujinshi_container.text.strip()
title = (title[:85] + '..') if len(title) > 85 else title
id_ = re.search('/g/(\d+)/', doujinshi.a['href']).group(1)
title = title if len(title) < 85 else title[:82] + '...'
id_ = re.search('/g/([0-9]+)/', doujinshi.a['href']).group(1)
result.append({'id': id_, 'title': title})
return result
def favorites_parser(page=None):
result = []
html = BeautifulSoup(request('get', constant.FAV_URL).content, 'html.parser')
count = html.find('span', attrs={'class': 'count'})
if not count:
logger.error("Can't get your number of favorite doujinshis. Did the login failed?")
return []
count = int(count.text.strip('(').strip(')').replace(',', ''))
if count == 0:
logger.warning('No favorites found')
return []
pages = int(count / 25)
if page:
page_range_list = page
else:
if pages:
pages += 1 if count % (25 * pages) else 0
else:
pages = 1
logger.info(f'You have {count} favorites in {pages} pages.')
if os.getenv('DEBUG'):
pages = 1
page_range_list = range(1, pages + 1)
for page in page_range_list:
logger.info(f'Getting doujinshi ids of page {page}')
i = 0
while i <= constant.RETRY_TIMES + 1:
i += 1
if i > constant.RETRY_TIMES:
logger.error(f'Failed to get favorites at page {page} after {constant.RETRY_TIMES} retries, skipped')
break
try:
resp = request('get', f'{constant.FAV_URL}?page={page}').content
temp_result = _get_title_and_id(resp)
if not temp_result:
logger.warning(f'Failed to get favorites at page {page}, retrying ({i} times) ...')
continue
else:
result.extend(temp_result)
break
except Exception as e:
logger.warning(f'Error: {e}, retrying ({i} times) ...')
return result
def doujinshi_parser(id_, counter=0):
if not isinstance(id_, (int,)) and (isinstance(id_, (str,)) and not id_.isdigit()):
raise Exception(f'Doujinshi id({id_}) is not valid')
id_ = int(id_)
logger.info(f'Fetching doujinshi information of id {id_}')
doujinshi = dict()
doujinshi['id'] = id_
url = f'{constant.DETAIL_URL}/{id_}/'
try:
response = request('get', url)
if response.status_code in (200, ):
response = response.content
elif response.status_code in (404,):
logger.error(f'Doujinshi with id {id_} cannot be found')
return []
else:
counter += 1
if counter == 10:
logger.critical(f'Failed to fetch doujinshi information of id {id_}')
return None
logger.debug(f'Slow down and retry ({id_}) ...')
time.sleep(1)
return doujinshi_parser(str(id_), counter)
except Exception as e:
logger.warning(f'Error: {e}, ignored')
return None
html = BeautifulSoup(response, 'html.parser')
doujinshi_info = html.find('div', attrs={'id': 'info'})
title = doujinshi_info.find('h1').text
pretty_name = doujinshi_info.find('h1').find('span', attrs={'class': 'pretty'}).text
subtitle = doujinshi_info.find('h2')
favorite_counts = doujinshi_info.find('span', class_='nobold').text.strip('(').strip(')')
doujinshi['name'] = title
doujinshi['pretty_name'] = pretty_name
doujinshi['subtitle'] = subtitle.text if subtitle else ''
doujinshi['favorite_counts'] = int(favorite_counts) if favorite_counts and favorite_counts.isdigit() else 0
doujinshi_cover = html.find('div', attrs={'id': 'cover'})
# img_id = re.search('/galleries/([0-9]+)/cover.(jpg|png|gif|webp)$',
# doujinshi_cover.a.img.attrs['data-src'])
# fix cover.webp.webp
img_id = re.search(r'/galleries/(\d+)/cover(\.webp|\.jpg|\.png)?\.\w+$', doujinshi_cover.a.img.attrs['data-src'])
ext = []
for i in html.find_all('div', attrs={'class': 'thumb-container'}):
base_name = os.path.basename(i.img.attrs['data-src'])
ext_name = base_name.split('.')
if len(ext_name) == 3:
ext.append(ext_name[1])
else:
ext.append(ext_name[-1])
if not img_id:
logger.critical(f'Failed to get image id of doujinshi {id_}')
return None
doujinshi['img_id'] = img_id.group(1)
doujinshi['ext'] = ext
pages = 0
for _ in doujinshi_info.find_all('div', class_='tag-container field-name'):
if re.search('Pages:', _.text):
pages = _.find('span', class_='name').string
doujinshi['pages'] = int(pages)
# gain information of the doujinshi
information_fields = doujinshi_info.find_all('div', attrs={'class': 'field-name'})
needed_fields = ['Characters', 'Artists', 'Languages', 'Tags', 'Parodies', 'Groups', 'Categories']
for field in information_fields:
field_name = field.contents[0].strip().strip(':')
if field_name in needed_fields:
data = [sub_field.find('span', attrs={'class': 'name'}).contents[0].strip() for sub_field in
field.find_all('a', attrs={'class': 'tag'})]
doujinshi[field_name.lower()] = ', '.join(data)
time_field = doujinshi_info.find('time')
if time_field.has_attr('datetime'):
doujinshi['date'] = time_field['datetime']
return doujinshi
def print_doujinshi(doujinshi_list):
if not doujinshi_list:
return
doujinshi_list = [i.values() for i in doujinshi_list]
doujinshi_list = [(i['id'], i['title']) for i in doujinshi_list]
headers = ['id', 'doujinshi']
logger.info('Search Result\n' +
tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
logger.info(f'Search Result || Found {doujinshi_list.__len__()} doujinshis')
print(tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))
def legacy_search_parser(keyword, sorting, page, is_page_all=False, type_='SEARCH'):
logger.info(f'Searching doujinshis of keyword {keyword}')
result = []
if type_ not in ('SEARCH', 'ARTIST', ):
raise ValueError('Invalid type')
if is_page_all:
if type_ == 'SEARCH':
response = request('get', url=constant.LEGACY_SEARCH_URL,
params={'q': keyword, 'page': 1, 'sort': sorting}).content
else:
url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
response = request('get', url=url, params={'page': 1}).content
html = BeautifulSoup(response, 'lxml')
pagination = html.find(attrs={'class': 'pagination'})
last_page = pagination.find(attrs={'class': 'last'})
last_page = re.findall('page=([0-9]+)', last_page.attrs['href'])[0]
logger.info(f'Getting doujinshi ids of {last_page} pages')
pages = range(1, int(last_page))
else:
pages = page
for p in pages:
logger.info(f'Fetching page {p} ...')
if type_ == 'SEARCH':
response = request('get', url=constant.LEGACY_SEARCH_URL,
params={'q': keyword, 'page': p, 'sort': sorting}).content
else:
url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
response = request('get', url=url, params={'page': p}).content
if response is None:
logger.warning(f'No result in response in page {p}')
continue
result.extend(_get_title_and_id(response))
if not result:
logger.warning(f'No results for keywords {keyword}')
return result
def search_parser(keyword, sorting, page, is_page_all=False):
result = []
response = None
if not page:
page = [1]
if is_page_all:
url = request('get', url=constant.SEARCH_URL, params={'query': keyword}).url
init_response = request('get', url.replace('%2B', '+')).json()
page = range(1, init_response['num_pages']+1)
total = f'/{page[-1]}' if is_page_all else ''
not_exists_persist = False
for p in page:
i = 0
logger.info(f'Searching doujinshis using keywords "{keyword}" on page {p}{total}')
while i < constant.RETRY_TIMES:
try:
url = request('get', url=constant.SEARCH_URL, params={'query': keyword,
'page': p, 'sort': sorting}).url
if constant.DEBUG:
logger.debug(f'Request URL: {url}')
response = request('get', url.replace('%2B', '+')).json()
except Exception as e:
logger.critical(str(e))
response = None
break
if constant.DEBUG:
logger.debug(f'Response: {response}')
            if response is None or 'result' not in response:
                logger.warning(f'No result in response in page {p}')
                if not_exists_persist is True:
                    break
                i += 1  # count the failed attempt so the retry loop terminates
                continue
            for row in response['result']:
                title = row['title']['english']
                title = title[:constant.CONFIG['max_filename']] + '..' if \
                    len(title) > constant.CONFIG['max_filename'] else title
                result.append({'id': row['id'], 'title': title})
            not_exists_persist = False
            break  # page fetched successfully, stop retrying
if not result:
logger.warning(f'No results for keywords {keyword}')
return result
if __name__ == '__main__':
print(doujinshi_parser("32271"))

nhentai/serializer.py Normal file
@ -0,0 +1,155 @@
# coding: utf-8
import json
import os
from nhentai.constant import PATH_SEPARATOR, LANGUAGE_ISO
from xml.sax.saxutils import escape
from requests.structures import CaseInsensitiveDict
def serialize_json(doujinshi, output_dir: str):
metadata = {'title': doujinshi.name,
'subtitle': doujinshi.info.subtitle}
    if doujinshi.favorite_counts:
        metadata['favorite_counts'] = doujinshi.favorite_counts
if doujinshi.info.date:
metadata['upload_date'] = doujinshi.info.date
if doujinshi.info.parodies:
metadata['parody'] = [i.strip() for i in doujinshi.info.parodies.split(',')]
if doujinshi.info.characters:
metadata['character'] = [i.strip() for i in doujinshi.info.characters.split(',')]
if doujinshi.info.tags:
metadata['tag'] = [i.strip() for i in doujinshi.info.tags.split(',')]
if doujinshi.info.artists:
metadata['artist'] = [i.strip() for i in doujinshi.info.artists.split(',')]
if doujinshi.info.groups:
metadata['group'] = [i.strip() for i in doujinshi.info.groups.split(',')]
if doujinshi.info.languages:
metadata['language'] = [i.strip() for i in doujinshi.info.languages.split(',')]
metadata['category'] = [i.strip() for i in doujinshi.info.categories.split(',')]
metadata['URL'] = doujinshi.url
metadata['Pages'] = doujinshi.pages
with open(os.path.join(output_dir, 'metadata.json'), 'w') as f:
json.dump(metadata, f, separators=(',', ':'))
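# The resulting metadata.json is compact; a hypothetical example
# (illustrative values only, not from a real gallery):
#   {"title":"...","subtitle":"...","tag":["tag1","tag2"],
#    "category":["doujinshi"],"URL":"https://nhentai.net/g/123456/","Pages":24}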
def serialize_comic_xml(doujinshi, output_dir):
from iso8601 import parse_date
with open(os.path.join(output_dir, 'ComicInfo.xml'), 'w', encoding="utf-8") as f:
f.write('<?xml version="1.0" encoding="utf-8"?>\n')
f.write('<ComicInfo xmlns:xsd="http://www.w3.org/2001/XMLSchema" '
'xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">\n')
xml_write_simple_tag(f, 'Manga', 'Yes')
xml_write_simple_tag(f, 'Title', doujinshi.name)
xml_write_simple_tag(f, 'Summary', doujinshi.info.subtitle)
xml_write_simple_tag(f, 'PageCount', doujinshi.pages)
xml_write_simple_tag(f, 'URL', doujinshi.url)
xml_write_simple_tag(f, 'NhentaiId', doujinshi.id)
xml_write_simple_tag(f, 'Favorites', doujinshi.favorite_counts)
xml_write_simple_tag(f, 'Genre', doujinshi.info.categories)
xml_write_simple_tag(f, 'BlackAndWhite', 'No' if doujinshi.info.tags and
'full color' in doujinshi.info.tags else 'Yes')
if doujinshi.info.date:
dt = parse_date(doujinshi.info.date)
xml_write_simple_tag(f, 'Year', dt.year)
xml_write_simple_tag(f, 'Month', dt.month)
xml_write_simple_tag(f, 'Day', dt.day)
if doujinshi.info.parodies:
xml_write_simple_tag(f, 'Series', doujinshi.info.parodies)
if doujinshi.info.characters:
xml_write_simple_tag(f, 'Characters', doujinshi.info.characters)
if doujinshi.info.tags:
xml_write_simple_tag(f, 'Tags', doujinshi.info.tags)
if doujinshi.info.artists:
xml_write_simple_tag(f, 'Writer', ' & '.join([i.strip() for i in
doujinshi.info.artists.split(',')]))
if doujinshi.info.languages:
languages = [i.strip() for i in doujinshi.info.languages.split(',')]
xml_write_simple_tag(f, 'Translated', 'Yes' if 'translated' in languages else 'No')
[xml_write_simple_tag(f, 'LanguageISO', LANGUAGE_ISO[i]) for i in languages
if (i != 'translated' and i in LANGUAGE_ISO)]
f.write('</ComicInfo>')
def serialize_info_txt(doujinshi, output_dir: str):
    info_txt_path = os.path.join(output_dir, 'info.txt')
    fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'GROUPS', 'CIRCLE', 'SCANLATOR',
              'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
              'TAGS', 'FAVORITE COUNTS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
              'SERIES', 'PARODY', 'URL']
    temp_dict = CaseInsensitiveDict(dict(doujinshi.table))
    with open(info_txt_path, 'w', encoding='utf-8') as f:
        for i in fields:
            v = temp_dict.get(i)
            v = temp_dict.get(f'{i}s') if v is None else v
            v = doujinshi.info.get(i.lower(), None) if v is None else v
            v = doujinshi.info.get(f'{i.lower()}s', "Unknown") if v is None else v
            f.write(f'{i}: {v}\n')
def xml_write_simple_tag(f, name, val, indent=1):
f.write(f'{" "*indent}<{name}>{escape(str(val))}</{name}>\n')
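# e.g. xml_write_simple_tag(f, 'Title', 'A & B') writes ' <Title>A &amp; B</Title>'
# followed by a newline (one leading space per indent level, content XML-escaped)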
def merge_json():
    lst = []
    output_dir = f".{PATH_SEPARATOR}"
    os.chdir(output_dir)
    doujinshi_dirs = next(os.walk('.'))[1]
    for folder in doujinshi_dirs:
        files = os.listdir(folder)
        if 'metadata.json' not in files:
            continue
        data_file = os.path.join(output_dir, folder, 'metadata.json')
        with open(data_file, 'r') as json_file:
            json_dict = json.load(json_file)
        json_dict['Folder'] = folder
        lst.append(json_dict)
    return lst
def serialize_unique(lst):
dictionary = {}
parody = []
character = []
tag = []
artist = []
group = []
    for dic in lst:
        if 'parody' in dic:
            parody.extend(dic['parody'])
        if 'character' in dic:
            character.extend(dic['character'])
        if 'tag' in dic:
            tag.extend(dic['tag'])
        if 'artist' in dic:
            artist.extend(dic['artist'])
        if 'group' in dic:
            group.extend(dic['group'])
dictionary['parody'] = list(set(parody))
dictionary['character'] = list(set(character))
dictionary['tag'] = list(set(tag))
dictionary['artist'] = list(set(artist))
dictionary['group'] = list(set(group))
return dictionary
def set_js_database():
with open('data.js', 'w') as f:
indexed_json = merge_json()
unique_json = json.dumps(serialize_unique(indexed_json), separators=(',', ':'))
indexed_json = json.dumps(indexed_json, separators=(',', ':'))
f.write('var data = ' + indexed_json)
f.write(';\nvar tags = ' + unique_json)

nhentai/utils.py Normal file
@ -0,0 +1,385 @@
# coding: utf-8
import json
import sys
import re
import os
import zipfile
import shutil
import httpx
import requests
import sqlite3
import urllib.parse
from typing import Tuple
from nhentai import constant
from nhentai.constant import PATH_SEPARATOR
from nhentai.logger import logger
from nhentai.serializer import serialize_comic_xml, serialize_json, serialize_info_txt, set_js_database
MAX_FIELD_LENGTH = 100
EXTENSIONS = ('.png', '.jpg', '.jpeg', '.gif', '.webp')
def get_headers():
headers = {
'Referer': constant.LOGIN_URL
}
user_agent = constant.CONFIG.get('useragent')
if user_agent and user_agent.strip():
headers['User-Agent'] = user_agent
cookie = constant.CONFIG.get('cookie')
if cookie and cookie.strip():
headers['Cookie'] = cookie
return headers
def request(method, url, **kwargs):
session = requests.Session()
session.headers.update(get_headers())
if not kwargs.get('proxies', None):
kwargs['proxies'] = {
'https': constant.CONFIG['proxy'],
'http': constant.CONFIG['proxy'],
}
return getattr(session, method)(url, verify=False, **kwargs)
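# Note: each call builds a fresh Session carrying the configured cookie,
# user agent, and proxies; e.g. request('get', constant.BASE_URL) issues a
# plain GET through that session.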
async def async_request(method, url, proxy=None, **kwargs):
    headers = get_headers()
    if proxy is None:
        proxy = constant.CONFIG['proxy']
    if isinstance(proxy, (str,)) and not proxy:
        proxy = None
    # pass **kwargs only to the request itself, so request options such as
    # `params` are not mistakenly forwarded to the AsyncClient constructor
    async with httpx.AsyncClient(headers=headers, verify=False, proxy=proxy) as client:
        response = await client.request(method, url, **kwargs)
    return response
def check_cookie():
response = request('get', constant.BASE_URL)
if response.status_code == 403 and 'Just a moment...' in response.text:
logger.error('Blocked by Cloudflare captcha, please set your cookie and useragent')
sys.exit(1)
username = re.findall('"/users/[0-9]+/(.*?)"', response.text)
if not username:
logger.warning(
'Cannot get your username, please check your cookie or use `nhentai --cookie` to set your cookie')
else:
        logger.log(16, f'Logged in successfully! Your username: {username[0]}')
class _Singleton(type):
""" A metaclass that creates a Singleton base class when called. """
_instances = {}
def __call__(cls, *args, **kwargs):
if cls not in cls._instances:
cls._instances[cls] = super(_Singleton, cls).__call__(*args, **kwargs)
return cls._instances[cls]
class Singleton(_Singleton(str('SingletonMeta'), (object,), {})):
pass
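# Hypothetical usage sketch: subclasses of Singleton share one instance per class.
#
#   class Config(Singleton):
#       pass
#
#   assert Config() is Config()  # always the same object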
def readfile(path):
loc = os.path.dirname(__file__)
with open(os.path.join(loc, path), 'r') as file:
return file.read()
def parse_doujinshi_obj(
output_dir: str,
doujinshi_obj=None,
file_type: str = ''
) -> Tuple[str, str]:
filename = f'.{PATH_SEPARATOR}doujinshi.{file_type}'
if doujinshi_obj is not None:
doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
_filename = f'{doujinshi_obj.filename}.{file_type}'
if file_type == 'pdf':
_filename = _filename.replace('/', '-')
filename = os.path.join(output_dir, _filename)
else:
if file_type == 'html':
return output_dir, 'index.html'
doujinshi_dir = f'.{PATH_SEPARATOR}'
if not os.path.exists(doujinshi_dir):
os.makedirs(doujinshi_dir)
return doujinshi_dir, filename
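# Illustrative example (hypothetical paths): for a doujinshi whose filename is
# 'some title', parse_doujinshi_obj('/out', obj, 'cbz') returns roughly
# ('/out/some title', '/out/some title.cbz'); with doujinshi_obj=None it falls
# back to the current directory and './doujinshi.cbz'.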
def generate_html(output_dir='.', doujinshi_obj=None, template='default'):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, 'html')
image_html = ''
if not os.path.exists(doujinshi_dir):
logger.warning(f'Path "{doujinshi_dir}" does not exist, creating.')
try:
os.makedirs(doujinshi_dir)
except EnvironmentError as e:
logger.critical(e)
file_list = os.listdir(doujinshi_dir)
file_list.sort()
for image in file_list:
if not os.path.splitext(image)[1] in EXTENSIONS:
continue
image_html += f'<img src="{image}" class="image-item"/>\n'
html = readfile(f'viewer/{template}/index.html')
css = readfile(f'viewer/{template}/styles.css')
js = readfile(f'viewer/{template}/scripts.js')
if doujinshi_obj is not None:
# serialize_json(doujinshi_obj, doujinshi_dir)
name = doujinshi_obj.name
else:
metadata_path = os.path.join(doujinshi_dir, "metadata.json")
if os.path.exists(metadata_path):
with open(metadata_path, 'r') as file:
doujinshi_info = json.loads(file.read())
name = doujinshi_info.get("title")
else:
name = 'nHentai HTML Viewer'
data = html.format(TITLE=name, IMAGES=image_html, SCRIPTS=js, STYLES=css)
try:
with open(os.path.join(doujinshi_dir, 'index.html'), 'wb') as f:
f.write(data.encode('utf-8'))
logger.log(16, f'HTML Viewer has been written to "{os.path.join(doujinshi_dir, "index.html")}"')
except Exception as e:
logger.warning(f'Writing HTML Viewer failed ({e})')
def move_to_folder(output_dir='.', doujinshi_obj=None, file_type=None):
if not file_type:
raise RuntimeError('no file_type specified')
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)
for fn in os.listdir(doujinshi_dir):
file_path = os.path.join(doujinshi_dir, fn)
_, ext = os.path.splitext(file_path)
if ext in ['.pdf', '.cbz']:
continue
if os.path.isfile(file_path):
try:
os.remove(file_path)
except Exception as e:
print(f"Error deleting file: {e}")
shutil.move(filename, os.path.join(doujinshi_dir, os.path.basename(filename)))
def generate_main_html(output_dir=f'.{PATH_SEPARATOR}'):
"""
    Generate a main HTML page that lists all downloaded doujinshi,
    with a link to each one's `index.html`.
    The default output folder is the current CLI path.
"""
image_html = ''
main = readfile('viewer/main.html')
css = readfile('viewer/main.css')
js = readfile('viewer/main.js')
element = '\n\
<div class="gallery-favorite">\n\
<div class="gallery">\n\
<a href="./{FOLDER}/index.html" class="cover" style="padding:0 0 141.6% 0"><img\n\
src="./{FOLDER}/{IMAGE}" />\n\
<div class="caption">{TITLE}</div>\n\
</a>\n\
</div>\n\
</div>\n'
os.chdir(output_dir)
doujinshi_dirs = next(os.walk('.'))[1]
for folder in doujinshi_dirs:
files = os.listdir(folder)
files.sort()
if 'index.html' in files:
logger.info(f'Add doujinshi "{folder}"')
else:
continue
image = files[0] # 001.jpg or 001.png
if folder is not None:
title = folder.replace('_', ' ')
else:
title = 'nHentai HTML Viewer'
image_html += element.format(FOLDER=urllib.parse.quote(folder), IMAGE=image, TITLE=title)
if image_html == '':
logger.warning('No index.html found, --gen-main paused.')
return
try:
data = main.format(STYLES=css, SCRIPTS=js, PICTURE=image_html)
with open('./main.html', 'wb') as f:
f.write(data.encode('utf-8'))
shutil.copy(os.path.dirname(__file__) + '/viewer/logo.png', './')
set_js_database()
output_dir = output_dir[:-1] if output_dir.endswith('/') else output_dir
logger.log(16, f'Main Viewer has been written to "{output_dir}/main.html"')
except Exception as e:
logger.warning(f'Writing Main Viewer failed ({e})')
def generate_cbz(doujinshi_dir, filename):
file_list = os.listdir(doujinshi_dir)
file_list.sort()
logger.info(f'Writing CBZ file to path: {filename}')
with zipfile.ZipFile(filename, 'w') as cbz_pf:
for image in file_list:
image_path = os.path.join(doujinshi_dir, image)
cbz_pf.write(image_path, image)
logger.log(16, f'Comic Book CBZ file has been written to "{filename}"')
def generate_doc(file_type='', output_dir='.', doujinshi_obj=None, regenerate=False):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, file_type)
if os.path.exists(f'{doujinshi_dir}.{file_type}') and not regenerate:
logger.info(f'Skipped {file_type} file generation: {doujinshi_dir}.{file_type} already exists')
return
if file_type == 'cbz':
serialize_comic_xml(doujinshi_obj, doujinshi_dir)
generate_cbz(doujinshi_dir, filename)
elif file_type == 'pdf':
try:
import img2pdf
"""Write images to a PDF file using img2pdf."""
file_list = [f for f in os.listdir(doujinshi_dir) if f.lower().endswith(EXTENSIONS)]
file_list.sort()
logger.info(f'Writing PDF file to path: {filename}')
with open(filename, 'wb') as pdf_f:
full_path_list = (
[os.path.join(doujinshi_dir, image) for image in file_list]
)
pdf_f.write(img2pdf.convert(full_path_list, rotation=img2pdf.Rotation.ifvalid))
logger.log(16, f'PDF file has been written to "{filename}"')
except ImportError:
logger.error("Please install img2pdf package by using pip.")
else:
raise ValueError('invalid file type')
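# Hypothetical usage sketch: generate_doc('cbz', '/out', doujinshi) writes
# ComicInfo.xml into the image directory and then packs it into '/out/<name>.cbz';
# generate_doc('pdf', ...) converts the images with img2pdf instead.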
def generate_metadata(output_dir, doujinshi_obj=None):
doujinshi_dir, filename = parse_doujinshi_obj(output_dir, doujinshi_obj, '')
serialize_json(doujinshi_obj, doujinshi_dir)
serialize_comic_xml(doujinshi_obj, doujinshi_dir)
serialize_info_txt(doujinshi_obj, doujinshi_dir)
logger.log(16, f'Metadata files have been written to "{doujinshi_dir}"')
def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
"""
    This used to be a whitelist approach that allowed only letters and a few symbols,
    but most doujinshi names contain Japanese multi-byte characters, which were rejected,
    so a blacklist approach is used now.
    Forbidden characters (\'/:,;*?"<>|) in the filename are replaced with spaces (" ").
"""
# maybe you can use `--format` to select a suitable filename
if not _truncate_only:
ban_chars = '\\\'/:,;*?"<>|\t\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b'
filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip()
filename = ' '.join(filename.split())
while filename.endswith('.'):
filename = filename[:-1]
else:
filename = s
    # limit the filename to `length` chars
    if len(filename) >= length:
        filename = filename[:length - 1] + u'…'
# Remove [] from filename
filename = filename.replace('[]', '').strip()
return filename
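# Illustrative examples (hypothetical inputs):
#   format_filename('Title: a/b*c?')  ->  'Title a b c'
#   names longer than MAX_FIELD_LENGTH are truncated with a trailing ellipsis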
def signal_handler(_signal, _frame):
logger.error('Ctrl-C signal received. Stopping...')
sys.exit(1)
def paging(page_string):
# 1,3-5,14 -> [1, 3, 4, 5, 14]
if not page_string:
# default, the first page
return [1]
page_list = []
for i in page_string.split(','):
if '-' in i:
start, end = i.split('-')
if not (start.isdigit() and end.isdigit()):
raise Exception('Invalid page number')
page_list.extend(list(range(int(start), int(end) + 1)))
else:
if not i.isdigit():
raise Exception('Invalid page number')
page_list.append(int(i))
return page_list
class DB(object):
conn = None
cur = None
def __enter__(self):
self.conn = sqlite3.connect(constant.NHENTAI_HISTORY)
self.cur = self.conn.cursor()
self.cur.execute('CREATE TABLE IF NOT EXISTS download_history (id text)')
self.conn.commit()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
self.conn.close()
def clean_all(self):
self.cur.execute('DELETE FROM download_history WHERE 1')
self.conn.commit()
def add_one(self, data):
self.cur.execute('INSERT INTO download_history VALUES (?)', [data])
self.conn.commit()
def get_all(self):
data = self.cur.execute('SELECT id FROM download_history')
return [i[0] for i in data]
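# Hypothetical usage sketch: DB is a context manager over the download-history
# SQLite database.
#
#   with DB() as db:
#       db.add_one('123456')
#       downloaded_ids = db.get_all()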

@ -0,0 +1,25 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=yes, viewport-fit=cover" />
<title>{TITLE}</title>
<style>
{STYLES}
</style>
</head>
<body>
<nav id="list">
{IMAGES}</nav>
<div id="image-container">
<span id="page-num"></span>
<div id="dest"></div>
</div>
<script>
{SCRIPTS}
</script>
</body>
</html>

View File

@ -0,0 +1,87 @@
const pages = Array.from(document.querySelectorAll('img.image-item'));
let currentPage = 0;
function changePage(pageNum) {
const previous = pages[currentPage];
const current = pages[pageNum];
if (current == null) {
return;
}
previous.classList.remove('current');
current.classList.add('current');
currentPage = pageNum;
const display = document.getElementById('dest');
display.style.backgroundImage = `url("${current.src}")`;
scroll(0,0)
document.getElementById('page-num')
.innerText = [
(pageNum + 1).toLocaleString(),
pages.length.toLocaleString()
].join('\u200a/\u200a');
}
changePage(0);
document.getElementById('list').onclick = event => {
if (pages.includes(event.target)) {
changePage(pages.indexOf(event.target));
}
};
document.getElementById('image-container').onclick = event => {
const width = document.getElementById('image-container').clientWidth;
const clickPos = event.clientX / width;
if (clickPos < 0.5) {
changePage(currentPage - 1);
} else {
changePage(currentPage + 1);
}
};
document.onkeypress = event => {
switch (event.key.toLowerCase()) {
// Previous Image
case 'w':
scrollBy(0, -40);
break;
case 'a':
changePage(currentPage - 1);
break;
// Return to previous page
case 'q':
window.history.go(-1);
break;
// Next Image
case ' ':
case 's':
scrollBy(0, 40);
break;
case 'd':
changePage(currentPage + 1);
break;
    }  // arrow keys don't fire keypress events; they're handled in onkeydown below
};
document.onkeydown = event =>{
switch (event.keyCode) {
case 37: //left
changePage(currentPage - 1);
break;
case 38: //up
changePage(currentPage - 1);
break;
case 39: //right
changePage(currentPage + 1);
break;
case 40: //down
changePage(currentPage + 1);
break;
}
};

@ -0,0 +1,87 @@
*, *::after, *::before {
box-sizing: border-box;
}
img {
vertical-align: middle;
}
html, body {
display: flex;
background-color: #e8e6e6;
height: 100%;
width: 100%;
padding: 0;
margin: 0;
font-family: sans-serif;
}
#list {
height: 2000px;
overflow: scroll;
width: 260px;
text-align: center;
}
#list img {
width: 200px;
padding: 10px;
border-radius: 10px;
margin: 15px 0;
cursor: pointer;
}
#list img.current {
background: #0003;
}
#image-container {
flex: auto;
height: 2000px;
background: #222;
color: #fff;
text-align: center;
cursor: pointer;
-webkit-user-select: none;
user-select: none;
position: relative;
}
#image-container #dest {
height: 2000px;
width: 100%;
background-size: contain;
background-repeat: no-repeat;
background-position: top;
}
#image-container #page-num {
position: static;
font-size: 14pt;
left: 10px;
bottom: 5px;
font-weight: bold;
opacity: 0.75;
text-shadow: /* Duplicate the same shadow to make it very strong */
0 0 2px #222,
0 0 2px #222,
0 0 2px #222;
}

BIN nhentai/viewer/logo.png Normal file
Binary file not shown.
nhentai/viewer/main.css Normal file
@ -0,0 +1,332 @@
/*! normalize.css v5.0.0 | MIT License | github.com/necolas/normalize.css */
/* Original from https://static.nhentai.net/css/main_style.9bb9b703e601.css */
a {
background-color: transparent;
-webkit-text-decoration-skip: objects
}
img {
border-style: none
}
html {
box-sizing: border-box
}
*,:after,:before {
box-sizing: inherit
}
body,html {
font-family: 'Noto Sans',sans-serif;
font-size: 14px;
line-height: 1.42857143;
height: 100%;
margin: 0;
text-align: center;
color: #34495e;
background-color: #fff;
-webkit-font-smoothing: antialiased;
-moz-osx-font-smoothing: grayscale
}
a {
text-decoration: none;
color: #34495e
}
blockquote {
border: 0
}
.container {
display: block;
clear: both;
margin-left: 15rem;
margin-right: 0.5rem;
margin-bottom: 5px;
margin-top: 5px;
padding: 4px;
border-radius: 9px;
background-color: #ecf0f1;
width: 100% - 15rem;
max-width: 1500px
}
.gallery,.gallery-favorite,.thumb-container {
display: inline-block;
vertical-align: top
}
.gallery img,.gallery-favorite img,.thumb-container img {
display: block;
max-width: 100%;
height: auto
}
@media screen and (min-width: 980px) {
.gallery,.gallery-favorite,.thumb-container {
width:19%;
margin: 3px;
}
}
@media screen and (max-width: 979px) {
.gallery,.gallery-favorite,.thumb-container {
width:24%;
margin: 2px
}
}
@media screen and (max-width: 772px) {
.gallery,.gallery-favorite,.thumb-container {
width:32%;
margin: 1.5px
}
}
@media screen and (max-width: 500px) {
.gallery,.gallery-favorite,.thumb-container {
width:49%;
margin: .5px
}
}
.gallery a,.gallery-favorite a {
display: block
}
.gallery a img,.gallery-favorite a img {
position: absolute
}
.caption {
line-height: 15px;
left: 0;
right: 0;
top: 100%;
position: absolute;
z-index: 10;
overflow: hidden;
width: 100%;
max-height: 34px;
padding: 3px;
background-color: #fff;
font-weight: 700;
display: block;
text-align: center;
text-decoration: none;
color: #34495e
}
.gallery {
position: relative;
margin-bottom: 3em
}
.gallery:hover .caption {
max-height: 100%;
box-shadow: 0 10px 20px rgba(100,100,100,.5)
}
.gallery-favorite .gallery {
width: 100%
}
.sidenav {
height: 100%;
width: 15rem;
position: fixed;
z-index: 1;
top: 0;
left: 0;
background-color: #0d0d0d;
overflow: hidden;
padding-top: 20px;
-webkit-touch-callout: none; /* iOS Safari */
-webkit-user-select: none; /* Safari */
-khtml-user-select: none; /* Konqueror HTML */
-moz-user-select: none; /* Old versions of Firefox */
-ms-user-select: none; /* Internet Explorer/Edge */
user-select: none;
}
.sidenav a {
background-color: #eee;
padding: 5px 0px 5px 15px;
text-decoration: none;
font-size: 15px;
color: #0d0d0d;
display: block;
text-align: left;
}
.sidenav img {
width:100%;
padding: 0px 5px 0px 5px;
}
.sidenav h1 {
font-size: 1.5em;
margin: 0px 0px 10px;
}
.sidenav a:hover {
color: white;
background-color: #EC2754;
}
.accordion {
font-weight: bold;
background-color: #eee;
color: #444;
padding: 10px 0px 5px 8px;
width: 100%;
border: none;
text-align: left;
outline: none;
font-size: 15px;
transition: 0.4s;
cursor:pointer;
}
.accordion:hover {
background-color: #ddd;
}
.accordion.active{
background-color:#ddd;
}
.nav-btn {
font-weight: bold;
background-color: #eee;
color: #444;
padding: 8px 8px 5px 9px;
width: 100%;
border: none;
text-align: left;
outline: none;
font-size: 15px;
}
.hidden {
display:none;
}
.nav-btn a{
font-weight: normal;
padding-right: 10px;
border-radius: 15px;
cursor: crosshair
}
.options {
display:block;
padding: 0px 0px 0px 0px;
background-color: #eee;
max-height: 0;
overflow: hidden;
transition: max-height 0.2s ease-out;
cursor:pointer;
}
.search{
background-color: #eee;
padding-right:40px;
white-space: nowrap;
padding-top: 5px;
height:43px;
}
.search input{
border-top-right-radius:10px;
padding-top:0;
padding-bottom:0;
font-size:1em;
width:100%;
height:38px;
vertical-align:top;
}
.btn{
border-top-left-radius:10px;
color:#fff;
font-size:100%;
padding: 8px;
width:38px;
background-color:#ed2553;
}
#tags{
text-align:left;
display: flex;
width:15rem;
justify-content: start;
margin: 2px 2px 2px 0px;
flex-wrap: wrap;
}
.btn-2{
font-weight:700;
padding-right:0.5rem;
padding-left:0.5rem;
color:#fff;
border:0;
font-size:100%;
height:1.25rem;
outline: 0;
border-radius: 0.3rem;
cursor: pointer;
margin:0.15rem;
transition: all 1s linear;
}
.btn-2#parody{
background-color: red;
}
.btn-2#character{
background-color: blue;
}
.btn-2#tag{
background-color: green;
}
.btn-2#artist{
background-color: fuchsia;
}
.btn-2#group{
background-color: teal;
}
.btn-2.hover{
filter: saturate(20%)
}
input,input:focus{
border:none;
outline:0;
}
html.theme-black,html.theme-black body {
color: #d9d9d9;
background-color: #0d0d0d
}
html.theme-black #thumbnail-container,html.theme-black .container {
background-color: #1f1f1f
}
html.theme-black .gallery:hover .caption {
box-shadow: 0 10px 20px rgba(0,0,0,.5)
}
html.theme-black .caption {
background-color: #404040;
color: #d9d9d9
}

nhentai/viewer/main.html Normal file
@ -0,0 +1,51 @@
<!doctype html>
<html lang="en" class=" theme-black">
<head>
<meta charset="utf-8" />
<meta name="theme-color" content="#1f1f1f" />
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=yes, viewport-fit=cover" />
<title>nHentai Viewer</title>
<script type="text/javascript" src="data.js"></script>
<!-- <link rel="stylesheet" href="./main.css"> -->
<style>
{STYLES}
</style>
</head>
<body>
<div id="content">
<nav class="sidenav">
<img src="logo.png">
<h1>nHentai Viewer</h1>
<button class="accordion">Language</button>
<div class="options" id="language">
<a>English</a>
<a>Japanese</a>
<a>Chinese</a>
</div>
<button class="accordion">Category</button>
<div class="options" id ="category">
<a>Doujinshi</a>
<a>Manga</a>
</div>
<button class="nav-btn hidden">Filters</button>
<div class="search">
<input autocomplete="off" type="search" id="tagfilter" name="q" value="" autocapitalize="none" required="">
<svg class="btn" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="white" d="M505 442.7L405.3 343c-4.5-4.5-10.6-7-17-7H372c27.6-35.3 44-79.7 44-128C416 93.1 322.9 0 208 0S0 93.1 0 208s93.1 208 208 208c48.3 0 92.7-16.4 128-44v16.3c0 6.4 2.5 12.5 7 17l99.7 99.7c9.4 9.4 24.6 9.4 33.9 0l28.3-28.3c9.4-9.4 9.4-24.6.1-34zM208 336c-70.7 0-128-57.2-128-128 0-70.7 57.2-128 128-128 70.7 0 128 57.2 128 128 0 70.7-57.2 128-128 128z"/></svg>
<div id="tags">
</div>
</nav>
<div class="container" id="favcontainer">
{PICTURE}
</div> <!-- container -->
</div>
<script>
{SCRIPTS}
</script>
</body>
</html>

nhentai/viewer/main.js Normal file
@ -0,0 +1,177 @@
//------------------------------------navbar script------------------------------------
var menu = document.getElementsByClassName("accordion");
for (var i = 0; i < menu.length; i++) {
menu[i].addEventListener("click", function() {
var panel = this.nextElementSibling;
if (panel.style.maxHeight) {
this.classList.toggle("active");
panel.style.maxHeight = null;
} else {
panel.style.maxHeight = panel.scrollHeight + "px";
this.classList.toggle("active");
}
});
}
var language = document.getElementById("language").children;
for (var i = 0; i < language.length; i++){
language[i].addEventListener("click", function() {
toggler = document.getElementById("language")
toggler.style.maxHeight = null;
document.getElementsByClassName("accordion")[0].classList.toggle("active");
filter_maker(this.innerText, "language");
});
}
var category = document.getElementById("category").children;
for (var i = 0; i < category.length; i++){
category[i].addEventListener("click", function() {
document.getElementById("category").style.maxHeight = null;
document.getElementsByClassName("accordion")[1].classList.toggle("active");
filter_maker(this.innerText, "category");
});
}
//-----------------------------------------------------------------------------------
//----------------------------------Tags Script--------------------------------------
tag_maker(tags);
var tag = document.getElementsByClassName("btn-2");
for (var i = 0; i < tag.length; i++){
tag[i].addEventListener("click", function() {
filter_maker(this.innerText, this.id);
});
}
var input = document.getElementById("tagfilter");
input.addEventListener("input", function() {
var tags = document.querySelectorAll(".btn-2");
if (this.value.length > 0) {
for (var i = 0; i < tags.length; i++) {
var tag = tags[i];
var nome = tag.innerText;
            var exp = new RegExp(this.value, "i");
if (exp.test(nome)) {
tag.classList.remove("hidden");
}
else {
tag.classList.add("hidden");
}
}
} else {
for (var i = 0; i < tags.length; i++) {
var tag = tags[i];
tag.classList.add('hidden');
}
}
});
input.addEventListener('keypress', function (e) {
enter_search(e, this.value);
});
//-----------------------------------------------------------------------------------
//------------------------------------Functions--------------------------------------
function enter_search(e, input){
var count = 0;
var key = e.which || e.keyCode;
if (key === 13 && input.length > 0) {
var all_tags = document.getElementById("tags").children;
for(i = 0; i < all_tags.length; i++){
if (!all_tags[i].classList.contains("hidden")){
count++;
var tag_name = all_tags[i].innerText;
var tag_id = all_tags[i].id;
if (count>1){break}
}
}
if (count == 1){
filter_maker(tag_name, tag_id);
}
}
}
function filter_maker(text, class_value){
var check = filter_checker(text);
var nav_btn = document.getElementsByClassName("nav-btn")[0];
if (nav_btn.classList.contains("hidden")){
nav_btn.classList.toggle("hidden");
}
if (check == true){
var node = document.createElement("a");
var textnode = document.createTextNode(text);
node.appendChild(textnode);
node.classList.add(class_value);
nav_btn.appendChild(node);
filter_searcher();
}
}
function filter_searcher(){
var verifier = null;
var tags_filter = [];
var doujinshi_id = [];
var filter_tag = document.getElementsByClassName("nav-btn")[0].children;
filter_tag[filter_tag.length-1].addEventListener("click", function() {
this.remove();
try{
filter_searcher();
}
catch{
var gallery = document.getElementsByClassName("gallery-favorite");
for (var i = 0; i < gallery.length; i++){
gallery[i].classList.remove("hidden");
}
}
});
for (var i=0; i < filter_tag.length; i++){
var fclass = filter_tag[i].className;
var fname = filter_tag[i].innerText.toLowerCase();
tags_filter.push([fclass, fname])
}
for (var i=0; i < data.length; i++){
for (var j=0; j < tags_filter.length; j++){
try{
if(data[i][tags_filter[j][0]].includes(tags_filter[j][1])){
verifier = true;
}
else{
verifier = false;
break
}
}
catch{
verifier = false;
break
}
}
if (verifier){doujinshi_id.push(data[i].Folder.replace("_", " "));}
}
var gallery = document.getElementsByClassName("gallery-favorite");
for (var i = 0; i < gallery.length; i++){
        var gtext = gallery[i].children[0].children[0].children[1].innerText;
if(doujinshi_id.includes(gtext)){
gallery[i].classList.remove("hidden");
}
else{
gallery[i].classList.add("hidden");
}
}
}
function filter_checker(text){
var filter_tags = document.getElementsByClassName("nav-btn")[0].children;
if (filter_tags == null){return true;}
for (var i=0; i < filter_tags.length; i++){
if (filter_tags[i].innerText == text){return false;}
}
return true;
}
function tag_maker(data){
for (i in data){
for (j in data[i]){
var node = document.createElement("button");
var textnode = document.createTextNode(data[i][j]);
node.appendChild(textnode);
node.classList.add("btn-2");
node.setAttribute('id', i);
node.classList.add("hidden");
document.getElementById("tags").appendChild(node);
}
}
}

@ -0,0 +1,25 @@
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width, initial-scale=1, user-scalable=yes, viewport-fit=cover" />
<title>{TITLE}</title>
<style>
{STYLES}
</style>
</head>
<body>
<nav id="list" hidden=true>
{IMAGES}</nav>
<div id="image-container">
<div id="dest"></div>
<span id="page-num"></span>
</div>
<script>
{SCRIPTS}
</script>
</body>
</html>

@ -0,0 +1,79 @@
const pages = Array.from(document.querySelectorAll('img.image-item'));
let currentPage = 0;
function changePage(pageNum) {
const previous = pages[currentPage];
const current = pages[pageNum];
if (current == null) {
return;
}
previous.classList.remove('current');
current.classList.add('current');
currentPage = pageNum;
const display = document.getElementById('dest');
display.style.backgroundImage = `url("${current.src}")`;
scroll(0,0)
document.getElementById('page-num')
.innerText = [
(pageNum + 1).toLocaleString(),
pages.length.toLocaleString()
].join('\u200a/\u200a');
}
changePage(0);
document.getElementById('image-container').onclick = event => {
const width = document.getElementById('image-container').clientWidth;
const clickPos = event.clientX / width;
if (clickPos < 0.5) {
changePage(currentPage - 1);
} else {
changePage(currentPage + 1);
}
};
document.onkeypress = event => {
switch (event.key.toLowerCase()) {
// Previous Image
case 'w':
scrollBy(0, -40);
break;
case 'a':
changePage(currentPage - 1);
break;
// Return to previous page
case 'q':
window.history.go(-1);
break;
// Next Image
case ' ':
case 's':
scrollBy(0, 40);
break;
case 'd':
changePage(currentPage + 1);
break;
    }  // arrow keys don't fire keypress events; left/right are handled in onkeydown below
};
document.onkeydown = event =>{
switch (event.keyCode) {
case 37: //left
changePage(currentPage - 1);
break;
case 38: //up
break;
case 39: //right
changePage(currentPage + 1);
break;
case 40: //down
break;
}
};

@ -0,0 +1,75 @@
*, *::after, *::before {
box-sizing: border-box;
}
img {
vertical-align: middle;
}
html, body {
display: flex;
background-color: #e8e6e6;
height: 100%;
width: 100%;
padding: 0;
margin: 0;
font-family: sans-serif;
}
#list {
height: 2000px;
overflow: scroll;
width: 260px;
text-align: center;
}
#list img {
width: 200px;
padding: 10px;
border-radius: 10px;
margin: 15px 0;
cursor: pointer;
}
#list img.current {
background: #0003;
}
#image-container {
flex: auto;
height: 100%;
background: rgb(0, 0, 0);
color: rgb(100, 100, 100);
text-align: center;
cursor: pointer;
-webkit-user-select: none;
user-select: none;
position: relative;
}
#image-container #dest {
height: 2000px;
width: 100%;
background-size: contain;
background-repeat: no-repeat;
background-position: top;
margin-left: auto;
margin-right: auto;
max-width: 100%;
max-height: 100vh;
margin: auto;
}
#image-container #page-num {
position: static;
font-size: 9pt;
left: 10px;
bottom: 5px;
font-weight: bold;
opacity: 0.9;
text-shadow: /* Duplicate the same shadow to make it very strong */
0 0 2px #222,
0 0 2px #222,
0 0 2px #222;
}

poetry.lock generated Normal file
@ -0,0 +1,351 @@
# This file is automatically @generated by Poetry 1.8.3 and should not be changed by hand.
[[package]]
name = "anyio"
version = "4.5.2"
description = "High level compatibility layer for multiple asynchronous event loop implementations"
optional = false
python-versions = ">=3.8"
files = [
{file = "anyio-4.5.2-py3-none-any.whl", hash = "sha256:c011ee36bc1e8ba40e5a81cb9df91925c218fe9b778554e0b56a21e1b5d4716f"},
{file = "anyio-4.5.2.tar.gz", hash = "sha256:23009af4ed04ce05991845451e11ef02fc7c5ed29179ac9a420e5ad0ac7ddc5b"},
]
[package.dependencies]
exceptiongroup = {version = ">=1.0.2", markers = "python_version < \"3.11\""}
idna = ">=2.8"
sniffio = ">=1.1"
typing-extensions = {version = ">=4.1", markers = "python_version < \"3.11\""}
[package.extras]
doc = ["Sphinx (>=7.4,<8.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme"]
test = ["anyio[trio]", "coverage[toml] (>=7)", "exceptiongroup (>=1.2.0)", "hypothesis (>=4.0)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "truststore (>=0.9.1)", "uvloop (>=0.21.0b1)"]
trio = ["trio (>=0.26.1)"]
[[package]]
name = "beautifulsoup4"
version = "4.12.3"
description = "Screen-scraping library"
optional = false
python-versions = ">=3.6.0"
files = [
{file = "beautifulsoup4-4.12.3-py3-none-any.whl", hash = "sha256:b80878c9f40111313e55da8ba20bdba06d8fa3969fc68304167741bbf9e082ed"},
{file = "beautifulsoup4-4.12.3.tar.gz", hash = "sha256:74e3d1928edc070d21748185c46e3fb33490f22f52a3addee9aee0f4f7781051"},
]
[package.dependencies]
soupsieve = ">1.2"
[package.extras]
cchardet = ["cchardet"]
chardet = ["chardet"]
charset-normalizer = ["charset-normalizer"]
html5lib = ["html5lib"]
lxml = ["lxml"]
[[package]]
name = "certifi"
version = "2024.12.14"
description = "Python package for providing Mozilla's CA Bundle."
optional = false
python-versions = ">=3.6"
files = [
{file = "certifi-2024.12.14-py3-none-any.whl", hash = "sha256:1275f7a45be9464efc1173084eaa30f866fe2e47d389406136d332ed4967ec56"},
{file = "certifi-2024.12.14.tar.gz", hash = "sha256:b650d30f370c2b724812bee08008be0c4163b163ddaec3f2546c1caf65f191db"},
]
[[package]]
name = "chardet"
version = "5.2.0"
description = "Universal encoding detector for Python 3"
optional = false
python-versions = ">=3.7"
files = [
{file = "chardet-5.2.0-py3-none-any.whl", hash = "sha256:e1cf59446890a00105fe7b7912492ea04b6e6f06d4b742b2c788469e34c82970"},
{file = "chardet-5.2.0.tar.gz", hash = "sha256:1b3b6ff479a8c414bc3fa2c0852995695c4a026dcd6d0633b2dd092ca39c1cf7"},
]
[[package]]
name = "charset-normalizer"
version = "3.4.1"
description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
optional = false
python-versions = ">=3.7"
files = [
{file = "charset_normalizer-3.4.1-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:91b36a978b5ae0ee86c394f5a54d6ef44db1de0815eb43de826d41d21e4af3de"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7461baadb4dc00fd9e0acbe254e3d7d2112e7f92ced2adc96e54ef6501c5f176"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e218488cd232553829be0664c2292d3af2eeeb94b32bea483cf79ac6a694e037"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80ed5e856eb7f30115aaf94e4a08114ccc8813e6ed1b5efa74f9f82e8509858f"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b010a7a4fd316c3c484d482922d13044979e78d1861f0e0650423144c616a46a"},
{file = "charset_normalizer-3.4.1-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4532bff1b8421fd0a320463030c7520f56a79c9024a4e88f01c537316019005a"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:d973f03c0cb71c5ed99037b870f2be986c3c05e63622c017ea9816881d2dd247"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:3a3bd0dcd373514dcec91c411ddb9632c0d7d92aed7093b8c3bbb6d69ca74408"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_ppc64le.whl", hash = "sha256:d9c3cdf5390dcd29aa8056d13e8e99526cda0305acc038b96b30352aff5ff2bb"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_s390x.whl", hash = "sha256:2bdfe3ac2e1bbe5b59a1a63721eb3b95fc9b6817ae4a46debbb4e11f6232428d"},
{file = "charset_normalizer-3.4.1-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:eab677309cdb30d047996b36d34caeda1dc91149e4fdca0b1a039b3f79d9a807"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win32.whl", hash = "sha256:c0429126cf75e16c4f0ad00ee0eae4242dc652290f940152ca8c75c3a4b6ee8f"},
{file = "charset_normalizer-3.4.1-cp310-cp310-win_amd64.whl", hash = "sha256:9f0b8b1c6d84c8034a44893aba5e767bf9c7a211e313a9605d9c617d7083829f"},
{file = "charset_normalizer-3.4.1-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:8bfa33f4f2672964266e940dd22a195989ba31669bd84629f05fab3ef4e2d125"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:28bf57629c75e810b6ae989f03c0828d64d6b26a5e205535585f96093e405ed1"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f08ff5e948271dc7e18a35641d2f11a4cd8dfd5634f55228b691e62b37125eb3"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:234ac59ea147c59ee4da87a0c0f098e9c8d169f4dc2a159ef720f1a61bbe27cd"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fd4ec41f914fa74ad1b8304bbc634b3de73d2a0889bd32076342a573e0779e00"},
{file = "charset_normalizer-3.4.1-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:eea6ee1db730b3483adf394ea72f808b6e18cf3cb6454b4d86e04fa8c4327a12"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:c96836c97b1238e9c9e3fe90844c947d5afbf4f4c92762679acfe19927d81d77"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:4d86f7aff21ee58f26dcf5ae81a9addbd914115cdebcbb2217e4f0ed8982e146"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_ppc64le.whl", hash = "sha256:09b5e6733cbd160dcc09589227187e242a30a49ca5cefa5a7edd3f9d19ed53fd"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_s390x.whl", hash = "sha256:5777ee0881f9499ed0f71cc82cf873d9a0ca8af166dfa0af8ec4e675b7df48e6"},
{file = "charset_normalizer-3.4.1-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:237bdbe6159cff53b4f24f397d43c6336c6b0b42affbe857970cefbb620911c8"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win32.whl", hash = "sha256:8417cb1f36cc0bc7eaba8ccb0e04d55f0ee52df06df3ad55259b9a323555fc8b"},
{file = "charset_normalizer-3.4.1-cp311-cp311-win_amd64.whl", hash = "sha256:d7f50a1f8c450f3925cb367d011448c39239bb3eb4117c36a6d354794de4ce76"},
{file = "charset_normalizer-3.4.1-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:73d94b58ec7fecbc7366247d3b0b10a21681004153238750bb67bd9012414545"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dad3e487649f498dd991eeb901125411559b22e8d7ab25d3aeb1af367df5efd7"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c30197aa96e8eed02200a83fba2657b4c3acd0f0aa4bdc9f6c1af8e8962e0757"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2369eea1ee4a7610a860d88f268eb39b95cb588acd7235e02fd5a5601773d4fa"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc2722592d8998c870fa4e290c2eec2c1569b87fe58618e67d38b4665dfa680d"},
{file = "charset_normalizer-3.4.1-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ffc9202a29ab3920fa812879e95a9e78b2465fd10be7fcbd042899695d75e616"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:804a4d582ba6e5b747c625bf1255e6b1507465494a40a2130978bda7b932c90b"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:0f55e69f030f7163dffe9fd0752b32f070566451afe180f99dbeeb81f511ad8d"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:c4c3e6da02df6fa1410a7680bd3f63d4f710232d3139089536310d027950696a"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:5df196eb874dae23dcfb968c83d4f8fdccb333330fe1fc278ac5ceeb101003a9"},
{file = "charset_normalizer-3.4.1-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:e358e64305fe12299a08e08978f51fc21fac060dcfcddd95453eabe5b93ed0e1"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win32.whl", hash = "sha256:9b23ca7ef998bc739bf6ffc077c2116917eabcc901f88da1b9856b210ef63f35"},
{file = "charset_normalizer-3.4.1-cp312-cp312-win_amd64.whl", hash = "sha256:6ff8a4a60c227ad87030d76e99cd1698345d4491638dfa6673027c48b3cd395f"},
{file = "charset_normalizer-3.4.1-cp313-cp313-macosx_10_13_universal2.whl", hash = "sha256:aabfa34badd18f1da5ec1bc2715cadc8dca465868a4e73a0173466b688f29dda"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:22e14b5d70560b8dd51ec22863f370d1e595ac3d024cb8ad7d308b4cd95f8313"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8436c508b408b82d87dc5f62496973a1805cd46727c34440b0d29d8a2f50a6c9"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2d074908e1aecee37a7635990b2c6d504cd4766c7bc9fc86d63f9c09af3fa11b"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:955f8851919303c92343d2f66165294848d57e9bba6cf6e3625485a70a038d11"},
{file = "charset_normalizer-3.4.1-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:44ecbf16649486d4aebafeaa7ec4c9fed8b88101f4dd612dcaf65d5e815f837f"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:0924e81d3d5e70f8126529951dac65c1010cdf117bb75eb02dd12339b57749dd"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:2967f74ad52c3b98de4c3b32e1a44e32975e008a9cd2a8cc8966d6a5218c5cb2"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_ppc64le.whl", hash = "sha256:c75cb2a3e389853835e84a2d8fb2b81a10645b503eca9bcb98df6b5a43eb8886"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_s390x.whl", hash = "sha256:09b26ae6b1abf0d27570633b2b078a2a20419c99d66fb2823173d73f188ce601"},
{file = "charset_normalizer-3.4.1-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:fa88b843d6e211393a37219e6a1c1df99d35e8fd90446f1118f4216e307e48cd"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win32.whl", hash = "sha256:eb8178fe3dba6450a3e024e95ac49ed3400e506fd4e9e5c32d30adda88cbd407"},
{file = "charset_normalizer-3.4.1-cp313-cp313-win_amd64.whl", hash = "sha256:b1ac5992a838106edb89654e0aebfc24f5848ae2547d22c2c3f66454daa11971"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f30bf9fd9be89ecb2360c7d94a711f00c09b976258846efe40db3d05828e8089"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:97f68b8d6831127e4787ad15e6757232e14e12060bec17091b85eb1486b91d8d"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7974a0b5ecd505609e3b19742b60cee7aa2aa2fb3151bc917e6e2646d7667dcf"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fc54db6c8593ef7d4b2a331b58653356cf04f67c960f584edb7c3d8c97e8f39e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:311f30128d7d333eebd7896965bfcfbd0065f1716ec92bd5638d7748eb6f936a"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_aarch64.whl", hash = "sha256:7d053096f67cd1241601111b698f5cad775f97ab25d81567d3f59219b5f1adbd"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_i686.whl", hash = "sha256:807f52c1f798eef6cf26beb819eeb8819b1622ddfeef9d0977a8502d4db6d534"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_ppc64le.whl", hash = "sha256:dccbe65bd2f7f7ec22c4ff99ed56faa1e9f785482b9bbd7c717e26fd723a1d1e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_s390x.whl", hash = "sha256:2fb9bd477fdea8684f78791a6de97a953c51831ee2981f8e4f583ff3b9d9687e"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-musllinux_1_2_x86_64.whl", hash = "sha256:01732659ba9b5b873fc117534143e4feefecf3b2078b0a6a2e925271bb6f4cfa"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win32.whl", hash = "sha256:7a4f97a081603d2050bfaffdefa5b02a9ec823f8348a572e39032caa8404a487"},
{file = "charset_normalizer-3.4.1-cp37-cp37m-win_amd64.whl", hash = "sha256:7b1bef6280950ee6c177b326508f86cad7ad4dff12454483b51d8b7d673a2c5d"},
{file = "charset_normalizer-3.4.1-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ecddf25bee22fe4fe3737a399d0d177d72bc22be6913acfab364b40bce1ba83c"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8c60ca7339acd497a55b0ea5d506b2a2612afb2826560416f6894e8b5770d4a9"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b7b2d86dd06bfc2ade3312a83a5c364c7ec2e3498f8734282c6c3d4b07b346b8"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dd78cfcda14a1ef52584dbb008f7ac81c1328c0f58184bf9a84c49c605002da6"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6e27f48bcd0957c6d4cb9d6fa6b61d192d0b13d5ef563e5f2ae35feafc0d179c"},
{file = "charset_normalizer-3.4.1-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:01ad647cdd609225c5350561d084b42ddf732f4eeefe6e678765636791e78b9a"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:619a609aa74ae43d90ed2e89bdd784765de0a25ca761b93e196d938b8fd1dbbd"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:89149166622f4db9b4b6a449256291dc87a99ee53151c74cbd82a53c8c2f6ccd"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_ppc64le.whl", hash = "sha256:7709f51f5f7c853f0fb938bcd3bc59cdfdc5203635ffd18bf354f6967ea0f824"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_s390x.whl", hash = "sha256:345b0426edd4e18138d6528aed636de7a9ed169b4aaf9d61a8c19e39d26838ca"},
{file = "charset_normalizer-3.4.1-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:0907f11d019260cdc3f94fbdb23ff9125f6b5d1039b76003b5b0ac9d6a6c9d5b"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win32.whl", hash = "sha256:ea0d8d539afa5eb2728aa1932a988a9a7af94f18582ffae4bc10b3fbdad0626e"},
{file = "charset_normalizer-3.4.1-cp38-cp38-win_amd64.whl", hash = "sha256:329ce159e82018d646c7ac45b01a430369d526569ec08516081727a20e9e4af4"},
{file = "charset_normalizer-3.4.1-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b97e690a2118911e39b4042088092771b4ae3fc3aa86518f84b8cf6888dbdb41"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:78baa6d91634dfb69ec52a463534bc0df05dbd546209b79a3880a34487f4b84f"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1a2bc9f351a75ef49d664206d51f8e5ede9da246602dc2d2726837620ea034b2"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:75832c08354f595c760a804588b9357d34ec00ba1c940c15e31e96d902093770"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0af291f4fe114be0280cdd29d533696a77b5b49cfde5467176ecab32353395c4"},
{file = "charset_normalizer-3.4.1-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0167ddc8ab6508fe81860a57dd472b2ef4060e8d378f0cc555707126830f2537"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:2a75d49014d118e4198bcee5ee0a6f25856b29b12dbf7cd012791f8a6cc5c496"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:363e2f92b0f0174b2f8238240a1a30142e3db7b957a5dd5689b0e75fb717cc78"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_ppc64le.whl", hash = "sha256:ab36c8eb7e454e34e60eb55ca5d241a5d18b2c6244f6827a30e451c42410b5f7"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_s390x.whl", hash = "sha256:4c0907b1928a36d5a998d72d64d8eaa7244989f7aaaf947500d3a800c83a3fd6"},
{file = "charset_normalizer-3.4.1-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:04432ad9479fa40ec0f387795ddad4437a2b50417c69fa275e212933519ff294"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win32.whl", hash = "sha256:3bed14e9c89dcb10e8f3a29f9ccac4955aebe93c71ae803af79265c9ca5644c5"},
{file = "charset_normalizer-3.4.1-cp39-cp39-win_amd64.whl", hash = "sha256:49402233c892a461407c512a19435d1ce275543138294f7ef013f0b63d5d3765"},
{file = "charset_normalizer-3.4.1-py3-none-any.whl", hash = "sha256:d98b1668f06378c6dbefec3b92299716b931cd4e6061f3c875a71ced1780ab85"},
{file = "charset_normalizer-3.4.1.tar.gz", hash = "sha256:44251f18cd68a75b56585dd00dae26183e102cd5e0f9f1466e6df5da2ed64ea3"},
]
[[package]]
name = "exceptiongroup"
version = "1.2.2"
description = "Backport of PEP 654 (exception groups)"
optional = false
python-versions = ">=3.7"
files = [
{file = "exceptiongroup-1.2.2-py3-none-any.whl", hash = "sha256:3111b9d131c238bec2f8f516e123e14ba243563fb135d3fe885990585aa7795b"},
{file = "exceptiongroup-1.2.2.tar.gz", hash = "sha256:47c2edf7c6738fafb49fd34290706d1a1a2f4d1c6df275526b62cbb4aa5393cc"},
]
[package.extras]
test = ["pytest (>=6)"]
[[package]]
name = "h11"
version = "0.14.0"
description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
optional = false
python-versions = ">=3.7"
files = [
{file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
{file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
]
[[package]]
name = "httpcore"
version = "1.0.7"
description = "A minimal low-level HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpcore-1.0.7-py3-none-any.whl", hash = "sha256:a3fff8f43dc260d5bd363d9f9cf1830fa3a458b332856f34282de498ed420edd"},
{file = "httpcore-1.0.7.tar.gz", hash = "sha256:8551cb62a169ec7162ac7be8d4817d561f60e08eaa485234898414bb5a8a0b4c"},
]
[package.dependencies]
certifi = "*"
h11 = ">=0.13,<0.15"
[package.extras]
asyncio = ["anyio (>=4.0,<5.0)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
trio = ["trio (>=0.22.0,<1.0)"]
[[package]]
name = "httpx"
version = "0.28.1"
description = "The next generation HTTP client."
optional = false
python-versions = ">=3.8"
files = [
{file = "httpx-0.28.1-py3-none-any.whl", hash = "sha256:d909fcccc110f8c7faf814ca82a9a4d816bc5a6dbfea25d6591d6985b8ba59ad"},
{file = "httpx-0.28.1.tar.gz", hash = "sha256:75e98c5f16b0f35b567856f597f06ff2270a374470a5c2392242528e3e3e42fc"},
]
[package.dependencies]
anyio = "*"
certifi = "*"
httpcore = "==1.*"
idna = "*"
[package.extras]
brotli = ["brotli", "brotlicffi"]
cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
http2 = ["h2 (>=3,<5)"]
socks = ["socksio (==1.*)"]
zstd = ["zstandard (>=0.18.0)"]
[[package]]
name = "idna"
version = "3.10"
description = "Internationalized Domain Names in Applications (IDNA)"
optional = false
python-versions = ">=3.6"
files = [
{file = "idna-3.10-py3-none-any.whl", hash = "sha256:946d195a0d259cbba61165e88e65941f16e9b36ea6ddb97f00452bae8b1287d3"},
{file = "idna-3.10.tar.gz", hash = "sha256:12f65c9b470abda6dc35cf8e63cc574b1c52b11df2c86030af0ac09b01b13ea9"},
]
[package.extras]
all = ["flake8 (>=7.1.1)", "mypy (>=1.11.2)", "pytest (>=8.3.2)", "ruff (>=0.6.2)"]
[[package]]
name = "iso8601"
version = "1.1.0"
description = "Simple module to parse ISO 8601 dates"
optional = false
python-versions = ">=3.6.2,<4.0"
files = [
{file = "iso8601-1.1.0-py3-none-any.whl", hash = "sha256:8400e90141bf792bce2634df533dc57e3bee19ea120a87bebcd3da89a58ad73f"},
{file = "iso8601-1.1.0.tar.gz", hash = "sha256:32811e7b81deee2063ea6d2e94f8819a86d1f3811e49d23623a41fa832bef03f"},
]
[[package]]
name = "requests"
version = "2.32.3"
description = "Python HTTP for Humans."
optional = false
python-versions = ">=3.8"
files = [
{file = "requests-2.32.3-py3-none-any.whl", hash = "sha256:70761cfe03c773ceb22aa2f671b4757976145175cdfca038c02654d061d6dcc6"},
{file = "requests-2.32.3.tar.gz", hash = "sha256:55365417734eb18255590a9ff9eb97e9e1da868d4ccd6402399eaf68af20a760"},
]
[package.dependencies]
certifi = ">=2017.4.17"
charset-normalizer = ">=2,<4"
idna = ">=2.5,<4"
urllib3 = ">=1.21.1,<3"
[package.extras]
socks = ["PySocks (>=1.5.6,!=1.5.7)"]
use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
[[package]]
name = "sniffio"
version = "1.3.1"
description = "Sniff out which async library your code is running under"
optional = false
python-versions = ">=3.7"
files = [
{file = "sniffio-1.3.1-py3-none-any.whl", hash = "sha256:2f6da418d1f1e0fddd844478f41680e794e6051915791a034ff65e5f100525a2"},
{file = "sniffio-1.3.1.tar.gz", hash = "sha256:f4324edc670a0f49750a81b895f35c3adb843cca46f0530f79fc1babb23789dc"},
]
[[package]]
name = "soupsieve"
version = "2.6"
description = "A modern CSS selector implementation for Beautiful Soup."
optional = false
python-versions = ">=3.8"
files = [
{file = "soupsieve-2.6-py3-none-any.whl", hash = "sha256:e72c4ff06e4fb6e4b5a9f0f55fe6e81514581fca1515028625d0f299c602ccc9"},
{file = "soupsieve-2.6.tar.gz", hash = "sha256:e2e68417777af359ec65daac1057404a3c8a5455bb8abc36f1a9866ab1a51abb"},
]
[[package]]
name = "tabulate"
version = "0.9.0"
description = "Pretty-print tabular data"
optional = false
python-versions = ">=3.7"
files = [
{file = "tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f"},
{file = "tabulate-0.9.0.tar.gz", hash = "sha256:0095b12bf5966de529c0feb1fa08671671b3368eec77d7ef7ab114be2c068b3c"},
]
[package.extras]
widechars = ["wcwidth"]
[[package]]
name = "typing-extensions"
version = "4.12.2"
description = "Backported and Experimental Type Hints for Python 3.8+"
optional = false
python-versions = ">=3.8"
files = [
{file = "typing_extensions-4.12.2-py3-none-any.whl", hash = "sha256:04e5ca0351e0f3f85c6853954072df659d0d13fac324d0072316b67d7794700d"},
{file = "typing_extensions-4.12.2.tar.gz", hash = "sha256:1a7ead55c7e559dd4dee8856e3a88b41225abfe1ce8df57b7c13915fe121ffb8"},
]
[[package]]
name = "urllib3"
version = "1.26.20"
description = "HTTP library with thread-safe connection pooling, file post, and more."
optional = false
python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,>=2.7"
files = [
{file = "urllib3-1.26.20-py2.py3-none-any.whl", hash = "sha256:0ed14ccfbf1c30a9072c7ca157e4319b70d65f623e91e7b32fadb2853431016e"},
{file = "urllib3-1.26.20.tar.gz", hash = "sha256:40c2dc0c681e47eb8f90e7e27bf6ff7df2e677421fd46756da1161c39ca70d32"},
]
[package.extras]
brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
[metadata]
lock-version = "2.0"
python-versions = "^3.8"
content-hash = "b17c2cdd4b140f2ab8081bca7d94630e821fa2e882ac768b1bd8cf3ec58726ce"

pyproject.toml Normal file

@@ -0,0 +1,28 @@
[tool.poetry]
name = "nhentai"
version = "0.6.0-beta"
description = "nhentai doujinshi downloader"
authors = ["Ricter Z <ricterzheng@gmail.com>"]
license = "MIT"
readme = "README.rst"
include = ["nhentai/viewer/**"]

[tool.poetry.dependencies]
python = "^3.8"
requests = "^2.32.3"
soupsieve = "^2.6"
beautifulsoup4 = "^4.12.3"
tabulate = "^0.9.0"
iso8601 = "^1.1.0"
urllib3 = "^1.26.20"
httpx = "^0.28.1"
chardet = "^5.2.0"

[build-system]
requires = ["poetry-core"]
build-backend = "poetry.core.masonry.api"

[tool.poetry.scripts]
nhentai = 'nhentai.command:main'
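
The `[tool.poetry.scripts]` table replaces the `console_scripts` entry point from the deleted setup.py further down: the build backend writes the same entry point into the installed package's metadata. As a rough, illustrative sketch (not part of the diff), the script could be resolved by hand with `importlib.metadata` on Python 3.10+ after `pip install .`:

# Illustrative only: look up the 'nhentai' console script in the
# installed package metadata (assumes Python >= 3.10 and that the
# package is installed exactly once).
from importlib.metadata import entry_points

(ep,) = entry_points(group='console_scripts', name='nhentai')
main = ep.load()   # imports nhentai.command and returns its main function
# main()           # would start the CLI, same as running `nhentai`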

qodana.yaml Executable file

@@ -0,0 +1,29 @@
#-------------------------------------------------------------------------------#
# Qodana analysis is configured by qodana.yaml file #
# https://www.jetbrains.com/help/qodana/qodana-yaml.html #
#-------------------------------------------------------------------------------#
version: "1.0"
#Specify inspection profile for code analysis
profile:
  name: qodana.starter
#Enable inspections
#include:
# - name: <SomeEnabledInspectionId>
#Disable inspections
#exclude:
# - name: <SomeDisabledInspectionId>
# paths:
# - <path/where/not/run/inspection>
#Execute shell command before Qodana execution (Applied in CI/CD pipeline)
#bootstrap: sh ./prepare-qodana.sh
#Install IDE plugins before Qodana execution (Applied in CI/CD pipeline)
#plugins:
# - id: <plugin.id> #(plugin id can be found at https://plugins.jetbrains.com)
#Specify Qodana linter for analysis (Applied in CI/CD pipeline)
linter: jetbrains/qodana-python:2024.3

requirements.txt Deleted file

@@ -1,4 +0,0 @@
requests>=2.5.0
BeautifulSoup4>=4.0.0
threadpool>=1.2.7
tabulate>=0.7.5

setup.py Deleted file

@@ -1,27 +0,0 @@
from setuptools import setup, find_packages
from nhentai import __version__, __author__, __email__


with open('requirements.txt') as f:
    requirements = [l for l in f.read().splitlines() if l]


setup(
    name='nhentai',
    version=__version__,
    packages=find_packages(),
    author=__author__,
    author_email=__email__,
    keywords='nhentai, doujinshi',
    description='nhentai.net doujinshis downloader',
    url='https://github.com/RicterZ/nhentai',
    include_package_data=True,
    zip_safe=False,
    install_requires=requirements,
    entry_points={
        'console_scripts': [
            'nhentai = nhentai.command:main',
        ]
    },
    license='MIT',
)

tests/__init__.py Normal file (empty)
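
The empty tests/__init__.py makes tests a regular package, so the test files below can be collected with unittest discovery. A sketch equivalent to running `python -m unittest discover -s tests -v` from the repository root:

# Programmatic test discovery; assumes the working directory is the repo root.
import unittest

suite = unittest.defaultTestLoader.discover('tests')
unittest.TextTestRunner(verbosity=2).run(suite)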

tests/test_download.py Normal file

@@ -0,0 +1,56 @@
import unittest
import os
import zipfile

import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.downloader import Downloader, CompressedDownloader
from nhentai.parser import doujinshi_parser
from nhentai.doujinshi import Doujinshi
from nhentai.utils import generate_html

did = 440546


def has_jpeg_file(path):
    with zipfile.ZipFile(path, 'r') as zf:
        return '01.jpg' in zf.namelist()


def is_zip_file(path):
    try:
        with zipfile.ZipFile(path, 'r') as _:
            return True
    except (zipfile.BadZipFile, FileNotFoundError):
        return False


class TestDownload(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')

        self.info = Doujinshi(**doujinshi_parser(did), name_format='%i')

    def test_download(self):
        info = self.info
        info.downloader = Downloader(path='/tmp', threads=5)
        info.download()

        self.assertTrue(os.path.exists(f'/tmp/{did}/01.jpg'))

        generate_html('/tmp', info)
        self.assertTrue(os.path.exists(f'/tmp/{did}/index.html'))

    def test_zipfile_download(self):
        info = self.info
        info.downloader = CompressedDownloader(path='/tmp', threads=5)
        info.download()

        zipfile_path = f'/tmp/{did}.zip'
        self.assertTrue(os.path.exists(zipfile_path))
        self.assertTrue(is_zip_file(zipfile_path))
        self.assertTrue(has_jpeg_file(zipfile_path))


if __name__ == '__main__':
    unittest.main()
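
CompressedDownloader itself lives in nhentai/downloader.py and is not shown in this compare view; the test above only pins down its observable behavior: a `<id>.zip` that is a valid archive and contains the page images. A minimal sketch of that behavior, assuming pages arrive as (filename, bytes) pairs, with the zip handle closed explicitly rather than left to the garbage collector:

# Hypothetical sketch only -- not the real implementation from the diff.
import zipfile

def save_pages_to_zip(pages, zip_path):
    zf = zipfile.ZipFile(zip_path, 'w', zipfile.ZIP_DEFLATED)
    try:
        for filename, data in pages:   # e.g. ('01.jpg', b'\xff\xd8...')
            zf.writestr(filename, data)
    finally:
        zf.close()                     # close the handle deterministically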

tests/test_login.py Normal file

@@ -0,0 +1,26 @@
import os
import unittest

import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.utils import check_cookie


class TestLogin(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')

    def test_cookie(self):
        try:
            check_cookie()
        except Exception as e:
            self.fail(f'check_cookie() raised an exception: {e!r}')


if __name__ == '__main__':
    unittest.main()
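
These tests read NHENTAI_COOKIE and NHENTAI_UA from the environment and talk to the live site, so they error out rather than skip when credentials are missing. A possible guard, not part of this commit, would skip them instead:

# Hypothetical hardening, not in the diff: skip network-backed tests
# when no credentials are supplied via the environment.
import os
import unittest

@unittest.skipUnless(os.getenv('NHENTAI_COOKIE') and os.getenv('NHENTAI_UA'),
                     'NHENTAI_COOKIE / NHENTAI_UA not set')
class TestLogin(unittest.TestCase):
    ...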

tests/test_parser.py Normal file

@@ -0,0 +1,27 @@
import unittest
import os

import urllib3.exceptions

from nhentai import constant
from nhentai.cmdline import load_config
from nhentai.parser import search_parser, doujinshi_parser, favorites_parser


class TestParser(unittest.TestCase):
    def setUp(self) -> None:
        urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
        load_config()
        constant.CONFIG['cookie'] = os.getenv('NHENTAI_COOKIE')
        constant.CONFIG['useragent'] = os.getenv('NHENTAI_UA')

    def test_search(self):
        result = search_parser('umaru', 'recent', [1], False)
        self.assertGreater(len(result), 0)

    def test_doujinshi_parser(self):
        result = doujinshi_parser(123456)
        self.assertEqual(result['pages'], 84)

    def test_favorites_parser(self):
        result = favorites_parser(page=[1])
        self.assertGreater(len(result), 0)