Compare commits


12 Commits
0.5.3 ... 0.5.4

SHA1 Message Date
a893f54da1 0.5.4 2023-12-28 17:46:40 +08:00
4e307911ce Merge pull request #297 from RicterZ/dependabot/pip/urllib3-1.26.18
Bump urllib3 from 1.26.14 to 1.26.18
2023-12-28 17:46:07 +08:00
f9b7f828a5 fix #298 2023-12-28 17:45:37 +08:00
092df9e539 Bump urllib3 from 1.26.14 to 1.26.18
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.14 to 1.26.18.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.14...1.26.18)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-17 23:59:22 +00:00
8d74866abf Update README.rst 2023-08-21 21:47:07 +08:00
bc5b7f982d Merge pull request #294 from edgar1016/master
Added --move-to-folder
2023-08-19 19:13:38 +08:00
e54f3cbd06 Added --move-to-folder 2023-08-18 18:30:14 -07:00
a31c615259 Merge pull request #284 from RicterZ/dependabot/pip/requests-2.31.0
Bump requests from 2.28.2 to 2.31.0
2023-05-25 20:40:59 +08:00
cf0b76204d Bump requests from 2.28.2 to 2.31.0
Bumps [requests](https://github.com/psf/requests) from 2.28.2 to 2.31.0.
- [Release notes](https://github.com/psf/requests/releases)
- [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md)
- [Commits](https://github.com/psf/requests/compare/v2.28.2...v2.31.0)

---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-05-23 06:19:34 +00:00
17402623c4 Merge pull request #282 from edgar1016/master
--page-all works with favorites
2023-04-22 13:06:40 +08:00
a1a310f06b --page-all works with favorites 2023-04-21 22:00:00 -07:00
57673da762 update version 2023-03-28 21:02:47 +08:00
7 changed files with 46 additions and 27 deletions

README.rst

@@ -11,6 +11,8 @@ nhentai
 nhentai is a CLI tool for downloading doujinshi from `nhentai.net <https://nhentai.net>`_
 
+GUI version: `https://github.com/edgar1016/nhentai-GUI <https://github.com/edgar1016/nhentai-GUI>`_
+
 ===================
 Manual Installation
 ===================
@@ -200,6 +202,8 @@ Other options:
   -P, --pdf             generate PDF file
   --rm-origin-dir       remove downloaded doujinshi dir when generated CBZ or
                         PDF file
+  --move-to-folder      remove files in doujinshi dir then move new file to folder
+                        when generated CBZ or PDF file
   --meta                generate a metadata file in doujinshi format
   --regenerate-cbz      regenerate the cbz file if exists

nhentai/__init__.py

@@ -1,3 +1,3 @@
-__version__ = '0.5.3'
+__version__ = '0.5.4'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

nhentai/cmdline.py

@@ -112,6 +112,8 @@ def cmd_parser():
                       help='generate PDF file')
     parser.add_option('--rm-origin-dir', dest='rm_origin_dir', action='store_true', default=False,
                       help='remove downloaded doujinshi dir when generated CBZ or PDF file')
+    parser.add_option('--move-to-folder', dest='move_to_folder', action='store_true', default=False,
+                      help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file')
     parser.add_option('--meta', dest='generate_metadata', action='store_true',
                       help='generate a metadata file in doujinshi format')
     parser.add_option('--regenerate-cbz', dest='regenerate_cbz', action='store_true', default=False,
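
As a quick illustration of what the added option does at the parsing layer, the stripped-down optparse sketch below (illustrative only, not nhentai's full cmd_parser) shows that --move-to-folder ends up as a boolean options.move_to_folder flag:

# Illustrative sketch: the add_option call is copied from the diff above; the
# surrounding minimal parser is an assumption, not nhentai's actual cmd_parser().
from optparse import OptionParser

parser = OptionParser()
parser.add_option('--move-to-folder', dest='move_to_folder', action='store_true', default=False,
                  help='remove files in doujinshi dir then move new file to folder when generated CBZ or PDF file')

options, args = parser.parse_args(['--move-to-folder'])
print(options.move_to_folder)  # True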

nhentai/command.py

@@ -46,7 +46,7 @@ def main():
         if not options.is_download:
             logger.warning('You do not specify --download option')
 
-        doujinshis = favorites_parser(page=page_list)
+        doujinshis = favorites_parser() if options.page_all else favorites_parser(page=page_list)
 
     elif options.keyword:
         if constant.CONFIG['language']:
@@ -96,9 +96,9 @@ def main():
         if not options.is_nohtml and not options.is_cbz and not options.is_pdf:
             generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template'])
         elif options.is_cbz:
-            generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)
+            generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir, True, options.move_to_folder)
         elif options.is_pdf:
-            generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir)
+            generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir, options.move_to_folder)
 
         if options.main_viewer:
             generate_main_html(options.output_dir)

nhentai/utils.py

@@ -163,7 +163,7 @@ def generate_main_html(output_dir='./'):
         logger.warning(f'Writing Main Viewer failed ({e})')
 
 
-def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True):
+def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True, move_to_folder=False):
     if doujinshi_obj is not None:
         doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
         if write_comic_info:
@@ -185,10 +185,21 @@ generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_
     if rm_origin_dir:
         shutil.rmtree(doujinshi_dir, ignore_errors=True)
 
+    if move_to_folder:
+        for filename in os.listdir(doujinshi_dir):
+            file_path = os.path.join(doujinshi_dir, filename)
+            if os.path.isfile(file_path):
+                try:
+                    os.remove(file_path)
+                except Exception as e:
+                    print(f"Error deleting file: {e}")
+
+        shutil.move(cbz_filename, doujinshi_dir)
+
     logger.log(16, f'Comic Book CBZ file has been written to "{doujinshi_dir}"')
 
 
-def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
+def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, move_to_folder=False):
     try:
         import img2pdf
@@ -216,6 +227,17 @@ def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
         if rm_origin_dir:
             shutil.rmtree(doujinshi_dir, ignore_errors=True)
 
+        if move_to_folder:
+            for filename in os.listdir(doujinshi_dir):
+                file_path = os.path.join(doujinshi_dir, filename)
+                if os.path.isfile(file_path):
+                    try:
+                        os.remove(file_path)
+                    except Exception as e:
+                        print(f"Error deleting file: {e}")
+
+            shutil.move(pdf_filename, doujinshi_dir)
+
         logger.log(16, f'PDF file has been written to "{doujinshi_dir}"')
 
     except ImportError:
@@ -232,7 +254,7 @@ def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
     # maybe you can use `--format` to select a suitable filename
 
     if not _truncate_only:
-        ban_chars = '\\\'/:,;*?"<>|\t'
+        ban_chars = '\\\'/:,;*?"<>|\t\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b'
         filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip()
         filename = ' '.join(filename.split())
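
The two blocks added to generate_cbz and generate_pdf implement the new --move-to-folder behaviour: the loose page files are deleted from the doujinshi directory and the freshly generated archive is moved into it. A minimal standalone sketch of that pattern is shown below; the helper name and example paths are hypothetical and not part of nhentai.

# Standalone sketch of the move-to-folder flow added above. The function name and
# the example paths are hypothetical; only the delete-then-move logic mirrors the diff.
import os
import shutil


def move_archive_to_folder(archive_path, folder):
    # Remove the loose page files left in the folder after the archive was built.
    for name in os.listdir(folder):
        path = os.path.join(folder, name)
        if os.path.isfile(path):
            try:
                os.remove(path)
            except OSError as e:
                print(f"Error deleting file: {e}")
    # Move the generated CBZ/PDF into the now-emptied folder.
    shutil.move(archive_path, folder)


if __name__ == '__main__':
    # Guarded so the sketch runs even if the example paths do not exist.
    if os.path.isdir('./downloads/example') and os.path.isfile('./downloads/example.cbz'):
        move_archive_to_folder('./downloads/example.cbz', './downloads/example')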

poetry.lock (generated)

@@ -1,10 +1,9 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.
 
 [[package]]
 name = "beautifulsoup4"
 version = "4.11.2"
 description = "Screen-scraping library"
-category = "main"
 optional = false
 python-versions = ">=3.6.0"
 files = [
@@ -23,7 +22,6 @@ lxml = ["lxml"]
 name = "certifi"
 version = "2022.12.7"
 description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -35,7 +33,6 @@ files = [
 name = "charset-normalizer"
 version = "3.0.1"
 description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -133,7 +130,6 @@ files = [
 name = "idna"
 version = "3.4"
 description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
 optional = false
 python-versions = ">=3.5"
 files = [
@@ -145,7 +141,6 @@ files = [
 name = "iso8601"
 version = "1.1.0"
 description = "Simple module to parse ISO 8601 dates"
-category = "main"
 optional = false
 python-versions = ">=3.6.2,<4.0"
 files = [
@@ -155,21 +150,20 @@ files = [
 
 [[package]]
 name = "requests"
-version = "2.28.2"
+version = "2.31.0"
 description = "Python HTTP for Humans."
-category = "main"
 optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
 files = [
-    {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
-    {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+    {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+    {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
 ]
 
 [package.dependencies]
 certifi = ">=2017.4.17"
 charset-normalizer = ">=2,<4"
 idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"
 
 [package.extras]
 socks = ["PySocks (>=1.5.6,!=1.5.7)"]
@@ -179,7 +173,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
 name = "soupsieve"
 version = "2.4"
 description = "A modern CSS selector implementation for Beautiful Soup."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -191,7 +184,6 @@ files = [
 name = "tabulate"
 version = "0.9.0"
 description = "Pretty-print tabular data"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -204,18 +196,17 @@ widechars = ["wcwidth"]
 
 [[package]]
 name = "urllib3"
-version = "1.26.14"
+version = "1.26.18"
 description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
 files = [
-    {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"},
-    {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"},
+    {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"},
+    {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"},
 ]
 
 [package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
 secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
 socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]

pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "nhentai"
-version = "0.5.2"
+version = "0.5.3"
 description = "nhentai doujinshi downloader"
 authors = ["Ricter Z <ricterzheng@gmail.com>"]
 license = "MIT"