Compare commits

...

35 Commits
dev ... 0.5.6

Author SHA1 Message Date
35c55503fa 0.5.6 2024-09-20 23:39:38 +08:00
29aac84d53 fix #336 2024-09-20 23:34:26 +08:00
4ed4523782 fix #341 2024-09-20 23:27:37 +08:00
4223326c13 Merge pull request #340 from vglint/patch-3
Fix gallery search for folders with underscore
2024-09-14 10:17:57 +08:00
a248ff98c4 Fix gallery search for folders with underscore
Gallery title names replace '_' in the folder name with ' ' (generate_main_html()). To match against these title names when searching, we must also replace '_' with ' ' for each folder name we add to the list of titles to unhide.
2024-09-13 15:56:01 -07:00
021f17d229 Merge pull request #321 from PenitentMonke/xdg-base-dir
Adhere to XDG base dir spec on Linux
2024-07-08 22:03:38 +08:00
4162eabe93 Adhere to XDG base dir spec on Linux
Change how NHENTAI_HOME is set to follow the XDG Base Directory
Specification where possible, when running on Linux.

ISSUE: 299
2024-07-07 02:40:33 -03:00
dc54a43610 Merge pull request #311 from RicterZ/dev
Dev merge to master
2024-03-28 17:56:28 +08:00
473f948565 update 2024-02-20 10:28:54 +08:00
f701485840 remove print 2024-02-20 10:27:34 +08:00
d8e4f50609 support #291 2024-02-20 10:25:44 +08:00
a893f54da1 0.5.4 2023-12-28 17:46:40 +08:00
4e307911ce Merge pull request #297 from RicterZ/dependabot/pip/urllib3-1.26.18
Bump urllib3 from 1.26.14 to 1.26.18
2023-12-28 17:46:07 +08:00
f9b7f828a5 fix #298 2023-12-28 17:45:37 +08:00
092df9e539 Bump urllib3 from 1.26.14 to 1.26.18
Bumps [urllib3](https://github.com/urllib3/urllib3) from 1.26.14 to 1.26.18.
- [Release notes](https://github.com/urllib3/urllib3/releases)
- [Changelog](https://github.com/urllib3/urllib3/blob/main/CHANGES.rst)
- [Commits](https://github.com/urllib3/urllib3/compare/1.26.14...1.26.18)

---
updated-dependencies:
- dependency-name: urllib3
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-10-17 23:59:22 +00:00
8d74866abf Update README.rst 2023-08-21 21:47:07 +08:00
bc5b7f982d Merge pull request #294 from edgar1016/master
Added --move-to-folder
2023-08-19 19:13:38 +08:00
e54f3cbd06 Added --move-to-folder 2023-08-18 18:30:14 -07:00
a31c615259 Merge pull request #284 from RicterZ/dependabot/pip/requests-2.31.0
Bump requests from 2.28.2 to 2.31.0
2023-05-25 20:40:59 +08:00
cf0b76204d Bump requests from 2.28.2 to 2.31.0
Bumps [requests](https://github.com/psf/requests) from 2.28.2 to 2.31.0.
- [Release notes](https://github.com/psf/requests/releases)
- [Changelog](https://github.com/psf/requests/blob/main/HISTORY.md)
- [Commits](https://github.com/psf/requests/compare/v2.28.2...v2.31.0)

---
updated-dependencies:
- dependency-name: requests
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
2023-05-23 06:19:34 +00:00
17402623c4 Merge pull request #282 from edgar1016/master
--page-all works with favorites
2023-04-22 13:06:40 +08:00
a1a310f06b --page-all works with favorites 2023-04-21 22:00:00 -07:00
57673da762 update version 2023-03-28 21:02:47 +08:00
dab61291cb Merge pull request #280 from RicterZ/dev
0.5.3
2023-03-28 20:58:08 +08:00
9ed4e04241 Merge pull request #279 from RicterZ/dev
update setup informations
2023-03-28 20:56:53 +08:00
f1cc63a591 Merge pull request #278 from RicterZ/dev
fix #277
2023-03-28 20:54:49 +08:00
f534b0b47f Merge pull request #275 from RicterZ/dev
remove tests
2023-03-04 18:40:45 +08:00
458c68d5e6 Merge pull request #274 from RicterZ/dev
Dev
2023-03-04 18:39:07 +08:00
fc507d246a Merge pull request #271 from edgar1016/master
Fixed info.txt
2023-02-20 23:58:26 +08:00
3ed84c5a67 Fixed info.txt 2023-02-20 01:54:32 -07:00
61f4a43081 remove test 2023-02-20 12:58:28 +08:00
4179947f16 add %ag %g formatter #269 2023-02-20 12:55:18 +08:00
9f55223e28 use Unknown as field value if it is null #269 2023-02-20 12:47:00 +08:00
b56e5b63a9 Merge pull request #268 from RicterZ/dev
enhancement of legacy search parser
2023-02-07 19:46:09 +08:00
179852a343 Merge pull request #267 from RicterZ/dev
add counter
2023-02-06 17:51:54 +08:00
11 changed files with 130 additions and 55 deletions

View File

@@ -11,6 +11,8 @@ nhentai
 nhentai is a CLI tool for downloading doujinshi from `nhentai.net <https://nhentai.net>`_
+GUI version: `https://github.com/edgar1016/nhentai-GUI <https://github.com/edgar1016/nhentai-GUI>`_
 ===================
 Manual Installation
 ===================
@@ -141,7 +143,9 @@ Supported doujinshi folder formatter:
 - %t: Doujinshi name
 - %s: Doujinshi subtitle (translated name)
 - %a: Doujinshi authors' name
+- %g: Doujinshi groups name
 - %p: Doujinshi pretty name
+- %ag: Doujinshi authors name or groups name

 Other options:
@@ -198,6 +202,8 @@ Other options:
     -P, --pdf             generate PDF file
     --rm-origin-dir       remove downloaded doujinshi dir when generated CBZ or
                           PDF file
+    --move-to-folder      remove files in doujinshi dir then move new file to folder
+                          when generated CBZ or PDF file
     --meta                generate a metadata file in doujinshi format
     --regenerate-cbz      regenerate the cbz file if exists

View File

@@ -1,3 +1,3 @@
-__version__ = '0.5.3'
+__version__ = '0.5.6'
 __author__ = 'RicterZ'
 __email__ = 'ricterzheng@gmail.com'

View File

@@ -46,7 +46,7 @@ def main():
         if not options.is_download:
             logger.warning('You do not specify --download option')

-        doujinshis = favorites_parser(page=page_list)
+        doujinshis = favorites_parser() if options.page_all else favorites_parser(page=page_list)

     elif options.keyword:
         if constant.CONFIG['language']:
@@ -57,6 +57,10 @@ def main():
             doujinshis = _search_parser(options.keyword, sorting=options.sorting, page=page_list,
                                         is_page_all=options.page_all)

+    elif options.artist:
+        doujinshis = legacy_search_parser(options.artist, sorting=options.sorting, page=page_list,
+                                          is_page_all=options.page_all, type_='ARTIST')
+
     elif not doujinshi_ids:
         doujinshi_ids = options.id
@@ -96,9 +100,9 @@ def main():
         if not options.is_nohtml and not options.is_cbz and not options.is_pdf:
             generate_html(options.output_dir, doujinshi, template=constant.CONFIG['template'])
         elif options.is_cbz:
-            generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir)
+            generate_cbz(options.output_dir, doujinshi, options.rm_origin_dir, True, options.move_to_folder)
         elif options.is_pdf:
-            generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir)
+            generate_pdf(options.output_dir, doujinshi, options.rm_origin_dir, options.move_to_folder)

         if options.main_viewer:
             generate_main_html(options.output_dir)

View File

@@ -3,6 +3,23 @@ import os
 import tempfile
 from urllib.parse import urlparse
+from platform import system
+
+
+def get_nhentai_home() -> str:
+    home = os.getenv('HOME', tempfile.gettempdir())
+
+    if system() == 'Linux':
+        xdgdat = os.getenv('XDG_DATA_HOME')
+        if xdgdat and os.path.exists(os.path.join(xdgdat, 'nhentai')):
+            return os.path.join(xdgdat, 'nhentai')
+        if home and os.path.exists(os.path.join(home, '.nhentai')):
+            return os.path.join(home, '.nhentai')
+        if xdgdat:
+            return os.path.join(xdgdat, 'nhentai')
+
+    # Use old default path in other systems
+    return os.path.join(home, '.nhentai')
+
 DEBUG = os.getenv('DEBUG', False)
@@ -11,15 +28,22 @@ BASE_URL = os.getenv('NHENTAI', 'https://nhentai.net')
 DETAIL_URL = f'{BASE_URL}/g'
 LEGACY_SEARCH_URL = f'{BASE_URL}/search/'
 SEARCH_URL = f'{BASE_URL}/api/galleries/search'
+ARTIST_URL = f'{BASE_URL}/artist/'
 TAG_API_URL = f'{BASE_URL}/api/galleries/tagged'
 LOGIN_URL = f'{BASE_URL}/login/'
 CHALLENGE_URL = f'{BASE_URL}/challenge'
 FAV_URL = f'{BASE_URL}/favorites/'
-IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i.{urlparse(BASE_URL).hostname}/galleries'
-NHENTAI_HOME = os.path.join(os.getenv('HOME', tempfile.gettempdir()), '.nhentai')
+IMAGE_URL = f'{urlparse(BASE_URL).scheme}://i.{urlparse(BASE_URL).hostname}/galleries'
+IMAGE_URL_MIRRORS = [
+    f'{urlparse(BASE_URL).scheme}://i3.{urlparse(BASE_URL).hostname}'
+    f'{urlparse(BASE_URL).scheme}://i5.{urlparse(BASE_URL).hostname}'
+    f'{urlparse(BASE_URL).scheme}://i7.{urlparse(BASE_URL).hostname}'
+]
+
+NHENTAI_HOME = get_nhentai_home()
 NHENTAI_HISTORY = os.path.join(NHENTAI_HOME, 'history.sqlite3')
 NHENTAI_CONFIG_FILE = os.path.join(NHENTAI_HOME, 'config.json')
@@ -30,7 +54,8 @@ CONFIG = {
     'cookie': '',
     'language': '',
     'template': '',
-    'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)'
+    'useragent': 'nhentai command line client (https://github.com/RicterZ/nhentai)',
+    'max_filename': 85
 }
 LANGUAGE_ISO = {
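A quick way to confirm which directory the new get_nhentai_home() resolves to is to import the constants directly (a minimal sketch, assuming the nhentai package is importable; the example path in the first comment is illustrative, not a guaranteed value):

# Quick check of the storage locations after the XDG change.
from nhentai import constant

print(constant.NHENTAI_HOME)         # e.g. ~/.local/share/nhentai on Linux with XDG_DATA_HOME set
print(constant.NHENTAI_HISTORY)      # <NHENTAI_HOME>/history.sqlite3
print(constant.NHENTAI_CONFIG_FILE)  # <NHENTAI_HOME>/config.json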

View File

@@ -20,9 +20,10 @@ class DoujinshiInfo(dict):
     def __getattr__(self, item):
         try:
-            return dict.__getitem__(self, item)
+            ret = dict.__getitem__(self, item)
+            return ret if ret else 'Unknown'
         except KeyError:
-            return ''
+            return 'Unknown'

 class Doujinshi(object):
@@ -38,8 +39,12 @@ class Doujinshi(object):
         self.url = f'{DETAIL_URL}/{self.id}'
         self.info = DoujinshiInfo(**kwargs)

+        ag_value = self.info.groups if self.info.artists == 'Unknown' else self.info.artists
+        name_format = name_format.replace('%ag', format_filename(ag_value))
+
         name_format = name_format.replace('%i', format_filename(str(self.id)))
         name_format = name_format.replace('%a', format_filename(self.info.artists))
+        name_format = name_format.replace('%g', format_filename(self.info.groups))
         name_format = name_format.replace('%t', format_filename(self.name))
         name_format = name_format.replace('%p', format_filename(self.pretty_name))
@@ -47,15 +52,16 @@ class Doujinshi(object):
         self.filename = format_filename(name_format, 255, True)

         self.table = [
-            ["Parodies", self.info.parodies],
-            ["Doujinshi", self.name],
-            ["Subtitle", self.info.subtitle],
-            ["Characters", self.info.characters],
-            ["Authors", self.info.artists],
-            ["Languages", self.info.languages],
-            ["Tags", self.info.tags],
-            ["URL", self.url],
-            ["Pages", self.pages],
+            ['Parodies', self.info.parodies],
+            ['Doujinshi', self.name],
+            ['Subtitle', self.info.subtitle],
+            ['Characters', self.info.characters],
+            ['Authors', self.info.artists],
+            ['Groups', self.info.groups],
+            ['Languages', self.info.languages],
+            ['Tags', self.info.tags],
+            ['URL', self.url],
+            ['Pages', self.pages],
         ]

     def __repr__(self):
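Taken together, the two changes mean empty metadata now reads as 'Unknown' and the new %ag placeholder falls back from artists to groups. A standalone sketch of that fallback, with made-up values (the real logic lives in Doujinshi.__init__ as shown above):

# Sketch of the %ag fallback; values are illustrative only.
def resolve_ag(artists: str, groups: str) -> str:
    artists = artists or 'Unknown'   # DoujinshiInfo now maps empty/missing fields to 'Unknown'
    groups = groups or 'Unknown'
    return groups if artists == 'Unknown' else artists

name_format = '[%i][%ag] %t'
name_format = name_format.replace('%ag', resolve_ag('', 'Some Circle'))
print(name_format)   # '[%i][Some Circle] %t' -- %i and %t are substituted the same way later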

View File

@@ -67,10 +67,14 @@ class Downloader(Singleton):
         try:
             response = request('get', url, stream=True, timeout=self.timeout, proxies=proxy)
             if response.status_code != 200:
-                raise NHentaiImageNotExistException
-
-        except NHentaiImageNotExistException as e:
-            raise e
+                path = urlparse(url).path
+                for mirror in constant.IMAGE_URL_MIRRORS:
+                    print(f'{mirror}{path}')
+                    mirror_url = f'{mirror}{path}'
+                    response = request('get', mirror_url, stream=True,
+                                       timeout=self.timeout, proxies=proxy)
+                    if response.status_code == 200:
+                        break

         except Exception as e:
             i += 1
@@ -123,6 +127,7 @@ class Downloader(Singleton):
             logger.warning(f'CBZ file "{folder}.cbz" exists, ignored download request')
             return

+        logger.info(f'Doujinshi will be saved at "{folder}"')
         if not os.path.exists(folder):
             try:
                 os.makedirs(folder)
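The retry logic above boils down to: if the primary image host does not return 200, request the same path from each mirror until one succeeds. A self-contained sketch of that idea using plain requests (the mirror hostnames are placeholders, not the project's real constants):

from urllib.parse import urlparse
import requests

IMAGE_URL_MIRRORS = ['https://i3.example.net', 'https://i5.example.net']  # placeholder mirrors

def fetch_with_mirrors(url: str, timeout: int = 30) -> requests.Response:
    # Try the primary host first, then fall back to each mirror with the same path.
    response = requests.get(url, stream=True, timeout=timeout)
    if response.status_code != 200:
        path = urlparse(url).path
        for mirror in IMAGE_URL_MIRRORS:
            response = requests.get(f'{mirror}{path}', stream=True, timeout=timeout)
            if response.status_code == 200:
                break
    return response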

View File

@@ -135,6 +135,7 @@ def doujinshi_parser(id_, counter=0):
         logger.warning(f'Error: {e}, ignored')
         return None

+    # print(response)
     html = BeautifulSoup(response, 'html.parser')
     doujinshi_info = html.find('div', attrs={'id': 'info'})
@@ -240,13 +241,21 @@ def print_doujinshi(doujinshi_list):
     print(tabulate(tabular_data=doujinshi_list, headers=headers, tablefmt='rst'))

-def legacy_search_parser(keyword, sorting, page, is_page_all=False):
+def legacy_search_parser(keyword, sorting, page, is_page_all=False, type_='SEARCH'):
     logger.info(f'Searching doujinshis of keyword {keyword}')
     result = []

+    if type_ not in ('SEARCH', 'ARTIST', ):
+        raise ValueError('Invalid type')
+
     if is_page_all:
-        response = request('get', url=constant.LEGACY_SEARCH_URL,
-                           params={'q': keyword, 'page': 1, 'sort': sorting}).content
+        if type_ == 'SEARCH':
+            response = request('get', url=constant.LEGACY_SEARCH_URL,
+                               params={'q': keyword, 'page': 1, 'sort': sorting}).content
+        else:
+            url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
+            response = request('get', url=url, params={'page': 1}).content
+
         html = BeautifulSoup(response, 'lxml')
         pagination = html.find(attrs={'class': 'pagination'})
         last_page = pagination.find(attrs={'class': 'last'})
@@ -258,8 +267,13 @@ def legacy_search_parser(keyword, sorting, page, is_page_all=False):
     for p in pages:
         logger.info(f'Fetching page {p} ...')
-        response = request('get', url=constant.LEGACY_SEARCH_URL,
-                           params={'q': keyword, 'page': p, 'sort': sorting}).content
+        if type_ == 'SEARCH':
+            response = request('get', url=constant.LEGACY_SEARCH_URL,
+                               params={'q': keyword, 'page': p, 'sort': sorting}).content
+        else:
+            url = constant.ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)
+            response = request('get', url=url, params={'page': p}).content
+
         if response is None:
             logger.warning(f'No result in response in page {p}')
             continue
@@ -313,7 +327,9 @@ def search_parser(keyword, sorting, page, is_page_all=False):
     for row in response['result']:
         title = row['title']['english']
-        title = title[:85] + '..' if len(title) > 85 else title
+        title = title[:constant.CONFIG['max_filename']] + '..' if \
+            len(title) > constant.CONFIG['max_filename'] else title
+
         result.append({'id': row['id'], 'title': title})

     not_exists_persist = False
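With type_='ARTIST', the legacy parser skips the /search/ endpoint and builds an artist-page URL instead. A small sketch of that URL construction (the base value comes from ARTIST_URL in the constant.py diff above; the keyword and the second sorting value are only examples):

ARTIST_URL = 'https://nhentai.net/artist/'  # f'{BASE_URL}/artist/' with the default BASE_URL

def build_artist_url(keyword: str, sorting: str) -> str:
    # 'recent' maps to the bare artist page; other sortings become an extra path segment
    return ARTIST_URL + keyword + '/' + ('' if sorting == 'recent' else sorting)

print(build_artist_url('some-artist', 'recent'))        # https://nhentai.net/artist/some-artist/
print(build_artist_url('some-artist', 'popular-week'))  # https://nhentai.net/artist/some-artist/popular-week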

View File

@@ -163,7 +163,7 @@ def generate_main_html(output_dir='./'):
         logger.warning(f'Writing Main Viewer failed ({e})')

-def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True):
+def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_comic_info=True, move_to_folder=False):
     if doujinshi_obj is not None:
         doujinshi_dir = os.path.join(output_dir, doujinshi_obj.filename)
         if os.path.exists(doujinshi_dir+".cbz"):
@@ -188,10 +188,21 @@ def generate_cbz(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, write_
     if rm_origin_dir:
         shutil.rmtree(doujinshi_dir, ignore_errors=True)

+    if move_to_folder:
+        for filename in os.listdir(doujinshi_dir):
+            file_path = os.path.join(doujinshi_dir, filename)
+            if os.path.isfile(file_path):
+                try:
+                    os.remove(file_path)
+                except Exception as e:
+                    print(f"Error deleting file: {e}")
+
+        shutil.move(cbz_filename, doujinshi_dir)
+
     logger.log(16, f'Comic Book CBZ file has been written to "{doujinshi_dir}"')

-def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
+def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False, move_to_folder=False):
     try:
         import img2pdf
@@ -219,6 +230,17 @@ def generate_pdf(output_dir='.', doujinshi_obj=None, rm_origin_dir=False):
         if rm_origin_dir:
             shutil.rmtree(doujinshi_dir, ignore_errors=True)

+        if move_to_folder:
+            for filename in os.listdir(doujinshi_dir):
+                file_path = os.path.join(doujinshi_dir, filename)
+                if os.path.isfile(file_path):
+                    try:
+                        os.remove(file_path)
+                    except Exception as e:
+                        print(f"Error deleting file: {e}")
+
+            shutil.move(pdf_filename, doujinshi_dir)
+
         logger.log(16, f'PDF file has been written to "{doujinshi_dir}"')

     except ImportError:
@@ -235,7 +257,7 @@ def format_filename(s, length=MAX_FIELD_LENGTH, _truncate_only=False):
     # maybe you can use `--format` to select a suitable filename
     if not _truncate_only:
-        ban_chars = '\\\'/:,;*?"<>|\t'
+        ban_chars = '\\\'/:,;*?"<>|\t\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0a\x0b'
         filename = s.translate(str.maketrans(ban_chars, ' ' * len(ban_chars))).strip()
         filename = ' '.join(filename.split())
@@ -290,11 +312,11 @@ def generate_metadata_file(output_dir, table, doujinshi_obj=None):
     f = open(os.path.join(doujinshi_dir, 'info.txt'), 'w', encoding='utf-8')

-    fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'CIRCLE', 'SCANLATOR',
+    fields = ['TITLE', 'ORIGINAL TITLE', 'AUTHOR', 'ARTIST', 'GROUPS', 'CIRCLE', 'SCANLATOR',
               'TRANSLATOR', 'PUBLISHER', 'DESCRIPTION', 'STATUS', 'CHAPTERS', 'PAGES',
               'TAGS', 'TYPE', 'LANGUAGE', 'RELEASED', 'READING DIRECTION', 'CHARACTERS',
               'SERIES', 'PARODY', 'URL']
-    special_fields = ['PARODY', 'TITLE', 'ORIGINAL TITLE', 'CHARACTERS', 'AUTHOR',
+    special_fields = ['PARODY', 'TITLE', 'ORIGINAL TITLE', 'CHARACTERS', 'AUTHOR', 'GROUPS',
                       'LANGUAGE', 'TAGS', 'URL', 'PAGES']

     for i in range(len(fields)):
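Both generators share the same --move-to-folder post-processing: delete the individual page images, then move the freshly written CBZ/PDF into the now-empty doujinshi directory. A condensed, self-contained sketch of that step (the helper name and paths are illustrative, not part of the codebase):

import os
import shutil

def move_generated_file(generated_file: str, doujinshi_dir: str) -> None:
    # Remove the individual page images so only the generated archive remains
    for filename in os.listdir(doujinshi_dir):
        file_path = os.path.join(doujinshi_dir, filename)
        if os.path.isfile(file_path):
            try:
                os.remove(file_path)
            except OSError as e:
                print(f"Error deleting file: {e}")
    # Move the CBZ/PDF into the emptied directory
    shutil.move(generated_file, doujinshi_dir)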

View File

@@ -139,7 +139,7 @@ function filter_searcher(){
                 break
             }
         }
-        if (verifier){doujinshi_id.push(data[i].Folder);}
+        if (verifier){doujinshi_id.push(data[i].Folder.replace("_", " "));}
     }
     var gallery = document.getElementsByClassName("gallery-favorite");
     for (var i = 0; i < gallery.length; i++){
@@ -174,4 +174,4 @@ function tag_maker(data){
         document.getElementById("tags").appendChild(node);
     }
 }
 }
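For context: the gallery titles rendered by the main viewer come from the folder name with underscores turned into spaces (see the commit message for a248ff98c4), so the searcher has to normalise folder names the same way before comparing. A tiny Python illustration of the mismatch, with a made-up folder name:

folder = '[Artist] Some_Example_Title'        # made-up folder name
displayed_title = folder.replace('_', ' ')    # what generate_main_html() shows on the page

print(folder == displayed_title)                    # False: raw folder names never match titles with '_'
print(folder.replace('_', ' ') == displayed_title)  # True: hence the .replace() added in the searcher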

poetry.lock (generated)
View File

@@ -1,10 +1,9 @@
-# This file is automatically @generated by Poetry 1.4.0 and should not be changed by hand.
+# This file is automatically @generated by Poetry 1.6.1 and should not be changed by hand.

 [[package]]
 name = "beautifulsoup4"
 version = "4.11.2"
 description = "Screen-scraping library"
-category = "main"
 optional = false
 python-versions = ">=3.6.0"
 files = [
@@ -23,7 +22,6 @@ lxml = ["lxml"]
 name = "certifi"
 version = "2022.12.7"
 description = "Python package for providing Mozilla's CA Bundle."
-category = "main"
 optional = false
 python-versions = ">=3.6"
 files = [
@@ -35,7 +33,6 @@ files = [
 name = "charset-normalizer"
 version = "3.0.1"
 description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
-category = "main"
 optional = false
 python-versions = "*"
 files = [
@@ -133,7 +130,6 @@ files = [
 name = "idna"
 version = "3.4"
 description = "Internationalized Domain Names in Applications (IDNA)"
-category = "main"
 optional = false
 python-versions = ">=3.5"
 files = [
@@ -145,7 +141,6 @@ files = [
 name = "iso8601"
 version = "1.1.0"
 description = "Simple module to parse ISO 8601 dates"
-category = "main"
 optional = false
 python-versions = ">=3.6.2,<4.0"
 files = [
@@ -155,21 +150,20 @@ files = [
 [[package]]
 name = "requests"
-version = "2.28.2"
+version = "2.31.0"
 description = "Python HTTP for Humans."
-category = "main"
 optional = false
-python-versions = ">=3.7, <4"
+python-versions = ">=3.7"
 files = [
-    {file = "requests-2.28.2-py3-none-any.whl", hash = "sha256:64299f4909223da747622c030b781c0d7811e359c37124b4bd368fb8c6518baa"},
-    {file = "requests-2.28.2.tar.gz", hash = "sha256:98b1b2782e3c6c4904938b84c0eb932721069dfdb9134313beff7c83c2df24bf"},
+    {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+    {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
 ]

 [package.dependencies]
 certifi = ">=2017.4.17"
 charset-normalizer = ">=2,<4"
 idna = ">=2.5,<4"
-urllib3 = ">=1.21.1,<1.27"
+urllib3 = ">=1.21.1,<3"

 [package.extras]
 socks = ["PySocks (>=1.5.6,!=1.5.7)"]
@@ -179,7 +173,6 @@ use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
 name = "soupsieve"
 version = "2.4"
 description = "A modern CSS selector implementation for Beautiful Soup."
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -191,7 +184,6 @@ files = [
 name = "tabulate"
 version = "0.9.0"
 description = "Pretty-print tabular data"
-category = "main"
 optional = false
 python-versions = ">=3.7"
 files = [
@@ -204,18 +196,17 @@ widechars = ["wcwidth"]
 [[package]]
 name = "urllib3"
-version = "1.26.14"
+version = "1.26.18"
 description = "HTTP library with thread-safe connection pooling, file post, and more."
-category = "main"
 optional = false
 python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
 files = [
-    {file = "urllib3-1.26.14-py2.py3-none-any.whl", hash = "sha256:75edcdc2f7d85b137124a6c3c9fc3933cdeaa12ecb9a6a959f22797a0feca7e1"},
-    {file = "urllib3-1.26.14.tar.gz", hash = "sha256:076907bf8fd355cde77728471316625a4d2f7e713c125f51953bb5b3eecf4f72"},
+    {file = "urllib3-1.26.18-py2.py3-none-any.whl", hash = "sha256:34b97092d7e0a3a8cf7cd10e386f401b3737364026c45e622aa02903dffe0f07"},
+    {file = "urllib3-1.26.18.tar.gz", hash = "sha256:f8ecc1bba5667413457c529ab955bf8c67b45db799d159066261719e328580a0"},
 ]

 [package.extras]
-brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+brotli = ["brotli (==1.0.9)", "brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
 secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
 socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]

View File

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "nhentai"
-version = "0.5.2"
+version = "0.5.3"
 description = "nhentai doujinshi downloader"
 authors = ["Ricter Z <ricterzheng@gmail.com>"]
 license = "MIT"