added dl_manga and page formatting
Proxymiity committed Jun 12, 2021
1 parent 9be60d0 commit 57efd0d
Showing 1 changed file with 41 additions and 7 deletions.
48 changes: 41 additions & 7 deletions MangaDexPy/downloader.py
@@ -1,21 +1,39 @@
-from .chapter import Chapter
+from MangaDexPy import APIError, Chapter, Manga
 import os
 import time
+from requests import exceptions as rex
 from pathlib import Path

 # This script is provided 'as-is', as an example for library usage.
 # Overriding it in your code is strongly recommended to gain control on it and fine-tune its behavior.
 
-def dl_page(net, page, path):
+
+def page_name_to_integer(page, pages_total):
+    num = ''.join([x for x in page.split("-")[0] if x.isdigit()])
+    leading_zeros = len(str(pages_total)) - len(num)
+    final_name = ""
+    for _ in range(leading_zeros):
+        final_name += "0"
+    final_name += num + Path(page).suffix
+    return final_name
+
+
+def dl_page(net, page, pages_total, path):
     """Helper for dl_chapter to download pages with."""
     try:
         with net.client.session.get(page) as p:
-            with open(str(Path(path + "/" + page.rsplit("/", 1)[1])), "wb") as f:
+            name = page_name_to_integer(page.rsplit("/", 1)[1], pages_total)
+            with open(str(Path(path + "/" + name)), "wb") as f:
                 f.write(p.content)
             success = True if p.status_code <= 400 else False
             try:
                 cached = True if p.headers["x-cache"] == "HIT" else False
-            except KeyError:  # No cache header returned: we're probably using upstream
-                cached = True
-            net.report(page, success, cached, len(p.content), int(p.elapsed.microseconds/1000))
+            except KeyError:  # No cache header returned: the client is at fault
+                cached = False
+            try:
+                net.report(page, success, cached, len(p.content), int(p.elapsed.microseconds/1000))
+            except APIError:
+                print("Network report failed. If you're downloading from upstream, this is normal... I guess?")
+                print(f"Statistics for {page}\nTime: {int(p.elapsed.microseconds/1000)}, length: {len(p.content)}"
+                      f"\nSuccess: {success}, was cached on server: {cached}\nHeaders: {p.headers}")
         return True
@@ -34,9 +52,10 @@ def dl_chapter(chapter: Chapter, path, light: bool = False, time_controller: int
         pages = net.pages_redux
     else:
         pages = net.pages
+    tot = len(pages)
     for x in pages:
         while True:
-            resp = dl_page(net, x, path)
+            resp = dl_page(net, x, tot, path)
             if resp:
                 break
             else:
@@ -45,3 +64,18 @@ def dl_chapter(chapter: Chapter, path, light: bool = False, time_controller: int
         if time_controller:
             time.sleep(time_controller)
     print(f"Successfully downloaded and reported status for {len(net.pages)} pages.")


+def dl_manga(manga: Manga, base_path, language: str = "en", light: bool = False, time_controller: int = 1):
+    """Downloads an entire manga."""
+    bp = Path(base_path)
+    chs = manga.get_chapters()
+    chs = [x for x in chs if x.language == language]
+    for ch in chs:
+        cp = Path(str(bp) + f"/Vol.{ch.volume} Ch.{ch.chapter}")
+        if cp.exists():
+            print(f"Folder for {str(cp)} already exists, skipping chapter.")
+        else:
+            os.mkdir(str(cp))
+            dl_chapter(ch, str(cp), light, time_controller)
+    print(f"Successfully processed {len(chs)} chapters.")

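A minimal sketch of how the new dl_manga entry point could be driven (the client constructor and get_manga call are assumptions about the MangaDexPy client API and are not part of this diff; the UUID and path are placeholders):

import MangaDexPy
from MangaDexPy import downloader

cli = MangaDexPy.MangaDex()               # assumed client constructor
manga = cli.get_manga("manga-uuid-here")  # assumed lookup returning a Manga object
# base_path must already exist: dl_manga only creates the per-chapter subfolders via os.mkdir
downloader.dl_manga(manga, "downloads/MyManga", language="en", light=False, time_controller=1)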