Skip to content

Commit

Permalink
Merge pull request #2 from jere344/patch-2
Browse files Browse the repository at this point in the history
Update novelsemperor.py
  • Loading branch information
zGadli authored Aug 30, 2023
2 parents 42777c2 + 0f95edc commit 0493061
Showing 1 changed file with 45 additions and 39 deletions.
84 changes: 45 additions & 39 deletions sources/en/n/novelsemperor.py
Original file line number Diff line number Diff line change
@@ -1,13 +1,13 @@
# -*- coding: utf-8 -*-
import logging
import re
from lncrawl.templates.mangastream import MangaStreamTemplate
from lncrawl.core.crawler import Crawler

logger = logging.getLogger(__name__)
search_url = 'https://novelsemperor.com/series?title=%s&type=&status='
search_url = "https://novelsemperor.com/series?title=%s&type=&status="


class NovelsEmperorCrawler(MangaStreamTemplate):
class NovelsEmperorCrawler(Crawler):
base_url = ["https://novelsemperor.com/"]

def initialize(self) -> None:
Expand All @@ -26,7 +26,7 @@ def search_novel(self, query):
results = []
for tab in soup.select("div.xlg\\:grid-cols-8.grid.grid-cols-3 div#card-real"):
a = tab.select_one("a")
title = a["href"][33:].replace('-', ' ')
title = a["href"][33:].replace("-", " ")
img = tab.select_one("img")["data-src"]
results.append(
{
Expand All @@ -41,55 +41,61 @@ def search_novel(self, query):
def read_novel_info(self):
    """Scrape the novel's title, author, cover and full chapter list.

    Fetches the novel page, discovers how many paginated chapter-list
    pages exist, downloads them all concurrently, then walks the chapter
    anchors in reverse (the site lists newest first) to build
    ``self.chapters`` / ``self.volumes`` in reading order.

    Side effects: sets ``self.novel_title``, ``self.novel_author``
    (only when present on the page), ``self.novel_cover``, and appends
    to ``self.volumes`` and ``self.chapters``.
    """
    logger.debug("Visiting %s", self.novel_url)
    soup = self.get_soup(self.novel_url)

    # The second-to-last <li> of the pagination bar holds the last page
    # link; its onclick attribute embeds "page=<N>".
    last_page_item = soup.select_one("ul > li:nth-last-child(2)")

    if last_page_item:
        onclick = last_page_item["onclick"]
        # Match "page=<number>" in the link and extract the number.
        match = re.compile(r"page=\d+").findall(onclick)[0]
        page_count = int(match.split("=")[1])
    else:
        # No pagination bar means everything fits on a single page.
        page_count = 1

    # Fetch all chapter-list pages concurrently.
    futures = [
        self.executor.submit(self.get_soup, f"{self.novel_url}?page={i}")
        for i in range(1, page_count + 1)
    ]
    page_soups = [f.result() for f in futures]

    possible_title = page_soups[0].select_one("h2.text-2xl")
    assert possible_title, "No novel title"
    # Strip the small status/label <span> nested inside the heading so
    # only the actual title text remains.
    text_sm = possible_title.select_one("span")
    if text_sm:
        text_sm.extract()
    self.novel_title = possible_title.text.strip()
    logger.info("Novel title: %s", self.novel_title)

    author = page_soups[0].select("p:nth-child(4) > span.capitalize")
    if len(author) == 2:
        # Two spans: author plus translator/secondary credit.
        self.novel_author = author[0].text + " (" + author[1].text + ")"
    elif author and author[0].text != "-":
        # Fix: guard against an empty result set before indexing;
        # "-" is the site's placeholder for an unknown author.
        self.novel_author = author[0].text
    logger.info("Novel author: %s", self.novel_author)

    # Guard the cover lookup as well — some pages may lack the image.
    cover_img = page_soups[0].select_one("div.relative > img")
    if cover_img:
        self.novel_cover = self.absolute_url(cover_img["src"])
    logger.info("Novel cover: %s", self.novel_cover)

    # Chapters are listed newest-first across the pages; flatten every
    # page's anchors and reverse to get reading order. Volumes are
    # synthetic: one per 100 chapters.
    for element in reversed(
        [a for page in page_soups for a in page.select("div#chapters-list a")]
    ):
        ch_id = len(self.chapters) + 1
        vol_id = len(self.chapters) // 100 + 1
        if len(self.chapters) % 100 == 0:
            self.volumes.append({"id": vol_id})
        self.chapters.append(
            {
                "id": ch_id,
                "volume": vol_id,
                "title": element.select_one("div div span").text,
                "url": self.absolute_url(element["href"]),
            }
        )
    logger.debug(
        "%d chapters and %d volumes found", len(self.chapters), len(self.volumes)
    )
Expand Down

0 comments on commit 0493061

Please sign in to comment.