Skip to content

Commit c10b5df

Browse files
committed
Handle AO3 story with unspecified summary
1 parent 95e92de commit c10b5df

File tree

2 files changed: +18 −16 lines changed

ffembed/ffembed.py

Lines changed: 9 additions & 8 deletions
@@ -6,7 +6,7 @@
 from bs4 import BeautifulSoup
 from redbot.core import checks, commands, Config

-__version__ = "1.0.14"
+__version__ = "1.0.15"

 BaseCog = getattr(commands, "Cog", object)

@@ -107,8 +107,8 @@ async def toggle_channel(self, ctx, channel: discord.TextChannel):

     def parse_url(self, message):
         url_regex = (
-            r"http[s]?://(?:www.)?(?:(?:m.)?fanfiction.net/s/\d+"
-            r"/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),])*|"
+            r"https?://(?:www.)?(?:(?:m.)?fanfiction.net/"
+            r"s/\d+/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),])*|"
             r"archiveofourown.org/works/\d+(?:/chapters/\d+)?|"
             r"siye.co.uk/(?:siye/)?viewstory.php\?sid=\d+(?:&chapter=\d+)?)"
         )
@@ -134,7 +134,7 @@ async def fetch_url(self, url):
         return page

     def parse_FanFiction(self, page, url):
-        base = "https://fanfiction.net/"
+        base = "https://fanfiction.net"
         div = page.find(id="profile_top")
         thumbnail = div.find("img", attrs={"class": "cimage"})
         author = div.find("a", attrs={"class": "xcontrast_txt"})
@@ -153,10 +153,11 @@ def parse_FanFiction(self, page, url):
         }

     def parse_AO3(self, page, url):
-        base = "https://archiveofourown.org/"
+        base = "https://archiveofourown.org"
         author = page.find("a", attrs={"rel": "author"})
         title = page.find("h2", attrs={"class": "title heading"})
-        desc = page.find("div", attrs={"class": "summary module"}).p
+        desc = page.find("div", attrs={"class": "summary module"})
+        desc = "Summary not specified." if desc is None else desc.p.get_text(strip=True)
         date = " ".join(x.get_text() for x in page.find_all(class_="published"))
         words = " ".join(x.get_text() for x in page.find_all(class_="words"))
         chapters = " ".join(x.get_text() for x in page.find_all(class_="chapters"))
@@ -167,12 +168,12 @@ def parse_AO3(self, page, url):
             "author": author.get_text(strip=True),
             "author_link": base + author["href"],
             "title": title.get_text(strip=True),
-            "desc": desc.get_text(strip=True),
+            "desc": desc,
             "footer": f"{date}{words}{chapters}",
         }

     def parse_SIYE(self, page, url):
-        base = "http://siye.co.uk/"
+        base = "http://siye.co.uk"
         table_cell = page.find_all("td", attrs={"align": "left"})[1].get_text()
         rows = table_cell.strip().split("\n")
         rows = [row for row in rows if ":" in row]  # Handle completed story

ffpicker/ffpicker.py

Lines changed: 9 additions & 8 deletions
@@ -9,7 +9,7 @@
 from redbot.core.utils.menus import menu, DEFAULT_CONTROLS


-__version__ = "1.1.12"
+__version__ = "1.1.13"

 BaseCog = getattr(commands, "Cog", object)

@@ -106,8 +106,8 @@ async def reset(self, ctx):

     def parse_url(self, message):
         url_regex = (
-            r"http[s]?://(?:www.)?(?:(?:m.)?fanfiction.net/s/\d+"
-            r"/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),])*|"
+            r"https?://(?:www.)?(?:(?:m.)?fanfiction.net/"
+            r"s/\d+/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),])*|"
             r"archiveofourown.org/works/\d+(?:/chapters/\d+)?|"
             r"siye.co.uk/(?:siye/)?viewstory.php\?sid=\d+(?:&chapter=\d+)?)"
         )
@@ -133,7 +133,7 @@ async def fetch_url(self, url):
         return page

     def parse_FanFiction(self, page, url):
-        base = "https://fanfiction.net/"
+        base = "https://fanfiction.net"
         div = page.find(id="profile_top")
         thumbnail = div.find("img", attrs={"class": "cimage"})
         author = div.find("a", attrs={"class": "xcontrast_txt"})
@@ -152,10 +152,11 @@ def parse_FanFiction(self, page, url):
         }

     def parse_AO3(self, page, url):
-        base = "https://archiveofourown.org/"
+        base = "https://archiveofourown.org"
         author = page.find("a", attrs={"rel": "author"})
         title = page.find("h2", attrs={"class": "title heading"})
-        desc = page.find("div", attrs={"class": "summary module"}).p
+        desc = page.find("div", attrs={"class": "summary module"})
+        desc = "Summary not specified." if desc is None else desc.p.get_text(strip=True)
         date = " ".join(x.get_text() for x in page.find_all(class_="published"))
         words = " ".join(x.get_text() for x in page.find_all(class_="words"))
         chapters = " ".join(x.get_text() for x in page.find_all(class_="chapters"))
@@ -166,12 +167,12 @@ def parse_AO3(self, page, url):
             "author": author.get_text(strip=True),
             "author_link": base + author["href"],
             "title": title.get_text(strip=True),
-            "desc": desc.get_text(strip=True),
+            "desc": desc,
             "footer": f"{date}{words}{chapters}",
         }

     def parse_SIYE(self, page, url):
-        base = "http://siye.co.uk/"
+        base = "http://siye.co.uk"
         table_cell = page.find_all("td", attrs={"align": "left"})[1].get_text()
         rows = table_cell.strip().split("\n")
         rows = [row for row in rows if ":" in row]  # Handle completed story

0 commit comments

Comments (0)