from bs4 import BeautifulSoup
from redbot.core import checks, commands, Config

-__version__ = "1.0.14"
+__version__ = "1.0.15"

BaseCog = getattr(commands, "Cog", object)

@@ -107,8 +107,8 @@ async def toggle_channel(self, ctx, channel: discord.TextChannel):

    def parse_url(self, message):
        url_regex = (
-            r"http[s]?://(?:www.)?(?:(?:m.)?fanfiction.net/s/\d+"
-            r"/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),])*|"
+            r"https?://(?:www.)?(?:(?:m.)?fanfiction.net/"
+            r"s/\d+/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),])*|"
            r"archiveofourown.org/works/\d+(?:/chapters/\d+)?|"
            r"siye.co.uk/(?:siye/)?viewstory.php\?sid=\d+(?:&chapter=\d+)?)"
        )
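
As a quick sanity check, the regrouped pattern can be exercised on its own; the sample URLs and IDs below are made up.

import re

# Same pattern text as the updated url_regex above, joined into one standalone test.
url_regex = (
    r"https?://(?:www.)?(?:(?:m.)?fanfiction.net/"
    r"s/\d+/?(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),])*|"
    r"archiveofourown.org/works/\d+(?:/chapters/\d+)?|"
    r"siye.co.uk/(?:siye/)?viewstory.php\?sid=\d+(?:&chapter=\d+)?)"
)

samples = [
    "https://www.fanfiction.net/s/1234567/1/Example-Story",      # placeholder story ID
    "http://m.fanfiction.net/s/1234567",                         # mobile URL
    "https://archiveofourown.org/works/123456/chapters/654321",  # placeholder work ID
    "https://siye.co.uk/viewstory.php?sid=1234&chapter=2",       # placeholder story ID
]

for text in samples:
    match = re.search(url_regex, text)
    print(text, "->", match.group(0) if match else "no match")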
@@ -134,7 +134,7 @@ async def fetch_url(self, url):
        return page

    def parse_FanFiction(self, page, url):
-        base = "https://fanfiction.net/"
+        base = "https://fanfiction.net"
        div = page.find(id="profile_top")
        thumbnail = div.find("img", attrs={"class": "cimage"})
        author = div.find("a", attrs={"class": "xcontrast_txt"})
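
The trailing slash is presumably dropped because the profile hrefs scraped from the page (later used as base + author["href"]) already start with "/"; a small illustration with a made-up href:

href = "/u/123456/example-author"  # hypothetical href as it appears in the page

print("https://fanfiction.net/" + href)  # https://fanfiction.net//u/123456/example-author (double slash)
print("https://fanfiction.net" + href)   # https://fanfiction.net/u/123456/example-author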
@@ -153,10 +153,11 @@ def parse_FanFiction(self, page, url):
        }

    def parse_AO3(self, page, url):
-        base = "https://archiveofourown.org/"
+        base = "https://archiveofourown.org"
        author = page.find("a", attrs={"rel": "author"})
        title = page.find("h2", attrs={"class": "title heading"})
-        desc = page.find("div", attrs={"class": "summary module"}).p
+        desc = page.find("div", attrs={"class": "summary module"})
+        desc = "Summary not specified." if desc is None else desc.p.get_text(strip=True)
        date = " ".join(x.get_text() for x in page.find_all(class_="published"))
        words = " ".join(x.get_text() for x in page.find_all(class_="words"))
        chapters = " ".join(x.get_text() for x in page.find_all(class_="chapters"))
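
A minimal sketch of the new fallback, using made-up HTML in place of a real AO3 page: when no summary block is found, desc becomes a placeholder string instead of raising AttributeError on .p.

from bs4 import BeautifulSoup

with_summary = '<div class="summary module"><p>An example summary.</p></div>'
without_summary = '<div class="notes module"><p>No summary here.</p></div>'

for html in (with_summary, without_summary):
    page = BeautifulSoup(html, "html.parser")
    desc = page.find("div", attrs={"class": "summary module"})
    desc = "Summary not specified." if desc is None else desc.p.get_text(strip=True)
    print(desc)  # "An example summary.", then "Summary not specified."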
@@ -167,12 +168,12 @@ def parse_AO3(self, page, url):
            "author": author.get_text(strip=True),
            "author_link": base + author["href"],
            "title": title.get_text(strip=True),
-            "desc": desc.get_text(strip=True),
+            "desc": desc,
            "footer": f"{date} ∙ {words} ∙ {chapters}",
        }

    def parse_SIYE(self, page, url):
-        base = "http://siye.co.uk/"
+        base = "http://siye.co.uk"
        table_cell = page.find_all("td", attrs={"align": "left"})[1].get_text()
        rows = table_cell.strip().split("\n")
        rows = [row for row in rows if ":" in row]  # Handle completed story
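
The ":" filter can be seen in isolation below; the cell text is invented, and the assumption (based on the inline comment) is that completed stories add an extra line without a colon.

table_cell = """
Category: Post-Hogwarts
Characters: Harry, Ginny
Completed
Chapters: 12
"""

rows = table_cell.strip().split("\n")
rows = [row for row in rows if ":" in row]  # the bare "Completed" line is dropped
print(rows)  # ['Category: Post-Hogwarts', 'Characters: Harry, Ginny', 'Chapters: 12']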