Skip to content

Commit

Permalink
Merge pull request mvdctop#636 from lededev/double-exception
Browse files Browse the repository at this point in the history
bugfix
  • Loading branch information
mvdctop authored Nov 14, 2021
2 parents 5cf1205 + 8e9ea6d commit a356b25
Show file tree
Hide file tree
Showing 5 changed files with 11 additions and 18 deletions.
17 changes: 5 additions & 12 deletions AV_Data_Capture.py
Original file line number Diff line number Diff line change
Expand Up @@ -474,18 +474,11 @@ def main():
check_update(version)

# Download Mapping Table, parallel version
down_map_tab = []
actor_xml = Path.home() / '.local' / 'share' / 'avdc' / 'mapping_actor.xml'
if not actor_xml.exists():
down_map_tab.append((
"https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/MappingTable/mapping_actor.xml",
actor_xml))
info_xml = Path.home() / '.local' / 'share' / 'avdc' / 'mapping_info.xml'
if not info_xml.exists():
down_map_tab.append((
"https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/MappingTable/mapping_info.xml",
info_xml))
res = parallel_download_files(down_map_tab)
def fmd(f):
    """Return the (download URL, local cache path) pair for mapping file *f*.

    The URL points at the project's MappingTable directory on GitHub raw;
    the path is the per-user avdc data directory under the home folder.
    """
    url = 'https://raw.githubusercontent.com/yoshiko2/AV_Data_Capture/master/MappingTable/' + f
    dest = Path.home() / '.local' / 'share' / 'avdc' / f
    return url, dest
map_tab = (fmd('mapping_actor.xml'), fmd('mapping_info.xml'), fmd('c_number.json'))
res = parallel_download_files(((k, v) for k, v in map_tab if not v.exists()))
for i, fp in enumerate(res, start=1):
if fp and len(fp):
print(f"[+] [{i}/{len(res)}] Mapping Table Downloaded to {fp}")
Expand Down
4 changes: 2 additions & 2 deletions WebCrawler/avsox.py
Original file line number Diff line number Diff line change
Expand Up @@ -57,8 +57,8 @@ def getCover_small(html):
result = str(html.xpath('//*[@id="waterfall"]/div/a/div[1]/img/@src')).strip(" ['']")
return result
def getTag(html):
    """Return the page's tag list parsed from the keywords meta element.

    The first two comma-separated keywords are dropped (presumably site
    boilerplate rather than real tags — TODO confirm against live pages);
    an empty list is returned when no further keywords are present.
    """
    # NOTE(review): the diff left the superseded body (`return result`)
    # above the new one, making the replacement unreachable; only the
    # post-commit implementation is kept here.
    x = html.xpath('/html/head/meta[@name="keywords"]/@content')[0].split(',')
    return [i.strip() for i in x[2:]] if len(x) > 2 else []
def getSeries(html):
try:
result1 = str(html.xpath('//span[contains(text(),"系列:")]/../span[2]/text()')).strip(" ['']")
Expand Down
2 changes: 1 addition & 1 deletion WebCrawler/fc2.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,7 +14,7 @@ def getTitle_fc2com(htmlcode): #获取厂商
return result
def getActor_fc2com(htmlcode):
try:
htmtml = etree.fromstring(htmlcode, etree.HTMLParser())
html = etree.fromstring(htmlcode, etree.HTMLParser())
result = html.xpath('//*[@id="top"]/div[1]/section[1]/div/section/div[2]/ul/li[3]/a/text()')[0]
return result
except:
Expand Down
2 changes: 1 addition & 1 deletion WebCrawler/javbus.py
Original file line number Diff line number Diff line change
Expand Up @@ -72,7 +72,7 @@ def getSerise(html): #获取系列
return str(x[0]) if len(x) else ''
def getTag(html):  # Extract the tag list
    """Return the tags from the page's keywords meta element.

    The first comma-separated keyword is dropped (presumably the movie
    number / site boilerplate, not a real tag — TODO confirm).
    """
    # NOTE(review): the diff left both `return klist` (old) and
    # `return klist[1:]` (new) in place, so the fix was unreachable;
    # only the post-commit return is kept here.
    klist = html.xpath('/html/head/meta[@name="keywords"]/@content')[0].split(',')
    return klist[1:]
def getExtrafanart(htmlcode): # 获取剧照
html_pather = re.compile(r'<div id=\"sample-waterfall\">[\s\S]*?</div></a>\s*?</div>')
html = html_pather.search(htmlcode)
Expand Down
4 changes: 2 additions & 2 deletions WebCrawler/javlib.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,10 +34,10 @@ def main(number: str):
)
soup = BeautifulSoup(result.text, "html.parser")
lx = html.fromstring(str(soup))

fanhao_pather = re.compile(r'<a href=".*?".*?><div class="id">(.*?)</div>')
fanhao = fanhao_pather.findall(result.text)

if "/?v=jav" in result.url:
dic = {
"title": get_title(lx, soup),
Expand Down

0 comments on commit a356b25

Please sign in to comment.