@@ -19,7 +19,6 @@ def get_attractions(url,data=None):
     titles = soup.select('div.property_title > a[target="_blank"]')
     imgs = soup.select('img[width="160"]')
     cates = soup.select('div.p13n_reasoning_v2')
-
     if data == None:
         for title,img,cate in zip(titles,imgs,cates):
             data = {
@@ -32,17 +31,18 @@ def get_attractions(url,data=None):
 
 def get_favs(url,data=None):
     wb_data = requests.get(url,headers=headers)
+    print(wb_data)
     soup = BeautifulSoup(wb_data.text,'lxml')
-    titles = soup.select('a.location-name')
-    imgs = soup.select('div.photo > div.sizedThumb > img.photo_image')
-    metas = soup.select('span.format_address')
-
+    titles = soup.select("div.title.titleLLR > div")
+    imgs = soup.find_all("div", "missing lazyMiss")
+    metas = soup.select('div.attraction_types > span')
+
     if data == None:
         for title,img,meta in zip(titles,imgs,metas):
             data = {
-                'title':title.get_text(),
-                'img':img.get('src'),
-                'meta':list(meta.stripped_strings)
+                'title':title.get_text().strip(),
+                'img':img.get('data-thumburl'),
+                'meta':meta.get_text()
             }
             print(data)
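To show what the reworked get_favs hunk does end to end, here is a minimal runnable sketch of the updated function. It assumes requests, bs4, and lxml are installed, that headers is a plain User-Agent dict defined elsewhere in the file, and that the page being scraped carries the div.title.titleLLR, "missing lazyMiss", and div.attraction_types markup the new selectors target; the headers value and the example URL are placeholders for illustration, not values from the original script.

# Minimal sketch, not the committed file: headers and the example URL below
# are assumptions for illustration.
import requests
from bs4 import BeautifulSoup

headers = {'User-Agent': 'Mozilla/5.0'}  # assumed; the real script may also send cookies

def get_favs(url, data=None):
    wb_data = requests.get(url, headers=headers)
    print(wb_data)                                         # e.g. <Response [200]>, quick status check
    soup = BeautifulSoup(wb_data.text, 'lxml')
    titles = soup.select("div.title.titleLLR > div")       # attraction names
    imgs = soup.find_all("div", "missing lazyMiss")        # placeholder divs for lazy-loaded thumbnails
    metas = soup.select('div.attraction_types > span')     # attraction category text
    if data is None:
        for title, img, meta in zip(titles, imgs, metas):
            data = {
                'title': title.get_text().strip(),
                'img': img.get('data-thumburl'),           # real image URL sits in a data attribute, not src
                'meta': meta.get_text()
            }
            print(data)

get_favs('https://www.tripadvisor.com/Saves')              # placeholder URL for illustration

The switch in the diff from img.get('src') to img.get('data-thumburl') suggests the thumbnails are lazy-loaded, so the real image URL lives in a data attribute while src only points at a placeholder image.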