11from urllib .parse import quote_plus
2+ import time
23
34from bs4 import BeautifulSoup
45
56import cloudscraper
67
# Base site URL and the search endpoint for APKMirror; the search query
# string is appended (URL-encoded) to BASE_SEARCH by search().
BASE_URL = "https://www.apkmirror.com"
BASE_SEARCH = f"{BASE_URL}/?post_type=app_release&searchtype=apk&s="

# Desktop Firefox user agent.
# NOTE(review): an older Android/Chrome UA was left here as commented-out
# code (removed) — presumably the Firefox UA passes the site's bot checks
# where the mobile one did not; confirm before changing.
USER_AGENT_STRING = (
    "Mozilla/5.0 (X11; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0"
)

# Headers sent with every request made through `scraper`.
HEADERS = {
    "User-Agent": USER_AGENT_STRING,
}

# Shared session; cloudscraper wraps requests to solve Cloudflare
# anti-bot challenges transparently.
scraper = cloudscraper.create_scraper()
1516
1617def search (query ):
18+ print ("[search] Sleeping..." )
19+
20+ time .sleep (5 )
21+
1722 search_url = BASE_SEARCH + quote_plus (query )
1823 resp = scraper .get (search_url , headers = HEADERS )
1924
20- print (resp .status_code )
25+ print (f"[search] Status: { resp .status_code } " )
2126
2227 soup = BeautifulSoup (resp .text , "html.parser" )
2328 apps = []
@@ -39,9 +44,13 @@ def search(query):
3944 return apps [:5 ]
4045
4146def get_app_details ():
47+ print ("[get_app_details] Sleeping..." )
48+
49+ time .sleep (5 )
50+
4251 resp = scraper .get (search ("discord" )[0 ]["link" ], headers = HEADERS )
4352
44- print (resp .status_code )
53+ print (f"[get_app_details] Status: { resp .status_code } " )
4554
4655 soup = BeautifulSoup (resp .text , "html.parser" )
4756
@@ -55,17 +64,25 @@ def get_app_details():
5564 return architecture , android_version , dpi , download_link
5665
def get_download_link(delay=5):
    """Return the download-page URL for the first "discord" search result.

    Sleeps briefly (crude rate limiting against APKMirror), fetches the
    download link produced by ``get_app_details()`` (its 4th return value),
    and extracts the href of the first anchor with class ``downloadButton``.

    Args:
        delay: Seconds to sleep before the request. Defaults to 5, the
            previously hard-coded value, so existing callers are unchanged.

    Returns:
        str: the href of the first ``downloadButton`` anchor on the page.

    Raises:
        IndexError: if the page contains no ``downloadButton`` anchor.
    """
    print("[get_download_link] Sleeping...")

    time.sleep(delay)

    resp = scraper.get(get_app_details()[3], headers=HEADERS)

    print(f"[get_download_link] Status: {resp.status_code}")

    soup = BeautifulSoup(resp.text, "html.parser")
    return soup.find_all("a", {"class": "downloadButton"})[0]["href"]
6477
6578def get_direct_download_link ():
79+ print ("[get_direct_download_link] Sleeping..." )
80+
81+ time .sleep (5 )
82+
6683 resp = scraper .get (get_download_link (), headers = HEADERS )
6784
68- print (resp .status_code )
85+ print (f"[get_direct_download_link] Status: { resp .status_code } " )
6986
7087 soup = BeautifulSoup (resp .text , "html.parser" )
7188
0 commit comments