@@ -116,17 +116,30 @@ def download_extract_zip(self, url: str, chunk_size=128) -> None:
116116 """
117117 self .path_input .mkdir (parents = True , exist_ok = True )
118118
119+ # request_headers = {
120+ # "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36",
121+ # }
122+
123+ # r = requests.get(url, headers=request_headers, stream=True, timeout=60)
124+
119125 request_headers = {
120- "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36" ,
126+ "User-Agent" : "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36" ,
127+ "Connection" : "keep-alive"
128+ }
129+ proxies = {
130+ "https" : tse_constants .PROXY_LINK .value
121131 }
122132
123- r = requests .get (url , headers = request_headers , stream = True , timeout = 60 )
133+ r = requests .get (url , headers = request_headers , proxies = proxies , verify = False , timeout = 300 )
124134
125135 save_path = self .path_input / url .split ("/" )[- 1 ]
126136
127137 with open (save_path , "wb" ) as fd :
128- for chunk in r .iter_content (chunk_size = chunk_size ):
129- fd .write (chunk )
138+ fd .write (r .content )
139+
140+ # with open(save_path, "wb") as fd:
141+ # for chunk in r.iter_content(chunk_size=chunk_size):
142+ # fd.write(chunk)
130143
131144 with zipfile .ZipFile (save_path ) as z :
132145 z .extractall (self .path_input )
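For reference, a minimal standalone sketch of the download path this commit lands on, assuming tse_constants.PROXY_LINK.value resolves to an HTTPS proxy URL; the fetch_zip name, the PROXY_URL placeholder, and the dest parameter are illustrative, not part of the repository. Note the trade-off the diff encodes: verify=False skips TLS certificate validation, and writing r.content buffers the whole archive in memory, unlike the streamed iter_content loop it replaces.

import zipfile
from pathlib import Path

import requests

# Placeholder standing in for tse_constants.PROXY_LINK.value (assumption).
PROXY_URL = "http://proxy.example.com:8080"

REQUEST_HEADERS = {
    "User-Agent": (
        "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 "
        "(KHTML, like Gecko) Chrome/80.0.3987.149 Safari/537.36"
    ),
    "Connection": "keep-alive",
}

def fetch_zip(url: str, dest: Path) -> None:
    """Download a zip archive through the proxy and extract it into dest."""
    dest.mkdir(parents=True, exist_ok=True)
    r = requests.get(
        url,
        headers=REQUEST_HEADERS,
        proxies={"https": PROXY_URL},
        verify=False,  # disables TLS verification, mirroring the commit
        timeout=300,
    )
    r.raise_for_status()  # fail early on HTTP errors (not in the original)
    save_path = dest / url.split("/")[-1]
    save_path.write_bytes(r.content)  # whole archive held in memory at once
    with zipfile.ZipFile(save_path) as z:
        z.extractall(dest)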