 import time
 import json

-from requests import HTTPError, get
+from requests import HTTPError
 from deprecated import deprecated
 from atlassian import utils
 from .errors import (
@@ -2296,7 +2296,6 @@ def get_page_as_pdf(self, page_id):
         url = "spaces/flyingpdf/pdfpageexport.action?pageId={pageId}".format(pageId=page_id)
         if self.api_version == "cloud":
             url = self.get_pdf_download_url_for_confluence_cloud(url)
-            return get(url).content

         return self.get(url, headers=headers, not_json_response=True)
@@ -2486,9 +2485,9 @@ def get_pdf_download_url_for_confluence_cloud(self, url):
         export is initiated. Instead it starts a process in the background
         and provides a link to download the PDF once the process completes.
         This functions polls the long running task page and returns the
-        download s3 url of the PDF.
+        download url of the PDF.
         :param url: URL to initiate PDF export
-        :return: Download s3 url for PDF file
+        :return: Download url for PDF file
         """
         download_url = None
         try:
@@ -2498,27 +2497,29 @@ def get_pdf_download_url_for_confluence_cloud(self, url):
             response = self.get(url, headers=headers, not_json_response=True)
             response_string = response.decode(encoding="utf-8", errors="strict")
             task_id = response_string.split('name="ajs-taskId" content="')[1].split('">')[0]
-            poll_url = "/services/api/v1/task/{0}/progress".format(task_id)
+            poll_url = "runningtaskxml.action?taskId={0}".format(task_id)
             while long_running_task:
                 long_running_task_response = self.get(poll_url, headers=headers, not_json_response=True)
-                long_running_task_response_parts = json.loads(
-                    long_running_task_response.decode(encoding="utf-8", errors="strict")
-                )
-                percentage_complete = long_running_task_response_parts["progress"]
-                is_update = long_running_task_response_parts["progress"] == 100
-                current_state = long_running_task_response_parts["state"]
+                long_running_task_response_parts = long_running_task_response.decode(
+                    encoding="utf-8", errors="strict"
+                ).split("\n")
+                percentage_complete = long_running_task_response_parts[6].strip()
+                is_successful = long_running_task_response_parts[7].strip()
+                is_complete = long_running_task_response_parts[8].strip()
                 log.info("Sleep for 5s.")
                 time.sleep(5)
                 log.info("Check if export task has completed.")
-                if is_update and current_state == "UPLOADED_TO_S3":
-                    log.info(percentage_complete)
-                    log.info("Downloading content...")
-                    log.debug("Extract taskId and download PDF.")
-                    download_url = self.get(long_running_task_response_parts["result"][6:], headers=headers)
-                    long_running_task = False
-                elif not is_update and current_state == "FAILED":
-                    log.error("PDF conversion not successful.")
-                    return None
+                if is_complete == "<isComplete>true</isComplete>":
+                    if is_successful == "<isSuccessful>true</isSuccessful>":
+                        log.info(percentage_complete)
+                        log.info("Downloading content...")
+                        log.debug("Extract taskId and download PDF.")
+                        current_status = long_running_task_response_parts[3]
+                        download_url = current_status.split('href="/wiki/')[1].split('"')[0]
+                        long_running_task = False
+                    elif is_successful == "<isSuccessful>false</isSuccessful>":
+                        log.error("PDF conversion not successful.")
+                        return None
                 else:
                     log.info(percentage_complete)
         except IndexError as e:
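For context, a minimal usage sketch of the code path this change touches: calling `get_page_as_pdf` on a Cloud instance, which internally triggers the export, polls the long-running task page via `get_pdf_download_url_for_confluence_cloud`, and then downloads the PDF bytes. This is a sketch only; the site URL, credentials, and page ID are placeholders, and the constructor arguments assume the library's usual `Confluence` client setup.

```python
# Hypothetical usage sketch, not part of this change.
from atlassian import Confluence

# Placeholder Cloud site and credentials; adjust for your own instance.
confluence = Confluence(
    url="https://example.atlassian.net/wiki",
    username="user@example.com",
    password="api-token",
)

# On Cloud, get_page_as_pdf() polls the long-running export task until it
# completes, then fetches the generated PDF and returns its raw bytes.
pdf_content = confluence.get_page_as_pdf("123456789")  # placeholder page ID

with open("page.pdf", "wb") as f:
    f.write(pdf_content)
```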