 import time
 import json
 
-from requests import HTTPError
+from requests import HTTPError, get
 from deprecated import deprecated
 from atlassian import utils
 from .errors import (
@@ -2296,6 +2296,7 @@ def get_page_as_pdf(self, page_id):
         url = "spaces/flyingpdf/pdfpageexport.action?pageId={pageId}".format(pageId=page_id)
         if self.api_version == "cloud":
             url = self.get_pdf_download_url_for_confluence_cloud(url)
+            return get(url).content
 
         return self.get(url, headers=headers, not_json_response=True)
 
@@ -2485,9 +2486,9 @@ def get_pdf_download_url_for_confluence_cloud(self, url):
         export is initiated. Instead it starts a process in the background
         and provides a link to download the PDF once the process completes.
         This functions polls the long running task page and returns the
-        download url of the PDF.
+        download S3 URL of the PDF.
         :param url: URL to initiate PDF export
-        :return: Download url for PDF file
+        :return: Download S3 URL for PDF file
         """
         download_url = None
         try:
@@ -2497,29 +2498,27 @@ def get_pdf_download_url_for_confluence_cloud(self, url):
             response = self.get(url, headers=headers, not_json_response=True)
             response_string = response.decode(encoding="utf-8", errors="strict")
             task_id = response_string.split('name="ajs-taskId" content="')[1].split('">')[0]
-            poll_url = "runningtaskxml.action?taskId={0}".format(task_id)
+            poll_url = "/services/api/v1/task/{0}/progress".format(task_id)
             while long_running_task:
                 long_running_task_response = self.get(poll_url, headers=headers, not_json_response=True)
-                long_running_task_response_parts = long_running_task_response.decode(
-                    encoding="utf-8", errors="strict"
-                ).split("\n")
-                percentage_complete = long_running_task_response_parts[6].strip()
-                is_successful = long_running_task_response_parts[7].strip()
-                is_complete = long_running_task_response_parts[8].strip()
+                long_running_task_response_parts = json.loads(
+                    long_running_task_response.decode(encoding="utf-8", errors="strict")
+                )
+                percentage_complete = long_running_task_response_parts["progress"]
+                is_update = long_running_task_response_parts["progress"] == 100
+                current_state = long_running_task_response_parts["state"]
                 log.info("Sleep for 5s.")
                 time.sleep(5)
                 log.info("Check if export task has completed.")
-                if is_complete == "<isComplete>true</isComplete>":
-                    if is_successful == "<isSuccessful>true</isSuccessful>":
-                        log.info(percentage_complete)
-                        log.info("Downloading content...")
-                        log.debug("Extract taskId and download PDF.")
-                        current_status = long_running_task_response_parts[3]
-                        download_url = current_status.split('href="/wiki/')[1].split('"')[0]
-                        long_running_task = False
-                    elif is_successful == "<isSuccessful>false</isSuccessful>":
-                        log.error("PDF conversion not successful.")
-                        return None
+                if is_update and current_state == "UPLOADED_TO_S3":
+                    log.info(percentage_complete)
+                    log.info("Downloading content...")
+                    log.debug("Extract taskId and download PDF.")
+                    download_url = self.get(long_running_task_response_parts["result"][6:], headers=headers)
+                    long_running_task = False
+                elif not is_update and current_state == "FAILED":
+                    log.error("PDF conversion not successful.")
+                    return None
                 else:
                     log.info(percentage_complete)
         except IndexError as e:
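
The rewritten polling branch assumes that /services/api/v1/task/{taskId}/progress returns a JSON document with progress, state, and result fields, where result carries a "/wiki/"-prefixed path to the exported file. That payload is not shown in this change, so the snippet below is only an illustrative sketch built from the keys the new code reads; the concrete values are made up.

import json

# Hypothetical progress payload: only the keys ("progress", "state", "result")
# and the state values "UPLOADED_TO_S3" / "FAILED" come from the change above;
# the field values here are placeholders.
raw = '{"progress": 100, "state": "UPLOADED_TO_S3", "result": "/wiki/download/temp/example.pdf"}'
parts = json.loads(raw)

is_update = parts["progress"] == 100   # export reported as finished
current_state = parts["state"]         # "UPLOADED_TO_S3" means the PDF is ready
relative_path = parts["result"][6:]    # drop the leading "/wiki/" (6 characters)
print(is_update, current_state, relative_path)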
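For reference, a minimal caller-side sketch of the cloud flow after this change, assuming the Confluence client forwards api_version="cloud" to the base REST client (matching the self.api_version == "cloud" check above); the site URL, credentials, and page id are placeholders.

from atlassian import Confluence

# Placeholder site, credentials, and page id.
confluence = Confluence(
    url="https://example.atlassian.net/wiki",
    username="user@example.com",
    password="api-token",
    api_version="cloud",
)

# On cloud, get_page_as_pdf() now returns the raw PDF bytes fetched from the S3 download URL.
pdf_bytes = confluence.get_page_as_pdf("123456789")
with open("page.pdf", "wb") as out:
    out.write(pdf_bytes)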