diff --git a/README.md b/README.md
index 003ae01..4dcd2d0 100644
--- a/README.md
+++ b/README.md
@@ -146,6 +146,7 @@ Optional `opts` include:
   deployTimeout: 1.2e6, // 20 mins
   parallelHash: 100, // number of parallel hashing calls
   parallelUpload: 4, // number of files to upload in parallel
+  maxRetry: 5, // number of times to retry on failed file uploads
   filter: filename => { /* return false to filter a file from the deploy */ },
   tmpDir: tempy.directory(), // a temporary directory to zip loose files into
   statusCb: statusObj => {
diff --git a/package.json b/package.json
index 7c53cc3..d966b86 100644
--- a/package.json
+++ b/package.json
@@ -30,6 +30,7 @@
   "dependencies": {
     "@netlify/open-api": "^0.4.1",
     "archiver": "^3.0.0",
+    "backoff": "^2.5.0",
     "clean-deep": "^3.0.2",
     "flush-write-stream": "^1.0.3",
     "folder-walker": "^3.2.0",
diff --git a/src/deploy/index.js b/src/deploy/index.js
index 20f13e2..9fb8a6e 100644
--- a/src/deploy/index.js
+++ b/src/deploy/index.js
@@ -19,7 +19,8 @@ module.exports = async (api, siteId, dir, opts) => {
       concurrentHash: 100, // concurrent file hash calls
       concurrentUpload: 15, // Number of concurrent uploads
       filter: defaultFilter,
-      syncFileLimit: 7000,
+      syncFileLimit: 7000, // number of files
+      maxRetry: 5, // number of times to retry an upload
       statusCb: statusObj => {
         /* default to noop */
         /* statusObj: {
diff --git a/src/deploy/upload-files.js b/src/deploy/upload-files.js
index 3b2ee0c..b83cafc 100644
--- a/src/deploy/upload-files.js
+++ b/src/deploy/upload-files.js
@@ -1,10 +1,11 @@
 const pMap = require('p-map')
 const fs = require('fs')
+const backoff = require('backoff')
 
 module.exports = uploadFiles
 
-async function uploadFiles(api, deployId, uploadList, { concurrentUpload, statusCb }) {
-  if (!concurrentUpload || !statusCb) throw new Error('Missing required option concurrentUpload')
+async function uploadFiles(api, deployId, uploadList, { concurrentUpload, statusCb, maxRetry }) {
+  if (!concurrentUpload || !statusCb || !maxRetry) throw new Error('Missing required options: concurrentUpload, statusCb, maxRetry')
   statusCb({
     type: 'upload',
     msg: `Uploading ${uploadList.length} files`,
@@ -22,20 +23,28 @@ async function uploadFiles(api, deployId, uploadList, { concurrentUpload, status
     let response
     switch (assetType) {
       case 'file': {
-        response = await api.uploadDeployFile({
-          body: readStream,
-          deployId,
-          path: encodeURI(normalizedPath)
-        })
+        response = await retryUpload(
+          () =>
+            api.uploadDeployFile({
+              body: readStream,
+              deployId,
+              path: encodeURI(normalizedPath)
+            }),
+          maxRetry
+        )
         break
       }
       case 'function': {
-        response = await api.uploadDeployFunction({
-          body: readStream,
-          deployId,
-          name: encodeURI(normalizedPath),
-          runtime
-        })
+        response = await retryUpload(
+          () =>
+            api.uploadDeployFunction({
+              body: readStream,
+              deployId,
+              name: encodeURI(normalizedPath),
+              runtime
+            }),
+          maxRetry
+        )
         break
       }
       default: {
@@ -56,3 +65,48 @@ async function uploadFiles(api, deployId, uploadList, { concurrentUpload, status
   })
   return results
 }
+
+/**
+ * Run one upload attempt, retrying transient network failures
+ * (FetchError) with fibonacci backoff; any other error rejects at once.
+ *
+ * NOTE(review): uploadFn captures a readStream created by the caller; if
+ * a failed attempt partially consumed the stream, a retry may not be able
+ * to re-read it — confirm the stream is recreated per attempt upstream.
+ *
+ * @param {() => Promise<*>} uploadFn performs a single upload attempt
+ * @param {number} maxRetry maximum number of retries before giving up
+ * @returns {Promise<*>} the successful upload response
+ */
+function retryUpload(uploadFn, maxRetry) {
+  return new Promise((resolve, reject) => {
+    let lastError
+    const fibonacciBackoff = backoff.fibonacci({
+      randomisationFactor: 0.5,
+      initialDelay: 100,
+      maxDelay: 10000
+    })
+    fibonacciBackoff.failAfter(maxRetry)
+
+    fibonacciBackoff.on('ready', tryUpload)
+
+    // Reject once the retry budget is exhausted; without this the returned
+    // promise would hang forever after maxRetry consecutive failures.
+    fibonacciBackoff.on('fail', () => reject(lastError))
+
+    function tryUpload() {
+      uploadFn()
+        .then(resolve)
+        .catch(e => {
+          if (e.name === 'FetchError') {
+            lastError = e
+            fibonacciBackoff.backoff()
+          } else {
+            reject(e)
+          }
+        })
+    }
+
+    tryUpload()
+  })
+}