-
Notifications
You must be signed in to change notification settings - Fork 8
/
nextflow.config
executable file
·629 lines (528 loc) · 21.4 KB
/
nextflow.config
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
/**
=========================================================================================
NANOME(Nanopore methylation) pipeline for Oxford Nanopore sequencing
=========================================================================================
NANOME Analysis Pipeline.
#### Homepage / Documentation
https://github.com/LabShengLi/nanome
@Author : Yang Liu
@FileName : nextflow.config
@Software : NANOME project
@Organization : JAX Sheng Li Lab
----------------------------------------------------------------------------------------
**/
// Default pipeline parameters; every entry can be overridden on the command
// line (--name value) or via -params-file / profile-specific params scopes.
params {
// nanome running software env for Conda, Docker and Singularity
conda_base_dir='/opt/conda' // sample: /home/liuya/anaconda3
conda_name = "nanome" // conda env name or full path, sample: /projects/li-lab/yang/anaconda3/envs/nanome
conda_cache = 'local_conda_cache'
docker_name = "liuyangzzu/nanome:v2.0.6" // main container
singularity_name = "docker://liuyangzzu/nanome:v2.0.6"
singularity_cache = 'local_singularity_cache'
containerOptions = null // or "--gpus all" for docker
tombo_docker_name = "liuyangzzu/nanome:v1.4" // used for tombo, resquiggle
clair3_docker_name = "hkubal/clair3:latest" // used for variant call
deepsignal2_docker_name = "liuyangzzu/deepsignal2:v1.0" // used for deepsignal v2
guppy_stable_name = "liuyangzzu/guppy_stable:v6.3.8" // solve issues for guppy meth call in v6.4.6
// process and executor configurations
executor = null // null = local executor
queueSize = 50
tracedir = 'NANOME_trace'
help = false
echo = false
cacheStrategy = 'lenient'
errorStrategy = 'ignore' // other options: 'retry', 'terminate'
maxRetries = 5
// number of processors for a task
processors = 2
// Default input params for pipeline running
// dsname = 'TestData'
// input = 'https://raw.githubusercontent.com/LabShengLi/nanome/master/inputs/test.demo.filelist.txt'
dsname = null // dataset name, used in output file naming
input = null // input fast5 dir/tar/filelist
outdir = "results"
// Data type, can be human, ecoli, etc.
genome = "hg38"
dataType = null // default is inferred by NANOME
chrSet = null // chromosomes used, default will apply to Human/Ecoli chromosomes, else need to specify such as 'chr1 chr2'
cleanAnalyses = false // true if clean previous analysis in fast5 inputs
deduplicate = false // true if deduplicate read-level outputs for tools
sort = false // true if sort read level unified outputs
// true if clean work dir when pipeline complete
cleanup = false // clean work dir after workflow finished
cleanStep = true // clean after each process finished, optimize disk usage
//##################################################################
//############### Reserved by tools default settings ###############
//##################################################################
//##################################################################
// Default tool running configuration, top 4 as default
runNanopolish = true
runMegalodon = true
runDeepSignal1 = false
runGuppy = false
runGuppyGcf52ref= false // Guppy readlevel extract software, not certified by us
runNANOME = true // NANOME consensus
runDeepSignal = true // run DeepSignal v2
runNewTool = false // run new added tool in interface
runQC = true
runAlignment = true
stopDeepSignal = false // used for switch between two HPC: gpu and cpu
newModuleConfigs = null
runTombo = false
runDeepMod = false
runMETEORE = false
runResquiggle = false
runBasecall = true // even user provided basecalled input, this step need to run for prepare input for later steps
skipBasecall= false // if user prepared basecalled input, want to skip basecall processing
runMethcall = true
runCombine = true
runEval = false
readEvalOptions = null // additional options for read-level evaluation
siteEvalOptions = null // additional options for site-level evaluation
bg_encode = 'encode' // background-truth (BS-seq) file encoding
bg1 = null // background-truth replicate 1
bg2 = null // background-truth replicate 2
runidSuffix = 'BS_2Rep'
genome_annotation_dir = null
min_bgtruth_cov = 1
toolcov_cutoff = 1
llr_cutoff_nanopolish = null // e.g., ":-2.0:2.0"
llr_cutoff_megalodon = null
llr_cutoff_deepsignal = null
llr_cutoff_nanome = null
// if perform evaluations after callings
outputQC = true // output the QC report for basecall
skipQC = false // if skip QC analysis
outputIntermediate = false // if keep each batch outputs
outputRaw = true // if output the raw combined outputs for each tool's format
outputGenomeBrowser = false
outputBam = false // if output basecalled merged bam
outputONTCoverage = false // if output ONT coverage
// meth type
hmc = false // true if running 5hmc for megalodon
filter_fail_fq = false // true if filter out failed fastq files for basecall
filter_fail_methcall = true // true if filter out failed bamout files for guppy methcall
multi_to_single_fast5 = false // if perform multi to single fast5 step
consensus_by_chr = true // if compute nanome consensus by chr, this will accelerate
force_llr2 = false // if force to llr2 instead of llr e base, esp for megalodon
// phasing params
phasing = false // true if running clair3 and whatshap for phasing
ctg_name = null // variant calling region
phase_manner1 = true
phase_manner2 = true
phasing_tools='nanopolish,megalodon,nanome,deepsignal,guppy'
//======================================================
//======================================================
// Tools' specific additional options
// for compute intensive jobs, we use processors*times as multiprocessing options
highProcTimes = 4
mediumProcTimes = 2 // for normal process speedup, e.g., Tombo, Nanopolish, etc.
lowProcTimes = 1 // for large memory process, e.g., megalodon, use conservative time 1 is reasonable
reduceProcTimes = 1 // can be 0.5 for reduce the process, e.g., nanopolish, resquiggle, may set to 0.5 for large scale data but limit memory
// File name used for reference genome in pipeline, default values
GENOME_DIR = "reference_genome"
GENOME_FN = "ref.fasta"
CHROM_SIZE_FN = "chrom.sizes"
// Resquiggle specifications
BasecallGroupName = "Basecall_1D_000" // Basecall ID name used by resquiggle
BasecallSubGroupName = "BaseCalled_template"
ResquiggleCorrectedGroup = "RawGenomeCorrected_000"
tomboResquiggleOptions = null // '--signal-length-range 0 500000 --sequence-length-range 0 50000', ref: tombo resquiggle --print-advanced-arguments
tomboMultiprocessRegionSize = 1000 // tombo methylation calling options
tomboThreadsPerProcess = 1 // --threads-per-process , set to 4 or more for faster, but danger for memory issues
feature_extract = false // if extract tombo resquiggle features using deepsignal v1
publishResquiggle = false // if publish resquiggle symlink
// DeepSignal v1 model names
deepsignalDir = null // default is get model online, or specify the name of model dir
DEEPSIGNAL_MODEL_DIR = 'model.CpG.R9.4_1D.human_hx1.bn17.sn360.v0.1.7+'
DEEPSIGNAL_MODEL = 'bn_17.sn_360.epoch_9.ckpt'
// DeepSignal v2 model names
DEEPSIGNAL2_MODEL_FILE = "https://storage.googleapis.com/jax-nanopore-01-project-data/nanome-input/model.dp2.CG.R9.4_1D.human_hx1.bn17_sn16.both_bilstm.b17_s16_epoch4.ckpt.tar.gz"
DEEPSIGNAL2_MODEL_NAME = "model.dp2.CG.R9.4_1D.human_hx1.bn17_sn16.both_bilstm.b17_s16_epoch4.ckpt"
// DeepMod options
// DeepMod default used model specifications
DeepModGithub = "https://github.com/WGLab/DeepMod/archive/refs/tags/v0.1.3.tar.gz"
DEEPMOD_RNN_MODEL = "rnn_conmodC_P100wd21_f7ne1u0_4/mod_train_conmodC_P100wd21_f3ne1u0"
DEEPMOD_CLUSTER_MODEL = "na12878_cluster_train_mod-keep_prob0.7-nb25-chr1/Cg.cov5.nb25"
useDeepModCluster = false
moveOption = false // options of move table
// Guppy model specifications
guppyDir = null // default is in PATH var
// Suggested model by Guppy basecall
GUPPY_BASECALL_MODEL = "dna_r9.4.1_450bps_hac.cfg"
// Suggested model by Guppy methcall
// GUPPY_METHCALL_MODEL="dna_r9.4.1_450bps_modbases_dam-dcm-cpg_hac.cfg" // for Guppy v4.2.2
// GUPPY_METHCALL_MODEL = 'dna_r9.4.1_450bps_modbases_5mc_hac.cfg' // for Guppy v6.0.1
GUPPY_METHCALL_MODEL = 'dna_r9.4.1_450bps_modbases_5mc_cg_hac.cfg'
// number of threads, used in methylation call, too large may cause out of memory
// extract read level predictions cutoffs
guppy_canon_threshold = 0.33
guppy_mod_threshold = 0.66
guppy_num_callers = 2
guppy_cpu_threads_per_caller = 2
guppy_gpu_runners_per_device = 2
// Megalodon model and options
rerio = false
rerioDir = null // default is online rerio github model
rerioGithub = 'https://github.com/nanoporetech/rerio'
MEGALODON_MODEL = "res_dna_r941_min_modbases_5mC_v001.cfg"
remoraModel = 'dna_r9.4.1_e8'
GUPPY_TIMEOUT = 800 // For CPU running for Megalodon, it may need be enlarged, or else Megalodon will be empty outputs
READS_PER_GUPPY_BATCH = 100
SAMTOOLS_PATH = "samtools"
// METEORE Github
METEOREDir = false // default is online github, or else specify locations
METEORE_GITHUB_ONLINE = "https://github.com/comprna/METEORE/archive/refs/tags/v1.0.0.tar.gz"
METEOREDirName = "METEORE-1.0.0"
// consensus model
NANOME_MODEL = 'nanome_cs' // or 'NA12878', 'NA12878_XGBoost_NA_top3'
NANOME_CONSENSUS_TOOLS = 'Nanopolish,Megalodon,DeepSignal' // or 'Megalodon DeepSignal' for NANOME2T
CS_MODEL_SPEC = 'xgboost_basic_w' // cs model short spec
CS_MODEL_FILE = 'xgboost_basic_w' // cs file name or key dict if exists
// NANOME_MODEL_FILE = null // model file, i.e., '/pod/2/li-lab/wadee/progress/nanome/7-25/models/xgboost_seq_weight_model.joblib'
// NANOME_MODEL_BASE = 'XGBoost' // base model, or RF
// NANOME_SPECIFIC_MODEL_TYPE = 'xgboost_seq_weight' // specific model, for --specific_model_type
// Clair3 config
CLAIR3_MODEL_NAME = "r941_prom_sup_g5014"
CLAIR3_var_pct_phasing = 0.80
CLAIR3_phasing_qual = 20
PHASE_meth_score_cutoff = 1.5 // default meth LLR score cutoff for phasing
// Lifebit cloudOS config used, please set to 'conf/executors/lifebit.config'
config = null // 'conf/executors/local.config'
}
// Optionally pull GCS-hosted input settings; a missing/unreadable config file
// is non-fatal — the pipeline continues with the defaults above.
try {
// Include input files from google cloud storage
includeConfig 'conf/executors/gcp_input.config'
} catch (Exception e) {
System.err.println("WARNING: Could not load config file: conf/executors/gcp_input.config")
}
// Running on different platforms
profiles {
// Default profile used when user not specify, ref: https://www.nextflow.io/docs/latest/config.html#config-profiles
// For Lifebit CloudOS running, please set --config as 'conf/executors/lifebit.config'
standard { if (params.config) {includeConfig params.config} }
test { includeConfig 'conf/examples/test.config' }
test_human { includeConfig 'conf/examples/test_human.config' }
jax { includeConfig 'conf/executors/jaxhpc_input.config' }
// Run all processes inside the project conda environment
conda {
process.conda = params.conda_name
conda.cacheDir = params.conda_cache
}
// Run all processes in Docker containers; per-tool images are selected
// via withName below
docker {
params {
containerOptions = null // users using GPU need to set to "--gpus all"
}
process {
container = params.docker_name
containerOptions = params.containerOptions // or "--gpus all" Note: this is not compatible with GitHub citest/naive docker users
withName: 'Tombo|DeepMod|METEORE' {
container = params.tombo_docker_name
}
withName: 'CLAIR3' {
container = params.clair3_docker_name
}
withName: 'DEEPSIGNAL2' {
container = params.deepsignal2_docker_name
}
withName: 'Guppy6' {
container = params.guppy_stable_name
}
}
docker{
enabled = true
// runOptions = params.containerOptions // pass CUDA var to process for docker container, --gpus all, ref:https://docs.docker.com/engine/reference/commandline/run/
// temp = 'auto'
envWhitelist = 'CUDA_VISIBLE_DEVICES,HDF5_PLUGIN_PATH' // Ref: https://www.nextflow.io/docs/latest/config.html#scope-docker
}
env {
// for container, fast5 gz format need export this env
HDF5_PLUGIN_PATH = "/opt/conda/envs/nanome/hdf5/lib/plugin"
}
}
// Run all processes in Singularity; local image paths (starting with "/")
// are used as-is, otherwise a docker:// prefix is added for online images
singularity {
params {
// bind /flashscratch, using gpu by default
containerOptions = "--nv --bind /flashscratch"
}
process {
container = params.singularity_name
containerOptions = params.containerOptions // "--nv"
withName: 'Tombo|DeepMod|METEORE' {
// container = "docker://${params.tombo_docker_name}"
// reserve local image for singularity, or append docker:// header for online image
container = params.tombo_docker_name.startsWith("/") ?
params.tombo_docker_name : "docker://${params.tombo_docker_name}"
}
withName: 'CLAIR3' {
// container = "docker://${params.clair3_docker_name}"
container = params.clair3_docker_name.startsWith("/") ?
params.clair3_docker_name : "docker://${params.clair3_docker_name}"
}
withName: 'DEEPSIGNAL2' {
// container = "docker://${params.deepsignal2_docker_name}"
container = params.deepsignal2_docker_name.startsWith("/") ?
params.deepsignal2_docker_name : "docker://${params.deepsignal2_docker_name}"
}
withName: 'Guppy6' {
container = params.guppy_stable_name.startsWith("/") ?
params.guppy_stable_name : "docker://${params.guppy_stable_name}"
}
}
singularity {
enabled = true
autoMounts = true
cacheDir = params.singularity_cache
envWhitelist = 'CUDA_VISIBLE_DEVICES,HDF5_PLUGIN_PATH' // Ref: https://github.com/nextflow-io/nextflow/issues/776
}
env {
// for container, fast5 gz format need export this env
HDF5_PLUGIN_PATH = "/opt/conda/envs/nanome/hdf5/lib/plugin"
}
}
hpc { // general hpc configuration
params {
// hpc slurm default parameters
queue = 'gpu'
qos = 'inference'
processors = 4
memory = '32GB'
time = '5h'
gresOptions = 'gpu:v100:1' // null/false for no need for gpu resources
// Defaults max resource
max_memory = 300.GB
max_cpus = 16
max_time = 336.h
queueSize = 50 // max number of job submit
}
process {
executor = 'slurm'
queue = params.queue
qos = params.qos
cpus = params.processors
memory = params.memory
time = params.time
// --gres only added when gresOptions is set (Groovy truth: null/false skip it)
clusterOptions = "-q ${params.qos} ${params.gresOptions ? '--gres=' + params.gresOptions : ' '} "
}
executor {
queueSize = params.queueSize
}
}
sumner { // jax hpc sumner configuration
params{
max_cpus = 72
max_memory = 768.GB
queue = 'compute,high_mem' // sumner support multiple partitions
qos = 'batch'
processors = 8
memory = '32GB'
time = '3d'
gresOptions = null
queueSize = 300 // max number of job submit
}
process{
executor = "slurm"
module = "slurm:singularity"
queue = params.queue
cpus = params.processors
memory = params.memory
time = params.time
clusterOptions = "-q ${params.qos} ${params.gresOptions ? '--gres=' + params.gresOptions: ' ' } "
}
executor {
queueSize = params.queueSize
}
}
winter { // jax hpc winter configuration
params{
max_cpus = 72
max_memory = 768.GB
queue = 'gpu' // winter only have one partition
qos = 'inference' // or use training, time can be 14 days
processors = 8
memory = '32GB'
time = '6h'
gresOptions = 'gpu:v100:1' // null/false if no gpu needed
queueSize = 24
}
process{
executor = "slurm"
module = "slurm:singularity"
queue = params.queue
cpus = params.processors
memory = params.memory
time = params.time
clusterOptions = "-q ${params.qos} ${params.gresOptions ? '--gres=' + params.gresOptions : ' '}"
}
executor {
queueSize = params.queueSize
}
}
// Google cloud computing platform
// ref doc: https://www.nextflow.io/docs/latest/google.html
// ref doc: https://www.nextflow.io/docs/latest/google.html#configuration
// ref doc: https://www.nextflow.io/docs/latest/google.html#google-lifesciences-config
google {
params{
//##################################################################
//############### Reserved by google cloud computing ###############
//##################################################################
//##################################################################
// Google cloud computing configurations defaults
// used for google computing platform, ref: https://cloud.google.com/compute/docs/regions-zones#available
// for exit code error info, ref: https://cloud.google.com/life-sciences/docs/troubleshooting#error_codes
projectCloud = null // e.g., 'jax-nanopore-01'
locationCloud = 'us'
regionCloud = 'us-east1'
zoneCloud = null // use region instead of zone can get GPU from more zones
debugCloud = true
sshDaemonCloud = true
bootDiskSizeCloud = 30.GB
preemptibleCloud = true // save costs using preemptible way
networkCloud = 'default'
subnetworkCloud = 'default'
// Example: "n1-standard-8", or custom-[NUMBER_OF_CPUS]-[AMOUNT_OF_MEMORY]
machineType = null //"n1-standard-8" or "n1-highmem-8", ref: https://cloud.google.com/compute/docs/general-purpose-machines#n1-shared-core
processors = 8 // for 8 cpus, max mem is 52 GB in GCP. Memory must be between 0.9 GB per vCPU, up to 6.5 GB per vCPU.
memory = '30 GB'
gpuType = 'nvidia-tesla-p100' // or 'nvidia-tesla-t4', lower price than 'nvidia-tesla-v100', ref: https://cloud.google.com/compute/gpus-pricing
gpuNumber = 1
lowDiskSize = 100.GB // for test and check
midDiskSize = 150.GB // for methylation
highDiskSize = 200.GB // for untar, basecall and resquiggle, need much disk sizes
//override default params for GCP
errorStrategy = 'ignore'
}
executor {
name = 'google-lifesciences'
pollInterval = '30 sec'
}
google {
project = params.projectCloud
// use region instead of zone, a region contains many zones: zone = 'us-east1-c'
location = params.locationCloud
region = params.regionCloud
zone = params.zoneCloud
lifeSciences.debug = params.debugCloud
lifeSciences.preemptible = params.preemptibleCloud
lifeSciences.sshDaemon = params.sshDaemonCloud
lifeSciences.bootDiskSize = params.bootDiskSizeCloud
lifeSciences.network = params.networkCloud
lifeSciences.subnetwork = params.subnetworkCloud
lifeSciences.usePrivateAddress = false
enableRequesterPaysBuckets = true
}
env {
PATH = null
}
// Include nanome input from google cloud params
// includeConfig 'conf/gc_params.config'
process {
// Machine types ref: https://cloud.google.com/solutions/sql-server-performance-tuning-compute-engine.pdf?hl=en
// or: https://cloud.google.com/compute/docs/general-purpose-machines#n1-standard
machineType = params.machineType
cpus = params.processors
memory = params.memory
time = null
disk = params.midDiskSize
maxRetries = params.maxRetries
echo = params.echo
// Retry on GCP transient exit codes (2, 10, 14) until the last attempt,
// then fall back to params.errorStrategy
// Ref: https://cloud.google.com/life-sciences/docs/troubleshooting
errorStrategy = {task.attempt == process.maxRetries ?
params.errorStrategy : task.exitStatus in [2, 10, 14] ? 'retry' : params.errorStrategy }
withName: 'ENVCHECK' {
// download Rerio model may encounter exitstatus:1, need retry
errorStrategy = {task.attempt == process.maxRetries ?
params.errorStrategy : task.exitStatus in [1, 2, 10, 14] ? 'retry' : params.errorStrategy }
}
withName: 'ENVCHECK|BASECALL|Guppy|Guppy6|MEGALODON|DEEPSIGNAL2' { // allocate gpu
accelerator = [request: params.gpuNumber, type: params.gpuType]
beforeScript = "export CUDA_VISIBLE_DEVICES=0" // pass CUDA var to process, since GCP do not export it
containerOptions = { workflow.containerEngine == "singularity" ? '--nv':
( workflow.containerEngine == "docker" ? '--gpus all': null ) }
}
withName: 'UNTAR|BASECALL|Guppy|Guppy6|RESQUIGGLE' { // allocate high disk size
disk = params.highDiskSize
}
}
}
}
// Task environment: prepend the Guppy bin dir to PATH when params.guppyDir is
// set; the literal '$PATH' is expanded by the shell at task runtime
env {
// Local test, specify the guppy dir in PATH
PATH = ! params.guppyDir ? '$PATH': ["${params.guppyDir}/bin", '$PATH'].join(':')
}
// Baseline per-process defaults; profile-specific process scopes override these
process {
cache = params.cacheStrategy
errorStrategy = params.errorStrategy
echo = params.echo
maxRetries = params.maxRetries
cpus = 2
memory = '4GB'
time = '5h'
withName: 'ENVCHECK' {
// allow retry if download Rerio model failed (exit status 1);
// on the final attempt fall back to params.errorStrategy
errorStrategy = {task.attempt >= process.maxRetries ?
params.errorStrategy : task.exitStatus in [1] ? 'retry' : params.errorStrategy }
}
}
// Executor selection (null = local) and cap on concurrently queued tasks
executor {
name = params.executor
queueSize = params.queueSize
}
cleanup = params.cleanup // if true, delete the work dir after a successful run
// Execution visualisation and trace outputs, all written under params.tracedir
// and tagged with the dataset name
dag {
file = "${params.tracedir}/NANOME_dag_${params.dsname}.svg"
overwrite = true
}
report {
file = "${params.tracedir}/NANOME_report_${params.dsname}.html"
overwrite = true
}
timeline {
file = "${params.tracedir}/NANOME_timeline_${params.dsname}.html"
overwrite = true
}
trace {
file = "${params.tracedir}/NANOME_trace_${params.dsname}.txt"
overwrite = true
}
// Pipeline metadata; nextflowVersion is the minimum Nextflow required
manifest {
name = 'LabShengLi/nanome'
author = 'Yang Liu'
homePage = 'https://github.com/LabShengLi/nanome'
description = 'NANOME (Nanopore methylation) pipeline for Oxford Nanopore sequencing by JAX Li Lab'
mainScript = 'main.nf'
nextflowVersion = '>=20.07.1'
version = '2.0.0'
}
// Cap a resource request at the pipeline-wide maximum.
//   obj  - the requested value (MemoryUnit, Duration, or int, per `type`)
//   type - 'memory', 'time', or 'cpus', selecting params.max_memory /
//          params.max_time / params.max_cpus as the ceiling
// Returns the capped value. If the configured maximum is not parseable, or
// `type` is unrecognized, the original request is returned unchanged (the
// previous version silently returned null for an unknown type).
def check_max(obj, type) {
    if (type == 'memory') {
        try {
            def max_mem = params.max_memory as nextflow.util.MemoryUnit
            // use the sign of compareTo, not '== 1': Comparable only
            // guarantees positive/zero/negative, not exact values
            return obj.compareTo(max_mem) > 0 ? max_mem : obj
        } catch (all) {
            println " ### ERROR ### Max memory '${params.max_memory}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'time') {
        try {
            def max_time = params.max_time as nextflow.util.Duration
            return obj.compareTo(max_time) > 0 ? max_time : obj
        } catch (all) {
            println " ### ERROR ### Max time '${params.max_time}' is not valid! Using default value: $obj"
            return obj
        }
    } else if (type == 'cpus') {
        try {
            return Math.min( obj, params.max_cpus as int )
        } catch (all) {
            println " ### ERROR ### Max cpus '${params.max_cpus}' is not valid! Using default value: $obj"
            return obj
        }
    }
    // unknown resource type: pass the request through rather than returning null
    return obj
}