nextflow.config
/**
 * This configuration file specifies a default pipeline setup for running brain
 * decoding on a local machine with a GPU. (See the project README for minimum
 * computing specs to run the pipeline.)
 *
 * While this pipeline does *work* on a local machine, we recommend deploying
 * on a high-performance cluster. See the file `nextflow.slurm.config` for an
 * example deployment configuration for SLURM clusters.
 */
process {
    executor = "local"
    /**
     * Pipeline processes are assigned "labels" according to their
     * computational requirements. Here we can specify the actual effects of
     * each label when the pipeline runs on your system.
     *
     * This first label, `small`, describes simple tasks which can easily run
     * on a single CPU with minimal RAM -- e.g. downloading a file.
     */
    withLabel: 'small' {
        executor = 'local'
        memory = '1G'
    }
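    /**
     * For reference, a process in the pipeline script opts into one of these
     * labels with Nextflow's `label` directive. A minimal sketch (the process
     * name here is hypothetical, not one of this pipeline's actual processes):
     *
     *   process fetch_data {
     *       label 'small'
     *       // ...
     *   }
     */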
    /**
     * `medium` describes tasks which require moderate host memory, e.g.
     * loading brain images and learning linear regression models.
     */
    withLabel: 'medium' {
        time = '1d'
        memory = '8G'
    }
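    /**
     * If tasks with this label exceed these limits on your data, you can
     * raise the values here; Nextflow accepts settings such as
     * memory = '16G' or time = '2d'. (Illustrative values, not project
     * recommendations.)
     */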
    /**
     * `gpu_medium` describes tasks which require a GPU with moderate memory
     * and moderate host RAM -- e.g. holding a dataset in memory and running
     * neural network feed-forward inference.
     */
    withLabel: 'gpu_medium' {
        containerOptions = "--nv"
    }
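    /**
     * Note: the `--nv` option used for the GPU labels is passed straight to
     * Singularity via `containerOptions`; it exposes the host's NVIDIA driver
     * and GPU devices inside the container.
     */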
    /**
     * `gpu_large` describes tasks which require a GPU with lots of memory and
     * large host RAM -- e.g. holding a training dataset in memory and running
     * neural network training.
     */
    withLabel: 'gpu_large' {
        containerOptions = "--nv"
        time = '1d'
        memory = '16G'
    }
}
/**
 * You can limit the maximum number of processes executing in parallel with the
 * `queueSize` setting below. This may be relevant when running with a single
 * GPU, for example.
 */
executor {
    queueSize = 1
}
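/**
 * For example, on a host with several GPUs you might raise this limit (say,
 * `queueSize = 4`) so that more tasks run concurrently -- an illustrative
 * value; match it to your hardware.
 */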
// There should be no need to edit below this line.
//////////////////////////////////////////
params.bert_container = "library://jon/default/bert:base-gpu"
params.structural_probes_container = "library://jon/default/structural-probes:latest"
params.decoding_container = "library://jon/default/nn-decoding:emnlp2019"
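/**
 * These image URIs can also be overridden at launch time without editing this
 * file, using Nextflow's standard double-dash parameter syntax, e.g. appending
 * `--decoding_container <some-other-image>` to your `nextflow run` command.
 * (Sketch only; the placeholder image URI is yours to fill in.)
 */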
singularity {
    enabled = true
    envWhitelist = "CUDA_VISIBLE_DEVICES"
    autoMounts = true
}
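/**
 * `envWhitelist` above forwards the host's CUDA_VISIBLE_DEVICES value into
 * each container, so you can pin the pipeline to a particular GPU, e.g. by
 * launching with `CUDA_VISIBLE_DEVICES=0 nextflow run ...` (illustrative
 * invocation; adapt to your environment).
 */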
report.enabled = true
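/**
 * If you also want a custom report path or additional execution logs, Nextflow
 * supports settings such as the following (optional, shown purely as a
 * sketch):
 *
 *   report.file = 'execution_report.html'
 *   trace.enabled = true
 *   timeline.enabled = true
 */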