@@ -57,7 +57,7 @@ site.rstampede {
57
57
jobQueue : "development" # Select queue from (development, normal, large)
58
58
maxJobTime : "00:25:00" # Time requested per job
59
59
jobOptions.slurm{
60
- "reservation" : "mpierce_27 "
60
+ "reservation" : "XSEDE_2016_1"
61
61
}
62
62
}
63
63
}
@@ -176,6 +176,34 @@ site.comet {
176
176
app.ALL { executable: "*" } # All apps to be found from commandline
177
177
}
178
178
179
+ # Configuration for Comet with XSEDE Tutorial Reservation - running on a slurm compute node, submitting back to Comet
180
+
181
+ # Do *NOT* run swift on Comet login nodes. There are memory limits which prevent swift from running
182
+ # properly on these machines.
183
+ site.rcomet {
184
+ execution {
185
+ type : "coaster" # Use coasters to run on remote sites
186
+ URL : "comet.sdsc.xsede.org" # Comet login URL - not used for local:slurm
187
+ jobManager: "local:slurm" # use slurm commands to submit jobs locally
188
+ options {
189
+ maxJobs : 4 # Max jobs submitted to LRM
190
+ nodeGranularity : 1 # Nodes per job
191
+ maxNodesPerJob : 1 # Nodes per job
192
+ tasksPerNode : 24 # Tasks per Node
193
+ maxJobTime : "00:30:00" # Time requested per job
194
+ jobOptions.slurm{
195
+ "reservation" : "XSEDE16Mats"
196
+ }
197
+ jobQueue : "compute" # Submit to compute partition (from shared)
198
+ }
199
+ }
200
+ staging : "local" # Stage files from "local" fs to compute nodes
201
+ workDirectory : "/tmp/"${env.USER}"/swiftwork" # Work dir on compute nodes
202
+ maxParallelTasks : 101 # Maximum number of parallel tasks
203
+ initialParallelTasks: 100 # Maximum number of tasks at start
204
+ app.ALL { executable: "*" } # All apps to be found from commandline
205
+ }
206
+
179
207
# Instructions for Trestles
180
208
# 1. Do *NOT* run on the Trestles login nodes. There are memory limits which prevent swift from running
181
209
# properly on these machines.
0 commit comments