File tree Expand file tree Collapse file tree 2 files changed +58
-0
lines changed Expand file tree Collapse file tree 2 files changed +58
-0
lines changed Original file line number Diff line number Diff line change
1
#!/bin/bash
#
# bswift: submit a Swift workflow run to SLURM as a short batch job.
#
# Usage: bswift <swift-args...>
# All arguments are forwarded to the `swift` command executed inside the job.
# Do NOT run swift directly on login nodes (memory limits); this wrapper
# pushes the run onto a compute node in the "shared" partition.

set -euo pipefail

# Shell-quote the forwarded arguments so filenames/options containing spaces
# or glob characters survive being pasted into the (unquoted) here-document
# that forms the batch job script. (Plain `swift $*` breaks on such args.)
swift_args=""
if (( $# > 0 )); then
  swift_args=$(printf '%q ' "$@")
fi

# The here-doc delimiter is deliberately UNQUOTED: $0 and $swift_args expand
# now, at submission time, while the escaped \$(date), \$PWD and \$(hostname)
# expand later, inside the job, so the log reports where and when the job
# actually ran (not the login node / submission time).
sbatch -J bswift -o bswift.%j.out -p shared --nodes=1 --ntasks-per-node=8 \
    --mem=5G --export=ALL -t 00:10:00 << END
#!/bin/bash

echo "$0 : Started at \$(date)"
echo "$0 : Running in dir \$PWD"
echo "$0 : Running on host \$(hostname)"

swift $swift_args

echo "$0 : Swift script submitted"

END

# For reference: the equivalent #SBATCH directives if this were written as a
# conventional batch script instead of an inline here-document:
#
#   #SBATCH -p shared
#   #SBATCH --nodes=1
#   #SBATCH --ntasks-per-node=8
#   #SBATCH --mem=40G
#   #SBATCH -t 01:00:00
#   #SBATCH -J HPL.8
#   #SBATCH -o HPL.8.%j.%N.out
#   #SBATCH -e HPL.8.%j.%N.err
#   #SBATCH --export=ALL
#
#   export MV2_SHOW_CPU_BINDING=1
#   ibrun -np 8 ./xhpl.exe
Original file line number Diff line number Diff line change @@ -151,6 +151,31 @@ site.gordon {
151
151
app.ALL { executable: "*" } # All tasks to be found from commandline
152
152
}
153
153
154
# Configuration for Comet - running on a slurm compute node, submitting back to Comet
#
# Do *NOT* run swift on Comet login nodes. There are memory limits which prevent
# swift from running properly on these machines; run it from a compute node,
# from which jobs are submitted back to Comet's own SLURM instance.
site.comet {
    execution {
        type : "coaster"                 # Use coasters to run on remote sites
        URL : "comet.sdsc.xsede.org"     # Comet login URL - not used for local:slurm
        jobManager: "local:slurm"        # Use local slurm commands (sbatch etc.) to submit jobs
        options {
            maxJobs : 4                  # Max concurrent coaster jobs submitted to the LRM
            nodeGranularity : 1          # Nodes per coaster job (allocation granularity / lower bound)
            maxNodesPerJob : 1           # Max nodes per coaster job (upper bound)
            tasksPerNode : 24            # App tasks scheduled per node
            maxJobTime : "00:30:00"      # Walltime requested per coaster job
            jobQueue : "compute"         # Submit to compute partition (changed from shared)
        }
    }
    staging : "local"                    # Stage files from "local" fs to compute nodes
    workDirectory : "/tmp/"${env.USER}"/swiftwork" # Work dir on compute nodes (node-local /tmp)
    maxParallelTasks : 101               # Maximum number of parallel tasks
    initialParallelTasks: 100            # Maximum number of tasks at start
    app.ALL { executable: "*" }          # All apps to be found from commandline
}
178
+
154
179
# Instructions for Trestles
155
180
# 1. Do *NOT* run on the Trestles login nodes. There are memory limits which prevent swift from running
156
181
# properly on these machines.
You can’t perform that action at this time.
0 commit comments