#!/bin/zsh -l
#SBATCH --nodes=1
#SBATCH --ntasks=1
#SBATCH --cpus-per-task=60
#SBATCH --mem-per-cpu=1G
#SBATCH --time=7-00:00:00
#SBATCH --output=log/product.stdout --open-mode=append
#SBATCH --error=log/product.stderror --open-mode=append
#SBATCH --mail-user=wzeng002@ucr.edu
#SBATCH --mail-type=ALL
#SBATCH --job-name="product"
#SBATCH -p batch,intel # This is the default partition; you can use any of the following: intel, batch, highmem, gpu, short
#SBATCH --export=ALL,ON_SBATCH=TRUE # export ON_SBATCH so the script can detect batch execution
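# Environment detection flags (assumption: ON_SRUN=TRUE is exported on compute
# nodes, e.g. by the shell rc or via `srun --export=ALL,ON_SRUN=TRUE`;
# ON_SBATCH=TRUE comes from the --export line above). The case below uses both
# to distinguish HEAD, SRUN, and SBATCH runs.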
# module itpp is already loaded in zsh and bash
# No need to change directory: sbatch starts the job in $SLURM_SUBMIT_DIR,
# so relative paths resolve properly.
# Detect where we are running: head node, interactive srun, or sbatch.
WORK_STATION=HEAD
case $ON_SRUN in
  "TRUE")
    case $ON_SBATCH in
      "TRUE")
        # echo "on sbatch"
        WORK_STATION=SBATCH
        ;;
      *)
        # echo "on srun"
        WORK_STATION=SRUN
        # interactive run: clear workers left over from a previous run
        pkill .product
        ;;
    esac
    ;;
  *)
    WORK_STATION=HEAD
    # echo "not on srun or sbatch"
    ;;
esac
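# Net effect: a fresh interactive srun session clears leftover workers with
# pkill, while sbatch and head-node runs leave running workers untouched.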
# Print name of node
echo
echo "============= The World is Unitary ============"
echo
echo start job on `hostname` `date`
#./run_counter_concatenation.sh
# record of previous runs: index 408 was size 8, index 407 was size 7
# job name should be short so it is easy to search for with pgrep
job_name=product
index=499
# 250-266 for random code on cherenkov
max_trial=1000000
# sub_mode: 1 = two random codes; 2 = identical reverse A B; 3 = identical A B
sub_mode=1
#na_input=7 # no longer set; the size field in the log file names below expands to empty
n_low=9
n_high=9
k_low=1
k_high=1
# note for run info
note="[n_low=$n_low, n_high=$n_high, k_low=$k_low, k_high=$k_high sub_mode=$sub_mode]"
#echo "========== new history file " `date` >> log/history.stdout
#cat log/product.stdout >> log/history.stdout
logfile=log/${job_name}${index}-size${na_input}.log
statusfile=log/status-${job_name}${index}-size${na_input}.log
echo check logfile:$logfile statusfile:$statusfile
#index=221 #218-221 - for reduced code.
#index=200-217 for concatenation
#index=1-112 #for hypergraph
make counter_concatenation.out
cp counter_concatenation.out .${job_name}$index.out
# increment index by 1 each time this script is rerun
# the number of simultaneous processes is limited by max_process
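# The per-run copy above gives this run a uniquely named binary
# (.product<index>.out): make can rebuild counter_concatenation.out without
# disturbing runs already executing, and pgrep/pkill can find workers by name.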
case `hostname` in
"Chenrenkov")
folder=data/random2
;;
*)
folder=data_hpcc/random3
;;
esac
# TODO: remove the case above when the job finishes; for now override unconditionally:
folder=data/random3
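# Assumption: the data and log directories may not exist on a fresh checkout;
# if so, create them once before submitting:
# mkdir -p $folder log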
#echo start job on `hostname` size$na_input run$index $max_process/$max_trial `date`
#echo $SLURM_JOB_ID $SLURM_JOB_NAME $SLURM_SUBMIT_DIR
#echo data folder $folder, log file: $logfile
# the node provides 60 cores (matches --cpus-per-task above)
(( num_cores = 60 ))
# cap on simultaneous workers; start slightly above the core count
(( max_process = num_cores + 10 ))
echo start job on $WORK_STATION:`hostname` size$na_input run$index max_process:$max_process/max_trial:$max_trial `date` > $logfile
echo SLURM_JOB_ID:$SLURM_JOB_ID SLURM_JOB_NAME:$SLURM_JOB_NAME SLURM_JOB_DIR:$SLURM_SUBMIT_DIR >> $logfile
echo note:$note, sub_mode:$sub_mode, n_low:$n_low, n_high:$n_high, data_folder:$folder, log_file:$logfile, status_file:$statusfile >> $logfile
# duplicate info to stdout
cat $logfile
cat $logfile > $statusfile
# size 9 32768
# trial counter and next status checkpoint
(( i = 1 ))
(( bi = 2 ))
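# Main loop: keep the pool of background workers topped up to max_process
# until max_trial seeds are used, re-checking the live process count each pass.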
while (( i < max_trial ))
do
# throttle: adapt max_process to how fast workers finish on the available cores
num_process=`pgrep -c ${job_name}`
# echo -n num_process: $num_process ,
# echo max_process: $max_process
if (( num_process < num_cores )); then
# under-subscribed: allow about 10% more workers
(( max_process = max_process + max_process / 10 ))
fi
if (( num_process > num_cores * 2 )); then
# over-subscribed by more than 2x: allow about 10% fewer workers
(( max_process = max_process - max_process / 10 ))
fi
for (( j = num_process ; j < max_process ; j++ ))
do
title=$folder/trial$index-$i
# ./random_concatenation.out 1 $title >>data/result/result$index-$i.log &
#./counter_concatenation.out mode=1 title=$title debug=0 &
#./.product$index.out mode=1 title=$title debug=0 na_input=$na_input seed=$i >> $logfile &
./.product$index.out mode=1 sub_mode=$sub_mode title=$title debug=0 n_low=$n_low n_high=$n_high k_low=$k_low k_high=$k_high seed=$i note=$note >> $logfile &
#>>data/result/result$index-$i.log &
#1 for generate random code
#./random_concatenation.out >>data/result/result$index-$i.log &
if (( i == bi )); then
#echo -n "[$bi]" >> $logfile
# the following is a bit strange and shows different output under less vs cat
#echo -ne ${max_trial} $title `date` \\r
echo $num_process `date` $na_input $title
echo $num_process `date` $na_input $title >> $statusfile
#echo ${max_trial} $title `date` >> $logfile
# advance the checkpoint geometrically: bi -> 2, 4, 7, 11, 16, 23, 32, ... (about +1/3 each time)
(( bi = bi + bi / 3 + 2 ))
fi
(( i++ ))
done
sleep 0.2
done
wait
echo >> $logfile
echo finish job on `hostname` $index $max_process/$max_trial `date` >> $logfile
echo finish job on `hostname` $index $max_process/$max_trial `date`
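
# Usage sketch (assumption: a SLURM cluster with the partitions named above):
#   sbatch product.sh        # batch run; ON_SBATCH=TRUE is exported above
#   ./product.sh             # direct run on the head node or a workstation
# Bump index= before each rerun so logs and binaries do not collide.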