smoke.sh
#!/usr/bin/env bash
set -e;
M=/mnt;       # client mount point
P=/build;     # build prefix: bricks, dbench log and gluster binaries live under it
H=$(hostname);   # this host acts as the server for all bricks
T=600;        # watchdog timeout in seconds
V=patchy;     # volume name
export PATH=$PATH:$P/install/sbin
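# Stop any leftover gluster processes, unmount the client and wipe all
# state from previous runs.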
function cleanup()
{
    killall -15 glusterfs glusterfsd glusterd 2>/dev/null || true;
    killall -9 glusterfs glusterfsd glusterd 2>/dev/null || true;
    umount -l $M 2>/dev/null || true;
    rm -rf $P/dbench-logs;
    rm -rf /var/lib/glusterd /var/log/glusterfs/* /etc/glusterd $P/export;
}
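# Bring up glusterd, create a 2 x (2+1) arbiter volume from six local
# bricks (write-behind disabled) and FUSE-mount it on $M.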
function start_fs_with_arbiter()
{
    mkdir -p $P/export;
    chmod 0755 $P/export;
    glusterd;
    gluster --mode=script volume create $V replica 3 arbiter 1 $H:$P/export/export{1,2,3,4,5,6} force;
    gluster volume start $V;
    gluster volume set $V performance.write-behind off;
    glusterfs -s $H --volfile-id $V $M;
    # mount -t glusterfs $H:/$V $M;
}
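# Bring up glusterd, create a 2 x (4+2) dispersed volume from twelve local
# bricks (write-behind disabled) and FUSE-mount it on $M.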
function start_fs_with_disperse()
{
    mkdir -p $P/export;
    chmod 0755 $P/export;
    glusterd;
    gluster --mode=script volume create $V disperse 6 redundancy 2 $H:$P/export/export{1,2,3,4,5,6,7,8,9,10,11,12} force;
    gluster volume start $V;
    gluster volume set $V performance.write-behind off;
    glusterfs -s $H --volfile-id $V $M;
    # mount -t glusterfs $H:/$V $M;
}
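# Run dbench and the POSIX compliance suite in parallel on the mount.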
function run_tests()
{
    cd $M;
    (sleep 1; dbench -s -t 60 10 > $P/dbench-logs) &
    (sleep 1; /opt/qa/tools/posix_compliance.sh) &
    # The watchdog is job %1, so the tests are jobs %2 and %3; with set -e
    # a failing test aborts the script and fires the EXIT trap.
    wait %2
    wait %3
    rm -rf clients;    # scratch directory left behind by dbench
    cd -;
}
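# Watchdog: after $1 seconds, capture a core and statedumps from the
# client, then kill everything so the blocked test run exits.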
function watchdog ()
{
    # insurance against hangs during the test
    sleep $1;
    echo "Kicking in watchdog after $1 secs";
    # Get a core of the client process
    set -x
    local client_pid=$(ps aux | grep glusterfs | grep -v glusterfsd | grep $V | awk '{print $2}' | head -1)
    if [ -n "$client_pid" ]; then gcore -o /var/log/glusterfs/gluster-gdb.core $client_pid; fi
    # Get statedumps
    local mount_pid=$(ps auxww | grep glusterfs | grep -E "volfile-id[ =]/?$V " | awk '{print $2}' | head -1)
    if [ -n "$mount_pid" ]; then kill -USR1 $mount_pid; fi
    gluster volume statedump $V
    sleep 5;    # give the statedumps some time to be written out
    # Kill the gluster processes so the finish trap is triggered
    killall -15 glusterfs glusterfsd glusterd 2>/dev/null || true;
    killall -9 glusterfs glusterfsd glusterd 2>/dev/null || true;
    umount -l $M 2>/dev/null || true;
    set +x
}
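# EXIT trap: dump the dbench log on failure, archive /var/log/glusterfs and
# upload the tarball to the log collector, then clean up.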
function finish ()
{
    RET=$?
    if [ $RET -ne 0 ]; then
        cat $P/dbench-logs || true
    fi
    # Move statedumps to be archived
    mv /var/run/gluster/*dump* /var/log/glusterfs/ || true
    tar -czf $WORKSPACE/glusterfs-logs.tgz /var/log/glusterfs /var/log/messages* || true
    scp -o "UserKnownHostsFile=/dev/null" -o "StrictHostKeyChecking=no" -i $LOG_KEY $WORKSPACE/glusterfs-logs.tgz "_logs-collector@http.int.rht.gluster.org:/var/www/glusterfs-logs/$JOB_NAME-logs-$BUILD_ID.tgz" || true;
    echo "Logs stored in https://ci-logs.gluster.org/$JOB_NAME-logs-$BUILD_ID.tgz";
    cleanup;
    kill %1 2>/dev/null || true;    # stop the watchdog if it is still running
}
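# Arm the watchdog, run the arbiter smoke test and clean up.  The disperse
# run is currently disabled.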
function main ()
{
    cleanup;
    watchdog $T &    # job %1
    trap finish EXIT;
    set -x;
    start_fs_with_arbiter;
    run_tests;
    cleanup;
    #start_fs_with_disperse;
    #run_tests;
}
main "$@";