Skip to content

Commit 18febbf

Browse files
committed
Re-add femu-scripts/
1 parent d3bd68a commit 18febbf

28 files changed

+1493
-0
lines changed

femu-scripts/aff.sh

Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
#!/bin/bash
# Pin a run of consecutive PIDs/TIDs to consecutive physical CPUs with
# taskset, e.g. to give each FEMU worker thread its own core.
#
# Usage: ./aff.sh <start_pid> [count] [start_cpu]
#   start_pid - first PID/TID to pin (required)
#   count     - how many consecutive IDs to pin (default 20, as before)
#   start_cpu - first physical CPU to bind to  (default 5,  as before)

if [[ -z "$1" ]]; then
    echo "Usage: $0 <start_pid> [count] [start_cpu]" >&2
    exit 1
fi

spid=$1
count=${2:-20}
start_cpu=${3:-5}

# i walks the PID range, j walks the CPU ids in lock-step.
for ((i = spid, j = start_cpu; i < spid + count; i++, j++)); do
    taskset -cp "$j" "$i"
    echo "----------"
done

femu-scripts/dp-run.sh

Lines changed: 74 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,74 @@
1+
#!/bin/bash
# Launch a FEMU/QEMU VM ("dp" data-plane configuration): hugepage-backed
# guest memory plus a tmpfs-backed raw test disk attached via
# virtio-blk-pci with a dedicated iothread.

IMGDIR=$HOME/images

# Mount a tmpfs for the throwaway SSD image if not already mounted.
# huge=always requests transparent hugepages for the mount.
if ! mount | grep -q "/mnt/tmpfs"; then
    sudo mkdir -p /mnt/tmpfs
    sudo mount -t tmpfs -o size=4G,huge=always tmpfs /mnt/tmpfs
fi

# Start from a fresh SSD image every run. test1.raw is a regular file,
# so rm -f suffices (the old rm -rf was overkill).
sudo rm -f /mnt/tmpfs/test1.raw

# The file was just removed, so the old "[[ ! -e ]] &&" guard was dead
# code: create unconditionally and fail loudly if creation fails.
./qemu-img create -f raw /mnt/tmpfs/test1.raw 4G || {
    echo "Failed to create /mnt/tmpfs/test1.raw" >&2
    exit 1
}

# huge page related settings
#echo 25000 | sudo tee /proc/sys/vm/nr_hugepages

# 2MB hugetlbfs mount backing the memory-backend-file below.
[[ ! -d /dev/hugepages2M ]] && sudo mkdir /dev/hugepages2M && sudo mount -t hugetlbfs none /dev/hugepages2M -o pagesize=2M

# Alternative / debug options kept for reference:
#-device nvme,drive=id0,serial=serial0,id=nvme0 \
#-kernel /home/huaicheng/git/linux/arch/x86_64/boot/bzImage \
#-append "root=/dev/vda1 console=ttyS0,115200n8 console=tty0" \
#-virtfs local,path=/home/huaicheng/share/,security_model=passthrough,mount_tag=host_share \
#-trace events=/tmp/events \   (must come before all other qemu options)
#-display none \
#-monitor stdio \
#-s -S \

sudo x86_64-softmmu/qemu-system-x86_64 \
    -name "nvme-FEMU-test" \
    -enable-kvm \
    -cpu host \
    -smp 16 \
    -m 8G,slots=2,maxmem=32G \
    -object memory-backend-file,id=mem1,size=8G,mem-path=/dev/hugepages2M \
    -device pc-dimm,id=dimm1,memdev=mem1 \
    -drive "file=$IMGDIR/u14s.qcow2,if=ide,aio=native,cache=none,format=qcow2,id=hd0" \
    -drive "file=$IMGDIR/vmdata.qcow2,if=ide,aio=native,cache=none,format=qcow2,id=hd1" \
    -drive file=/mnt/tmpfs/test1.raw,if=none,aio=threads,format=raw,id=id0 \
    -object iothread,id=iothread0 \
    -device virtio-blk-pci,iothread=iothread0,drive=id0 \
    -net user,hostfwd=tcp::8080-:22 \
    -net nic,model=virtio \
    -nographic \
    -qmp unix:./qmp-sock,server,nowait | tee /media/log

# Post-boot helpers kept for reference:
#sleep 10
#./pin.sh
#sshsim "~/tsc.sh"
#sshsim "echo 0 | sudo tee /proc/sys/kernel/timer_migration"
#sshsim "echo 0 | sudo tee /sys/kernel/debug/tracing/tracing_on"

echo "VM is up, enjoy it :)"

wait

femu-scripts/dp.sh

Lines changed: 61 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,61 @@
1+
#!/bin/bash
# Launch a FEMU/QEMU VM with virtio-scsi system/data disks and a
# tmpfs-backed raw test disk attached via virtio-blk-pci with a polling
# iothread (poll-max-ns=32768).

IMGDIR=$HOME/images

# Mount a tmpfs for the throwaway SSD image if not already mounted.
if ! mount | grep -q "/mnt/tmpfs"; then
    sudo mkdir -p /mnt/tmpfs
    sudo mount -t tmpfs -o size=4G tmpfs /mnt/tmpfs
fi

# Start from a fresh SSD image every run (regular file: rm -f suffices).
sudo rm -f /mnt/tmpfs/test1.raw

# Just removed above, so the old existence guard was dead code: create
# unconditionally and fail loudly on error.
./qemu-img create -f raw /mnt/tmpfs/test1.raw 4G || {
    echo "Failed to create /mnt/tmpfs/test1.raw" >&2
    exit 1
}

# Alternative / debug options kept for reference:
#-device nvme,drive=id0,serial=serial0,id=nvme0 \
#-kernel /home/huaicheng/git/linux/arch/x86_64/boot/bzImage \
#-append "root=/dev/vda1 console=ttyS0,115200n8 console=tty0" \
#-virtfs local,path=/home/huaicheng/share/,security_model=passthrough,mount_tag=host_share \
#-trace events=/tmp/events \
#-display none \
#-monitor stdio \
#-s -S \

sudo x86_64-softmmu/qemu-system-x86_64 \
    -name "nvme-FEMU-test" \
    -smp 4 \
    -m 8192 \
    -cpu host \
    -enable-kvm \
    -device virtio-scsi-pci,id=scsi0 \
    -device scsi-hd,drive=hd0 \
    -drive "file=$IMGDIR/u14s.qcow2,if=none,aio=native,cache=none,format=qcow2,id=hd0" \
    -device virtio-scsi-pci,id=scsi1 \
    -device scsi-hd,drive=hd1 \
    -drive "file=$IMGDIR/vmdata.raw,if=none,aio=native,cache=none,format=raw,id=hd1" \
    -drive file=/mnt/tmpfs/test1.raw,if=none,aio=threads,format=raw,id=id0 \
    -object iothread,id=iothread0,poll-max-ns=32768 \
    -device virtio-blk-pci,iothread=iothread0,drive=id0,serial=serial0,id=nvme0 \
    -net user,hostfwd=tcp::8080-:22 \
    -net nic,model=virtio \
    -nographic \
    -qmp unix:./qmp-sock,server,nowait | tee /media/log

# Post-boot helpers kept for reference:
#sleep 10
#./pin.sh
#sshsim "~/tsc.sh"
#sshsim "echo 0 | sudo tee /proc/sys/kernel/timer_migration"
#sshsim "echo 0 | sudo tee /sys/kernel/debug/tracing/tracing_on"

echo "VM is up, enjoy it :)"

wait

femu-scripts/f.sh

Lines changed: 63 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,63 @@
1+
#!/bin/bash
# Launch a FEMU/QEMU VM with two virtio-scsi disks (system + data).
# NOTE(review): a tmpfs-backed test image is prepared below but the
# current QEMU command line does not attach it — kept so the id0 drive
# in the reference comments can be re-enabled easily.

IMGDIR=$HOME/images

# Mount a tmpfs for the test image if not already mounted.
if ! mount | grep -q "/mnt/tmpfs"; then
    sudo mkdir -p /mnt/tmpfs
    # add huge=always for THP-backed tmpfs if desired
    sudo mount -t tmpfs -o size=4G tmpfs /mnt/tmpfs
fi

# Start from a fresh SSD image every run (regular file: rm -f suffices).
sudo rm -f /mnt/tmpfs/test1.raw

# Just removed above, so create unconditionally and fail loudly.
./qemu-img create -f raw /mnt/tmpfs/test1.raw 4G || {
    echo "Failed to create /mnt/tmpfs/test1.raw" >&2
    exit 1
}

# Alternative / debug options kept for reference:
#-object iothread,id=iothread0 \
#-device virtio-blk-pci,iothread=iothread0,drive=id0 \
#-device nvme,drive=id0,serial=serial0,id=nvme0 \
#-kernel /home/huaicheng/git/linux/arch/x86_64/boot/bzImage \
#-append "root=/dev/vda1 console=ttyS0,115200n8 console=tty0" \
#-virtfs local,path=/home/huaicheng/share/,security_model=passthrough,mount_tag=host_share \
#-trace events=/tmp/events \   (must come before all other qemu options)
#-object memory-backend-file,id=mem1,size=8G,mem-path=/dev/hugepages2M \
#-device pc-dimm,id=dimm1,memdev=mem1 \

# u14s.qcow2 now comes from $IMGDIR like every other image (it was
# hard-coded to /home/huaicheng/images before, inconsistent with the
# sibling scripts). No trailing backslash after -nographic: the old
# dangling continuation ran into the commented -qmp line below.
sudo x86_64-softmmu/qemu-system-x86_64 \
    -name "nvme-FEMU-test" \
    -enable-kvm \
    -cpu host \
    -smp 4 \
    -m 8G \
    -device virtio-scsi-pci,id=scsi0 \
    -device scsi-hd,drive=hd0 \
    -drive "file=$IMGDIR/u14s.qcow2,if=none,aio=native,cache=none,format=qcow2,id=hd0" \
    -device virtio-scsi-pci,id=scsi1 \
    -device scsi-hd,drive=hd1 \
    -drive "file=$IMGDIR/vmdata.raw,if=none,aio=native,cache=none,format=raw,id=hd1" \
    -net user,hostfwd=tcp::8080-:22 \
    -net nic,model=virtio \
    -nographic
#-qmp unix:./qmp-sock,server,nowait | tee /media/log
#-display none \
#-monitor stdio \
#-s -S \

# Post-boot helpers kept for reference:
#sleep 10
#./pin.sh
#sshsim "~/tsc.sh"
#sshsim "echo 0 | sudo tee /proc/sys/kernel/timer_migration"
#sshsim "echo 0 | sudo tee /sys/kernel/debug/tracing/tracing_on"

echo "VM is up, enjoy it :)"

wait

femu-scripts/femu-compile.sh

Lines changed: 13 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,13 @@
1+
#!/bin/bash
# Configure and build FEMU (x86_64-softmmu target) with a parallel make
# sized to the number of online CPUs.

# nproc replaces the old "cat /proc/cpuinfo | grep vendor_id | wc -l"
# pipeline; fall back to counting /proc/cpuinfo entries if unavailable.
NRCPUS="$(nproc 2>/dev/null || grep -c "vendor_id" /proc/cpuinfo)"

# May fail harmlessly on a pristine tree (no Makefile yet).
make clean

# Extra flags kept for reference: --extra-cflags=-w --disable-git-update
../configure --enable-kvm --target-list=x86_64-softmmu --disable-werror || {
    echo "===> FEMU configure failed" >&2
    exit 1
}

# The old script ran make and printed the success banner regardless of
# configure/make status; now each step is checked.
make -j "$NRCPUS" || {
    echo "===> FEMU compilation failed" >&2
    exit 1
}

echo ""
echo "===> FEMU compilation done ..."
echo ""

femu-scripts/femu-copy-scripts.sh

Lines changed: 21 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,21 @@
1+
#!/bin/bash
# Huaicheng <huaicheng@cs.uchicago.edu>
# Copy necessary scripts for running FEMU into the current directory.
# Expected to be run from the build-femu/ directory (so that
# ../femu-scripts exists).

FSD="../femu-scripts"

# Scripts/directories to copy (ftk is a directory, hence cp -r below).
CPL=(pkgdep.sh femu-compile.sh run-whitebox.sh run-blackbox.sh run-nossd.sh run-zns.sh pin.sh ftk)

echo ""
echo "==> Copying following FEMU script to current directory:"
for f in "${CPL[@]}"; do
    if [[ ! -e "$FSD/$f" ]]; then
        # Error to stderr with a non-zero exit status (the old bare
        # "exit" reported success on this failure path).
        echo "Make sure you are under build-femu/ directory!" >&2
        exit 1
    fi
    cp -r "$FSD/$f" . && echo " --> $f"
done
echo "Done!"
echo ""
21+

femu-scripts/femu-run.sh

Lines changed: 79 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,79 @@
1+
#!/bin/bash
# Huaicheng Li <huaicheng@cs.uchicago.edu>
# Run VM with FEMU support: two tmpfs-backed raw images exposed to the
# guest as emulated NVMe drives (nvme0 / nvme1).

IMGDIR=$HOME/images

# Create the tmpfs mountpoint; the mount itself is intentionally left
# commented out in this variant (images live wherever /mnt/tmpfs is).
if ! mount | grep -q "/mnt/tmpfs"; then
    sudo mkdir -p /mnt/tmpfs
    # huge=always
    #sudo mount -t tmpfs -o size=4G,huge=always tmpfs /mnt/tmpfs
fi

# Start from fresh SSD images every run (regular files: rm -f suffices).
sudo rm -f /mnt/tmpfs/test1.raw
sudo rm -f /mnt/tmpfs/test2.raw

# Both files were just removed, so the old "[[ ! -e ]] &&" guards were
# dead code: create unconditionally and fail loudly on error.
./qemu-img create -f raw /mnt/tmpfs/test1.raw 4G || {
    echo "Failed to create /mnt/tmpfs/test1.raw" >&2
    exit 1
}
./qemu-img create -f raw /mnt/tmpfs/test2.raw 4G || {
    echo "Failed to create /mnt/tmpfs/test2.raw" >&2
    exit 1
}

# huge page related settings
#echo 25000 | sudo tee /proc/sys/vm/nr_hugepages

# 2MB hugetlbfs mount (used by the commented memory-backend-file below).
[[ ! -d /dev/hugepages2M ]] && sudo mkdir /dev/hugepages2M && sudo mount -t hugetlbfs none /dev/hugepages2M -o pagesize=2M

# Alternative / debug options kept for reference:
#-object iothread,id=iothread0 \
#-device virtio-blk-pci,iothread=iothread0,drive=id0 \
#-kernel /home/huaicheng/git/linux/arch/x86_64/boot/bzImage \
#-append "root=/dev/vda1 console=ttyS0,115200n8 console=tty0" \
#-virtfs local,path=/home/huaicheng/share/,security_model=passthrough,mount_tag=host_share \
#-trace events=/tmp/events \   (must come before all other qemu options)
#-object memory-backend-file,id=mem1,size=8G,mem-path=/dev/hugepages2M \
#-device pc-dimm,id=dimm1,memdev=mem1 \
#-display none \
#-monitor stdio \
#-s -S \

sudo x86_64-softmmu/qemu-system-x86_64 \
    -name "nvme-FEMU-test" \
    -enable-kvm \
    -cpu host \
    -smp 4 \
    -m 8G \
    -device virtio-scsi-pci,id=scsi0 \
    -device scsi-hd,drive=hd0 \
    -drive "file=$IMGDIR/u14s.qcow2,if=none,aio=native,cache=none,format=qcow2,id=hd0" \
    -device virtio-scsi-pci,id=scsi1 \
    -device scsi-hd,drive=hd1 \
    -drive "file=$IMGDIR/vmdata.qcow2,if=none,aio=native,cache=none,format=qcow2,id=hd1" \
    -drive file=/mnt/tmpfs/test1.raw,if=none,aio=threads,format=raw,id=id0 \
    -device nvme,drive=id0,serial=serial0,id=nvme0 \
    -drive file=/mnt/tmpfs/test2.raw,if=none,aio=threads,format=raw,id=id1 \
    -device nvme,drive=id1,serial=serial1,id=nvme1 \
    -net user,hostfwd=tcp::8080-:22 \
    -net nic,model=virtio \
    -nographic \
    -qmp unix:./qmp-sock,server,nowait | tee /media/log

# Post-boot helpers kept for reference:
#sleep 10
#./pin.sh
#sshsim "~/tsc.sh"
#sshsim "echo 0 | sudo tee /proc/sys/kernel/timer_migration"
#sshsim "echo 0 | sudo tee /sys/kernel/debug/tracing/tracing_on"

echo "VM is up, enjoy it :)"

wait

femu-scripts/ftk/qmp-vcpu-pin

Lines changed: 45 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,45 @@
1+
#!/usr/bin/env python3
# QEMU vCPU pinning tool
#
# Copyright (C) 2016 Red Hat Inc.
#
# Authors:
#  Maxime Coquelin <maxime.coquelin@redhat.com>
#
# This work is licensed under the terms of the GNU GPL, version 2. See
# the COPYING file in the top-level directory
#
# Queries a running QEMU instance over QMP for its vCPU threads and
# pins each one to a physical CPU from the list given on the command
# line, round-robin. Ported from Python 2 print statements to Python 3
# (behavior otherwise unchanged; unused "import json" dropped).
import argparse
import os

from subprocess import call
from qmp import QEMUMonitorProtocol

# Thread ids already pinned, to avoid pinning the same tid twice.
pinned = []

parser = argparse.ArgumentParser(description='Pin QEMU vCPUs to physical CPUs')
parser.add_argument('-s', '--server', type=str, required=True,
                    help='QMP server path or address:port')
parser.add_argument('cpu', type=int, nargs='+',
                    help='Physical CPUs IDs')
args = parser.parse_args()

# Swallow taskset's stdout; only this tool's own messages are printed.
devnull = open(os.devnull, 'w')

srv = QEMUMonitorProtocol(args.server)
srv.connect()

for vcpu in srv.command('query-cpus'):
    vcpuid = vcpu['CPU']
    tid = vcpu['thread_id']
    if tid in pinned:
        print('vCPU{}\'s tid {} already pinned, skipping'.format(vcpuid, tid))
        continue

    # Round-robin assignment over the supplied physical CPU list.
    cpuid = args.cpu[vcpuid % len(args.cpu)]
    print('Pin vCPU {} (tid {}) to physical CPU {}'.format(vcpuid, tid, cpuid))
    try:
        call(['taskset', '-pc', str(cpuid), str(tid)], stdout=devnull)
        pinned.append(tid)
    except OSError:
        print('Failed to pin vCPU{} to CPU{}'.format(vcpuid, cpuid))

0 commit comments

Comments
 (0)