#!/bin/bash
#
# script to create cpusets to isolate CPU threads for KVM/libvirt VMs
# requires 'cset'
# available on Fedora through my COPR:
# - https://copr.fedorainfracloud.org/coprs/jlay/cpuset/
# review the comments below regarding CPUs and XML entries
#
# this script:
# - isolates host tasks from the VM CPUs
# - applies some generic tweaks
# - resets host isolation on VM shutdown
#
# define the CPUs (cores/threads) to isolate for the VM
VM_CORES='0-20,24-44'
# Tip: refer to 'virsh capabilities | grep siblings'
# this will show CPU topology information for the host
# and allows one to pick an optimal layout for the VM
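#
# purely illustrative sibling lines (IDs and attributes vary by host and
# libvirt version); siblings='0,24' means threads 0 and 24 share a core:
#   <cpu id='0' socket_id='0' core_id='0' siblings='0,24'/>
#   <cpu id='24' socket_id='0' core_id='0' siblings='0,24'/>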
#
# the pinned CPUs are defined like so in the VM libvirt XML:
# <vcpupin vcpu='0' cpuset='0'/>
# <vcpupin vcpu='1' cpuset='24'/>
# <vcpupin vcpu='2' cpuset='1'/>
# <vcpupin vcpu='3' cpuset='25'/>
# [...]
# <emulatorpin cpuset='10-11'/>
#
# NOTE: the pinned emulator CPUs must be included in VM_CORES
# and probably any pinned iothreads too
#
# the example above is based on a Threadripper 3960X:
# - one NUMA node
# - SMT enabled
#
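# rough math for the layout above (assuming the 0/24, 1/25, ... sibling
# pairing shown in the pinning example): VM_CORES='0-20,24-44' hands 21 of
# the 3960X's 24 cores (42 threads) to the shield, leaving cores 21-23
# (threads 21-23 and 45-47) for host tasks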

# reduce kernel jitter
sudo sh -c 'sysctl vm.stat_interval=120 ; sysctl kernel.watchdog=0'
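# note: nothing below puts these back; the usual stock values (an assumption,
# verify on the host) are vm.stat_interval=1 and kernel.watchdog=1, e.g.:
# sudo sh -c 'sysctl vm.stat_interval=1 ; sysctl kernel.watchdog=1'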

# attempt to reset shielding and remove any slices left over from previous
# VM boots, so the shield gets re-created with the current params
# (such as possibly-changed pinned CPUs)
sudo cset shield --reset --sysset=host.slice --userset=windows10.slice || echo "something happened resetting shielding"

# destroy old cpusets
sudo cset set -d windows10.slice || echo "ignoring failure to destroy VM cpuset"
sudo cset set -d host.slice || echo "ignoring failure to destroy host cpuset"

# shield cores
# the userset slice must match the partition defined in the libvirt XML, e.g.:
#
# </cputune>
# <resource>
#   <partition>/windows10</partition>
# </resource>
# <os>
sudo cset shield -c "$VM_CORES" --sysset=host.slice --userset=windows10.slice
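# optional sanity check (not part of the original flow): listing the cpusets
# should show host.slice and windows10.slice with the expected CPU ranges
# sudo cset set -l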

# start the VM
virsh start windows10

# determine QEMU vCPU PIDs
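# the PIDs come from libvirt's runtime status XML; for illustration (format
# may differ across libvirt versions), the matched lines look roughly like
#   <vcpu id='0' pid='12345'/>
# and cut -d\' -f4 pulls out the pid value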
QEMU_PIDS=$(sudo egrep 'vcpu.*pid' /var/run/libvirt/qemu/windows10.xml | cut -d\' -f4)

# set higher priority for vCPU PIDs
sudo renice -20 -p ${QEMU_PIDS}

# either of these attempts at real-time scheduling seems to freeze on boot
# try a different kernel (not 5.8.11)
#for CPUPID in ${QEMU_PIDS}; do
## sudo chrt -f -p 99 ${CPUPID}
# sudo chrt -r -p 99 ${CPUPID}
#done
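# for reference: chrt -f requests SCHED_FIFO and chrt -r requests SCHED_RR,
# both here at real-time priority 99, for each vCPU thread PID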

echo "Waiting for windows10 VM to stop before resetting cpusets"
while true; do
    # chill a bit
    sleep 10
    # get vm state, check if it's off
    if test -f "/var/run/libvirt/qemu/windows10.xml"; then
        VM_STATE="on" # doesn't do much (yet?), just here to fill a spot
    else
        VM_STATE="off"
        echo -e "windows10 VM shut down, setting cpusets back to normal\n"
        sudo cset shield --reset --sysset=host.slice --userset=windows10.slice
        exit 0
    fi
done