I have been testing I/O performance on LXD instances and recently tried using a partition created through lxd init
instead of a loop device, to see whether performance would improve. To my surprise, however, performance remained relatively slow compared to native performance.
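For reference, the pool was set up during lxd init; the equivalent explicit command would look roughly like this (the device node matches the zpool status output further down, adjust for your own partition):

root@lxd01:~# lxc storage create partition zfs source=/dev/sda4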
I ran the following test on the host (native ext4):
root@lxd01:~# dd if=/dev/urandom of=/root/input bs=128k count=75k
76800+0 records in
76800+0 records out
10066329600 bytes (10 GB, 9.4 GiB) copied, 56.7984 s, 177 MB/s
root@lxd01:~# sync; echo 3 > /proc/sys/vm/drop_caches
root@lxd01:~# dd if=/root/input of=/root/test bs=128k count=75k conv=fdatasync
76800+0 records in
76800+0 records out
10066329600 bytes (10 GB, 9.4 GiB) copied, 113.324 s, 88.8 MB/s
And then the same for the container (ZFS partition):
root@lxd01:~# lxc exec container-test -- dd if=/dev/urandom of=/root/input bs=128k count=75k
76800+0 records in
76800+0 records out
10066329600 bytes (10 GB, 9.4 GiB) copied, 262.79 s, 38.3 MB/s
root@lxd01:~# sync; echo 3 > /proc/sys/vm/drop_caches
root@lxd01:~# lxc exec container-test -- dd if=/root/input of=/root/test bs=128k count=75k conv=fdatasync
76800+0 records in
76800+0 records out
10066329600 bytes (10 GB, 9.4 GiB) copied, 355.324 s, 28.3 MB/s
Though I was expecting some performance difference between ext4 and ZFS, the performance on ZFS is much worse than I had anticipated. Does anyone else have a similar experience?
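In case it's relevant: I haven't tuned any ZFS properties on the pool. The ones that usually matter for sequential dd runs can be checked like this (just reading the current values, nothing has been set by hand):

root@lxd01:~# zfs get recordsize,compression,sync partition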
Output of lxc config show:
architecture: x86_64
config:
  image.architecture: amd64
  image.description: ubuntu 20.04 LTS amd64 (release) (20201210)
  image.label: release
  image.os: ubuntu
  image.release: focal
  image.serial: "20201210"
  image.type: squashfs
  image.version: "20.04"
  limits.cpu: "4"
  limits.memory: 4GB
  limits.memory.enforce: hard
  volatile.base_image: e0c3495ffd489748aa5151628fa56619e6143958f041223cb4970731ef939cb6
  volatile.eth0.host_name: vethb0f298bb
  volatile.eth0.hwaddr: 00:16:3e:3c:36:37
  volatile.idmap.base: "0"
  volatile.idmap.current: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.idmap.next: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.last_state.idmap: '[{"Isuid":true,"Isgid":false,"Hostid":1000000,"Nsid":0,"Maprange":1000000000},{"Isuid":false,"Isgid":true,"Hostid":1000000,"Nsid":0,"Maprange":1000000000}]'
  volatile.last_state.power: RUNNING
  volatile.uuid: 441ff04e-f3e2-4fb9-a3c2-63f627a16a9d
devices:
  root:
    path: /
    pool: partition
    size: 70GB
    type: disk
ephemeral: false
profiles:
- default
stateful: false
description: ""
Output of zpool status:
  pool: partition
 state: ONLINE
  scan: none requested
config:

        NAME        STATE     READ WRITE CKSUM
        partition   ONLINE       0     0     0
          sda4      ONLINE       0     0     0

errors: No known data errors
Some extra information: the machine I'm running on is set up with RAID 6 across 4 SATA disks. Any input is appreciated.
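If it helps, the RAID layout can be confirmed as follows, assuming Linux software RAID (with a hardware controller /proc/mdstat stays empty and the vendor's tool applies instead):

root@lxd01:~# cat /proc/mdstat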