From 937bc35df74d46968471cc1355e04c9f90898dc8 Mon Sep 17 00:00:00 2001
From: Paul Buetow
Date: Sun, 13 Jul 2025 16:49:44 +0300
Subject: Update content for html

---
 about/resources.html                               |  200 +-
 ...04-09-jails-and-zfs-on-freebsd-with-puppet.html |    1 +
 ...22-07-30-lets-encrypt-with-openbsd-and-rex.html |    1 +
 .../2024-01-13-one-reason-why-i-love-openbsd.html  |    1 +
 ...-04-01-KISS-high-availability-with-OpenBSD.html |    1 +
 ...4-11-17-f3s-kubernetes-with-freebsd-part-1.html |    2 +
 ...4-12-03-f3s-kubernetes-with-freebsd-part-2.html |    2 +
 ...5-02-01-f3s-kubernetes-with-freebsd-part-3.html |    2 +
 ...5-04-05-f3s-kubernetes-with-freebsd-part-4.html |    2 +
 ...5-05-11-f3s-kubernetes-with-freebsd-part-5.html |    6 +-
 ...5-07-14-f3s-kubernetes-with-freebsd-part-6.html | 1824 ++++++++++++
 .../DRAFT-f3s-kubernetes-with-freebsd-part-6.html  | 3028 --------------------
 gemfeed/atom.xml                                   | 2014 +++++++++++--
 .../f3s-kubernetes-with-freebsd-part-6/zrepl.png   |  Bin 0 -> 166760 bytes
 gemfeed/index.html                                 |    1 +
 index.html                                         |    3 +-
 uptime-stats.html                                  |   32 +-
 17 files changed, 3795 insertions(+), 3325 deletions(-)
 create mode 100644 gemfeed/2025-07-14-f3s-kubernetes-with-freebsd-part-6.html
 delete mode 100644 gemfeed/DRAFT-f3s-kubernetes-with-freebsd-part-6.html
 create mode 100644 gemfeed/f3s-kubernetes-with-freebsd-part-6/zrepl.png

diff --git a/about/resources.html b/about/resources.html
index 4999a93b..bce81a04 100644
--- a/about/resources.html
+++ b/about/resources.html
@@ -50,107 +50,107 @@ In random order:<br />


Technical references



I didn't read them from beginning to end, but I use them to look things up. The books are in random order:<br />


Self-development and soft-skills books



In random order:


Here are my notes for some of the books<br />

@@ -159,31 +159,31 @@ Some of these were in-person with exams; others were online learning lectures only. In random order:


Technical guides



These are not whole books, but guides (smaller or larger) which I found very useful. In random order:<br />


Podcasts



@@ -192,60 +192,60 @@ In random order:


Podcasts I liked



I liked these podcasts, but I no longer listen to them. They have either "finished" (no more episodes) or I stopped following them due to time constraints or a shift in my interests.<br />


Newsletters I like



This is a mix of tech and non-tech newsletters I am subscribed to. In random order:


Magazines I like(d)



This is a mix of tech magazines I like(d). I may not be a current subscriber, but now and then I buy an issue. In random order:<br />


Formal education



diff --git a/gemfeed/2016-04-09-jails-and-zfs-on-freebsd-with-puppet.html b/gemfeed/2016-04-09-jails-and-zfs-on-freebsd-with-puppet.html
index 42edf3f1..4205e8e4 100644
--- a/gemfeed/2016-04-09-jails-and-zfs-on-freebsd-with-puppet.html
+++ b/gemfeed/2016-04-09-jails-and-zfs-on-freebsd-with-puppet.html
@@ -413,6 +413,7 @@ Notice: Finished catalog run in 206.09 seconds<br />
Other *BSD related posts are:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2022-07-30-lets-encrypt-with-openbsd-and-rex.html b/gemfeed/2022-07-30-lets-encrypt-with-openbsd-and-rex.html
index c1b6b68d..3a9bda14 100644
--- a/gemfeed/2022-07-30-lets-encrypt-with-openbsd-and-rex.html
+++ b/gemfeed/2022-07-30-lets-encrypt-with-openbsd-and-rex.html
@@ -692,6 +692,7 @@ rex commons<br />
Other *BSD related posts are:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2024-01-13-one-reason-why-i-love-openbsd.html b/gemfeed/2024-01-13-one-reason-why-i-love-openbsd.html
index ebd1e99d..4247f231 100644
--- a/gemfeed/2024-01-13-one-reason-why-i-love-openbsd.html
+++ b/gemfeed/2024-01-13-one-reason-why-i-love-openbsd.html
@@ -70,6 +70,7 @@ $ doas reboot # Just in case, reboot one more time
Other *BSD related posts are:<br />

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2024-04-01-KISS-high-availability-with-OpenBSD.html b/gemfeed/2024-04-01-KISS-high-availability-with-OpenBSD.html
index 26ae0590..abd8b509 100644
--- a/gemfeed/2024-04-01-KISS-high-availability-with-OpenBSD.html
+++ b/gemfeed/2024-04-01-KISS-high-availability-with-OpenBSD.html
@@ -331,6 +331,7 @@ http://www.gnu.org/software/src-highlite --><br />
Other *BSD and KISS related posts are:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2024-11-17-f3s-kubernetes-with-freebsd-part-1.html b/gemfeed/2024-11-17-f3s-kubernetes-with-freebsd-part-1.html
index 27f58263..c3b7ccf8 100644
--- a/gemfeed/2024-11-17-f3s-kubernetes-with-freebsd-part-1.html
+++ b/gemfeed/2024-11-17-f3s-kubernetes-with-freebsd-part-1.html
@@ -26,6 +26,7 @@ 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts<br />
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -177,6 +178,7 @@
Other *BSD-related posts:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2024-12-03-f3s-kubernetes-with-freebsd-part-2.html b/gemfeed/2024-12-03-f3s-kubernetes-with-freebsd-part-2.html
index 8aa50e18..00a90744 100644
--- a/gemfeed/2024-12-03-f3s-kubernetes-with-freebsd-part-2.html
+++ b/gemfeed/2024-12-03-f3s-kubernetes-with-freebsd-part-2.html
@@ -26,6 +26,7 @@ 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts<br />
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -356,6 +357,7 @@ dev.cpu.0.freq: 2922
Other *BSD-related posts:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2025-02-01-f3s-kubernetes-with-freebsd-part-3.html b/gemfeed/2025-02-01-f3s-kubernetes-with-freebsd-part-3.html
index bf3b1e99..9b145edc 100644
--- a/gemfeed/2025-02-01-f3s-kubernetes-with-freebsd-part-3.html
+++ b/gemfeed/2025-02-01-f3s-kubernetes-with-freebsd-part-3.html
@@ -22,6 +22,7 @@ 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts (You are currently reading this)<br />
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -417,6 +418,7 @@ Jan 26 17:36:32 f2 apcupsd[2159]: apcupsd shutdown succeeded
Other BSD related posts are:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts (You are currently reading this)
diff --git a/gemfeed/2025-04-05-f3s-kubernetes-with-freebsd-part-4.html b/gemfeed/2025-04-05-f3s-kubernetes-with-freebsd-part-4.html
index 536097cb..0c3eecd6 100644
--- a/gemfeed/2025-04-05-f3s-kubernetes-with-freebsd-part-4.html
+++ b/gemfeed/2025-04-05-f3s-kubernetes-with-freebsd-part-4.html
@@ -22,6 +22,7 @@ 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts<br />
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs (You are currently reading this)
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -598,6 +599,7 @@ Apr 4 23:
Other *BSD-related posts:<br />

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs (You are currently reading this)
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2025-05-11-f3s-kubernetes-with-freebsd-part-5.html b/gemfeed/2025-05-11-f3s-kubernetes-with-freebsd-part-5.html
index 05aa458f..f2721f86 100644
--- a/gemfeed/2025-05-11-f3s-kubernetes-with-freebsd-part-5.html
+++ b/gemfeed/2025-05-11-f3s-kubernetes-with-freebsd-part-5.html
@@ -26,6 +26,7 @@ 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts<br />
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network (You are currently reading this)
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -1000,10 +1001,13 @@ peer: 2htXdNcxzpI2FdPDJy4T4VGtm1wpMEQu1AkQHjNY6F8=
Having a mesh network on our hosts is great for securing all the traffic between them for our future k3s setup. A self-managed WireGuard mesh network is better than Tailscale as it eliminates reliance on a third party and provides full control over the configuration. It reduces unnecessary abstraction and "magic," enabling easier debugging and ensuring full ownership of our network.

-I look forward to the next blog post in this series. We may start setting up k3s or take a first look at the NFS server (for persistent storage) side of things. I hope you liked all the posts so far in this series.
+Read the next post of this series:
+
+f3s: Kubernetes with FreeBSD - Part 6: Storage

Other *BSD-related posts:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network (You are currently reading this)
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
diff --git a/gemfeed/2025-07-14-f3s-kubernetes-with-freebsd-part-6.html b/gemfeed/2025-07-14-f3s-kubernetes-with-freebsd-part-6.html
new file mode 100644
index 00000000..327407c9
--- /dev/null
+++ b/gemfeed/2025-07-14-f3s-kubernetes-with-freebsd-part-6.html
@@ -0,0 +1,1824 @@
+
+
+
+
+f3s: Kubernetes with FreeBSD - Part 6: Storage
+
+
+
+
+

+Home | Markdown | Gemini +

+

f3s: Kubernetes with FreeBSD - Part 6: Storage


+
+Published at 2025-07-13T16:44:29+03:00
+
+This is the sixth blog post in the f3s series about self-hosting in a home lab. f3s? The "f" stands for FreeBSD, and the "3s" stands for k3s, the Kubernetes distribution used on FreeBSD-based physical machines.<br />
+
+2024-11-17 f3s: Kubernetes with FreeBSD - Part 1: Setting the stage
+2024-12-03 f3s: Kubernetes with FreeBSD - Part 2: Hardware and base installation
+2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
+2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
+2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage (You are currently reading this)
+
+f3s logo
+
+

Table of Contents


+
+
+

Introduction


+
+In the previous posts, we laid the groundwork for a FreeBSD-based Kubernetes cluster using k3s. While the base system works well, Kubernetes workloads often require persistent storage for databases, configuration files, and application data. Local storage on each node has significant limitations:<br />
+
+
    +
  • No data sharing: Pods (once we run Kubernetes) on different nodes can't access the same data
  • +
  • Pod mobility: If a pod moves to another node, it loses access to its data
  • +
  • No redundancy: Hardware failure means data loss
  • +

+This post implements a robust storage solution using:
+
+
    +
  • CARP: For high availability with automatic IP failover
  • +
  • NFS over stunnel: For secure, encrypted network storage
  • +
  • ZFS: For data integrity, encryption, and efficient snapshots
  • +
  • zrepl: For continuous ZFS replication between nodes
  • +

+The result is a highly available, encrypted storage system that survives node failures while providing shared storage to all Kubernetes pods.
+
+Contrary to what was mentioned in the first post of this blog series, we aren't using HAST but zrepl for data replication. More on that later in this post.<br />
+
+

Additional storage capacity


+
+We add 1 TB of additional storage to each of the nodes (f0, f1, f2) in the form of an SSD drive. The Beelink mini PCs have enough space in the chassis for the extra drive.<br />
+
+
+
+Upgrading the storage was as easy as unscrewing the case, plugging the drive in, and screwing it back together. The procedure was uneventful! We're using two different SSD models (Samsung 870 EVO and Crucial BX500) to avoid simultaneous failures from the same manufacturing batch.<br />
+
+We then create the zdata ZFS pool on all three nodes:
+
+ +
paul@f0:~ % doas zpool create -m /data zdata /dev/ada1
+paul@f0:~ % zpool list
+NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
+zdata   928G  12.1M   928G        -         -     0%     0%  1.00x    ONLINE  -
+zroot   472G  29.0G   443G        -         -     0%     6%  1.00x    ONLINE  -
+
+paul@f0:/ % doas camcontrol devlist
+<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
+<Samsung SSD 870 EVO 1TB SVT03B6Q>  at scbus1 target 0 lun 0 (pass1,ada1)
+paul@f0:/ %
+
+
+To verify that we have a different SSD on the second node (the third node has the same drive as the first):
+
+ +
paul@f1:/ % doas camcontrol devlist
+<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
+<CT1000BX500SSD1 M6CR072>          at scbus1 target 0 lun 0 (pass1,ada1)
+
+
+

ZFS encryption keys


+
+ZFS native encryption requires encryption keys to unlock datasets. We need a secure method to store these keys that balances security with operational needs:
+
+
    +
  • Security: Keys must not be stored on the same disks they encrypt
  • +
  • Availability: Keys must be available at boot for automatic mounting
  • +
  • Portability: Keys should be easily moved between systems for recovery
  • +

+Using USB flash drives as hardware key storage provides a convenient and elegant solution. The encrypted data is unreadable without physical access to the USB key, protecting against disk theft or improper disposal. In production environments, you may use enterprise key management systems; however, for a home lab, USB keys offer good security with minimal complexity.
+
+

UFS on USB keys


+
+We'll format the USB drives with UFS (Unix File System) rather than ZFS for simplicity; ZFS features would be overkill for a handful of small key files.<br />
+
+Let's see the USB keys:
+
+USB keys
+
+To verify that the USB key (flash disk) is there:
+
+
+paul@f0:/ % doas camcontrol devlist
+<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
+<Samsung SSD 870 EVO 1TB SVT03B6Q>  at scbus1 target 0 lun 0 (pass1,ada1)
+<Generic Flash Disk 8.07>          at scbus2 target 0 lun 0 (da0,pass2)
+paul@f0:/ %
+
+
+Let's create the UFS file system and mount it (done on all three nodes f0, f1 and f2):
+
+ +
paul@f0:/ % doas newfs /dev/da0
+/dev/da0: 15000.0MB (30720000 sectors) block size 32768, fragment size 4096
+        using 24 cylinder groups of 625.22MB, 20007 blks, 80128 inodes.
+        with soft updates
+super-block backups (for fsck_ffs -b #) at:
+ 192, 1280640, 2561088, 3841536, 5121984, 6402432, 7682880, 8963328, 10243776,
+11524224, 12804672, 14085120, 15365568, 16646016, 17926464, 19206912, 20487360,
+...
+
+paul@f0:/ % echo '/dev/da0 /keys ufs rw 0 2' | doas tee -a /etc/fstab
+/dev/da0 /keys ufs rw 0 2
+paul@f0:/ % doas mkdir /keys
+paul@f0:/ % doas mount /keys
+paul@f0:/ % df | grep keys
+/dev/da0             14877596       8  13687384     0%    /keys
+
+
+USB keys stuck in
+
+

Generating encryption keys


+
+The following keys will later be used to encrypt the ZFS file systems. They will be stored on all three nodes, serving as a backup in case one of the keys is lost or corrupted. When we later replicate encrypted ZFS volumes from one node to another, the keys must also be available on the destination node.
+
+
+paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:bhyve.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:bhyve.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:bhyve.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:zdata.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:zdata.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:zdata.key 32
+paul@f0:/keys % doas chown root *
+paul@f0:/keys % doas chmod 400 *
+
+paul@f0:/keys % ls -l
+total 20
+-r--------  1 root wheel 32 May 25 13:07 f0.lan.buetow.org:bhyve.key
+-r--------  1 root wheel 32 May 25 13:07 f1.lan.buetow.org:bhyve.key
+-r--------  1 root wheel 32 May 25 13:07 f2.lan.buetow.org:bhyve.key
+-r--------  1 root wheel 32 May 25 13:07 f0.lan.buetow.org:zdata.key
+-r--------  1 root wheel 32 May 25 13:07 f1.lan.buetow.org:zdata.key
+-r--------  1 root wheel 32 May 25 13:07 f2.lan.buetow.org:zdata.key
+
+
+After creation, these are copied to the other two nodes, f1 and f2, into the /keys partition (I won't provide the commands here; create a tarball, copy it over, and extract it on the destination nodes).
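+
+For the record, the copy could look roughly like this (a sketch, assuming SSH access between the nodes; adapt paths as needed):<br />
+
+paul@f0:/keys % doas tar -cpf /tmp/keys.tar -C /keys .
+paul@f0:/keys % scp /tmp/keys.tar f1:/tmp/ && scp /tmp/keys.tar f2:/tmp/
+# On f1 and f2 (after creating and mounting /keys as shown above):
+paul@f1:~ % doas tar -xpf /tmp/keys.tar -C /keys
+paul@f1:~ % doas rm /tmp/keys.tar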
+
+

Configuring zdata ZFS pool encryption


+
+Let's encrypt our zdata ZFS pool. We are not encrypting the whole pool, but everything within the zdata/enc data set:
+
+ +
paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o \
+  keylocation=file:///keys/`hostname`:zdata.key zdata/enc
+paul@f0:/ % zfs list | grep zdata
+zdata                                          836K   899G    96K  /data
+zdata/enc                                      200K   899G   200K  /data/enc
+
+paul@f0:/keys % zfs get all zdata/enc | grep -E -i '(encryption|key)'
+zdata/enc  encryption            aes-256-gcm                               -
+zdata/enc  keylocation           file:///keys/f0.lan.buetow.org:zdata.key  local
+zdata/enc  keyformat             raw                                       -
+zdata/enc  encryptionroot        zdata/enc                                 -
+zdata/enc  keystatus             available                                 -
+
+
+All future data sets within zdata/enc will inherit the same encryption key.
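+
+To see the inheritance in action, a quick check along these lines should confirm it (the demo dataset name is hypothetical):<br />
+
+paul@f0:~ % doas zfs create zdata/enc/demo
+paul@f0:~ % zfs get -H -o value encryptionroot,keystatus zdata/enc/demo
+zdata/enc
+available
+paul@f0:~ % doas zfs destroy zdata/enc/demo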
+
+

Migrating Bhyve VMs to an encrypted bhyve ZFS volume


+
+We set up Bhyve VMs in a previous blog post. Their ZFS datasets live on zroot, the default ZFS pool on the internal 512GB NVMe drive. They aren't encrypted yet, so we now encrypt the VM datasets as well. To do so, we first shut down the VMs on all three nodes:<br />
+
+ +
paul@f0:/keys % doas vm stop rocky
+Sending ACPI shutdown to rocky
+
+paul@f0:/keys % doas vm list
+NAME     DATASTORE  LOADER     CPU  MEMORY  VNC  AUTO     STATE
+rocky    default    uefi       4    14G     -    Yes [1]  Stopped
+
+
+After this, we rename the unencrypted dataset to _old, snapshot it as @hamburger, and create a new encrypted dataset:<br />
+
+ +
paul@f0:/keys % doas zfs rename zroot/bhyve zroot/bhyve_old
+paul@f0:/keys % doas zfs set mountpoint=/mnt zroot/bhyve_old
+paul@f0:/keys % doas zfs snapshot zroot/bhyve_old/rocky@hamburger
+
+paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o \
+  keylocation=file:///keys/`hostname`:bhyve.key zroot/bhyve
+paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve zroot/bhyve
+paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve/rocky zroot/bhyve/rocky
+
+
+Once done, we import the snapshot into the encrypted dataset and also copy some other metadata files from vm-bhyve back over.
+
+
+paul@f0:/keys % doas zfs send zroot/bhyve_old/rocky@hamburger | \
+  doas zfs recv zroot/bhyve/rocky
+paul@f0:/keys % doas cp -Rp /mnt/.config /zroot/bhyve/
+paul@f0:/keys % doas cp -Rp /mnt/.img /zroot/bhyve/
+paul@f0:/keys % doas cp -Rp /mnt/.templates /zroot/bhyve/
+paul@f0:/keys % doas cp -Rp /mnt/.iso /zroot/bhyve/
+
+
+We also have to make encrypted ZFS data sets mount automatically on boot:
+
+ +
paul@f0:/keys % doas sysrc zfskeys_enable=YES
+zfskeys_enable:  -> YES
+paul@f0:/keys % doas vm init
+paul@f0:/keys % doas reboot
+.
+.
+.
+paul@f0:~ % doas vm list
+NAME     DATASTORE  LOADER     CPU  MEMORY  VNC           AUTO     STATE
+rocky    default    uefi       4    14G     0.0.0.0:5900  Yes [1]  Running (2265)
+
+
+As you can see, the VM is running. This means the encrypted zroot/bhyve was mounted successfully after the reboot! Now we can destroy the old, unencrypted, and now unused bhyve dataset:
+
+ +
paul@f0:~ % doas zfs destroy -R zroot/bhyve_old
+
+
+To verify once again that zroot/bhyve and zroot/bhyve/rocky are now both encrypted, we run:
+
+ +
paul@f0:~ % zfs get all zroot/bhyve | grep -E '(encryption|key)'
+zroot/bhyve  encryption            aes-256-gcm                               -
+zroot/bhyve  keylocation           file:///keys/f0.lan.buetow.org:bhyve.key  local
+zroot/bhyve  keyformat             raw                                       -
+zroot/bhyve  encryptionroot        zroot/bhyve                               -
+zroot/bhyve  keystatus             available                                 -
+
+paul@f0:~ % zfs get all zroot/bhyve/rocky | grep -E '(encryption|key)'
+zroot/bhyve/rocky  encryption            aes-256-gcm            -
+zroot/bhyve/rocky  keylocation           none                   default
+zroot/bhyve/rocky  keyformat             raw                    -
+zroot/bhyve/rocky  encryptionroot        zroot/bhyve            -
+zroot/bhyve/rocky  keystatus             available              -
+
+
+

ZFS Replication with zrepl


+
+Data replication is the cornerstone of high availability. While CARP handles IP failover (see later in this post), we need continuous data replication to ensure the backup server has current data when it becomes active. Without replication, failover would result in data loss or require shared storage (like iSCSI), which introduces a single point of failure.
+
+

Understanding Replication Requirements


+
+Our storage system has different replication needs:
+
+
    +
  • NFS data (/data/nfs/k3svolumes): Soon, it will contain active Kubernetes persistent volumes. Needs frequent replication (every minute) to minimise data loss during failover.
  • +
  • VM data (/zroot/bhyve/fedora): Contains VM images that change less frequently. Can tolerate longer replication intervals (every 10 minutes).
  • +

+The 1-minute replication window is perfectly acceptable for my personal use cases. This isn't a high-frequency trading system or a real-time database—it's storage for personal projects, development work, and home lab experiments. Losing at most 1 minute of work in a disaster scenario is a reasonable trade-off for the reliability and simplicity of snapshot-based replication. Additionally, in the case of a "1 minute of data loss," I would likely still have the data available on the client side.
+
+Why use zrepl instead of HAST? While HAST (Highly Available Storage) is FreeBSD's native solution for high-availability storage and supports synchronous replication—thus eliminating the mentioned 1-minute window—I've chosen zrepl for several important reasons:
+
+
    +
  • HAST can cause ZFS corruption: HAST operates at the block level and doesn't understand ZFS's transactional semantics. During failover, in-flight transactions can lead to corrupted zpools. I've experienced this firsthand (admittedly, I may have misconfigured something): the automatic failover would trigger while ZFS was still writing, resulting in an unmountable pool.
  • +
  • ZFS-aware replication: zrepl understands ZFS datasets and snapshots. It replicates at the dataset level, ensuring each snapshot is a consistent point-in-time copy. This is fundamentally safer than block-level replication.
  • +
  • Snapshot history: With zrepl, you get multiple recovery points (every minute for NFS data in our setup). If corruption occurs, you can roll back to any previous snapshot. HAST only gives you the current state.
  • +
  • Easier recovery: When something goes wrong with zrepl, you still have intact snapshots on both sides. With HAST, a corrupted primary often means a corrupted secondary as well.
  • +

+FreeBSD HAST
+
+

Installing zrepl


+
+First, install zrepl on both hosts involved (we will replicate data from f0 to f1):
+
+ +
paul@f0:~ % doas pkg install -y zrepl
+
+
+Then, we verify the pools and datasets on both hosts:
+
+ +
# On f0
+paul@f0:~ % doas zpool list
+NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
+zdata   928G  1.03M   928G        -         -     0%     0%  1.00x    ONLINE  -
+zroot   472G  26.7G   445G        -         -     0%     5%  1.00x    ONLINE  -
+
+paul@f0:~ % doas zfs list -r zdata/enc
+NAME        USED  AVAIL  REFER  MOUNTPOINT
+zdata/enc   200K   899G   200K  /data/enc
+
+# On f1
+paul@f1:~ % doas zpool list
+NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
+zdata   928G   956K   928G        -         -     0%     0%  1.00x    ONLINE  -
+zroot   472G  11.7G   460G        -         -     0%     2%  1.00x    ONLINE  -
+
+paul@f1:~ % doas zfs list -r zdata/enc
+NAME        USED  AVAIL  REFER  MOUNTPOINT
+zdata/enc   200K   899G   200K  /data/enc
+
+
+Since we have a WireGuard tunnel between f0 and f1, we'll use TCP transport over the secure tunnel instead of SSH. First, check the WireGuard IP addresses:
+
+ +
# Check WireGuard interface IPs
+paul@f0:~ % ifconfig wg0 | grep inet
+	inet 192.168.2.130 netmask 0xffffff00
+
+paul@f1:~ % ifconfig wg0 | grep inet
+	inet 192.168.2.131 netmask 0xffffff00
+
+
+Let's create a dedicated dataset for NFS data that will be replicated:
+
+ +
# Create the nfsdata dataset that will hold all data exposed via NFS
+paul@f0:~ % doas zfs create zdata/enc/nfsdata
+
+
+Afterwards, we create the zrepl configuration on f0:
+
+ +
paul@f0:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
+global:
+  logging:
+    - type: stdout
+      level: info
+      format: human
+
+jobs:
+  - name: f0_to_f1_nfsdata
+    type: push
+    connect:
+      type: tcp
+      address: "192.168.2.131:8888"
+    filesystems:
+      "zdata/enc/nfsdata": true
+    send:
+      encrypted: true
+    snapshotting:
+      type: periodic
+      prefix: zrepl_
+      interval: 1m
+    pruning:
+      keep_sender:
+        - type: last_n
+          count: 10
+      keep_receiver:
+        - type: last_n
+          count: 10
+
+  - name: f0_to_f1_fedora
+    type: push
+    connect:
+      type: tcp
+      address: "192.168.2.131:8888"
+    filesystems:
+      "zroot/bhyve/fedora": true
+    send:
+      encrypted: true
+    snapshotting:
+      type: periodic
+      prefix: zrepl_
+      interval: 10m
+    pruning:
+      keep_sender:
+        - type: last_n
+          count: 10
+      keep_receiver:
+        - type: last_n
+          count: 10
+EOF
+
+
+We're using two separate replication jobs with different intervals:<br />
+
+
    +
  • f0_to_f1_nfsdata: Replicates NFS data every minute for faster failover recovery
  • +
  • f0_to_f1_fedora: Replicates Fedora VM every ten minutes (less critical)
  • +

+The Fedora VM is only used for development purposes, so it doesn't require as frequent replication as the NFS data. It's off-topic for this blog series, but it showcases zrepl's flexibility in handling different datasets with varying replication needs.<br />
+
+Furthermore:
+
+
    +
  • We're specifically replicating zdata/enc/nfsdata instead of the entire zdata/enc dataset. This dedicated dataset will contain all the data we later want to expose via NFS, keeping a clear separation between replicated NFS data and other local encrypted data.
  • +
  • The send: encrypted: true option makes zrepl use raw ZFS sends: the replication stream stays encrypted with the source dataset's key and is never decrypted for transport. On top of the WireGuard tunnel between f0 and f1, this avoids any decrypt/re-encrypt CPU overhead, and it's why f0's key must later be loaded on f1 before the replica can be mounted.
  • +

+

Configuring zrepl on f1 (sink)


+
+On f1 (the sink, meaning it's the node receiving the replication data), we configure zrepl to receive the data as follows:
+
+ +
# First, create a dedicated sink dataset
+paul@f1:~ % doas zfs create zdata/sink
+
+paul@f1:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
+global:
+  logging:
+    - type: stdout
+      level: info
+      format: human
+
+jobs:
+  - name: sink
+    type: sink
+    serve:
+      type: tcp
+      listen: "192.168.2.131:8888"
+      clients:
+        "192.168.2.130": "f0"
+    recv:
+      placeholder:
+        encryption: inherit
+    root_fs: "zdata/sink"
+EOF
+
+
+

Enabling and starting zrepl services


+
+We then enable and start zrepl on both hosts via:
+
+ +
# On f0
+paul@f0:~ % doas sysrc zrepl_enable=YES
+zrepl_enable:  -> YES
+paul@f0:~ % doas service zrepl start
+Starting zrepl.
+
+# On f1
+paul@f1:~ % doas sysrc zrepl_enable=YES
+zrepl_enable:  -> YES
+paul@f1:~ % doas service zrepl start
+Starting zrepl.
+
+
+To check the replication status, we run:
+
+ +
# On f0, check zrepl status (use raw mode for non-tty)
+paul@f0:~ % doas pkg install jq
+paul@f0:~ % doas zrepl status --mode raw | grep -A2 "Replication" | jq .
+"Replication":{"StartAt":"2025-07-01T22:31:48.712143123+03:00"...
+
+# Check if services are running
+paul@f0:~ % doas service zrepl status
+zrepl is running as pid 2649.
+
+paul@f1:~ % doas service zrepl status
+zrepl is running as pid 2574.
+
+# Check for zrepl snapshots on source
+paul@f0:~ % doas zfs list -t snapshot -r zdata/enc | grep zrepl
+zdata/enc/nfsdata@zrepl_20250701_193148_000    0B      -   176K  -
+
+# On f1, verify the replicated datasets
+paul@f1:~ % doas zfs list -r zdata | grep f0
+zdata/sink/f0                        576K   899G   200K  none
+zdata/sink/f0/zdata                  376K   899G   200K  none
+zdata/sink/f0/zdata/enc/nfsdata      176K   899G   176K  none
+
+# Check replicated snapshots on f1
+paul@f1:~ % doas zfs list -t snapshot -r zdata | grep zrepl
+zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_193148_000     0B      -   176K  -
+zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_194148_000     0B      -   176K  -
+.
+.
+.
+
+
+

Monitoring replication


+
+You can monitor the replication progress with:
+
+ +
paul@f0:~ % doas zrepl status
+
+
+zrepl status
+
+With this setup, both zdata/enc/nfsdata and zroot/bhyve/fedora on f0 will be automatically replicated to f1 every 1 minute (or 10 minutes in the case of the Fedora VM), with encrypted snapshots preserved on both sides. The pruning policy ensures that we keep the last 10 snapshots while managing disk space efficiently.
+
+The replicated data appears on f1 under zdata/sink/ with the source host and dataset hierarchy preserved:
+
+
    +
  • zdata/enc/nfsdata → zdata/sink/f0/zdata/enc/nfsdata
  • +
  • zroot/bhyve/fedora → zdata/sink/f0/zroot/bhyve/fedora
  • +

+This is by design: zrepl preserves the complete path from the source to ensure there are no conflicts when replicating from multiple sources.<br />
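+
+Because we replicate with send: encrypted: true (raw sends), the copy on f1 should itself still be encrypted with f0's key. A quick check (a sketch):<br />
+
+paul@f1:~ % zfs get -H -o value encryption zdata/sink/f0/zdata/enc/nfsdata
+aes-256-gcm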
+
+

Verifying replication after reboot


+
+The zrepl service is configured to start automatically at boot. After rebooting both hosts:
+
+ +
paul@f0:~ % uptime
+11:17PM  up 1 min, 0 users, load averages: 0.16, 0.06, 0.02
+
+paul@f0:~ % doas service zrepl status
+zrepl is running as pid 2366.
+
+paul@f1:~ % doas service zrepl status
+zrepl is running as pid 2309.
+
+# Check that new snapshots are being created and replicated
+paul@f0:~ % doas zfs list -t snapshot | grep zrepl | tail -2
+zdata/enc/nfsdata@zrepl_20250701_202530_000                0B      -   200K  -
+zroot/bhyve/fedora@zrepl_20250701_202530_000               0B      -  2.97G  -
+.
+.
+.
+
+paul@f1:~ % doas zfs list -t snapshot -r zdata/sink | grep 202530
+zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_202530_000      0B      -   176K  -
+zdata/sink/f0/zroot/bhyve/fedora@zrepl_20250701_202530_000     0B      -  2.97G  -
+.
+.
+.
+
+
+The timestamps confirm that replication resumed automatically after the reboot, ensuring continuous data protection. We can also write a test file to the NFS data directory on f0 and verify whether it appears on f1 after a minute.
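+
+Such a round-trip test could look like this (a sketch; the file name is illustrative, and the dataset still mounts under /data/enc at this point, before the remount in a later section):<br />
+
+paul@f0:~ % echo hello | doas tee /data/enc/nfsdata/replication-test.txt
+hello
+# About a minute later, the newest replicated snapshot on f1 should contain it:
+paul@f1:~ % doas zfs list -t snapshot -r zdata/sink/f0/zdata/enc/nfsdata | tail -1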
+
+

Understanding Failover Limitations and Design Decisions


+
+Our system intentionally fails over to a read-only replica if the primary fails. This is due to the nature of zrepl, which replicates data in one direction only. If we mounted the dataset on the sink node read-write, it would diverge from the original and replication would break. The replica can still be mounted read-write in case of a genuine failure of the primary node, but that step is intentionally left manual, so we never have to repair a broken replication afterwards.<br />
+
+So in summary:
+
+
    +
  • Split-brain prevention: Automatic failover to a read-write copy can cause both nodes to become active simultaneously if network communication fails. This leads to data divergence that's extremely difficult to resolve.
  • +
  • False positive protection: Temporary network issues or high load can trigger unwanted failovers. Manual intervention ensures that failovers occur only when truly necessary.
  • +
  • Data integrity over availability: For storage systems, data consistency is paramount. A few minutes of downtime is preferable to data corruption in this specific use case.
  • +
  • Simplified recovery: With manual failover, you always know which dataset is authoritative, making recovery more straightforward.
  • +

+
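+For reference, manually promoting the replica on f1 during a genuine f0 failure might look like this (a sketch; it assumes the mountpoints configured in the next section and breaks replication until f0 is re-seeded):<br />
+
+paul@f1:~ % doas zfs set readonly=off zdata/sink/f0/zdata/enc/nfsdata
+# f1 now serves the authoritative copy; before failing back, roll f1 back to
+# the last common snapshot or re-seed f0 from f1.
+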

Mounting the NFS datasets


+
+To make the NFS data accessible on both nodes, we need to mount it. On f0, this is straightforward:
+
+ +
# On f0 - set mountpoint for the primary nfsdata
+paul@f0:~ % doas zfs set mountpoint=/data/nfs zdata/enc/nfsdata
+paul@f0:~ % doas mkdir -p /data/nfs
+
+# Verify it's mounted
+paul@f0:~ % df -h /data/nfs
+Filesystem           Size    Used   Avail Capacity  Mounted on
+zdata/enc/nfsdata    899G    204K    899G     0%    /data/nfs
+
+
+On f1, we need to handle the encryption key and mount the standby copy:
+
+ +
# On f1 - first check encryption status
+paul@f1:~ % doas zfs get keystatus zdata/sink/f0/zdata/enc/nfsdata
+NAME                             PROPERTY   VALUE        SOURCE
+zdata/sink/f0/zdata/enc/nfsdata  keystatus  unavailable  -
+
+# Load the encryption key (using f0's key stored on the USB)
+paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key \
+    zdata/sink/f0/zdata/enc/nfsdata
+
+# Set mountpoint and mount (same path as f0 for easier failover)
+paul@f1:~ % doas mkdir -p /data/nfs
+paul@f1:~ % doas zfs set mountpoint=/data/nfs zdata/sink/f0/zdata/enc/nfsdata
+paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
+
+# Make it read-only to prevent accidental writes that would break replication
+paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
+
+# Verify
+paul@f1:~ % df -h /data/nfs
+Filesystem                         Size    Used   Avail Capacity  Mounted on
+zdata/sink/f0/zdata/enc/nfsdata    896G    204K    896G     0%    /data/nfs
+
+
+Note: The dataset is mounted at the same path (/data/nfs) on both hosts to simplify failover procedures. The dataset on f1 is set to readonly=on to prevent accidental modifications, which, as mentioned earlier, would break replication. If we wrote to it anyway, replication from f0 to f1 would fail like this:<br />
+
+cannot receive incremental stream: destination zdata/sink/f0/zdata/enc/nfsdata has been modified since most recent snapshot
+
+To fix a broken replication after accidental writes, we can do:
+
+ +
# Option 1: Rollback to the last common snapshot (loses local changes)
+paul@f1:~ % doas zfs rollback zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_204054_000
+
+# Option 2: Make it read-only to prevent accidents again
+paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
+
+
+And replication should work again!
+
+

Troubleshooting: Files not appearing in replication


+
+If you write files to /data/nfs/ on f0 but they don't appear on f1, first check whether the dataset is actually mounted on f0:<br />
+
+ +
paul@f0:~ % doas zfs list -o name,mountpoint,mounted | grep nfsdata
+zdata/enc/nfsdata                             /data/nfs             yes
+
+
+If it shows no, the dataset isn't mounted! This means files are being written to the root filesystem, not ZFS. Next, we should check whether the encryption key is loaded:
+
+ +
paul@f0:~ % doas zfs get keystatus zdata/enc/nfsdata
+NAME               PROPERTY   VALUE        SOURCE
+zdata/enc/nfsdata  keystatus  available    -
+# If "unavailable", load the key:
+paul@f0:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
+paul@f0:~ % doas zfs mount zdata/enc/nfsdata
+
+
+You can also verify that files are in the snapshot (not just the directory):
+
+ +
paul@f0:~ % ls -la /data/nfs/.zfs/snapshot/zrepl_*/
+
+
+This issue commonly occurs after a reboot if the encryption keys aren't configured to load automatically.
+
+

Configuring automatic key loading on boot


+
+To ensure all additional encrypted datasets are mounted automatically after reboot as well, we do:
+
+ +
# On f0 - configure all encrypted datasets
+paul@f0:~ % doas sysrc zfskeys_enable=YES
+zfskeys_enable: YES -> YES
+paul@f0:~ % doas sysrc zfskeys_datasets="zdata/enc zdata/enc/nfsdata zroot/bhyve"
+zfskeys_datasets:  -> zdata/enc zdata/enc/nfsdata zroot/bhyve
+
+# Set correct key locations for all datasets
+paul@f0:~ % doas zfs set keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
+
+# On f1 - include the replicated dataset
+paul@f1:~ % doas sysrc zfskeys_enable=YES
+zfskeys_enable: YES -> YES
+paul@f1:~ % doas sysrc zfskeys_datasets="zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata"
+zfskeys_datasets:  -> zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata
+
+# Set key location for replicated dataset
+paul@f1:~ % doas zfs set keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/sink/f0/zdata/enc/nfsdata
+
+
+Important notes:
+
+
    +
  • Each encryption root needs its own key load entry
  • +
  • The replicated dataset on f1 uses the same encryption key as the source on f0
  • +
  • Always verify datasets are mounted after reboot with zfs list -o name,mounted
  • +
  • Critical: Always ensure the replicated dataset on f1 remains read-only with doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
  • +

+
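+After the next reboot, a quick sanity check might look like this (a sketch):<br />
+
+paul@f0:~ % zfs list -o name,keystatus,mounted zdata/enc/nfsdata
+NAME               KEYSTATUS  MOUNTED
+zdata/enc/nfsdata  available  yes
+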

CARP (Common Address Redundancy Protocol)


+
+High availability is crucial for storage systems. If the storage server goes down, all NFS clients (which will later in this series include Kubernetes pods) lose access to their persistent data. CARP solves this by creating a virtual IP address (VIP) that automatically migrates to another server during failures. Clients use that VIP for their NFS mounts and therefore always reach the current primary node.<br />
+
+

How CARP Works


+
+In our case, CARP allows two hosts (f0 and f1) to share a virtual IP address (VIP). The hosts communicate via multicast to elect a MASTER, while the other remains BACKUP. When the MASTER fails, the BACKUP automatically promotes itself and takes over the VIP. This happens within seconds.<br />
+
+Key benefits for our storage system:
+
+
    +
  • Automatic failover: No manual intervention is required for basic failures, although there are a few limitations. The backup will have read-only access to the available data by default, as we have already learned.
  • +
  • Transparent to clients: Pods continue using the same IP address
  • +
  • Works with stunnel: Behind the VIP, there will be a stunnel process running, which ensures encrypted connections follow the active server.
  • +

+FreeBSD CARP
+Stunnel
+
+

Configuring CARP


+
+First, we add the CARP configuration to /etc/rc.conf on both f0 and f1:
+
+ +
# The virtual IP 192.168.1.138 will float between f0 and f1
+ifconfig_re0_alias0="inet vhid 1 pass testpass alias 192.168.1.138/32"
+
+
+Where:<br />
+
+
    +
  • vhid 1: Virtual Host ID - must match on all CARP members
  • +
  • pass testpass: Password for CARP authentication (if you follow this, use a different password!)
  • +
  • alias 192.168.1.138/32: The virtual IP address with a /32 netmask
  • +

+Next, update /etc/hosts on all nodes (f0, f1, f2, r0, r1, r2) to resolve the VIP hostname:
+
+
+192.168.1.138 f3s-storage-ha f3s-storage-ha.lan f3s-storage-ha.lan.buetow.org
+
+
+This allows clients to connect to f3s-storage-ha regardless of which physical server is currently the MASTER.
+
+

CARP State Change Notifications


+
+To correctly manage services during failover, we need to detect CARP state changes. FreeBSD's devd system can notify us when CARP transitions between MASTER and BACKUP states.
+
+Add this to /etc/devd.conf on both f0 and f1:
+
+ +
paul@f0:~ % cat <<END | doas tee -a /etc/devd.conf
+notify 0 {
+        match "system"          "CARP";
+        match "subsystem"       "[0-9]+@[0-9a-z.]+";
+        match "type"            "(MASTER|BACKUP)";
+        action "/usr/local/bin/carpcontrol.sh $subsystem $type";
+};
+END
+
+paul@f0:~ % doas service devd restart
+
+
+Next, we create the CARP control script that will restart stunnel when the CARP state changes:
+
+ +
paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
+#!/bin/sh
+# CARP state change control script
+
+case "$1" in
+    MASTER)
+        logger "CARP state changed to MASTER, starting services"
+        ;;
+    BACKUP)
+        logger "CARP state changed to BACKUP, stopping services"
+        ;;
+    *)
+        logger "CARP state changed to $1 (unhandled)"
+        ;;
+esac
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
+
+# Copy the same script to f1
+paul@f0:~ % scp /usr/local/bin/carpcontrol.sh f1:/tmp/
+paul@f1:~ % doas mv /tmp/carpcontrol.sh /usr/local/bin/
+paul@f1:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
+
+
+Note that carpcontrol.sh doesn't do anything useful yet. We will provide more details (including starting and stopping services upon failover) later in this blog post.
+
+To enable CARP in /boot/loader.conf, run:
+
+ +
paul@f0:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf
+carp_load="YES"
+paul@f1:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf  
+carp_load="YES"
+
+
+Then reboot both hosts or run doas kldload carp to load the module immediately.
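+
+Once the module is loaded and the alias is up, the CARP state and the devd hook can be verified; the exact output is from memory, so treat this as a sketch:<br />
+
+paul@f0:~ % ifconfig re0 | grep carp
+	carp: MASTER vhid 1 advbase 1 advskew 0
+# Force a state flip to watch the devd hook fire:
+paul@f0:~ % doas ifconfig re0 vhid 1 state backup
+paul@f0:~ % doas tail -1 /var/log/messages
+... CARP state changed to BACKUP, stopping services
+paul@f0:~ % doas ifconfig re0 vhid 1 state master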
+
+

NFS Server Configuration


+
+With ZFS replication in place, we can now set up NFS servers on both f0 and f1 to export the replicated data. Since native NFS over TLS (RFC 9289) has compatibility issues between Linux and FreeBSD (not digging into the details here, but I couldn't get it to work), we'll use stunnel to provide encryption.
+
+

Setting up NFS on f0 (Primary)


+
+First, enable the NFS services in rc.conf:
+
+ +
paul@f0:~ % doas sysrc nfs_server_enable=YES
+nfs_server_enable: YES -> YES
+paul@f0:~ % doas sysrc nfsv4_server_enable=YES
+nfsv4_server_enable: YES -> YES
+paul@f0:~ % doas sysrc nfsuserd_enable=YES
+nfsuserd_enable: YES -> YES
+paul@f0:~ % doas sysrc mountd_enable=YES
+mountd_enable: NO -> YES
+paul@f0:~ % doas sysrc rpcbind_enable=YES
+rpcbind_enable: NO -> YES
+
+
+And we also create a dedicated directory for Kubernetes volumes:
+
+ +
# First, ensure the dataset is mounted
+paul@f0:~ % doas zfs get mounted zdata/enc/nfsdata
+NAME               PROPERTY  VALUE    SOURCE
+zdata/enc/nfsdata  mounted   yes      -
+
+# Create the k3svolumes directory
+paul@f0:~ % doas mkdir -p /data/nfs/k3svolumes
+paul@f0:~ % doas chmod 755 /data/nfs/k3svolumes
+
+
+We also create the /etc/exports file. Since we're using stunnel for encryption, ALL clients must connect through stunnel, which appears as localhost (127.0.0.1) to the NFS server:
+
+ +
paul@f0:~ % doas tee /etc/exports <<'EOF'
+V4: /data/nfs -sec=sys
+/data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255
+EOF
+
+
+The exports configuration:
+
+
    +
  • V4: /data/nfs -sec=sys: Sets the NFSv4 root directory to /data/nfs
  • +
  • -maproot=root: Maps root user from client to root on server
  • +
  • -network 127.0.0.1: Only accepts connections from localhost (stunnel)
  • +

+To start the NFS services, we run:
+
+ +
paul@f0:~ % doas service rpcbind start
+Starting rpcbind.
+paul@f0:~ % doas service mountd start
+Starting mountd.
+paul@f0:~ % doas service nfsd start
+Starting nfsd.
+paul@f0:~ % doas service nfsuserd start
+Starting nfsuserd.
+
+
+

Configuring Stunnel for NFS Encryption with CARP Failover


+
+Using stunnel with client certificate authentication for NFS encryption provides several advantages:
+
+
    +
  • Compatibility: Works with any NFS version and between different operating systems
  • +
  • Strong encryption: Uses TLS/SSL with configurable cipher suites
  • +
  • Transparent: Applications don't need modification, encryption happens at the transport layer
  • +
  • Performance: Minimal overhead (~2% in benchmarks)
  • +
  • Flexibility: Can encrypt any TCP-based protocol, not just NFS
  • +
  • Strong Authentication: Client certificates provide cryptographic proof of identity
  • +
  • Access Control: Only clients with valid certificates signed by your CA can connect
  • +
  • Certificate Revocation: You can revoke access by removing certificates from the CA
  • +

+Stunnel integrates seamlessly with our CARP setup:
+
+
+                    CARP VIP (192.168.1.138)
+                           |
+    f0 (MASTER) ←---------→|←---------→ f1 (BACKUP)
+    stunnel:2323           |           stunnel:stopped
+    nfsd:2049              |           nfsd:stopped
+                           |
+                    Clients connect here
+
+
+The key insight is that stunnel binds to the CARP VIP. When CARP fails over, the VIP is moved to the new master, and stunnel starts there automatically. Clients maintain their connection to the same IP throughout.
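+
+Once stunnel is configured (next sections), whichever node is CARP MASTER should be the one listening on the VIP. A quick check (a sketch; columns elided):<br />
+
+paul@f0:~ % sockstat -4 -l | grep 2323
+stunnel  stunnel  ...  tcp4  192.168.1.138:2323  *:*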
+
+

Creating a Certificate Authority for Client Authentication


+
+First, create a CA to sign both server and client certificates:
+
+ +
# On f0 - Create CA
+paul@f0:~ % doas mkdir -p /usr/local/etc/stunnel/ca
+paul@f0:~ % cd /usr/local/etc/stunnel/ca
+paul@f0:~ % doas openssl genrsa -out ca-key.pem 4096
+paul@f0:~ % doas openssl req -new -x509 -days 3650 -key ca-key.pem -out ca-cert.pem \
+  -subj '/C=US/ST=State/L=City/O=F3S Storage/CN=F3S Stunnel CA'
+
+# Create server certificate
+paul@f0:~ % cd /usr/local/etc/stunnel
+paul@f0:~ % doas openssl genrsa -out server-key.pem 4096
+paul@f0:~ % doas openssl req -new -key server-key.pem -out server.csr \
+  -subj '/C=US/ST=State/L=City/O=F3S Storage/CN=f3s-storage-ha.lan'
+paul@f0:~ % doas openssl x509 -req -days 3650 -in server.csr -CA ca/ca-cert.pem \
+  -CAkey ca/ca-key.pem -CAcreateserial -out server-cert.pem
+
+# Create client certificates for authorised clients
+paul@f0:~ % cd /usr/local/etc/stunnel/ca
+paul@f0:~ % doas sh -c 'for client in r0 r1 r2 earth; do 
+  openssl genrsa -out ${client}-key.pem 4096
+  openssl req -new -key ${client}-key.pem -out ${client}.csr \
+    -subj "/C=US/ST=State/L=City/O=F3S Storage/CN=${client}.lan.buetow.org"
+  openssl x509 -req -days 3650 -in ${client}.csr -CA ca-cert.pem \
+    -CAkey ca-key.pem -CAcreateserial -out ${client}-cert.pem
+done'
+
+
+

Install and Configure Stunnel on f0


+
+ +
# Install stunnel
+paul@f0:~ % doas pkg install -y stunnel
+
+# Configure stunnel server with client certificate authentication
+paul@f0:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF'
+cert = /usr/local/etc/stunnel/server-cert.pem
+key = /usr/local/etc/stunnel/server-key.pem
+
+setuid = stunnel
+setgid = stunnel
+
+[nfs-tls]
+accept = 192.168.1.138:2323
+connect = 127.0.0.1:2049
+CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem
+verify = 2
+requireCert = yes
+EOF
+
+# Enable and start stunnel
+paul@f0:~ % doas sysrc stunnel_enable=YES
+stunnel_enable:  -> YES
+paul@f0:~ % doas service stunnel start
+Starting stunnel.
+
+# Restart stunnel to apply the CARP VIP binding
+paul@f0:~ % doas service stunnel restart
+Stopping stunnel.
+Starting stunnel.
+
+
+The configuration includes:
+
+
    +
  • verify = 2: Verify client certificate and fail if not provided
  • +
  • requireCert = yes: Client must present a valid certificate
  • +
  • CAfile: Path to the CA certificate that signed the client certificates
  • +

+
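+The client side will be wired up later in this series, but for orientation, a client-mode stunnel.conf on one of the VMs would plausibly look like this (a sketch; file paths and the local accept port are assumptions):<br />
+
+client = yes
+
+[nfs-tls]
+accept = 127.0.0.1:2049
+connect = f3s-storage-ha.lan:2323
+cert = /etc/stunnel/r0-cert.pem
+key = /etc/stunnel/r0-key.pem
+CAfile = /etc/stunnel/ca-cert.pem
+verify = 2
+
+The NFS mount on the client would then target 127.0.0.1:2049 and be transparently tunnelled to the current CARP MASTER.<br />
+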

Setting up NFS on f1 (Standby)


+
+Repeat the same configuration on f1:
+
+ +
paul@f1:~ % doas sysrc nfs_server_enable=YES
+nfs_server_enable: NO -> YES
+paul@f1:~ % doas sysrc nfsv4_server_enable=YES
+nfsv4_server_enable: NO -> YES
+paul@f1:~ % doas sysrc nfsuserd_enable=YES
+nfsuserd_enable: NO -> YES
+paul@f1:~ % doas sysrc mountd_enable=YES
+mountd_enable: NO -> YES
+paul@f1:~ % doas sysrc rpcbind_enable=YES
+rpcbind_enable: NO -> YES
+
+paul@f1:~ % doas tee /etc/exports <<'EOF'
+V4: /data/nfs -sec=sys
+/data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255
+EOF
+
+paul@f1:~ % doas service rpcbind start
+Starting rpcbind.
+paul@f1:~ % doas service mountd start
+Starting mountd.
+paul@f1:~ % doas service nfsd start
+Starting nfsd.
+paul@f1:~ % doas service nfsuserd start
+Starting nfsuserd.
+
+
+And to configure stunnel on f1, we run:
+
+ +
# Install stunnel
+paul@f1:~ % doas pkg install -y stunnel
+
+# Copy certificates from f0
+paul@f0:~ % doas tar -cf /tmp/stunnel-certs.tar -C /usr/local/etc/stunnel server-cert.pem server-key.pem ca
+paul@f0:~ % scp /tmp/stunnel-certs.tar f1:/tmp/
+
+paul@f1:~ % cd /usr/local/etc/stunnel && doas tar -xf /tmp/stunnel-certs.tar
+
+# Configure stunnel server on f1 with client certificate authentication
+paul@f1:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF'
+cert = /usr/local/etc/stunnel/server-cert.pem
+key = /usr/local/etc/stunnel/server-key.pem
+
+setuid = stunnel
+setgid = stunnel
+
+[nfs-tls]
+accept = 192.168.1.138:2323
+connect = 127.0.0.1:2049
+CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem
+verify = 2
+requireCert = yes
+EOF
+
+# Enable and start stunnel
+paul@f1:~ % doas sysrc stunnel_enable=YES
+stunnel_enable:  -> YES
+paul@f1:~ % doas service stunnel start
+Starting stunnel.
+
+# Restart stunnel to apply the CARP VIP binding
+paul@f1:~ % doas service stunnel restart
+Stopping stunnel.
+Starting stunnel.
+
+
+

CARP Control Script for Clean Failover


+
+With stunnel configured to bind to the CARP VIP (192.168.1.138), only the server that is currently the CARP MASTER will accept stunnel connections. This provides automatic failover for encrypted NFS:
+
+
    +
  • When f0 is CARP MASTER: stunnel on f0 accepts connections on 192.168.1.138:2323
  • +
  • When f1 becomes CARP MASTER: stunnel on f1 starts accepting connections on 192.168.1.138:2323
  • +
  • The backup server's stunnel process will fail to bind to the VIP and won't accept connections
  • +

+This ensures that clients always connect to the active NFS server through the CARP VIP. To ensure clean failover behaviour and prevent stale file handles, we'll update our carpcontrol.sh script so that it:<br />
+
+
    +
  • Stops NFS services on BACKUP nodes (preventing split-brain scenarios)
  • +
  • Starts NFS services only on the MASTER node
  • +
  • Manages stunnel binding to the CARP VIP
  • +

+This approach ensures clients can only connect to the active server, eliminating stale handles from the inactive server:
+
+ +
# Create CARP control script on both f0 and f1
+paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
+#!/bin/sh
+# CARP state change control script
+
+case "$1" in
+    MASTER)
+        logger "CARP state changed to MASTER, starting services"
+        service rpcbind start >/dev/null 2>&1
+        service mountd start >/dev/null 2>&1
+        service nfsd start >/dev/null 2>&1
+        service nfsuserd start >/dev/null 2>&1
+        service stunnel restart >/dev/null 2>&1
+        logger "CARP MASTER: NFS and stunnel services started"
+        ;;
+    BACKUP)
+        logger "CARP state changed to BACKUP, stopping services"
+        service stunnel stop >/dev/null 2>&1
+        service nfsd stop >/dev/null 2>&1
+        service mountd stop >/dev/null 2>&1
+        service nfsuserd stop >/dev/null 2>&1
+        logger "CARP BACKUP: NFS and stunnel services stopped"
+        ;;
+    *)
+        logger "CARP state changed to $1 (unhandled)"
+        ;;
+esac
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
+
+
+

CARP Management Script


+
+To simplify CARP state management and failover testing, create this helper script on both f0 and f1:
+
+ +
# Create the CARP management script
+paul@f0:~ % doas tee /usr/local/bin/carp <<'EOF'
+#!/bin/sh
+# CARP state management script
+# Usage: carp [master|backup|auto-failback enable|auto-failback disable]
+# Without arguments: shows current state
+
+# Find the interface with CARP configured
+CARP_IF=$(ifconfig -l | xargs -n1 | while read if; do
+    ifconfig "$if" 2>/dev/null | grep -q "carp:" && echo "$if" && break
+done)
+
+if [ -z "$CARP_IF" ]; then
+    echo "Error: No CARP interface found"
+    exit 1
+fi
+
+# Get CARP VHID
+VHID=$(ifconfig "$CARP_IF" | grep "carp:" | sed -n 's/.*vhid \([0-9]*\).*/\1/p')
+
+if [ -z "$VHID" ]; then
+    echo "Error: Could not determine CARP VHID"
+    exit 1
+fi
+
+# Function to get the current state
+get_state() {
+    ifconfig "$CARP_IF" | grep "carp:" | awk '{print $2}'
+}
+
+# Check for auto-failback block file
+BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
+check_auto_failback() {
+    if [ -f "$BLOCK_FILE" ]; then
+        echo "WARNING: Auto-failback is DISABLED (file exists: $BLOCK_FILE)"
+    fi
+}
+
+# Main logic
+case "$1" in
+    "")
+        # No argument - show current state
+        STATE=$(get_state)
+        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
+        check_auto_failback
+        ;;
+    master)
+        # Force to MASTER state
+        echo "Setting CARP to MASTER state..."
+        ifconfig "$CARP_IF" vhid "$VHID" state master
+        sleep 1
+        STATE=$(get_state)
+        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
+        check_auto_failback
+        ;;
+    backup)
+        # Force to BACKUP state
+        echo "Setting CARP to BACKUP state..."
+        ifconfig "$CARP_IF" vhid "$VHID" state backup
+        sleep 1
+        STATE=$(get_state)
+        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
+        check_auto_failback
+        ;;
+    auto-failback)
+        case "$2" in
+            enable)
+                if [ -f "$BLOCK_FILE" ]; then
+                    rm "$BLOCK_FILE"
+                    echo "Auto-failback ENABLED (removed $BLOCK_FILE)"
+                else
+                    echo "Auto-failback was already enabled"
+                fi
+                ;;
+            disable)
+                if [ ! -f "$BLOCK_FILE" ]; then
+                    touch "$BLOCK_FILE"
+                    echo "Auto-failback DISABLED (created $BLOCK_FILE)"
+                else
+                    echo "Auto-failback was already disabled"
+                fi
+                ;;
+            *)
+                echo "Usage: $0 auto-failback [enable|disable]"
+                echo "  enable:  Remove block file to allow automatic failback"
+                echo "  disable: Create block file to prevent automatic failback"
+                exit 1
+                ;;
+        esac
+        ;;
+    *)
+        echo "Usage: $0 [master|backup|auto-failback enable|auto-failback disable]"
+        echo "  Without arguments: show current CARP state"
+        echo "  master: force this node to become CARP MASTER"
+        echo "  backup: force this node to become CARP BACKUP"
+        echo "  auto-failback enable:  allow automatic failback to f0"
+        echo "  auto-failback disable: prevent automatic failback to f0"
+        exit 1
+        ;;
+esac
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carp
+
+# Copy to f1 as well
+paul@f0:~ % scp /usr/local/bin/carp f1:/tmp/
+paul@f1:~ % doas cp /tmp/carp /usr/local/bin/carp && doas chmod +x /usr/local/bin/carp
+
+
+Now you can easily manage CARP states and auto-failback:
+
+ +
# Check current CARP state
+paul@f0:~ % doas carp
+CARP state on re0 (vhid 1): MASTER
+
+# If auto-failback is disabled, you'll see a warning
+paul@f0:~ % doas carp
+CARP state on re0 (vhid 1): MASTER
+WARNING: Auto-failback is DISABLED (file exists: /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+# Force f0 to become BACKUP (triggers failover to f1)
+paul@f0:~ % doas carp backup
+Setting CARP to BACKUP state...
+CARP state on re0 (vhid 1): BACKUP
+
+# Disable auto-failback (useful for maintenance)
+paul@f0:~ % doas carp auto-failback disable
+Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+# Enable auto-failback
+paul@f0:~ % doas carp auto-failback enable
+Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+
+

Automatic Failback After Reboot


+
+When f0 reboots (planned or unplanned), f1 takes over as CARP MASTER. To ensure f0 automatically reclaims its primary role once it's fully operational, we'll implement an automatic failback mechanism with the following script:
+
+ +
paul@f0:~ % doas tee /usr/local/bin/carp-auto-failback.sh <<'EOF'
+#!/bin/sh
+# CARP automatic failback script for f0
+# Ensures f0 reclaims MASTER role after reboot when storage is ready
+
+LOGFILE="/var/log/carp-auto-failback.log"
+MARKER_FILE="/data/nfs/nfs.DO_NOT_REMOVE"
+BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
+
+log_message() {
+    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$LOGFILE"
+}
+
+# Check if we're already MASTER (NR==1 skips a possible auto-failback warning line)
+CURRENT_STATE=$(/usr/local/bin/carp | awk 'NR==1 {print $NF}')
+if [ "$CURRENT_STATE" = "MASTER" ]; then
+    exit 0
+fi
+
+# Check if /data/nfs is mounted
+if ! mount | grep -q "on /data/nfs "; then
+    log_message "SKIP: /data/nfs not mounted"
+    exit 0
+fi
+
+# Check if the marker file exists (identifies that the ZFS data set is properly mounted)
+if [ ! -f "$MARKER_FILE" ]; then
+    log_message "SKIP: Marker file $MARKER_FILE not found"
+    exit 0
+fi
+
+# Check if failback is blocked (for maintenance)
+if [ -f "$BLOCK_FILE" ]; then
+    log_message "SKIP: Failback blocked by $BLOCK_FILE"
+    exit 0
+fi
+
+# Check if NFS services are running (ensure we're fully ready)
+if ! service nfsd status >/dev/null 2>&1; then
+    log_message "SKIP: NFS services not yet running"
+    exit 0
+fi
+
+# All conditions met - promote to MASTER
+log_message "CONDITIONS MET: Promoting to MASTER (was $CURRENT_STATE)"
+/usr/local/bin/carp master
+
+# Log result
+sleep 2
+NEW_STATE=$(/usr/local/bin/carp | awk 'NR==1 {print $NF}')
+log_message "Failback complete: State is now $NEW_STATE"
+
+# If successful, log to the system log too
+if [ "$NEW_STATE" = "MASTER" ]; then
+    logger "CARP: f0 automatically reclaimed MASTER role"
+fi
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carp-auto-failback.sh
+
+
+The marker file confirms that the ZFS data set is actually mounted at /data/nfs - without it, the path could be an empty directory on the root filesystem, and failback would promote a node that doesn't have its data. We create it with:
+
+ +
paul@f0:~ % doas touch /data/nfs/nfs.DO_NOT_REMOVE
+
+
+We add a cron job to check every minute. Note that piping to crontab - replaces root's entire crontab, so append instead if root already has other entries:
+
+ +
paul@f0:~ % echo "* * * * * /usr/local/bin/carp-auto-failback.sh" | doas crontab -
+
+
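+To confirm that the entry landed in root's crontab, we can list it:
+
+ +
+# The output should contain exactly the line we installed
+paul@f0:~ % doas crontab -l
+* * * * * /usr/local/bin/carp-auto-failback.sh
+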
+The enhanced CARP script provides integrated control over auto-failback. To temporarily turn off automatic failback (e.g., for f0 maintenance), we run:
+
+ +
paul@f0:~ % doas carp auto-failback disable
+Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+
+And to re-enable it:
+
+ +
paul@f0:~ % doas carp auto-failback enable
+Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+
+To check whether auto-failback is enabled, we run:
+
+ +
paul@f0:~ % doas carp
+CARP state on re0 (vhid 1): MASTER
+# If disabled, you'll see: WARNING: Auto-failback is DISABLED
+
+
+The failback attempts are logged to /var/log/carp-auto-failback.log!
+
+So, in summary:
+
+
    +
  • After f0 reboots: f1 is MASTER, f0 boots as BACKUP
  • +
  • Cron runs every minute: it checks that f0 is currently BACKUP (don't run if already MASTER), that /data/nfs is mounted (ZFS datasets are ready), that the marker file exists (confirms this is the primary storage), that failback isn't blocked (admin can prevent failback), and that NFS services are running (system is fully ready)
  • +
  • Failback occurs: Typically 2-3 minutes after boot completes
  • +
  • Logging: All attempts logged for troubleshooting
  • +

+This ensures f0 automatically resumes its role as primary storage server after any reboot, while providing administrative control when needed.
+
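+After the next reboot of f0, we can confirm that the mechanism fired by tailing its log. The timestamps below are illustrative, but the messages are the ones carp-auto-failback.sh writes:
+
+ +
+paul@f0:~ % tail -2 /var/log/carp-auto-failback.log
+2025-07-06 10:03:01 - CONDITIONS MET: Promoting to MASTER (was BACKUP)
+2025-07-06 10:03:03 - Failback complete: State is now MASTER
+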
+

Client Configuration for Stunnel


+
+To mount NFS shares with stunnel encryption, clients must install and configure stunnel using their client certificates.
+
+

Configuring Rocky Linux Clients (r0, r1, r2)


+
+On the Rocky Linux VMs, we run:
+
+ +
# Install stunnel on client (example for `r0`)
+[root@r0 ~]# dnf install -y stunnel nfs-utils
+
+# Copy client certificate and CA certificate from f0
+[root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/r0-key.pem /etc/stunnel/
+[root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/ca-cert.pem /etc/stunnel/
+
+# Configure stunnel client with certificate authentication
+[root@r0 ~]# tee /etc/stunnel/stunnel.conf <<'EOF'
+cert = /etc/stunnel/r0-key.pem
+CAfile = /etc/stunnel/ca-cert.pem
+client = yes
+verify = 2
+
+[nfs-ha]
+accept = 127.0.0.1:2323
+connect = 192.168.1.138:2323
+EOF
+
+# Enable and start stunnel
+[root@r0 ~]# systemctl enable --now stunnel
+
+# Repeat for r1 and r2 with their respective certificates
+
+
+Note: Each client must use its own certificate file (r0-key.pem, r1-key.pem, r2-key.pem, or earth-key.pem - the latter is for my laptop, which can also mount the NFS shares).
+
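+As a sketch of that repetition (assuming the same paths as for r0), only the certificate line differs per client; for r1:
+
+ +
+[root@r1 ~]# dnf install -y stunnel nfs-utils
+[root@r1 ~]# scp f0:/usr/local/etc/stunnel/ca/r1-key.pem /etc/stunnel/
+[root@r1 ~]# scp f0:/usr/local/etc/stunnel/ca/ca-cert.pem /etc/stunnel/
+
+# Same configuration as on r0, but pointing at r1's certificate
+[root@r1 ~]# tee /etc/stunnel/stunnel.conf <<'EOF'
+cert = /etc/stunnel/r1-key.pem
+CAfile = /etc/stunnel/ca-cert.pem
+client = yes
+verify = 2
+
+[nfs-ha]
+accept = 127.0.0.1:2323
+connect = 192.168.1.138:2323
+EOF
+
+[root@r1 ~]# systemctl enable --now stunnel
+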
+

Testing NFS Mount with Stunnel


+
+To mount NFS through the stunnel encrypted tunnel, we run:
+
+ +
# Create a mount point
+[root@r0 ~]# mkdir -p /data/nfs/k3svolumes
+
+# Mount through stunnel (using localhost and NFSv4)
+[root@r0 ~]# mount -t nfs4 -o port=2323 127.0.0.1:/data/nfs/k3svolumes /data/nfs/k3svolumes
+
+# Verify mount
+[root@r0 ~]# mount | grep k3svolumes
+127.0.0.1:/data/nfs/k3svolumes on /data/nfs/k3svolumes type nfs4 (rw,relatime,vers=4.2,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,port=2323,timeo=600,retrans=2,sec=sys,clientaddr=127.0.0.1,local_lock=none,addr=127.0.0.1)
+
+# For persistent mount, add to /etc/fstab:
+127.0.0.1:/data/nfs/k3svolumes /data/nfs/k3svolumes nfs4 port=2323,_netdev 0 0
+
+
+Note: The mount uses localhost (127.0.0.1) because stunnel is listening locally and forwarding the encrypted traffic to the remote server.
+
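+If the mount fails, a useful first check is whether the local stunnel listener is actually up:
+
+ +
+# Expect a LISTEN entry on 127.0.0.1:2323 owned by stunnel
+[root@r0 ~]# ss -tlnp | grep 2323
+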
+

Testing CARP Failover with Mounted Clients and Stale File Handles


+
+To test the failover process:
+
+ +
# On f0 (current MASTER) - trigger failover
+paul@f0:~ % doas ifconfig re0 vhid 1 state backup
+
+# On f1 - verify it becomes MASTER
+paul@f1:~ % ifconfig re0 | grep carp
+    inet 192.168.1.138 netmask 0xffffffff broadcast 192.168.1.138 vhid 1
+
+# Check stunnel is now listening on f1
+paul@f1:~ % doas sockstat -l | grep 2323
+stunnel  stunnel    4567  3  tcp4   192.168.1.138:2323    *:*
+
+# On client - verify NFS mount still works
+[root@r0 ~]# ls /data/nfs/k3svolumes/
+[root@r0 ~]# echo "Test after failover" > /data/nfs/k3svolumes/failover-test.txt
+
+
+After a CARP failover, NFS clients may experience "Stale file handle" errors because they cached file handles from the previous server. To resolve this manually, we can run:
+
+ +
# Force unmount and remount
+[root@r0 ~]# umount -f /data/nfs/k3svolumes
+[root@r0 ~]# mount /data/nfs/k3svolumes
+
+
+For automatic recovery, we create a script:
+
+ +
[root@r0 ~]# cat > /usr/local/bin/check-nfs-mount.sh << 'EOF'
+#!/bin/bash
+# Fast NFS mount health monitor - runs every 10 seconds via systemd timer
+
+MOUNT_POINT="/data/nfs/k3svolumes"
+LOCK_FILE="/var/run/nfs-mount-check.lock"
+STATE_FILE="/var/run/nfs-mount.state"
+
+# Use a lock file to prevent concurrent runs
+if [ -f "$LOCK_FILE" ]; then
+    exit 0
+fi
+touch "$LOCK_FILE"
+trap "rm -f $LOCK_FILE" EXIT
+
+# Quick check - try to stat a directory with a very short timeout
+if timeout 2s stat "$MOUNT_POINT" >/dev/null 2>&1; then
+    # Mount appears healthy
+    if [ -f "$STATE_FILE" ]; then
+        # Was previously unhealthy, log recovery
+        echo "NFS mount recovered at $(date)" | systemd-cat -t nfs-monitor -p info
+        rm -f "$STATE_FILE"
+    fi
+    exit 0
+fi
+
+# Mount is unhealthy
+if [ ! -f "$STATE_FILE" ]; then
+    # First detection of unhealthy state
+    echo "NFS mount unhealthy detected at $(date)" | systemd-cat -t nfs-monitor -p warning
+    touch "$STATE_FILE"
+fi
+
+# Try to fix
+echo "Attempting to fix stale NFS mount at $(date)" | systemd-cat -t nfs-monitor -p notice
+umount -f "$MOUNT_POINT" 2>/dev/null
+sleep 1
+
+if mount "$MOUNT_POINT"; then
+    echo "NFS mount fixed at $(date)" | systemd-cat -t nfs-monitor -p info
+    rm -f "$STATE_FILE"
+else
+    echo "Failed to fix NFS mount at $(date)" | systemd-cat -t nfs-monitor -p err
+fi
+EOF
+[root@r0 ~]# chmod +x /usr/local/bin/check-nfs-mount.sh
+
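+Before wiring the script up to a timer, we can run it once by hand and inspect the journal (systemd-cat tags all messages with the nfs-monitor identifier):
+
+ +
+# Run the monitor once manually; it stays silent while the mount is healthy
+[root@r0 ~]# /usr/local/bin/check-nfs-mount.sh
+
+# Show the most recent messages logged under the nfs-monitor tag
+[root@r0 ~]# journalctl -t nfs-monitor -n 20
+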
+
+And we create the systemd service as follows:
+
+ +
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.service << 'EOF'
+[Unit]
+Description=NFS Mount Health Monitor
+After=network-online.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/check-nfs-mount.sh
+StandardOutput=journal
+StandardError=journal
+EOF
+
+
+And we also create the systemd timer (runs every 10 seconds):
+
+ +
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.timer << 'EOF'
+[Unit]
+Description=Run NFS Mount Health Monitor every 10 seconds
+Requires=nfs-mount-monitor.service
+
+[Timer]
+OnBootSec=30s
+OnUnitActiveSec=10s
+AccuracySec=1s
+
+[Install]
+WantedBy=timers.target
+EOF
+
+
+To enable and start the timer, we run:
+
+ +
[root@r0 ~]# systemctl daemon-reload
+[root@r0 ~]# systemctl enable nfs-mount-monitor.timer
+[root@r0 ~]# systemctl start nfs-mount-monitor.timer
+
+# Check status
+[root@r0 ~]# systemctl status nfs-mount-monitor.timer
+● nfs-mount-monitor.timer - Run NFS Mount Health Monitor every 10 seconds
+     Loaded: loaded (/etc/systemd/system/nfs-mount-monitor.timer; enabled)
+     Active: active (waiting) since Sat 2025-07-06 10:00:00 EEST
+    Trigger: Sat 2025-07-06 10:00:10 EEST; 8s left
+
+# Monitor logs
+[root@r0 ~]# journalctl -u nfs-mount-monitor -f
+
+
+Note: Stale file handles are inherent to NFS failover because file handles are server-specific. The best approach depends on your application's tolerance for brief disruptions. Of course, all the changes made to r0 above must also be applied to r1 and r2.
+
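+Here is a sketch of distributing the monitor to the other clients (assuming root SSH access between the VMs):
+
+ +
+[root@r0 ~]# for h in r1 r2; do
+    scp /usr/local/bin/check-nfs-mount.sh $h:/usr/local/bin/
+    scp /etc/systemd/system/nfs-mount-monitor.service $h:/etc/systemd/system/
+    scp /etc/systemd/system/nfs-mount-monitor.timer $h:/etc/systemd/system/
+    ssh $h 'chmod +x /usr/local/bin/check-nfs-mount.sh && \
+        systemctl daemon-reload && systemctl enable --now nfs-mount-monitor.timer'
+done
+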
+

Complete Failover Test


+
+Here's a comprehensive test of the failover behaviour with all optimisations in place:
+
+ +
# 1. Check the initial state
+paul@f0:~ % ifconfig re0 | grep carp
+    carp: MASTER vhid 1 advbase 1 advskew 0
+paul@f1:~ % ifconfig re0 | grep carp
+    carp: BACKUP vhid 1 advbase 1 advskew 0
+
+# 2. Create a test file from a client
+[root@r0 ~]# echo "test before failover" > /data/nfs/k3svolumes/test-before.txt
+
+# 3. Trigger failover (f0 → f1)
+paul@f0:~ % doas ifconfig re0 vhid 1 state backup
+
+# 4. Monitor client behaviour
+[root@r0 ~]# ls /data/nfs/k3svolumes/
+ls: cannot access '/data/nfs/k3svolumes/': Stale file handle
+
+# 5. Check automatic recovery (within 10 seconds)
+[root@r0 ~]# journalctl -u nfs-mount-monitor -f
+Jul 06 10:15:32 r0 nfs-monitor[1234]: NFS mount unhealthy detected at Sun Jul 6 10:15:32 EEST 2025
+Jul 06 10:15:32 r0 nfs-monitor[1234]: Attempting to fix stale NFS mount at Sun Jul 6 10:15:32 EEST 2025
+Jul 06 10:15:33 r0 nfs-monitor[1234]: NFS mount fixed at Sun Jul 6 10:15:33 EEST 2025
+
+
+Failover Timeline:
+
+
    +
  • 0 seconds: CARP failover triggered
  • +
  • 0-2 seconds: Clients get "Stale file handle" errors (not hanging)
  • +
  • 3-10 seconds: Soft mounts ensure quick failure of operations
  • +
  • Within 10 seconds: Automatic recovery via systemd timer
  • +

+Benefits of the Optimised Setup:
+
+
    +
  • No hanging processes - Soft mounts fail quickly
  • +
  • Clean failover - Old server stops serving immediately
  • +
  • Fast automatic recovery - No manual intervention needed
  • +
  • Predictable timing - Recovery within 10 seconds with systemd timer
  • +
  • Better visibility - systemd journal provides detailed logs
  • +

+Important Considerations:
+
+
    +
  • Recent writes (within 1 minute) may not be visible after failover due to replication lag (see the snapshot check after this list)
  • +
  • Applications should handle brief NFS errors gracefully
  • +
  • For zero-downtime requirements, consider synchronous replication or distributed storage (see "Future storage explorations" section later in this blog post)
  • +

+
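+To gauge the replication lag before a planned failover, we can check the creation time of the newest replicated snapshot on f1 (a sketch, assuming the zdata/sink/f0/... layout of our zrepl sink):
+
+ +
+# The newest snapshot shows how fresh the standby copy is
+paul@f1:~ % doas zfs list -t snapshot -o name,creation -s creation -r zdata/sink/f0/zdata/enc/nfsdata | tail -1
+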

Conclusion


+
+We've built a robust, encrypted storage system for our FreeBSD-based Kubernetes cluster that provides:
+
+
    +
  • High Availability: CARP ensures the storage VIP moves automatically during failures
  • +
  • Data Protection: ZFS encryption protects data at rest, stunnel protects data in transit
  • +
  • Continuous Replication: 1-minute RPO for the data, automated via zrepl
  • +
  • Secure Access: Client certificate authentication prevents unauthorised access
  • +

+Some key lessons learned are:
+
+
    +
  • Stunnel vs Native NFS/TLS: While native encryption would be ideal, stunnel provides better cross-platform compatibility
  • +
  • Manual vs Automatic Failover: For storage systems, controlled failover often prevents more problems than it causes
  • +
  • Client Compatibility: Different NFS implementations behave differently - test thoroughly
  • +

+

Future Storage Explorations


+
+While zrepl provides excellent snapshot-based replication for disaster recovery, there are other storage technologies worth exploring for the f3s project:
+
+

MinIO for S3-Compatible Object Storage


+
+MinIO is a high-performance, S3-compatible object storage system that could complement our ZFS-based storage. Some potential use cases:
+
+
    +
  • S3 API compatibility: Many modern applications expect S3-style object storage APIs. MinIO could provide this interface while using our ZFS storage as the backend.
  • +
  • Multi-site replication: MinIO supports active-active replication across multiple sites, which could work well with our f0/f1/f2 node setup.
  • +
  • Kubernetes native: MinIO has excellent Kubernetes integration with operators and CSI drivers, making it ideal for the f3s k3s environment.
  • +

+

MooseFS for Distributed High Availability


+
+MooseFS is a fault-tolerant, distributed file system that could provide proper high-availability storage:
+
+
    +
  • True HA: Unlike our current setup, which requires manual failover, MooseFS provides automatic failover with no single point of failure.
  • +
  • POSIX compliance: Applications can use MooseFS like any regular filesystem, no code changes needed.
  • +
  • Flexible redundancy: Configure different replication levels per directory or file, optimising storage efficiency.
  • +
  • FreeBSD support: MooseFS has native FreeBSD support, making it a natural fit for the f3s project.
  • +

+Both technologies could run on top of our encrypted ZFS volumes, combining ZFS's data integrity and encryption features with distributed storage capabilities. This would be particularly interesting for workloads that need either S3-compatible APIs (MinIO) or transparent distributed POSIX storage (MooseFS).
+
+I'm looking forward to the next post in this series, where we will set up k3s (Kubernetes) on the Linux VMs.
+
+Other *BSD-related posts:
+
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage (You are currently reading this)
+2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
+2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
+2024-12-03 f3s: Kubernetes with FreeBSD - Part 2: Hardware and base installation
+2024-11-17 f3s: Kubernetes with FreeBSD - Part 1: Setting the stage
+2024-04-01 KISS high-availability with OpenBSD
+2024-01-13 One reason why I love OpenBSD
+2022-10-30 Installing DTail on OpenBSD
+2022-07-30 Let's Encrypt with OpenBSD and Rex
+2016-04-09 Jails and ZFS with Puppet on FreeBSD
+
+E-Mail your comments to paul@nospam.buetow.org
+
+Back to the main site
+ + + diff --git a/gemfeed/DRAFT-f3s-kubernetes-with-freebsd-part-6.html b/gemfeed/DRAFT-f3s-kubernetes-with-freebsd-part-6.html deleted file mode 100644 index 44e4e743..00000000 --- a/gemfeed/DRAFT-f3s-kubernetes-with-freebsd-part-6.html +++ /dev/null @@ -1,3028 +0,0 @@ - - - - -f3s: Kubernetes with FreeBSD - Part 6: Storage - - - - - -

-Home | Markdown | Gemini -

-

f3s: Kubernetes with FreeBSD - Part 6: Storage


-
-Published at 2025-04-04T23:21:01+03:00
-
-This is the sixth blog post about the f3s series for self-hosting demands in a home lab. f3s? The "f" stands for FreeBSD, and the "3s" stands for k3s, the Kubernetes distribution used on FreeBSD-based physical machines.
-
-2024-11-17 f3s: Kubernetes with FreeBSD - Part 1: Setting the stage
-2024-12-03 f3s: Kubernetes with FreeBSD - Part 2: Hardware and base installation
-2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
-2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
-2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
-
-f3s logo
-
-

Table of Contents


-
-
-

Introduction


-
-In the previous posts, we set up a FreeBSD-based Kubernetes cluster using k3s. While the base system works well, Kubernetes workloads often require persistent storage for databases, configuration files, and application data. Local storage on each node has significant limitations:
-
-
    -
  • No data sharing: Pods on different nodes can't access the same data
  • -
  • Pod mobility: If a pod moves to another node, it loses access to its data
  • -
  • No redundancy: Hardware failure means data loss
  • -

-This post implements a robust storage solution using:
-
-
    -
  • CARP: For high availability with automatic IP failover
  • -
  • NFS over stunnel: For secure, encrypted network storage
  • -
  • ZFS: For data integrity, encryption, and efficient snapshots
  • -
  • zrepl: For continuous ZFS replication between nodes
  • -

-The end result is a highly available, encrypted storage system that survives node failures while providing shared storage to all Kubernetes pods.
-
-

Additional storage capacity


-
-We add an additional 1TB SSD to each of the nodes (f0, f1, f2). The Beelink mini PCs have enough space in the chassis for the extra drive.
-
-
-
-Upgrading the storage was as easy as unscrewing, plugging the drive in, and then screwing it together again. So the procedure was pretty uneventful! We're using two different SSD models (Samsung 870 EVO and Crucial BX500) to avoid simultaneous failures from the same manufacturing batch.
-
-We then create the zdata ZFS pool on all three nodes:
-
- -
paul@f0:~ % doas zpool create -m /data zdata /dev/ada1
-paul@f0:~ % zpool list
-NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
-zdata   928G  12.1M   928G        -         -     0%     0%  1.00x    ONLINE  -
-zroot   472G  29.0G   443G        -         -     0%     6%  1.00x    ONLINE  -
-
-paul@f0:/ % doas camcontrol devlist
-<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
-<Samsung SSD 870 EVO 1TB SVT03B6Q>  at scbus1 target 0 lun 0 (pass1,ada1)
-paul@f0:/ %
-
-
-To verify that we have a different SSD on the second node (the third node has the same drive as the first):
-
- -
paul@f1:/ % doas camcontrol devlist
-<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
-<CT1000BX500SSD1 M6CR072>          at scbus1 target 0 lun 0 (pass1,ada1)
-
-
-

ZFS encryption keys


-
-ZFS native encryption requires encryption keys to unlock datasets. We need a secure method to store these keys that balances security with operational needs:
-
-
    -
  • Security: Keys must not be stored on the same disks they encrypt
  • -
  • Availability: Keys must be available at boot for automatic mounting
  • -
  • Portability: Keys should be easily moved between systems for recovery
  • -

-Using USB flash drives as hardware key storage provides an elegant solution. The encrypted data is unreadable without physical access to the USB key, protecting against disk theft or improper disposal. In production environments, you might use enterprise key management systems, but for a home lab, USB keys offer good security with minimal complexity.
-
-

UFS on USB keys


-
-We'll format the USB drives with UFS (Unix File System) rather than ZFS for simplicity. There is no need to use ZFS.
-
-Let's see the USB keys:
-
-USB keys
-
-To verify that the USB key (flash disk) is there:
-
-
-paul@f0:/ % doas camcontrol devlist
-<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
-<Samsung SSD 870 EVO 1TB SVT03B6Q>  at scbus1 target 0 lun 0 (pass1,ada1)
-<Generic Flash Disk 8.07>          at scbus2 target 0 lun 0 (da0,pass2)
-paul@f0:/ %
-
-
-Let's create the UFS file system and mount it (done on all 3 nodes f0, f1 and f2):
-
- -
paul@f0:/ % doas newfs /dev/da0
-/dev/da0: 15000.0MB (30720000 sectors) block size 32768, fragment size 4096
-        using 24 cylinder groups of 625.22MB, 20007 blks, 80128 inodes.
-        with soft updates
-super-block backups (for fsck_ffs -b #) at:
- 192, 1280640, 2561088, 3841536, 5121984, 6402432, 7682880, 8963328, 10243776,
-11524224, 12804672, 14085120, 15365568, 16646016, 17926464, 19206912, 20487360,
-...
-
-paul@f0:/ % echo '/dev/da0 /keys ufs rw 0 2' | doas tee -a /etc/fstab
-/dev/da0 /keys ufs rw 0 2
-paul@f0:/ % doas mkdir /keys
-paul@f0:/ % doas mount /keys
-paul@f0:/ % df | grep keys
-/dev/da0             14877596       8  13687384     0%    /keys
-
-
-USB keys plugged in
-
-

Generating encryption keys


-
-The following keys will later be used to encrypt the ZFS file systems. They will be stored on all three nodes, serving as a backup in case one of the keys is lost. When we later replicate encrypted ZFS volumes from one node to another, the keys must also be available on the destination node.
-
-
-paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:bhyve.key 32
-paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:bhyve.key 32
-paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:bhyve.key 32
-paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:zdata.key 32
-paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:zdata.key 32
-paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:zdata.key 32
-paul@f0:/keys % doas chown root *
-paul@f0:/keys % doas chmod 400 *
-
-paul@f0:/keys % ls -l
-total 20
--r--------  1 root wheel 32 May 25 13:07 f0.lan.buetow.org:bhyve.key
--r--------  1 root wheel 32 May 25 13:07 f1.lan.buetow.org:bhyve.key
--r--------  1 root wheel 32 May 25 13:07 f2.lan.buetow.org:bhyve.key
--r--------  1 root wheel 32 May 25 13:07 f0.lan.buetow.org:zdata.key
--r--------  1 root wheel 32 May 25 13:07 f1.lan.buetow.org:zdata.key
--r--------  1 root wheel 32 May 25 13:07 f2.lan.buetow.org:zdata.key
-
-
-After creation, these are copied to the other two nodes, f1 and f2, into the /keys partition (I won't provide the commands here; just create a tarball, copy it over, and extract it on the destination nodes).
-
-

Configuring zdata ZFS pool encryption


-
-Let's encrypt our zdata ZFS pool. Actually, we are not encrypting the whole pool, but everything within the zdata/enc data set:
-
- -
paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o keylocation=file:///keys/`hostname`:zdata.key zdata/enc
-paul@f0:/ % zfs list | grep zdata
-zdata                                          836K   899G    96K  /data
-zdata/enc                                      200K   899G   200K  /data/enc
-
-paul@f0:/keys % zfs get all zdata/enc | grep -E -i '(encryption|key)'
-zdata/enc  encryption            aes-256-gcm                               -
-zdata/enc  keylocation           file:///keys/f0.lan.buetow.org:zdata.key  local
-zdata/enc  keyformat             raw                                       -
-zdata/enc  encryptionroot        zdata/enc                                 -
-zdata/enc  keystatus             available                                 -
-
-
-All future data sets within zdata/enc will inherit the same encryption key.
-
-

Migrating Bhyve VMs to encrypted bhyve ZFS volume


-
-We set up Bhyve VMs in one of the previous blog posts. Their ZFS data sets rely on zroot, which is the default ZFS pool on the internal 512GB NVME drive. They aren't encrypted yet, so we encrypt the VM data sets as well now. To do so, we first shut down the VMs on all 3 nodes:
-
- -
paul@f0:/keys % doas vm stop rocky
-Sending ACPI shutdown to rocky
-
-paul@f0:/keys % doas vm list
-NAME     DATASTORE  LOADER     CPU  MEMORY  VNC  AUTO     STATE
-rocky    default    uefi       4    14G     -    Yes [1]  Stopped
-
-
-After this, we rename the unencrypted data set to _old and create a new encrypted data set and we also snapshot it as @hamburger!
-
- -
paul@f0:/keys % doas zfs rename zroot/bhyve zroot/bhyve_old
-paul@f0:/keys % doas zfs set mountpoint=/mnt zroot/bhyve_old
-paul@f0:/keys % doas zfs snapshot zroot/bhyve_old/rocky@hamburger
-
-paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o \
-  keylocation=file:///keys/`hostname`:bhyve.key zroot/bhyve
-paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve zroot/bhyve
-paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve/rocky zroot/bhyve/rocky
-
-
-Once done, we import the snapshot into the encrypted dataset and also copy some other metadata files from vm-bhyve back over.
-
-
-paul@f0:/keys % doas zfs send zroot/bhyve_old/rocky@hamburger | \
-  doas zfs recv zroot/bhyve/rocky
-paul@f0:/keys % doas cp -Rp /mnt/.config /zroot/bhyve/
-paul@f0:/keys % doas cp -Rp /mnt/.img /zroot/bhyve/
-paul@f0:/keys % doas cp -Rp /mnt/.templates /zroot/bhyve/
-paul@f0:/keys % doas cp -Rp /mnt/.iso /zroot/bhyve/
-
-
-We also have to make encrypted ZFS data sets mount automatically on boot:
-
- -
paul@f0:/keys % doas sysrc zfskeys_enable=YES
-zfskeys_enable:  -> YES
-paul@f0:/keys % doas vm init
-paul@f0:/keys % doas reboot
-.
-.
-.
-paul@f0:~ % doas vm list
-paul@f0:~ % doas vm list
-NAME     DATASTORE  LOADER     CPU  MEMORY  VNC           AUTO     STATE
-rocky    default    uefi       4    14G     0.0.0.0:5900  Yes [1]  Running (2265)
-
-
-As you can see, the VM is running. This means the encrypted zroot/bhyve was mounted successfully after the reboot! Now we can destroy the old, unencrypted, and now unused bhyve dataset:
-
- -
paul@f0:~ % doas zfs destroy -R zroot/bhyve_old
-
-
-To verify once again that zroot/bhyve and zroot/bhyve/rocky are now both encrypted, we run:
-
- -
paul@f0:~ % zfs get all zroot/bhyve | grep -E '(encryption|key)'
-zroot/bhyve  encryption            aes-256-gcm                               -
-zroot/bhyve  keylocation           file:///keys/f0.lan.buetow.org:bhyve.key  local
-zroot/bhyve  keyformat             raw                                       -
-zroot/bhyve  encryptionroot        zroot/bhyve                               -
-zroot/bhyve  keystatus             available                                 -
-
-paul@f0:~ % zfs get all zroot/bhyve/rocky | grep -E '(encryption|key)'
-zroot/bhyve/rocky  encryption            aes-256-gcm            -
-zroot/bhyve/rocky  keylocation           none                   default
-zroot/bhyve/rocky  keyformat             raw                    -
-zroot/bhyve/rocky  encryptionroot        zroot/bhyve            -
-zroot/bhyve/rocky  keystatus             available              -
-
-
-

ZFS Replication with zrepl


-
-Data replication is the cornerstone of high availability. While CARP handles IP failover (see later in this post), we need continuous data replication to ensure the backup server has current data when it becomes active. Without replication, failover would result in data loss or require shared storage (like iSCSI), which introduces a single point of failure.
-
-

Understanding Replication Requirements


-
-Our storage system has different replication needs:
-
-
    -
  • NFS data (/data/nfs/k3svolumes): Contains active Kubernetes persistent volumes. Needs frequent replication (every minute) to minimize data loss during failover.
  • -
  • VM data (/zroot/bhyve/fedora): Contains VM images that change less frequently. Can tolerate longer replication intervals (every 10 minutes).
  • -

-The replication frequency determines your Recovery Point Objective (RPO) - the maximum acceptable data loss. With 1-minute replication, you lose at most 1 minute of changes during an unplanned failover.
-
-

Why zrepl instead of HAST?


-
-While HAST (Highly Available Storage) is FreeBSD's native solution for high-availability storage, I've chosen zrepl for several important reasons:
-
-
    -
  • HAST can cause ZFS corruption: HAST operates at the block level and doesn't understand ZFS's transactional semantics. During failover, in-flight transactions can lead to corrupted zpools. I've experienced this firsthand - the automatic failover would trigger while ZFS was still writing, resulting in an unmountable pool.
  • -
  • ZFS-aware replication: zrepl understands ZFS datasets and snapshots. It replicates at the dataset level, ensuring each snapshot is a consistent point-in-time copy. This is fundamentally safer than block-level replication.
  • -
  • Snapshot history: With zrepl, you get multiple recovery points (every minute for NFS data in our setup). If corruption occurs, you can roll back to any previous snapshot. HAST only gives you the current state.
  • -
  • Easier recovery: When something goes wrong with zrepl, you still have intact snapshots on both sides. With HAST, a corrupted primary often means a corrupted secondary too.
  • -

-The 1-minute replication window is perfectly acceptable for my personal use cases. This isn't a high-frequency trading system or a real-time database—it's storage for personal projects, development work, and home lab experiments. Losing at most 1 minute of work in a disaster scenario is a reasonable trade-off for the reliability and simplicity of snapshot-based replication. Also, in the case of "1 minute of data loss," I would very likely still have the data available on the client side.
-
-

Installing zrepl


-
-First, install zrepl on both hosts involved (we will replicate data from f0 to f1):
-
- -
paul@f0:~ % doas pkg install -y zrepl
-
-
-

Checking ZFS pools


-
-Verify the pools and datasets on both hosts:
-
- -
# On f0
-paul@f0:~ % doas zpool list
-NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
-zdata   928G  1.03M   928G        -         -     0%     0%  1.00x    ONLINE  -
-zroot   472G  26.7G   445G        -         -     0%     5%  1.00x    ONLINE  -
-
-paul@f0:~ % doas zfs list -r zdata/enc
-NAME        USED  AVAIL  REFER  MOUNTPOINT
-zdata/enc   200K   899G   200K  /data/enc
-
-# On f1
-paul@f1:~ % doas zpool list
-NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
-zdata   928G   956K   928G        -         -     0%     0%  1.00x    ONLINE  -
-zroot   472G  11.7G   460G        -         -     0%     2%  1.00x    ONLINE  -
-
-paul@f1:~ % doas zfs list -r zdata/enc
-NAME        USED  AVAIL  REFER  MOUNTPOINT
-zdata/enc   200K   899G   200K  /data/enc
-
-
-

Configuring zrepl with WireGuard tunnel


-
-Since we have a WireGuard tunnel between f0 and f1, we'll use TCP transport over the secure tunnel instead of SSH. First, check the WireGuard IP addresses:
-
- -
# Check WireGuard interface IPs
-paul@f0:~ % ifconfig wg0 | grep inet
-	inet 192.168.2.130 netmask 0xffffff00
-
-paul@f1:~ % ifconfig wg0 | grep inet
-	inet 192.168.2.131 netmask 0xffffff00
-
-
-

Configuring zrepl on f0 (source)


-
-First, create a dedicated dataset for NFS data that will be replicated:
-
- -
# Create the nfsdata dataset that will hold all data exposed via NFS
-paul@f0:~ % doas zfs create zdata/enc/nfsdata
-
-
-Create the zrepl configuration on f0:
-
- -
paul@f0:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
-global:
-  logging:
-    - type: stdout
-      level: info
-      format: human
-
-jobs:
-  - name: f0_to_f1_nfsdata
-    type: push
-    connect:
-      type: tcp
-      address: "192.168.2.131:8888"
-    filesystems:
-      "zdata/enc/nfsdata": true
-    send:
-      encrypted: true
-    snapshotting:
-      type: periodic
-      prefix: zrepl_
-      interval: 1m
-    pruning:
-      keep_sender:
-        - type: last_n
-          count: 10
-      keep_receiver:
-        - type: last_n
-          count: 10
-
-  - name: f0_to_f1_fedora
-    type: push
-    connect:
-      type: tcp
-      address: "192.168.2.131:8888"
-    filesystems:
-      "zroot/bhyve/fedora": true
-    send:
-      encrypted: true
-    snapshotting:
-      type: periodic
-      prefix: zrepl_
-      interval: 10m
-    pruning:
-      keep_sender:
-        - type: last_n
-          count: 10
-      keep_receiver:
-        - type: last_n
-          count: 10
-EOF
-
-
- We're using two separate replication jobs with different intervals:
-
-
    -
  • f0_to_f1_nfsdata: Replicates NFS data every minute for faster failover recovery
  • -
  • f0_to_f1_fedora: Replicates Fedora VM every 10 minutes (less critical for NFS operations)
  • -

-The Fedora VM is only used for development purposes, so it doesn't require replication as frequent as the NFS data. It's off-topic for this blog series, but it showcases zrepl's flexibility in handling different datasets with varying replication needs.
-
-Furthermore:
-
-
    -
  • We're specifically replicating zdata/enc/nfsdata instead of the entire zdata/enc dataset. This dedicated dataset will contain all the data we later want to expose via NFS, keeping a clear separation between replicated NFS data and other local encrypted data.
  • -
  • The send: encrypted: false option disables ZFS native encryption for the replication stream. Since we're using a WireGuard tunnel between f0 and f1, the data is already encrypted in transit. Disabling ZFS stream encryption reduces CPU overhead and improves replication performance.
  • -

-

Configuring zrepl on f1 (sink)


-
-On f1 we configure zrepl to receive the data as follows:
-
- -
# First create a dedicated sink dataset
-paul@f1:~ % doas zfs create zdata/sink
-
-paul@f1:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
-global:
-  logging:
-    - type: stdout
-      level: info
-      format: human
-
-jobs:
-  - name: sink
-    type: sink
-    serve:
-      type: tcp
-      listen: "192.168.2.131:8888"
-      clients:
-        "192.168.2.130": "f0"
-    recv:
-      placeholder:
-        encryption: inherit
-    root_fs: "zdata/sink"
-EOF
-
-
-

Enabling and starting zrepl services


-
-Enable and start zrepl on both hosts:
-
- -
# On f0
-paul@f0:~ % doas sysrc zrepl_enable=YES
-zrepl_enable:  -> YES
-paul@f0:~ % doas service `zrepl` start
-Starting zrepl.
-
-# On f1
-paul@f1:~ % doas sysrc zrepl_enable=YES
-zrepl_enable:  -> YES
-paul@f1:~ % doas service `zrepl` start
-Starting zrepl.
-
-
-

Verifying replication


-
-To check the replication status, we run:
-
- -
# On f0, check `zrepl` status (use raw mode for non-tty)
-paul@f0:~ % doas `zrepl` status --mode raw | grep -A2 "Replication"
-"Replication":{"StartAt":"2025-07-01T22:31:48.712143123+03:00"...
-
-# Check if services are running
-paul@f0:~ % doas service `zrepl` status
-zrepl is running as pid 2649.
-
-paul@f1:~ % doas service `zrepl` status
-zrepl is running as pid 2574.
-
-# Check for `zrepl` snapshots on source
-paul@f0:~ % doas zfs list -t snapshot -r zdata/enc | grep zrepl
-zdata/enc@zrepl_20250701_193148_000    0B      -   176K  -
-
-# On f1, verify the replicated datasets  
-paul@f1:~ % doas zfs list -r zdata | grep f0
-zdata/f0             576K   899G   200K  none
-zdata/f0/zdata       376K   899G   200K  none
-zdata/f0/zdata/enc   176K   899G   176K  none
-
-# Check replicated snapshots on f1
-paul@f1:~ % doas zfs list -t snapshot -r zdata | grep zrepl
-zdata/f0/zdata/enc@zrepl_20250701_193148_000     0B      -   176K  -
-zdata/f0/zdata/enc@zrepl_20250701_194148_000     0B      -   176K  -
-
-
-

Monitoring replication


-
-You can monitor the replication progress with:
-
- -
# Real-time status
-paul@f0:~ % doas `zrepl` status --mode interactive
-
-# Check specific job details
-paul@f0:~ % doas `zrepl` status --job f0_to_f1
-
-
-With this setup, both zdata/enc/nfsdata and zroot/bhyve/fedora on f0 will be automatically replicated to f1 every 1 (or 10 in case of the Fedora VM) minutes, with encrypted snapshots preserved on both sides. The pruning policy ensures that we keep the last 10 snapshots while managing disk space efficiently.
-
-The replicated data appears on f1 under zdata/sink/ with the source host and dataset hierarchy preserved:
-
-
    -
  • zdata/enc/nfsdatazdata/sink/f0/zdata/enc/nfsdata
  • -
  • zroot/bhyve/fedorazdata/sink/f0/zroot/bhyve/fedora
  • -

-This is by design - zrepl preserves the complete path from the source to ensure there are no conflicts when replicating from multiple sources. The replication uses the WireGuard tunnel for secure, encrypted transport between nodes.
-
-

Verifying replication after reboot


-
-The zrepl service is configured to start automatically at boot. After rebooting both hosts:
-
- -
paul@f0:~ % uptime
-11:17PM  up 1 min, 0 users, load averages: 0.16, 0.06, 0.02
-
-paul@f0:~ % doas service `zrepl` status
-zrepl is running as pid 2366.
-
-paul@f1:~ % doas service `zrepl` status
-zrepl is running as pid 2309.
-
-# Check that new snapshots are being created and replicated
-paul@f0:~ % doas zfs list -t snapshot | grep `zrepl` | tail -2
-zdata/enc/nfsdata@zrepl_20250701_202530_000                0B      -   200K  -
-zroot/bhyve/fedora@zrepl_20250701_202530_000               0B      -  2.97G  -
-
-paul@f1:~ % doas zfs list -t snapshot -r zdata/sink | grep 202530
-zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_202530_000      0B      -   176K  -
-zdata/sink/f0/zroot/bhyve/fedora@zrepl_20250701_202530_000     0B      -  2.97G  -
-
-
-The timestamps confirm that replication resumed automatically after the reboot, ensuring continuous data protection.
-
-

Understanding Failover Limitations and Design Decisions


-
-
-
-#### Why Manual Failover?
-
-This storage system intentionally uses manual failover rather than automatic failover. This might seem counterintuitive for a "high availability" system, but it's a deliberate design choice based on real-world experience:
-
-1. Split-brain prevention: Automatic failover can cause both nodes to become active simultaneously if network communication fails. This leads to data divergence that's extremely difficult to resolve.
-
-2. False positive protection: Temporary network issues or high load can trigger unwanted failovers. Manual intervention ensures failovers only occur when truly necessary.
-
-3. Data integrity over availability: For storage systems, data consistency is paramount. A few minutes of downtime is preferable to data corruption or loss.
-
-4. Simplified recovery: With manual failover, you always know which dataset is authoritative, making recovery straightforward.
-
-#### Current Failover Process
-
-The replicated datasets on f1 are intentionally not mounted (mountpoint=none). In case f0 fails:
-
- -
# Manual steps needed on f1 to activate the replicated data:
-paul@f1:~ % doas zfs set mountpoint=/data/nfsdata zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
-
-
-However, this creates a split-brain problem: when f0 comes back online, both systems would have diverged data. Resolving this requires careful manual intervention to:
-
-1. Stop the original replication
-2. Sync changes from f1 back to f0
-3. Re-establish normal replication
-
-For true high-availability NFS, you might consider:
-
-
    -
  • Shared storage (like iSCSI) with proper clustering
  • -
  • GlusterFS or similar distributed filesystems
  • -
  • Manual failover with ZFS replication (as we have here)
  • -

-Note: While HAST+CARP is often suggested for HA storage, it can cause filesystem corruption in practice, especially with ZFS. The block-level replication of HAST doesn't understand ZFS's transactional model, leading to inconsistent states during failover.
-
-The current zrepl setup, despite requiring manual intervention, is actually safer because:
-
-
    -
  • ZFS snapshots are always consistent
  • -
  • Replication is ZFS-aware (not just block-level)
  • -
  • You have full control over the failover process
  • -
  • No risk of split-brain corruption
  • -

-

Mounting the NFS datasets


-
-To make the nfsdata accessible on both nodes, we need to mount them. On f0, this is straightforward:
-
- -
# On f0 - set mountpoint for the primary nfsdata
-paul@f0:~ % doas zfs set mountpoint=/data/nfs zdata/enc/nfsdata
-paul@f0:~ % doas mkdir -p /data/nfs
-
-# Verify it's mounted
-paul@f0:~ % df -h /data/nfs
-Filesystem           Size    Used   Avail Capacity  Mounted on
-zdata/enc/nfsdata    899G    204K    899G     0%    /data/nfs
-
-
-On f1, we need to handle the encryption key and mount the standby copy:
-
- -
# On f1 - first check encryption status
-paul@f1:~ % doas zfs get keystatus zdata/sink/f0/zdata/enc/nfsdata
-NAME                             PROPERTY   VALUE        SOURCE
-zdata/sink/f0/zdata/enc/nfsdata  keystatus  unavailable  -
-
-# Load the encryption key (using f0's key stored on the USB)
-paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key \
-    zdata/sink/f0/zdata/enc/nfsdata
-
-# Set mountpoint and mount (same path as f0 for easier failover)
-paul@f1:~ % doas mkdir -p /data/nfs
-paul@f1:~ % doas zfs set mountpoint=/data/nfs zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
-
-# Make it read-only to prevent accidental writes that would break replication
-paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
-
-# Verify
-paul@f1:~ % df -h /data/nfs
-Filesystem                         Size    Used   Avail Capacity  Mounted on
-zdata/sink/f0/zdata/enc/nfsdata    896G    204K    896G     0%    /data/nfs
-
-
-Note: The dataset is mounted at the same path (/data/nfs) on both hosts to simplify failover procedures. The dataset on f1 is set to readonly=on to prevent accidental modifications that would break replication.
-
-CRITICAL WARNING: Do NOT write to /data/nfs/ on f1! Any modifications will break the replication. If you accidentally write to it, you'll see this error:
-
-
-cannot receive incremental stream: destination zdata/sink/f0/zdata/enc/nfsdata has been modified
-since most recent snapshot
-
-
-To fix a broken replication after accidental writes:
- -
# Option 1: Rollback to the last common snapshot (loses local changes)
-paul@f1:~ % doas zfs rollback zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_204054_000
-
-# Option 2: Make it read-only to prevent accidents
-paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
-
-
-

Failback scenario: Syncing changes from f1 back to f0


-
-In a disaster recovery scenario where f0 has failed and f1 has taken over, you'll need to sync changes back when f0 returns. Here's how to failback:
-
- -
# On f1: First, make the dataset writable (if it was readonly)
-paul@f1:~ % doas zfs set readonly=off zdata/sink/f0/zdata/enc/nfsdata
-
-# Create a snapshot of the current state
-paul@f1:~ % doas zfs snapshot zdata/sink/f0/zdata/enc/nfsdata@failback
-
-# On f0: Stop any services using the dataset
-paul@f0:~ % doas service nfsd stop  # If NFS is running
-
-# Send the snapshot from f1 to f0, forcing a rollback
-# This WILL DESTROY any data on f0 that's not on f1!
-paul@f1:~ % doas zfs send -R zdata/sink/f0/zdata/enc/nfsdata@failback | \
-    ssh f0 "doas zfs recv -F zdata/enc/nfsdata"
-
-# Alternative: If you want to see what would be received first
-paul@f1:~ % doas zfs send -R zdata/sink/f0/zdata/enc/nfsdata@failback | \
-    ssh f0 "doas zfs recv -nv -F zdata/enc/nfsdata"
-
-# After successful sync, on f0:
-paul@f0:~ % doas zfs destroy zdata/enc/nfsdata@failback
-
-# On f1: Make it readonly again and destroy the failback snapshot
-paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs destroy zdata/sink/f0/zdata/enc/nfsdata@failback
-
-# Stop `zrepl` services first - CRITICAL!
-paul@f0:~ % doas service `zrepl` stop
-paul@f1:~ % doas service `zrepl` stop
-
-# Clean up any `zrepl` snapshots on f0
-paul@f0:~ % doas zfs list -t snapshot -r zdata/enc/nfsdata | grep `zrepl` | \
-    awk '{print $1}' | xargs -I {} doas zfs destroy {}
-
-# Clean up and destroy the entire replicated structure on f1
-# First release any holds
-paul@f1:~ % doas zfs holds -r zdata/sink/f0 | grep -v NAME | \
-    awk '{print $2, $1}' | while read tag snap; do 
-        doas zfs release "$tag" "$snap"
-    done
-
-# Then destroy the entire f0 tree
-paul@f1:~ % doas zfs destroy -rf zdata/sink/f0
-
-# Create parent dataset structure on f1
-paul@f1:~ % doas zfs create -p zdata/sink/f0/zdata/enc
-
-# Create a fresh manual snapshot to establish baseline
-paul@f0:~ % doas zfs snapshot zdata/enc/nfsdata@manual_baseline
-
-# Send this snapshot to f1
-paul@f0:~ % doas zfs send -w zdata/enc/nfsdata@manual_baseline | \
-    ssh f1 "doas zfs recv zdata/sink/f0/zdata/enc/nfsdata"
-
-# Clean up the manual snapshot
-paul@f0:~ % doas zfs destroy zdata/enc/nfsdata@manual_baseline
-paul@f1:~ % doas zfs destroy zdata/sink/f0/zdata/enc/nfsdata@manual_baseline
-
-# Set mountpoint and make readonly on f1
-paul@f1:~ % doas zfs set mountpoint=/data/nfs zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
-
-# Load encryption key and mount on f1
-paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key \
-    zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
-
-# Now restart `zrepl` services
-paul@f0:~ % doas service `zrepl` start
-paul@f1:~ % doas service `zrepl` start
-
-# Verify replication is working
-paul@f0:~ % doas `zrepl` status --job f0_to_f1
-
-
-Important notes about failback:
-
-
    -
  • The -F flag forces a rollback on f0, destroying any local changes
  • -
  • Replication often won't resume automatically after a forced receive
  • -
  • You must clean up old zrepl snapshots on both sides
  • -
  • Creating a manual snapshot helps re-establish the replication relationship
  • -
  • Always verify replication status after the failback procedure
  • -
  • The first replication after failback will be a full send of the current state
  • -

-

Testing the failback scenario


-
-Here's a real test of the failback procedure:
-
- -
# Simulate failure: Stop replication on f0
-paul@f0:~ % doas service `zrepl` stop
-
-# On f1: Take over by making the dataset writable
-paul@f1:~ % doas zfs set readonly=off zdata/sink/f0/zdata/enc/nfsdata
-
-# Write some data on f1 during the "outage"
-paul@f1:~ % echo 'Data written on f1 during failover' | doas tee /data/nfs/failover-data.txt
-Data written on f1 during failover
-
-# Now perform failback when f0 comes back online
-# Create snapshot on f1
-paul@f1:~ % doas zfs snapshot zdata/sink/f0/zdata/enc/nfsdata@failback
-
-# Send data back to f0 (note: we had to send to a temporary dataset due to holds)
-paul@f1:~ % doas zfs send -Rw zdata/sink/f0/zdata/enc/nfsdata@failback | \
-    ssh f0 "doas zfs recv -F zdata/enc/nfsdata_temp"
-
-# On f0: Rename datasets to complete failback
-paul@f0:~ % doas zfs set mountpoint=none zdata/enc/nfsdata
-paul@f0:~ % doas zfs rename zdata/enc/nfsdata zdata/enc/nfsdata_old
-paul@f0:~ % doas zfs rename zdata/enc/nfsdata_temp zdata/enc/nfsdata
-
-# Load encryption key and mount
-paul@f0:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
-paul@f0:~ % doas zfs mount zdata/enc/nfsdata
-
-# Verify the data from f1 is now on f0
-paul@f0:~ % ls -la /data/nfs/
-total 18
-drwxr-xr-x  2 root wheel  4 Jul  2 00:01 .
-drwxr-xr-x  4 root wheel  4 Jul  1 23:41 ..
--rw-r--r--  1 root wheel 35 Jul  2 00:01 failover-data.txt
--rw-r--r--  1 root wheel 12 Jul  1 23:34 hello.txt
-
-
-Success! The failover data from f1 is now on f0. To resume normal replication, you would need to:
-
-1. Clean up old snapshots on both sides
-2. Create a new manual baseline snapshot
-3. Restart zrepl services
-
-Key learnings from the test:
-
-
    -
  • The -w flag is essential for encrypted datasets
  • -
  • Dataset holds can complicate the process (consider sending to a temporary dataset)
  • -
  • The encryption key must be loaded after receiving the dataset
  • -
  • Always verify data integrity before resuming normal operations
  • -

-

Troubleshooting: Files not appearing in replication


-
-If you write files to /data/nfs/ on f0 but they don't appear on f1, check:
-
- -
# 1. Is the dataset actually mounted on f0?
-paul@f0:~ % doas zfs list -o name,mountpoint,mounted | grep nfsdata
-zdata/enc/nfsdata                             /data/nfs             yes
-
-# If it shows "no", the dataset isn't mounted!
-# This means files are being written to the root filesystem, not ZFS
-
-# 2. Check if encryption key is loaded
-paul@f0:~ % doas zfs get keystatus zdata/enc/nfsdata
-NAME               PROPERTY   VALUE        SOURCE
-zdata/enc/nfsdata  keystatus  available    -
-
-# If "unavailable", load the key:
-paul@f0:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
-paul@f0:~ % doas zfs mount zdata/enc/nfsdata
-
-# 3. Verify files are in the snapshot (not just the directory)
-paul@f0:~ % ls -la /data/nfs/.zfs/snapshot/zrepl_*/
-
-
-This issue commonly occurs after reboot if the encryption keys aren't configured to load automatically.
-
-

Configuring automatic key loading on boot


-
-To ensure all encrypted datasets are mounted automatically after reboot:
-
- -
# On f0 - configure all encrypted datasets
-paul@f0:~ % doas sysrc zfskeys_enable=YES
-zfskeys_enable: NO -> YES
-paul@f0:~ % doas sysrc zfskeys_datasets="zdata/enc zdata/enc/nfsdata zroot/bhyve"
-zfskeys_datasets:  -> zdata/enc zdata/enc/nfsdata zroot/bhyve
-
-# Set correct key locations for all datasets
-paul@f0:~ % doas zfs set keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
-
-# On f1 - include the replicated dataset
-paul@f1:~ % doas sysrc zfskeys_enable=YES
-zfskeys_enable: NO -> YES
-paul@f1:~ % doas sysrc zfskeys_datasets="zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata"
-zfskeys_datasets:  -> zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata
-
-# Set key location for replicated dataset
-paul@f1:~ % doas zfs set keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/sink/f0/zdata/enc/nfsdata
-
-
-Important notes:
-
    -
  • Each encryption root needs its own key load entry - child datasets don't inherit key loading
  • -
  • The replicated dataset on f1 uses the same encryption key as the source on f0
  • -
  • Always verify datasets are mounted after reboot with zfs list -o name,mounted
  • -
  • Critical: Always ensure the replicated dataset on f1 remains read-only with doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
  • -

-

Troubleshooting: Replication broken due to modified destination


-
-If you see the error "cannot receive incremental stream: destination has been modified since most recent snapshot", it means the read-only flag was accidentally removed on f1. To fix without a full resync:
-
- -
# Stop `zrepl` on both servers
-paul@f0:~ % doas service `zrepl` stop
-paul@f1:~ % doas service `zrepl` stop
-
-# Find the last common snapshot
-paul@f0:~ % doas zfs list -t snapshot -o name,creation zdata/enc/nfsdata
-paul@f1:~ % doas zfs list -t snapshot -o name,creation zdata/sink/f0/zdata/enc/nfsdata
-
-# Rollback f1 to the last common snapshot (example: @zrepl_20250705_000007_000)
-paul@f1:~ % doas zfs rollback -r zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250705_000007_000
-
-# Ensure the dataset is read-only
-paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
-
-# Restart zrepl
-paul@f0:~ % doas service `zrepl` start
-paul@f1:~ % doas service `zrepl` start
-
-
-

Forcing a full resync


-
-If replication gets out of sync and incremental updates fail:
-
- -
# Stop services
-paul@f0:~ % doas service `zrepl` stop
-paul@f1:~ % doas service `zrepl` stop
-
-# On f1: Release holds and destroy the dataset
-paul@f1:~ % doas zfs holds -r zdata/sink/f0/zdata/enc/nfsdata | \
-    grep -v NAME | awk '{print $2, $1}' | \
-    while read tag snap; do doas zfs release "$tag" "$snap"; done
-paul@f1:~ % doas zfs destroy -rf zdata/sink/f0/zdata/enc/nfsdata
-
-# On f0: Create fresh snapshot
-paul@f0:~ % doas zfs snapshot zdata/enc/nfsdata@resync
-
-# Send full dataset
-paul@f0:~ % doas zfs send -Rw zdata/enc/nfsdata@resync | \
-    ssh f1 "doas zfs recv zdata/sink/f0/zdata/enc/nfsdata"
-
-# Configure f1
-paul@f1:~ % doas zfs set mountpoint=/data/nfs zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key \
-    zdata/sink/f0/zdata/enc/nfsdata
-paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
-
-# Clean up and restart
-paul@f0:~ % doas zfs destroy zdata/enc/nfsdata@resync
-paul@f1:~ % doas zfs destroy zdata/sink/f0/zdata/enc/nfsdata@resync
-paul@f0:~ % doas service `zrepl` start
-paul@f1:~ % doas service `zrepl` start
-
-
-ZFS auto scrubbing....~?
-
-Backup of the keys on the key locations (all keys on all 3 USB keys)
-
-

CARP (Common Address Redundancy Protocol)


-
-High availability is crucial for storage systems. If the storage server goes down, all pods lose access to their persistent data. CARP provides a solution by creating a virtual IP address that automatically moves between servers during failures.
-
-

How CARP Works


-
-CARP allows two hosts to share a virtual IP address (VIP). The hosts communicate using multicast to elect a MASTER, while the others remain BACKUP. When the MASTER fails, a BACKUP automatically promotes itself, and the VIP moves to the new MASTER. This happens within seconds.
-
-Key benefits for our storage system:
-
-
    -
  • Automatic failover: No manual intervention is required for basic failures, although there are a few limitations. The backup will only have read-only access to the available data, as we will learn later. However, we could manually promote it to read-write if needed.
  • -
  • Transparent to clients: Pods continue using the same IP address
  • -
  • Works with stunnel: Behind the VIP there will be a stunnel process running, which ensures encrypted connections follow the active server
  • -
  • Simple configuration
  • -

-

Configuring CARP


-
-First, add the CARP configuration to /etc/rc.conf on both f0 and f1:
-
- -
# The virtual IP 192.168.1.138 will float between f0 and f1
-ifconfig_re0_alias0="inet vhid 1 pass testpass alias 192.168.1.138/32"
-
-
-Whereas:
-
-
    -
  • vhid 1: Virtual Host ID - must match on all CARP members
  • -
  • pass testpass: Password for CARP authentication (if you follow this, use a different password!)
  • -
  • alias 192.168.1.138/32: The virtual IP address with a /32 netmask
  • -

-Next, update /etc/hosts on all nodes (n0, n1, n2, r0, r1, r2) to resolve the VIP hostname:
-
-
-192.168.1.138 f3s-storage-ha f3s-storage-ha.lan f3s-storage-ha.lan.buetow.org
-
-
-This allows clients to connect to f3s-storage-ha regardless of which physical server is currently the MASTER.
-
-

CARP State Change Notifications


-
-To properly manage services during failover, we need to detect CARP state changes. FreeBSD's devd system can notify us when CARP transitions between MASTER and BACKUP states.
-
-Add this to /etc/devd.conf on both f0 and f1:
-
-paul@f0:~ % cat <<END | doas tee -a /etc/devd.conf
-notify 0 {
- match "system" "CARP";
- match "subsystem" "[0-9]+@[0-9a-z.]+";
- match "type" "(MASTER|BACKUP)";
- action "/usr/local/bin/carpcontrol.sh $subsystem $type";
-};
-END
-
-Next, create the CARP control script that will restart stunnel when CARP state changes:
-
- -
paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
-#!/bin/sh
-# CARP state change control script
-
-case "$1" in
-    MASTER)
-        logger "CARP state changed to MASTER, starting services"
-        service rpcbind start >/dev/null 2>&1
-        service mountd start >/dev/null 2>&1
-        service nfsd start >/dev/null 2>&1
-        service nfsuserd start >/dev/null 2>&1
-        service stunnel restart >/dev/null 2>&1
-        logger "CARP MASTER: NFS and stunnel services started"
-        ;;
-    BACKUP)
-        logger "CARP state changed to BACKUP, stopping services"
-        service stunnel stop >/dev/null 2>&1
-        service nfsd stop >/dev/null 2>&1
-        service mountd stop >/dev/null 2>&1
-        service nfsuserd stop >/dev/null 2>&1
-        logger "CARP BACKUP: NFS and stunnel services stopped"
-        ;;
-    *)
-        logger "CARP state changed to $1 (unhandled)"
-        ;;
-esac
-EOF
-
-paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
-
-# Copy the same script to f1
-paul@f0:~ % scp /usr/local/bin/carpcontrol.sh f1:/tmp/
-paul@f1:~ % doas mv /tmp/carpcontrol.sh /usr/local/bin/
-paul@f1:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
-
-
-Note that we perform several tasks in the carpcontrol.sh script, which starts and/or stops all the services required for an NFS server running over an encrypted tunnel (via stunnel). We will set up all those services later in this blog post!
-
-To enable CARP in /boot/loader.conf, run:
-
- -
paul@f0:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf
-carp_load="YES"
-paul@f1:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf  
-carp_load="YES"
-
-
-Then reboot both hosts or run doas kldload carp to load the module immediately.
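-
-To verify that the module is in place afterwards (output assumed):
-
- -
# carp.ko should show up once loaded, and the CARP sysctls should exist
-paul@f0:~ % kldstat | grep carp
-paul@f0:~ % sysctl net.inet.carp.allow
-net.inet.carp.allow: 1
-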
-
-

Future Storage Explorations


-
-While zrepl provides excellent snapshot-based replication for disaster recovery, there are other storage technologies worth exploring for the f3s project:
-
-

MinIO for S3-Compatible Object Storage


-
-MinIO is a high-performance, S3-compatible object storage system that could complement our ZFS-based storage. Some potential use cases:
-
-
    -
  • S3 API compatibility: Many modern applications expect S3-style object storage APIs. MinIO could provide this interface while using our ZFS storage as the backend.
  • -
  • Multi-site replication: MinIO supports active-active replication across multiple sites, which could work well with our f0/f1/f2 node setup.
  • -
  • Kubernetes native: MinIO has excellent Kubernetes integration with operators and CSI drivers, making it ideal for the f3s k3s environment.
  • -

-

MooseFS for Distributed High Availability


-
-MooseFS is a fault-tolerant, distributed file system that could provide true high-availability storage:
-
-
    -
  • True HA: Unlike our current setup which requires manual failover, MooseFS provides automatic failover with no single point of failure.
  • -
  • POSIX compliance: Applications can use MooseFS like any regular filesystem, no code changes needed.
  • -
  • Flexible redundancy: Configure different replication levels per directory or file, optimizing storage efficiency.
  • -
  • FreeBSD support: MooseFS has native FreeBSD support, making it a natural fit for the f3s project.
  • -

-Both technologies could potentially run on top of our encrypted ZFS volumes, combining ZFS's data integrity and encryption features with distributed storage capabilities. This would be particularly interesting for workloads that need either S3-compatible APIs (MinIO) or transparent distributed POSIX storage (MooseFS).
-
-

NFS Server Configuration


-
-With ZFS replication in place, we can now set up NFS servers on both f0 and f1 to export the replicated data. Since native NFS over TLS (RFC 9289) has compatibility issues between Linux and FreeBSD, we'll use stunnel to provide encryption.
-
-

Setting up NFS on f0 (Primary)


-
-First, enable the NFS services in rc.conf:
-
- -
paul@f0:~ % doas sysrc nfs_server_enable=YES
-nfs_server_enable: YES -> YES
-paul@f0:~ % doas sysrc nfsv4_server_enable=YES
-nfsv4_server_enable: YES -> YES
-paul@f0:~ % doas sysrc nfsuserd_enable=YES
-nfsuserd_enable: YES -> YES
-paul@f0:~ % doas sysrc mountd_enable=YES
-mountd_enable: NO -> YES
-paul@f0:~ % doas sysrc rpcbind_enable=YES
-rpcbind_enable: NO -> YES
-
-
-Create a dedicated directory for Kubernetes volumes:
-
- -
# First ensure the dataset is mounted
-paul@f0:~ % doas zfs get mounted zdata/enc/nfsdata
-NAME               PROPERTY  VALUE    SOURCE
-zdata/enc/nfsdata  mounted   yes      -
-
-# Create the k3svolumes directory
-paul@f0:~ % doas mkdir -p /data/nfs/k3svolumes
-paul@f0:~ % doas chmod 755 /data/nfs/k3svolumes
-
-# This directory will be replicated to f1 automatically
-
-
-Create the /etc/exports file. Since we're using stunnel for encryption, ALL clients must connect through stunnel, which appears as localhost (127.0.0.1) to the NFS server:
-
- -
paul@f0:~ % doas tee /etc/exports <<'EOF'
-V4: /data/nfs -sec=sys
-/data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255
-EOF
-
-
-The exports configuration:
-
-
    -
  • V4: /data/nfs -sec=sys: Sets the NFSv4 root directory to /data/nfs
  • -
  • /data/nfs -alldirs: Allows mounting any subdirectory under /data/nfs
  • -
  • -maproot=root: Maps root user from client to root on server (needed for Kubernetes and ownership changes)
  • -
  • -network 127.0.0.1: Only accepts connections from localhost (stunnel)
  • -

-Note:
-
    -
  • ALL clients (r0, r1, r2, laptop) must connect through stunnel for encryption
  • -
  • Stunnel proxies connections through localhost, so only 127.0.0.1 needs access
  • -
  • With NFSv4, clients mount using relative paths (e.g., /k3svolumes instead of /data/nfs/k3svolumes)
  • -
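-
-Whenever /etc/exports is changed later on, mountd must re-read it. Once the services below are running, that can usually be done without a full restart:
-
- -
# Re-read /etc/exports after edits
-paul@f0:~ % doas service mountd reload
-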

-Start the NFS services:
-
- -
paul@f0:~ % doas service rpcbind start
-Starting rpcbind.
-paul@f0:~ % doas service mountd start
-Starting mountd.
-paul@f0:~ % doas service nfsd start
-Starting nfsd.
-paul@f0:~ % doas service nfsuserd start
-Starting nfsuserd.
-
-
-

Configuring Stunnel for NFS Encryption with CARP Failover


-
-#### Why Not Native NFS over TLS?
-
-FreeBSD 13+ supports native NFS over TLS (RFC 9289), which would be the ideal solution. However, there are significant compatibility challenges:
-
-
    -
  • Linux client support is incomplete: Most Linux distributions don't fully support NFS over TLS yet
  • -
  • Certificate management differs: FreeBSD and Linux handle TLS certificates differently for NFS
  • -
  • Kernel module requirements: Requires specific kernel modules that may not be available
  • -

-Stunnel provides a more compatible solution that works reliably across all operating systems while offering equivalent security.
-
-#### Stunnel Architecture with CARP
-
-Stunnel integrates seamlessly with our CARP setup:
-
-
-                    CARP VIP (192.168.1.138)
-                           |
-    f0 (MASTER) ←---------→|←---------→ f1 (BACKUP)
-    stunnel:2323           |           stunnel:stopped
-    nfsd:2049              |           nfsd:stopped
-                           |
-                    Clients connect here
-
-
-The key insight is that stunnel binds to the CARP VIP. When CARP fails over, the VIP moves to the new MASTER, and stunnel starts there automatically. Clients maintain their connection to the same IP throughout.
-
-#### Creating a Certificate Authority for Client Authentication
-
-First, create a CA to sign both server and client certificates:
-
- -
# On f0 - Create CA
-paul@f0:~ % doas mkdir -p /usr/local/etc/stunnel/ca
-paul@f0:~ % cd /usr/local/etc/stunnel/ca
-paul@f0:~ % doas openssl genrsa -out ca-key.pem 4096
-paul@f0:~ % doas openssl req -new -x509 -days 3650 -key ca-key.pem -out ca-cert.pem \
-  -subj '/C=US/ST=State/L=City/O=F3S Storage/CN=F3S Stunnel CA'
-
-# Create server certificate
-paul@f0:~ % cd /usr/local/etc/stunnel
-paul@f0:~ % doas openssl genrsa -out server-key.pem 4096
-paul@f0:~ % doas openssl req -new -key server-key.pem -out server.csr \
-  -subj '/C=US/ST=State/L=City/O=F3S Storage/CN=f3s-storage-ha.lan'
-paul@f0:~ % doas openssl x509 -req -days 3650 -in server.csr -CA ca/ca-cert.pem \
-  -CAkey ca/ca-key.pem -CAcreateserial -out server-cert.pem
-
-# Create client certificates for authorized clients
-paul@f0:~ % cd /usr/local/etc/stunnel/ca
-paul@f0:~ % doas sh -c 'for client in r0 r1 r2 earth; do 
-  openssl genrsa -out ${client}-key.pem 4096
-  openssl req -new -key ${client}-key.pem -out ${client}.csr \
-    -subj "/C=US/ST=State/L=City/O=F3S Storage/CN=${client}.lan.buetow.org"
-  openssl x509 -req -days 3650 -in ${client}.csr -CA ca-cert.pem \
-    -CAkey ca-key.pem -CAcreateserial -out ${client}-cert.pem
-done'
-
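-Before distributing anything, it's worth confirming that the generated certificates actually chain back to the CA (output assumed):
-
- -
# Verify the server and one client certificate against the CA
-paul@f0:~ % doas openssl verify -CAfile /usr/local/etc/stunnel/ca/ca-cert.pem \
-  /usr/local/etc/stunnel/server-cert.pem /usr/local/etc/stunnel/ca/r0-cert.pem
-/usr/local/etc/stunnel/server-cert.pem: OK
-/usr/local/etc/stunnel/ca/r0-cert.pem: OK
-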
-
-#### Install and Configure Stunnel on f0
-
- -
# Install stunnel
-paul@f0:~ % doas pkg install -y stunnel
-
-# Configure stunnel server with client certificate authentication
-paul@f0:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF'
-cert = /usr/local/etc/stunnel/server-cert.pem
-key = /usr/local/etc/stunnel/server-key.pem
-
-setuid = stunnel
-setgid = stunnel
-
-[nfs-tls]
-accept = 192.168.1.138:2323
-connect = 127.0.0.1:2049
-CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem
-verify = 2
-requireCert = yes
-EOF
-
-# Enable and start stunnel
-paul@f0:~ % doas sysrc stunnel_enable=YES
-stunnel_enable:  -> YES
-paul@f0:~ % doas service stunnel start
-Starting stunnel.
-
-# Restart stunnel to apply the CARP VIP binding
-paul@f0:~ % doas service stunnel restart
-Stopping stunnel.
-Starting stunnel.
-
-
-The configuration includes:
-
    -
  • verify = 2: Verify client certificate and fail if not provided
  • -
  • requireCert = yes: Client must present a valid certificate
  • -
  • CAfile: Path to the CA certificate that signed the client certificates
  • -
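-
-To confirm that the server really enforces client certificates, openssl's s_client can be used from a host that holds one of the client keys (a sketch; the file paths are assumptions):
-
- -
# With a valid client certificate, the TLS handshake should succeed
-openssl s_client -connect 192.168.1.138:2323 \
-  -cert r0-cert.pem -key r0-key.pem -CAfile ca-cert.pem </dev/null
-
-# Without a certificate, the connection should be rejected during or right after the handshake
-openssl s_client -connect 192.168.1.138:2323 </dev/null
-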

-

Setting up NFS on f1 (Standby)


-
-Repeat the same configuration on f1:
-
- -
paul@f1:~ % doas sysrc nfs_server_enable=YES
-nfs_server_enable: NO -> YES
-paul@f1:~ % doas sysrc nfsv4_server_enable=YES
-nfsv4_server_enable: NO -> YES
-paul@f1:~ % doas sysrc nfsuserd_enable=YES
-nfsuserd_enable: NO -> YES
-paul@f1:~ % doas sysrc mountd_enable=YES
-mountd_enable: NO -> YES
-paul@f1:~ % doas sysrc rpcbind_enable=YES
-rpcbind_enable: NO -> YES
-
-paul@f1:~ % doas tee /etc/exports <<'EOF'
-V4: /data/nfs -sec=sys
-/data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255
-EOF
-
-paul@f1:~ % doas service rpcbind start
-Starting rpcbind.
-paul@f1:~ % doas service mountd start
-Starting mountd.
-paul@f1:~ % doas service nfsd start
-Starting nfsd.
-paul@f1:~ % doas service nfsuserd start
-Starting nfsuserd.
-
-
-Configure stunnel on f1:
-
- -
# Install stunnel
-paul@f1:~ % doas pkg install -y stunnel
-
-# Copy certificates from f0
-paul@f0:~ % doas tar -cf /tmp/stunnel-certs.tar -C /usr/local/etc/stunnel server-cert.pem server-key.pem ca
-paul@f0:~ % scp /tmp/stunnel-certs.tar f1:/tmp/
-paul@f1:~ % cd /usr/local/etc/stunnel && doas tar -xf /tmp/stunnel-certs.tar
-
-# Configure stunnel server on f1 with client certificate authentication
-paul@f1:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF'
-cert = /usr/local/etc/stunnel/server-cert.pem
-key = /usr/local/etc/stunnel/server-key.pem
-
-setuid = stunnel
-setgid = stunnel
-
-[nfs-tls]
-accept = 192.168.1.138:2323
-connect = 127.0.0.1:2049
-CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem
-verify = 2
-requireCert = yes
-EOF
-
-# Enable and start stunnel
-paul@f1:~ % doas sysrc stunnel_enable=YES
-stunnel_enable:  -> YES
-paul@f1:~ % doas service stunnel start
-Starting stunnel.
-
-# Restart stunnel to apply the CARP VIP binding
-paul@f1:~ % doas service stunnel restart
-Stopping stunnel.
-Starting stunnel.
-
-
-

How Stunnel Works with CARP


-
-With stunnel configured to bind to the CARP VIP (192.168.1.138), only the server that is currently the CARP MASTER will accept stunnel connections. This provides automatic failover for encrypted NFS:
-
-
    -
  • When f0 is CARP MASTER: stunnel on f0 accepts connections on 192.168.1.138:2323
  • -
  • When f1 becomes CARP MASTER: stunnel on f1 starts accepting connections on 192.168.1.138:2323
  • -
  • The backup server's stunnel process will fail to bind to the VIP and won't accept connections
  • -

-This ensures that clients always connect to the active NFS server through the CARP VIP.
-
-

CARP Control Script for Clean Failover


-
-To ensure clean failover behavior and prevent stale file handles, we'll create a control script that:
-
    -
  • Stops NFS services on BACKUP nodes (preventing split-brain scenarios)
  • -
  • Starts NFS services only on the MASTER node
  • -
  • Manages stunnel binding to the CARP VIP
  • -

-This approach ensures clients can only connect to the active server, eliminating stale handles from the inactive server:
-
- -
# Create CARP control script on both f0 and f1
-paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
-#!/bin/sh
-# CARP state change control script
-
-case "$1" in
-    MASTER)
-        logger "CARP state changed to MASTER, starting services"
-        service rpcbind start >/dev/null 2>&1
-        service mountd start >/dev/null 2>&1
-        service nfsd start >/dev/null 2>&1
-        service nfsuserd start >/dev/null 2>&1
-        service stunnel restart >/dev/null 2>&1
-        logger "CARP MASTER: NFS and stunnel services started"
-        ;;
-    BACKUP)
-        logger "CARP state changed to BACKUP, stopping services"
-        service stunnel stop >/dev/null 2>&1
-        service nfsd stop >/dev/null 2>&1
-        service mountd stop >/dev/null 2>&1
-        service nfsuserd stop >/dev/null 2>&1
-        logger "CARP BACKUP: NFS and stunnel services stopped"
-        ;;
-    *)
-        logger "CARP state changed to $1 (unhandled)"
-        ;;
-esac
-EOF
-
-paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
-
-# Add to devd configuration
-paul@f0:~ % doas tee -a /etc/devd.conf <<'EOF'
-
-# CARP state change notifications
-notify 0 {
-    match "system" "CARP";
-    match "subsystem" "[0-9]+@[a-z]+[0-9]+";
-    match "type" "(MASTER|BACKUP)";
-    action "/usr/local/bin/carpcontrol.sh $type";
-};
-EOF
-
-# Restart devd to apply changes
-paul@f0:~ % doas service devd restart
-
-
-This enhanced script ensures that:
-
    -
  • Only the MASTER node runs NFS and stunnel services
  • -
  • BACKUP nodes have all services stopped, preventing any client connections
  • -
  • Failovers are clean with no possibility of accessing the wrong server
  • -
  • Stale file handles are minimized because the old server immediately stops responding
  • -

-

CARP Management Script


-
-To simplify CARP state management and failover testing, create this helper script on both f0 and f1:
-
- -
# Create the CARP management script
-paul@f0:~ % doas tee /usr/local/bin/carp <<'EOF'
-#!/bin/sh
-# CARP state management script
-# Usage: carp [master|backup|auto-failback enable|auto-failback disable]
-# Without arguments: shows current state
-
-# Find the interface with CARP configured
-CARP_IF=$(ifconfig -l | xargs -n1 | while read if; do
-    ifconfig "$if" 2>/dev/null | grep -q "carp:" && echo "$if" && break
-done)
-
-if [ -z "$CARP_IF" ]; then
-    echo "Error: No CARP interface found"
-    exit 1
-fi
-
-# Get CARP VHID
-VHID=$(ifconfig "$CARP_IF" | grep "carp:" | sed -n 's/.*vhid \([0-9]*\).*/\1/p')
-
-if [ -z "$VHID" ]; then
-    echo "Error: Could not determine CARP VHID"
-    exit 1
-fi
-
-# Function to get current state
-get_state() {
-    ifconfig "$CARP_IF" | grep "carp:" | awk '{print $2}'
-}
-
-# Check for auto-failback block file
-BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
-check_auto_failback() {
-    if [ -f "$BLOCK_FILE" ]; then
-        echo "WARNING: Auto-failback is DISABLED (file exists: $BLOCK_FILE)"
-    fi
-}
-
-# Main logic
-case "$1" in
-    "")
-        # No argument - show current state
-        STATE=$(get_state)
-        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
-        check_auto_failback
-        ;;
-    master)
-        # Force to MASTER state
-        echo "Setting CARP to MASTER state..."
-        ifconfig "$CARP_IF" vhid "$VHID" state master
-        sleep 1
-        STATE=$(get_state)
-        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
-        check_auto_failback
-        ;;
-    backup)
-        # Force to BACKUP state
-        echo "Setting CARP to BACKUP state..."
-        ifconfig "$CARP_IF" vhid "$VHID" state backup
-        sleep 1
-        STATE=$(get_state)
-        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
-        check_auto_failback
-        ;;
-    auto-failback)
-        case "$2" in
-            enable)
-                if [ -f "$BLOCK_FILE" ]; then
-                    rm "$BLOCK_FILE"
-                    echo "Auto-failback ENABLED (removed $BLOCK_FILE)"
-                else
-                    echo "Auto-failback was already enabled"
-                fi
-                ;;
-            disable)
-                if [ ! -f "$BLOCK_FILE" ]; then
-                    touch "$BLOCK_FILE"
-                    echo "Auto-failback DISABLED (created $BLOCK_FILE)"
-                else
-                    echo "Auto-failback was already disabled"
-                fi
-                ;;
-            *)
-                echo "Usage: $0 auto-failback [enable|disable]"
-                echo "  enable:  Remove block file to allow automatic failback"
-                echo "  disable: Create block file to prevent automatic failback"
-                exit 1
-                ;;
-        esac
-        ;;
-    *)
-        echo "Usage: $0 [master|backup|auto-failback enable|auto-failback disable]"
-        echo "  Without arguments: show current CARP state"
-        echo "  master: force this node to become CARP MASTER"
-        echo "  backup: force this node to become CARP BACKUP"
-        echo "  auto-failback enable:  allow automatic failback to f0"
-        echo "  auto-failback disable: prevent automatic failback to f0"
-        exit 1
-        ;;
-esac
-EOF
-
-paul@f0:~ % doas chmod +x /usr/local/bin/carp
-
-# Copy to f1 as well
-paul@f0:~ % scp /usr/local/bin/carp f1:/tmp/
-paul@f1:~ % doas cp /tmp/carp /usr/local/bin/carp && doas chmod +x /usr/local/bin/carp
-
-
-Now you can easily manage CARP states and auto-failback:
-
- -
# Check current CARP state
-paul@f0:~ % doas carp
-CARP state on re0 (vhid 1): MASTER
-
-# If auto-failback is disabled, you'll see a warning
-paul@f0:~ % doas carp
-CARP state on re0 (vhid 1): MASTER
-WARNING: Auto-failback is DISABLED (file exists: /data/nfs/nfs.NO_AUTO_FAILBACK)
-
-# Force f0 to become BACKUP (triggers failover to f1)
-paul@f0:~ % doas carp backup
-Setting CARP to BACKUP state...
-CARP state on re0 (vhid 1): BACKUP
-
-# Disable auto-failback (useful for maintenance)
-paul@f0:~ % doas carp auto-failback disable
-Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK)
-
-# Enable auto-failback
-paul@f0:~ % doas carp auto-failback enable
-Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
-
-
-This enhanced script:
-- Shows warnings when auto-failback is disabled
-- Provides easy control over the auto-failback feature
-- Makes failover testing and maintenance simpler
-
-

Automatic Failback After Reboot


-
-When f0 reboots (planned or unplanned), f1 takes over as CARP MASTER. To ensure f0 automatically reclaims its primary role once it's fully operational, we'll implement an automatic failback mechanism.
-
-#### Why Automatic Failback?
-
-- **Primary node preference**: f0 has the primary storage; it should be MASTER when available
-- **Post-reboot automation**: Eliminates manual intervention after every f0 reboot
-- **Maintenance flexibility**: Can be disabled when you want f1 to remain MASTER
-
-#### The Auto-Failback Script
-
-Create this script on f0 only (not on f1):
-
- -
paul@f0:~ % doas tee /usr/local/bin/carp-auto-failback.sh <<'EOF'
-#!/bin/sh
-# CARP automatic failback script for f0
-# Ensures f0 reclaims MASTER role after reboot when storage is ready
-
-LOGFILE="/var/log/carp-auto-failback.log"
-MARKER_FILE="/data/nfs/nfs.DO_NOT_REMOVE"
-BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
-
-log_message() {
-    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$LOGFILE"
-}
-
-# Check if we're already MASTER (first output line only; a WARNING line may follow it)
-CURRENT_STATE=$(/usr/local/bin/carp | head -1 | awk '{print $NF}')
-if [ "$CURRENT_STATE" = "MASTER" ]; then
-    exit 0
-fi
-
-# Check if /data/nfs is mounted
-if ! mount | grep -q "on /data/nfs "; then
-    log_message "SKIP: /data/nfs not mounted"
-    exit 0
-fi
-
-# Check if marker file exists (identifies this as primary storage)
-if [ ! -f "$MARKER_FILE" ]; then
-    log_message "SKIP: Marker file $MARKER_FILE not found"
-    exit 0
-fi
-
-# Check if failback is blocked (for maintenance)
-if [ -f "$BLOCK_FILE" ]; then
-    log_message "SKIP: Failback blocked by $BLOCK_FILE"
-    exit 0
-fi
-
-# Check if NFS services are running (ensure we're fully ready)
-if ! service nfsd status >/dev/null 2>&1; then
-    log_message "SKIP: NFS services not yet running"
-    exit 0
-fi
-
-# All conditions met - promote to MASTER
-log_message "CONDITIONS MET: Promoting to MASTER (was $CURRENT_STATE)"
-/usr/local/bin/carp master
-
-# Log result
-sleep 2
-NEW_STATE=$(/usr/local/bin/carp | head -1 | awk '{print $NF}')
-log_message "Failback complete: State is now $NEW_STATE"
-
-# If successful, log to system log too
-if [ "$NEW_STATE" = "MASTER" ]; then
-    logger "CARP: f0 automatically reclaimed MASTER role"
-fi
-EOF
-
-paul@f0:~ % doas chmod +x /usr/local/bin/carp-auto-failback.sh
-
-
-#### Setting Up the Marker File
-
-The marker file identifies f0's primary storage. Create it once:
-
- -
paul@f0:~ % doas touch /data/nfs/nfs.DO_NOT_REMOVE
-
-
-This file will be replicated to f1, but since f1 mounts the dataset at a different path, it won't trigger failback there.
-
-#### Configuring Cron
-
-Add a cron job to check every minute:
-
- -
paul@f0:~ % echo "* * * * * /usr/local/bin/carp-auto-failback.sh" | doas crontab -
-
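-Note that piping into crontab - replaces root's entire crontab. If there are existing entries to keep, append instead (a sketch):
-
- -
paul@f0:~ % doas sh -c '(crontab -l 2>/dev/null; \
-    echo "* * * * * /usr/local/bin/carp-auto-failback.sh") | crontab -'
-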
-
-#### Managing Automatic Failback
-
-The enhanced CARP script provides integrated control over auto-failback:
-
-**To temporarily disable automatic failback** (e.g., for f0 maintenance):
- -
paul@f0:~ % doas carp auto-failback disable
-Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK)
-
-
-**To re-enable automatic failback**:
- -
paul@f0:~ % doas carp auto-failback enable
-Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
-
-
-**To check if auto-failback is enabled**:
- -
paul@f0:~ % doas carp
-CARP state on re0 (vhid 1): MASTER
-# If disabled, you'll see: WARNING: Auto-failback is DISABLED
-
-
-**To monitor failback attempts**:
- -
paul@f0:~ % tail -f /var/log/carp-auto-failback.log
-
-
-#### How It Works
-
-1. **After f0 reboots**: f1 is MASTER, f0 boots as BACKUP
-2. **Cron runs every minute**: Checks if conditions are met
-3. **Safety checks**:
- - Is f0 currently BACKUP? (don't run if already MASTER)
- - Is /data/nfs mounted? (ZFS datasets are ready)
- - Does marker file exist? (confirms this is primary storage)
- - Is failback blocked? (admin can prevent failback)
- - Are NFS services running? (system is fully ready)
-4. **Failback occurs**: Typically 2-3 minutes after boot completes
-5. **Logging**: All attempts logged for troubleshooting
-
-This ensures f0 automatically resumes its role as primary storage server after any reboot, while providing administrative control when needed.
-
-

Verifying Stunnel and CARP Status


-
-First, check which host is currently CARP MASTER:
-
- -
# On f0 - check CARP status
-paul@f0:~ % ifconfig re0 | grep -E 'inet |carp:'
-	inet 192.168.1.130 netmask 0xffffff00 broadcast 192.168.1.255
-	inet 192.168.1.138 netmask 0xffffffff broadcast 192.168.1.138 vhid 1
-	carp: MASTER vhid 1 advbase 1 advskew 0
-
-# If f0 is MASTER, verify stunnel is listening on the VIP
-paul@f0:~ % doas sockstat -l | grep 2323
-stunnel  stunnel    1234  3  tcp4   192.168.1.138:2323    *:*
-
-# On f1 - check CARP status  
-paul@f1:~ % ifconfig re0 | grep -E 'inet |carp:'
-	inet 192.168.1.131 netmask 0xffffff00 broadcast 192.168.1.255
-	inet 192.168.1.138 netmask 0xffffffff broadcast 192.168.1.138 vhid 1
-	carp: BACKUP vhid 1 advbase 1 advskew 0
-
-# If f1 is BACKUP, stunnel won't be able to bind to the VIP
-paul@f1:~ % doas tail /var/log/messages | grep stunnel
-Jul  4 12:34:56 f1 stunnel: [!] bind: 192.168.1.138:2323: Can't assign requested address (49)
-
-
-

Verifying NFS Exports


-
-Check that the exports are active on both servers:
-
- -
# On f0
-paul@f0:~ % doas showmount -e localhost
-Exports list on localhost:
-/data/nfs                          127.0.0.1
-
-# On f1
-paul@f1:~ % doas showmount -e localhost
-Exports list on localhost:
-/data/nfs                          127.0.0.1
-
-
-

Client Configuration for Stunnel


-
-To mount NFS shares with stunnel encryption, clients need to install and configure stunnel with their client certificates.
-
-#### Preparing Client Certificates
-
-On f0, prepare the client certificate packages:
-
- -
# Create combined certificate/key files for each client
-paul@f0:~ % cd /usr/local/etc/stunnel/ca
-paul@f0:~ % doas sh -c 'for client in r0 r1 r2 earth; do
-  cat ${client}-cert.pem ${client}-key.pem > /tmp/${client}-stunnel.pem
-done'
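-
-These bundles contain private keys, so tighten their permissions while they sit in /tmp and remove them once they have been copied to the clients:
-
- -
# Keep the key/cert bundles private and clean them up after distribution
-paul@f0:~ % doas chmod 600 /tmp/*-stunnel.pem
-paul@f0:~ % doas rm /tmp/*-stunnel.pem  # after all clients have copied their bundle
-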
-
-
-#### Configuring Rocky Linux Clients (r0, r1, r2)
-
- -
# Install stunnel on client (example for r0)
-[root@r0 ~]# dnf install -y stunnel
-
-# Copy client certificate and CA certificate from f0
-[root@r0 ~]# scp f0:/tmp/r0-stunnel.pem /etc/stunnel/
-[root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/ca-cert.pem /etc/stunnel/
-
-# Configure stunnel client with certificate authentication
-[root@r0 ~]# tee /etc/stunnel/stunnel.conf <<'EOF'
-cert = /etc/stunnel/r0-stunnel.pem
-CAfile = /etc/stunnel/ca-cert.pem
-client = yes
-verify = 2
-
-[nfs-ha]
-accept = 127.0.0.1:2323
-connect = 192.168.1.138:2323
-EOF
-
-# Enable and start stunnel
-[root@r0 ~]# systemctl enable --now stunnel
-
-# Repeat for r1 and r2 with their respective certificates
-
-
-Note: Each client must use its own certificate file (r0-stunnel.pem, r1-stunnel.pem, r2-stunnel.pem, or earth-stunnel.pem).
-
-

Testing NFS Mount with Stunnel


-
-Mount NFS through the stunnel encrypted tunnel:
-
- -
# Create mount point
-[root@r0 ~]# mkdir -p /data/nfs/k3svolumes
-
-# Mount through stunnel (localhost; the path is relative to the NFSv4 root /data/nfs)
-[root@r0 ~]# mount -t nfs4 -o port=2323 127.0.0.1:/k3svolumes /data/nfs/k3svolumes
-
-# Verify mount
-[root@r0 ~]# mount | grep k3svolumes
-127.0.0.1:/k3svolumes on /data/nfs/k3svolumes type nfs4 (rw,relatime,vers=4.2,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,port=2323,timeo=600,retrans=2,sec=sys,clientaddr=127.0.0.1,local_lock=none,addr=127.0.0.1)
-
-# For persistent mount, add to /etc/fstab:
-127.0.0.1:/k3svolumes /data/nfs/k3svolumes nfs4 port=2323,_netdev 0 0
-
-
-Note: The mount uses localhost (127.0.0.1) because stunnel is listening locally and forwarding the encrypted traffic to the remote server.
-
-Write a test file through the encrypted mount and verify that it is replicated:
-
- -
# Write the file from the client
-[root@r0 ~]# echo "Test from r0" > /data/nfs/k3svolumes/test-r0.txt
-
-# Check on f0
-paul@f0:~ % cat /data/nfs/k3svolumes/test-r0.txt
-Test from r0
-
-# After the next replication run (about one minute), check on f1
-paul@f1:~ % cat /data/nfs/k3svolumes/test-r0.txt
-Test from r0
-
-
-

Important: Encryption Keys for Replicated Datasets


-
-When using encrypted ZFS datasets with raw sends (send -w), the replicated datasets on f1 need the encryption keys loaded to access the data:
-
- -
# Check encryption status on f1
-paul@f1:~ % doas zfs get keystatus zdata/sink/f0/zdata/enc/nfsdata
-NAME                             PROPERTY   VALUE        SOURCE
-zdata/sink/f0/zdata/enc/nfsdata  keystatus  unavailable  -
-
-# Load the encryption key (uses the same key as f0)
-paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/sink/f0/zdata/enc/nfsdata
-
-# Mount the dataset
-paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
-
-# Configure automatic key loading on boot
-paul@f1:~ % doas sysrc zfskeys_datasets="zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata"
-zfskeys_datasets:  -> zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata
-
-
-This ensures that after a reboot, f1 will automatically load the encryption keys and mount all encrypted datasets, including the replicated ones.
-
-

NFS Failover with CARP and Stunnel


-
-With NFS servers running on both f0 and f1 and stunnel bound to the CARP VIP:
-
-
    -
  • Automatic failover: When f0 fails, CARP automatically promotes f1 to MASTER
  • -
  • Stunnel failover: The carpcontrol.sh script automatically starts stunnel on the new MASTER
  • -
  • Client transparency: Clients always connect to 192.168.1.138:2323, which routes to the active server
  • -
  • No connection disruption: Existing NFS mounts continue working through the same VIP
  • -
  • Data consistency: ZFS replication ensures f1 has recent data (within the one-minute replication window)
  • -
  • Read-only replica: The replicated dataset on f1 is always mounted read-only to prevent breaking replication
  • -
  • Manual intervention required for full RW failover: When f1 becomes MASTER, you must:
  • -

- 1. Make the replicated dataset writable: doas zfs set readonly=off zdata/sink/f0/zdata/enc/nfsdata
- 2. Ensure encryption keys are loaded (should be automatic with zfskeys_enable)
- 3. NFS will automatically start serving read/write requests through the VIP
-
-Important: The /data/nfs mount on f1 remains read-only during normal operation to ensure replication integrity. In case of a failover, clients can still read data immediately, but write operations require the manual steps above to promote f1 to full read-write mode.
-
-

Testing CARP Failover


-
-To test the failover process:
-
- -
# On f0 (current MASTER) - trigger failover
-paul@f0:~ % doas ifconfig re0 vhid 1 state backup
-
-# On f1 - verify it becomes MASTER
-paul@f1:~ % ifconfig re0 | grep carp
-    carp: MASTER vhid 1 advbase 1 advskew 0
-
-# Check stunnel is now listening on f1
-paul@f1:~ % doas sockstat -l | grep 2323
-stunnel  stunnel    4567  3  tcp4   192.168.1.138:2323    *:*
-
-# On client - verify NFS mount still works
-[root@r0 ~]# ls /data/nfs/k3svolumes/
-[root@r0 ~]# echo "Test after failover" > /data/nfs/k3svolumes/failover-test.txt
-
-
-

Handling Stale File Handles After Failover


-
-After a CARP failover, NFS clients may experience "Stale file handle" errors because they cached file handles from the previous server. To resolve this:
-
-Manual recovery (immediate fix):
- -
# Force unmount and remount
-[root@r0 ~]# umount -f /data/nfs/k3svolumes
-[root@r0 ~]# mount /data/nfs/k3svolumes
-
-
-Automatic recovery options:
-
-1. Use soft mounts with shorter timeouts in /etc/fstab:
-
-127.0.0.1:/k3svolumes /data/nfs/k3svolumes nfs4 port=2323,_netdev,soft,timeo=10,retrans=2,intr 0 0
-
-
-2. Create an automatic recovery system using systemd timers (checks every 10 seconds):
-
-First, create the monitoring script:
- -
[root@r0 ~]# cat > /usr/local/bin/check-nfs-mount.sh << 'EOF'
-#!/bin/bash
-# Fast NFS mount health monitor - runs every 10 seconds via systemd timer
-
-MOUNT_POINT="/data/nfs/k3svolumes"
-LOCK_FILE="/var/run/nfs-mount-check.lock"
-STATE_FILE="/var/run/nfs-mount.state"
-
-# Use a lock file to prevent concurrent runs
-if [ -f "$LOCK_FILE" ]; then
-    exit 0
-fi
-touch "$LOCK_FILE"
-trap "rm -f $LOCK_FILE" EXIT
-
-# Quick check - try to stat a directory with very short timeout
-if timeout 2s stat "$MOUNT_POINT" >/dev/null 2>&1; then
-    # Mount appears healthy
-    if [ -f "$STATE_FILE" ]; then
-        # Was previously unhealthy, log recovery
-        echo "NFS mount recovered at $(date)" | systemd-cat -t nfs-monitor -p info
-        rm -f "$STATE_FILE"
-    fi
-    exit 0
-fi
-
-# Mount is unhealthy
-if [ ! -f "$STATE_FILE" ]; then
-    # First detection of unhealthy state
-    echo "NFS mount unhealthy detected at $(date)" | systemd-cat -t nfs-monitor -p warning
-    touch "$STATE_FILE"
-fi
-
-# Try to fix
-echo "Attempting to fix stale NFS mount at $(date)" | systemd-cat -t nfs-monitor -p notice
-umount -f "$MOUNT_POINT" 2>/dev/null
-sleep 1
-
-if mount "$MOUNT_POINT"; then
-    echo "NFS mount fixed at $(date)" | systemd-cat -t nfs-monitor -p info
-    rm -f "$STATE_FILE"
-else
-    echo "Failed to fix NFS mount at $(date)" | systemd-cat -t nfs-monitor -p err
-fi
-EOF
-[root@r0 ~]# chmod +x /usr/local/bin/check-nfs-mount.sh
-
-
-Create the systemd service:
- -
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.service << 'EOF'
-[Unit]
-Description=NFS Mount Health Monitor
-After=network-online.target
-
-[Service]
-Type=oneshot
-ExecStart=/usr/local/bin/check-nfs-mount.sh
-StandardOutput=journal
-StandardError=journal
-EOF
-
-
-Create the systemd timer (runs every 10 seconds):
- -
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.timer << 'EOF'
-[Unit]
-Description=Run NFS Mount Health Monitor every 10 seconds
-Requires=nfs-mount-monitor.service
-
-[Timer]
-OnBootSec=30s
-OnUnitActiveSec=10s
-AccuracySec=1s
-
-[Install]
-WantedBy=timers.target
-EOF
-
-
-Enable and start the timer:
- -
[root@r0 ~]# systemctl daemon-reload
-[root@r0 ~]# systemctl enable nfs-mount-monitor.timer
-[root@r0 ~]# systemctl start nfs-mount-monitor.timer
-
-# Check status
-[root@r0 ~]# systemctl status nfs-mount-monitor.timer
-● nfs-mount-monitor.timer - Run NFS Mount Health Monitor every 10 seconds
-     Loaded: loaded (/etc/systemd/system/nfs-mount-monitor.timer; enabled)
-     Active: active (waiting) since Sat 2025-07-06 10:00:00 EEST
-    Trigger: Sat 2025-07-06 10:00:10 EEST; 8s left
-
-# Monitor logs
-[root@r0 ~]# journalctl -u nfs-mount-monitor -f
-
-
-3. For Kubernetes, use liveness probes that restart pods when NFS becomes stale
-
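-A minimal sketch of this idea (all names and paths here are assumptions, not part of this setup): an exec probe stats a path on the NFS volume, and the kubelet restarts the container if the mount goes stale:
-
- -
# Hypothetical pod using the claim defined later in this post
-kubectl apply -f - <<'EOF'
-apiVersion: v1
-kind: Pod
-metadata:
-  name: nfs-probe-demo
-spec:
-  containers:
-  - name: app
-    image: busybox
-    command: ["sleep", "infinity"]
-    volumeMounts:
-    - name: data
-      mountPath: /data
-    livenessProbe:
-      exec:
-        command: ["sh", "-c", "timeout 2 stat /data >/dev/null"]
-      periodSeconds: 10
-      failureThreshold: 3
-  volumes:
-  - name: data
-    persistentVolumeClaim:
-      claimName: nfs-pvc
-EOF
-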
-Note: Stale file handles are inherent to NFS failover because file handles are server-specific. The best approach depends on your application's tolerance for brief disruptions.
-
-

Complete Failover Test


-
-Here's a comprehensive test of the failover behavior with all optimizations in place:
-
- -
# 1. Check initial state
-paul@f0:~ % ifconfig re0 | grep carp
-    carp: MASTER vhid 1 advbase 1 advskew 0
-paul@f1:~ % ifconfig re0 | grep carp
-    carp: BACKUP vhid 1 advbase 1 advskew 0
-
-# 2. Create a test file from a client
-[root@r0 ~]# echo "test before failover" > /data/nfs/k3svolumes/test-before.txt
-
-# 3. Trigger failover (f0 → f1)
-paul@f0:~ % doas ifconfig re0 vhid 1 state backup
-
-# 4. Monitor client behavior
-[root@r0 ~]# ls /data/nfs/k3svolumes/
-ls: cannot access '/data/nfs/k3svolumes/': Stale file handle
-
-# 5. Check automatic recovery (within 10 seconds)
-[root@r0 ~]# journalctl -u nfs-mount-monitor -f
-Jul 06 10:15:32 r0 nfs-monitor[1234]: NFS mount unhealthy detected at Sun Jul 6 10:15:32 EEST 2025
-Jul 06 10:15:32 r0 nfs-monitor[1234]: Attempting to fix stale NFS mount at Sun Jul 6 10:15:32 EEST 2025
-Jul 06 10:15:33 r0 nfs-monitor[1234]: NFS mount fixed at Sun Jul 6 10:15:33 EEST 2025
-
-
-Failover Timeline:
-
    -
  • 0 seconds: CARP failover triggered
  • -
  • 0-2 seconds: Clients get "Stale file handle" errors (not hanging)
  • -
  • 3-10 seconds: Soft mounts ensure quick failure of operations
  • -
  • Within 10 seconds: Automatic recovery via systemd timer
  • -

-Benefits of the Optimized Setup:
-1. No hanging processes - Soft mounts fail quickly
-2. Clean failover - Old server stops serving immediately
-3. Fast automatic recovery - No manual intervention needed
-4. Predictable timing - Recovery within 10 seconds with systemd timer
-5. Better visibility - systemd journal provides detailed logs
-
-Important Considerations:
-
    -
  • Recent writes (within the last replication interval, one minute in our setup) may not be visible after failover due to replication lag
  • -
  • Applications should handle brief NFS errors gracefully
  • -
  • For zero-downtime requirements, consider synchronous replication or distributed storage
  • -

-

Verifying Replication Status


-
-To check if replication is working correctly:
-
- -
# Check replication status
-paul@f0:~ % doas zrepl status
-
-# Check recent snapshots on source
-paul@f0:~ % doas zfs list -t snapshot -o name,creation zdata/enc/nfsdata | tail -5
-
-# Check recent snapshots on destination
-paul@f1:~ % doas zfs list -t snapshot -o name,creation zdata/sink/f0/zdata/enc/nfsdata | tail -5
-
-# Verify data appears on f1 (should be read-only)
-paul@f1:~ % ls -la /data/nfs/k3svolumes/
-
-
-Important: If you see "connection refused" errors in zrepl logs, ensure:
-
    -
  • Both servers have zrepl running (doas service zrepl status)
  • -
  • No firewall or hosts.allow rules are blocking port 8888
  • -
  • WireGuard is up if using WireGuard IPs for replication
  • -

-

Post-Reboot Verification


-
-After rebooting the FreeBSD servers, verify the complete stack:
-
- -
# Check CARP status on all servers
-paul@f0:~ % ifconfig re0 | grep carp
-paul@f1:~ % ifconfig re0 | grep carp
-
-# Verify stunnel is running on the MASTER
-paul@f0:~ % doas sockstat -l | grep 2323
-
-# Check NFS is exported
-paul@f0:~ % doas showmount -e localhost
-
-# Verify all r servers have NFS mounted
-[root@r0 ~]# mount | grep nfs
-[root@r1 ~]# mount | grep nfs
-[root@r2 ~]# mount | grep nfs
-
-# Test write access
-[root@r0 ~]# echo "Test after reboot $(date)" > /data/nfs/k3svolumes/test-reboot.txt
-
-# Verify zrepl is running and replicating
-paul@f0:~ % doas service zrepl status
-paul@f1:~ % doas service zrepl status
-
-
-

Integration with Kubernetes


-
-In your Kubernetes manifests, you can now create PersistentVolumes using the NFS servers:
-
-
-apiVersion: v1
-kind: PersistentVolume
-metadata:
-  name: nfs-pv
-spec:
-  capacity:
-    storage: 100Gi
-  accessModes:
-    - ReadWriteMany
-  nfs:
-    server: 192.168.1.138  # f3s-storage-ha.lan (CARP virtual IP)
-    path: /data/nfs/k3svolumes
-  mountOptions:
-    - nfsvers=4
-    - tcp
-    - hard
-    - intr
-
-
-Using the CARP virtual IP (192.168.1.138) instead of direct server IPs ensures that Kubernetes workloads continue to access storage even if the primary NFS server fails. For encryption, configure stunnel on the Kubernetes nodes.
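-
-To actually hand this volume to a workload, a matching PersistentVolumeClaim is needed. A minimal sketch (the claim name nfs-pvc is an assumption):
-
- -
kubectl apply -f - <<'EOF'
-apiVersion: v1
-kind: PersistentVolumeClaim
-metadata:
-  name: nfs-pvc
-spec:
-  accessModes:
-    - ReadWriteMany
-  storageClassName: ""
-  resources:
-    requests:
-      storage: 100Gi
-  volumeName: nfs-pv
-EOF
-
-Setting storageClassName to the empty string prevents dynamic provisioning from kicking in, and volumeName pins the claim to the statically defined nfs-pv.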
-
-

Security Benefits of Stunnel with Client Certificates


-
-Using stunnel with client certificate authentication for NFS encryption provides several advantages:
-
-
    -
  • Compatibility: Works with any NFS version and between different operating systems
  • -
  • Strong encryption: Uses TLS/SSL with configurable cipher suites
  • -
  • Transparent: Applications don't need modification, encryption happens at transport layer
  • -
  • Performance: Minimal overhead (~2% in benchmarks)
  • -
  • Flexibility: Can encrypt any TCP-based protocol, not just NFS
  • -
  • Strong Authentication: Client certificates provide cryptographic proof of identity
  • -
  • Access Control: Only clients with valid certificates signed by your CA can connect
  • -
  • Certificate Revocation: You can revoke access by removing certificates from the CA
  • -

-

Laptop/Workstation Access


-
-For development workstations like "earth" (laptop), the same stunnel configuration works, but there's an important caveat with NFSv4:
-
- -
# Install stunnel
-sudo dnf install stunnel
-
-# Configure stunnel (/etc/stunnel/stunnel.conf)
-cert = /etc/stunnel/earth-stunnel.pem
-CAfile = /etc/stunnel/ca-cert.pem
-client = yes
-verify = 2
-
-[nfs-ha]
-accept = 127.0.0.1:2323
-connect = 192.168.1.138:2323
-
-# Enable and start stunnel
-sudo systemctl enable --now stunnel
-
-# Mount NFS through stunnel
-sudo mount -t nfs4 -o port=2323 127.0.0.1:/ /data/nfs
-
-# Make persistent in /etc/fstab
-127.0.0.1:/ /data/nfs nfs4 port=2323,hard,intr,_netdev 0 0
-
-
-#### Important: NFSv4 and Stunnel on Newer Linux Clients
-
-On newer Linux distributions (like Fedora 42+), NFSv4 only uses the specified port for initial mount negotiation, but then establishes data connections directly to port 2049, bypassing stunnel. This doesn't occur on Rocky Linux 9 VMs, which properly route all traffic through the specified port.
-
-To ensure all NFS traffic goes through the encrypted tunnel on affected systems, you need to use iptables:
-
- -
# Redirect all NFS traffic to the CARP VIP through stunnel
-sudo iptables -t nat -A OUTPUT -d 192.168.1.138 -p tcp --dport 2049 -j DNAT --to-destination 127.0.0.1:2323
-
-# Make it persistent (example for Fedora)
-sudo dnf install iptables-services
-sudo service iptables save
-sudo systemctl enable iptables
-
-# Or create a startup script
-cat > ~/setup-nfs-stunnel.sh << 'EOF'
-#!/bin/bash
-# Ensure NFSv4 data connections go through stunnel
-sudo iptables -t nat -D OUTPUT -d 192.168.1.138 -p tcp --dport 2049 -j DNAT --to-destination 127.0.0.1:2323 2>/dev/null
-sudo iptables -t nat -A OUTPUT -d 192.168.1.138 -p tcp --dport 2049 -j DNAT --to-destination 127.0.0.1:2323
-EOF
-chmod +x ~/setup-nfs-stunnel.sh
-
-
-To verify all traffic is encrypted:
- -
# Check active connections
-sudo ss -tnp | grep -E ":2049|:2323"
-# You should see connections to localhost:2323 (stunnel), not direct to the CARP VIP
-
-# Monitor stunnel logs
-journalctl -u stunnel -f
-# You should see connection logs for all NFS operations
-
-
-Note: Thanks to the -alldirs export option, the laptop mounts the full /data/nfs tree, while the Kubernetes nodes only mount the /data/nfs/k3svolumes subdirectory.
-
-The client certificate requirement ensures that:
-
    -
  • Only authorized clients (r0, r1, r2, and earth) can establish stunnel connections
  • -
  • Each client has a unique identity that can be individually managed
  • -
  • Stolen IP addresses alone cannot grant access without the corresponding certificate
  • -
  • Access can be revoked without changing the server configuration
  • -

-The combination of ZFS encryption at rest and stunnel in transit ensures data is protected throughout its lifecycle.
-
-This configuration provides a solid foundation for shared storage in the f3s Kubernetes cluster, with automatic replication and encrypted transport.
-
-

Mounting NFS on Rocky Linux 9


-
-

Installing and Configuring NFS Clients on r0, r1, and r2


-
-First, install the necessary packages on all three Rocky Linux nodes:
-
- -
# On r0, r1, and r2
-dnf install -y nfs-utils stunnel
-
-
-

Configuring Stunnel Client on All Nodes


-
-Copy the certificate and configure stunnel on each Rocky Linux node:
-
- -
# On r0
-scp f0:/tmp/r0-stunnel.pem /etc/stunnel/
-scp f0:/usr/local/etc/stunnel/ca/ca-cert.pem /etc/stunnel/
-tee /etc/stunnel/stunnel.conf <<'EOF'
-cert = /etc/stunnel/r0-stunnel.pem
-CAfile = /etc/stunnel/ca-cert.pem
-client = yes
-verify = 2
-
-[nfs-ha]
-accept = 127.0.0.1:2323
-connect = 192.168.1.138:2323
-EOF
-
-systemctl enable --now stunnel
-
-# Repeat the same configuration on r1 and r2 with their respective certificates
-
-
-

Setting Up NFS Mounts


-
-Create mount points and configure persistent mounts on all nodes:
-
- -
# On r0, r1, and r2
-mkdir -p /data/nfs/k3svolumes
-
-# Add to /etc/fstab for persistent mount (note the NFSv4 relative path)
-echo '127.0.0.1:/k3svolumes /data/nfs/k3svolumes nfs4 port=2323,hard,intr,_netdev 0 0' >> /etc/fstab
-
-# Mount the share
-mount /data/nfs/k3svolumes
-
-
-

Comprehensive NFS Mount Testing


-
-Here's a detailed test plan to verify NFS mounts are working correctly on all nodes:
-
-#### Test 1: Verify Mount Status on All Nodes
-
- -
# On r0
-[root@r0 ~]# mount | grep k3svolumes
-# Expected output:
-# 127.0.0.1:/k3svolumes on /data/nfs/k3svolumes type nfs4 (rw,relatime,vers=4.2,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,port=2323,timeo=600,retrans=2,sec=sys,clientaddr=127.0.0.1,local_lock=none,addr=127.0.0.1)
-
-# On r1
-[root@r1 ~]# mount | grep k3svolumes
-# Should show similar output
-
-# On r2
-[root@r2 ~]# mount | grep k3svolumes
-# Should show similar output
-
-
-#### Test 2: Verify Stunnel Connectivity
-
- -
# On r0
-[root@r0 ~]# systemctl status stunnel
-# Should show: Active: active (running)
-
-[root@r0 ~]# ss -tnl | grep 2323
-# Should show: LISTEN 0 128 127.0.0.1:2323 0.0.0.0:*
-
-# Test connection to CARP VIP
-[root@r0 ~]# nc -zv 192.168.1.138 2323
-# Should show: Connection to 192.168.1.138 2323 port [tcp/*] succeeded!
-
-# Repeat on r1 and r2
-
-
-#### Test 3: File Creation and Visibility Test
-
- -
# On r0 - Create test file
-[root@r0 ~]# echo "Test from r0 - $(date)" > /data/nfs/k3svolumes/test-r0.txt
-[root@r0 ~]# ls -la /data/nfs/k3svolumes/test-r0.txt
-# Should show the file with timestamp
-
-# On r1 - Create test file and check r0's file
-[root@r1 ~]# echo "Test from r1 - $(date)" > /data/nfs/k3svolumes/test-r1.txt
-[root@r1 ~]# ls -la /data/nfs/k3svolumes/
-# Should show both test-r0.txt and test-r1.txt
-
-# On r2 - Create test file and check all files
-[root@r2 ~]# echo "Test from r2 - $(date)" > /data/nfs/k3svolumes/test-r2.txt
-[root@r2 ~]# ls -la /data/nfs/k3svolumes/
-# Should show all three files: test-r0.txt, test-r1.txt, test-r2.txt
-
-
-#### Test 4: Verify Files on Storage Servers
-
- -
# On f0 (primary storage)
-paul@f0:~ % ls -la /data/nfs/k3svolumes/
-# Should show all three test files
-
-# Wait for the next replication run (about one minute), then check on f1
-paul@f1:~ % ls -la /data/nfs/k3svolumes/
-# Should show all three test files (after replication)
-
-
-#### Test 5: Performance and Concurrent Access Test
-
- -
# On r0 - Write large file
-[root@r0 ~]# dd if=/dev/zero of=/data/nfs/k3svolumes/test-large-r0.dat bs=1M count=100
-# Should complete without errors
-
-# On r1 - Read the file while r2 writes
-[root@r1 ~]# dd if=/data/nfs/k3svolumes/test-large-r0.dat of=/dev/null bs=1M &
-# Simultaneously on r2
-[root@r2 ~]# dd if=/dev/zero of=/data/nfs/k3svolumes/test-large-r2.dat bs=1M count=100
-
-# Check for any errors or performance issues
-
-
-#### Test 6: Directory Operations Test
-
- -
# On r0 - Create directory structure
-[root@r0 ~]# mkdir -p /data/nfs/k3svolumes/test-dir/subdir1/subdir2
-[root@r0 ~]# echo "Deep file" > /data/nfs/k3svolumes/test-dir/subdir1/subdir2/deep.txt
-
-# On r1 - Verify and add files
-[root@r1 ~]# ls -la /data/nfs/k3svolumes/test-dir/subdir1/subdir2/
-[root@r1 ~]# echo "Another file from r1" > /data/nfs/k3svolumes/test-dir/subdir1/file-r1.txt
-
-# On r2 - Verify complete structure
-[root@r2 ~]# find /data/nfs/k3svolumes/test-dir -type f
-# Should show both files
-
-
-#### Test 7: Permission and Ownership Test
-
- -
# On r0 - Create files with different permissions
-[root@r0 ~]# touch /data/nfs/k3svolumes/test-perms-644.txt
-[root@r0 ~]# chmod 644 /data/nfs/k3svolumes/test-perms-644.txt
-[root@r0 ~]# touch /data/nfs/k3svolumes/test-perms-755.txt
-[root@r0 ~]# chmod 755 /data/nfs/k3svolumes/test-perms-755.txt
-
-# On r1 and r2 - Verify permissions are preserved
-[root@r1 ~]# ls -l /data/nfs/k3svolumes/test-perms-*.txt
-[root@r2 ~]# ls -l /data/nfs/k3svolumes/test-perms-*.txt
-# Permissions should match what was set on r0
-
-
-#### Test 8: Failover Test (Optional but Recommended)
-
- -
# On f0 - Trigger CARP failover
-paul@f0:~ % doas ifconfig re0 vhid 1 state backup
-
-# On all Rocky nodes - Verify mounts still work
-[root@r0 ~]# echo "Test during failover from r0 - $(date)" > /data/nfs/k3svolumes/failover-test-r0.txt
-[root@r1 ~]# echo "Test during failover from r1 - $(date)" > /data/nfs/k3svolumes/failover-test-r1.txt
-[root@r2 ~]# echo "Test during failover from r2 - $(date)" > /data/nfs/k3svolumes/failover-test-r2.txt
-
-# Verify all files are accessible
-[root@r0 ~]# ls -la /data/nfs/k3svolumes/failover-test-*.txt
-
-# On f1 - Verify it's now MASTER
-paul@f1:~ % ifconfig re0 | grep carp
-# Should show: carp: MASTER vhid 1 advbase 1 advskew 0
-
-# Restore f0 as MASTER
-paul@f0:~ % doas ifconfig re0 vhid 1 state master
-
-
-

Troubleshooting Common Issues


-
-#### Mount Hangs or Times Out
-
- -
# Check stunnel connectivity
-systemctl status stunnel
-ss -tnl | grep 2323
-telnet 127.0.0.1 2323
-
-# Check if you can reach the CARP VIP
-ping 192.168.1.138
-nc -zv 192.168.1.138 2323
-
-# Check for firewall issues
-iptables -L -n | grep 2323
-
-
-#### Permission Denied Errors
-
- -
# Verify the export allows your IP
-# On f0 or f1
-doas showmount -e localhost
-
-# Check if SELinux is blocking (on Rocky Linux)
-getenforce
-# If enforcing, try:
-setenforce 0  # Temporary for testing
-# Or add proper SELinux context:
-setsebool -P use_nfs_home_dirs 1
-
-
-#### Files Not Visible Across Nodes
-
- -
# Force NFS cache refresh
-# On the affected node
-umount /data/nfs/k3svolumes
-mount /data/nfs/k3svolumes
-
-# Check NFS version
-nfsstat -m
-# Should show NFSv4
-
-
-#### I/O Errors When Accessing NFS Mount
-
-I/O errors can have several causes:
-
-1. Missing localhost in exports (most common with stunnel):
- - Since stunnel proxies connections, the NFS server sees requests from 127.0.0.1
- - Ensure your exports include localhost access:
- ```
- /data/nfs/k3svolumes -maproot=root -network 127.0.0.1 -mask 255.255.255.255
- ```
-
-2. Stunnel connection issues or CARP failover:
-
- -
# On the affected node (e.g., r0)
-# Check stunnel is running
-systemctl status stunnel
-
-# Restart stunnel to re-establish connection
-systemctl restart stunnel
-
-# Force remount
-umount -f -l /data/nfs/k3svolumes
-mount -t nfs4 -o port=2323,hard,intr 127.0.0.1:/k3svolumes /data/nfs/k3svolumes
-
-# Check which FreeBSD host is CARP MASTER
-# On f0
-ssh f0 "ifconfig re0 | grep carp"
-# On f1
-ssh f1 "ifconfig re0 | grep carp"
-
-# Verify stunnel on MASTER is bound to VIP
-# On the MASTER host
-ssh <master-host> "sockstat -l | grep 2323"
-
-# Debug stunnel connection
-openssl s_client -connect 192.168.1.138:2323 </dev/null
-
-# If persistent I/O errors, check logs
-journalctl -u stunnel -n 50
-dmesg | tail -20 | grep -i nfs
-
-
-

Comprehensive Production Test Results


-
-After implementing all the improvements (enhanced CARP control script, soft mounts, and automatic recovery), here's a complete test of the setup including reboots and failovers:
-
-#### Test Scenario: Full System Reboot and Failover
-
-
-1. Initial state: Rebooted all servers (f0, f1, f2)
-   - Result: f1 became CARP MASTER after reboot (not always f0)
-   - NFS accessible and writable from all clients
-   
-2. Created test file from laptop:
-   paul@earth:~ % echo "Post-reboot test at $(date)" > /data/nfs/k3svolumes/reboot-test.txt
-   
-3. Verified 1-minute replication to f1:
-   - File appeared on f1 within 70 seconds
-   - Content identical on both servers
-   
-4. Performed failover from f0 to f1:
-   paul@f0:~ % doas ifconfig re0 vhid 1 state backup
-   - f1 immediately became MASTER
-   - Clients experienced "Stale file handle" errors
-   - With soft mounts: No hanging, immediate error response
-   
-5. Recovery time:
-   - Manual recovery: Immediate with umount/mount
-   - Automatic recovery: Within 10 seconds via systemd timer
-   - No data loss during failover
-   
-6. Failback to f0:
-   paul@f1:~ % doas ifconfig re0 vhid 1 state backup
-   - f0 reclaimed MASTER status
-   - Similar stale handle behavior
-   - Recovery within 10 seconds
-
-
-#### Key Findings
-
-1. CARP Master Selection: After reboot, either f0 or f1 can become MASTER. This is normal CARP behavior and doesn't affect functionality.
-
-2. Stale File Handles: Despite all optimizations, NFS clients still experience stale file handles during failover. This is inherent to NFS protocol design. However:
- - Soft mounts prevent hanging
- - Automatic recovery works reliably
- - No data loss occurs
-
-3. Replication Timing: The 1-minute replication interval for NFS data ensures minimal data loss window during unplanned failovers. The Fedora VM replication runs every 10 minutes, which is sufficient for less critical VM data.
-
-4. Service Management: The enhanced carpcontrol.sh script successfully stops services on BACKUP nodes, preventing split-brain scenarios.
-
-

Performance Considerations


-
-

Encryption Overhead


-
-Stunnel adds CPU overhead for TLS encryption/decryption. On modern hardware, the impact is minimal:
-
-
    -
  • Beelink Mini PCs: With hardware AES acceleration, expect 5-10% CPU overhead
  • -
  • Network throughput: Gigabit Ethernet is usually the bottleneck, not TLS
  • -
  • Latency: Adds <1ms in LAN environments
  • -

-For reference, with AES-256-GCM on a typical mini PC:
-
    -
  • Sequential reads: ~110 MB/s (near line-speed for gigabit)
  • -
  • Sequential writes: ~105 MB/s
  • -
  • Random 4K IOPS: ~15% reduction compared to unencrypted
  • -

-

Replication Bandwidth


-
-ZFS replication with zrepl is efficient, only sending changed blocks:
-
-
    -
  • Initial sync: Full dataset size (can be large)
  • -
  • Incremental: Typically <1% of dataset size per snapshot
  • -
  • Network usage: With 1-minute intervals and moderate changes, expect 10-50 MB/minute
  • -

-To monitor replication bandwidth:
- -
# On f0, check network usage on WireGuard interface
-doas systat -ifstat 1
-# Look for wg0 traffic during replication
-
-
-

NFS Tuning


-
-For optimal performance with Kubernetes workloads:
-
- -
# On NFS server (f0/f1) - /etc/sysctl.conf
-vfs.nfsd.async=1                    # Enable async writes (careful with data integrity)
-vfs.nfsd.cachetcp=1                 # Cache TCP connections
-vfs.nfsd.tcphighwater=64            # Increase TCP connection limit
-
-# On NFS clients - mount options
-rsize=131072,wsize=131072           # Larger read/write buffers
-hard,intr                           # Hard mount with interruption
-vers=4.2                            # Use latest NFSv4.2 for best performance
-
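-Settings in /etc/sysctl.conf only take effect at boot. They can usually also be applied at runtime (a sketch; availability of these sysctls depends on the FreeBSD release):
-
- -
# Apply the server-side settings immediately, without a reboot
-paul@f0:~ % doas sysctl vfs.nfsd.async=1 vfs.nfsd.cachetcp=1 vfs.nfsd.tcphighwater=64
-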
-
-

ZFS Tuning


-
-Key ZFS settings for NFS storage:
-
- -
# Set on the NFS dataset
-zfs set compression=lz4 zdata/enc/nfsdata              # Fast compression
-zfs set atime=off zdata/enc/nfsdata                    # Disable access time updates
-zfs set redundant_metadata=most zdata/enc/nfsdata      # Protect metadata
-
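-To double-check the resulting properties (output assumed):
-
- -
paul@f0:~ % doas zfs get -o property,value compression,atime,redundant_metadata zdata/enc/nfsdata
-PROPERTY            VALUE
-compression         lz4
-atime               off
-redundant_metadata  most
-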
-
-

Monitoring


-
-Monitor system performance to identify bottlenecks:
-
- -
# CPU and memory
-doas top -P
-
-# Disk I/O
-doas gstat -p
-
-# Network traffic
-doas netstat -w 1 -h
-
-# ZFS statistics
-doas zpool iostat -v 1
-
-# NFS statistics
-doas nfsstat -s -w 1
-
-
-

Cleanup After Testing


-
- -
# Remove test files (run on any node)
-rm -f /data/nfs/k3svolumes/test-*.txt
-rm -f /data/nfs/k3svolumes/test-large-*.dat
-rm -f /data/nfs/k3svolumes/failover-test-*.txt
-rm -f /data/nfs/k3svolumes/test-perms-*.txt
-rm -rf /data/nfs/k3svolumes/test-dir
-
-
-This comprehensive testing ensures that:
-
    -
  • All nodes can mount the NFS share
  • -
  • Files created on one node are visible on all others
  • -
  • The encrypted stunnel connection is working
  • -
  • Permissions and ownership are preserved
  • -
  • The setup can handle concurrent access
  • -
  • Failover works correctly (if tested)
  • -

-

Conclusion


-
-We've built a robust, encrypted storage system for our FreeBSD-based Kubernetes cluster that provides:
-
-

What We Achieved


-
-
    -
  • High Availability: CARP ensures the storage VIP moves automatically during failures
  • -
  • Data Protection: ZFS encryption protects data at rest, stunnel protects data in transit
  • -
  • Continuous Replication: 1-minute RPO for critical data, automated via zrepl
  • -
  • Secure Access: Client certificate authentication prevents unauthorized access
  • -
  • Kubernetes Integration: Shared storage accessible from all cluster nodes
  • -

-

Architecture Benefits


-
-This design prioritizes data integrity over pure availability:
-
    -
  • Manual failover prevents split-brain scenarios
  • -
  • Certificate-based authentication provides strong security
  • -
  • Encrypted replication protects data even over untrusted networks
  • -
  • ZFS snapshots enable point-in-time recovery
  • -

-

Lessons Learned


-
-1. Stunnel vs Native NFS/TLS: While native encryption would be ideal, stunnel provides better cross-platform compatibility
-2. Manual vs Automatic Failover: For storage systems, controlled failover often prevents more problems than it causes
-3. Replication Frequency: Balance between data protection (RPO) and system load
-4. Client Compatibility: Different NFS implementations behave differently - test thoroughly
-
-

Next Steps


-
-With reliable storage in place, we can now:
-
    -
  • Deploy stateful applications on Kubernetes
  • -
  • Set up databases with persistent volumes
  • -
  • Create shared configuration stores
  • -
  • Implement backup strategies using ZFS snapshots
  • -

-The storage layer is the foundation for any serious Kubernetes deployment. By building it on FreeBSD with ZFS, CARP, and stunnel, we get enterprise-grade features on commodity hardware.
-
-

References


-
-
    -
  • FreeBSD CARP documentation: https://docs.freebsd.org/en/books/handbook/advanced-networking/#carp
  • -
  • ZFS encryption guide: https://docs.freebsd.org/en/books/handbook/zfs/#zfs-encryption
  • -
  • Stunnel documentation: https://www.stunnel.org/docs.html
  • -
  • zrepl documentation: https://zrepl.github.io/
  • -

-Other *BSD-related posts:
-
-2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
-2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
-2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
-2024-12-03 f3s: Kubernetes with FreeBSD - Part 2: Hardware and base installation
-2024-11-17 f3s: Kubernetes with FreeBSD - Part 1: Setting the stage
-2024-04-01 KISS high-availability with OpenBSD
-2024-01-13 One reason why I love OpenBSD
-2022-10-30 Installing DTail on OpenBSD
-2022-07-30 Let's Encrypt with OpenBSD and Rex
-2016-04-09 Jails and ZFS with Puppet on FreeBSD
-
-E-Mail your comments to paul@nospam.buetow.org
-
-Back to the main site
- - - diff --git a/gemfeed/atom.xml b/gemfeed/atom.xml index 70ee5c08..cda6264c 100644 --- a/gemfeed/atom.xml +++ b/gemfeed/atom.xml @@ -1,11 +1,1830 @@ - 2025-07-12T22:45:27+03:00 + 2025-07-13T16:45:56+03:00 foo.zone feed To be in the .zone! https://foo.zone/ + + f3s: Kubernetes with FreeBSD - Part 6: Storage + + https://foo.zone/gemfeed/2025-07-14-f3s-kubernetes-with-freebsd-part-6.html + 2025-07-13T16:44:29+03:00 + + Paul Buetow aka snonux + paul@dev.buetow.org + + This is the sixth blog post about the f3s series for self-hosting demands in a home lab. f3s? The 'f' stands for FreeBSD, and the '3s' stands for k3s, the Kubernetes distribution used on FreeBSD-based physical machines. + +
+

f3s: Kubernetes with FreeBSD - Part 6: Storage


+
+Published at 2025-07-13T16:44:29+03:00
+
+This is the sixth blog post about the f3s series for self-hosting demands in a home lab. f3s? The "f" stands for FreeBSD, and the "3s" stands for k3s, the Kubernetes distribution used on FreeBSD-based physical machines.
+
+2024-11-17 f3s: Kubernetes with FreeBSD - Part 1: Setting the stage
+2024-12-03 f3s: Kubernetes with FreeBSD - Part 2: Hardware and base installation
+2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
+2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
+2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage (You are currently reading this)
+
+f3s logo
+
+

Table of Contents


+
+
+

Introduction


+
+In the previous posts, we set up a FreeBSD-based Kubernetes cluster using k3s. While the base system works well, Kubernetes workloads often require persistent storage for databases, configuration files, and application data. Local storage on each node has significant limitations:
+
+
    +
  • No data sharing: Pods (once we run Kubernetes) on different nodes can't access the same data
  • +
  • Pod mobility: If a pod moves to another node, it loses access to its data
  • +
  • No redundancy: Hardware failure means data loss
  • +

+This post implements a robust storage solution using:
+
+
    +
  • CARP: For high availability with automatic IP failover
  • +
  • NFS over stunnel: For secure, encrypted network storage
  • +
  • ZFS: For data integrity, encryption, and efficient snapshots
  • +
  • zrepl: For continuous ZFS replication between nodes
  • +

+The result is a highly available, encrypted storage system that survives node failures while providing shared storage to all Kubernetes pods.
+
+Other than what was mentioned in the first post of this blog series, we aren't using HAST, but zrepl for data replication. Read more about it later in this blog post.
+
+

Additional storage capacity


+
+We add 1 TB of additional storage to each of the nodes (f0, f1, f2) in the form of an SSD drive. The Beelink mini PCs have enough room in the chassis for the extra drive.
+
+
+
+Upgrading the storage was as easy as unscrewing the case, plugging the drive in, and screwing it back together again. The procedure was uneventful! We're using two different SSD models (Samsung 870 EVO and Crucial BX500) to avoid simultaneous failures from the same manufacturing batch.
+
+We then create the zdata ZFS pool on all three nodes:
+
+ +
paul@f0:~ % doas zpool create -m /data zdata /dev/ada1
+paul@f0:~ % zpool list
+NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
+zdata   928G  12.1M   928G        -         -     0%     0%  1.00x    ONLINE  -
+zroot   472G  29.0G   443G        -         -     0%     6%  1.00x    ONLINE  -
+
+paul@f0:/ % doas camcontrol devlist
+<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
+<Samsung SSD 870 EVO 1TB SVT03B6Q>  at scbus1 target 0 lun 0 (pass1,ada1)
+paul@f0:/ %
+
+
+To verify that we have a different SSD on the second node (the third node has the same drive as the first):
+
+ +
paul@f1:/ % doas camcontrol devlist
+<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
+<CT1000BX500SSD1 M6CR072>          at scbus1 target 0 lun 0 (pass1,ada1)
+
+
+

ZFS encryption keys


+
+ZFS native encryption requires encryption keys to unlock datasets. We need a secure method to store these keys that balances security with operational needs:
+
+
    +
  • Security: Keys must not be stored on the same disks they encrypt
  • +
  • Availability: Keys must be available at boot for automatic mounting
  • +
  • Portability: Keys should be easily moved between systems for recovery
  • +

+Using USB flash drives as hardware key storage provides a convenient and elegant solution. The encrypted data is unreadable without physical access to the USB key, protecting against disk theft or improper disposal. In production environments, you may use enterprise key management systems; however, for a home lab, USB keys offer good security with minimal complexity.
+
+

UFS on USB keys


+
+We'll format the USB drives with UFS (Unix File System) rather than ZFS for simplicity; ZFS features would be overkill for a small key-storage drive.
+
+Let's see the USB keys:
+
+USB keys
+
+To verify that the USB key (flash disk) is there:
+
+
+paul@f0:/ % doas camcontrol devlist
+<512GB SSD D910R170>               at scbus0 target 0 lun 0 (pass0,ada0)
+<Samsung SSD 870 EVO 1TB SVT03B6Q>  at scbus1 target 0 lun 0 (pass1,ada1)
+<Generic Flash Disk 8.07>          at scbus2 target 0 lun 0 (da0,pass2)
+paul@f0:/ %
+
+
+Let's create the UFS file system and mount it (done on all three nodes f0, f1 and f2):
+
+ +
paul@f0:/ % doas newfs /dev/da0
+/dev/da0: 15000.0MB (30720000 sectors) block size 32768, fragment size 4096
+        using 24 cylinder groups of 625.22MB, 20007 blks, 80128 inodes.
+        with soft updates
+super-block backups (for fsck_ffs -b #) at:
+ 192, 1280640, 2561088, 3841536, 5121984, 6402432, 7682880, 8963328, 10243776,
+11524224, 12804672, 14085120, 15365568, 16646016, 17926464, 19206912, 20487360,
+...
+
+paul@f0:/ % echo '/dev/da0 /keys ufs rw 0 2' | doas tee -a /etc/fstab
+/dev/da0 /keys ufs rw 0 2
+paul@f0:/ % doas mkdir /keys
+paul@f0:/ % doas mount /keys
+paul@f0:/ % df | grep keys
+/dev/da0             14877596       8  13687384     0%    /keys
+
+
+USB keys stuck in
+
+

Generating encryption keys


+
+The following keys will later be used to encrypt the ZFS file systems. They will be stored on all three nodes, serving as a backup in case one of the keys is lost or corrupted. When we later replicate encrypted ZFS volumes from one node to another, the keys must also be available on the destination node.
+
+
+paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:bhyve.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:bhyve.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:bhyve.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:zdata.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:zdata.key 32
+paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:zdata.key 32
+paul@f0:/keys % doas chown root *
+paul@f0:/keys % doas chmod 400 *
+
+paul@f0:/keys % ls -l
+total 20
+-r--------  1 root wheel 32 May 25 13:07 f0.lan.buetow.org:bhyve.key
+-r--------  1 root wheel 32 May 25 13:07 f1.lan.buetow.org:bhyve.key
+-r--------  1 root wheel 32 May 25 13:07 f2.lan.buetow.org:bhyve.key
+-r--------  1 root wheel 32 May 25 13:07 f0.lan.buetow.org:zdata.key
+-r--------  1 root wheel 32 May 25 13:07 f1.lan.buetow.org:zdata.key
+-r--------  1 root wheel 32 May 25 13:07 f2.lan.buetow.org:zdata.key
+
+
+After creation, these are copied to the other two nodes, f1 and f2, into the /keys partition (I won't provide the commands here; create a tarball, copy it over, and extract it on the destination nodes).
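+
+For reference, one way to do this (a minimal sketch, assuming SSH access between the nodes; the tarball path is illustrative):
+
+paul@f0:/keys % doas tar -cf /tmp/keys.tar -C /keys .
+paul@f0:/keys % scp /tmp/keys.tar f1:/tmp/
+paul@f0:/keys % scp /tmp/keys.tar f2:/tmp/
+paul@f1:~ % doas tar -xpf /tmp/keys.tar -C /keys && rm /tmp/keys.tar
+paul@f2:~ % doas tar -xpf /tmp/keys.tar -C /keys && rm /tmp/keys.tar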
+
+

Configuring zdata ZFS pool encryption


+
+Let's encrypt our zdata ZFS pool. We are not encrypting the whole pool, but everything within the zdata/enc data set:
+
+ +
paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o \
+  keylocation=file:///keys/`hostname`:zdata.key zdata/enc
+paul@f0:/ % zfs list | grep zdata
+zdata                                          836K   899G    96K  /data
+zdata/enc                                      200K   899G   200K  /data/enc
+
+paul@f0:/keys % zfs get all zdata/enc | grep -E -i '(encryption|key)'
+zdata/enc  encryption            aes-256-gcm                               -
+zdata/enc  keylocation           file:///keys/f0.lan.buetow.org:zdata.key  local
+zdata/enc  keyformat             raw                                       -
+zdata/enc  encryptionroot        zdata/enc                                 -
+zdata/enc  keystatus             available                                 -
+
+
+All future data sets within zdata/enc will inherit the same encryption key.
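+
+A quick way to confirm the inheritance (using a throwaway child dataset, purely for illustration):
+
+paul@f0:~ % doas zfs create zdata/enc/demo
+paul@f0:~ % zfs get -H -o value encryptionroot zdata/enc/demo
+zdata/enc
+paul@f0:~ % doas zfs destroy zdata/enc/demo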
+
+

Migrating Bhyve VMs to an encrypted bhyve ZFS volume


+
+We set up Bhyve VMs in a previous blog post. Their ZFS data sets live on zroot, the default ZFS pool on the internal 512GB NVMe drive. They aren't encrypted yet, so we now encrypt the VM data sets as well. To do so, we first shut down the VMs on all three nodes:
+
+ +
paul@f0:/keys % doas vm stop rocky
+Sending ACPI shutdown to rocky
+
+paul@f0:/keys % doas vm list
+NAME     DATASTORE  LOADER     CPU  MEMORY  VNC  AUTO     STATE
+rocky    default    uefi       4    14G     -    Yes [1]  Stopped
+
+
+After this, we rename the unencrypted dataset to _old, create a new encrypted dataset, and snapshot the old rocky dataset as @hamburger.
+
+ +
paul@f0:/keys % doas zfs rename zroot/bhyve zroot/bhyve_old
+paul@f0:/keys % doas zfs set mountpoint=/mnt zroot/bhyve_old
+paul@f0:/keys % doas zfs snapshot zroot/bhyve_old/rocky@hamburger
+
+paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o \
+  keylocation=file:///keys/`hostname`:bhyve.key zroot/bhyve
+paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve zroot/bhyve
+paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve/rocky zroot/bhyve/rocky
+
+
+Once done, we import the snapshot into the encrypted dataset and also copy some other metadata files from vm-bhyve back over.
+
+
+paul@f0:/keys % doas zfs send zroot/bhyve_old/rocky@hamburger | \
+  doas zfs recv zroot/bhyve/rocky
+paul@f0:/keys % doas cp -Rp /mnt/.config /zroot/bhyve/
+paul@f0:/keys % doas cp -Rp /mnt/.img /zroot/bhyve/
+paul@f0:/keys % doas cp -Rp /mnt/.templates /zroot/bhyve/
+paul@f0:/keys % doas cp -Rp /mnt/.iso /zroot/bhyve/
+
+
+We also have to make encrypted ZFS data sets mount automatically on boot:
+
+ +
paul@f0:/keys % doas sysrc zfskeys_enable=YES
+zfskeys_enable:  -> YES
+paul@f0:/keys % doas vm init
+paul@f0:/keys % doas reboot
+.
+.
+.
+paul@f0:~ % doas vm list
+paul@f0:~ % doas vm list
+NAME     DATASTORE  LOADER     CPU  MEMORY  VNC           AUTO     STATE
+rocky    default    uefi       4    14G     0.0.0.0:5900  Yes [1]  Running (2265)
+
+
+As you can see, the VM is running. This means the encrypted zroot/bhyve was mounted successfully after the reboot! Now we can destroy the old, unencrypted, and now unused bhyve dataset:
+
+ +
paul@f0:~ % doas zfs destroy -R zroot/bhyve_old
+
+
+To verify once again that zroot/bhyve and zroot/bhyve/rocky are now both encrypted, we run:
+
+ +
paul@f0:~ % zfs get all zroot/bhyve | grep -E '(encryption|key)'
+zroot/bhyve  encryption            aes-256-gcm                               -
+zroot/bhyve  keylocation           file:///keys/f0.lan.buetow.org:bhyve.key  local
+zroot/bhyve  keyformat             raw                                       -
+zroot/bhyve  encryptionroot        zroot/bhyve                               -
+zroot/bhyve  keystatus             available                                 -
+
+paul@f0:~ % zfs get all zroot/bhyve/rocky | grep -E '(encryption|key)'
+zroot/bhyve/rocky  encryption            aes-256-gcm            -
+zroot/bhyve/rocky  keylocation           none                   default
+zroot/bhyve/rocky  keyformat             raw                    -
+zroot/bhyve/rocky  encryptionroot        zroot/bhyve            -
+zroot/bhyve/rocky  keystatus             available              -
+
+
+

ZFS Replication with zrepl


+
+Data replication is the cornerstone of high availability. While CARP handles IP failover (see later in this post), we need continuous data replication to ensure the backup server has current data when it becomes active. Without replication, failover would result in data loss or require shared storage (like iSCSI), which introduces a single point of failure.
+
+

Understanding Replication Requirements


+
+Our storage system has different replication needs:
+
+
    +
  • NFS data (/data/nfs/k3svolumes): Soon, it will contain active Kubernetes persistent volumes. Needs frequent replication (every minute) to minimise data loss during failover.
  • +
  • VM data (/zroot/bhyve/fedora): Contains VM images that change less frequently. Can tolerate longer replication intervals (every 10 minutes).
  • +

+The 1-minute replication window is perfectly acceptable for my personal use cases. This isn't a high-frequency trading system or a real-time database—it's storage for personal projects, development work, and home lab experiments. Losing at most 1 minute of work in a disaster scenario is a reasonable trade-off for the reliability and simplicity of snapshot-based replication. Additionally, in the case of a "1 minute of data loss," I would likely still have the data available on the client side.
+
+Why use zrepl instead of HAST? While HAST (Highly Available Storage) is FreeBSD's native solution for high-availability storage and supports synchronous replication—thus eliminating the mentioned 1-minute window—I've chosen zrepl for several important reasons:
+
+
    +
  • HAST can cause ZFS corruption: HAST operates at the block level and doesn't understand ZFS's transactional semantics. During failover, in-flight transactions can lead to corrupted zpools. I've experienced this firsthand (admittedly, I may have misconfigured something): the automatic failover triggered while ZFS was still writing, resulting in an unmountable pool.
  • +
  • ZFS-aware replication: zrepl understands ZFS datasets and snapshots. It replicates at the dataset level, ensuring each snapshot is a consistent point-in-time copy. This is fundamentally safer than block-level replication.
  • +
  • Snapshot history: With zrepl, you get multiple recovery points (every minute for NFS data in our setup). If corruption occurs, you can roll back to any previous snapshot. HAST only gives you the current state.
  • +
  • Easier recovery: When something goes wrong with zrepl, you still have intact snapshots on both sides. With HAST, a corrupted primary often means a corrupted secondary as well.
  • +

+FreeBSD HAST
+
+

Installing zrepl


+
+First, install zrepl on both hosts involved (we will replicate data from f0 to f1):
+
+ +
paul@f0:~ % doas pkg install -y zrepl
+
+
+Then, we verify the pools and datasets on both hosts:
+
+ +
# On f0
+paul@f0:~ % doas zpool list
+NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
+zdata   928G  1.03M   928G        -         -     0%     0%  1.00x    ONLINE  -
+zroot   472G  26.7G   445G        -         -     0%     5%  1.00x    ONLINE  -
+
+paul@f0:~ % doas zfs list -r zdata/enc
+NAME        USED  AVAIL  REFER  MOUNTPOINT
+zdata/enc   200K   899G   200K  /data/enc
+
+# On f1
+paul@f1:~ % doas zpool list
+NAME    SIZE  ALLOC   FREE  CKPOINT  EXPANDSZ   FRAG    CAP  DEDUP    HEALTH  ALTROOT
+zdata   928G   956K   928G        -         -     0%     0%  1.00x    ONLINE  -
+zroot   472G  11.7G   460G        -         -     0%     2%  1.00x    ONLINE  -
+
+paul@f1:~ % doas zfs list -r zdata/enc
+NAME        USED  AVAIL  REFER  MOUNTPOINT
+zdata/enc   200K   899G   200K  /data/enc
+
+
+Since we have a WireGuard tunnel between f0 and f1, we'll use TCP transport over the secure tunnel instead of SSH. First, check the WireGuard IP addresses:
+
+ +
# Check WireGuard interface IPs
+paul@f0:~ % ifconfig wg0 | grep inet
+	inet 192.168.2.130 netmask 0xffffff00
+
+paul@f1:~ % ifconfig wg0 | grep inet
+	inet 192.168.2.131 netmask 0xffffff00
+
+
+Let's create a dedicated dataset for NFS data that will be replicated:
+
+ +
# Create the nfsdata dataset that will hold all data exposed via NFS
+paul@f0:~ % doas zfs create zdata/enc/nfsdata
+
+
+Afterwards, we create the zrepl configuration on f0:
+
+ +
paul@f0:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
+global:
+  logging:
+    - type: stdout
+      level: info
+      format: human
+
+jobs:
+  - name: f0_to_f1_nfsdata
+    type: push
+    connect:
+      type: tcp
+      address: "192.168.2.131:8888"
+    filesystems:
+      "zdata/enc/nfsdata": true
+    send:
+      encrypted: true
+    snapshotting:
+      type: periodic
+      prefix: zrepl_
+      interval: 1m
+    pruning:
+      keep_sender:
+        - type: last_n
+          count: 10
+      keep_receiver:
+        - type: last_n
+          count: 10
+
+  - name: f0_to_f1_fedora
+    type: push
+    connect:
+      type: tcp
+      address: "192.168.2.131:8888"
+    filesystems:
+      "zroot/bhyve/fedora": true
+    send:
+      encrypted: true
+    snapshotting:
+      type: periodic
+      prefix: zrepl_
+      interval: 10m
+    pruning:
+      keep_sender:
+        - type: last_n
+          count: 10
+      keep_receiver:
+        - type: last_n
+          count: 10
+EOF
+
+
+We're using two separate replication jobs with different intervals:
+
+
    +
  • f0_to_f1_nfsdata: Replicates NFS data every minute for faster failover recovery
  • +
  • f0_to_f1_fedora: Replicates Fedora VM every ten minutes (less critical)
  • +

+The Fedora VM is only used for development purposes, so it doesn't require replication as frequently as the NFS data. It's off-topic for this blog series, but it showcases zrepl's flexibility in handling datasets with different replication needs.
+
+Furthermore:
+
+
    +
  • We're specifically replicating zdata/enc/nfsdata instead of the entire zdata/enc dataset. This dedicated dataset will contain all the data we later want to expose via NFS, keeping a clear separation between replicated NFS data and other local encrypted data.
  • +
  • The send: encrypted: true option sends the datasets as raw, ZFS-encrypted streams. The data never leaves the source in unencrypted form, and the replica on f1 remains encrypted with the same key; this is why f0's key must later be loaded on f1 to mount the replica. The WireGuard tunnel between f0 and f1 additionally protects the traffic in transit.
  • +

+

Configuring zrepl on f1 (sink)


+
+On f1 (the sink, meaning it's the node receiving the replication data), we configure zrepl to receive the data as follows:
+
+ +
# First, create a dedicated sink dataset
+paul@f1:~ % doas zfs create zdata/sink
+
+paul@f1:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
+global:
+  logging:
+    - type: stdout
+      level: info
+      format: human
+
+jobs:
+  - name: sink
+    type: sink
+    serve:
+      type: tcp
+      listen: "192.168.2.131:8888"
+      clients:
+        "192.168.2.130": "f0"
+    recv:
+      placeholder:
+        encryption: inherit
+    root_fs: "zdata/sink"
+EOF
+
+
+

Enabling and starting zrepl services


+
+We then enable and start zrepl on both hosts via:
+
+ +
# On f0
+paul@f0:~ % doas sysrc zrepl_enable=YES
+zrepl_enable:  -> YES
+paul@f0:~ % doas service zrepl start
+Starting zrepl.
+
+# On f1
+paul@f1:~ % doas sysrc zrepl_enable=YES
+zrepl_enable:  -> YES
+paul@f1:~ % doas service zrepl start
+Starting zrepl.
+
+
+To check the replication status, we run:
+
+ +
# On f0, check zrepl status (use raw mode for non-tty)
+paul@f0:~ % doas pkg install jq
+paul@f0:~ % doas zrepl status --mode raw | grep -A2 "Replication" | jq .
+"Replication":{"StartAt":"2025-07-01T22:31:48.712143123+03:00"...
+
+# Check if services are running
+paul@f0:~ % doas service zrepl status
+zrepl is running as pid 2649.
+
+paul@f1:~ % doas service zrepl status
+zrepl is running as pid 2574.
+
+# Check for zrepl snapshots on source
+paul@f0:~ % doas zfs list -t snapshot -r zdata/enc | grep zrepl
+zdata/enc@zrepl_20250701_193148_000    0B      -   176K  -
+
+# On f1, verify the replicated datasets  
+paul@f1:~ % doas zfs list -r zdata | grep f0
+zdata/f0             576K   899G   200K  none
+zdata/f0/zdata       376K   899G   200K  none
+zdata/f0/zdata/enc   176K   899G   176K  none
+
+# Check replicated snapshots on f1
+paul@f1:~ % doas zfs list -t snapshot -r zdata | grep zrepl
+zdata/f0/zdata/enc@zrepl_20250701_193148_000     0B      -   176K  -
+zdata/f0/zdata/enc@zrepl_20250701_194148_000     0B      -   176K  -
+.
+.
+.
+
+
+

Monitoring replication


+
+You can monitor the replication progress with:
+
+ +
paul@f0:~ % doas zrepl status
+
+
+zrepl status
+
+With this setup, both zdata/enc/nfsdata and zroot/bhyve/fedora on f0 will be automatically replicated to f1 every 1 minute (or 10 minutes in the case of the Fedora VM), with encrypted snapshots preserved on both sides. The pruning policy ensures that we keep the last 10 snapshots while managing disk space efficiently.
+
+The replicated data appears on f1 under zdata/sink/ with the source host and dataset hierarchy preserved:
+
+
    +
  • zdata/enc/nfsdatazdata/sink/f0/zdata/enc/nfsdata
  • +
  • zroot/bhyve/fedorazdata/sink/f0/zroot/bhyve/fedora
  • +

+This is by design - zrepl preserves the complete path from the source to ensure there are no conflicts when replicating from multiple sources.
+
+

Verifying replication after reboot


+
+The zrepl service is configured to start automatically at boot. After rebooting both hosts:
+
+ +
paul@f0:~ % uptime
+11:17PM  up 1 min, 0 users, load averages: 0.16, 0.06, 0.02
+
+paul@f0:~ % doas service zrepl status
+zrepl is running as pid 2366.
+
+paul@f1:~ % doas service zrepl status
+zrepl is running as pid 2309.
+
+# Check that new snapshots are being created and replicated
+paul@f0:~ % doas zfs list -t snapshot | grep zrepl | tail -2
+zdata/enc/nfsdata@zrepl_20250701_202530_000                0B      -   200K  -
+zroot/bhyve/fedora@zrepl_20250701_202530_000               0B      -  2.97G  -
+.
+.
+.
+
+paul@f1:~ % doas zfs list -t snapshot -r zdata/sink | grep 202530
+zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_202530_000      0B      -   176K  -
+zdata/sink/f0/zroot/bhyve/fedora@zrepl_20250701_202530_000     0B      -  2.97G  -
+.
+.
+.
+
+
+The timestamps confirm that replication resumed automatically after the reboot, ensuring continuous data protection. We can also write a test file to the NFS data directory on f0 and verify whether it appears on f1 after a minute.
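+
+A minimal sketch of such a test (at this point the dataset is still mounted at its inherited path /data/enc/nfsdata; we'll move it to /data/nfs in a later section):
+
+# On f0: write a test file into the replicated dataset
+paul@f0:~ % echo 'replication test' | doas tee /data/enc/nfsdata/replication-test.txt
+
+# On f1, after the next snapshot interval: the newest replicated snapshot should contain it
+paul@f1:~ % doas zfs list -t snapshot -r zdata/sink/f0/zdata/enc/nfsdata | tail -1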
+
+

Understanding Failover Limitations and Design Decisions


+
+Our system intentionally fails over to a read-only replica if the primary fails. This is due to the nature of zrepl, which only replicates data in one direction: if we mounted the dataset read-write on the sink node, it would diverge from the source, and the replication would break. The dataset can still be promoted to read-write on the sink in case of a genuine failure of the primary node, but that step is left intentionally manual (see the sketch after the summary list below). This way, we never have to repair a broken replication afterwards.
+
+So in summary:
+
+
    +
  • Split-brain prevention: Automatic failover to a read-write copy can cause both nodes to become active simultaneously if network communication fails. This leads to data divergence that's extremely difficult to resolve.
  • +
  • False positive protection: Temporary network issues or high load can trigger unwanted failovers. Manual intervention ensures that failovers occur only when truly necessary.
  • +
  • Data integrity over availability: For storage systems, data consistency is paramount. A few minutes of downtime is preferable to data corruption in this specific use case.
  • +
  • Simplified recovery: With manual failover, you always know which dataset is authoritative, making recovery more straightforward.
  • +
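+
+For reference, a manual promotion on f1 might look like this sketch (only for a genuine f0 failure; it breaks replication until it is re-initialised, and it uses the CARP setup introduced later in this post):
+
+# On f1: allow writes on the replica and take over the virtual IP
+paul@f1:~ % doas zfs set readonly=off zdata/sink/f0/zdata/enc/nfsdata
+paul@f1:~ % doas ifconfig re0 vhid 1 state master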

+

Mounting the NFS datasets


+
+To make the NFS data accessible on both nodes, we need to mount it. On f0, this is straightforward:
+
+ +
# On f0 - set mountpoint for the primary nfsdata
+paul@f0:~ % doas zfs set mountpoint=/data/nfs zdata/enc/nfsdata
+paul@f0:~ % doas mkdir -p /data/nfs
+
+# Verify it's mounted
+paul@f0:~ % df -h /data/nfs
+Filesystem           Size    Used   Avail Capacity  Mounted on
+zdata/enc/nfsdata    899G    204K    899G     0%    /data/nfs
+
+
+On f1, we need to handle the encryption key and mount the standby copy:
+
+ +
# On f1 - first check encryption status
+paul@f1:~ % doas zfs get keystatus zdata/sink/f0/zdata/enc/nfsdata
+NAME                             PROPERTY   VALUE        SOURCE
+zdata/sink/f0/zdata/enc/nfsdata  keystatus  unavailable  -
+
+# Load the encryption key (using f0's key stored on the USB)
+paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key \
+    zdata/sink/f0/zdata/enc/nfsdata
+
+# Set mountpoint and mount (same path as f0 for easier failover)
+paul@f1:~ % doas mkdir -p /data/nfs
+paul@f1:~ % doas zfs set mountpoint=/data/nfs zdata/sink/f0/zdata/enc/nfsdata
+paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
+
+# Make it read-only to prevent accidental writes that would break replication
+paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
+
+# Verify
+paul@f1:~ % df -h /data/nfs
+Filesystem                         Size    Used   Avail Capacity  Mounted on
+zdata/sink/f0/zdata/enc/nfsdata    896G    204K    896G     0%    /data/nfs
+
+
+Note: The dataset is mounted at the same path (/data/nfs) on both hosts to simplify failover procedures. The dataset on f1 is set to readonly=on to prevent accidental modifications, which, as mentioned earlier, would break replication. If we did modify it, replication from f0 to f1 would fail like this:
+
+cannot receive incremental stream: destination zdata/sink/f0/zdata/enc/nfsdata has been modified since most recent snapshot
+
+To fix a broken replication after accidental writes, we can do:
+
+ +
# Step 1: Roll back to the last common snapshot (discards the local changes)
+paul@f1:~ % doas zfs rollback zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_204054_000
+
+# Step 2: Set it read-only again to prevent repeat accidents
+paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
+
+
+And replication should work again!
+
+

Troubleshooting: Files not appearing in replication


+
+If you write files to /data/nfs/ on f0 but they don't appear on f1, first check whether the dataset is mounted on f0:
+
+ +
paul@f0:~ % doas zfs list -o name,mountpoint,mounted | grep nfsdata
+zdata/enc/nfsdata                             /data/nfs             yes
+
+
+If it shows no, the dataset isn't mounted! This means files are being written to the root filesystem, not ZFS. Next, we should check whether the encryption key is loaded:
+
+ +
paul@f0:~ % doas zfs get keystatus zdata/enc/nfsdata
+NAME               PROPERTY   VALUE        SOURCE
+zdata/enc/nfsdata  keystatus  available    -
+# If "unavailable", load the key:
+paul@f0:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
+paul@f0:~ % doas zfs mount zdata/enc/nfsdata
+
+
+You can also verify that files are in the snapshot (not just the directory):
+
+ +
paul@f0:~ % ls -la /data/nfs/.zfs/snapshot/zrepl_*/
+
+
+This issue commonly occurs after a reboot if the encryption keys aren't configured to load automatically.
+
+

Configuring automatic key loading on boot


+
+To ensure all additional encrypted datasets are mounted automatically after reboot as well, we do:
+
+ +
# On f0 - configure all encrypted datasets
+paul@f0:~ % doas sysrc zfskeys_enable=YES
+zfskeys_enable: YES -> YES
+paul@f0:~ % doas sysrc zfskeys_datasets="zdata/enc zdata/enc/nfsdata zroot/bhyve"
+zfskeys_datasets:  -> zdata/enc zdata/enc/nfsdata zroot/bhyve
+
+# Set correct key locations for all datasets
+paul@f0:~ % doas zfs set keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
+
+# On f1 - include the replicated dataset
+paul@f1:~ % doas sysrc zfskeys_enable=YES
+zfskeys_enable: YES -> YES
+paul@f1:~ % doas sysrc zfskeys_datasets="zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata"
+zfskeys_datasets:  -> zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata
+
+# Set key location for replicated dataset
+paul@f1:~ % doas zfs set keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/sink/f0/zdata/enc/nfsdata
+
+
+Important notes:
+
+
    +
  • Each encryption root needs its own key load entry
  • +
  • The replicated dataset on f1 uses the same encryption key as the source on f0
  • +
  • Always verify datasets are mounted after reboot with zfs list -o name,mounted
  • +
  • Critical: Always ensure the replicated dataset on f1 remains read-only with doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
  • +

+

CARP (Common Address Redundancy Protocol)


+
+High availability is crucial for storage systems. If the storage server goes down, all NFS clients (which will also be Kubernetes pods later on in this series) lose access to their persistent data. CARP provides a solution by creating a virtual IP address that automatically migrates to a different server during failures. Clients point their NFS mounts at that VIP and therefore always reach the current primary node.
+
+

How CARP Works


+
+In our case, CARP allows two hosts (f0 and f1) to share a virtual IP address (VIP). The hosts communicate using multicast to elect a MASTER, while the other remains BACKUP. When the MASTER fails, the BACKUP automatically promotes itself and takes over the VIP. This happens within seconds.
+
+Key benefits for our storage system:
+
+
    +
  • Automatic failover: No manual intervention is required for basic failures, although there are a few limitations. The backup will have read-only access to the available data by default, as we have already learned.
  • +
  • Transparent to clients: Pods continue using the same IP address
  • +
  • Works with stunnel: Behind the VIP, there will be a stunnel process running, which ensures encrypted connections follow the active server.
  • +

+FreeBSD CARP
+Stunnel
+
+

Configuring CARP


+
+First, we add the CARP configuration to /etc/rc.conf on both f0 and f1:
+
+ +
# The virtual IP 192.168.1.138 will float between f0 and f1
+ifconfig_re0_alias0="inet vhid 1 pass testpass alias 192.168.1.138/32"
+
+
+Whereas:
+
+
    +
  • vhid 1: Virtual Host ID - must match on all CARP members
  • +
  • pass testpass: Password for CARP authentication (if you follow this, use a different password!)
  • +
  • alias 192.168.1.138/32: The virtual IP address with a /32 netmask
  • +

+Next, update /etc/hosts on all nodes (f0, f1, f2, r0, r1, r2) to resolve the VIP hostname:
+
+
+192.168.1.138 f3s-storage-ha f3s-storage-ha.lan f3s-storage-ha.lan.buetow.org
+
+
+This allows clients to connect to f3s-storage-ha regardless of which physical server is currently the MASTER.
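+
+A quick sanity check from any node (getent reads /etc/hosts, so no DNS is required):
+
+[root@r0 ~]# getent hosts f3s-storage-ha
+192.168.1.138   f3s-storage-ha f3s-storage-ha.lan f3s-storage-ha.lan.buetow.org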
+
+

CARP State Change Notifications


+
+To correctly manage services during failover, we need to detect CARP state changes. FreeBSD's devd system can notify us when CARP transitions between MASTER and BACKUP states.
+
+Add this to /etc/devd.conf on both f0 and f1:
+
+ +
paul@f0:~ % cat <<END | doas tee -a /etc/devd.conf
+notify 0 {
+        match "system"          "CARP";
+        match "subsystem"       "[0-9]+@[0-9a-z.]+";
+        match "type"            "(MASTER|BACKUP)";
+        action "/usr/local/bin/carpcontrol.sh $subsystem $type";
+};
+END
+
+paul@f0:~ % doas service devd restart
+
+
+Next, we create the CARP control script that will restart stunnel when the CARP state changes:
+
+ +
paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
+#!/bin/sh
+# CARP state change control script
+
+case "$1" in
+    MASTER)
+        logger "CARP state changed to MASTER, starting services"
+        ;;
+    BACKUP)
+        logger "CARP state changed to BACKUP, stopping services"
+        ;;
+    *)
+        logger "CARP state changed to $1 (unhandled)"
+        ;;
+esac
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
+
+# Copy the same script to f1
+paul@f0:~ % scp /usr/local/bin/carpcontrol.sh f1:/tmp/
+paul@f1:~ % doas mv /tmp/carpcontrol.sh /usr/local/bin/
+paul@f1:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
+
+
+Note that carpcontrol.sh doesn't do anything useful yet. We will provide more details (including starting and stopping services upon failover) later in this blog post.
+
+To enable CARP in /boot/loader.conf, run:
+
+ +
paul@f0:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf
+carp_load="YES"
+paul@f1:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf  
+carp_load="YES"
+
+
+Then reboot both hosts or run doas kldload carp to load the module immediately.
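+
+Either way, a quick check confirms that the VIP is configured (on the current MASTER you should see something like this):
+
+paul@f0:~ % ifconfig re0 | grep carp
+    carp: MASTER vhid 1 advbase 1 advskew 0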
+
+

NFS Server Configuration


+
+With ZFS replication in place, we can now set up NFS servers on both f0 and f1 to export the replicated data. Since native NFS over TLS (RFC 9289) has compatibility issues between Linux and FreeBSD (not digging into the details here, but I couldn't get it to work), we'll use stunnel to provide encryption.
+
+

Setting up NFS on f0 (Primary)


+
+First, enable the NFS services in rc.conf:
+
+ +
paul@f0:~ % doas sysrc nfs_server_enable=YES
+nfs_server_enable: YES -> YES
+paul@f0:~ % doas sysrc nfsv4_server_enable=YES
+nfsv4_server_enable: YES -> YES
+paul@f0:~ % doas sysrc nfsuserd_enable=YES
+nfsuserd_enable: YES -> YES
+paul@f0:~ % doas sysrc mountd_enable=YES
+mountd_enable: NO -> YES
+paul@f0:~ % doas sysrc rpcbind_enable=YES
+rpcbind_enable: NO -> YES
+
+
+And we also create a dedicated directory for Kubernetes volumes:
+
+ +
# First, ensure the dataset is mounted
+paul@f0:~ % doas zfs get mounted zdata/enc/nfsdata
+NAME               PROPERTY  VALUE    SOURCE
+zdata/enc/nfsdata  mounted   yes      -
+
+# Create the k3svolumes directory
+paul@f0:~ % doas mkdir -p /data/nfs/k3svolumes
+paul@f0:~ % doas chmod 755 /data/nfs/k3svolumes
+
+
+We also create the /etc/exports file. Since we're using stunnel for encryption, ALL clients must connect through stunnel, which appears as localhost (127.0.0.1) to the NFS server:
+
+ +
paul@f0:~ % doas tee /etc/exports <<'EOF'
+V4: /data/nfs -sec=sys
+/data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255
+EOF
+
+
+The exports configuration:
+
+
    +
  • V4: /data/nfs -sec=sys: Sets the NFSv4 root directory to /data/nfs
  • +
  • -maproot=root: Maps root user from client to root on server
  • +
  • -network 127.0.0.1: Only accepts connections from localhost (stunnel)
  • +

+To start the NFS services, we run:
+
+ +
paul@f0:~ % doas service rpcbind start
+Starting rpcbind.
+paul@f0:~ % doas service mountd start
+Starting mountd.
+paul@f0:~ % doas service nfsd start
+Starting nfsd.
+paul@f0:~ % doas service nfsuserd start
+Starting nfsuserd.
+
+
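+To confirm that the exports are registered (showmount queries mountd; the exact output format may vary slightly):
+
+paul@f0:~ % showmount -e localhost
+Exports list on localhost:
+/data/nfs                          127.0.0.1
+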
+

Configuring Stunnel for NFS Encryption with CARP Failover


+
+Using stunnel with client certificate authentication for NFS encryption provides several advantages:
+
+
    +
  • Compatibility: Works with any NFS version and between different operating systems
  • +
  • Strong encryption: Uses TLS/SSL with configurable cipher suites
  • +
  • Transparent: Applications don't need modification, encryption happens at the transport layer
  • +
  • Performance: Minimal overhead (~2% in benchmarks)
  • +
  • Flexibility: Can encrypt any TCP-based protocol, not just NFS
  • +
  • Strong Authentication: Client certificates provide cryptographic proof of identity
  • +
  • Access Control: Only clients with valid certificates signed by your CA can connect
  • +
  • Certificate Revocation: You can revoke access by removing certificates from the CA
  • +

+Stunnel integrates seamlessly with our CARP setup:
+
+
+                    CARP VIP (192.168.1.138)
+                           |
+    f0 (MASTER) ←---------→|←---------→ f1 (BACKUP)
+    stunnel:2323           |           stunnel:stopped
+    nfsd:2049              |           nfsd:stopped
+                           |
+                    Clients connect here
+
+
+The key insight is that stunnel binds to the CARP VIP. When CARP fails over, the VIP is moved to the new master, and stunnel starts there automatically. Clients maintain their connection to the same IP throughout.
+
+

Creating a Certificate Authority for Client Authentication


+
+First, create a CA to sign both server and client certificates:
+
+ +
# On f0 - Create CA
+paul@f0:~ % doas mkdir -p /usr/local/etc/stunnel/ca
+paul@f0:~ % cd /usr/local/etc/stunnel/ca
+paul@f0:~ % doas openssl genrsa -out ca-key.pem 4096
+paul@f0:~ % doas openssl req -new -x509 -days 3650 -key ca-key.pem -out ca-cert.pem \
+  -subj '/C=US/ST=State/L=City/O=F3S Storage/CN=F3S Stunnel CA'
+
+# Create server certificate
+paul@f0:~ % cd /usr/local/etc/stunnel
+paul@f0:~ % doas openssl genrsa -out server-key.pem 4096
+paul@f0:~ % doas openssl req -new -key server-key.pem -out server.csr \
+  -subj '/C=US/ST=State/L=City/O=F3S Storage/CN=f3s-storage-ha.lan'
+paul@f0:~ % doas openssl x509 -req -days 3650 -in server.csr -CA ca/ca-cert.pem \
+  -CAkey ca/ca-key.pem -CAcreateserial -out server-cert.pem
+
+# Create client certificates for authorised clients
+paul@f0:~ % cd /usr/local/etc/stunnel/ca
+paul@f0:~ % doas sh -c 'for client in r0 r1 r2 earth; do 
+  openssl genrsa -out ${client}-key.pem 4096
+  openssl req -new -key ${client}-key.pem -out ${client}.csr \
+    -subj "/C=US/ST=State/L=City/O=F3S Storage/CN=${client}.lan.buetow.org"
+  openssl x509 -req -days 3650 -in ${client}.csr -CA ca-cert.pem \
+    -CAkey ca-key.pem -CAcreateserial -out ${client}-cert.pem
+done'
+
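+
+Optionally, verify that a client certificate chains back to the CA:
+
+paul@f0:/usr/local/etc/stunnel/ca % doas openssl verify -CAfile ca-cert.pem r0-cert.pem
+r0-cert.pem: OK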
+
+

Install and Configure Stunnel on f0


+
+ +
# Install stunnel
+paul@f0:~ % doas pkg install -y stunnel
+
+# Configure stunnel server with client certificate authentication
+paul@f0:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF'
+cert = /usr/local/etc/stunnel/server-cert.pem
+key = /usr/local/etc/stunnel/server-key.pem
+
+setuid = stunnel
+setgid = stunnel
+
+[nfs-tls]
+accept = 192.168.1.138:2323
+connect = 127.0.0.1:2049
+CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem
+verify = 2
+requireCert = yes
+EOF
+
+# Enable and start stunnel
+paul@f0:~ % doas sysrc stunnel_enable=YES
+stunnel_enable:  -> YES
+paul@f0:~ % doas service stunnel start
+Starting stunnel.
+
+# Restart stunnel to apply the CARP VIP binding
+paul@f0:~ % doas service stunnel restart
+Stopping stunnel.
+Starting stunnel.
+
+
+The configuration includes:
+
+
    +
  • verify = 2: Verify client certificate and fail if not provided
  • +
  • requireCert = yes: Client must present a valid certificate
  • +
  • CAfile: Path to the CA certificate that signed the client certificates
  • +
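+
+You can exercise the listener from f0 itself using openssl s_client (a sketch, reusing r0's client certificate created earlier; a handshake ending in "Verify return code: 0 (ok)" confirms both the server certificate and the CA trust):
+
+paul@f0:/usr/local/etc/stunnel % doas openssl s_client -connect 192.168.1.138:2323 \
+    -cert ca/r0-cert.pem -key ca/r0-key.pem -CAfile ca/ca-cert.pem </dev/null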

+

Setting up NFS on f1 (Standby)


+
+Repeat the same configuration on f1:
+
+ +
paul@f1:~ % doas sysrc nfs_server_enable=YES
+nfs_server_enable: NO -> YES
+paul@f1:~ % doas sysrc nfsv4_server_enable=YES
+nfsv4_server_enable: NO -> YES
+paul@f1:~ % doas sysrc nfsuserd_enable=YES
+nfsuserd_enable: NO -> YES
+paul@f1:~ % doas sysrc mountd_enable=YES
+mountd_enable: NO -> YES
+paul@f1:~ % doas sysrc rpcbind_enable=YES
+rpcbind_enable: NO -> YES
+
+paul@f1:~ % doas tee /etc/exports <<'EOF'
+V4: /data/nfs -sec=sys
+/data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255
+EOF
+
+paul@f1:~ % doas service rpcbind start
+Starting rpcbind.
+paul@f1:~ % doas service mountd start
+Starting mountd.
+paul@f1:~ % doas service nfsd start
+Starting nfsd.
+paul@f1:~ % doas service nfsuserd start
+Starting nfsuserd.
+
+
+And to configure stunnel on f1, we run:
+
+ +
# Install stunnel
+paul@f1:~ % doas pkg install -y stunnel
+
+# Copy certificates from f0
+paul@f0:~ % doas tar -cf /tmp/stunnel-certs.tar -C /usr/local/etc/stunnel server-cert.pem server-key.pem ca
+paul@f0:~ % scp /tmp/stunnel-certs.tar f1:/tmp/
+
+paul@f1:~ % cd /usr/local/etc/stunnel && doas tar -xf /tmp/stunnel-certs.tar
+
+# Configure stunnel server on f1 with client certificate authentication
+paul@f1:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF'
+cert = /usr/local/etc/stunnel/server-cert.pem
+key = /usr/local/etc/stunnel/server-key.pem
+
+setuid = stunnel
+setgid = stunnel
+
+[nfs-tls]
+accept = 192.168.1.138:2323
+connect = 127.0.0.1:2049
+CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem
+verify = 2
+requireCert = yes
+EOF
+
+# Enable and start stunnel
+paul@f1:~ % doas sysrc stunnel_enable=YES
+stunnel_enable:  -> YES
+paul@f1:~ % doas service stunnel start
+Starting stunnel.
+
+# Restart stunnel to apply the CARP VIP binding
+paul@f1:~ % doas service stunnel restart
+Stopping stunnel.
+Starting stunnel.
+
+
+

CARP Control Script for Clean Failover


+
+With stunnel configured to bind to the CARP VIP (192.168.1.138), only the server that is currently the CARP MASTER will accept stunnel connections. This provides automatic failover for encrypted NFS:
+
+
    +
  • When f0 is CARP MASTER: stunnel on f0 accepts connections on 192.168.1.138:2323
  • +
  • When f1 becomes CARP MASTER: stunnel on f1 starts accepting connections on 192.168.1.138:2323
  • +
  • The backup server's stunnel process will fail to bind to the VIP and won't accept connections
  • +

+This ensures that clients always connect to the active NFS server through the CARP VIP. To ensure clean failover behaviour and prevent stale file handles, we'll update our carpcontrol.sh script so that it:
+
+
    +
  • Stops NFS services on BACKUP nodes (preventing split-brain scenarios)
  • +
  • Starts NFS services only on the MASTER node
  • +
  • Manages stunnel binding to the CARP VIP
  • +

+This approach ensures clients can only connect to the active server, eliminating stale handles from the inactive server:
+
+ +
# Create CARP control script on both f0 and f1
+paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
+#!/bin/sh
+# CARP state change control script
+
+case "$1" in
+    MASTER)
+        logger "CARP state changed to MASTER, starting services"
+        service rpcbind start >/dev/null 2>&1
+        service mountd start >/dev/null 2>&1
+        service nfsd start >/dev/null 2>&1
+        service nfsuserd start >/dev/null 2>&1
+        service stunnel restart >/dev/null 2>&1
+        logger "CARP MASTER: NFS and stunnel services started"
+        ;;
+    BACKUP)
+        logger "CARP state changed to BACKUP, stopping services"
+        service stunnel stop >/dev/null 2>&1
+        service nfsd stop >/dev/null 2>&1
+        service mountd stop >/dev/null 2>&1
+        service nfsuserd stop >/dev/null 2>&1
+        logger "CARP BACKUP: NFS and stunnel services stopped"
+        ;;
+    *)
+        logger "CARP state changed to $1 (unhandled)"
+        ;;
+esac
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
+
+
+

CARP Management Script


+
+To simplify CARP state management and failover testing, create this helper script on both f0 and f1:
+
+ +
# Create the CARP management script
+paul@f0:~ % doas tee /usr/local/bin/carp <<'EOF'
+#!/bin/sh
+# CARP state management script
+# Usage: carp [master|backup|auto-failback enable|auto-failback disable]
+# Without arguments: shows current state
+
+# Find the interface with CARP configured
+CARP_IF=$(ifconfig -l | xargs -n1 | while read if; do
+    ifconfig "$if" 2>/dev/null | grep -q "carp:" && echo "$if" && break
+done)
+
+if [ -z "$CARP_IF" ]; then
+    echo "Error: No CARP interface found"
+    exit 1
+fi
+
+# Get CARP VHID
+VHID=$(ifconfig "$CARP_IF" | grep "carp:" | sed -n 's/.*vhid \([0-9]*\).*/\1/p')
+
+if [ -z "$VHID" ]; then
+    echo "Error: Could not determine CARP VHID"
+    exit 1
+fi
+
+# Function to get the current state
+get_state() {
+    ifconfig "$CARP_IF" | grep "carp:" | awk '{print $2}'
+}
+
+# Check for auto-failback block file
+BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
+check_auto_failback() {
+    if [ -f "$BLOCK_FILE" ]; then
+        echo "WARNING: Auto-failback is DISABLED (file exists: $BLOCK_FILE)"
+    fi
+}
+
+# Main logic
+case "$1" in
+    "")
+        # No argument - show current state
+        STATE=$(get_state)
+        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
+        check_auto_failback
+        ;;
+    master)
+        # Force to MASTER state
+        echo "Setting CARP to MASTER state..."
+        ifconfig "$CARP_IF" vhid "$VHID" state master
+        sleep 1
+        STATE=$(get_state)
+        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
+        check_auto_failback
+        ;;
+    backup)
+        # Force to BACKUP state
+        echo "Setting CARP to BACKUP state..."
+        ifconfig "$CARP_IF" vhid "$VHID" state backup
+        sleep 1
+        STATE=$(get_state)
+        echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
+        check_auto_failback
+        ;;
+    auto-failback)
+        case "$2" in
+            enable)
+                if [ -f "$BLOCK_FILE" ]; then
+                    rm "$BLOCK_FILE"
+                    echo "Auto-failback ENABLED (removed $BLOCK_FILE)"
+                else
+                    echo "Auto-failback was already enabled"
+                fi
+                ;;
+            disable)
+                if [ ! -f "$BLOCK_FILE" ]; then
+                    touch "$BLOCK_FILE"
+                    echo "Auto-failback DISABLED (created $BLOCK_FILE)"
+                else
+                    echo "Auto-failback was already disabled"
+                fi
+                ;;
+            *)
+                echo "Usage: $0 auto-failback [enable|disable]"
+                echo "  enable:  Remove block file to allow automatic failback"
+                echo "  disable: Create block file to prevent automatic failback"
+                exit 1
+                ;;
+        esac
+        ;;
+    *)
+        echo "Usage: $0 [master|backup|auto-failback enable|auto-failback disable]"
+        echo "  Without arguments: show current CARP state"
+        echo "  master: force this node to become CARP MASTER"
+        echo "  backup: force this node to become CARP BACKUP"
+        echo "  auto-failback enable:  allow automatic failback to f0"
+        echo "  auto-failback disable: prevent automatic failback to f0"
+        exit 1
+        ;;
+esac
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carp
+
+# Copy to f1 as well
+paul@f0:~ % scp /usr/local/bin/carp f1:/tmp/
+paul@f1:~ % doas cp /tmp/carp /usr/local/bin/carp && doas chmod +x /usr/local/bin/carp
+
+
+Now you can easily manage CARP states and auto-failback:
+
+ +
# Check current CARP state
+paul@f0:~ % doas carp
+CARP state on re0 (vhid 1): MASTER
+
+# If auto-failback is disabled, you'll see a warning
+paul@f0:~ % doas carp
+CARP state on re0 (vhid 1): MASTER
+WARNING: Auto-failback is DISABLED (file exists: /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+# Force f0 to become BACKUP (triggers failover to f1)
+paul@f0:~ % doas carp backup
+Setting CARP to BACKUP state...
+CARP state on re0 (vhid 1): BACKUP
+
+# Disable auto-failback (useful for maintenance)
+paul@f0:~ % doas carp auto-failback disable
+Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+# Enable auto-failback
+paul@f0:~ % doas carp auto-failback enable
+Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+
+

Automatic Failback After Reboot


+
+When f0 reboots (planned or unplanned), f1 takes over as CARP MASTER. To ensure f0 automatically reclaims its primary role once it's fully operational, we'll implement an automatic failback mechanism with the following script:
+
+ +
paul@f0:~ % doas tee /usr/local/bin/carp-auto-failback.sh <<'EOF'
+#!/bin/sh
+# CARP automatic failback script for f0
+# Ensures f0 reclaims MASTER role after reboot when storage is ready
+
+LOGFILE="/var/log/carp-auto-failback.log"
+MARKER_FILE="/data/nfs/nfs.DO_NOT_REMOVE"
+BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
+
+log_message() {
+    echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$LOGFILE"
+}
+
+# Check if we're already MASTER (only the first line of the carp output carries the state)
+CURRENT_STATE=$(/usr/local/bin/carp | awk 'NR==1{print $NF}')
+if [ "$CURRENT_STATE" = "MASTER" ]; then
+    exit 0
+fi
+
+# Check if /data/nfs is mounted
+if ! mount | grep -q "on /data/nfs "; then
+    log_message "SKIP: /data/nfs not mounted"
+    exit 0
+fi
+
+# Check if the marker file exists (identifies that the ZFS data set is properly mounted)
+if [ ! -f "$MARKER_FILE" ]; then
+    log_message "SKIP: Marker file $MARKER_FILE not found"
+    exit 0
+fi
+
+# Check if failback is blocked (for maintenance)
+if [ -f "$BLOCK_FILE" ]; then
+    log_message "SKIP: Failback blocked by $BLOCK_FILE"
+    exit 0
+fi
+
+# Check if NFS services are running (ensure we're fully ready)
+if ! service nfsd status >/dev/null 2>&1; then
+    log_message "SKIP: NFS services not yet running"
+    exit 0
+fi
+
+# All conditions met - promote to MASTER
+log_message "CONDITIONS MET: Promoting to MASTER (was $CURRENT_STATE)"
+/usr/local/bin/carp master
+
+# Log result
+sleep 2
+NEW_STATE=$(/usr/local/bin/carp | awk 'NR==1{print $NF}')
+log_message "Failback complete: State is now $NEW_STATE"
+
+# If successful, log to the system log too
+if [ "$NEW_STATE" = "MASTER" ]; then
+    logger "CARP: f0 automatically reclaimed MASTER role"
+fi
+EOF
+
+paul@f0:~ % doas chmod +x /usr/local/bin/carp-auto-failback.sh
+
+
+The marker file identifies that the ZFS data set is mounted correctly. We create it with:
+
+ +
paul@f0:~ % doas touch /data/nfs/nfs.DO_NOT_REMOVE
+
+
+We add a cron job to check every minute:
+
+ +
paul@f0:~ % echo "* * * * * /usr/local/bin/carp-auto-failback.sh" | doas crontab -
+
+
+The enhanced CARP script provides integrated control over auto-failback. To temporarily turn off automatic failback (e.g., for f0 maintenance), we run:
+
+ +
paul@f0:~ % doas carp auto-failback disable
+Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+
+And to re-enable it:
+
+ +
paul@f0:~ % doas carp auto-failback enable
+Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
+
+
+To check whether auto-failback is enabled, we run:
+
+ +
paul@f0:~ % doas carp
+CARP state on re0 (vhid 1): MASTER
+# If disabled, you'll see: WARNING: Auto-failback is DISABLED
+
+
+The failback attempts are logged to /var/log/carp-auto-failback.log!
+
+So, in summary:
+
+
    +
  • After f0 reboots: f1 is MASTER, f0 boots as BACKUP
  • +
  • Cron runs every minute: it checks that f0 is currently BACKUP (nothing to do if already MASTER), that /data/nfs is mounted (the ZFS datasets are ready), that the marker file exists (confirming this is the primary storage dataset), that failback isn't blocked (the admin can prevent it), and that the NFS services are running (the system is fully ready)
  • +
  • Failback occurs: Typically 2-3 minutes after boot completes
  • +
  • Logging: All attempts logged for troubleshooting
  • +

+This ensures f0 automatically resumes its role as primary storage server after any reboot, while providing administrative control when needed.
+
+

Client Configuration for Stunnel


+
+To mount NFS shares with stunnel encryption, clients must install and configure stunnel using their client certificates.
+
+

Configuring Rocky Linux Clients (r0, r1, r2)


+
+On the Rocky Linux VMs, we run:
+
+ +
# Install stunnel on client (example for r0)
+[root@r0 ~]# dnf install -y stunnel nfs-utils
+
+# Copy the client certificate, its key, and the CA certificate from f0
+[root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/r0-cert.pem /etc/stunnel/
+[root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/r0-key.pem /etc/stunnel/
+[root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/ca-cert.pem /etc/stunnel/
+
+# Configure stunnel client with certificate authentication
+[root@r0 ~]# tee /etc/stunnel/stunnel.conf <<'EOF'
+cert = /etc/stunnel/r0-cert.pem
+key = /etc/stunnel/r0-key.pem
+CAfile = /etc/stunnel/ca-cert.pem
+client = yes
+verify = 2
+
+[nfs-ha]
+accept = 127.0.0.1:2323
+connect = 192.168.1.138:2323
+EOF
+
+# Enable and start stunnel
+[root@r0 ~]# systemctl enable --now stunnel
+
+# Repeat for r1 and r2 with their respective certificates
+
+
+Note: Each client must use its own certificate and key pair (r0-*.pem, r1-*.pem, r2-*.pem, or earth-*.pem; the earth pair is for my laptop, which can also mount the NFS shares).
+
+

Testing NFS Mount with Stunnel


+
+To mount NFS through the stunnel encrypted tunnel, we run:
+
+ +
# Create a mount point
+[root@r0 ~]# mkdir -p /data/nfs/k3svolumes
+
+# Mount through stunnel (using localhost and NFSv4)
+[root@r0 ~]# mount -t nfs4 -o port=2323 127.0.0.1:/data/nfs/k3svolumes /data/nfs/k3svolumes
+
+# Verify mount
+[root@r0 ~]# mount | grep k3svolumes
+127.0.0.1:/data/nfs/k3svolumes on /data/nfs/k3svolumes type nfs4 (rw,relatime,vers=4.2,rsize=131072,wsize=131072,namlen=255,hard,proto=tcp,port=2323,timeo=600,retrans=2,sec=sys,clientaddr=127.0.0.1,local_lock=none,addr=127.0.0.1)
+
+# For persistent mount, add to /etc/fstab:
+127.0.0.1:/data/nfs/k3svolumes /data/nfs/k3svolumes nfs4 port=2323,_netdev 0 0
+
+
+Note: The mount uses localhost (127.0.0.1) because stunnel is listening locally and forwarding the encrypted traffic to the remote server.
+
+

Testing CARP Failover with Mounted Clients and Stale File Handles


+
+To test the failover process:
+
+ +
# On f0 (current MASTER) - trigger failover
+paul@f0:~ % doas ifconfig re0 vhid 1 state backup
+
+# On f1 - verify it becomes MASTER
+paul@f1:~ % ifconfig re0 | grep carp
+    inet 192.168.1.138 netmask 0xffffffff broadcast 192.168.1.138 vhid 1
+
+# Check stunnel is now listening on f1
+paul@f1:~ % doas sockstat -l | grep 2323
+stunnel  stunnel    4567  3  tcp4   192.168.1.138:2323    *:*
+
+# On client - verify NFS mount still works
+[root@r0 ~]# ls /data/nfs/k3svolumes/
+[root@r0 ~]# echo "Test after failover" > /data/nfs/k3svolumes/failover-test.txt
+
+
+After a CARP failover, NFS clients may experience "Stale file handle" errors because they cached file handles from the previous server. To resolve this manually, we can run:
+
+ +
# Force unmount and remount
+[root@r0 ~]# umount -f /data/nfs/k3svolumes
+[root@r0 ~]# mount /data/nfs/k3svolumes
+
+
+For the automatic recovery, we create a script:
+
+ +
[root@r0 ~]# cat > /usr/local/bin/check-nfs-mount.sh << 'EOF'
+#!/bin/bash
+# Fast NFS mount health monitor - runs every 10 seconds via systemd timer
+
+MOUNT_POINT="/data/nfs/k3svolumes"
+LOCK_FILE="/var/run/nfs-mount-check.lock"
+STATE_FILE="/var/run/nfs-mount.state"
+
+# Use a lock file to prevent concurrent runs
+if [ -f "$LOCK_FILE" ]; then
+    exit 0
+fi
+touch "$LOCK_FILE"
+trap "rm -f $LOCK_FILE" EXIT
+
+# Quick check - try to stat a directory with a very short timeout
+if timeout 2s stat "$MOUNT_POINT" >/dev/null 2>&1; then
+    # Mount appears healthy
+    if [ -f "$STATE_FILE" ]; then
+        # Was previously unhealthy, log recovery
+        echo "NFS mount recovered at $(date)" | systemd-cat -t nfs-monitor -p info
+        rm -f "$STATE_FILE"
+    fi
+    exit 0
+fi
+
+# Mount is unhealthy
+if [ ! -f "$STATE_FILE" ]; then
+    # First detection of unhealthy state
+    echo "NFS mount unhealthy detected at $(date)" | systemd-cat -t nfs-monitor -p warning
+    touch "$STATE_FILE"
+fi
+
+# Try to fix
+echo "Attempting to fix stale NFS mount at $(date)" | systemd-cat -t nfs-monitor -p notice
+umount -f "$MOUNT_POINT" 2>/dev/null
+sleep 1
+
+if mount "$MOUNT_POINT"; then
+    echo "NFS mount fixed at $(date)" | systemd-cat -t nfs-monitor -p info
+    rm -f "$STATE_FILE"
+else
+    echo "Failed to fix NFS mount at $(date)" | systemd-cat -t nfs-monitor -p err
+fi
+EOF
+[root@r0 ~]# chmod +x /usr/local/bin/check-nfs-mount.sh
+
+
+And we create the systemd service as follows:
+
+ +
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.service << 'EOF'
+[Unit]
+Description=NFS Mount Health Monitor
+After=network-online.target
+
+[Service]
+Type=oneshot
+ExecStart=/usr/local/bin/check-nfs-mount.sh
+StandardOutput=journal
+StandardError=journal
+EOF
+
+
+And we also create the systemd timer (runs every 10 seconds):
+
+ +
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.timer << 'EOF'
+[Unit]
+Description=Run NFS Mount Health Monitor every 10 seconds
+Requires=nfs-mount-monitor.service
+
+[Timer]
+OnBootSec=30s
+OnUnitActiveSec=10s
+AccuracySec=1s
+
+[Install]
+WantedBy=timers.target
+EOF
+
+
+To enable and start the timer, we run:
+
+ +
[root@r0 ~]# systemctl daemon-reload
+[root@r0 ~]# systemctl enable nfs-mount-monitor.timer
+[root@r0 ~]# systemctl start nfs-mount-monitor.timer
+
+# Check status
+[root@r0 ~]# systemctl status nfs-mount-monitor.timer
+● nfs-mount-monitor.timer - Run NFS Mount Health Monitor every 10 seconds
+     Loaded: loaded (/etc/systemd/system/nfs-mount-monitor.timer; enabled)
+     Active: active (waiting) since Sat 2025-07-06 10:00:00 EEST
+    Trigger: Sat 2025-07-06 10:00:10 EEST; 8s left
+
+# Monitor logs
+[root@r0 ~]# journalctl -u nfs-mount-monitor -f
+
+
+Note: Stale file handles are inherent to NFS failover because file handles are server-specific. The best approach depends on your application's tolerance for brief disruptions. Of course, all the changes made to r0 above must also be applied to r1 and r2.
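+
+A sketch for rolling the monitor out to the remaining clients (assuming root SSH access between the VMs):
+
+[root@r0 ~]# for host in r1 r2; do
+  scp /usr/local/bin/check-nfs-mount.sh ${host}:/usr/local/bin/
+  scp /etc/systemd/system/nfs-mount-monitor.{service,timer} ${host}:/etc/systemd/system/
+  ssh ${host} 'chmod +x /usr/local/bin/check-nfs-mount.sh && systemctl daemon-reload && systemctl enable --now nfs-mount-monitor.timer'
+done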
+
+

Complete Failover Test


+
+Here's a comprehensive test of the failover behaviour with all optimisations in place:
+
+ +
# 1. Check the initial state
+paul@f0:~ % ifconfig re0 | grep carp
+    carp: MASTER vhid 1 advbase 1 advskew 0
+paul@f1:~ % ifconfig re0 | grep carp
+    carp: BACKUP vhid 1 advbase 1 advskew 0
+
+# 2. Create a test file from a client
+[root@r0 ~]# echo "test before failover" > /data/nfs/k3svolumes/test-before.txt
+
+# 3. Trigger failover (f0 → f1)
+paul@f0:~ % doas ifconfig re0 vhid 1 state backup
+
+# 4. Monitor client behaviour
+[root@r0 ~]# ls /data/nfs/k3svolumes/
+ls: cannot access '/data/nfs/k3svolumes/': Stale file handle
+
+# 5. Check automatic recovery (within 10 seconds)
+[root@r0 ~]# journalctl -u nfs-mount-monitor -f
+Jul 06 10:15:32 r0 nfs-monitor[1234]: NFS mount unhealthy detected at Sun Jul 6 10:15:32 EEST 2025
+Jul 06 10:15:32 r0 nfs-monitor[1234]: Attempting to fix stale NFS mount at Sun Jul 6 10:15:32 EEST 2025
+Jul 06 10:15:33 r0 nfs-monitor[1234]: NFS mount fixed at Sun Jul 6 10:15:33 EEST 2025
+
+
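+To fail back after the test, we can promote f0 again (or, if CARP preemption is enabled, it will reclaim MASTER automatically):
+
+ +
paul@f0:~ % doas ifconfig re0 vhid 1 state master
+
+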
+Failover Timeline:
+
+
    +
  • 0 seconds: CARP failover triggered
  • +
  • 0-2 seconds: Clients get "Stale file handle" errors (not hanging)
  • +
  • 3-10 seconds: Soft mounts ensure quick failure of operations
  • +
  • Within 10 seconds: Automatic recovery via systemd timer
  • +

+Benefits of the Optimised Setup:
+
+
    +
  • No hanging processes - Soft mounts fail quickly
  • +
  • Clean failover - Old server stops serving immediately
  • +
  • Fast automatic recovery - No manual intervention needed
  • +
  • Predictable timing - Recovery within 10 seconds with systemd timer
  • +
  • Better visibility - systemd journal provides detailed logs
  • +

+Important Considerations:
+
+
    +
  • Recent writes (within 1 minute) may not be visible after failover due to replication lag
  • +
  • Applications should handle brief NFS errors gracefully
  • +
  • For zero-downtime requirements, consider synchronous replication or distributed storage (see the "Future Storage Explorations" section later in this blog post)
  • +

+
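+A quick way to gauge the replication lag is to check the creation times of the most recent snapshots zrepl has received on the standby host. The exact dataset and snapshot names depend on the zrepl job configured earlier in this post:
+
+ +
paul@f1:~ % zfs list -t snapshot -o name,creation -s creation | tail -3
+
+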

Conclusion


+
+We've built a robust, encrypted storage system for our FreeBSD-based Kubernetes cluster that provides:
+
+
    +
  • High Availability: CARP ensures the storage VIP moves automatically during failures
  • +
  • Data Protection: ZFS encryption protects data at rest, stunnel protects data in transit
  • +
  • Continuous Replication: 1-minute RPO for the data, automated via zrepl
  • +
  • Secure Access: Client certificate authentication prevents unauthorised access
  • +

+Some key lessons learned are:
+
+
    +
  • Stunnel vs Native NFS/TLS: While native encryption would be ideal, stunnel provides better cross-platform compatibility
  • +
  • Manual vs Automatic Failover: For storage systems, controlled failover often prevents more problems than it causes
  • +
  • Client Compatibility: Different NFS implementations behave differently - test thoroughly
  • +

+

Future Storage Explorations


+
+While zrepl provides excellent snapshot-based replication for disaster recovery, there are other storage technologies worth exploring for the f3s project:
+
+

MinIO for S3-Compatible Object Storage


+
+MinIO is a high-performance, S3-compatible object storage system that could complement our ZFS-based storage. Some potential use cases:
+
+
    +
  • S3 API compatibility: Many modern applications expect S3-style object storage APIs. MinIO could provide this interface while using our ZFS storage as the backend.
  • +
  • Multi-site replication: MinIO supports active-active replication across multiple sites, which could work well with our f0/f1/f2 node setup.
  • +
  • Kubernetes native: MinIO has excellent Kubernetes integration with operators and CSI drivers, making it ideal for the f3s k3s environment.
  • +

+
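+As a rough illustration of the Kubernetes-native point above: once k3s is running (the topic of the next post), a MinIO test deployment could mount the NFS share from this post directly. This is a hypothetical sketch, not part of the current setup; the names, image tag, and VIP address are assumptions:
+
+ +
apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: minio
+spec:
+  replicas: 1
+  selector:
+    matchLabels:
+      app: minio
+  template:
+    metadata:
+      labels:
+        app: minio
+    spec:
+      containers:
+      - name: minio
+        image: quay.io/minio/minio:latest
+        args: ["server", "/data", "--console-address", ":9001"]
+        volumeMounts:
+        - name: data
+          mountPath: /data
+      volumes:
+      - name: data
+        nfs:
+          server: 192.168.1.138  # storage VIP (assumption)
+          path: /data/nfs/k3svolumes
+
+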

MooseFS for Distributed High Availability


+
+MooseFS is a fault-tolerant, distributed file system that could provide proper high-availability storage:
+
+
    +
  • True HA: Unlike our current setup, which requires manual failover, MooseFS provides automatic failover with no single point of failure.
  • +
  • POSIX compliance: Applications can use MooseFS like any regular filesystem, with no code changes needed.
  • +
  • Flexible redundancy: Configure different replication levels per directory or file, optimising storage efficiency.
  • +
  • FreeBSD support: MooseFS has native FreeBSD support, making it a natural fit for the f3s project.
  • +
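+
+MooseFS could be trialled straight from FreeBSD packages. A minimal sketch, assuming the moosefs3-* packages and the rc script names they install (untested here):
+
+ +
paul@f0:~ % doas pkg install moosefs3-master moosefs3-chunkserver moosefs3-client
+paul@f0:~ % doas sysrc mfsmaster_enable=YES
+paul@f0:~ % doas service mfsmaster start
+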

+Both technologies could run on top of our encrypted ZFS volumes, combining ZFS's data integrity and encryption features with distributed storage capabilities. This would be particularly interesting for workloads that need either S3-compatible APIs (MinIO) or transparent distributed POSIX storage (MooseFS).
+
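+If we went down either route, the backend could be a dedicated dataset under the encrypted pool set up in this post, inheriting its encryption and compression settings. The pool and dataset names below are assumptions:
+
+ +
paul@f0:~ % doas zfs create zdata/minio
+paul@f0:~ % doas zfs create zdata/moosefs
+
+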
+I'm looking forward to the next post in this series, where we will set up k3s (Kubernetes) on the Linux VMs.
+
+Other *BSD-related posts:
+
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage (You are currently reading this)
+2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
+2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
+2024-12-03 f3s: Kubernetes with FreeBSD - Part 2: Hardware and base installation
+2024-11-17 f3s: Kubernetes with FreeBSD - Part 1: Setting the stage
+2024-04-01 KISS high-availability with OpenBSD
+2024-01-13 One reason why I love OpenBSD
+2022-10-30 Installing DTail on OpenBSD
+2022-07-30 Let's Encrypt with OpenBSD and Rex
+2016-04-09 Jails and ZFS with Puppet on FreeBSD
+
+E-Mail your comments to paul@nospam.buetow.org
+
+Back to the main site
+
+
+
Posts from January to June 2025 @@ -1044,6 +2863,7 @@ 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network (You are currently reading this)
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -2018,10 +3838,13 @@ peer: 2htXdNcxzpI2FdPDJy4T4VGtm1wpMEQu1AkQHjNY6F8=
Having a mesh network on our hosts is great for securing all the traffic between them for our future k3s setup. A self-managed WireGuard mesh network is better than Tailscale as it eliminates reliance on a third party and provides full control over the configuration. It reduces unnecessary abstraction and "magic," enabling easier debugging and ensuring full ownership of our network.

-I look forward to the next blog post in this series. We may start setting up k3s or take a first look at the NFS server (for persistent storage) side of things. I hope you liked all the posts so far in this series.
+Read the next post of this series:
+
+f3s: Kubernetes with FreeBSD - Part 6: Storage

Other *BSD-related posts:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network (You are currently reading this)
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
@@ -2601,6 +4424,7 @@ __ejm\___/________dwb`---`______________________ 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs (You are currently reading this)
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -3177,6 +5001,7 @@ Apr 4 23: Other *BSD-related posts:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs (You are currently reading this)
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
@@ -3899,6 +5724,7 @@ This is perl, v5.8.8 built 0.freq: 2922
Other *BSD-related posts:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
@@ -5296,6 +7125,7 @@ dev.cpu.0.freq: 2922 2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage

f3s logo

@@ -5447,6 +7277,7 @@ dev.cpu.0.freq: 2922
Other *BSD-related posts:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
@@ -7947,6 +9778,7 @@ http://www.gnu.org/software/src-highlite -->
Other *BSD and KISS related posts are:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
@@ -8312,6 +10144,7 @@ $ doas reboot # Just in case, reboot one more time Other *BSD related posts are:

+2025-07-14 f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-05-11 f3s: Kubernetes with FreeBSD - Part 5: WireGuard mesh network
2025-04-05 f3s: Kubernetes with FreeBSD - Part 4: Rocky Linux Bhyve VMs
2025-02-01 f3s: Kubernetes with FreeBSD - Part 3: Protecting from power cuts
@@ -10888,183 +12721,6 @@ no1 in 455 days, 18:52:44 | at Sun Jul 21 07:37:51 2024 2011-05-07 Perl Daemon (Service Framework)
2008-06-26 Perl Poetry

-Back to the main site
- - -
- - 'Never split the difference' book notes - - https://foo.zone/gemfeed/2023-04-01-never-split-the-difference-book-notes.html - 2023-04-01T20:00:17+03:00 - - Paul Buetow aka snonux - paul@dev.buetow.org - - These are my personal takeaways after reading 'Never split the difference' by Chris Voss. Note that the book contains much more knowledge wisdom and that these notes only contain points I personally found worth writing down. This is mainly for my own use, but you might find it helpful too. - -
-

"Never split the difference" book notes


-
-Published at 2023-04-01T20:00:17+03:00
-
-These are my personal takeaways after reading "Never split the difference" by Chris Voss. Note that the book contains much more knowledge wisdom and that these notes only contain points I personally found worth writing down. This is mainly for my own use, but you might find it helpful too.
-
-
-         ,..........   ..........,
-     ,..,'          '.'          ',..,
-    ,' ,'            :            ', ',
-   ,' ,'             :             ', ',
-  ,' ,'              :              ', ',
- ,' ,'............., : ,.............', ',
-,'  '............   '.'   ............'  ',
- '''''''''''''''''';''';''''''''''''''''''
-                    '''
-
-
-

Table of Contents


-
-
-

Tactical listening, spreading empathy


-
-Be a mirror, copy each other to be comfy with each other to build up trust. Mirroring is mainly body language. A mirror is to repeat the words the other just said. Simple but effective.
-
-
    -
  • A mirror needs space and silence between the words. At least 4 seconds.
  • -
  • A mirror might be awkward to be used at first, especially with a question coupled to it.
  • -
  • We fear what's different and are drawn to what is similar.
  • -

-Mirror training is like Jedi training. Simple but effective. A mirror needs space. Be silent after "you want this?"
-
-

Mindset of discovery


-
-Try to have multiple realities in your mind and use facts to distinguish between real and false.
-
-
    -
  • Focus on what the counterpart has to say and what he needs and wants. Understanding him makes him vulnerable.
  • -
  • Empathy understanding the other person from his perspective, but it does not mean agreeing with him.
  • -
  • Detect and label the emotions of others for your powers.
  • -
  • To be understood seems to solve all problems magically.
  • -

-Try: to put a label on someone's emotion and then be silent. Wait for the other to reveal himself. "You seem unhappy about this?"
-
-

More tips


-
-
    -
  • Put on a poker face and don't show emotions.
  • -
  • Slow things down. Don't be a problem solver.
  • -
  • Smile while you are talking, even on the phone. Be easy and encouraging.
  • -
  • Being right is not the key to successful negotiation; being mindful is.
  • -
  • Be in the safe zone of empathy and acknowledge bad news.
  • -

-

"No" starts the conversation


-
-When the opponent starts with a "no", he feels in control and comfortable. That's why he has to start with "no".
-
-
    -
  • "Yes" and "maybe" might be worthless, but "no" starts the conversation.
  • -
  • If someone is saying "no" to you, he will be open to what you have to say next.
  • -
  • "No" is not stopping the negotiation but will open up opportunities you were not thinking about before.
  • -
  • Start with "no". Great negotiators seek "no" because that's when the great discussions begin.
  • -
  • A "no" can be scary if you are not used to it. If your biggest fear is "no", then you can't negotiate.
  • -

-Get a "That's right" when negotiating. Don't get a "you're right". You can summarise the opponent to get a "that's right".
-
-

Win-win


-
-Win-win is a naive approach when encountering the win-lose counterpart, but always cooperate. Don't compromise, and don't split the difference. We don't compromise because it's right; we do it because it is easy. You must embrace the hard stuff; that's where the great deals are.
-
-

On Deadlines


-
-
    -
  • All deadlines are imaginary.
  • -
  • Most of the time, deadlines unsettle us without a good reason.
  • -
  • They push a deal to a conclusion.
  • -
  • They rush the counterpart to cause pressure and anxiety.
  • -

-

Analyse the opponent


-
-
    -
  • Understand the motivation of people behind the table as well.
  • -
  • Ask how affected they will be.
  • -
  • Determine your and the opposite negotiation style. Accommodation, analyst, assertive.
  • -
  • Treat them how they need to be treated.
  • -

-The person on the other side is never the issue; the problem is the issue. Keep this in mind to avoid emotional issues with the person and focus on the problem, not the person. The bond is essential; never create an enemy.
-
-

Use different ways of saying "no."


-
-I had paid my rent always in time. I had positive experiences with the building and would be sad for the landlord to lose a good tenant. I am looking for a win-win agreement between us. Pulling out the research, other neighbours offer much lower prices even if your building is a better location and services. How can I effort 200 more....
-
-...then put an extreme anker.
-
-You always have to embrace thoughtful confrontation for good negotiation and life. Don't avoid honest, clear conflict. It will give you the best deals. Compromises are mostly bad deals for both sides. Most people don't negotiate a win-win but a win-lose. Know the best and worst outcomes and what is acceptable for you.
-
-

Calibrated question


-
-Calibrated questions. Give the opponent a sense of power. Ask open-how questions to get the opponent to solve your problem and move him in your direction. Calibrated questions are the best tools. Summarise everything, and then ask, "how I am supposed to do that?". Asking for help this way with a calibrated question is a powerful tool for joint problem solving
-
-Being calm and respectful is essential. Without control of your emotions, it won't work. The counterpart will have no idea how constrained they are with your question. Avoid questions which get a yes or short answers. Use "why?".
-
-Counterparts are more involved if these are their solutions. The counterpart must answer with "that's right", not "you are right". He has to own the problem. If not, then add more why questions.
-
-
    -
  • Tone and body language need to align with what people are saying.
  • -
  • Deal with it via a labelled question.
  • -
  • Liers tend to talk with "them" and "their" and not with "I".
  • -
  • Also, liars tend to talk in complex sentences.
  • -

-Prepare 3 to 5 calibrated questions for your counterpart. Be curious what is really motivating the other side. You can get out the "Black Swan".
-
-

The black swan


-
-What we don't know can break our deal. Uncovering it can bring us unexpected success. You get what you ask for in this world, but you must learn to ask correctly. Reveal the black swan by asking questions.
-
-

More


-
-Establish a range at top places like corp. I get... (e.g. remote London on a project basis). Set a high salary range and not a number. Also, check on LinkedIn premium for the salaries.
-
-
    -
  • Give an unexpected gift, e.g. show them my pet project and publicity for engineering.
  • -
  • Use an odd number, which makes you seem to have thought a lot about the sum and calculated it.
  • -
  • Define success and metrics for your next raise.
  • -
  • What does it take to be successful here? Ask the question, and they will tell you and guide you.
  • -
  • Set an extreme anker. Make the counterpart the illusion of losing something.
  • -
  • Hope-based deals. Hope is not a strategy.
  • -
  • Tactical empathy, listening as a martial art. It is emotional intelligence on steroids.
  • -
  • Being right isn't the key to a successful negotiation, but having the correct mindset is.
  • -
  • Don't shop the groceries when you are hungry.
  • -

-Slow.... it.... down....
-
-E-Mail your comments to paul@nospam.buetow.org :-)
-
-Other book notes of mine are:
-
-2025-06-07 "A Monk's Guide to Happiness" book notes
-2025-04-19 "When: The Scientific Secrets of Perfect Timing" book notes
-2024-10-24 "Staff Engineer" book notes
-2024-07-07 "The Stoic Challenge" book notes
-2024-05-01 "Slow Productivity" book notes
-2023-11-11 "Mind Management" book notes
-2023-07-17 "Software Developmers Career Guide and Soft Skills" book notes
-2023-05-06 "The Obstacle is the Way" book notes
-2023-04-01 "Never split the difference" book notes (You are currently reading this)
-2023-03-16 "The Pragmatic Programmer" book notes
-
Back to the main site
diff --git a/gemfeed/f3s-kubernetes-with-freebsd-part-6/zrepl.png b/gemfeed/f3s-kubernetes-with-freebsd-part-6/zrepl.png new file mode 100644 index 00000000..e016bb55 Binary files /dev/null and b/gemfeed/f3s-kubernetes-with-freebsd-part-6/zrepl.png differ diff --git a/gemfeed/index.html b/gemfeed/index.html index 188cd25b..e8198020 100644 --- a/gemfeed/index.html +++ b/gemfeed/index.html @@ -15,6 +15,7 @@

To be in the .zone!



+2025-07-14 - f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-07-01 - Posts from January to June 2025
2025-06-22 - Task Samurai: An agentic coding learning experiment
2025-06-07 - 'A Monk's Guide to Happiness' book notes
diff --git a/index.html b/index.html index 72c768cf..d1c4ac33 100644 --- a/index.html +++ b/index.html @@ -13,7 +13,7 @@

Hello!



-This site was generated at 2025-07-12T22:45:27+03:00 by Gemtexter
+This site was generated at 2025-07-13T16:48:21+03:00 by Gemtexter

Welcome to the foo.zone!

@@ -43,6 +43,7 @@

Posts



+2025-07-14 - f3s: Kubernetes with FreeBSD - Part 6: Storage
2025-07-01 - Posts from January to June 2025
2025-06-22 - Task Samurai: An agentic coding learning experiment
2025-06-07 - 'A Monk's Guide to Happiness' book notes
diff --git a/uptime-stats.html b/uptime-stats.html index cf4a1181..df0224bf 100644 --- a/uptime-stats.html +++ b/uptime-stats.html @@ -13,7 +13,7 @@

My machine uptime stats



-This site was last updated at 2025-07-12T22:45:27+03:00
+This site was last updated at 2025-07-13T16:48:21+03:00

The following stats were collected via uptimed on all of my personal computers over many years and the output was generated by guprecords, the global uptime records stats analyser of mine.

@@ -36,19 +36,19 @@ +-----+----------------+-------+------------------------------+ | 1. | alphacentauri | 671 | FreeBSD 11.4-RELEASE-p7 | | 2. | mars | 207 | Linux 3.2.0-4-amd64 | -| 3. | *earth | 191 | Linux 6.15.4-200.fc42.x86_64 | +| 3. | *earth | 192 | Linux 6.15.4-200.fc42.x86_64 | | 4. | callisto | 153 | Linux 4.0.4-303.fc22.x86_64 | | 5. | dionysus | 136 | FreeBSD 13.0-RELEASE-p11 | | 6. | tauceti-e | 120 | Linux 3.2.0-4-amd64 | | 7. | *makemake | 76 | Linux 6.9.9-200.fc40.x86_64 | | 8. | *uranus | 59 | NetBSD 10.1 | | 9. | pluto | 51 | Linux 3.2.0-4-amd64 | -| 10. | *mega-m3-pro | 50 | Darwin 24.5.0 | -| 11. | mega15289 | 50 | Darwin 23.4.0 | +| 10. | mega15289 | 50 | Darwin 23.4.0 | +| 11. | *mega-m3-pro | 50 | Darwin 24.5.0 | | 12. | *t450 | 43 | FreeBSD 14.2-RELEASE | | 13. | *fishfinger | 43 | OpenBSD 7.6 | -| 14. | phobos | 40 | Linux 3.4.0-CM-g1dd7cdf | -| 15. | mega8477 | 40 | Darwin 13.4.0 | +| 14. | mega8477 | 40 | Darwin 13.4.0 | +| 15. | phobos | 40 | Linux 3.4.0-CM-g1dd7cdf | | 16. | *blowfish | 38 | OpenBSD 7.6 | | 17. | sun | 33 | FreeBSD 10.3-RELEASE-p24 | | 18. | f2 | 25 | FreeBSD 14.2-RELEASE-p1 | @@ -68,7 +68,7 @@ | 1. | vulcan | 4 years, 5 months, 6 days | Linux 3.10.0-1160.81.1.el7.x86_64 | | 2. | sun | 3 years, 9 months, 26 days | FreeBSD 10.3-RELEASE-p24 | | 3. | *uranus | 3 years, 9 months, 5 days | NetBSD 10.1 | -| 4. | *earth | 3 years, 6 months, 30 days | Linux 6.15.4-200.fc42.x86_64 | +| 4. | *earth | 3 years, 7 months, 1 days | Linux 6.15.4-200.fc42.x86_64 | | 5. | *blowfish | 3 years, 5 months, 16 days | OpenBSD 7.6 | | 6. | uugrn | 3 years, 5 months, 5 days | FreeBSD 11.2-RELEASE-p4 | | 7. | deltavega | 3 years, 1 months, 21 days | Linux 3.10.0-1160.11.1.el7.x86_64 | @@ -163,7 +163,7 @@ | 3. | alphacentauri | 6 years, 9 months, 13 days | FreeBSD 11.4-RELEASE-p7 | | 4. | vulcan | 4 years, 5 months, 6 days | Linux 3.10.0-1160.81.1.el7.x86_64 | | 5. | *makemake | 4 years, 4 months, 7 days | Linux 6.9.9-200.fc40.x86_64 | -| 6. | *earth | 3 years, 12 months, 17 days | Linux 6.15.4-200.fc42.x86_64 | +| 6. | *earth | 3 years, 12 months, 18 days | Linux 6.15.4-200.fc42.x86_64 | | 7. | sun | 3 years, 10 months, 2 days | FreeBSD 10.3-RELEASE-p24 | | 8. | *blowfish | 3 years, 5 months, 17 days | OpenBSD 7.6 | | 9. | uugrn | 3 years, 5 months, 5 days | FreeBSD 11.2-RELEASE-p4 | @@ -191,7 +191,7 @@ +-----+----------------+-------+ | 1. | FreeBSD 10... | 551 | | 2. | Linux 3... | 550 | -| 3. | *Linux 6... | 171 | +| 3. | *Linux 6... | 172 | | 4. | Linux 5... | 162 | | 5. | Linux 4... | 161 | | 6. | FreeBSD 11... | 153 | @@ -201,14 +201,14 @@ | 10. | Darwin 13... | 40 | | 11. | Darwin 23... | 33 | | 12. | FreeBSD 5... | 25 | -| 13. | Linux 2... | 22 | -| 14. | *Darwin 24... | 22 | +| 13. | *Darwin 24... | 22 | +| 14. | Linux 2... | 22 | | 15. | Darwin 21... | 17 | | 16. | Darwin 15... | 15 | | 17. | Darwin 22... | 12 | | 18. | Darwin 18... | 11 | -| 19. | FreeBSD 6... | 10 | -| 20. | FreeBSD 7... | 10 | +| 19. | FreeBSD 7... | 10 | +| 20. | OpenBSD 4... | 10 | +-----+----------------+-------+
@@ -224,7 +224,7 @@ | 2. | *OpenBSD 7... | 6 years, 9 months, 24 days | | 3. | FreeBSD 10... | 5 years, 9 months, 9 days | | 4. | Linux 5... | 4 years, 10 months, 21 days | -| 5. | *Linux 6... | 2 years, 9 months, 27 days | +| 5. | *Linux 6... | 2 years, 9 months, 28 days | | 6. | Linux 4... | 2 years, 7 months, 22 days | | 7. | FreeBSD 11... | 2 years, 4 months, 28 days | | 8. | Linux 2... | 1 years, 11 months, 21 days | @@ -282,7 +282,7 @@ +-----+------------+-------+ | Pos | KernelName | Boots | +-----+------------+-------+ -| 1. | *Linux | 1066 | +| 1. | *Linux | 1067 | | 2. | *FreeBSD | 944 | | 3. | *Darwin | 155 | | 4. | *OpenBSD | 101 | @@ -298,7 +298,7 @@ +-----+------------+------------------------------+ | Pos | KernelName | Uptime | +-----+------------+------------------------------+ -| 1. | *Linux | 27 years, 10 months, 19 days | +| 1. | *Linux | 27 years, 10 months, 20 days | | 2. | *FreeBSD | 11 years, 5 months, 3 days | | 3. | *OpenBSD | 7 years, 5 months, 5 days | | 4. | *Darwin | 4 years, 9 months, 26 days | -- cgit v1.2.3