summary | refs | log | tree | commit | diff
diff options
context:
space:
mode:
author    Paul Buetow <paul@buetow.org>  2026-03-11 10:07:24 +0200
committer Paul Buetow <paul@buetow.org>  2026-03-11 10:07:24 +0200
commit    872cb926ed1d35a55243ede3fcbac16e0d3a0cc0 (patch)
tree      4fbdde6a6fcd1b63294373ccc07265ec883d9e7f
parent    ee75979b5d94ae18f930ff91e5b2d51cd554b60d (diff)
Update content for html
-rw-r--r--  about/index.html                             |   2
-rw-r--r--  about/paul.jpg                               | bin 0 -> 122201 bytes
-rwxr-xr-x  gemfeed/create-f3-sync-taskwarrior-tasks.sh  | 102
-rw-r--r--  gemfeed/f3-sync-plan.md                      | 389
4 files changed, 493 insertions, 0 deletions
diff --git a/about/index.html b/about/index.html
index 3bedd4b8..4c89b8de 100644
--- a/about/index.html
+++ b/about/index.html
@@ -22,6 +22,8 @@
<li>Education: Diplom-Informatiker (FH) (Diploma from a German University of Applied Sciences, before they had international Bachelor and Masters programs)</li>
<li>E-Mail: <span class='inlinecode'>paul@nospam.buetow.org</span></li>
</ul><br />
+<a href='./paul.jpg'><img alt='Paul Buetow' title='Paul Buetow' src='./paul.jpg' /></a><br />
+<br />
<h2 style='display: inline' id='my-sites'>My sites</h2><br />
<br />
<a class='textlink' href='../'>My blog here at foo.zone</a><br />
diff --git a/about/paul.jpg b/about/paul.jpg
new file mode 100644
index 00000000..8acac9b8
--- /dev/null
+++ b/about/paul.jpg
Binary files differ
diff --git a/gemfeed/create-f3-sync-taskwarrior-tasks.sh b/gemfeed/create-f3-sync-taskwarrior-tasks.sh
new file mode 100755
index 00000000..ec896f60
--- /dev/null
+++ b/gemfeed/create-f3-sync-taskwarrior-tasks.sh
@@ -0,0 +1,102 @@
+#!/usr/bin/env bash
+set -euo pipefail
+
+project_name() {
+ basename -s .git "$(git remote get-url origin 2>/dev/null)" 2>/dev/null || \
+ basename "$(git rev-parse --show-toplevel)"
+}
+
+PROJECT="$(project_name)"
+PLAN_PATH="/home/paul/git/foo.zone-content/gemtext/gemfeed/f3-sync-plan.md"
+TASK_FILTER="project:${PROJECT} +agent +f3sync"
+COMMON_TAGS=(project:"${PROJECT}" +agent +f3sync +ops)
+AGENT_ANNOTATION="Agent: be aware of taskwarrior-task-management skill. When working on this task, also load and apply: (1) the best-practices skill for the programming language used in the project, (2) solid-principles, and (3) beyond-solid-principles. When all tests and sub-agent reviews pass, automatically progress to the next task in the list."
+PLAN_ANNOTATION="Plan: ${PLAN_PATH}"
+REFS_ANNOTATION="Refs: ${PLAN_PATH}; 2024-12-03-f3s-kubernetes-with-freebsd-part-2.gmi.tpl; 2025-02-01-f3s-kubernetes-with-freebsd-part-3.gmi.tpl; 2025-04-05-f3s-kubernetes-with-freebsd-part-4.gmi.tpl; 2025-05-11-f3s-kubernetes-with-freebsd-part-5.gmi.tpl; 2025-07-14-f3s-kubernetes-with-freebsd-part-6.gmi.tpl; 2025-10-02-f3s-kubernetes-with-freebsd-part-7.gmi.tpl; 2025-12-07-f3s-kubernetes-with-freebsd-part-8.gmi.tpl"
+
+if ! command -v task >/dev/null 2>&1; then
+ echo "task command not found" >&2
+ exit 1
+fi
+
+if ! command -v git >/dev/null 2>&1; then
+ echo "git command not found" >&2
+ exit 1
+fi
+
+if [[ ! -f "${PLAN_PATH}" ]]; then
+ echo "plan file not found: ${PLAN_PATH}" >&2
+ exit 1
+fi
+
+existing_count="$(task ${TASK_FILTER} count 2>/dev/null | tr -dc '0-9')"
+if [[ -n "${existing_count}" && "${existing_count}" != "0" && "${FORCE:-0}" != "1" ]]; then
+ echo "Existing f3 sync tasks already present for project ${PROJECT}." >&2
+ echo "Filter: ${TASK_FILTER}" >&2
+ echo "Re-run with FORCE=1 to create another set." >&2
+ exit 1
+fi
+
+extract_id() {
+ sed -n 's/Created task \([0-9][0-9]*\).*/\1/p'
+}
+
+add_task() {
+ local description="$1"
+ local depends_arg="${2:-}"
+ local output
+ local id
+
+ if [[ -n "${depends_arg}" ]]; then
+ output="$(task "${COMMON_TAGS[@]}" add "${description}" "depends:${depends_arg}")"
+ else
+ output="$(task "${COMMON_TAGS[@]}" add "${description}")"
+ fi
+
+ id="$(printf '%s\n' "${output}" | extract_id)"
+ if [[ -z "${id}" ]]; then
+ echo "failed to parse task id from output:" >&2
+ printf '%s\n' "${output}" >&2
+ exit 1
+ fi
+
+ task project:"${PROJECT}" +agent "${id}" annotate "${AGENT_ANNOTATION}" >/dev/null
+ task project:"${PROJECT}" +agent "${id}" annotate "${PLAN_ANNOTATION}" >/dev/null
+ task project:"${PROJECT}" +agent "${id}" annotate "${REFS_ANNOTATION}" >/dev/null
+ task project:"${PROJECT}" +agent "${id}" annotate "UUID: $(task project:"${PROJECT}" +agent "${id}" _uuid)" >/dev/null
+
+ printf '%s\n' "${id}"
+}
+
+echo "Creating Taskwarrior tasks for project ${PROJECT}"
+echo "Using plan ${PLAN_PATH}"
+
+id1="$(add_task "f3 sync: install FreeBSD on f3 and obtain initial access" "")"
+id2="$(add_task "f3 sync: convert f3 from DHCP to static LAN networking in rc.conf" "${id1}")"
+id3="$(add_task "f3 sync: apply the common FreeBSD baseline on f3" "${id2}")"
+id4="$(add_task "f3 sync: update /etc/hosts on FreeBSD, Rocky, OpenBSD, and admin systems for f3" "${id3}")"
+id5="$(add_task "f3 sync: configure apcupsd on f3 like f2 using f0 as UPS master" "${id4}")"
+id6="$(add_task "f3 sync: add f3 to the WireGuard mesh and assign 192.168.2.133 and fd42:beef:cafe:2::133" "${id4},${id5}")"
+id7="$(add_task "f3 sync: mirror the real local storage layout of f2 on f3 without introducing zrepl" "${id6}")"
+id8="$(add_task "f3 sync: configure vm-bhyve and the optional host-local Rocky VM on f3 if f2 has it" "${id3},${id6},${id7}")"
+id9="$(add_task "f3 sync: install node_exporter on f3 and add Prometheus scrape config" "${id6}")"
+id10="$(add_task "f3 sync: mirror required local FreeBSD service users from f2 onto f3" "${id3},${id7}")"
+id11="$(add_task "f3 sync: run final end-to-end validation for f3 rollout" "${id2},${id3},${id4},${id5},${id6},${id7},${id8},${id9},${id10}")"
+
+cat <<EOF
+Created tasks:
+ ${id1} install FreeBSD on f3 and obtain initial access
+ ${id2} convert f3 from DHCP to static LAN networking in rc.conf
+ ${id3} apply the common FreeBSD baseline on f3
+ ${id4} update /etc/hosts on all involved systems
+ ${id5} configure apcupsd on f3
+ ${id6} add f3 to the WireGuard mesh
+ ${id7} mirror the real local storage layout of f2 on f3
+ ${id8} configure vm-bhyve and optional host-local Rocky VM on f3
+ ${id9} install node_exporter on f3 and update Prometheus
+ ${id10} mirror required local FreeBSD service users onto f3
+ ${id11} final end-to-end validation
+
+List them with:
+ task ${TASK_FILTER} list
+EOF
diff --git a/gemfeed/f3-sync-plan.md b/gemfeed/f3-sync-plan.md
new file mode 100644
index 00000000..9ebc48bd
--- /dev/null
+++ b/gemfeed/f3-sync-plan.md
@@ -0,0 +1,389 @@
+# f3 Host Expansion Plan
+
+## Goal
+
+Bring `f3.lan.buetow.org` into the `f3s` environment so it is aligned with the real role of `f2`: a non-`zrepl`, non-CARP, non-NFS-HA FreeBSD node that participates in the common host baseline, WireGuard mesh, UPS shutdown coordination, local virtualization pattern, and monitoring.
+
+This plan intentionally excludes:
+
+- `zrepl`
+- CARP
+- NFS server HA failover
+- `stunnel` NFS server role
+- `carpcontrol.sh` and related failover automation
+
+## Addressing Plan
+
+Use the next free numbers in the existing address sequences.
+
+- LAN IPv4: `192.168.1.133/24`
+- Host short name: `f3`
+- Host FQDN: `f3.lan.buetow.org`
+- WireGuard IPv4: `192.168.2.133/24`
+- WireGuard IPv6: `fd42:beef:cafe:2::133`
+
+Expected host entries:
+
+```txt
+192.168.1.133 f3 f3.lan f3.lan.buetow.org
+192.168.2.133 f3.wg0 f3.wg0.wan.buetow.org
+fd42:beef:cafe:2::133 f3.wg0 f3.wg0.wan.buetow.org
+```
+
+## Scope
+
+### Systems that need direct changes
+
+- `f3`
+- `f0`
+- `f1`
+- `f2`
+- `r0`
+- `r1`
+- `r2`
+- OpenBSD edge hosts that carry the static WireGuard host list
+- Any admin machine with the same hand-maintained `/etc/hosts` map
+- Prometheus scrape configuration
+
+### Assumptions
+
+- The FreeBSD Ethernet interface on `f3` is assumed to be `re0`, matching the earlier nodes. Verify the actual name with `ifconfig` before writing `rc.conf`.
+- The home router/default gateway remains `192.168.1.1`.
+- `f0` remains the UPS master node with the USB-connected APC.
+- `f2` does not run `zrepl`, and `f3` should mirror that fact.
+- If `f2` runs a local Rocky VM via `vm-bhyve`, `f3` should do the same. If `f2` does not, skip the VM provisioning section.
+- If `f2` carries the extra SSD, USB key store, and encrypted datasets in practice, mirror that layout on `f3`; otherwise align with the real host, not the generalized blog wording.
+
+## Global References
+
+Primary local references from the blog series:
+
+- `2024-12-03-f3s-kubernetes-with-freebsd-part-2.gmi.tpl`
+- `2025-02-01-f3s-kubernetes-with-freebsd-part-3.gmi.tpl`
+- `2025-04-05-f3s-kubernetes-with-freebsd-part-4.gmi.tpl`
+- `2025-05-11-f3s-kubernetes-with-freebsd-part-5.gmi.tpl`
+- `2025-07-14-f3s-kubernetes-with-freebsd-part-6.gmi.tpl`
+- `2025-10-02-f3s-kubernetes-with-freebsd-part-7.gmi.tpl`
+- `2025-12-07-f3s-kubernetes-with-freebsd-part-8.gmi.tpl`
+
+This file is the authoritative execution plan for the rollout.
+
+## Step 1: Install FreeBSD on f3 and Obtain Initial Access
+
+Install FreeBSD on `f3` with the same baseline choices used on the original hosts:
+
+- Guided ZFS on root with pool `zroot`
+- Unencrypted root
+- Enable SSH daemon
+- Enable NTP service and time sync
+- Enable `powerd`
+- Create user `paul`
+- Add `paul` to group `wheel`
+
+Initial access can be via:
+
+- the console, or
+- a temporary DHCP address assigned by the router
+
+Before making persistent network changes, confirm:
+
+- detected interface name via `ifconfig`
+- temporary DHCP lease currently in use
+- outbound connectivity and SSH reachability
+
+## Step 2: Convert f3 from DHCP to Static LAN Networking
+
+After logging in over the temporary DHCP address, configure `rc.conf` to match the static pattern used by the other nodes.
+
+First verify the interface name:
+
+```sh
+ifconfig
+```
+
+Then configure:
+
+```sh
+doas sysrc hostname="f3.lan.buetow.org"
+doas sysrc ifconfig_re0="inet 192.168.1.133 netmask 255.255.255.0"
+doas sysrc defaultrouter="192.168.1.1"
+```
+
+Apply the change:
+
+```sh
+doas service netif restart
+doas service routing restart
+```
+
+Reconnect over:
+
+```sh
+ssh paul@192.168.1.133
+```
+
+Validation:
+
+- `hostname` returns `f3.lan.buetow.org`
+- `ifconfig re0` shows `192.168.1.133`
+- default route points to `192.168.1.1`
+- host is reachable by SSH on the static address
+
+## Step 3: Apply the Common FreeBSD Baseline on f3
+
+Install the common package baseline:
+
+```sh
+doas pkg install helix doas zfs-periodic uptimed
+```
+
+Apply the baseline settings:
+
+- copy `/usr/local/etc/doas.conf.sample` to `/usr/local/etc/doas.conf`
+- add the same `zfs-periodic` retention policy for `zroot`
+- configure `uptimed`
+- fully patch the host using `freebsd-update`, `pkg update`, and `pkg upgrade`
+
+Validation:
+
+- `doas` works for `paul`
+- `uprecords` works
+- periodic ZFS snapshot settings are present in `/etc/periodic.conf`
+
+## Step 4: Update /etc/hosts Everywhere
+
+Update `/etc/hosts` on all involved systems so `f3` is resolvable consistently.
+
+### Add on FreeBSD hosts
+
+- `f0`
+- `f1`
+- `f2`
+- `f3`
+
+Add:
+
+```txt
+192.168.1.133 f3 f3.lan f3.lan.buetow.org
+192.168.2.133 f3.wg0 f3.wg0.wan.buetow.org
+fd42:beef:cafe:2::133 f3.wg0 f3.wg0.wan.buetow.org
+```
+
+### Add on Rocky nodes
+
+- `r0`
+- `r1`
+- `r2`
+
+Add the same three lines above.
+
+### Add on OpenBSD edge hosts
+
+Any edge node with the mesh host list should get the WireGuard entries:
+
+```txt
+192.168.2.133 f3.wg0 f3.wg0.wan.buetow.org
+fd42:beef:cafe:2::133 f3.wg0 f3.wg0.wan.buetow.org
+```
+
+If those systems also maintain LAN-side entries for FreeBSD hosts, add the LAN line as well.
+
+### Add on admin machines
+
+Any laptop or workstation with the same static map should be updated.
+
+Validation:
+
+- `ping f3.lan.buetow.org` works from LAN systems
+- `ping f3.wg0.wan.buetow.org` works once WireGuard is configured
+- no duplicate or conflicting host entries are introduced
+
+## Step 5: Configure UPS Partner Behavior on f3
+
+Install `apcupsd` on `f3` and configure it like `f2`, consuming UPS state remotely from `f0`.
+
+Required intent:
+
+- `UPSCABLE ether`
+- `UPSTYPE net`
+- `DEVICE f0.lan.buetow.org:3551`
+- `BATTERYLEVEL 10`
+- `MINUTES 6`
+
+Enable and start the service:
+
+```sh
+doas sysrc apcupsd_enable=YES
+doas service apcupsd start
+```
+
+Validation:
+
+- `apcaccess` on `f3` returns data
+- `apcaccess -h f0.lan.buetow.org` works from `f3`
+- service is enabled for boot
+
+## Step 6: Add f3 to the WireGuard Mesh
+
+Install and enable WireGuard on `f3`:
+
+```sh
+doas pkg install wireguard-tools
+doas sysrc wireguard_interfaces=wg0
+doas sysrc wireguard_enable=YES
+doas mkdir -p /usr/local/etc/wireguard
+doas touch /usr/local/etc/wireguard/wg0.conf
+```
+
+Configure `wg0` with:
+
+- IPv4 `192.168.2.133/24`
+- IPv6 `fd42:beef:cafe:2::133`
+
+Then update all existing mesh peers to include `f3`:
+
+- `f0`
+- `f1`
+- `f2`
+- `r0`
+- `r1`
+- `r2`
+- OpenBSD edge hosts
+
+Because the topology is full mesh, this is a cluster-wide change, not a host-local change.
+
+Validation:
+
+- `wg show` on `f3` shows all intended peers
+- existing nodes show `f3` as a peer
+- `ping 192.168.2.130`, `.131`, `.132` from `f3` works
+- `ping fd42:beef:cafe:2::130` etc. works if IPv6 WG is enabled end-to-end
+
+## Step 7: Mirror the Real Local Storage Layout of f2 on f3
+
+This step depends on what `f2` actually runs today.
+
+If `f2` has the extra SSD, USB key filesystem, and encrypted datasets, do the same on `f3`:
+
+- install the second SSD
+- create `zdata`
+- create and mount `/keys`
+- generate `f3.lan.buetow.org:bhyve.key`
+- generate `f3.lan.buetow.org:zdata.key`
+- replicate the shared key distribution model used on the existing storage-capable hosts
+- create encrypted datasets and enable automatic key loading
+
+If `f2` does not use this storage path in practice, skip it and keep `f3` aligned with the actual host.
+
+Validation:
+
+- `zpool list` shows the expected pools
+- `/keys` is mounted at boot if used
+- encrypted datasets load keys correctly after reboot if used
+
+## Step 8: Configure vm-bhyve and the Host-Local Rocky VM on f3
+
+Only do this if `f2` has the normal host-local VM pattern.
+
+On `f3`:
+
+- install `vm-bhyve` and `bhyve-firmware`
+- enable `vm_enable`
+- set `vm_dir=zfs:zroot/bhyve`
+- create the `public` switch on `re0`
+- configure autostart with `vm_list="rocky"`
+- provision the Rocky VM analogous to the existing `r2` pattern
+
+If this expands the cluster with a new Rocky guest, define the next matching addresses:
+
+- LAN IPv4 for VM: `192.168.1.123/24`
+- WG IPv4 for VM: `192.168.2.123/24`
+- WG IPv6 for VM: `fd42:beef:cafe:2::123`
+- Hostname: `r3.lan.buetow.org`
+
+If no new VM should exist on `f3`, explicitly skip this step.
+
+Validation:
+
+- `vm list` shows the expected guest
+- guest boots cleanly
+- guest has static network config if created
+- `/etc/hosts` updates are extended for `r3` if `r3` is created
+
+## Step 9: Install and Integrate Monitoring for f3
+
+Install `node_exporter` on `f3`:
+
+```sh
+doas pkg install -y node_exporter
+doas sysrc node_exporter_enable=YES
+doas sysrc node_exporter_args='--web.listen-address=192.168.2.133:9100'
+doas service node_exporter start
+```
+
+Update Prometheus additional scrape config to include:
+
+```yaml
+- '192.168.2.133:9100' # f3 via WireGuard
+```
+
+If you use the ZFS textfile collector script on the FreeBSD hosts, deploy the same script and cron entry to `f3` if and only if `f3` has the same relevant ZFS layout.
+
+Validation:
+
+- `curl -s http://192.168.2.133:9100/metrics | head -3`
+- Prometheus target becomes healthy
+- Grafana Node Exporter dashboards show `f3`
+
+## Step 10: Mirror Required Local FreeBSD Service Users on f3
+
+If `f2` carries local UID/GID mappings required for shared storage ownership, reproduce them on `f3`.
+
+Known example from the blog:
+
+```sh
+doas pw groupadd postgres -g 999
+doas pw useradd postgres -u 999 -g postgres -d /var/db/postgres -s /usr/sbin/nologin
+```
+
+Only add the accounts that actually exist on `f2` today.
+
+Validation:
+
+- `id postgres` matches expected UID/GID if created
+- user list on `f3` matches `f2` for the intended service accounts
+
+## Step 11: End-to-End Validation
+
+Perform a final verification pass:
+
+- `f3` is reachable on `192.168.1.133`
+- hostname and `/etc/hosts` are correct everywhere
+- `apcupsd` on `f3` reads from `f0`
+- WireGuard connectivity is established across the mesh
+- monitoring is scraping `f3`
+- optional local storage layout matches `f2`
+- optional VM layout matches `f2`
+- no accidental `zrepl`, CARP, NFS HA, or server-side `stunnel` config was added
+
+Document any intentional deviations from `f2`.
+
+## Task Breakdown
+
+The implementation should be tracked as one task per major step:
+
+1. Install FreeBSD and obtain initial access on `f3`
+2. Convert `f3` from DHCP to static LAN networking
+3. Apply the common FreeBSD baseline on `f3`
+4. Update `/etc/hosts` on all involved systems
+5. Configure `apcupsd` on `f3`
+6. Add `f3` to the WireGuard mesh
+7. Mirror the real local storage layout of `f2` on `f3`
+8. Configure `vm-bhyve` and optional host-local Rocky VM on `f3`
+9. Install and integrate monitoring for `f3`
+10. Mirror required local service users on `f3`
+11. Run final end-to-end validation
+
+All implementation tasks should reference this file directly:
+
+- `/home/paul/git/foo.zone-content/gemtext/gemfeed/f3-sync-plan.md`