
configure do
given { hostname is :earth }
file '/tmp/test/wg0.conf' do
requires '/etc/hosts.test'
manage directory
from template
'content with <%= 1 + 2 %>'
end
file '/etc/hosts.test' do
line '192.168.1.101 earth'
end
end
file '/etc/hosts.test' do line '192.168.1.101 earth' end
given { hostname is :earth }
given { hostname is :earth }
# Tiny condition DSL: HostCondition.new.hostname.is(:earth) evaluates
# whether the current machine's hostname equals the given symbol.
class HostCondition
  # Snapshot the machine's hostname once, as a symbol, for later comparison.
  def initialize
    @current_hostname = Socket.gethostname.to_sym
  end

  # DSL dispatch:
  #   :hostname - stash the current hostname as the left operand and
  #               return self so the call can be chained
  #   :is       - compare the stashed operand against the first argument
  # Any other name falls through to the default NoMethodError.
  def method_missing(name, *args, &)
    case name
    when :hostname
      @left = @current_hostname
      self # allow chaining: hostname is :earth
    when :is
      @left == args.first
    else
      super
    end
  end

  # Required companion to method_missing: keeps respond_to? truthful for
  # the DSL words handled above.
  def respond_to_missing?(name, include_private = false)
    %i[hostname is].include?(name) || super
  end
end
HostCondition.new.hostname.is(:earth)
configure do
file './.file_example.rcmtmp' do
from template
'One plus two is <%= 1 + 2 %>!'
end
end
configure do
file './.file_example.rcmtmp' do
line 'Whats up?'
is absent
end
end
configure do
file original do
path './.dir_example.rcmtmp/foo/backup-me.txt'
manage directory
'original_content'
end
file new do
path './.dir_example.rcmtmp/foo/backup-me.txt'
manage directory
requires file original
'new_content'
end
end
configure do
given { hostname Socket.gethostname }
end
configure do
directory './.directory_example.rcmtmp' do
is present
end
directory delete do
path './.directory_example.rcmtmp'
is absent
end
end
configure do
touch './.mode_example.rcmtmp' do
mode 0o600
end
directory './.mode_example_dir.rcmtmp' do
mode 0o705
end
end
configure do
notify hello dear world do
thank you to be part of you
end
end
configure do touch './.touch_example.rcmtmp' end
configure do
notify foo do
requires notify bar and requires notify baz
'foo_message'
end
notify bar
notify baz do
requires notify bar
'baz_message'
end
end
configure do
symlink './.symlink_example.rcmtmp' do
manage directory
'./.symlink_target_example.rcmtmp'
end
end
configure do notify :foo notify :foo # raises RCM::DSL::DuplicateResource end
___
/ \ resilience
| o | <---------- learning
\___/

┌─ Loadbars 0.13.0 ─────────────────────────────────────────┐ │ │ │ ████ ████ ████ ██ ████ ████ ████ ██ ░░██ ░░██ │ │ ████ ████ ████ ██ ████ ████ ████ ██ ░░██ ░░██ │ │ ████ ████ ████ ██ ████ ████ ████ ██ ░░██ ░░██ │ │ CPU cpu0 cpu1 mem CPU cpu0 cpu1 mem net net │ │ └──── host1 ────┘ └──── host2 ────┘ │ └───────────────────────────────────────────────────────────┘
Key Action ───── ────────────────────────────────────────────────── 1 Toggle CPU (aggregate / per-core / off) 2 / m Toggle memory bars 3 / n Toggle network bars 4 / l Toggle load average bars 5 Toggle disk I/O (aggregate / per-device / off) r Reset load and disk auto-scale peaks e Toggle extended (peak line on CPU; disk util line) g Toggle global average CPU line i Toggle global I/O average line s Toggle host separator lines h Print hotkey list to stdout q Quit w Write current settings to ~/.loadbarsrc a / y CPU average samples up / down d / c Net average samples up / down b / x Disk average samples up / down f / v Link scale up / down Arrows Resize window
loadbars --hosts server1,server2,server3
loadbars --hosts root@server1,root@server2
loadbars servername{01..50}.example.com --showcores 1
loadbars --cluster production
mage build ./loadbars --hosts localhost mage install # to ~/go/bin mage test
┌─────────────────┐
│ ● ● AIR │ ← air-quality monitor
├─────────────────┤
│ ╔═╗ CD │ ← CD transport
│ ║ ◉║ S/PDIF │
│ ╚═╝ │
├─────────────────┤
│ ▓▓▓ USB PWR │ ← PinePower
├─────────────────┤
│ ░░░ (phones) │ ← 1U "empty" shelf
├─────────────────┤
│ ◉◉◉◉◉ LAN │ ← 5-port switch
├─────────────────┤
│ [E50] [L50] │ ← DAC + AMP
│ DAC AMP │
└─────────────────┘
RackMate T0






bind-key e run-shell -b "tmux display-message -p '#{pane_id}'
> /tmp/tmux-edit-target-#{client_pid} \;
tmux popup -E -w 90% -h 35% -x 5% -y 65% -d '#{pane_current_path}'
\"~/scripts/tmux-edit-send /tmp/tmux-edit-target-#{client_pid}\""
┌────────────────────┐ ┌───────────────┐ ┌─────────────────────┐ ┌─────────────────────┐
│ Cursor input box │-->| tmux keybind │-->| popup runs script │-->| capture + prefill │
│ (prompt pane) │ │ prefix + e │ │ tmux-edit-send │ │ temp file │
└────────────────────┘ └───────────────┘ └─────────────────────┘ └─────────────────────┘
|
v
┌────────────────────┐ ┌────────────────────┐ ┌────────────────────┐ ┌────────────────────┐
│ Cursor input box │<--| send-keys back |<--| close editor+popup |<--| edit temp file |
│ (prompt pane) │ │ to original pane │ │ (exit $EDITOR) │ │ in $EDITOR │
└────────────────────┘ └────────────────────┘ └────────────────────┘ └────────────────────┘

#!/usr/bin/env bash
# tmux-edit-send: capture the current tmux prompt, edit it in $EDITOR,
# then send the edited text back to the originating pane.
# -u: error on unset variables; pipefail: a pipeline fails if any stage fails.
set -u -o pipefail
# Debug logging is off by default; flip to 1 to trace a run via $log_file.
LOG_ENABLED=0
log_file="${TMPDIR:-/tmp}/tmux-edit-send.log"
# Append a debug line to $log_file, but only when LOG_ENABLED=1.
log() {
    [ "$LOG_ENABLED" -eq 1 ] || return 0
    printf '%s\n' "$*" >> "$log_file"
}
# Read the target pane id from a temp file created by the tmux binding.
# Prints the id (normalized to tmux's "%N" form) or nothing at all.
read_target_from_file() {
    local file_path="$1"
    local pane_id
    # Nothing to do unless we were handed an existing file.
    if [ -z "$file_path" ] || [ ! -f "$file_path" ]; then
        return 0
    fi
    # First line only, with every whitespace character stripped.
    pane_id="$(head -n 1 "$file_path" | tr -d '[:space:]')"
    # tmux pane ids carry a '%' prefix; add it when it is missing.
    case "$pane_id" in
        ''|%*) ;;
        *) pane_id="%${pane_id}" ;;
    esac
    printf '%s' "$pane_id"
}
# Read the target pane id from the tmux global environment, if present.
# Prints the id (numeric ids normalized to "%N") or nothing at all.
read_target_from_env() {
    local env_line pane_id
    env_line="$(tmux show-environment -g TMUX_EDIT_TARGET 2>/dev/null || true)"
    # Only act when the output really is a TMUX_EDIT_TARGET assignment.
    if [[ "$env_line" == TMUX_EDIT_TARGET=* ]]; then
        pane_id="${env_line#TMUX_EDIT_TARGET=}"
        # Purely numeric ids get tmux's '%' prefix added.
        if [ -n "$pane_id" ] && [[ "$pane_id" != %* ]] && [[ "$pane_id" =~ ^[0-9]+$ ]]; then
            pane_id="%${pane_id}"
        fi
        printf '%s' "$pane_id"
    fi
}
# Resolve the target pane id, falling back to the last pane.
# Order of operations:
#   1. a bare numeric candidate gets the '%' prefix added
#   2. an unexpanded tmux format string ("#{...}") is discarded
#   3. an empty candidate falls back to the last active pane
#   4. a candidate equal to the current pane is replaced by the last pane
#      (presumably because the popup itself is the current pane — confirm)
resolve_target_pane() {
    local candidate="$1"
    local current_pane last_pane
    current_pane="$(tmux display-message -p "#{pane_id}" 2>/dev/null || true)"
    log "current pane=${current_pane:-<empty>}"
    # Ensure candidate has % prefix if it's a pane ID
    if [ -n "$candidate" ] && [[ "$candidate" =~ ^[0-9]+$ ]]; then
        candidate="%${candidate}"
        log "normalized candidate to $candidate"
    fi
    # A literal "#{" means the binding passed through an unexpanded format.
    if [ -n "$candidate" ] && [[ "$candidate" == *"#{"* ]]; then
        log "format target detected, clearing"
        candidate=""
    fi
    if [ -z "$candidate" ]; then
        candidate="$(tmux display-message -p "#{last_pane}" 2>/dev/null || true)"
        log "using last pane as fallback: $candidate"
    elif [ "$candidate" = "$current_pane" ]; then
        last_pane="$(tmux display-message -p "#{last_pane}" 2>/dev/null || true)"
        if [ -n "$last_pane" ]; then
            candidate="$last_pane"
            log "candidate was current, using last pane: $candidate"
        fi
    fi
    printf '%s' "$candidate"
}
# Capture the latest multi-line prompt content from the pane.
# Scrolls back up to 2000 lines of the pane and extracts the newest
# box-drawn prompt: a line matching "│ →" opens a capture, subsequent
# "│ ..." lines are joined with single spaces, and a "└" bottom border
# closes it. Lines containing "INSERT" or "Add a follow-up" are treated
# as status chrome and skipped. Prints only the last complete prompt
# found, or nothing. (The awk program below is left untouched on purpose.)
capture_prompt_text() {
local target="$1"
tmux capture-pane -p -t "$target" -S -2000 2>/dev/null | awk '
function trim_box(line) {
sub(/^ *│ ?/, "", line)
sub(/ *│ *$/, "", line)
sub(/[[:space:]]+$/, "", line)
return line
}
/^ *│ *→/ && index($0,"INSERT")==0 && index($0,"Add a follow-up")==0 {
if (text != "") last = text
text = ""
capture = 1
line = $0
sub(/^.*→ ?/, "", line)
line = trim_box(line)
if (line != "") text = line
next
}
capture {
if ($0 ~ /^ *└/) {
capture = 0
if (text != "") last = text
next
}
if ($0 ~ /^ *│/ && index($0,"INSERT")==0 && index($0,"Add a follow-up")==0) {
line = trim_box($0)
if (line != "") {
if (text != "") text = text " " line
else text = line
}
}
}
END {
if (text != "") last = text
if (last != "") print last
}
'
}
# Write captured prompt text into the temp file, if any text was captured.
# An empty prompt leaves the file untouched.
prefill_tmpfile() {
    local tmpfile="$1"
    local prompt_text="$2"
    [ -z "$prompt_text" ] && return 0
    printf '%s\n' "$prompt_text" > "$tmpfile"
}
# Ensure the target pane exists before sending keys.
# Returns 1 (with a message on stderr) when the target is empty or when
# no pane with that id exists in any session.
validate_target_pane() {
    local target="$1"
    local pane target_found=0

    if [ -z "$target" ]; then
        log "error: no target pane determined"
        echo "Could not determine target pane." >&2
        return 1
    fi

    log "validate: looking for target='$target' in all panes:"
    for pane in $(tmux list-panes -a -F "#{pane_id}" 2>/dev/null || true); do
        log "validate: checking pane='$pane'"
        [ "$pane" = "$target" ] || continue
        target_found=1
        log "validate: MATCH FOUND!"
        break
    done

    if [ "$target_found" -ne 1 ]; then
        log "error: target pane not found: $target"
        echo "Target pane not found: $target" >&2
        return 1
    fi
    log "validate: target pane validated successfully"
}
# Send temp file contents to the target pane line by line.
# If the first line still begins with the originally captured prompt text,
# that prefix is stripped so the prompt is not duplicated in the pane.
send_content() {
    local target="$1"
    local tmpfile="$2"
    local prompt_text="$3"
    local first_line=1
    local line
    log "send_content: target=$target, prompt_text='$prompt_text'"
    # `|| [ -n "$line" ]` also processes a final line with no trailing newline.
    while IFS= read -r line || [ -n "$line" ]; do
        log "send_content: read line='$line'"
        if [ "$first_line" -eq 1 ] && [ -n "$prompt_text" ]; then
            if [[ "$line" == "$prompt_text"* ]]; then
                local old_line="$line"
                line="${line#"$prompt_text"}"
                log "send_content: stripped prompt, was='$old_line' now='$line'"
            fi
        fi
        first_line=0
        log "send_content: sending line='$line'"
        # -l sends the text literally (no key-name interpretation), then Enter.
        tmux send-keys -t "$target" -l "$line"
        tmux send-keys -t "$target" Enter
    done < "$tmpfile"
    log "sent content to $target"
}
# Main entry point.
# Target-pane discovery order: the temp file written by the tmux binding,
# then $TMUX_EDIT_TARGET from our own environment, then the tmux global
# environment, and finally resolve_target_pane's last-pane fallback.
main() {
    local target_file="${1:-}"
    local target
    local editor="${EDITOR:-vi}"
    local tmpfile
    local prompt_text
    log "=== tmux-edit-send starting ==="
    log "target_file=$target_file"
    log "EDITOR=$editor"
    target="$(read_target_from_file "$target_file" || true)"
    if [ -n "$target" ]; then
        log "file target=${target:-<empty>}"
        # One-shot handoff: delete the file so a stale id is never reused.
        rm -f "$target_file"
    fi
    if [ -z "$target" ]; then
        target="${TMUX_EDIT_TARGET:-}"
    fi
    log "env target=${target:-<empty>}"
    if [ -z "$target" ]; then
        target="$(read_target_from_env || true)"
    fi
    log "tmux env target=${target:-<empty>}"
    target="$(resolve_target_pane "$target")"
    log "fallback target=${target:-<empty>}"
    tmpfile="$(mktemp)"
    log "created tmpfile=$tmpfile"
    if [ ! -f "$tmpfile" ]; then
        log "ERROR: mktemp failed to create file"
        echo "ERROR: mktemp failed" >&2
        exit 1
    fi
    # Rename to *.md — presumably so the editor applies markdown settings.
    # NOTE(review): piping mv through `while read` discards mv's exit
    # status; the [ ! -f ] check below catches the failure, but a plain
    # `mv ... 2>/dev/null` would be simpler — confirm.
    mv "$tmpfile" "${tmpfile}.md" 2>&1 | while read -r line; do log "mv output: $line"; done
    tmpfile="${tmpfile}.md"
    log "renamed to tmpfile=$tmpfile"
    if [ ! -f "$tmpfile" ]; then
        log "ERROR: tmpfile does not exist after rename"
        echo "ERROR: tmpfile rename failed" >&2
        exit 1
    fi
    # From here on the temp file is always cleaned up on exit.
    trap 'rm -f "$tmpfile"' EXIT
    log "capturing prompt text from target=$target"
    prompt_text="$(capture_prompt_text "$target")"
    log "captured prompt_text='$prompt_text'"
    prefill_tmpfile "$tmpfile" "$prompt_text"
    log "prefilled tmpfile"
    log "launching editor: $editor $tmpfile"
    "$editor" "$tmpfile"
    local editor_exit=$?
    log "editor exited with status $editor_exit"
    # An empty file means the user aborted or cleared the buffer: send nothing.
    if [ ! -s "$tmpfile" ]; then
        log "empty file, nothing sent"
        exit 0
    fi
    log "tmpfile contents:"
    log "$(cat "$tmpfile")"
    log "validating target pane"
    # NOTE(review): the return status of validate_target_pane is not
    # checked and `set -e` is not enabled, so send_content still runs on
    # a failed validation — confirm whether that is intended.
    validate_target_pane "$target"
    log "sending content to target=$target"
    send_content "$target" "$tmpfile" "$prompt_text"
    log "=== tmux-edit-send completed ==="
}
main "$@"
⣿⣿⣿⣿⣿⣿⡿⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ ⣿⣿⣿⣿⣿⣏⠀⢶⣆⡘⠉⠙⠛⠿⠿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ ⣿⣿⣿⣿⣿⠋⣤⣄⠘⠃⢠⣀⣀⠀⠀⠀⠀⠀⠉⠉⠛⠛⠿⢿⣿⣿⣿⣿⣿⣿ ⣿⣿⣿⣿⡿⠀⡉⠻⡟⠀⠈⠉⠙⠛⠷⠶⣦⣤⣄⣀⠀⠀⠀⠀⠀⣾⣿⣿⣿⣿ ⣿⣿⣿⣿⡄⠸⢿⣤⠀⢠⣤⣀⡀⠀⠀⠀⠀⠀⠉⠙⠛⠻⠶⠀⢰⣿⣿⠻⣿⣿ ⣿⣿⣿⣿⠠⣶⣆⡉⠀⠀⠈⠉⠙⠛⠳⠶⠦⣤⣤⣄⣀⡀⢀⣴⠟⠋⠙⢷⣬⣿ ⣿⣿⣿⠏⣠⡄⠹⠁⠰⢶⣤⣤⣀⡀⠀⠀⠀⠀⠀⠉⢉⣿⠟⠁⠀⠀⣠⣾⣿⣿ ⣿⣿⡿⠂⠙⠻⡆⠀⠀⠀⠀⠈⠉⠛⠛⠷⠶⣦⣤⣴⠟⠁⠀⠀⣠⣾⣿⣿⣿⣿ ⣿⣿⡇⠸⣿⣄⠀⠰⠶⢶⣤⣄⣀⡀⠀⠀⠀⣴⣟⠁⠀⠀⣠⣾⣿⣿⣿⣿⣿⣿ ⣿⡟⠀⣶⣀⠃⠀⠀⠀⠀⠀⠈⠉⠙⠛⠓⢾⡟⢙⣷⣤⢾⣿⣿⣿⣿⣿⣿⣿⣿ ⣿⠋⣀⡉⠻⠀⠘⠛⠻⠶⢶⣤⣤⣀⡀⢠⠿⠟⠛⠉⠁⣸⣿⣿⣿⣿⣿⣿⣿⣿ ⣿⡀⠛⠳⠆⠀⠀⠀⠀⠀⠀⠀⠉⠉⠛⠛⠷⠶⣦⠄⢀⣿⣿⣿⣿⣿⣿⣿⣿⣿ ⣿⣿⣿⣶⣦⣀⣀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⠀⣸⣿⣿⣿⣿⣿⣿⣿⣿⣿ ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣶⣶⣤⣤⣀⣀⠀⠀⠀⢠⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿ ⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿⣷⣶⣾⣿⣿⣿⣿⣿⣿⣿⣿⣿⣿

#!/usr/bin/env bash
# Convert every .note file under the current directory to a sibling PDF.
# FIXES: NUL-delimited find output so paths containing whitespace (or even
# newlines) survive, and suffix stripping via ${note%.note} — the previous
# ${note/.note/.pdf} replaced the FIRST ".note" anywhere in the path, which
# mangles files living inside a directory whose name contains ".note".
convert () {
    find . -name '*.note' -print0 \
        | while IFS= read -r -d '' note; do
            local pdf="${note%.note}.pdf"
            # Echo the command first so progress is visible in the log.
            echo supernote-tool convert -a -t pdf "$note" "$pdf"
            # Convert into a temp file, then rename, so an interrupted run
            # never leaves a half-written PDF at the final path.
            supernote-tool convert -a -t pdf "$note" "${pdf}.tmp"
            mv "${pdf}.tmp" "$pdf"
            du -hs "$note" "$pdf"
            echo
        done
}
# Make the PDFs available on my Phone as well: mirror all *.pdf files
# (preserving the directory structure) into ~/Documents/Supernote.
copy () {
    if [ ! -d ~/Documents/Supernote ]; then
        echo "Directory ~/Documents/Supernote does not exist, skipping"
        # NOTE(review): despite saying "skipping", exit aborts the whole
        # script — confirm whether `return` was intended instead.
        exit 1
    fi
    # BUG FIX: the delete option is --delete; the previous single-dash
    # "-delete" is parsed by rsync as the bundled short options "-d -e lete"
    # (i.e. it sets the remote shell to "lete" instead of pruning files).
    rsync --delete -av --include='*/' --include='*.pdf' --exclude='*' . ~/Documents/Supernote/
    # Quoted so a working directory containing spaces is written verbatim.
    echo "This was copied from $(pwd) so dont edit manually" >~/Documents/Supernote/README.txt
}
convert
copy

Art by Donovan Bake
__...--~~~~~-._ _.-~~~~~--...__
// `V' \\
// | \\
//__...--~~~~~~-._ | _.-~~~~~~--...__\\
//__.....----~~~~._\ | /_.~~~~----.....__\\
====================\\|//====================
dwb `---`




[ApplicationPreferences] SideloadedMode=true



┌─────────────────────────────────────────────────────────────────────────┐ │ X-RAG Kubernetes Cluster │ ├─────────────────────────────────────────────────────────────────────────┤ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │ │ Search UI │ │Search Svc │ │Embed Service│ │ Indexer │ │ │ └──────┬──────┘ └──────┬──────┘ └──────┬──────┘ └──────┬──────┘ │ │ │ │ │ │ │ │ └────────────────┴────────────────┴────────────────┘ │ │ │ │ │ ▼ │ │ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐ │ │ │ Weaviate │ │ Kafka │ │ MinIO │ │ │ └─────────────┘ └─────────────┘ └─────────────┘ │ └─────────────────────────────────────────────────────────────────────────┘
$ docker ps --format "table {{.Names}}\t{{.Image}}"
NAMES IMAGE
xrag-k8-control-plane kindest/node:v1.32.0
xrag-k8-worker kindest/node:v1.32.0
xrag-k8-worker2 kindest/node:v1.32.0
┌─────────────────────────────────────────────────────────────────────────┐ │ Docker Host │ ├─────────────────────────────────────────────────────────────────────────┤ │ ┌───────────────────┐ ┌───────────────────┐ ┌───────────────────┐ │ │ │ xrag-k8-control │ │ xrag-k8-worker │ │ xrag-k8-worker2 │ │ │ │ -plane (container)│ │ (container) │ │ (container) │ │ │ │ │ │ │ │ │ │ │ │ K8s API server │ │ Pods: │ │ Pods: │ │ │ │ etcd, scheduler │ │ • search-ui │ │ • weaviate │ │ │ │ │ │ • search-service │ │ • kafka │ │ │ │ │ │ • embedding-svc │ │ • prometheus │ │ │ │ │ │ • indexer │ │ • grafana │ │ │ └───────────────────┘ └───────────────────┘ └───────────────────┘ │ └─────────────────────────────────────────────────────────────────────────┘
$ kubectl get pods -n monitoring NAME READY STATUS alloy-84ddf4cd8c-7phjp 1/1 Running grafana-6fcc89b4d6-pnh8l 1/1 Running kube-state-metrics-5d954c569f-2r45n 1/1 Running loki-8c9bbf744-sc2p5 1/1 Running node-exporter-kb8zz 1/1 Running node-exporter-zcrdz 1/1 Running node-exporter-zmskc 1/1 Running prometheus-7f755f675-dqcht 1/1 Running tempo-55df7dbcdd-t8fg9 1/1 Running
┌──────────────────────────────────────────────────────────────────────┐ │ LOGS PIPELINE │ ├──────────────────────────────────────────────────────────────────────┤ │ Applications write to stdout → containerd stores in /var/log/pods │ │ │ │ │ File tail │ │ ▼ │ │ Grafana Alloy (DaemonSet) │ │ Discovers pods, extracts metadata │ │ │ │ │ HTTP POST /loki/api/v1/push │ │ ▼ │ │ Grafana Loki │ │ Indexes labels, stores chunks │ └──────────────────────────────────────────────────────────────────────┘
loki.source.kubernetes "pod_logs" {
targets = discovery.relabel.pod_logs.output
forward_to = [loki.process.pod_logs.receiver]
}
loki.write "default" {
endpoint {
url = "http://loki.monitoring.svc.cluster.local:3100/loki/api/v1/push"
}
}
{namespace="rag-system", container="search-ui"} |= "ERROR"
from prometheus_client import Histogram, Counter, Gauge

# Request latency histogram, labeled by method. Explicit buckets extend
# out to 60s so slow LLM-backed requests still land in a finite bucket.
search_duration = Histogram(
    "search_service_request_duration_seconds",
    "Total duration of Search Service requests",
    ["method"],
    buckets=[0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 20.0, 30.0, 60.0],
)
# Monotonic error counter, labeled by method and error type.
errors_total = Counter(
    "search_service_errors_total",
    "Error count by type",
    ["method", "error_type"],
)
┌─────────────┐ ┌─────────────┐ ┌─────────────┐ ┌─────────────┐
│ search-ui │ │search-svc │ │embed-svc │ │ indexer │
│ OTel Meter │ │ OTel Meter │ │ OTel Meter │ │ OTel Meter │
│ │ │ │ │ │ │ │ │ │ │ │
│ OTLPExporter│ │ OTLPExporter│ │ OTLPExporter│ │ OTLPExporter│
└──────┬──────┘ └──────┬──────┘ └──────┬──────┘ └──────┬──────┘
│ │ │ │
└────────────────┴────────────────┴────────────────┘
│
▼ OTLP/gRPC (port 4317)
┌─────────────────────┐
│ Grafana Alloy │
└──────────┬──────────┘
│ prometheus.remote_write
▼
┌─────────────────────┐
│ Prometheus │
└─────────────────────┘
otelcol.receiver.otlp "default" {
grpc { endpoint = "0.0.0.0:4317" }
http { endpoint = "0.0.0.0:4318" }
output {
metrics = [otelcol.processor.batch.metrics.input]
traces = [otelcol.processor.batch.traces.input]
}
}
otelcol.processor.batch "metrics" {
timeout = "5s"
send_batch_size = 1000
output { metrics = [otelcol.exporter.prometheus.default.input] }
}
otelcol.exporter.prometheus "default" {
forward_to = [prometheus.remote_write.prom.receiver]
}
prometheus.scrape "kubelet_resource" {
targets = discovery.relabel.kubelet.output
job_name = "kubelet-resource"
scheme = "https"
scrape_interval = "30s"
bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
tls_config { insecure_skip_verify = true }
forward_to = [prometheus.remote_write.prom.receiver]
}
prometheus.scrape "cadvisor" {
targets = discovery.relabel.cadvisor.output
job_name = "cadvisor"
scheme = "https"
scrape_interval = "60s"
bearer_token_file = "/var/run/secrets/kubernetes.io/serviceaccount/token"
tls_config { insecure_skip_verify = true }
forward_to = [prometheus.relabel.cadvisor_filter.receiver]
}
prometheus.scrape "kube_state_metrics" {
targets = [
{"__address__" = "kube-state-metrics.monitoring.svc.cluster.local:8080"},
]
job_name = "kube-state-metrics"
scrape_interval = "30s"
forward_to = [prometheus.relabel.kube_state_filter.receiver]
}
$ curl -s 'http://localhost:9090/api/v1/label/__name__/values' \
| jq -r '.data[]' | grep -c '^redis_'
25
$ curl -s 'http://localhost:9090/api/v1/label/__name__/values' \
| jq -r '.data[]' | grep -c '^kafka_'
12
$ curl -s 'http://localhost:9090/api/v1/label/__name__/values' \
| jq -r '.data[]' | grep -c '^minio_'
16
prometheus.scrape "redis_exporter" {
targets = [
{"__address__" = "xrag-redis.rag-system.svc.cluster.local:9121"},
]
job_name = "redis"
scrape_interval = "30s"
forward_to = [prometheus.relabel.redis_filter.receiver]
}
prometheus.scrape "kafka_exporter" {
targets = [
{"__address__" = "kafka-exporter.rag-system.svc.cluster.local:9308"},
]
job_name = "kafka"
scrape_interval = "30s"
forward_to = [prometheus.relabel.kafka_filter.receiver]
}
prometheus.scrape "minio" {
targets = [
{"__address__" = "xrag-minio.rag-system.svc.cluster.local:9000"},
]
job_name = "minio"
metrics_path = "/minio/v2/metrics/cluster"
scrape_interval = "30s"
forward_to = [prometheus.relabel.minio_filter.receiver]
}
sum by (consumergroup, topic) (kafka_consumergroup_lag)
redis_keyspace_hits_total / (redis_keyspace_hits_total + redis_keyspace_misses_total)
┌─────────────────────────┐
│ Root Span │
│ POST /api/search │
│ span_id: a1b2c3d4... │
│ parent: (none) │
└───────────┬─────────────┘
│
┌─────────────────────┴─────────────────────┐
│ │
▼ ▼
┌─────────────────────────┐ ┌─────────────────────────┐
│ Child Span │ │ Child Span │
│ gRPC Search │ │ render_template │
│ span_id: e5f6g7h8... │ │ span_id: i9j0k1l2... │
│ parent: a1b2c3d4... │ │ parent: a1b2c3d4... │
└───────────┬─────────────┘ └─────────────────────────┘
│
├──────────────────┬──────────────────┐
▼ ▼ ▼
┌────────────┐ ┌────────────┐ ┌────────────┐
│ Grandchild │ │ Grandchild │ │ Grandchild │
│ embedding │ │ vector │ │ llm.rag │
│ .generate │ │ _search │ │ _completion│
└────────────┘ └────────────┘ └────────────┘
traceparent: 00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01
│ │ │ │
│ │ │ └── flags
│ │ └── parent span ID (16 hex)
│ └── trace ID (32 hex)
└── version
from opentelemetry.instrumentation.fastapi import FastAPIInstrumentor
from opentelemetry.instrumentation.grpc import GrpcAioInstrumentorClient
# Auto-instrument frameworks
FastAPIInstrumentor.instrument_app(app)
GrpcAioInstrumentorClient().instrument()
# Manual spans for custom operations
with tracer.start_as_current_span("llm.rag_completion") as span:
span.set_attribute("llm.model", model_name)
result = await generate_answer(query, context)
Metadata: [
("traceparent", "00-0af7651916cd43dd8448eb211c80319c-b7ad6b7169203331-01"),
("content-type", "application/grpc"),
]
Trace ID: 0af7651916cd43dd8448eb211c80319c ├─ [search-ui] POST /api/search (300ms) │ │ │ ├─ [search-service] Search (gRPC server) (275ms) │ │ │ │ │ ├─ [search-service] embedding.generate (50ms) │ │ │ └─ [embedding-service] Embed (45ms) │ │ │ └─ POST https://api.openai.com (35ms) │ │ │ │ │ ├─ [search-service] vector_search.query (100ms) │ │ │ │ │ └─ [search-service] llm.rag_completion (120ms) │ └─ openai.chat (115ms)
otelcol.processor.batch "traces" {
timeout = "5s"
send_batch_size = 500
output { traces = [otelcol.exporter.otlp.tempo.input] }
}
otelcol.exporter.otlp "tempo" {
client {
endpoint = "tempo.monitoring.svc.cluster.local:4317"
tls { insecure = true }
}
}
$ curl -s -X POST http://localhost:8082/ingest \
-H "Content-Type: application/json" \
-d '{
"text": "This is the X-RAG Observability Guide...",
"metadata": {
"title": "X-RAG Observability Guide",
"source_file": "docs/OBSERVABILITY.md",
"type": "markdown"
},
"namespace": "default"
}' | jq .
{
"document_id": "8538656a-ba99-406c-8da7-87c5f0dda34d",
"status": "accepted",
"minio_bucket": "documents",
"minio_key": "8538656a-ba99-406c-8da7-87c5f0dda34d.json",
"message": "Document accepted for processing"
}
$ curl -s -G "http://localhost:3200/api/search" \
--data-urlencode 'q={name="POST /ingest"}' \
--data-urlencode 'limit=3' | jq '.traces[0].traceID'
"b3fc896a1cf32b425b8e8c46c86c76f7"
$ curl -s "http://localhost:3200/api/traces/b3fc896a1cf32b425b8e8c46c86c76f7" \
| jq '[.batches[] | ... | {service, span}] | unique'
[
{ "service": "ingestion-api", "span": "POST /ingest" },
{ "service": "ingestion-api", "span": "storage.upload" },
{ "service": "ingestion-api", "span": "messaging.publish" },
{ "service": "indexer", "span": "indexer.process_document" },
{ "service": "indexer", "span": "document.duplicate_check" },
{ "service": "indexer", "span": "document.pipeline" },
{ "service": "indexer", "span": "storage.download" },
{ "service": "indexer", "span": "/xrag.embedding.EmbeddingService/EmbedBatch" },
{ "service": "embedding-service", "span": "openai.embeddings" },
{ "service": "indexer", "span": "db.insert" }
]
ingestion-api | POST /ingest | 16ms ← HTTP response returns
ingestion-api | storage.upload | 13ms ← Save to MinIO
ingestion-api | messaging.publish | 1ms ← Publish to Kafka
| |
| ~~~ Kafka queue ~~~ | ← Async boundary
| |
indexer | indexer.process_document | 1799ms ← Consumer picks up message
indexer | document.duplicate_check | 1ms
indexer | document.pipeline | 1796ms
indexer | storage.download | 1ms ← Fetch from MinIO
indexer | EmbedBatch (gRPC) | 754ms ← Call embedding service
embedding-svc | openai.embeddings | 752ms ← OpenAI API
indexer | db.insert | 1038ms ← Store in Weaviate


$ curl -s -X POST http://localhost:8080/api/search \
-H "Content-Type: application/json" \
-d '{"query": "What is RAG?", "namespace": "default", "mode": "hybrid", "top_k": 5}' | jq .
{
"answer": "I don't have enough information to answer this question.",
"sources": [
{
"id": "71adbc34-56c1-4f75-9248-4ed38094ac69",
"content": "# X-RAG Observability Guide This document describes...",
"score": 0.8292956352233887,
"metadata": {
"source": "docs/OBSERVABILITY.md",
"type": "markdown",
"namespace": "default"
}
}
],
"metadata": {
"namespace": "default",
"num_sources": "5",
"cache_hit": "False",
"mode": "hybrid",
"top_k": "5",
"trace_id": "9df981cac91857b228eca42b501c98c6"
}
}
$ curl -s "http://localhost:3200/api/traces/9df981cac91857b228eca42b501c98c6" \
| jq '.batches[].scopeSpans[].spans[]
| {name, service: .attributes[]
| select(.key=="service.name")
| .value.stringValue}'
Total request: 2138ms ├── gRPC to search-service: 2135ms │ ├── Embedding generation: 649ms │ │ └── OpenAI embeddings API: 640ms │ ├── Vector search (Weaviate): 13ms │ └── LLM answer generation: 1468ms │ └── OpenAI chat API: 1463ms
$ curl -s -G "http://localhost:3200/api/search" \
--data-urlencode 'q={resource.service.name="search-service"}' \
--data-urlencode 'limit=5' | jq '.traces[:2] | .[].rootTraceName'
"/xrag.search.SearchService/Search"
"GET /health/ready"
# Find slow searches (> 2 seconds)
{resource.service.name="search-ui" && name="POST /api/search"} | duration > 2s
# Find errors
{status=error}
# Find OpenAI calls
{name=~"openai.*"}


{namespace="rag-system"} |= "trace_id=abc123" |= "error"



$ git clone https://codeberg.org/snonux/conf.git $ cd conf $ git checkout 15a86f3 # Last commit before ArgoCD migration $ cd f3s/prometheus/
$ kubectl create namespace monitoring namespace/monitoring created
$ helm repo add prometheus-community https://prometheus-community.github.io/helm-charts $ helm repo update
[root@r0 ~]# mkdir -p /data/nfs/k3svolumes/prometheus/data [root@r0 ~]# mkdir -p /data/nfs/k3svolumes/grafana/data
$ cd conf/f3s/prometheus
$ just install
kubectl apply -f persistent-volumes.yaml
persistentvolume/prometheus-data-pv created
persistentvolume/grafana-data-pv created
persistentvolumeclaim/grafana-data-pvc created
helm install prometheus prometheus-community/kube-prometheus-stack \
--namespace monitoring -f persistence-values.yaml
NAME: prometheus
LAST DEPLOYED: ...
NAMESPACE: monitoring
STATUS: deployed
kubeEtcd:
enabled: true
endpoints:
- 192.168.2.120
- 192.168.2.121
- 192.168.2.122
service:
enabled: true
port: 2381
targetPort: 2381
kubeControllerManager:
enabled: true
endpoints:
- 192.168.2.120
- 192.168.2.121
- 192.168.2.122
service:
enabled: true
port: 10257
targetPort: 10257
serviceMonitor:
enabled: true
https: true
insecureSkipVerify: true
[root@r0 ~]# cat >> /etc/rancher/k3s/config.yaml << 'EOF' kube-controller-manager-arg: - bind-address=0.0.0.0 EOF [root@r0 ~]# systemctl restart k3s
$ kubectl get svc -n monitoring prometheus-kube-prometheus-prometheus NAME TYPE CLUSTER-IP PORT(S) prometheus-kube-prometheus-prometheus ClusterIP 10.43.152.163 9090/TCP,8080/TCP


[root@r0 ~]# mkdir -p /data/nfs/k3svolumes/loki/data
$ cd conf/f3s/loki $ just install helm repo add grafana https://grafana.github.io/helm-charts || true helm repo update kubectl apply -f persistent-volumes.yaml persistentvolume/loki-data-pv created persistentvolumeclaim/loki-data-pvc created helm install loki grafana/loki --namespace monitoring -f values.yaml NAME: loki LAST DEPLOYED: ... NAMESPACE: monitoring STATUS: deployed ... helm install alloy grafana/alloy --namespace monitoring -f alloy-values.yaml NAME: alloy LAST DEPLOYED: ... NAMESPACE: monitoring STATUS: deployed
discovery.kubernetes "pods" {
role = "pod"
}
discovery.relabel "pods" {
targets = discovery.kubernetes.pods.targets
rule {
source_labels = ["__meta_kubernetes_namespace"]
target_label = "namespace"
}
rule {
source_labels = ["__meta_kubernetes_pod_name"]
target_label = "pod"
}
rule {
source_labels = ["__meta_kubernetes_pod_container_name"]
target_label = "container"
}
rule {
source_labels = ["__meta_kubernetes_pod_label_app"]
target_label = "app"
}
}
loki.source.kubernetes "pods" {
targets = discovery.relabel.pods.output
forward_to = [loki.write.default.receiver]
}
loki.write "default" {
endpoint {
url = "http://loki.monitoring.svc.cluster.local:3100/loki/api/v1/push"
}
}
$ kubectl get svc -n monitoring loki NAME TYPE CLUSTER-IP PORT(S) loki ClusterIP 10.43.64.60 3100/TCP,9095/TCP

$ kubectl get pods -n monitoring NAME READY STATUS RESTARTS AGE alertmanager-prometheus-kube-prometheus-alertmanager-0 2/2 Running 0 42d alloy-g5fgj 2/2 Running 0 29m alloy-nfw8w 2/2 Running 0 29m alloy-tg9vj 2/2 Running 0 29m loki-0 2/2 Running 0 25m prometheus-grafana-868f9dc7cf-lg2vl 3/3 Running 0 42d prometheus-kube-prometheus-operator-8d7bbc48c-p4sf4 1/1 Running 0 42d prometheus-kube-state-metrics-7c5fb9d798-hh2fx 1/1 Running 0 42d prometheus-prometheus-kube-prometheus-prometheus-0 2/2 Running 0 42d prometheus-prometheus-node-exporter-2nsg9 1/1 Running 0 42d prometheus-prometheus-node-exporter-mqr25 1/1 Running 0 42d prometheus-prometheus-node-exporter-wp4ds 1/1 Running 0 42d
$ kubectl get svc -n monitoring NAME TYPE CLUSTER-IP PORT(S) alertmanager-operated ClusterIP None 9093/TCP,9094/TCP alloy ClusterIP 10.43.74.14 12345/TCP loki ClusterIP 10.43.64.60 3100/TCP,9095/TCP loki-headless ClusterIP None 3100/TCP prometheus-grafana ClusterIP 10.43.46.82 80/TCP prometheus-kube-prometheus-alertmanager ClusterIP 10.43.208.43 9093/TCP,8080/TCP prometheus-kube-prometheus-operator ClusterIP 10.43.246.121 443/TCP prometheus-kube-prometheus-prometheus ClusterIP 10.43.152.163 9090/TCP,8080/TCP prometheus-kube-state-metrics ClusterIP 10.43.64.26 8080/TCP prometheus-prometheus-node-exporter ClusterIP 10.43.127.242 9100/TCP
# All logs from the services namespace
{namespace="services"}
# Logs from pods matching a pattern
{pod=~"miniflux.*"}
# Filter by log content
{namespace="services"} |= "error"
# Parse JSON logs and filter
{namespace="services"} | json | level="error"
paul@f0:~ % doas pkg install -y node_exporter
paul@f0:~ % doas sysrc node_exporter_enable=YES node_exporter_enable: -> YES
paul@f0:~ % doas sysrc node_exporter_args='--web.listen-address=192.168.2.130:9100' node_exporter_args: -> --web.listen-address=192.168.2.130:9100
paul@f0:~ % doas service node_exporter start Starting node_exporter.
paul@f0:~ % curl -s http://192.168.2.130:9100/metrics | head -3
# HELP go_gc_duration_seconds A summary of the wall-time pause...
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0
- job_name: 'node-exporter'
static_configs:
- targets:
- '192.168.2.130:9100' # f0 via WireGuard
- '192.168.2.131:9100' # f1 via WireGuard
- '192.168.2.132:9100' # f2 via WireGuard
labels:
os: freebsd
$ kubectl create secret generic additional-scrape-configs \
--from-file=additional-scrape-configs.yaml \
-n monitoring
prometheus:
prometheusSpec:
additionalScrapeConfigsSecret:
enabled: true
name: additional-scrape-configs
key: additional-scrape-configs.yaml
$ just upgrade

apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: freebsd-memory-rules
namespace: monitoring
labels:
release: prometheus
spec:
groups:
- name: freebsd-memory
rules:
- record: node_memory_MemTotal_bytes
expr: node_memory_size_bytes{os="freebsd"}
- record: node_memory_MemAvailable_bytes
expr: |
node_memory_free_bytes{os="freebsd"}
+ node_memory_inactive_bytes{os="freebsd"}
+ node_memory_cache_bytes{os="freebsd"}
- record: node_memory_MemFree_bytes
expr: node_memory_free_bytes{os="freebsd"}
- record: node_memory_Buffers_bytes
expr: node_memory_buffer_bytes{os="freebsd"}
- record: node_memory_Cached_bytes
expr: node_memory_cache_bytes{os="freebsd"}
blowfish:~ $ doas pkg_add node_exporter quirks-7.103 signed on 2025-10-13T22:55:16Z The following new rcscripts were installed: /etc/rc.d/node_exporter See rcctl(8) for details.
blowfish:~ $ doas rcctl enable node_exporter
blowfish:~ $ doas rcctl set node_exporter flags '--web.listen-address=192.168.2.110:9100'
blowfish:~ $ doas rcctl start node_exporter node_exporter(ok)
blowfish:~ $ curl -s http://192.168.2.110:9100/metrics | head -3
# HELP go_gc_duration_seconds A summary of the wall-time pause...
# TYPE go_gc_duration_seconds summary
go_gc_duration_seconds{quantile="0"} 0
- job_name: 'node-exporter'
static_configs:
- targets:
- '192.168.2.130:9100' # f0 via WireGuard
- '192.168.2.131:9100' # f1 via WireGuard
- '192.168.2.132:9100' # f2 via WireGuard
labels:
os: freebsd
- targets:
- '192.168.2.110:9100' # blowfish via WireGuard
- '192.168.2.111:9100' # fishfinger via WireGuard
labels:
os: openbsd
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: openbsd-memory-rules
namespace: monitoring
labels:
release: prometheus
spec:
groups:
- name: openbsd-memory
rules:
- record: node_memory_MemTotal_bytes
expr: node_memory_size_bytes{os="openbsd"}
labels:
os: openbsd
- record: node_memory_MemAvailable_bytes
expr: |
node_memory_free_bytes{os="openbsd"}
+ node_memory_inactive_bytes{os="openbsd"}
+ node_memory_cache_bytes{os="openbsd"}
labels:
os: openbsd
- record: node_memory_MemFree_bytes
expr: node_memory_free_bytes{os="openbsd"}
labels:
os: openbsd
- record: node_memory_Cached_bytes
expr: node_memory_cache_bytes{os="openbsd"}
labels:
os: openbsd
,.......... ..........,
,..,' '.' ',..,
,' ,' : ', ',
,' ,' : ', ',
,' ,' : ', ',
,' ,'............., : ,.............', ',
,' '............ '.' ............' ',
'''''''''''''''''';''';''''''''''''''''''
'''
$b="24P7cP3dP31P3bPaP28P24P64P31P2cP24P64P32P2cP24P73P2cP24P67P2cP24P7
2P29P3dP28P22P31P30P30P30P30P22P2cP22P31P30P30P30P30P30P22P2cP22P4aP75
P7 3P
74 P2
0P 41P6eP6fP74P 68P65P72P20P50 P65P72P6cP2 0P48P 61
P6 3P6bP65P72P22P 29P3bPaP40P6dP 3dP73P70P6cP6 9P74P 20
P2 fP2fP 2cP22P 2cP2eP3aP21P2 bP2aP 30P4f P40P2 2P
3b PaP24 P6eP3 dP6c P65P6 eP67 P74P6 8P
20 P24P7 3P3bP aP24 P75P3 dP22 P20P2 2P
78 P24P6 eP3bP aPaP 70P72 P69P 6eP74 P2
0P 22P5c P6eP20 P20P 24P75 P5cP7 2P22P 3b
Pa PaP66P6fP72P2 8P24P7aP20P 3dP20P31P3bP 20P24 P7
aP 3cP3dP24P6 eP3bP20P24 P7aP2bP2bP 29P20 P7
bP aPaP9 P77P28P24P6 4P31P29P 3bPaP 9P
24 P72P3 dP69 P6eP74P28 P72P6 1P
6e P64P2 8P24 P6eP2 9P29P 3bPaP 9P
24 P67P3 dP73 P75P6 2P73P 74P72 P2
0P 24P73 P2cP24P72P2cP 31P3b PaP9P 24P67P20P3fP20 P6
4P 6fP20 P9P7bP20PaP9P9 P9P9P 9P66P 6fP72P20P28P24 P6
bP 3dP30 P3bP24P6bP3cP3 9P3bP 24P6bP 2bP2bP29P20P7b Pa
P9 P9
P9 P9
P9 P9P73P75P6 2P73 P74P 72P2 8P24P75P2c P24P72 P2
cP 31P29P3dP24P 6dP5 bP24 P6bP 5dP3bP20Pa P9P9 P9P9 P9
P9 P70P 72P69 P6eP 74P2 0P22 P20P20P24P 75P 5cP 72
P2 2P3b PaP9 P9P9 P9P9 P9P7 7P28 P24 P6 4P
32 P29P 3bPa P9P9 P9P9 P9P7 dPaP 9P9 P9
P9 P9P7 3P75 P62P 73P7 4P72 P28P 24P7 5P
2c P24P 72P2c P31P 29P3 dP24 P67P3bP20P aP9P9 P9
P9 P7dP20PaP9P 9P3a P20P 72P6 5P64P6fP3b PaP9 P7
3P 75P62P73P 74P7 2P28 P24P 73P2cP24P7 2P2c P3
1P 29P3dP2 2P30 P22P 3bPa P9P7 0P7 2P
69 P6eP74P2 0P22 P20P 20P2 4P75 P5c P7
2P 22P3 bPaPa P7dP aPaP 77P2 0P28 P24 P6
4P 32P2 9P3bP aP70 P72P 69P6 eP74 P2 0P2 2P
20 P20P 24P75 P20P21P5cP7 2P22P3bPaP 73P6cP65P6 5P7 0P20 P3
2P 3bPa P70P7 2P69P6eP74P 20P22P20P2 0P24P75P20 P21P 5cP6 eP
22 P3bP aPaP7 3P75P62P2 0P77P20P7b PaP9P24P6c P3dP73 P6
8P 69
P6 6P
74P3bPaP9P66P6fP72P28P24P6aP3dP30P3bP24P6aP3cP24P6cP3bP24P6aP2bP2bP29P
7bP7dPaP7dP";$b=~s/\s//g;split /P/,$b;foreach(@_){$c.=chr hex};eval $c
The above Perl script prints out "Just Another Perl Hacker !" in an
animation of sorts.
fishfinger$ grep foostats /etc/daily.local perl /usr/local/bin/foostats.pl --parse-logs --replicate --report
# Classic package syntax: each `package` statement switches the current
# namespace until the next one, so both subs live in the same file scope.
package foo;
sub hello {
print "Hello from package foo\n";
}
package bar;
sub hello {
print "Hello from package bar\n";
}
# Block form (Perl 5.14+): the namespace extends only to the closing brace,
# making the package boundary explicit and lexically delimited.
package foo {
sub hello {
print "Hello from package foo\n";
}
}
package bar {
sub hello {
print "Hello from package bar\n";
}
}
# Circumfix dereference: wrap the reference in @{...}.
for my $elem (@{$array_ref}) {
print "$elem\n";
}
# Postfix dereference (stable since Perl 5.24): reads left to right.
for my $elem ($array_ref->@*) {
print "$elem\n";
}
# Postfix %* dereferences a nested hashref in place.
print for keys $hash->{stats}->%*;
use v5.38;

# say (enabled by `use v5.38`) appends the newline for you.
print "Hello, world!\n"; # old way
say "Hello, world!";     # new way
use v5.38;
# Subroutine signatures like (@lines) are enabled by `use v5.38`.
sub process_lines (@lines) {
# Lexical (`my`) sub: trim is visible only inside process_lines.
my sub trim ($str) {
# The /r flag returns the modified copy instead of editing $str in place.
$str =~ s/^\s+|\s+$//gr;
}
return [ map { trim($_) } @lines ];
}
my @raw = (" foo ", " bar", "baz ");
my $cleaned = process_lines(@raw);
say for @$cleaned; # prints "foo", "bar", "baz"
use feature qw(refaliasing);
my $hash = { foo => 42 };
# \my $foo aliases (not copies) the hash slot, so assignments to $foo
# write straight through to $hash->{foo}.
\my $foo = \$hash->{foo};
$foo = 99;
print $hash->{foo}; # prints 99
# `state` initialises $count once and keeps it alive across calls,
# giving the sub private persistent state without a package global.
sub counter {
state $count = 0;
$count++;
return $count;
}
say counter(); # 1
say counter(); # 2
say counter(); # 3
say counter(); # 3
# Old way
sub greet_old { my $name = shift; print "Hello, $name!\n" }
# Another old way
# NOTE: the ($) here is a prototype, not a signature - it only affects parsing.
sub greet_old2 ($) { my $name = shift; print "Hello, $name!\n" }
# New way
# With signatures enabled, ($name) binds the argument directly.
sub greet ($name) { say "Hello, $name!"; }
greet("Alice"); # prints "Hello, Alice!"
# Signatures work for anonymous subs as well.
my $anon = sub ($name) {
say "Hello, $name!";
};
$anon->("World"); # prints "Hello, World!"
# //= is defined-or assignment: it assigns only when the variable is undef.
my $foo;
$foo //= 42;
say $foo; # prints 42
$foo //= 99;
say $foo; # still prints 42, because $foo was already defined
use feature qw(defer);
sub parse_log_file ($path) {
open my $fh, '<', $path or die "Cannot open $path: $!";
# The defer block runs when the enclosing scope exits - normally or via
# an exception - so the handle is always closed.
defer { close $fh };
while (my $line = <$fh>) {
# ... parsing logic that might throw an exception ...
}
# $fh is automatically closed here
}

# At the top level, self is the main object
p self       # => main
p self.class # => Object

def foo
  # Inside a method, self is the object that received the call
  p self
end

foo # => main
obj = "a string"

# Singleton method: defined on this one object only.
def obj.shout
  self.upcase + "!"
end

p obj.shout # => "A STRING!"

obj2 = "another string"
# obj2.shout would raise a NoMethodError
# Classes are objects too: Class.new builds one at runtime, the block
# becomes its body, and assigning it to a constant gives it a name.
MyClass = Class.new do
def say_hello
puts "Hello from a dynamically created class!"
end
end
instance = MyClass.new
instance.say_hello
# => Hello from a dynamically created class!
# The case-equality operator === is what case/when invokes under the hood.
# For ranges, it checks for inclusion
p (1..5) === 3 # => true
# For classes, it checks if the object is an instance of the class
p String === "hello" # => true
# For regexes, it checks for a match
p /llo/ === "hello" # => true
# case/when tests each candidate with `candidate === value`, which is why
# classes and ranges can be matched directly.
def check(value)
case value
when String
"It's a string"
when (1..10)
"It's a number between 1 and 10"
else
"Something else"
end
end
p check(5) # => "It's a number between 1 and 10"
# Each `yield` hands control to the caller's block and resumes afterwards,
# so the block body runs twice here.
def my_iterator
puts "Entering the method"
yield
puts "Back in the method"
yield
end
my_iterator { puts "Inside the block" }
# Entering the method
# Inside the block
# Back in the method
# Inside the block
# yield can pass arguments to the block; the block's last expression
# becomes yield's return value.
def with_return
result = yield(5)
puts "The block returned #{result}"
end
with_return { |n| n * 2 }
# => The block returned 10
# Symbols are interned - one object per name - which makes them cheap,
# stable hash keys compared to freshly allocated strings.
# Two strings with the same content are different objects
p "foo".object_id
p "foo".object_id
# Two symbols with the same content are the same object
p :foo.object_id
p :foo.object_id
# Modern hash syntax uses symbols as keys
my_hash = { name: "Paul", language: "Ruby" }
p my_hash[:name] # => "Paul"
# Array of strings
p %w[one two three] # => ["one", "two", "three"]

# Array of symbols
p %i[one two three] # => [:one, :two, :three]
# values_at fetches several indices/keys in one call, in the given order.
arr = [10, 20, 30, 40, 50]
p arr.values_at(0, 2, 4)
# => [10, 30, 50]
hash = { a: 1, b: 2, c: 3 }
p hash.values_at(:a, :c)
# => [1, 3]

$ git clone https://codeberg.org/snonux/conf.git
$ cd conf
$ git checkout 15a86f3 # Last commit before ArgoCD migration
$ cd f3s/
dnf update -y
reboot
paul@f0:~ % doas freebsd-update fetch
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas reboot
.
.
.
paul@f0:~ % doas freebsd-update -r 14.3-RELEASE upgrade
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas reboot
.
.
.
paul@f0:~ % doas freebsd-update install
paul@f0:~ % doas pkg update
paul@f0:~ % doas pkg upgrade
paul@f0:~ % doas reboot
.
.
.
paul@f0:~ % uname -a
FreeBSD f0.lan.buetow.org 14.3-RELEASE FreeBSD 14.3-RELEASE
releng/14.3-n271432-8c9ce319fef7 GENERIC amd64
[root@r0 ~]# echo -n SECRET_TOKEN > ~/.k3s_token
[root@r0 ~]# curl -sfL https://get.k3s.io | K3S_TOKEN=$(cat ~/.k3s_token) \
sh -s - server --cluster-init \
--node-ip=192.168.2.120 \
--advertise-address=192.168.2.120 \
--tls-san=r0.wg0.wan.buetow.org
[INFO] Finding release for channel stable
[INFO] Using v1.32.6+k3s1 as release
.
.
.
[INFO] systemd: Starting k3s
[root@r1 ~]# curl -sfL https://get.k3s.io | K3S_TOKEN=$(cat ~/.k3s_token) \
sh -s - server --server https://r0.wg0.wan.buetow.org:6443 \
--node-ip=192.168.2.121 \
--advertise-address=192.168.2.121 \
--tls-san=r1.wg0.wan.buetow.org
[root@r2 ~]# curl -sfL https://get.k3s.io | K3S_TOKEN=$(cat ~/.k3s_token) \
sh -s - server --server https://r0.wg0.wan.buetow.org:6443 \
--node-ip=192.168.2.122 \
--advertise-address=192.168.2.122 \
--tls-san=r2.wg0.wan.buetow.org
.
.
.
[root@r0 ~]# kubectl get nodes NAME STATUS ROLES AGE VERSION r0.lan.buetow.org Ready control-plane,etcd,master 4m44s v1.32.6+k3s1 r1.lan.buetow.org Ready control-plane,etcd,master 3m13s v1.32.6+k3s1 r2.lan.buetow.org Ready control-plane,etcd,master 30s v1.32.6+k3s1 [root@r0 ~]# kubectl get pods --all-namespaces NAMESPACE NAME READY STATUS RESTARTS AGE kube-system coredns-5688667fd4-fs2jj 1/1 Running 0 5m27s kube-system helm-install-traefik-crd-f9hgd 0/1 Completed 0 5m27s kube-system helm-install-traefik-zqqqk 0/1 Completed 2 5m27s kube-system local-path-provisioner-774c6665dc-jqlnc 1/1 Running 0 5m27s kube-system metrics-server-6f4c6675d5-5xpmp 1/1 Running 0 5m27s kube-system svclb-traefik-411cec5b-cdp2l 2/2 Running 0 78s kube-system svclb-traefik-411cec5b-f625r 2/2 Running 0 4m58s kube-system svclb-traefik-411cec5b-twrd7 2/2 Running 0 4m2s kube-system traefik-c98fdf6fb-lt6fx 1/1 Running 0 4m58s
> ~ kubectl create namespace test namespace/test created > ~ kubectl get namespaces NAME STATUS AGE default Active 6h11m kube-node-lease Active 6h11m kube-public Active 6h11m kube-system Active 6h11m test Active 5s > ~ kubectl config set-context --current --namespace=test Context "default" modified.
> ~ cat <<END > apache-deployment.yaml
# Apache HTTP Server Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: apache-deployment
spec:
replicas: 1
selector:
matchLabels:
app: apache
template:
metadata:
labels:
app: apache
spec:
containers:
- name: apache
image: httpd:latest
ports:
# Container port where Apache listens
- containerPort: 80
END
> ~ kubectl apply -f apache-deployment.yaml
deployment.apps/apache-deployment created
> ~ kubectl get all
NAME READY STATUS RESTARTS AGE
pod/apache-deployment-5fd955856f-4pjmf 1/1 Running 0 7s
NAME READY UP-TO-DATE AVAILABLE AGE
deployment.apps/apache-deployment 1/1 1 1 7s
NAME DESIRED CURRENT READY AGE
replicaset.apps/apache-deployment-5fd955856f 1 1 1 7s
> ~ cat <<END > apache-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: apache
name: apache-service
spec:
ports:
- name: web
port: 80
protocol: TCP
# Expose port 80 on the service
targetPort: 80
selector:
# Link this service to pods with the label app=apache
app: apache
END
> ~ kubectl apply -f apache-service.yaml
service/apache-service created
> ~ kubectl get service
NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE
apache-service ClusterIP 10.43.249.165 <none> 80/TCP 4s
> ~ cat <<END > apache-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apache-ingress
namespace: test
annotations:
spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: standby.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: www.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
END
> ~ kubectl apply -f apache-ingress.yaml
ingress.networking.k8s.io/apache-ingress created
> ~ kubectl describe ingress
Name: apache-ingress
Labels: <none>
Namespace: test
Address: 192.168.2.120,192.168.2.121,192.168.2.122
Ingress Class: traefik
Default backend: <default>
Rules:
Host Path Backends
---- ---- --------
f3s.foo.zone
/ apache-service:80 (10.42.1.11:80)
standby.f3s.foo.zone
/ apache-service:80 (10.42.1.11:80)
www.f3s.foo.zone
/ apache-service:80 (10.42.1.11:80)
Annotations: spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
Events: <none>
> ~ curl -H "Host: www.f3s.foo.zone" http://r0.lan.buetow.org:80 <html><body><h1>It works!</h1></body></html>
> ~ cat <<END > apache-deployment.yaml
# Apache HTTP Server Deployment
apiVersion: apps/v1
kind: Deployment
metadata:
name: apache-deployment
namespace: test
spec:
replicas: 2
selector:
matchLabels:
app: apache
template:
metadata:
labels:
app: apache
spec:
containers:
- name: apache
image: httpd:latest
ports:
# Container port where Apache listens
- containerPort: 80
readinessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 5
periodSeconds: 10
livenessProbe:
httpGet:
path: /
port: 80
initialDelaySeconds: 15
periodSeconds: 10
volumeMounts:
- name: apache-htdocs
mountPath: /usr/local/apache2/htdocs/
volumes:
- name: apache-htdocs
persistentVolumeClaim:
claimName: example-apache-pvc
END
> ~ cat <<END > apache-ingress.yaml
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: apache-ingress
namespace: test
annotations:
spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web
spec:
rules:
- host: f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: standby.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
- host: www.f3s.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: apache-service
port:
number: 80
END
> ~ cat <<END > apache-persistent-volume.yaml
apiVersion: v1
kind: PersistentVolume
metadata:
name: example-apache-pv
spec:
capacity:
storage: 1Gi
volumeMode: Filesystem
accessModes:
- ReadWriteOnce
persistentVolumeReclaimPolicy: Retain
hostPath:
path: /data/nfs/k3svolumes/example-apache-volume-claim
type: Directory
---
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: example-apache-pvc
namespace: test
spec:
storageClassName: ""
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi
END
> ~ cat <<END > apache-service.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app: apache
name: apache-service
namespace: test
spec:
ports:
- name: web
port: 80
protocol: TCP
# Expose port 80 on the service
targetPort: 80
selector:
# Link this service to pods with the label app=apache
app: apache
END
> ~ kubectl apply -f apache-persistent-volume.yaml
> ~ kubectl apply -f apache-service.yaml
> ~ kubectl apply -f apache-deployment.yaml
> ~ kubectl apply -f apache-ingress.yaml
> ~ kubectl get pods
NAME READY STATUS RESTARTS AGE
apache-deployment-5b96bd6b6b-fv2jx 0/1 ContainerCreating 0 9m15s
apache-deployment-5b96bd6b6b-ax2ji 0/1 ContainerCreating 0 9m15s
> ~ kubectl describe pod apache-deployment-5b96bd6b6b-fv2jx | tail -n 5
Events:
Type Reason Age From Message
---- ------ ---- ---- -------
Normal Scheduled 9m34s default-scheduler Successfully
assigned test/apache-deployment-5b96bd6b6b-fv2jx to r2.lan.buetow.org
Warning FailedMount 80s (x12 over 9m34s) kubelet MountVolume.SetUp
failed for volume "example-apache-pv" : hostPath type check failed:
/data/nfs/k3svolumes/example-apache is not a directory
[root@r0 ~]# mkdir /data/nfs/k3svolumes/example-apache-volume-claim/ [root@r0 ~]# cat <<END > /data/nfs/k3svolumes/example-apache-volume-claim/index.html <!DOCTYPE html> <html> <head> <title>Hello, it works</title> </head> <body> <h1>Hello, it works!</h1> <p>This site is served via a PVC!</p> </body> </html> END
> ~ kubectl delete pod apache-deployment-5b96bd6b6b-fv2jx > ~ curl -H "Host: www.f3s.foo.zone" http://r0.lan.buetow.org:80 <!DOCTYPE html> <html> <head> <title>Hello, it works</title> </head> <body> <h1>Hello, it works!</h1> <p>This site is served via a PVC!</p> </body> </html>
> ~ kubectl -n kube-system scale deployment traefik --replicas=2
> ~ kubectl -n kube-system get pods -l app.kubernetes.io/name=traefik kube-system traefik-c98fdf6fb-97kqk 1/1 Running 19 (53d ago) 64d kube-system traefik-c98fdf6fb-9npg2 1/1 Running 11 (53d ago) 61d
> ~ curl https://f3s.foo.zone <html><body><h1>It works!</h1></body></html> > ~ curl https://www.f3s.foo.zone <html><body><h1>It works!</h1></body></html> > ~ curl https://standby.f3s.foo.zone <html><body><h1>It works!</h1></body></html>
table <f3s> {
192.168.2.120
192.168.2.121
192.168.2.122
}
http protocol "https" {
# TLS certificates for all f3s services
tls keypair f3s.foo.zone
tls keypair www.f3s.foo.zone
tls keypair standby.f3s.foo.zone
tls keypair anki.f3s.foo.zone
tls keypair www.anki.f3s.foo.zone
tls keypair standby.anki.f3s.foo.zone
tls keypair bag.f3s.foo.zone
tls keypair www.bag.f3s.foo.zone
tls keypair standby.bag.f3s.foo.zone
tls keypair flux.f3s.foo.zone
tls keypair www.flux.f3s.foo.zone
tls keypair standby.flux.f3s.foo.zone
tls keypair audiobookshelf.f3s.foo.zone
tls keypair www.audiobookshelf.f3s.foo.zone
tls keypair standby.audiobookshelf.f3s.foo.zone
tls keypair gpodder.f3s.foo.zone
tls keypair www.gpodder.f3s.foo.zone
tls keypair standby.gpodder.f3s.foo.zone
tls keypair radicale.f3s.foo.zone
tls keypair www.radicale.f3s.foo.zone
tls keypair standby.radicale.f3s.foo.zone
tls keypair vault.f3s.foo.zone
tls keypair www.vault.f3s.foo.zone
tls keypair standby.vault.f3s.foo.zone
tls keypair syncthing.f3s.foo.zone
tls keypair www.syncthing.f3s.foo.zone
tls keypair standby.syncthing.f3s.foo.zone
tls keypair uprecords.f3s.foo.zone
tls keypair www.uprecords.f3s.foo.zone
tls keypair standby.uprecords.f3s.foo.zone
# Explicitly route non-f3s hosts to localhost
match request header "Host" value "foo.zone" forward to <localhost>
match request header "Host" value "www.foo.zone" forward to <localhost>
match request header "Host" value "dtail.dev" forward to <localhost>
# ... other non-f3s hosts ...
# NOTE: f3s hosts have NO match rules here!
# They use relay-level failover (f3s -> localhost backup)
# See the relay configuration below for automatic failover details
}
relay "https4" {
listen on 46.23.94.99 port 443 tls
protocol "https"
# Primary: f3s cluster (with health checks) - Falls back to localhost when all hosts down
forward to <f3s> port 80 check tcp
forward to <localhost> port 8080
}
relay "https6" {
listen on 2a03:6000:6f67:624::99 port 443 tls
protocol "https"
# Primary: f3s cluster (with health checks) - Falls back to localhost when all hosts down
forward to <f3s> port 80 check tcp
forward to <localhost> port 8080
}
# NEW configuration - supports automatic failover
http protocol "https" {
# Explicitly route non-f3s hosts to localhost
match request header "Host" value "foo.zone" forward to <localhost>
match request header "Host" value "dtail.dev" forward to <localhost>
# ... other non-f3s hosts ...
# f3s hosts have NO protocol rules - they use relay-level failover
# (no match rules for f3s.foo.zone, anki.f3s.foo.zone, etc.)
}
relay "https4" {
# f3s FIRST (with health checks), localhost as BACKUP
forward to <f3s> port 80 check tcp
forward to <localhost> port 8080
}
# OpenBSD httpd.conf
# Fallback for f3s hosts - serve fallback page for ALL paths
server "f3s.foo.zone" {
listen on * port 8080
log style forwarded
location * {
# Rewrite all requests to /index.html to show fallback page regardless of path
request rewrite "/index.html"
root "/htdocs/f3s_fallback"
}
}
server "anki.f3s.foo.zone" {
listen on * port 8080
log style forwarded
location * {
request rewrite "/index.html"
root "/htdocs/f3s_fallback"
}
}
# ... similar blocks for all f3s hostnames ...
<!DOCTYPE html>
<html>
<head>
<title>Server turned off</title>
<style>
body {
font-family: sans-serif;
text-align: center;
padding-top: 50px;
}
.container {
max-width: 600px;
margin: 0 auto;
}
</style>
</head>
<body>
<div class="container">
<h1>Server turned off</h1>
<p>The servers are all currently turned off.</p>
<p>Please try again later.</p>
<p>Or email <a href="mailto:paul@nospam.buetow.org">paul@nospam.buetow.org</a>
- so I can turn them back on for you!</p>
</div>
</body>
</html>
Internet → OpenBSD relayd (TLS termination, Let's Encrypt)
→ WireGuard tunnel
→ k3s Traefik :80 (HTTP)
→ Service
LAN → FreeBSD CARP VIP (192.168.1.138)
→ FreeBSD relayd (TCP forwarding)
→ k3s Traefik :443 (TLS termination, cert-manager)
→ Service
$ cd conf/f3s/cert-manager $ just install kubectl apply -f cert-manager.yaml # ... cert-manager CRDs and resources created ... kubectl apply -f self-signed-issuer.yaml clusterissuer.cert-manager.io/selfsigned-issuer created clusterissuer.cert-manager.io/selfsigned-ca-issuer created kubectl apply -f ca-certificate.yaml certificate.cert-manager.io/selfsigned-ca created kubectl apply -f wildcard-certificate.yaml certificate.cert-manager.io/f3s-lan-wildcard created
$ kubectl get certificate -n cert-manager NAME READY SECRET AGE f3s-lan-wildcard True f3s-lan-tls 5m selfsigned-ca True selfsigned-ca-secret 5m
$ kubectl get secret f3s-lan-tls -n cert-manager -o yaml | \
sed 's/namespace: cert-manager/namespace: services/' | \
kubectl apply -f -
paul@f0:~ % doas pkg install -y relayd
# k3s nodes backend table
table <k3s_nodes> { 192.168.1.120 192.168.1.121 192.168.1.122 }
# TCP forwarding to Traefik (no TLS termination)
relay "lan_http" {
listen on 192.168.1.138 port 80
forward to <k3s_nodes> port 80 check tcp
}
relay "lan_https" {
listen on 192.168.1.138 port 443
forward to <k3s_nodes> port 443 check tcp
}
# Basic PF rules for relayd
set skip on lo0
pass in quick
pass out quick
paul@f0:~ % doas sysrc pf_enable=YES pflog_enable=YES relayd_enable=YES
paul@f0:~ % doas service pf start
paul@f0:~ % doas service pflog start
paul@f0:~ % doas service relayd start
paul@f0:~ % doas sockstat -4 -l | grep 192.168.1.138 _relayd relayd 2903 11 tcp4 192.168.1.138:80 *:* _relayd relayd 2903 12 tcp4 192.168.1.138:443 *:*
---
# LAN Ingress for f3s.lan.foo.zone
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
name: ingress-lan
namespace: services
annotations:
spec.ingressClassName: traefik
traefik.ingress.kubernetes.io/router.entrypoints: web,websecure
spec:
tls:
- hosts:
- f3s.lan.foo.zone
secretName: f3s-lan-tls
rules:
- host: f3s.lan.foo.zone
http:
paths:
- path: /
pathType: Prefix
backend:
service:
name: service
port:
number: 4533
$ kubectl apply -f ingress-lan.yaml ingress.networking.k8s.io/ingress-lan created $ curl -k https://f3s.lan.foo.zone HTTP/2 302 location: /app/
$ sudo tee -a /etc/hosts << 'EOF' # f3s LAN services 192.168.1.138 f3s.lan.foo.zone EOF
$ kubectl get secret selfsigned-ca-secret -n cert-manager -o jsonpath='{.data.ca\.crt}' | \
base64 -d > f3s-lan-ca.crt
$ sudo cp f3s-lan-ca.crt /etc/pki/ca-trust/source/anchors/ $ sudo update-ca-trust
[root@r0 ~]# mkdir -p /data/nfs/k3svolumes/registry
$ git clone https://codeberg.org/snonux/conf/f3s.git $ cd conf/f3s/examples/conf/f3s/registry $ helm upgrade --install registry ./helm-chart --namespace infra --create-namespace
$ kubectl get pods --namespace infra NAME READY STATUS RESTARTS AGE docker-registry-6bc9bb46bb-6grkr 1/1 Running 6 (53d ago) 54d $ kubectl get svc docker-registry-service -n infra NAME TYPE CLUSTER-IP EXTERNAL-IP PORT(S) AGE docker-registry-service NodePort 10.43.141.56 <none> 5000:30001/TCP 54d
$ cat <<"EOF" | sudo tee /etc/docker/daemon.json >/dev/null
{
"insecure-registries": [
"r0.lan.buetow.org:30001",
"r1.lan.buetow.org:30001",
"r2.lan.buetow.org:30001"
]
}
EOF
$ sudo systemctl restart docker
$ for node in r0 r1 r2; do
> ssh root@$node "echo '127.0.0.1 registry.lan.buetow.org' >> /etc/hosts"
> done
$ for node in r0 r1 r2; do
> ssh root@$node "cat <<'EOF' > /etc/rancher/k3s/registries.yaml
mirrors:
"registry.lan.buetow.org:30001":
endpoint:
- "http://localhost:30001"
EOF
systemctl restart k3s"
> done
$ docker tag my-app:latest r0.lan.buetow.org:30001/my-app:latest $ docker push r0.lan.buetow.org:30001/my-app:latest
image: docker-registry-service:5000/my-app:latest
$ kubectl run registry-test \ > --image=docker-registry-service:5000/my-app:latest \ > --restart=Never -n test --command -- sleep 300
$ cd conf/f3s/examples/conf/f3s/anki-sync-server/docker-image
$ docker build -t anki-sync-server:25.07.5b --build-arg ANKI_VERSION=25.07.5 .
$ docker tag anki-sync-server:25.07.5b \
r0.lan.buetow.org:30001/anki-sync-server:25.07.5b
$ docker push r0.lan.buetow.org:30001/anki-sync-server:25.07.5b
$ ssh root@r0 "mkdir -p /data/nfs/k3svolumes/anki-sync-server/anki_data"
$ kubectl create namespace services
$ kubectl create secret generic anki-sync-server-secret \
--from-literal=SYNC_USER1='paul:SECRETPASSWORD' \
-n services
$ cd ../helm-chart $ helm upgrade --install anki-sync-server . -n services
containers:
- name: anki-sync-server image: registry.lan.buetow.org:30001/anki-sync-server:25.07.5b
volumeMounts:
- name: anki-data
mountPath: /anki_data
$ kubectl get pods -n services $ kubectl get ingress anki-sync-server-ingress -n services $ curl https://anki.f3s.foo.zone/health
> ~ kubectl exec -n services deploy/miniflux-postgres -- id postgres uid=999(postgres) gid=999(postgres) groups=999(postgres) [root@r0 ~]# id postgres uid=999(postgres) gid=999(postgres) groups=999(postgres) paul@f0:~ % doas id postgres uid=999(postgres) gid=99(postgres) groups=999(postgres)
[root@r0 ~]# groupadd --gid 999 postgres
[root@r0 ~]# useradd --uid 999 --gid 999 \
--home-dir /var/lib/pgsql \
--shell /sbin/nologin postgres
paul@f0:~ % doas pw groupadd postgres -g 999
paul@f0:~ % doas pw useradd postgres -u 999 -g postgres \
-d /var/db/postgres -s /usr/sbin/nologin
# Persistent volume lives on the NFS export
hostPath:
path: /data/nfs/k3svolumes/miniflux/data
type: Directory
...
containers:
- name: miniflux-postgres
image: postgres:17
volumeMounts:
- name: miniflux-postgres-data
mountPath: /var/lib/postgresql/data
$ cd examples/conf/f3s/miniflux/helm-chart
$ mkdir -p /data/nfs/k3svolumes/miniflux/data
$ kubectl create secret generic miniflux-db-password \
--from-literal=fluxdb_password='YOUR_PASSWORD' -n services
$ kubectl create secret generic miniflux-admin-password \
--from-literal=admin_password='YOUR_ADMIN_PASSWORD' -n services
$ helm upgrade --install miniflux . -n services --create-namespace
$ kubectl get all --namespace=services | grep mini pod/miniflux-postgres-556444cb8d-xvv2p 1/1 Running 0 54d pod/miniflux-server-85d7c64664-stmt9 1/1 Running 0 54d service/miniflux ClusterIP 10.43.47.80 <none> 8080/TCP 54d service/miniflux-postgres ClusterIP 10.43.139.50 <none> 5432/TCP 54d deployment.apps/miniflux-postgres 1/1 1 1 54d deployment.apps/miniflux-server 1/1 1 1 54d replicaset.apps/miniflux-postgres-556444cb8d 1 1 1 54d replicaset.apps/miniflux-server-85d7c64664 1 1 1 54d
'\ '\ '\ '\ . . |>18>>
\ \ \ \ . ' . |
O>> O>> O>> O>> . 'o |
\ .\. .. .\. .. .\. .. . |
/\ . /\ . /\ . /\ . . |
/ / . / / .'. / / .'. / / .' . |
jgs^^^^^^^`^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
Art by Joan Stark, mod. by Paul Buetow
somecommand \
| tee >(command1) >(command2) \
| command3
printf 'a\nb\n' \
| tee >(sed 's/.*/X:&/; s/$/ :c1/') >(tr a-z A-Z | sed 's/$/ :c2/') \
| sed 's/$/ :c3/'
a :c3 b :c3 A :c2 :c3 B :c2 :c3 X:a :c1 :c3 X:b :c1 :c3
/bin/sh -c 'echo hi | tee >(cat)'
# /bin/sh: 1: Syntax error: "(" unexpected
set -o pipefail
printf 'ok\n' | tee >(false) | cat >/dev/null
echo $? # 1 because a side branch failed
ssh "$SSH_USER@$SSH_HOST" <<EOF
# Go to the work directory
cd "$WORK_DIR"
# Make a git pull
git pull
# Export environment variables required for the service to run
export AUTH_TOKEN="$APP_AUTH_TOKEN"
# Start the service
docker compose up -d --build
EOF
# A quoted delimiter ('EOF') disables all expansion inside the heredoc.
FOO=bar cat <<'EOF'
$FOO is not expanded here
EOF
WORK_DIR="/tmp/my work"
ssh host <<EOF
cd $WORK_DIR # may break if unquoted
cd "$WORK_DIR" # safe
EOF
ssh host <<'EOF'
set -euo pipefail
false # causes immediate failure
echo never
EOF
# <<- strips leading TAB characters from each heredoc line (and the delimiter).
cat <<-EOF > script.sh
	#!/usr/bin/env bash
	echo "tab-indented content is dedented"
EOF
#!/usr/bin/env bash
set -euo pipefail
# Poor man's "super": inspects the calling function's name via FUNCNAME
# and re-dispatches to the ::base:: implementation of the same method.
super() {
local -r fn=${FUNCNAME[1]}
# Split name on :: and dispatch to base implementation
# (unquoted expansion relies on default IFS word splitting)
local -a parts=( ${fn//::/ } )
"${parts[0]}::base::${parts[2]}" "$@"
}
foo::base::greet() { echo "base: $@"; }
foo::german::greet() { super "Guten Tag, $@!"; }
foo::english::greet() { super "Good day, $@!"; }
for lang in german english; do
foo::$lang::greet Paul
done
base: Guten Tag, Paul! base: Good day, Paul!
user_name=paul
declare -n ref=user_name # ref is a nameref aliasing user_name
echo "$ref"       # paul
ref=julia         # assigns through the nameref
echo "$user_name" # julia
paul julia
# Create a dynamically named variable slot_<idx> holding the joined args.
make_var() {
local idx=$1; shift
local name="slot_$idx"
printf -v "$name" '%s' "$*" # create variable slot_$idx
}
# Read such a slot back through a nameref (bash 4.3+).
get_var() {
local idx=$1
local -n ref="slot_$idx" # bind ref to slot_$idx
printf '%s\n' "$ref"
}
make_var 7 "seven"
get_var 7
seven
# Three equivalent ways to declare a function; only the first is POSIX sh.
foo() { echo foo; }
function foo { echo foo; }
# Hybrid form: accepted by bash but redundant and non-portable.
function foo() { echo foo; }
deploy_check() { test -f deploy.yaml; }
smoke_test() { curl -fsS http://localhost/healthz >/dev/null; }
# Explicit if/else: the success and failure paths are unambiguous.
if deploy_check || smoke_test; then
echo "All good."
else
echo "Something failed." >&2
fi
# One-liner chain - beware: `a || b && c || d` groups as ((a || b) && c) || d,
# which is not a true if/else.
deploy_check || smoke_test && echo ok || echo fail >&2
cat > /tmp/ctx.txt <<EOF
one
foo
two
three
bar
EOF
# -C1 prints one line of context before and after each match.
grep -C1 foo /tmp/ctx.txt
one foo two
mkdir -p /tmp/golf/foo /tmp/golf/src
printf 'bar\n' > /tmp/golf/src/a.txt
printf 'bar\n' > /tmp/golf/foo/skip.txt
# --exclude-dir skips the foo directory during the recursive search.
grep -R --exclude-dir=foo 'bar' /tmp/golf
/tmp/golf/src/a.txt:bar
printf 'A\nB\nC\n' > /tmp/s.txt
# 1i / 3i insert text before lines 1 and 3 respectively.
sed -e '1iHEAD' -e '3iMID' /tmp/s.txt
HEAD A B MID C
printf 'a b c\nx y z\n' > /tmp/t.txt
cat /tmp/t.txt
echo
awk 'NF{NF-=1};1' /tmp/t.txt
a b c x y z a b x y
find . -type f -name '*.log' -print0 | xargs -0 rm -f
printf 'a\0b c\0' | xargs -0 -I{} printf '<%s>\n' {}
<a> <b c>
cfg=$(<config.ini)
mapfile -t lines < <(grep -v '^#' config.ini)
printf '%s\n' "${lines[@]}"
# printf -v stores the formatted result in msg instead of printing it.
printf -v msg 'Hello %s, id=%04d' "$USER" 42
echo "$msg"
Hello paul, id=0042
mapfile -d '' -t files < <(find . -type f -print0)
printf '%s\n' "${files[@]}"
LC_ALL=C tr -dc 'A-Za-z0-9_' </dev/urandom | head -c 16; echo
openssl rand -base64 16 | tr -d '\n' | cut -c1-22
yes | rm -r large_directory # auto-confirm
yes n | dangerous-command   # auto-decline
yes anything | head -n1     # prints one line: anything
# Shell functions shadow builtins, so these definitions invert true/false
# for any caller that resolves the function name first.
true() { return 1; }
false() { return 0; }
true || echo 'true failed'
false && echo 'false succeeded'
# Bypass function with builtin/command
builtin true # returns 0
command true # returns 0
rbash -c 'cd /'          # cd: restricted
rbash -c 'PATH=/tmp'     # PATH: restricted
rbash -c 'echo hi > out' # redirection: restricted
rbash -c '/bin/echo hi'  # commands with /: restricted
rbash -c 'exec ls'       # exec: restricted
# Prefer
grep -i foo file
<file grep -i foo # or feed via redirection
# Over
cat file | grep -i foo
cat file1 file2 | grep -i foo
# mkdir is atomic, so it doubles as a mutex: only one process can create
# the lock directory; the trap removes it on any exit path.
lockdir=/tmp/myjob.lock
if mkdir "$lockdir" 2>/dev/null; then
trap 'rmdir "$lockdir"' EXIT INT TERM
# critical section
do_work
else
echo "Another instance is running" >&2
exit 1
fi
find . -name '*.log' -exec gzip -9 {} +
shopt -s extglob ls -d -- !(.git|node_modules) 2>/dev/null
/\_/\ /\_/\ /\_/\ ( o.o ) WHOA!( o.o ) WHOA!( o.o ) > ^ < > ^ < > ^ < / \ MEOW! / \ MOEEW!/ \ /_____\ /_____\ /_____\
[::]
_| |_
/ o o \ |
| ∆ | <-- Ollama / \
| \___/ | / \
\_______/ LLM --> / 30B \
| | / Qwen3 \
/| |\ / Coder \
/_| |_\_________________/ quantised \
brew install ollama
rehash
ollama serve

ollama pull qwen2.5-coder:14b-instruct
time echo "Write a function in golang to print out the Nth fibonacci number, \
only the function without the boilerplate" | ollama run qwen2.5-coder:14b-instruct
Output:
func fibonacci(n int) int {
if n <= 1 {
return n
}
a, b := 0, 1
for i := 2; i <= n; i++ {
a, b = b, a+b
}
return b
}
Execution Metrics:
Executed in 4.90 secs fish external
usr time 15.54 millis 0.31 millis 15.24 millis
sys time 19.68 millis 1.02 millis 18.66 millis
Executed in 1.83 secs fish external usr time 17.82 millis 4.40 millis 13.42 millis sys time 17.07 millis 1.57 millis 15.50 millis
brew install aider
mkdir -p ~/git/aitest && cd ~/git/aitest && git init
aider --model ollama_chat/qwen2.5-coder:14b-instruct
Create a Go project with these files: * `cmd/aitest/main.go`: CLI entry point * `internal/version.go`: Version information (0.0.0), should be printed when the program was started with `-version` flag * `internal/count.go`: File counting functionality, the program should print out the number of files in a given subdirectory (the directory is provided as a command line flag with `-dir`), if none flag is given, no counting should be done * `README.md`: Installation and usage instructions
* Update import paths to match module name, github.com/yourname/aitest should be aitest in main.go * The package names of internal/count.go and internal/version.go should be internal, and not count and version.

go build cmd/aitest/main.go
./main -v
0.0.0
./main -dir .
Number of files in directory .: 4
// Command aitest prints its version (-v) or counts the files in the
// directory given via -dir.
package main
import (
"flag"
"fmt"
"os"
"aitest/internal"
)
func main() {
var versionFlag bool
flag.BoolVar(&versionFlag, "v", false, "print version")
dir := flag.String("dir", "", "directory to count files in")
flag.Parse()
// -v takes precedence: print the version and exit without counting.
if versionFlag {
fmt.Println(internal.GetVersion())
return
}
// Only count when a directory was explicitly supplied.
if *dir != "" {
fileCount, err := internal.CountFiles(*dir)
if err != nil {
fmt.Fprintf(os.Stderr, "Error counting files: %v\n", err)
os.Exit(1)
}
fmt.Printf("Number of files in directory %s: %d\n", *dir, fileCount)
} else {
fmt.Println("No directory specified. No count given.")
}
}
package internal
// Version is the program version reported by the -v flag.
var Version = "0.0.0"
// GetVersion returns the current version string.
func GetVersion() string {
return Version
}
package internal
import (
"os"
)
// CountFiles returns the number of non-directory entries directly inside
// dir (no recursion). Any error from os.ReadDir is returned unchanged.
func CountFiles(dir string) (int, error) {
files, err := os.ReadDir(dir)
if err != nil {
return 0, err
}
count := 0
for _, file := range files {
if !file.IsDir() {
count++
}
}
return count, nil
}
cargo install lsp-ai
# Helix languages.toml: wire the Go language up to gopls, golangci-lint-lsp,
# and the lsp-ai language server for AI-assisted completion.
[[language]]
name = "go"
auto-format= true
diagnostic-severity = "hint"
formatter = { command = "goimports" }
language-servers = [ "gopls", "golangci-lint-lsp", "lsp-ai", "gpt" ]
# lsp-ai server definition (installed via `cargo install lsp-ai`).
[language-server.lsp-ai]
command = "lsp-ai"
# Use a plain in-process file store for context memory.
[language-server.lsp-ai.config.memory]
file_store = { }
# Three local Ollama models are declared; only model1 is used for completion below.
[language-server.lsp-ai.config.models.model1]
type = "ollama"
model = "qwen2.5-coder"
[language-server.lsp-ai.config.models.model2]
type = "ollama"
model = "mistral-nemo:latest"
[language-server.lsp-ai.config.models.model3]
type = "ollama"
model = "deepseek-r1:14b"
# Completion settings: which model to use and its generation parameters.
[language-server.lsp-ai.config.completion]
model = "model1"
[language-server.lsp-ai.config.completion.parameters]
max_tokens = 64
# NOTE(review): 8096 is an unusual context size — 8192 was probably intended; confirm.
max_context = 8096
## Configure the messages per your needs
# System prompt: the model replaces a <CURSOR> marker with a completion.
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "system"
content = "Instructions:\n- You are an AI programming assistant.\n- Given a
piece of code with the cursor location marked by \"<CURSOR>\", replace
\"<CURSOR>\" with the correct code or comment.\n- First, think step-by-step.\n
- Describe your plan for what to build in pseudocode, written out in great
detail.\n- Then output the code replacing the \"<CURSOR>\"\n- Ensure that your
completion fits within the language context of the provided code snippet (e.g.,
Go, Ruby, Bash, Java, Puppet DSL).\n\nRules:\n- Only respond with code or
comments.\n- Only replace \"<CURSOR>\"; do not include any previously written
code.\n- Never include \"<CURSOR>\" in your response\n- If the cursor is within
a comment, complete the comment meaningfully.\n- Handle ambiguous cases by
providing the most contextually appropriate completion.\n- Be consistent with
your responses."
# Few-shot examples teaching the cursor-replacement format.
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "user"
content = "func greet(name) {\n print(f\"Hello, {<CURSOR>}\")\n}"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "assistant"
content = "name"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "user"
content = "func sum(a, b) {\n return a + <CURSOR>\n}"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "assistant"
content = "b"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "user"
content = "func multiply(a, b int ) int {\n a * <CURSOR>\n}"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "assistant"
content = "b"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "user"
content = "// <CURSOR>\nfunc add(a, b) {\n return a + b\n}"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "assistant"
content = "Adds two numbers"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "user"
content = "// This function checks if a number is even\n<CURSOR>"
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "assistant"
content = "func is_even(n) {\n return n % 2 == 0\n}"
# Final user message: {CODE} is substituted with the actual buffer contents.
[[language-server.lsp-ai.config.completion.parameters.messages]]
role = "user"
content = "{CODE}"



paul@f0:~ % doas zpool create -m /data zdata /dev/ada1 paul@f0:~ % zpool list NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT zdata 928G 12.1M 928G - - 0% 0% 1.00x ONLINE - zroot 472G 29.0G 443G - - 0% 6% 1.00x ONLINE - paul@f0:/ % doas camcontrol devlist <512GB SSD D910R170> at scbus0 target 0 lun 0 (pass0,ada0) <Samsung SSD 870 EVO 1TB SVT03B6Q> at scbus1 target 0 lun 0 (pass1,ada1) paul@f0:/ %
paul@f1:/ % doas camcontrol devlist <512GB SSD D910R170> at scbus0 target 0 lun 0 (pass0,ada0) <CT1000BX500SSD1 M6CR072> at scbus1 target 0 lun 0 (pass1,ada1)

paul@f0:/ % doas camcontrol devlist <512GB SSD D910R170> at scbus0 target 0 lun 0 (pass0,ada0) <Samsung SSD 870 EVO 1TB SVT03B6Q> at scbus1 target 0 lun 0 (pass1,ada1) <Generic Flash Disk 8.07> at scbus2 target 0 lun 0 (da0,pass2) paul@f0:/ %
paul@f0:/ % doas newfs /dev/da0
/dev/da0: 15000.0MB (30720000 sectors) block size 32768, fragment size 4096
using 24 cylinder groups of 625.22MB, 20007 blks, 80128 inodes.
with soft updates
super-block backups (for fsck_ffs -b #) at:
192, 1280640, 2561088, 3841536, 5121984, 6402432, 7682880, 8963328, 10243776,
11524224, 12804672, 14085120, 15365568, 16646016, 17926464, 19206912, 20487360,
...
paul@f0:/ % echo '/dev/da0 /keys ufs rw 0 2' | doas tee -a /etc/fstab
/dev/da0 /keys ufs rw 0 2
paul@f0:/ % doas mkdir /keys
paul@f0:/ % doas mount /keys
paul@f0:/ % df | grep keys
/dev/da0 14877596 8 13687384 0% /keys

paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:bhyve.key 32 paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:bhyve.key 32 paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:bhyve.key 32 paul@f0:/keys % doas openssl rand -out /keys/f0.lan.buetow.org:zdata.key 32 paul@f0:/keys % doas openssl rand -out /keys/f1.lan.buetow.org:zdata.key 32 paul@f0:/keys % doas openssl rand -out /keys/f2.lan.buetow.org:zdata.key 32 paul@f0:/keys % doas chown root * paul@f0:/keys % doas chmod 400 * paul@f0:/keys % ls -l total 20 -r-------- 1 root wheel 32 May 25 13:07 f0.lan.buetow.org:bhyve.key -r-------- 1 root wheel 32 May 25 13:07 f1.lan.buetow.org:bhyve.key -r-------- 1 root wheel 32 May 25 13:07 f2.lan.buetow.org:bhyve.key -r-------- 1 root wheel 32 May 25 13:07 f0.lan.buetow.org:zdata.key -r-------- 1 root wheel 32 May 25 13:07 f1.lan.buetow.org:zdata.key -r-------- 1 root wheel 32 May 25 13:07 f2.lan.buetow.org:zdata.key
paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o \ keylocation=file:///keys/`hostname`:zdata.key zdata/enc paul@f0:/ % zfs list | grep zdata zdata 836K 899G 96K /data zdata/enc 200K 899G 200K /data/enc paul@f0:/keys % zfs get all zdata/enc | grep -E -i '(encryption|key)' zdata/enc encryption aes-256-gcm - zdata/enc keylocation file:///keys/f0.lan.buetow.org:zdata.key local zdata/enc keyformat raw - zdata/enc encryptionroot zdata/enc - zdata/enc keystatus available -
paul@f0:/keys % doas vm stop rocky Sending ACPI shutdown to rocky paul@f0:/keys % doas vm list NAME DATASTORE LOADER CPU MEMORY VNC AUTO STATE rocky default uefi 4 14G - Yes [1] Stopped
paul@f0:/keys % doas zfs rename zroot/bhyve zroot/bhyve_old paul@f0:/keys % doas zfs set mountpoint=/mnt zroot/bhyve_old paul@f0:/keys % doas zfs snapshot zroot/bhyve_old/rocky@hamburger paul@f0:/keys % doas zfs create -o encryption=on -o keyformat=raw -o \ keylocation=file:///keys/`hostname`:bhyve.key zroot/bhyve paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve zroot/bhyve paul@f0:/keys % doas zfs set mountpoint=/zroot/bhyve/rocky zroot/bhyve/rocky
paul@f0:/keys % doas zfs send zroot/bhyve_old/rocky@hamburger | \ doas zfs recv zroot/bhyve/rocky paul@f0:/keys % doas cp -Rp /mnt/.config /zroot/bhyve/ paul@f0:/keys % doas cp -Rp /mnt/.img /zroot/bhyve/ paul@f0:/keys % doas cp -Rp /mnt/.templates /zroot/bhyve/ paul@f0:/keys % doas cp -Rp /mnt/.iso /zroot/bhyve/
paul@f0:/keys % doas sysrc zfskeys_enable=YES zfskeys_enable: -> YES paul@f0:/keys % doas vm init paul@f0:/keys % doas reboot . . . paul@f0:~ % doas vm list paul@f0:~ % doas vm list NAME DATASTORE LOADER CPU MEMORY VNC AUTO STATE rocky default uefi 4 14G 0.0.0.0:5900 Yes [1] Running (2265)
paul@f0:~ % doas zfs destroy -R zroot/bhyve_old
paul@f0:~ % zfs get all zroot/bhyve | grep -E '(encryption|key)' zroot/bhyve encryption aes-256-gcm - zroot/bhyve keylocation file:///keys/f0.lan.buetow.org:bhyve.key local zroot/bhyve keyformat raw - zroot/bhyve encryptionroot zroot/bhyve - zroot/bhyve keystatus available - paul@f0:~ % zfs get all zroot/bhyve/rocky | grep -E '(encryption|key)' zroot/bhyve/rocky encryption aes-256-gcm - zroot/bhyve/rocky keylocation none default zroot/bhyve/rocky keyformat raw - zroot/bhyve/rocky encryptionroot zroot/bhyve - zroot/bhyve/rocky keystatus available -
paul@f0:~ % doas pkg install -y zrepl
# On f0 paul@f0:~ % doas zpool list NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT zdata 928G 1.03M 928G - - 0% 0% 1.00x ONLINE - zroot 472G 26.7G 445G - - 0% 5% 1.00x ONLINE - paul@f0:~ % doas zfs list -r zdata/enc NAME USED AVAIL REFER MOUNTPOINT zdata/enc 200K 899G 200K /data/enc # On f1 paul@f1:~ % doas zpool list NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT zdata 928G 956K 928G - - 0% 0% 1.00x ONLINE - zroot 472G 11.7G 460G - - 0% 2% 1.00x ONLINE - paul@f1:~ % doas zfs list -r zdata/enc NAME USED AVAIL REFER MOUNTPOINT zdata/enc 200K 899G 200K /data/enc
# Check WireGuard interface IPs paul@f0:~ % ifconfig wg0 | grep inet inet 192.168.2.130 netmask 0xffffff00 paul@f1:~ % ifconfig wg0 | grep inet inet 192.168.2.131 netmask 0xffffff00
# Create the nfsdata dataset that will hold all data exposed via NFS paul@f0:~ % doas zfs create zdata/enc/nfsdata
paul@f0:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
# zrepl push-side configuration (runs on f0).
# Indentation reconstructed: YAML is whitespace-significant and the original
# nesting follows the zrepl push-job schema.
global:
  logging:
    # Human-readable log output to stdout at info level.
    - type: stdout
      level: info
      format: human
jobs:
  # Replicate the encrypted NFS dataset to f1 every minute.
  - name: f0_to_f1_nfsdata
    type: push
    connect:
      type: tcp
      address: "192.168.2.131:8888"
    filesystems:
      "zdata/enc/nfsdata": true
    send:
      # Send raw encrypted blocks; the receiver never needs the key.
      encrypted: true
    snapshotting:
      type: periodic
      prefix: zrepl_
      interval: 1m
    pruning:
      keep_sender:
        - type: last_n
          count: 10
        - type: grid
          grid: 4x7d | 6x30d
          regex: "^zrepl_.*"
      keep_receiver:
        - type: last_n
          count: 10
        - type: grid
          grid: 4x7d | 6x30d
          regex: "^zrepl_.*"
  # Replicate the FreeBSD bhyve VM dataset every 10 minutes.
  - name: f0_to_f1_freebsd
    type: push
    connect:
      type: tcp
      address: "192.168.2.131:8888"
    filesystems:
      "zroot/bhyve/freebsd": true
    send:
      encrypted: true
    snapshotting:
      type: periodic
      prefix: zrepl_
      interval: 10m
    pruning:
      keep_sender:
        - type: last_n
          count: 10
        - type: grid
          grid: 4x7d
          regex: "^zrepl_.*"
      keep_receiver:
        - type: last_n
          count: 10
        - type: grid
          grid: 4x7d
          regex: "^zrepl_.*"
EOF
# First, create a dedicated sink dataset
paul@f1:~ % doas zfs create zdata/sink
paul@f1:~ % doas tee /usr/local/etc/zrepl/zrepl.yml <<'EOF'
# zrepl sink-side configuration (runs on f1).
# Indentation reconstructed: YAML is whitespace-significant and the original
# nesting follows the zrepl sink-job schema.
global:
  logging:
    - type: stdout
      level: info
      format: human
jobs:
  # Accept replication streams from f0; datasets land under zdata/sink/f0/...
  - name: sink
    type: sink
    serve:
      type: tcp
      listen: "192.168.2.131:8888"
      clients:
        # Map f0's WireGuard address to the client identity "f0".
        "192.168.2.130": "f0"
    recv:
      placeholder:
        # Keep received datasets' encryption as sent (raw/encrypted).
        encryption: inherit
    root_fs: "zdata/sink"
EOF
# On f0 paul@f0:~ % doas sysrc zrepl_enable=YES zrepl_enable: -> YES paul@f0:~ % doas service zrepl start Starting zrepl. # On f1 paul@f1:~ % doas sysrc zrepl_enable=YES zrepl_enable: -> YES paul@f1:~ % doas service zrepl start Starting zrepl.
# On f0, check `zrepl` status (use raw mode for non-tty)
paul@f0:~ % doas pkg install jq
paul@f0:~ % doas zrepl status --mode raw | grep -A2 "Replication" | jq .
"Replication":{"StartAt":"2025-07-01T22:31:48.712143123+03:00"...
# Check if services are running
paul@f0:~ % doas service zrepl status
zrepl is running as pid 2649.
paul@f1:~ % doas service zrepl status
zrepl is running as pid 2574.
# Check for `zrepl` snapshots on source
paul@f0:~ % doas zfs list -t snapshot -r zdata/enc | grep zrepl
zdata/enc@zrepl_20250701_193148_000 0B - 176K -
# On f1, verify the replicated datasets
paul@f1:~ % doas zfs list -r zdata | grep f0
zdata/f0 576K 899G 200K none
zdata/f0/zdata 376K 899G 200K none
zdata/f0/zdata/enc 176K 899G 176K none
# Check replicated snapshots on f1
paul@f1:~ % doas zfs list -t snapshot -r zdata | grep zrepl
zdata/f0/zdata/enc@zrepl_20250701_193148_000 0B - 176K -
zdata/f0/zdata/enc@zrepl_20250701_194148_000 0B - 176K -
.
.
.
paul@f0:~ % doas zrepl status

paul@f0:~ % uptime 11:17PM up 1 min, 0 users, load averages: 0.16, 0.06, 0.02 paul@f0:~ % doas service zrepl status zrepl is running as pid 2366. paul@f1:~ % doas service zrepl status zrepl is running as pid 2309. # Check that new snapshots are being created and replicated paul@f0:~ % doas zfs list -t snapshot | grep zrepl | tail -2 zdata/enc/nfsdata@zrepl_20250701_202530_000 0B - 200K - zroot/bhyve/freebsd@zrepl_20250701_202530_000 0B - 2.97G - . . . paul@f1:~ % doas zfs list -t snapshot -r zdata/sink | grep 202530 zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_202530_000 0B - 176K - zdata/sink/f0/zroot/bhyve/freebsd@zrepl_20250701_202530_000 0B - 2.97G - . . .
# On f0 - set mountpoint for the primary nfsdata paul@f0:~ % doas zfs set mountpoint=/data/nfs zdata/enc/nfsdata paul@f0:~ % doas mkdir -p /data/nfs # Verify it's mounted paul@f0:~ % df -h /data/nfs Filesystem Size Used Avail Capacity Mounted on zdata/enc/nfsdata 899G 204K 899G 0% /data/nfs
# On f1 - first check encryption status
paul@f1:~ % doas zfs get keystatus zdata/sink/f0/zdata/enc/nfsdata
NAME PROPERTY VALUE SOURCE
zdata/sink/f0/zdata/enc/nfsdata keystatus unavailable -
# Load the encryption key (using f0's key stored on the USB)
paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key \
zdata/sink/f0/zdata/enc/nfsdata
# Set mountpoint and mount (same path as f0 for easier failover)
paul@f1:~ % doas mkdir -p /data/nfs
paul@f1:~ % doas zfs set mountpoint=/data/nfs zdata/sink/f0/zdata/enc/nfsdata
paul@f1:~ % doas zfs mount zdata/sink/f0/zdata/enc/nfsdata
# Make it read-only to prevent accidental writes that would break replication
paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
# Verify
paul@f1:~ % df -h /data/nfs
Filesystem Size Used Avail Capacity Mounted on
zdata/sink/f0/zdata/enc/nfsdata 896G 204K 896G 0% /data/nfs
# Option 1: Rollback to the last common snapshot (loses local changes) paul@f1:~ % doas zfs rollback zdata/sink/f0/zdata/enc/nfsdata@zrepl_20250701_204054_000 # Option 2: Make it read-only to prevent accidents again paul@f1:~ % doas zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
paul@f0:~ % doas zfs list -o name,mountpoint,mounted | grep nfsdata zdata/enc/nfsdata /data/nfs yes
paul@f0:~ % doas zfs get keystatus zdata/enc/nfsdata NAME PROPERTY VALUE SOURCE zdata/enc/nfsdata keystatus available - # If "unavailable", load the key: paul@f0:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata paul@f0:~ % doas zfs mount zdata/enc/nfsdata
paul@f0:~ % ls -la /data/nfs/.zfs/snapshot/zrepl_*/
# On f0 - configure all encrypted datasets paul@f0:~ % doas sysrc zfskeys_enable=YES zfskeys_enable: YES -> YES paul@f0:~ % doas sysrc zfskeys_datasets="zdata/enc zdata/enc/nfsdata zroot/bhyve" zfskeys_datasets: -> zdata/enc zdata/enc/nfsdata zroot/bhyve # Set correct key locations for all datasets paul@f0:~ % doas zfs set \ keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata # On f1 - include the replicated dataset paul@f1:~ % doas sysrc zfskeys_enable=YES zfskeys_enable: YES -> YES paul@f1:~ % doas sysrc \ zfskeys_datasets="zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata" zfskeys_datasets: -> zdata/enc zroot/bhyve zdata/sink/f0/zdata/enc/nfsdata # Set key location for replicated dataset paul@f1:~ % doas zfs set \ keylocation=file:///keys/f0.lan.buetow.org:zdata.key zdata/sink/f0/zdata/enc/nfsdata
# Check service status on both f0 and f1 paul@f0:~ % doas service zrepl status paul@f1:~ % doas service zrepl status # If not running, start the service paul@f0:~ % doas service zrepl start paul@f1:~ % doas service zrepl start
# Check detailed status (use --mode raw for non-tty environments) paul@f0:~ % doas zrepl status --mode raw # Look for error messages in the replication section # Common errors include "no common snapshot" or connection failures
no common snapshot or suitable bookmark between sender and receiver
# First, identify the destination dataset on f1 paul@f1:~ % doas zfs list | grep sink # Check existing snapshots on the problematic dataset paul@f1:~ % doas zfs list -t snapshot | grep nfsdata # If you see snapshots with different naming (e.g., @daily-*, @weekly-*) # these conflict with zrepl's @zrepl_* snapshots # Destroy the entire destination dataset to allow clean replication paul@f1:~ % doas zfs destroy -r zdata/sink/f0/zdata/enc/nfsdata # For VM replication, do the same for the freebsd dataset paul@f1:~ % doas zfs destroy -r zdata/sink/f0/zroot/bhyve/freebsd # Wake up zrepl to start fresh replication paul@f0:~ % doas zrepl signal wakeup f0_to_f1_nfsdata paul@f0:~ % doas zrepl signal wakeup f0_to_f1_freebsd # Check replication status paul@f0:~ % doas zrepl status --mode raw
# Look for "stepping" state and active zfs send processes paul@f0:~ % doas zrepl status --mode raw | grep -A5 "State.*stepping" # Check for active ZFS commands paul@f0:~ % doas zrepl status --mode raw | grep -A10 "ZFSCmds.*Active" # Monitor progress - bytes replicated should be increasing paul@f0:~ % doas zrepl status --mode raw | grep BytesReplicated
# Test connectivity between nodes paul@f0:~ % nc -zv 192.168.2.131 8888 # Check if zrepl is listening on f1 paul@f1:~ % doas netstat -an | grep 8888 # Verify WireGuard tunnel is working paul@f0:~ % ping 192.168.2.131
# Verify encryption keys are available on both nodes
paul@f0:~ % doas zfs get keystatus zdata/enc/nfsdata
paul@f1:~ % doas zfs get keystatus zdata/sink/f0/zdata/enc/nfsdata
# Load keys if unavailable
paul@f1:~ % doas zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key \
zdata/sink/f0/zdata/enc/nfsdata
# Monitor replication progress (run repeatedly to check status) paul@f0:~ % doas zrepl status --mode raw | grep -A10 BytesReplicated # Or install watch from ports and use it paul@f0:~ % doas pkg install watch paul@f0:~ % watch -n 5 'doas zrepl status --mode raw | grep -A10 BytesReplicated' # Check for new snapshots being created paul@f0:~ % doas zfs list -t snapshot | grep zrepl | tail -5 # Verify snapshots appear on receiver paul@f1:~ % doas zfs list -t snapshot -r zdata/sink | grep zrepl | tail -5
# On f0 - The virtual IP 192.168.1.138 will float between f0 and f1 ifconfig_re0_alias0="inet vhid 1 pass testpass alias 192.168.1.138/32" # On f1 - Higher advskew means lower priority, so f0 wins elections ifconfig_re0_alias0="inet vhid 1 advskew 100 pass testpass alias 192.168.1.138/32"
192.168.2.138 f3s-storage-ha f3s-storage-ha.wg0 f3s-storage-ha.wg0.wan.buetow.org fd42:beef:cafe:2::138 f3s-storage-ha f3s-storage-ha.wg0 f3s-storage-ha.wg0.wan.buetow.org
paul@f0:~ % cat <<END | doas tee -a /etc/devd.conf
# Run carpcontrol.sh on every CARP state transition reported by the kernel.
notify 0 {
# Kernel CARP events.
match "system" "CARP";
# Subsystem is "<vhid>@<interface>", e.g. "1@re0".
match "subsystem" "[0-9]+@[0-9a-z.]+";
# Only react to MASTER/BACKUP transitions (other states are ignored here).
match "type" "(MASTER|BACKUP)";
# devd expands $subsystem and $type before running the action.
action "/usr/local/bin/carpcontrol.sh $subsystem $type";
};
END
paul@f0:~ % doas service devd restart
paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
#!/bin/sh
# CARP state change control script
# Invoked by devd as: carpcontrol.sh <vhid>@<interface> <MASTER|BACKUP>
# $2 carries the new CARP state; this initial version only logs the change.
case "$2" in
MASTER)
logger "CARP state changed to MASTER, starting services"
;;
BACKUP)
logger "CARP state changed to BACKUP, stopping services"
;;
*)
# Any other state (e.g. INIT) is logged but otherwise ignored.
logger "CARP state changed to $2 (unhandled)"
;;
esac
EOF
paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
# Copy the same script to f1
paul@f0:~ % scp /usr/local/bin/carpcontrol.sh f1:/tmp/
paul@f1:~ % doas mv /tmp/carpcontrol.sh /usr/local/bin/
paul@f1:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
paul@f0:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf carp_load="YES" paul@f1:~ % echo 'carp_load="YES"' | doas tee -a /boot/loader.conf carp_load="YES"
paul@f0:~ % doas sysrc nfs_server_enable=YES nfs_server_enable: YES -> YES paul@f0:~ % doas sysrc nfsv4_server_enable=YES nfsv4_server_enable: YES -> YES paul@f0:~ % doas sysrc nfsuserd_enable=YES nfsuserd_enable: YES -> YES paul@f0:~ % doas sysrc nfsuserd_flags="-domain lan.buetow.org" nfsuserd_flags: "" -> "-domain lan.buetow.org" paul@f0:~ % doas sysrc mountd_enable=YES mountd_enable: NO -> YES paul@f0:~ % doas sysrc rpcbind_enable=YES rpcbind_enable: NO -> YES
# First, ensure the dataset is mounted paul@f0:~ % doas zfs get mounted zdata/enc/nfsdata NAME PROPERTY VALUE SOURCE zdata/enc/nfsdata mounted yes - # Create the k3svolumes directory paul@f0:~ % doas mkdir -p /data/nfs/k3svolumes paul@f0:~ % doas chmod 755 /data/nfs/k3svolumes
paul@f0:~ % doas tee /etc/exports <<'EOF' V4: /data/nfs -sec=sys /data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255 EOF
paul@f0:~ % doas service rpcbind start Starting rpcbind. paul@f0:~ % doas service mountd start Starting mountd. paul@f0:~ % doas service nfsd start Starting nfsd. paul@f0:~ % doas service nfsuserd start Starting nfsuserd.
CARP VIP (192.168.1.138)
|
f0 (MASTER) ←---------→|←---------→ f1 (BACKUP)
stunnel:2323 | stunnel:stopped
nfsd:2049 | nfsd:stopped
|
Clients connect here
# On f0 - Create CA
paul@f0:~ % doas mkdir -p /usr/local/etc/stunnel/ca
paul@f0:~ % cd /usr/local/etc/stunnel/ca
paul@f0:~ % doas openssl genrsa -out ca-key.pem 4096
paul@f0:~ % doas openssl req -new -x509 -days 3650 -key ca-key.pem -out ca-cert.pem \
-subj '/C=US/ST=State/L=City/O=F3S Storage/CN=F3S Stunnel CA'
# Create server certificate
paul@f0:~ % cd /usr/local/etc/stunnel
paul@f0:~ % doas openssl genrsa -out server-key.pem 4096
paul@f0:~ % doas openssl req -new -key server-key.pem -out server.csr \
-subj '/C=US/ST=State/L=City/O=F3S Storage/CN=f3s-storage-ha.lan'
paul@f0:~ % doas openssl x509 -req -days 3650 -in server.csr -CA ca/ca-cert.pem \
-CAkey ca/ca-key.pem -CAcreateserial -out server-cert.pem
# Create client certificates for authorised clients
paul@f0:~ % cd /usr/local/etc/stunnel/ca
paul@f0:~ % doas sh -c 'for client in r0 r1 r2 earth; do
openssl genrsa -out ${client}-key.pem 4096
openssl req -new -key ${client}-key.pem -out ${client}.csr \
-subj "/C=US/ST=State/L=City/O=F3S Storage/CN=${client}.lan.buetow.org"
openssl x509 -req -days 3650 -in ${client}.csr -CA ca-cert.pem \
-CAkey ca-key.pem -CAcreateserial -out ${client}-cert.pem
# Combine cert and key into a single file for stunnel client
cat ${client}-cert.pem ${client}-key.pem > ${client}-stunnel.pem
done'
# Install stunnel paul@f0:~ % doas pkg install -y stunnel # Configure stunnel server with client certificate authentication paul@f0:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF' cert = /usr/local/etc/stunnel/server-cert.pem key = /usr/local/etc/stunnel/server-key.pem setuid = stunnel setgid = stunnel [nfs-tls] accept = 192.168.1.138:2323 connect = 127.0.0.1:2049 CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem verify = 2 requireCert = yes EOF # Enable and start stunnel paul@f0:~ % doas sysrc stunnel_enable=YES stunnel_enable: -> YES paul@f0:~ % doas service stunnel start Starting stunnel. # Restart stunnel to apply the CARP VIP binding paul@f0:~ % doas service stunnel restart Stopping stunnel. Starting stunnel.
paul@f1:~ % doas sysrc nfs_server_enable=YES nfs_server_enable: NO -> YES paul@f1:~ % doas sysrc nfsv4_server_enable=YES nfsv4_server_enable: NO -> YES paul@f1:~ % doas sysrc nfsuserd_enable=YES nfsuserd_enable: NO -> YES paul@f1:~ % doas sysrc mountd_enable=YES mountd_enable: NO -> YES paul@f1:~ % doas sysrc rpcbind_enable=YES rpcbind_enable: NO -> YES paul@f1:~ % doas tee /etc/exports <<'EOF' V4: /data/nfs -sec=sys /data/nfs -alldirs -maproot=root -network 127.0.0.1 -mask 255.255.255.255 EOF paul@f1:~ % doas service rpcbind start Starting rpcbind. paul@f1:~ % doas service mountd start Starting mountd. paul@f1:~ % doas service nfsd start Starting nfsd. paul@f1:~ % doas service nfsuserd start Starting nfsuserd.
# Install stunnel paul@f1:~ % doas pkg install -y stunnel # Copy certificates from f0 paul@f0:~ % doas tar -cf /tmp/stunnel-certs.tar \ -C /usr/local/etc/stunnel server-cert.pem server-key.pem ca paul@f0:~ % scp /tmp/stunnel-certs.tar f1:/tmp/ paul@f1:~ % cd /usr/local/etc/stunnel && doas tar -xf /tmp/stunnel-certs.tar # Configure stunnel server on f1 with client certificate authentication paul@f1:~ % doas tee /usr/local/etc/stunnel/stunnel.conf <<'EOF' cert = /usr/local/etc/stunnel/server-cert.pem key = /usr/local/etc/stunnel/server-key.pem setuid = stunnel setgid = stunnel [nfs-tls] accept = 192.168.1.138:2323 connect = 127.0.0.1:2049 CAfile = /usr/local/etc/stunnel/ca/ca-cert.pem verify = 2 requireCert = yes EOF # Enable and start stunnel paul@f1:~ % doas sysrc stunnel_enable=YES stunnel_enable: -> YES paul@f1:~ % doas service stunnel start Starting stunnel. # Restart stunnel to apply the CARP VIP binding paul@f1:~ % doas service stunnel restart Stopping stunnel. Starting stunnel.
# Create CARP control script on both f0 and f1
paul@f0:~ % doas tee /usr/local/bin/carpcontrol.sh <<'EOF'
#!/bin/sh
# CARP state change control script
# Invoked by devd as: carpcontrol.sh <vhid>@<interface> <MASTER|BACKUP>
HOSTNAME=`hostname`
# The marker file only exists while the encrypted NFS dataset is mounted at
# /data/nfs; if it is missing, load the key and mount the correct dataset
# for this host before acting on the state change.
if [ ! -f /data/nfs/nfs.DO_NOT_REMOVE ]; then
logger '/data/nfs not mounted, mounting it now!'
if [ "$HOSTNAME" = 'f0.lan.buetow.org' ]; then
# f0 owns the primary dataset.
zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/enc/nfsdata
zfs set mountpoint=/data/nfs zdata/enc/nfsdata
else
# Other hosts mount the replicated copy; readonly=on prevents local writes
# that would break incremental zrepl replication.
zfs load-key -L file:///keys/f0.lan.buetow.org:zdata.key zdata/sink/f0/zdata/enc/nfsdata
zfs set mountpoint=/data/nfs zdata/sink/f0/zdata/enc/nfsdata
zfs mount zdata/sink/f0/zdata/enc/nfsdata
zfs set readonly=on zdata/sink/f0/zdata/enc/nfsdata
fi
# Stop NFS services so a subsequent MASTER event restarts them against the
# freshly mounted dataset.
service nfsd stop 2>&1
service mountd stop 2>&1
fi
case "$2" in
MASTER)
logger "CARP state changed to MASTER, starting services"
service rpcbind start >/dev/null 2>&1
service mountd start >/dev/null 2>&1
service nfsd start >/dev/null 2>&1
service nfsuserd start >/dev/null 2>&1
# restart (not start) so stunnel rebinds to the CARP VIP just acquired.
service stunnel restart >/dev/null 2>&1
logger "CARP MASTER: NFS and stunnel services started"
;;
BACKUP)
logger "CARP state changed to BACKUP, stopping services"
service stunnel stop >/dev/null 2>&1
service nfsd stop >/dev/null 2>&1
service mountd stop >/dev/null 2>&1
service nfsuserd stop >/dev/null 2>&1
logger "CARP BACKUP: NFS and stunnel services stopped"
;;
*)
logger "CARP state changed to $2 (unhandled)"
;;
esac
EOF
paul@f0:~ % doas chmod +x /usr/local/bin/carpcontrol.sh
# Create the CARP management script
paul@f0:~ % doas tee /usr/local/bin/carp <<'EOF'
#!/bin/sh
# CARP state management script
# Usage: carp [master|backup|auto-failback enable|auto-failback disable]
# Without arguments: shows current state
# Find the interface with CARP configured
# Iterates all interfaces and echoes the first one whose ifconfig output
# contains a "carp:" line.
# NOTE(review): assumes a single CARP interface/vhid on this host — confirm.
CARP_IF=$(ifconfig -l | xargs -n1 | while read if; do
ifconfig "$if" 2>/dev/null | grep -q "carp:" && echo "$if" && break
done)
if [ -z "$CARP_IF" ]; then
echo "Error: No CARP interface found"
exit 1
fi
# Get CARP VHID
# Extracts the numeric vhid from the interface's "carp:" status line.
VHID=$(ifconfig "$CARP_IF" | grep "carp:" | sed -n 's/.*vhid \([0-9]*\).*/\1/p')
if [ -z "$VHID" ]; then
echo "Error: Could not determine CARP VHID"
exit 1
fi
# Function to get the current state
# Prints the second field of the "carp:" line (MASTER/BACKUP/INIT).
get_state() {
ifconfig "$CARP_IF" | grep "carp:" | awk '{print $2}'
}
# Check for auto-failback block file
# The block file lives on the shared NFS dataset; its presence disables
# automatic failback (used during maintenance).
BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
check_auto_failback() {
if [ -f "$BLOCK_FILE" ]; then
echo "WARNING: Auto-failback is DISABLED (file exists: $BLOCK_FILE)"
fi
}
# Main logic
case "$1" in
"")
# No argument - show current state
STATE=$(get_state)
echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
check_auto_failback
;;
master)
# Force to MASTER state
echo "Setting CARP to MASTER state..."
ifconfig "$CARP_IF" vhid "$VHID" state master
# Give CARP a moment to settle before re-reading the state.
sleep 1
STATE=$(get_state)
echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
check_auto_failback
;;
backup)
# Force to BACKUP state
echo "Setting CARP to BACKUP state..."
ifconfig "$CARP_IF" vhid "$VHID" state backup
sleep 1
STATE=$(get_state)
echo "CARP state on $CARP_IF (vhid $VHID): $STATE"
check_auto_failback
;;
auto-failback)
# Second argument toggles the block file on the shared dataset.
case "$2" in
enable)
if [ -f "$BLOCK_FILE" ]; then
rm "$BLOCK_FILE"
echo "Auto-failback ENABLED (removed $BLOCK_FILE)"
else
echo "Auto-failback was already enabled"
fi
;;
disable)
if [ ! -f "$BLOCK_FILE" ]; then
touch "$BLOCK_FILE"
echo "Auto-failback DISABLED (created $BLOCK_FILE)"
else
echo "Auto-failback was already disabled"
fi
;;
*)
echo "Usage: $0 auto-failback [enable|disable]"
echo " enable: Remove block file to allow automatic failback"
echo " disable: Create block file to prevent automatic failback"
exit 1
;;
esac
;;
*)
echo "Usage: $0 [master|backup|auto-failback enable|auto-failback disable]"
echo " Without arguments: show current CARP state"
echo " master: force this node to become CARP MASTER"
echo " backup: force this node to become CARP BACKUP"
echo " auto-failback enable: allow automatic failback to f0"
echo " auto-failback disable: prevent automatic failback to f0"
exit 1
;;
esac
EOF
paul@f0:~ % doas chmod +x /usr/local/bin/carp
# Copy to f1 as well
paul@f0:~ % scp /usr/local/bin/carp f1:/tmp/
paul@f1:~ % doas cp /tmp/carp /usr/local/bin/carp && doas chmod +x /usr/local/bin/carp
# Check current CARP state paul@f0:~ % doas carp CARP state on re0 (vhid 1): MASTER # If auto-failback is disabled, you'll see a warning paul@f0:~ % doas carp CARP state on re0 (vhid 1): MASTER WARNING: Auto-failback is DISABLED (file exists: /data/nfs/nfs.NO_AUTO_FAILBACK) # Force f0 to become BACKUP (triggers failover to f1) paul@f0:~ % doas carp backup Setting CARP to BACKUP state... CARP state on re0 (vhid 1): BACKUP # Disable auto-failback (useful for maintenance) paul@f0:~ % doas carp auto-failback disable Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK) # Enable auto-failback paul@f0:~ % doas carp auto-failback enable Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
paul@f0:~ % doas tee /usr/local/bin/carp-auto-failback.sh <<'EOF'
#!/bin/sh
# CARP automatic failback script for f0
# Ensures f0 reclaims MASTER role after reboot when storage is ready
# Run every minute from root's crontab; all skip decisions are logged.
LOGFILE="/var/log/carp-auto-failback.log"
# Marker proves the real ZFS dataset (not an empty mountpoint dir) is mounted.
MARKER_FILE="/data/nfs/nfs.DO_NOT_REMOVE"
# Presence of the block file disables failback (maintenance mode).
BLOCK_FILE="/data/nfs/nfs.NO_AUTO_FAILBACK"
log_message() {
echo "$(date '+%Y-%m-%d %H:%M:%S') - $1" >> "$LOGFILE"
}
# Check if we're already MASTER
# NOTE(review): `carp` may also print a WARNING line when auto-failback is
# disabled; awk then emits one word per line and the comparison below will
# not equal "MASTER" — confirm this is acceptable (the BLOCK_FILE check
# later would skip anyway).
CURRENT_STATE=$(/usr/local/bin/carp | awk '{print $NF}')
if [ "$CURRENT_STATE" = "MASTER" ]; then
exit 0
fi
# Check if /data/nfs is mounted
if ! mount | grep -q "on /data/nfs "; then
log_message "SKIP: /data/nfs not mounted"
exit 0
fi
# Check if the marker file exists
# (identifies that the ZFS data set is properly mounted)
if [ ! -f "$MARKER_FILE" ]; then
log_message "SKIP: Marker file $MARKER_FILE not found"
exit 0
fi
# Check if failback is blocked (for maintenance)
if [ -f "$BLOCK_FILE" ]; then
log_message "SKIP: Failback blocked by $BLOCK_FILE"
exit 0
fi
# All conditions met - promote to MASTER
log_message "CONDITIONS MET: Promoting to MASTER (was $CURRENT_STATE)"
/usr/local/bin/carp master
# Log result
# Wait for CARP to settle before reading the new state.
sleep 2
NEW_STATE=$(/usr/local/bin/carp | awk '{print $NF}')
log_message "Failback complete: State is now $NEW_STATE"
# If successful, log to the system log too
if [ "$NEW_STATE" = "MASTER" ]; then
logger "CARP: f0 automatically reclaimed MASTER role"
fi
EOF
paul@f0:~ % doas chmod +x /usr/local/bin/carp-auto-failback.sh
paul@f0:~ % doas touch /data/nfs/nfs.DO_NOT_REMOVE
paul@f0:~ % echo "* * * * * /usr/local/bin/carp-auto-failback.sh" | doas crontab -
paul@f0:~ % doas carp auto-failback disable Auto-failback DISABLED (created /data/nfs/nfs.NO_AUTO_FAILBACK)
paul@f0:~ % doas carp auto-failback enable Auto-failback ENABLED (removed /data/nfs/nfs.NO_AUTO_FAILBACK)
paul@f0:~ % doas carp CARP state on re0 (vhid 1): MASTER # If disabled, you'll see: WARNING: Auto-failback is DISABLED
# Install stunnel on client (example for `r0`) [root@r0 ~]# dnf install -y stunnel nfs-utils # Copy client certificate and CA certificate from f0 [root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/r0-stunnel.pem /etc/stunnel/ [root@r0 ~]# scp f0:/usr/local/etc/stunnel/ca/ca-cert.pem /etc/stunnel/ # Configure stunnel client with certificate authentication [root@r0 ~]# tee /etc/stunnel/stunnel.conf <<'EOF' cert = /etc/stunnel/r0-stunnel.pem CAfile = /etc/stunnel/ca-cert.pem client = yes verify = 2 [nfs-ha] accept = 127.0.0.1:2323 connect = 192.168.1.138:2323 EOF # Enable and start stunnel [root@r0 ~]# systemctl enable --now stunnel # Repeat for r1 and r2 with their respective certificates
[General] Domain = lan.buetow.org . . .
[root@r0 ~]# echo 'fs.inotify.max_user_instances = 512' > /etc/sysctl.d/99-inotify.conf [root@r0 ~]# sysctl -w fs.inotify.max_user_instances=512
[root@r0 ~]# systemctl start nfs-idmapd [root@r0 ~]# systemctl enable --now nfs-client.target
# Create a mount point [root@r0 ~]# mkdir -p /data/nfs/k3svolumes # Mount through stunnel (using localhost and NFSv4) [root@r0 ~]# mount -t nfs4 -o port=2323 127.0.0.1:/k3svolumes /data/nfs/k3svolumes # Verify mount [root@r0 ~]# mount | grep k3svolumes 127.0.0.1:/k3svolumes on /data/nfs/k3svolumes type nfs4 (rw,relatime,vers=4.2,rsize=131072,wsize=131072, namlen=255,hard,proto=tcp,port=2323,timeo=600,retrans=2,sec=sys, clientaddr=127.0.0.1,local_lock=none,addr=127.0.0.1) # For persistent mount, add to /etc/fstab: 127.0.0.1:/k3svolumes /data/nfs/k3svolumes nfs4 port=2323,_netdev,soft,timeo=10,retrans=2,intr 0 0
# On f0 (current MASTER) - trigger failover
paul@f0:~ % doas ifconfig re0 vhid 1 state backup
# On f1 - verify it becomes MASTER
paul@f1:~ % ifconfig re0 | grep carp
inet 192.168.1.138 netmask 0xffffffff broadcast 192.168.1.138 vhid 1
# Check stunnel is now listening on f1
paul@f1:~ % doas sockstat -l | grep 2323
stunnel stunnel 4567 3 tcp4 192.168.1.138:2323 *:*
# On client - verify NFS mount still works
[root@r0 ~]# ls /data/nfs/k3svolumes/
[root@r0 ~]# echo "Test after failover" > /data/nfs/k3svolumes/failover-test.txt
# Force unmount and remount [root@r0 ~]# umount -f /data/nfs/k3svolumes [root@r0 ~]# mount /data/nfs/k3svolumes
[root@r0 ~]# cat > /usr/local/bin/check-nfs-mount.sh << 'EOF'
#!/bin/bash
# Fast NFS mount health monitor - runs every 10 seconds via systemd timer
#
# Strategy: when the mount is missing or unresponsive, escalate through
#   remount -> plain mount -> forced umount + mount
# and exit non-zero only when every step failed.
MOUNT_POINT="/data/nfs/k3svolumes"
LOCK_FILE="/var/run/nfs-mount-check.lock"
# Take the lock atomically via a noclobber redirection. The previous
# `[ -f "$LOCK_FILE" ]` check followed by `touch` was racy: two overlapping
# timer runs could both pass the existence test before either created the
# file. With noclobber, exactly one process wins the create.
if ! (set -o noclobber; echo "$$" > "$LOCK_FILE") 2>/dev/null; then
exit 0
fi
trap 'rm -f "$LOCK_FILE"' EXIT
# Try increasingly aggressive ways to restore the NFS mount.
# Returns early on success; exits 1 when nothing worked.
fix_mount () {
echo "Attempting to remount NFS mount $MOUNT_POINT"
# NOTE(review): on util-linux, `mount -f` means "fake" (don't actually
# mount) - confirm this flag combination is intended here.
if mount -o remount -f "$MOUNT_POINT" 2>/dev/null; then
echo "Remount command issued for $MOUNT_POINT"
else
echo "Failed to remount NFS mount $MOUNT_POINT"
fi
echo "Checking if $MOUNT_POINT is a mountpoint"
if mountpoint "$MOUNT_POINT" >/dev/null 2>&1; then
echo "$MOUNT_POINT is a valid mountpoint"
else
echo "$MOUNT_POINT is not a valid mountpoint, attempting mount"
if mount "$MOUNT_POINT"; then
echo "Successfully mounted $MOUNT_POINT"
return
else
echo "Failed to mount $MOUNT_POINT"
fi
fi
echo "Attempting to unmount $MOUNT_POINT"
if umount -f "$MOUNT_POINT" 2>/dev/null; then
echo "Successfully unmounted $MOUNT_POINT"
else
echo "Failed to unmount $MOUNT_POINT (it might not be mounted)"
fi
echo "Attempting to mount $MOUNT_POINT"
if mount "$MOUNT_POINT"; then
echo "NFS mount $MOUNT_POINT mounted successfully"
return
else
echo "Failed to mount NFS mount $MOUNT_POINT"
fi
echo "Failed to fix NFS mount $MOUNT_POINT"
exit 1
}
# Case 1: the mount entry is missing entirely.
if ! mountpoint "$MOUNT_POINT" >/dev/null 2>&1; then
echo "NFS mount $MOUNT_POINT not found"
fix_mount
fi
# Case 2: mounted but hung (e.g. stale handle after a CARP failover);
# a stat with a 2-second timeout detects an unresponsive server quickly.
if ! timeout 2s stat "$MOUNT_POINT" >/dev/null 2>&1; then
echo "NFS mount $MOUNT_POINT appears to be unresponsive"
fix_mount
fi
EOF
[root@r0 ~]# chmod +x /usr/local/bin/check-nfs-mount.sh
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.service << 'EOF' [Unit] Description=NFS Mount Health Monitor After=network-online.target [Service] Type=oneshot ExecStart=/usr/local/bin/check-nfs-mount.sh StandardOutput=journal StandardError=journal EOF
[root@r0 ~]# cat > /etc/systemd/system/nfs-mount-monitor.timer << 'EOF' [Unit] Description=Run NFS Mount Health Monitor every 10 seconds Requires=nfs-mount-monitor.service [Timer] OnBootSec=30s OnUnitActiveSec=10s AccuracySec=1s [Install] WantedBy=timers.target EOF
[root@r0 ~]# systemctl daemon-reload
[root@r0 ~]# systemctl enable nfs-mount-monitor.timer
[root@r0 ~]# systemctl start nfs-mount-monitor.timer
# Check status
[root@r0 ~]# systemctl status nfs-mount-monitor.timer
● nfs-mount-monitor.timer - Run NFS Mount Health Monitor every 10 seconds
Loaded: loaded (/etc/systemd/system/nfs-mount-monitor.timer; enabled)
Active: active (waiting) since Sat 2025-07-06 10:00:00 EEST
Trigger: Sat 2025-07-06 10:00:10 EEST; 8s left
# Monitor logs
[root@r0 ~]# journalctl -u nfs-mount-monitor -f
# 1. Check the initial state
paul@f0:~ % ifconfig re0 | grep carp
carp: MASTER vhid 1 advbase 1 advskew 0
paul@f1:~ % ifconfig re0 | grep carp
carp: BACKUP vhid 1 advbase 1 advskew 100
# 2. Create a test file from a client
[root@r0 ~]# echo "test before failover" > /data/nfs/k3svolumes/test-before.txt
# 3. Trigger failover (f0 → f1)
paul@f0:~ % doas ifconfig re0 vhid 1 state backup
# 4. Monitor client behaviour
[root@r0 ~]# ls /data/nfs/k3svolumes/
ls: cannot access '/data/nfs/k3svolumes/': Stale file handle
# 5. Check automatic recovery (within 10 seconds)
[root@r0 ~]# journalctl -u nfs-mount-monitor -f
Jul 06 10:15:32 r0 nfs-monitor[1234]: NFS mount unhealthy detected at \
Sun Jul 6 10:15:32 EEST 2025
Jul 06 10:15:32 r0 nfs-monitor[1234]: Attempting to fix stale NFS mount at \
Sun Jul 6 10:15:32 EEST 2025
Jul 06 10:15:33 r0 nfs-monitor[1234]: NFS mount fixed at \
Sun Jul 6 10:15:33 EEST 2025
paul@f0:~ % doas zpool online -e /dev/ada1
paul@f0:~ % doas zpool list NAME SIZE ALLOC FREE CKPOINT EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT zdata 3.63T 677G 2.97T - - 3% 18% 1.00x ONLINE - zroot 472G 68.4G 404G - - 13% 14% 1.00x ONLINE - paul@f0:~ % doas camcontrol devlist <512GB SSD D910R170> at scbus0 target 0 lun 0 (pass0,ada0) <SD Ultra 3D 4TB 530500WD> at scbus1 target 0 lun 0 (pass1,ada1) <Generic Flash Disk 8.07> at scbus2 target 0 lun 0 (da0,pass2)
paul@f1:~ % doas camcontrol devlist <512GB SSD D910R170> at scbus0 target 0 lun 0 (pass0,ada0) <WD Blue SA510 2.5 4TB 530500WD> at scbus1 target 0 lun 0 (pass1,ada1) <Generic Flash Disk 8.07> at scbus2 target 0 lun 0 (da0,pass2)



paul@f0:~ % doas freebsd-update fetch paul@f0:~ % doas freebsd-update install paul@f0:~ % doas shutdown -r now .. .. paul@f0:~ % doas pkg update paul@f0:~ % doas pkg upgrade paul@f0:~ % reboot
paul@f0:~ % doas pkg install wireguard-tools paul@f0:~ % doas sysrc wireguard_interfaces=wg0 wireguard_interfaces: -> wg0 paul@f0:~ % doas sysrc wireguard_enable=YES wireguard_enable: -> YES paul@f0:~ % doas mkdir -p /usr/local/etc/wireguard paul@f0:~ % doas touch /usr/local/etc/wireguard/wg0.conf paul@f0:~ % doas service wireguard start paul@f0:~ % doas wg show interface: wg0 public key: L+V9o0fNYkMVKNqsX7spBzD/9oSvxM/C7ZCZX1jLO3Q= private key: (hidden) listening port: 20246
paul@f0:~ % cat <<END | doas tee -a /etc/hosts 192.168.1.120 r0 r0.lan r0.lan.buetow.org 192.168.1.121 r1 r1.lan r1.lan.buetow.org 192.168.1.122 r2 r2.lan r2.lan.buetow.org 192.168.2.130 f0.wg0 f0.wg0.wan.buetow.org 192.168.2.131 f1.wg0 f1.wg0.wan.buetow.org 192.168.2.132 f2.wg0 f2.wg0.wan.buetow.org 192.168.2.120 r0.wg0 r0.wg0.wan.buetow.org 192.168.2.121 r1.wg0 r1.wg0.wan.buetow.org 192.168.2.122 r2.wg0 r2.wg0.wan.buetow.org 192.168.2.110 blowfish.wg0 blowfish.wg0.wan.buetow.org 192.168.2.111 fishfinger.wg0 fishfinger.wg0.wan.buetow.org fd42:beef:cafe:2::130 f0.wg0 f0.wg0.wan.buetow.org fd42:beef:cafe:2::131 f1.wg0 f1.wg0.wan.buetow.org fd42:beef:cafe:2::132 f2.wg0 f2.wg0.wan.buetow.org fd42:beef:cafe:2::120 r0.wg0 r0.wg0.wan.buetow.org fd42:beef:cafe:2::121 r1.wg0 r1.wg0.wan.buetow.org fd42:beef:cafe:2::122 r2.wg0 r2.wg0.wan.buetow.org fd42:beef:cafe:2::110 blowfish.wg0 blowfish.wg0.wan.buetow.org fd42:beef:cafe:2::111 fishfinger.wg0 fishfinger.wg0.wan.buetow.org END
[root@r0 ~] dnf update -y [root@r0 ~] reboot
[root@r0 ~] dnf install -y wireguard-tools [root@r0 ~] mkdir -p /etc/wireguard [root@r0 ~] touch /etc/wireguard/wg0.conf [root@r0 ~] systemctl enable wg-quick@wg0.service [root@r0 ~] systemctl start wg-quick@wg0.service [root@r0 ~] systemctl disable firewalld
[root@r0 ~] cat <<END >>/etc/hosts 192.168.1.130 f0 f0.lan f0.lan.buetow.org 192.168.1.131 f1 f1.lan f1.lan.buetow.org 192.168.1.132 f2 f2.lan f2.lan.buetow.org 192.168.2.130 f0.wg0 f0.wg0.wan.buetow.org 192.168.2.131 f1.wg0 f1.wg0.wan.buetow.org 192.168.2.132 f2.wg0 f2.wg0.wan.buetow.org 192.168.2.120 r0.wg0 r0.wg0.wan.buetow.org 192.168.2.121 r1.wg0 r1.wg0.wan.buetow.org 192.168.2.122 r2.wg0 r2.wg0.wan.buetow.org 192.168.2.110 blowfish.wg0 blowfish.wg0.wan.buetow.org 192.168.2.111 fishfinger.wg0 fishfinger.wg0.wan.buetow.org fd42:beef:cafe:2::130 f0.wg0 f0.wg0.wan.buetow.org fd42:beef:cafe:2::131 f1.wg0 f1.wg0.wan.buetow.org fd42:beef:cafe:2::132 f2.wg0 f2.wg0.wan.buetow.org fd42:beef:cafe:2::120 r0.wg0 r0.wg0.wan.buetow.org fd42:beef:cafe:2::121 r1.wg0 r1.wg0.wan.buetow.org fd42:beef:cafe:2::122 r2.wg0 r2.wg0.wan.buetow.org fd42:beef:cafe:2::110 blowfish.wg0 blowfish.wg0.wan.buetow.org fd42:beef:cafe:2::111 fishfinger.wg0 fishfinger.wg0.wan.buetow.org END
[root@r0 ~] dnf install -y policycoreutils-python-utils [root@r0 ~] semanage permissive -a wireguard_t [root@r0 ~] reboot
blowfish$ doas pkg_add wireguard-tools blowfish$ doas mkdir /etc/wireguard blowfish$ doas touch /etc/wireguard/wg0.conf blowfish$ cat <<END | doas tee /etc/hostname.wg0 inet 192.168.2.110 255.255.255.0 NONE up !/usr/local/bin/wg setconf wg0 /etc/wireguard/wg0.conf END
blowfish$ cat <<END | doas tee -a /etc/hosts 192.168.2.130 f0.wg0 f0.wg0.wan.buetow.org 192.168.2.131 f1.wg0 f1.wg0.wan.buetow.org 192.168.2.132 f2.wg0 f2.wg0.wan.buetow.org 192.168.2.120 r0.wg0 r0.wg0.wan.buetow.org 192.168.2.121 r1.wg0 r1.wg0.wan.buetow.org 192.168.2.122 r2.wg0 r2.wg0.wan.buetow.org 192.168.2.110 blowfish.wg0 blowfish.wg0.wan.buetow.org 192.168.2.111 fishfinger.wg0 fishfinger.wg0.wan.buetow.org 192.168.2.200 earth.wg0 earth.wg0.wan.buetow.org 192.168.2.201 pixel7pro.wg0 pixel7pro.wg0.wan.buetow.org fd42:beef:cafe:2::130 f0.wg0 f0.wg0.wan.buetow.org fd42:beef:cafe:2::131 f1.wg0 f1.wg0.wan.buetow.org fd42:beef:cafe:2::132 f2.wg0 f2.wg0.wan.buetow.org fd42:beef:cafe:2::120 r0.wg0 r0.wg0.wan.buetow.org fd42:beef:cafe:2::121 r1.wg0 r1.wg0.wan.buetow.org fd42:beef:cafe:2::122 r2.wg0 r2.wg0.wan.buetow.org fd42:beef:cafe:2::110 blowfish.wg0 blowfish.wg0.wan.buetow.org fd42:beef:cafe:2::111 fishfinger.wg0 fishfinger.wg0.wan.buetow.org fd42:beef:cafe:2::200 earth.wg0 earth.wg0.wan.buetow.org fd42:beef:cafe:2::201 pixel7pro.wg0 pixel7pro.wg0.wan.buetow.org END
# NAT for WireGuard clients to access internet match out on vio0 from 192.168.2.0/24 to any nat-to (vio0) # Allow inbound traffic on WireGuard interface pass in on wg0 # Allow all UDP traffic on WireGuard port pass in inet proto udp from any to any port 56709
blowfish$ doas pfctl -f /etc/pf.conf
[Interface] # f0.wg0.wan.buetow.org Address = 192.168.2.130 PrivateKey = ************************** ListenPort = 56709 [Peer] # f1.lan.buetow.org as f1.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 192.168.2.131/32 Endpoint = 192.168.1.131:56709 # No KeepAlive configured [Peer] # f2.lan.buetow.org as f2.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 192.168.2.132/32 Endpoint = 192.168.1.132:56709 # No KeepAlive configured [Peer] # r0.lan.buetow.org as r0.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 192.168.2.120/32 Endpoint = 192.168.1.120:56709 # No KeepAlive configured [Peer] # r1.lan.buetow.org as r1.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 192.168.2.121/32 Endpoint = 192.168.1.121:56709 # No KeepAlive configured [Peer] # r2.lan.buetow.org as r2.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 192.168.2.122/32 Endpoint = 192.168.1.122:56709 # No KeepAlive configured [Peer] # blowfish.buetow.org as blowfish.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 192.168.2.110/32 Endpoint = 23.88.35.144:56709 PersistentKeepalive = 25 [Peer] # fishfinger.buetow.org as fishfinger.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 192.168.2.111/32 Endpoint = 46.23.94.99:56709 PersistentKeepalive = 25
[Interface] # pixel7pro.wg0.wan.buetow.org Address = 192.168.2.201 PrivateKey = ************************** ListenPort = 56709 DNS = 1.1.1.1, 8.8.8.8 [Peer] # blowfish.buetow.org as blowfish.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 0.0.0.0/0, ::/0 Endpoint = 23.88.35.144:56709 PersistentKeepalive = 25 [Peer] # fishfinger.buetow.org as fishfinger.wg0.wan.buetow.org PublicKey = ************************** PresharedKey = ************************** AllowedIPs = 0.0.0.0/0, ::/0 Endpoint = 46.23.94.99:56709 PersistentKeepalive = 25
> git clone https://codeberg.org/snonux/wireguardmeshgenerator > cd ./wireguardmeshgenerator > bundle install > sudo dnf install -y wireguard-tools
---
hosts:
f0:
os: FreeBSD
ssh:
user: paul
conf_dir: /usr/local/etc/wireguard
sudo_cmd: doas
reload_cmd: service wireguard reload
lan:
domain: 'lan.buetow.org'
ip: '192.168.1.130'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.130'
ipv6: 'fd42:beef:cafe:2::130'
exclude_peers:
- earth
- pixel7pro
f1:
os: FreeBSD
ssh:
user: paul
conf_dir: /usr/local/etc/wireguard
sudo_cmd: doas
reload_cmd: service wireguard reload
lan:
domain: 'lan.buetow.org'
ip: '192.168.1.131'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.131'
ipv6: 'fd42:beef:cafe:2::131'
exclude_peers:
- earth
- pixel7pro
f2:
os: FreeBSD
ssh:
user: paul
conf_dir: /usr/local/etc/wireguard
sudo_cmd: doas
reload_cmd: service wireguard reload
lan:
domain: 'lan.buetow.org'
ip: '192.168.1.132'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.132'
ipv6: 'fd42:beef:cafe:2::132'
exclude_peers:
- earth
- pixel7pro
r0:
os: Linux
ssh:
user: root
conf_dir: /etc/wireguard
sudo_cmd:
reload_cmd: systemctl reload wg-quick@wg0.service
lan:
domain: 'lan.buetow.org'
ip: '192.168.1.120'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.120'
ipv6: 'fd42:beef:cafe:2::120'
exclude_peers:
- earth
- pixel7pro
r1:
os: Linux
ssh:
user: root
conf_dir: /etc/wireguard
sudo_cmd:
reload_cmd: systemctl reload wg-quick@wg0.service
lan:
domain: 'lan.buetow.org'
ip: '192.168.1.121'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.121'
ipv6: 'fd42:beef:cafe:2::121'
exclude_peers:
- earth
- pixel7pro
r2:
os: Linux
ssh:
user: root
conf_dir: /etc/wireguard
sudo_cmd:
reload_cmd: systemctl reload wg-quick@wg0.service
lan:
domain: 'lan.buetow.org'
ip: '192.168.1.122'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.122'
ipv6: 'fd42:beef:cafe:2::122'
exclude_peers:
- earth
- pixel7pro
blowfish:
os: OpenBSD
ssh:
user: rex
port: 2
conf_dir: /etc/wireguard
sudo_cmd: doas
reload_cmd: sh /etc/netstart wg0
internet:
domain: 'buetow.org'
ip: '23.88.35.144'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.110'
ipv6: 'fd42:beef:cafe:2::110'
exclude_peers:
- earth
- pixel7pro
fishfinger:
os: OpenBSD
ssh:
user: rex
port: 2
conf_dir: /etc/wireguard
sudo_cmd: doas
reload_cmd: sh /etc/netstart wg0
internet:
domain: 'buetow.org'
ip: '46.23.94.99'
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.111'
ipv6: 'fd42:beef:cafe:2::111'
exclude_peers:
- earth
- pixel7pro
earth:
os: Linux
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.200'
ipv6: 'fd42:beef:cafe:2::200'
exclude_peers:
- f0
- f1
- f2
- r0
- r1
- r2
- pixel7pro
pixel7pro:
os: Android
wg0:
domain: 'wg0.wan.buetow.org'
ip: '192.168.2.201'
ipv6: 'fd42:beef:cafe:2::201'
exclude_peers:
- f0
- f1
- f2
- r0
- r1
- r2
- earth
# Entry point: parse CLI flags, load the mesh definition from YAML and run
# the requested action(s) for every selected host. Any failure prints the
# error plus backtrace and exits with status 2.
begin
  options = { hosts: [] }

  OptionParser.new do |opts|
    opts.banner = 'Usage: wireguardmeshgenerator.rb [options]'
    opts.on('--generate', 'Generate Wireguard configs') { options[:generate] = true }
    opts.on('--install', 'Install Wireguard configs') { options[:install] = true }
    opts.on('--clean', 'Clean Wireguard configs') { options[:clean] = true }
    opts.on('--hosts=HOSTS', 'Comma separated hosts to configure') do |list|
      options[:hosts] = list.split(',')
    end
  end.parse!

  conf = YAML.load_file('wireguardmeshgenerator.yaml').freeze
  all_hosts = conf['hosts']

  # No --hosts flag means "all hosts"; otherwise only the listed ones.
  selected = all_hosts.keys.select do |host|
    options[:hosts].empty? || options[:hosts].include?(host)
  end

  selected.each do |host|
    # Generate the Wireguard configuration for the host.
    WireguardConfig.new(host, all_hosts).generate! if options[:generate]
    # Upload, install and reload the Wireguard configuration on the host.
    InstallConfig.new(host, all_hosts).upload!.install!.reload! if options[:install]
    # Remove the generated Wireguard configuration for the host.
    WireguardConfig.new(host, all_hosts).clean! if options[:clean]
  end
rescue StandardError => e
  puts "Error: #{e.message}"
  puts e.backtrace.join("\n")
  exit 2
end
# Rakefile: thin task wrappers around wireguardmeshgenerator.rb.
task :generate do
  ruby 'wireguardmeshgenerator.rb', '--generate'
end

task :clean do
  ruby 'wireguardmeshgenerator.rb', '--clean'
end

task :install do
  ruby 'wireguardmeshgenerator.rb', '--install'
end

task default: :generate
> rake generate /usr/bin/ruby wireguardmeshgenerator.rb --generate Generating dist/f0/etc/wireguard/wg0.conf Generating dist/f1/etc/wireguard/wg0.conf Generating dist/f2/etc/wireguard/wg0.conf Generating dist/r0/etc/wireguard/wg0.conf Generating dist/r1/etc/wireguard/wg0.conf Generating dist/r2/etc/wireguard/wg0.conf Generating dist/blowfish/etc/wireguard/wg0.conf Generating dist/fishfinger/etc/wireguard/wg0.conf Generating dist/earth/etc/wireguard/wg0.conf Generating dist/pixel7pro/etc/wireguard/wg0.conf
> find keys/ -type f keys/f0/priv.key keys/f0/pub.key keys/psk/f0_f1.key keys/psk/f0_f2.key keys/psk/f0_r0.key keys/psk/f0_r1.key keys/psk/f0_r2.key keys/psk/blowfish_f0.key keys/psk/f0_fishfinger.key keys/psk/f1_f2.key keys/psk/f1_r0.key keys/psk/f1_r1.key keys/psk/f1_r2.key keys/psk/blowfish_f1.key keys/psk/f1_fishfinger.key keys/psk/f2_r0.key keys/psk/f2_r1.key keys/psk/f2_r2.key keys/psk/blowfish_f2.key keys/psk/f2_fishfinger.key keys/psk/r0_r1.key keys/psk/r0_r2.key keys/psk/blowfish_r0.key keys/psk/fishfinger_r0.key keys/psk/r1_r2.key keys/psk/blowfish_r1.key keys/psk/fishfinger_r1.key keys/psk/blowfish_r2.key keys/psk/fishfinger_r2.key keys/psk/blowfish_fishfinger.key keys/psk/blowfish_earth.key keys/psk/earth_fishfinger.key keys/psk/blowfish_pixel7pro.key keys/psk/fishfinger_pixel7pro.key keys/f1/priv.key keys/f1/pub.key keys/f2/priv.key keys/f2/pub.key keys/r0/priv.key keys/r0/pub.key keys/r1/priv.key keys/r1/pub.key keys/r2/priv.key keys/r2/pub.key keys/blowfish/priv.key keys/blowfish/pub.key keys/fishfinger/priv.key keys/fishfinger/pub.key keys/earth/priv.key keys/earth/pub.key keys/pixel7pro/priv.key keys/pixel7pro/pub.key
> rake install /usr/bin/ruby wireguardmeshgenerator.rb --install Uploading dist/f0/etc/wireguard/wg0.conf to f0.lan.buetow.org:. Installing Wireguard config on f0 Uploading cmd.sh to f0.lan.buetow.org:. + [ ! -d /usr/local/etc/wireguard ] + doas chmod 700 /usr/local/etc/wireguard + doas mv -v wg0.conf /usr/local/etc/wireguard wg0.conf -> /usr/local/etc/wireguard/wg0.conf + doas chmod 644 /usr/local/etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on f0 Uploading cmd.sh to f0.lan.buetow.org:. + doas service wireguard reload + rm cmd.sh Uploading dist/f1/etc/wireguard/wg0.conf to f1.lan.buetow.org:. Installing Wireguard config on f1 Uploading cmd.sh to f1.lan.buetow.org:. + [ ! -d /usr/local/etc/wireguard ] + doas chmod 700 /usr/local/etc/wireguard + doas mv -v wg0.conf /usr/local/etc/wireguard wg0.conf -> /usr/local/etc/wireguard/wg0.conf + doas chmod 644 /usr/local/etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on f1 Uploading cmd.sh to f1.lan.buetow.org:. + doas service wireguard reload + rm cmd.sh Uploading dist/f2/etc/wireguard/wg0.conf to f2.lan.buetow.org:. Installing Wireguard config on f2 Uploading cmd.sh to f2.lan.buetow.org:. + [ ! -d /usr/local/etc/wireguard ] + doas chmod 700 /usr/local/etc/wireguard + doas mv -v wg0.conf /usr/local/etc/wireguard wg0.conf -> /usr/local/etc/wireguard/wg0.conf + doas chmod 644 /usr/local/etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on f2 Uploading cmd.sh to f2.lan.buetow.org:. + doas service wireguard reload + rm cmd.sh Uploading dist/r0/etc/wireguard/wg0.conf to r0.lan.buetow.org:. Installing Wireguard config on r0 Uploading cmd.sh to r0.lan.buetow.org:. + '[' '!' -d /etc/wireguard ']' + chmod 700 /etc/wireguard + mv -v wg0.conf /etc/wireguard renamed 'wg0.conf' -> '/etc/wireguard/wg0.conf' + chmod 644 /etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on r0 Uploading cmd.sh to r0.lan.buetow.org:. 
+ systemctl reload wg-quick@wg0.service + rm cmd.sh Uploading dist/r1/etc/wireguard/wg0.conf to r1.lan.buetow.org:. Installing Wireguard config on r1 Uploading cmd.sh to r1.lan.buetow.org:. + '[' '!' -d /etc/wireguard ']' + chmod 700 /etc/wireguard + mv -v wg0.conf /etc/wireguard renamed 'wg0.conf' -> '/etc/wireguard/wg0.conf' + chmod 644 /etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on r1 Uploading cmd.sh to r1.lan.buetow.org:. + systemctl reload wg-quick@wg0.service + rm cmd.sh Uploading dist/r2/etc/wireguard/wg0.conf to r2.lan.buetow.org:. Installing Wireguard config on r2 Uploading cmd.sh to r2.lan.buetow.org:. + '[' '!' -d /etc/wireguard ']' + chmod 700 /etc/wireguard + mv -v wg0.conf /etc/wireguard renamed 'wg0.conf' -> '/etc/wireguard/wg0.conf' + chmod 644 /etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on r2 Uploading cmd.sh to r2.lan.buetow.org:. + systemctl reload wg-quick@wg0.service + rm cmd.sh Uploading dist/blowfish/etc/wireguard/wg0.conf to blowfish.buetow.org:. Installing Wireguard config on blowfish Uploading cmd.sh to blowfish.buetow.org:. + [ ! -d /etc/wireguard ] + doas chmod 700 /etc/wireguard + doas mv -v wg0.conf /etc/wireguard wg0.conf -> /etc/wireguard/wg0.conf + doas chmod 644 /etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on blowfish Uploading cmd.sh to blowfish.buetow.org:. + doas sh /etc/netstart wg0 + rm cmd.sh Uploading dist/fishfinger/etc/wireguard/wg0.conf to fishfinger.buetow.org:. Installing Wireguard config on fishfinger Uploading cmd.sh to fishfinger.buetow.org:. + [ ! -d /etc/wireguard ] + doas chmod 700 /etc/wireguard + doas mv -v wg0.conf /etc/wireguard wg0.conf -> /etc/wireguard/wg0.conf + doas chmod 644 /etc/wireguard/wg0.conf + rm cmd.sh Reloading Wireguard on fishfinger Uploading cmd.sh to fishfinger.buetow.org:. + doas sh /etc/netstart wg0 + rm cmd.sh
> rake clean > rake generate > rake install
> sudo dnf install qrencode > qrencode -t ansiutf8 < dist/pixel7pro/etc/wireguard/wg0.conf
> sudo cp dist/earth/etc/wireguard/wg0.conf /etc/wireguard/ > sudo chmod 600 /etc/wireguard/wg0.conf > sudo systemctl start wg-quick@wg0.service # Start manually > sudo systemctl disable wg-quick@wg0.service # Prevent auto-start
fd42:beef:cafe:2::110/64 - blowfish.wg0 (OpenBSD gateway) fd42:beef:cafe:2::111/64 - fishfinger.wg0 (OpenBSD gateway) fd42:beef:cafe:2::120/64 - r0.wg0 (Rocky Linux VM) fd42:beef:cafe:2::121/64 - r1.wg0 (Rocky Linux VM) fd42:beef:cafe:2::122/64 - r2.wg0 (Rocky Linux VM) fd42:beef:cafe:2::130/64 - f0.wg0 (FreeBSD host) fd42:beef:cafe:2::131/64 - f1.wg0 (FreeBSD host) fd42:beef:cafe:2::132/64 - f2.wg0 (FreeBSD host) fd42:beef:cafe:2::200/64 - earth.wg0 (roaming laptop) fd42:beef:cafe:2::201/64 - pixel7pro.wg0 (roaming phone)
# Renders the [Interface] Address directive(s) for the host currently being
# generated. Emits both an IPv4 and an IPv6 line when a ULA address is
# configured; OpenBSD hosts get a placeholder comment instead, because they
# assign addresses via /etc/hostname.wg0 rather than the WireGuard config.
def address
  return '# No Address = ... for OpenBSD here' if hosts[myself]['os'] == 'OpenBSD'

  wg0 = hosts[myself]['wg0']
  lines = ["Address = #{wg0['ip']}"]
  lines << "Address = #{wg0['ipv6']}/64" if wg0['ipv6']
  lines.join("\n")
end
# Roaming peers route all traffic through the gateway (full-tunnel);
# mesh peers are restricted to their own /32 (plus /128 when IPv6 is set).
allowed_ips =
  if is_roaming
    '0.0.0.0/0, ::/0'
  else
    ipv4 = data['wg0']['ip']
    ipv6 = data['wg0']['ipv6']
    ipv6 ? "#{ipv4}/32, #{ipv6}/128" : "#{ipv4}/32"
  end
# NAT for WireGuard clients to access internet (IPv4) match out on vio0 from 192.168.2.0/24 to any nat-to (vio0) # NAT66 for WireGuard clients to access internet (IPv6) # Uses NPTv6 (Network Prefix Translation) to translate ULA to public IPv6 match out on vio0 inet6 from fd42:beef:cafe:2::/64 to any nat-to (vio0) # Allow all UDP traffic on WireGuard port (IPv4 and IPv6) pass in inet proto udp from any to any port 56709 pass in inet6 proto udp from any to any port 56709
rex@blowfish:~ $ doas vi /etc/hostname.wg0
inet 192.168.2.110 255.255.255.0 NONE inet6 fd42:beef:cafe:2::110 64 up !/usr/local/bin/wg setconf wg0 /etc/wireguard/wg0.conf
rex@blowfish:~ $ doas sh /etc/netstart wg0 rex@blowfish:~ $ ifconfig wg0 | grep inet6 inet6 fd42:beef:cafe:2::110 prefixlen 64
# From r0 (Rocky Linux VM) root@r0:~ # ping -c 2 192.168.2.130 # IPv4 to f0 64 bytes from 192.168.2.130: icmp_seq=1 ttl=64 time=2.12 ms 64 bytes from 192.168.2.130: icmp_seq=2 ttl=64 time=0.681 ms root@r0:~ # ping6 -c 2 fd42:beef:cafe:2::130 # IPv6 to f0 64 bytes from fd42:beef:cafe:2::130: icmp_seq=1 ttl=64 time=2.16 ms 64 bytes from fd42:beef:cafe:2::130: icmp_seq=2 ttl=64 time=0.909 ms
paul@f0:~ % doas wg show interface: wg0 public key: Jm6YItMt94++dIeOyVi1I9AhNt2qQcryxCZezoX7X2Y= private key: (hidden) listening port: 56709 peer: 8PvGZH1NohHpZPVJyjhctBX9xblsNvYBhpg68FsFcns= preshared key: (hidden) endpoint: 46.23.94.99:56709 allowed ips: 192.168.2.111/32, fd42:beef:cafe:2::111/128 latest handshake: 1 minute, 46 seconds ago transfer: 124 B received, 1.75 KiB sent persistent keepalive: every 25 seconds peer: Xow+d3qVXgUMk4pcRSQ6Fe+vhYBa3VDyHX/4jrGoKns= preshared key: (hidden) endpoint: 23.88.35.144:56709 allowed ips: 192.168.2.110/32, fd42:beef:cafe:2::110/128 latest handshake: 1 minute, 52 seconds ago transfer: 124 B received, 1.60 KiB sent persistent keepalive: every 25 seconds peer: s3e93XoY7dPUQgLiVO4d8x/SRCFgEew+/wP7+zwgehI= preshared key: (hidden) endpoint: 192.168.1.120:56709 allowed ips: 192.168.2.120/32, fd42:beef:cafe:2::120/128 peer: 2htXdNcxzpI2FdPDJy4T4VGtm1wpMEQu1AkQHjNY6F8= preshared key: (hidden) endpoint: 192.168.1.131:56709 allowed ips: 192.168.2.131/32, fd42:beef:cafe:2::131/128 peer: 0Y/H20W8YIbF7DA1sMwMacLI8WS9yG+1/QO7m2oyllg= preshared key: (hidden) endpoint: 192.168.1.122:56709 allowed ips: 192.168.2.122/32, fd42:beef:cafe:2::122/128 peer: Hhy9kMPOOjChXV2RA5WeCGs+J0FE3rcNPDw/TLSn7i8= preshared key: (hidden) endpoint: 192.168.1.121:56709 allowed ips: 192.168.2.121/32, fd42:beef:cafe:2::121/128 peer: SlGVsACE1wiaRoGvCR3f7AuHfRS+1jjhS+YwEJ2HvF0= preshared key: (hidden) endpoint: 192.168.1.132:56709 allowed ips: 192.168.2.132/32, fd42:beef:cafe:2::132/128
paul@f0:~ % foreach peer ( f1 f2 r0 r1 r2 blowfish fishfinger ) foreach? ping -c2 $peer.wg0 foreach? echo foreach? end PING f1.wg0 (192.168.2.131): 56 data bytes 64 bytes from 192.168.2.131: icmp_seq=0 ttl=64 time=0.334 ms 64 bytes from 192.168.2.131: icmp_seq=1 ttl=64 time=0.260 ms --- f1.wg0 ping statistics --- 2 packets transmitted, 2 packets received, 0.0% packet loss round-trip min/avg/max/stddev = 0.260/0.297/0.334/0.037 ms PING f2.wg0 (192.168.2.132): 56 data bytes 64 bytes from 192.168.2.132: icmp_seq=0 ttl=64 time=0.323 ms 64 bytes from 192.168.2.132: icmp_seq=1 ttl=64 time=0.303 ms --- f2.wg0 ping statistics --- 2 packets transmitted, 2 packets received, 0.0% packet loss round-trip min/avg/max/stddev = 0.303/0.313/0.323/0.010 ms PING r0.wg0 (192.168.2.120): 56 data bytes 64 bytes from 192.168.2.120: icmp_seq=0 ttl=64 time=0.716 ms 64 bytes from 192.168.2.120: icmp_seq=1 ttl=64 time=0.406 ms --- r0.wg0 ping statistics --- 2 packets transmitted, 2 packets received, 0.0% packet loss round-trip min/avg/max/stddev = 0.406/0.561/0.716/0.155 ms PING r1.wg0 (192.168.2.121): 56 data bytes 64 bytes from 192.168.2.121: icmp_seq=0 ttl=64 time=0.639 ms 64 bytes from 192.168.2.121: icmp_seq=1 ttl=64 time=0.629 ms --- r1.wg0 ping statistics --- 2 packets transmitted, 2 packets received, 0.0% packet loss round-trip min/avg/max/stddev = 0.629/0.634/0.639/0.005 ms PING r2.wg0 (192.168.2.122): 56 data bytes 64 bytes from 192.168.2.122: icmp_seq=0 ttl=64 time=0.569 ms 64 bytes from 192.168.2.122: icmp_seq=1 ttl=64 time=0.479 ms --- r2.wg0 ping statistics --- 2 packets transmitted, 2 packets received, 0.0% packet loss round-trip min/avg/max/stddev = 0.479/0.524/0.569/0.045 ms PING blowfish.wg0 (192.168.2.110): 56 data bytes 64 bytes from 192.168.2.110: icmp_seq=0 ttl=255 time=35.745 ms 64 bytes from 192.168.2.110: icmp_seq=1 ttl=255 time=35.481 ms --- blowfish.wg0 ping statistics --- 2 packets transmitted, 2 packets received, 0.0% packet loss round-trip min/avg/max/stddev = 
35.481/35.613/35.745/0.132 ms PING fishfinger.wg0 (192.168.2.111): 56 data bytes 64 bytes from 192.168.2.111: icmp_seq=0 ttl=255 time=33.992 ms 64 bytes from 192.168.2.111: icmp_seq=1 ttl=255 time=33.751 ms --- fishfinger.wg0 ping statistics --- 2 packets transmitted, 2 packets received, 0.0% packet loss round-trip min/avg/max/stddev = 33.751/33.872/33.992/0.120 ms
paul@f0:~ % doas wg show interface: wg0 public key: Jm6YItMt94++dIeOyVi1I9AhNt2qQcryxCZezoX7X2Y= private key: (hidden) listening port: 56709 peer: 0Y/H20W8YIbF7DA1sMwMacLI8WS9yG+1/QO7m2oyllg= preshared key: (hidden) endpoint: 192.168.1.122:56709 allowed ips: 192.168.2.122/32, fd42:beef:cafe:2::122/128 latest handshake: 10 seconds ago transfer: 440 B received, 532 B sent peer: Hhy9kMPOOjChXV2RA5WeCGs+J0FE3rcNPDw/TLSn7i8= preshared key: (hidden) endpoint: 192.168.1.121:56709 allowed ips: 192.168.2.121/32, fd42:beef:cafe:2::121/128 latest handshake: 12 seconds ago transfer: 440 B received, 564 B sent peer: s3e93XoY7dPUQgLiVO4d8x/SRCFgEew+/wP7+zwgehI= preshared key: (hidden) endpoint: 192.168.1.120:56709 allowed ips: 192.168.2.120/32, fd42:beef:cafe:2::120/128 latest handshake: 14 seconds ago transfer: 440 B received, 564 B sent peer: SlGVsACE1wiaRoGvCR3f7AuHfRS+1jjhS+YwEJ2HvF0= preshared key: (hidden) endpoint: 192.168.1.132:56709 allowed ips: 192.168.2.132/32, fd42:beef:cafe:2::132/128 latest handshake: 17 seconds ago transfer: 472 B received, 564 B sent peer: Xow+d3qVXgUMk4pcRSQ6Fe+vhYBa3VDyHX/4jrGoKns= preshared key: (hidden) endpoint: 23.88.35.144:56709 allowed ips: 192.168.2.110/32, fd42:beef:cafe:2::110/128 latest handshake: 55 seconds ago transfer: 472 B received, 596 B sent persistent keepalive: every 25 seconds peer: 8PvGZH1NohHpZPVJyjhctBX9xblsNvYBhpg68FsFcns= preshared key: (hidden) endpoint: 46.23.94.99:56709 allowed ips: 192.168.2.111/32, fd42:beef:cafe:2::111/128 latest handshake: 55 seconds ago transfer: 472 B received, 596 B sent persistent keepalive: every 25 seconds peer: 2htXdNcxzpI2FdPDJy4T4VGtm1wpMEQu1AkQHjNY6F8= preshared key: (hidden) endpoint: 192.168.1.131:56709 allowed ips: 192.168.2.131/32, fd42:beef:cafe:2::131/128
qrencode -t ansiutf8 < dist/pixel7pro/etc/wireguard/wg0-blowfish.conf qrencode -t ansiutf8 < dist/pixel7pro/etc/wireguard/wg0-fishfinger.conf
sudo cp dist/earth/etc/wireguard/wg0-blowfish.conf /etc/wireguard/ sudo cp dist/earth/etc/wireguard/wg0-fishfinger.conf /etc/wireguard/
# Start with blowfish gateway earth$ sudo systemctl start wg-quick@wg0-blowfish.service # Or start with fishfinger gateway earth$ sudo systemctl start wg-quick@wg0-fishfinger.service # Check tunnel status (example with blowfish gateway) earth$ sudo wg show interface: wg0 public key: Mc1CpSS3rbLN9A2w9c75XugQyXUkGPHKI2iCGbh8DRo= private key: (hidden) listening port: 56709 fwmark: 0xca6c peer: Xow+d3qVXgUMk4pcRSQ6Fe+vhYBa3VDyHX/4jrGoKns= preshared key: (hidden) endpoint: 23.88.35.144:56709 allowed ips: 0.0.0.0/0, ::/0 latest handshake: 5 seconds ago transfer: 15.89 KiB received, 32.15 KiB sent persistent keepalive: every 25 seconds
earth$ sudo systemctl stop wg-quick@wg0-blowfish.service # Or if using fishfinger: earth$ sudo systemctl stop wg-quick@wg0-fishfinger.service earth$ sudo wg show # No output - WireGuard interface is down
# Switch from blowfish to fishfinger earth$ sudo systemctl stop wg-quick@wg0-blowfish.service earth$ sudo systemctl start wg-quick@wg0-fishfinger.service
# From earth laptop: earth$ ping -c2 blowfish.wg0 earth$ ping -c2 fishfinger.wg0 earth$ curl https://ifconfig.me # Should show gateway's public IP
_______ s
|.-----.| s
|| Tmux|| s
||_.-._|| |\ \\\\__ o s
`--)-(--` | \_/ o \ o s
__[=== o]__ > _ (( <_ oo s
|:::::::::::|\ | / \__+___/ s
jgs `-=========-`() |/ |/ s
mod. by Paul B.
alias tn 'tmux::new' alias ta 'tmux::attach' alias tx 'tmux::remote' alias ts 'tmux::search' alias tssh 'tmux::cluster_ssh' alias tm tmux alias tl 'tmux list-sessions' alias foo 'tmux::new foo' alias bar 'tmux::new bar' alias baz 'tmux::new baz'
# Create a new session, or attach to it if it already exists
# Create a session (named $argv[1], or an auto-generated "T<epoch>" name
# when no argument is given) and attach to it - or switch clients when
# already running inside tmux. Cleans up stale throwaway sessions first.
function tmux::new
    set -l name $argv[1]
    _tmux::cleanup_default
    if test -n "$name"
        tmux new-session -d -s $name
        tmux -2 attach-session -t $name || tmux -2 switch-client -t $name
    else
        # No name given: recurse once with a timestamp-based throwaway name.
        tmux::new (string join "" T (date +%s))
    end
end
# Kill leftover throwaway sessions auto-named by tmux::new (names starting
# with "T"), but never ones that still have a client attached.
function _tmux::cleanup_default
# Pipeline: list sessions -> keep "T..." entries -> drop attached ones ->
# split on ':' to isolate the session name for the loop.
# NOTE(review): `string split ':'` emits every field on its own line, so
# `read` also sees the text after the session name; presumably
# `tmux kill-session` just fails harmlessly for those values - confirm.
tmux list-sessions | string match -r '^T.*: ' | string match -v -r attached | string split ':' | while read -l s
echo "Killing $s"
tmux kill-session -t "$s"
end
end
# Attach to the given session (or the most recent one when no argument is
# given); fall back to creating it via tmux::new when attaching fails.
function tmux::attach
    set -l target $argv[1]
    if test -n "$target"
        tmux attach-session -t $target || tmux::new $target
    else
        tmux attach-session || tmux::new
    end
end
# Per-host remote session: SSH (with agent forwarding and a TTY) into the
# server and attach to tmux there; when the local wrapper session already
# exists, detach other clients and re-attach to it instead.
function tmux::remote
    set -l host $argv[1]
    tmux new -s $host "ssh -A -t $host 'tmux attach-session || tmux'" || tmux attach-session -d -t $host
end
set-option -g prefix C-g
# Fuzzy-pick a session name with fzf, then switch to it when already inside
# tmux, or attach to it when outside ($TMUX unset).
function tmux::search
set -l picked (tmux list-sessions | fzf | cut -d: -f1)
if test -n "$TMUX"
tmux switch -t $picked
else
tmux attach-session -t $picked
end
end

# Dispatcher: when the first argument is a regular file, treat it as a
# server-list file; otherwise the arguments are session name plus hosts.
function tmux::cluster_ssh
if not test -f "$argv[1]"
tmux::tssh_from_argument $argv
else
tmux::tssh_from_file $argv[1]
end
end
# Create a synchronized multi-pane session, one pane per server/container.
# Usage: tmux::tssh_from_argument <session> [host ...]
function tmux::tssh_from_argument
set -l session $argv[1]
set first_server_or_container $argv[2]
set remaining_servers $argv[3..-1]
# With only a session name given, use it as the (single) host too.
if test -z "$first_server_or_container"
set first_server_or_container $session
end
# _tmux::connect_command is defined elsewhere — presumably it yields the
# ssh/exec command line for a host; verify against its definition.
tmux new-session -d -s $session (_tmux::connect_command "$first_server_or_container")
if not tmux list-session | grep "^$session:"
echo "Could not create session $session"
return 2
end
for server_or_container in $remaining_servers
# Re-tile after each split so panes stay evenly sized.
tmux split-window -t $session "tmux select-layout tiled; $(_tmux::connect_command "$server_or_container")"
end
# Mirror keystrokes to every pane in the session.
tmux setw -t $session synchronize-panes on
tmux -2 attach-session -t $session || tmux -2 switch-client -t $session
end
bind-key p setw synchronize-panes off bind-key P setw synchronize-panes on
# Build a cluster-ssh session from a server-list file: one host per line
# (first whitespace-separated field); session named after the file basename.
function tmux::tssh_from_file
set -l serverlist $argv[1]
set -l session (basename $serverlist | cut -d. -f1)
# NOTE(review): the sed rewrites ".lan." to ".lan" — presumably normalising
# FQDN entries from the list; confirm against the file format.
tmux::tssh_from_argument $session (awk '{ print $1 }' $serverlist | sed 's/.lan./.lan/g')
end
$ tssh fish blowfish.buetow.org fishfinger.buetow.org \
fishbone.buetow.org user@octopus.buetow.org
$ tssh manyservers.txt
bind-key -T copy-mode-vi 'v' send -X begin-selection bind-key -T copy-mode-vi 'y' send -X copy-selection-and-cancel
source ~/.config/tmux/tmux.local.conf set-option -g allow-rename off set-option -g history-limit 100000 set-option -g status-bg '#444444' set-option -g status-fg '#ffa500' set-option -s escape-time 0
set-window-option -g mode-keys vi bind-key -T copy-mode-vi 'v' send -X begin-selection bind-key -T copy-mode-vi 'y' send -X copy-selection-and-cancel
bind-key h select-pane -L bind-key j select-pane -D bind-key k select-pane -U bind-key l select-pane -R bind-key H resize-pane -L 5 bind-key J resize-pane -D 5 bind-key K resize-pane -U 5 bind-key L resize-pane -R 5
bind-key c new-window -c '#{pane_current_path}'
bind-key F new-window -n "session-switcher" "tmux list-sessions | fzf | cut -d: -f1 | xargs tmux switch-client -t"
bind-key T choose-tree

bind-key p setw synchronize-panes off bind-key P setw synchronize-panes on bind-key r source-file ~/.config/tmux/tmux.conf \; display-message "tmux.conf reloaded"
__ (`/\ `=\/\ __...--~~~~~-._ _.-~~~~~--...__ `=\/\ \ / \\ `=\/ V \\ //_\___--~~~~~~-._ | _.-~~~~~~--...__\\ // ) (..----~~~~._\ | /_.~~~~----.....__\\ ===( INK )==========\\|//==================== __ejm\___/________dwb`---`______________________

paul@f0:~ % dmesg | grep 'Features2=.*POPCNT' Features2=0x7ffafbbf<SSE3,PCLMULQDQ,DTES64,MON,DS_CPL,VMX,EST,TM2,SSSE3,SDBG, FMA,CX16,xTPR,PDCM,PCID,SSE4.1,SSE4.2,x2APIC,MOVBE,POPCNT,TSCDLT,AESNI,XSAVE, OSXSAVE,AVX,F16C,RDRAND>
paul@f0:~ % doas pkg install vm-bhyve bhyve-firmware paul@f0:~ % doas sysrc vm_enable=YES vm_enable: -> YES paul@f0:~ % doas sysrc vm_dir=zfs:zroot/bhyve vm_dir: -> zfs:zroot/bhyve paul@f0:~ % doas zfs create zroot/bhyve paul@f0:~ % doas vm init paul@f0:~ % doas vm switch create public paul@f0:~ % doas vm switch add public re0
paul@f0:~ % zfs list | grep bhyve zroot/bhyve 1.74M 453G 1.74M /zroot/bhyve
paul@f0:~ % doas ln -s /zroot/bhyve/ /bhyve
paul@f0:~ % doas vm list NAME DATASTORE LOADER CPU MEMORY VNC AUTO STATE
paul@f0:~ % doas vm iso \ https://download.rockylinux.org/pub/rocky/9/isos/x86_64/Rocky-9.5-x86_64-minimal.iso /zroot/bhyve/.iso/Rocky-9.5-x86_64-minimal.iso 1808 MB 4780 kBps 06m28s paul@f0:/bhyve % doas vm create rocky
paul@f0:/bhyve/rocky % cat rocky.conf loader="bhyveload" cpu=1 memory=256M network0_type="virtio-net" network0_switch="public" disk0_type="virtio-blk" disk0_name="disk0.img" uuid="1c4655ac-c828-11ef-a920-e8ff1ed71ca0" network0_mac="58:9c:fc:0d:13:3f"
guest="linux" loader="uefi" uefi_vars="yes" cpu=4 memory=14G network0_type="virtio-net" network0_switch="public" disk0_type="virtio-blk" disk0_name="disk0.img" graphics="yes" graphics_vga=io uuid="1c45400b-c828-11ef-8871-e8ff1ed71cac" network0_mac="58:9c:fc:0d:13:3f"
paul@f0:~ % doas vm install rocky Rocky-9.5-x86_64-minimal.iso Starting rocky * found guest in /zroot/bhyve/rocky * booting... paul@f0:/bhyve/rocky % doas vm list NAME DATASTORE LOADER CPU MEMORY VNC AUTO STATE rocky default uefi 4 14G 0.0.0.0:5900 No Locked (f0.lan.buetow.org) paul@f0:/bhyve/rocky % doas sockstat -4 | grep 5900 root bhyve 6079 8 tcp4 *:5900 *:*
paul@f0:/bhyve/rocky % doas vm stop rocky paul@f0:/bhyve/rocky % doas truncate -s 100G disk0.img paul@f0:/bhyve/rocky % doas vm install rocky Rocky-9.5-x86_64-minimal.iso




paul@f0:/bhyve/rocky % cat <<END | doas tee -a /etc/rc.conf vm_list="rocky" vm_delay="5"
paul@f0:~ % doas vm list NAME DATASTORE LOADER CPU MEMORY VNC AUTO STATE rocky default uefi 4 14G 0.0.0.0:5900 Yes [1] Running (2063)
192.168.1.130 f0 f0.lan f0.lan.buetow.org 192.168.1.131 f1 f1.lan f1.lan.buetow.org 192.168.1.132 f2 f2.lan f2.lan.buetow.org
paul@f0:/bhyve/rocky % cat <<END | doas tee -a /etc/hosts 192.168.1.120 r0 r0.lan r0.lan.buetow.org 192.168.1.121 r1 r1.lan r1.lan.buetow.org 192.168.1.122 r2 r2.lan r2.lan.buetow.org END
[root@r0 ~] % nmcli connection modify enp0s5 ipv4.address 192.168.1.120/24 [root@r0 ~] % nmcli connection modify enp0s5 ipv4.gateway 192.168.1.1 [root@r0 ~] % nmcli connection modify enp0s5 ipv4.DNS 192.168.1.1 [root@r0 ~] % nmcli connection modify enp0s5 ipv4.method manual [root@r0 ~] % nmcli connection down enp0s5 [root@r0 ~] % nmcli connection up enp0s5 [root@r0 ~] % hostnamectl set-hostname r0.lan.buetow.org [root@r0 ~] % cat <<END >>/etc/hosts 192.168.1.120 r0 r0.lan r0.lan.buetow.org 192.168.1.121 r1 r1.lan r1.lan.buetow.org 192.168.1.122 r2 r2.lan r2.lan.buetow.org END
% for i in 0 1 2; do ssh-copy-id root@r$i.lan.buetow.org; done
[root@r0 ~] % dnf update [root@r0 ~] % reboot
package main
import "testing"
// BenchmarkCPUSilly1 multiplies the loop counter by itself once per
// iteration. The product is discarded, so the compiler is free to
// optimise the body away entirely.
func BenchmarkCPUSilly1(b *testing.B) {
	for n := 0; n < b.N; n++ {
		_ = n * n
	}
}
// BenchmarkCPUSilly2 threads a running float64 through add, multiply and
// divide each iteration so the work is harder to optimise away than in
// BenchmarkCPUSilly1.
func BenchmarkCPUSilly2(b *testing.B) {
var sillyResult float64
for i := 0; i < b.N; i++ {
sillyResult += float64(i)
sillyResult *= float64(i)
// divisor is i+1, so it is always >= 1 and the guard below never fails.
divisor := float64(i) + 1
if divisor > 0 {
sillyResult /= divisor
}
}
_ = sillyResult // to avoid compiler optimization
}
paul@f0:~ % doas pkg install git go paul@f0:~ % mkdir ~/git && cd ~/git && \ git clone https://codeberg.org/snonux/sillybench && \ cd sillybench
paul@f0:~/git/sillybench % go version go version go1.24.1 freebsd/amd64 paul@f0:~/git/sillybench % go test -bench=. goos: freebsd goarch: amd64 pkg: codeberg.org/snonux/sillybench cpu: Intel(R) N100 BenchmarkCPUSilly1-4 1000000000 0.4022 ns/op BenchmarkCPUSilly2-4 1000000000 0.4027 ns/op PASS ok codeberg.org/snonux/sillybench 0.891s
[root@r0 ~]# dnf install golang git [root@r0 ~]# mkdir ~/git && cd ~/git && \ git clone https://codeberg.org/snonux/sillybench && \ cd sillybench
[root@r0 sillybench]# go version go version go1.22.9 (Red Hat 1.22.9-2.el9_5) linux/amd64 [root@r0 sillybench]# go test -bench=. goos: linux goarch: amd64 pkg: codeberg.org/snonux/sillybench cpu: Intel(R) N100 BenchmarkCPUSilly1-4 1000000000 0.4347 ns/op BenchmarkCPUSilly2-4 1000000000 0.4345 ns/op
root@freebsd:~/git/sillybench # go test -bench=. goos: freebsd goarch: amd64 pkg: codeberg.org/snonux/sillybench cpu: Intel(R) N100 BenchmarkCPUSilly1 1000000000 0.4273 ns/op BenchmarkCPUSilly2 1000000000 0.4286 ns/op PASS ok codeberg.org/snonux/sillybench 0.949s
paul@f0:~ % doas ubench -s 1 Unix Benchmark Utility v.0.3 Copyright (C) July, 1999 PhysTech, Inc. Author: Sergei Viznyuk <sv@phystech.com> http://www.phystech.com/download/ubench.html FreeBSD 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 Ubench Single CPU: 671010 (0.40s) Ubench Single MEM: 1705237 (0.48s) ----------------------------------- Ubench Single AVG: 1188123
paul@f0:~ % doas ubench Unix Benchmark Utility v.0.3 Copyright (C) July, 1999 PhysTech, Inc. Author: Sergei Viznyuk <sv@phystech.com> http://www.phystech.com/download/ubench.html FreeBSD 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 Ubench CPU: 2660220 Ubench MEM: 3095182 -------------------- Ubench AVG: 2877701
root@freebsd:~ # ubench -s 1 Unix Benchmark Utility v.0.3 Copyright (C) July, 1999 PhysTech, Inc. Author: Sergei Viznyuk <sv@phystech.com> http://www.phystech.com/download/ubench.html FreeBSD 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 Ubench Single CPU: 672792 (0.40s) Ubench Single MEM: 852757 (0.48s) ----------------------------------- Ubench Single AVG: 762774
root@freebsd:~ # ubench Unix Benchmark Utility v.0.3 Copyright (C) July, 1999 PhysTech, Inc. Author: Sergei Viznyuk <sv@phystech.com> http://www.phystech.com/download/ubench.html FreeBSD 14.2-RELEASE-p1 FreeBSD 14.2-RELEASE-p1 GENERIC amd64 Ubench CPU: 2652857 swap_pager: out of swap space swp_pager_getswapspace(27): failed swap_pager: out of swap space swp_pager_getswapspace(18): failed Apr 4 23:02:43 freebsd kernel: pid 862 (ubench), jid 0, uid 0, was killed: failed to reclaim memory swp_pager_getswapspace(6): failed Apr 4 23:02:46 freebsd kernel: pid 863 (ubench), jid 0, uid 0, was killed: failed to reclaim memory Apr 4 23:02:47 freebsd kernel: pid 864 (ubench), jid 0, uid 0, was killed: failed to reclaim memory Apr 4 23:02:48 freebsd kernel: pid 865 (ubench), jid 0, uid 0, was killed: failed to reclaim memory Apr 4 23:02:49 freebsd kernel: pid 861 (ubench), jid 0, uid 0, was killed: failed to reclaim memory Apr 4 23:02:51 freebsd kernel: pid 839 (ubench), jid 0, uid 0, was killed: failed to reclaim memory
PID USERNAME THR PRI NICE SIZE RES STATE C TIME WCPU COMMAND 7449 root 14 20 0 14G 78M kqread 2 2:12 399.81% bhyve
{"level":"warn","msg":"apply request took too long","took":"4.996516657s","expected-duration":"100ms"}
{"level":"warn","msg":"slow fdatasync","took":"1.328469363s","expected-duration":"1s"}
[root@r0 ~]# dd if=/dev/zero of=/tmp/test bs=4k count=2000 oflag=dsync 8192000 bytes copied, 31.7058 s, 258 kB/s
[root@r0 ~]# cat > /etc/dracut.conf.d/nvme.conf << EOF add_drivers+=" nvme nvme_core " hostonly=no EOF [root@r0 ~]# sed -i 's/# use_devicesfile = 1/use_devicesfile = 0/' /etc/lvm/lvm.conf [root@r0 ~]# dracut -f [root@r0 ~]# shutdown -h now
paul@f0:~ % doas vm stop rocky paul@f0:~ % doas vm configure rocky
disk0_type="nvme"
paul@f0:~ % doas vm start rocky
[root@r0 ~]# dd if=/dev/zero of=/tmp/test bs=4k count=2000 oflag=dsync 8192000 bytes copied, 0.330718 s, 24.8 MB/s
etcd_disk_wal_fsync_duration_seconds_bucket{le="0.001"} 347
etcd_disk_wal_fsync_duration_seconds_bucket{le="0.002"} 396
etcd_disk_wal_fsync_duration_seconds_bucket{le="0.004"} 408

git clone https://codeberg.org/snonux/gos.git cd gos
go build -o gos ./cmd/gos go build -o gosc ./cmd/gosc sudo mv gos ~/go/bin sudo mv gosc ~/go/bin
go-task install
{
"MastodonURL": "https://mastodon.example.com",
"MastodonAccessToken": "your-mastodon-access-token",
"LinkedInClientID": "your-linkedin-client-id",
"LinkedInSecret": "your-linkedin-client-secret",
"LinkedInRedirectURL": "http://localhost:8080/callback",
}
./gos --dry
./gos

This is a sample message to be posted on social media platforms. Maybe add a link here: https://foo.zone #foo #cool #gos #golang
share:mastodon The content of the post here
share:mastodon The content of the post is here https://some.foo/link #some #hashtags
share:mastodon,ask,prio Hello wold :-)
share:mastodon,ask,prio Hello World :-)
~/.gosdir/db/platforms/linkedin/foo.share:-mastodon.txt.20241022-102343.queued
./db/platforms/linkedin/foo.share:-mastodon.txt.20241112-121323.posted
gos --geminiSummaryFor 202410,202411,202412
gos --gemtexterEnable --geminiSummaryFor 202410,202411,202412
gos --gemtexterEnable --geminiSummaryFor 202410,202411,202412 --geminiCapsules "foo.zone,paul.buetow.org"
/\_/\ /\_/\ ( o.o ) WHOA!! ( o.o ) > ^ < > ^ < / \ MOEEW! / \ /______\ /______\


package main
import "log"
// fun is a named function type; giving it a method lets plain closures
// carry methods once converted to fun.
type fun func() string
// Bar ignores the receiver function and always returns "Bar".
func (f fun) Bar() string {
return "Bar"
}
func main() {
var f fun = func() string {
return "Foo"
}
log.Println("Example 1: ", f())     // calling the closure itself -> "Foo"
log.Println("Example 2: ", f.Bar()) // calling the method -> "Bar"
// f.Bar is itself a func() string, so it can be converted back to fun
// and the chain repeated indefinitely.
log.Println("Example 3: ", fun(f.Bar).Bar())
log.Println("Example 4: ", fun(fun(f.Bar).Bar).Bar())
}
❯ go run main.go 2025/02/07 22:56:14 Example 1: Foo 2025/02/07 22:56:14 Example 2: Bar 2025/02/07 22:56:14 Example 3: Bar 2025/02/07 22:56:14 Example 4: Bar
❯ touch Maß ❯ ls -l -rw-r--r--@ 1 paul wheel 0 Feb 7 23:02 Maß ❯ touch Mass ❯ ls -l -rw-r--r--@ 1 paul wheel 0 Feb 7 23:02 Maß ❯ rm Mass ❯ ls -l ❯ touch Mass ❯ ls -ltr -rw-r--r--@ 1 paul wheel 0 Feb 7 23:02 Mass ❯ rm Maß ❯ ls -l
ADFS::4.$.Documents.Techwriter.Myfile
arr = {10, 20, 30, 40, 50}
print(arr[1]) -- Accessing the first element
❯ lua foo.lua 10
# (C) 2006 by Paul C. Buetow
Christmas:{time;#!!!
Children: do tell $wishes;
Santa: for $each (@children) {
BEGIN { read $each, $their, wishes and study them; use Memoize#ing
} use constant gift, 'wrapping';
package Gifts; pack $each, gift and bless $each and goto deliver
or do import if not local $available,!!! HO, HO, HO;
redo Santa, pipe $gifts, to_childs;
redo Santa and do return if last one, is, delivered;
deliver: gift and require diagnostics if our $gifts ,not break;
do{ use NEXT; time; tied $gifts} if broken and dump the, broken, ones;
The_children: sleep and wait for (each %gift) and try { to => untie $gifts };
redo Santa, pipe $gifts, to_childs;
redo Santa and do return if last one, is, delivered;
The_christmas_tree: formline s/ /childrens/, $gifts;
alarm and warn if not exists $Christmas{ tree}, @t, $ENV{HOME};
write <<EMail
to the parents to buy a new christmas tree!!!!111
and send the
EMail
;wait and redo deliver until defined local $tree;
redo Santa, pipe $gifts, to_childs;
redo Santa and do return if last one, is, delivered ;}
END {} our $mission and do sleep until next Christmas ;}
__END__
This is perl, v5.8.8 built for i386-freebsd-64int


paul@f0: ~ % doas freebsd-update fetch paul@f0: ~ % doas freebsd-update install paul@f0: ~ % doas freebsd-update -r 14.2-RELEASE upgrade paul@f0: ~ % doas freebsd-update install paul@f0: ~ % doas shutdown -r now
paul@f0: ~ % doas freebsd-update install paul@f0: ~ % doas pkg update paul@f0: ~ % doas pkg upgrade paul@f0: ~ % doas shutdown -r now
paul@f0:~ % uname -a FreeBSD f0.lan.buetow.org 14.2-RELEASE FreeBSD 14.2-RELEASE releng/14.2-n269506-c8918d6c7412 GENERIC amd64


paul@f0: ~ % doas dmesg | grep UPS ugen0.2: <American Power Conversion Back-UPS BX750MI> at usbus0
paul@f0: ~ % doas pkg install apcupsd
paul@f0:/usr/local/etc/apcupsd % diff -u apcupsd.conf.sample apcupsd.conf --- apcupsd.conf.sample 2024-11-01 16:40:42.000000000 +0200 +++ apcupsd.conf 2024-12-03 10:58:24.009501000 +0200 @@ -31,7 +31,7 @@ # 940-1524C, 940-0024G, 940-0095A, 940-0095B, # 940-0095C, 940-0625A, M-04-02-2000 # -UPSCABLE smart +UPSCABLE usb # To get apcupsd to work, in addition to defining the cable # above, you must also define a UPSTYPE, which corresponds to @@ -88,8 +88,10 @@ # that apcupsd binds to that particular unit # (helpful if you have more than one USB UPS). # -UPSTYPE apcsmart -DEVICE /dev/usv +UPSTYPE usb +DEVICE # POLLTIME <int> # Interval (in seconds) at which apcupsd polls the UPS for status. This
# If during a power failure, the remaining battery percentage # (as reported by the UPS) is below or equal to BATTERYLEVEL, # apcupsd will initiate a system shutdown. BATTERYLEVEL 5 # If during a power failure, the remaining runtime in minutes # (as calculated internally by the UPS) is below or equal to MINUTES, # apcupsd, will initiate a system shutdown. MINUTES 3
paul@f0:/usr/local/etc/apcupsd % doas sysrc apcupsd_enable=YES apcupsd_enable: -> YES paul@f0:/usr/local/etc/apcupsd % doas service apcupsd start Starting apcupsd.
paul@f0:~ % apcaccess APC : 001,035,0857 DATE : 2025-01-26 14:43:27 +0200 HOSTNAME : f0.lan.buetow.org VERSION : 3.14.14 (31 May 2016) freebsd UPSNAME : f0.lan.buetow.org CABLE : USB Cable DRIVER : USB UPS Driver UPSMODE : Stand Alone STARTTIME: 2025-01-26 14:43:25 +0200 MODEL : Back-UPS BX750MI STATUS : ONLINE LINEV : 230.0 Volts LOADPCT : 4.0 Percent BCHARGE : 100.0 Percent TIMELEFT : 65.3 Minutes MBATTCHG : 5 Percent MINTIMEL : 3 Minutes MAXTIME : 0 Seconds SENSE : Medium LOTRANS : 145.0 Volts HITRANS : 295.0 Volts ALARMDEL : No alarm BATTV : 13.6 Volts LASTXFER : Automatic or explicit self test NUMXFERS : 0 TONBATT : 0 Seconds CUMONBATT: 0 Seconds XOFFBATT : N/A SELFTEST : NG STATFLAG : 0x05000008 SERIALNO : 9B2414A03599 BATTDATE : 2001-01-01 NOMINV : 230 Volts NOMBATTV : 12.0 Volts NOMPOWER : 410 Watts END APC : 2025-01-26 14:44:06 +0200
paul@f1:~ % apcaccess -h f0.lan.buetow.org | grep Percent LOADPCT : 12.0 Percent BCHARGE : 94.0 Percent MBATTCHG : 5 Percent
paul@f2:/usr/local/etc/apcupsd % diff -u apcupsd.conf.sample apcupsd.conf --- apcupsd.conf.sample 2024-11-01 16:40:42.000000000 +0200 +++ apcupsd.conf 2025-01-26 15:52:45.108469000 +0200 @@ -31,7 +31,7 @@ # 940-1524C, 940-0024G, 940-0095A, 940-0095B, # 940-0095C, 940-0625A, M-04-02-2000 # -UPSCABLE smart +UPSCABLE ether # To get apcupsd to work, in addition to defining the cable # above, you must also define a UPSTYPE, which corresponds to @@ -52,7 +52,6 @@ # Network Information Server. This is used if the # UPS powering your computer is connected to a # different computer for monitoring. -# # snmp hostname:port:vendor:community # SNMP network link to an SNMP-enabled UPS device. # Hostname is the ip address or hostname of the UPS @@ -88,8 +87,8 @@ # that apcupsd binds to that particular unit # (helpful if you have more than one USB UPS). # -UPSTYPE apcsmart -DEVICE /dev/usv +UPSTYPE net +DEVICE f0.lan.buetow.org:3551 # POLLTIME <int> # Interval (in seconds) at which apcupsd polls the UPS for status. This @@ -147,12 +146,12 @@ # If during a power failure, the remaining battery percentage # (as reported by the UPS) is below or equal to BATTERYLEVEL, # apcupsd will initiate a system shutdown. -BATTERYLEVEL 5 +BATTERYLEVEL 10 # If during a power failure, the remaining runtime in minutes # (as calculated internally by the UPS) is below or equal to MINUTES, # apcupsd, will initiate a system shutdown. -MINUTES 3 +MINUTES 6 # If during a power failure, the UPS has run on batteries for TIMEOUT # many seconds or longer, apcupsd will initiate a system shutdown.So I also ran the following commands on f1 and f2:
paul@f1:/usr/local/etc/apcupsd % doas sysrc apcupsd_enable=YES apcupsd_enable: -> YES paul@f1:/usr/local/etc/apcupsd % doas service apcupsd start Starting apcupsd.
paul@f1:~ % doas apcaccess | grep Percent LOADPCT : 5.0 Percent BCHARGE : 95.0 Percent MBATTCHG : 5 Percent
Broadcast Message from root@f0.lan.buetow.org
(no tty) at 15:03 EET...
Power failure. Running on UPS batteries.
paul@f0:/usr/local/etc/apcupsd % apcaccess -p TIMELEFT 63.9 Minutes
Broadcast Message from root@f0.lan.buetow.org
(no tty) at 15:08 EET...
*** FINAL System shutdown message from root@f0.lan.buetow.org ***
System going down IMMEDIATELY
apcupsd initiated shutdown
Jan 26 17:36:24 f2 apcupsd[2159]: Power failure. Jan 26 17:36:30 f2 apcupsd[2159]: Running on UPS batteries. Jan 26 17:36:30 f2 apcupsd[2159]: Battery charge below low limit. Jan 26 17:36:30 f2 apcupsd[2159]: Initiating system shutdown! Jan 26 17:36:30 f2 apcupsd[2159]: User logins prohibited Jan 26 17:36:32 f2 apcupsd[2159]: apcupsd exiting, signal 15 Jan 26 17:36:32 f2 apcupsd[2159]: apcupsd shutdown succeeded



[paul@earth]~/Downloads% sudo dd \ if=FreeBSD-14.1-RELEASE-amd64-bootonly.iso \ of=/dev/sda conv=sync

root@f0:~ # freebsd-update fetch root@f0:~ # freebsd-update install root@f0:~ # freebsd-update reboot
root@f0:~ # cat <<END >>/etc/hosts 192.168.1.130 f0 f0.lan f0.lan.buetow.org 192.168.1.131 f1 f1.lan f1.lan.buetow.org 192.168.1.132 f2 f2.lan f2.lan.buetow.org END
root@f0:~ # pkg install helix doas zfs-periodic uptimed
root@f0:~ # cp /usr/local/etc/doas.conf.sample /usr/local/etc/doas.conf
root@f0:~ # cat <<END >>/etc/periodic.conf daily_zfs_snapshot_enable="YES" daily_zfs_snapshot_pools="zroot" daily_zfs_snapshot_keep="7" weekly_zfs_snapshot_enable="YES" weekly_zfs_snapshot_pools="zroot" weekly_zfs_snapshot_keep="5" monthly_zfs_snapshot_enable="YES" monthly_zfs_snapshot_pools="zroot" monthly_zfs_snapshot_keep="6" END
root@f0:~ # cp /usr/local/etc/uptimed.conf-dist \ /usr/local/etc/uptimed.conf root@f0:~ # hx /usr/local/etc/uptimed.conf
root@f0:~ # service uptimed enable root@f0:~ # service uptimed start
root@f0:~ # uprecords
# Uptime | System Boot up
----------------------------+---------------------------------------------------
-> 1 0 days, 00:07:34 | FreeBSD 14.1-RELEASE Mon Dec 2 12:21:44 2024
----------------------------+---------------------------------------------------
NewRec 0 days, 00:07:33 | since Mon Dec 2 12:21:44 2024
up 0 days, 00:07:34 | since Mon Dec 2 12:21:44 2024
down 0 days, 00:00:00 | since Mon Dec 2 12:21:44 2024
%up 100.000 | since Mon Dec 2 12:21:44 2024
paul@f0:~ % ifconfig re0
re0: flags=1008843<UP,BROADCAST,RUNNING,SIMPLEX,MULTICAST,LOWER_UP> metric 0 mtu 1500
options=8209b<RXCSUM,TXCSUM,VLAN_MTU,VLAN_HWTAGGING,VLAN_HWCSUM,WOL_MAGIC,LINKSTATE>
ether e8:ff:1e:d7:1c:ac
inet 192.168.1.130 netmask 0xffffff00 broadcast 192.168.1.255
inet6 fe80::eaff:1eff:fed7:1cac%re0 prefixlen 64 scopeid 0x1
inet6 fd22:c702:acb7:0:eaff:1eff:fed7:1cac prefixlen 64 detached autoconf
inet6 2a01:5a8:304:1d5c:eaff:1eff:fed7:1cac prefixlen 64 autoconf pltime 10800 vltime 14400
media: Ethernet autoselect (1000baseT <full-duplex>)
status: active
nd6 options=23<PERFORMNUD,ACCEPT_RTADV,AUTO_LINKLOCAL>
paul@f0:~ % sysctl hw.physmem hw.physmem: 16902905856
paul@f0:~ % sysctl dev.cpu | grep freq: dev.cpu.3.freq: 705 dev.cpu.2.freq: 705 dev.cpu.1.freq: 604 dev.cpu.0.freq: 604
paul@f0:~ % doas pkg install ubench paul@f0:~ % rehash # For tcsh to find the newly installed command paul@f0:~ % ubench & paul@f0:~ % sysctl dev.cpu | grep freq: dev.cpu.3.freq: 2922 dev.cpu.2.freq: 2922 dev.cpu.1.freq: 2923 dev.cpu.0.freq: 2922

[paul@earth]~% sudo dnf install -y wol
#!/bin/bash
# Wake-on-LAN and shutdown script for f3s cluster (f0, f1, f2)
#
# Usage:
#   wol-f3s [f0|f1|f2|all]            wake one machine, or all of them
#   wol-f3s shutdown|poweroff|down    shut all three down over SSH

# MAC addresses
F0_MAC="e8:ff:1e:d7:1c:ac" # f0 (192.168.1.130)
F1_MAC="e8:ff:1e:d7:1e:44" # f1 (192.168.1.131)
F2_MAC="e8:ff:1e:d7:1c:a0" # f2 (192.168.1.132)

# IP addresses
F0_IP="192.168.1.130"
F1_IP="192.168.1.131"
F2_IP="192.168.1.132"

# SSH user
SSH_USER="paul"

# Broadcast address for your LAN
BROADCAST="192.168.1.255"

# Send a WoL magic packet. $1 = display name, $2 = MAC address.
wake() {
    local name=$1
    local mac=$2
    echo "Sending WoL packet to $name ($mac)..."
    wol -i "$BROADCAST" "$mac"
}

# Shut a host down via SSH. $1 = display name, $2 = IP address.
# A short connect timeout keeps the loop fast when a host is already off.
shutdown_host() {
    local name=$1
    local ip=$2
    echo "Shutting down $name ($ip)..."
    ssh -o ConnectTimeout=5 "$SSH_USER@$ip" "doas poweroff" 2>/dev/null && \
        echo " ✓ Shutdown command sent to $name" || \
        echo " ✗ Failed to reach $name (already down?)"
}

ACTION="${1:-all}"

case "$ACTION" in
f0) wake "f0" "$F0_MAC" ;;
f1) wake "f1" "$F1_MAC" ;;
f2) wake "f2" "$F2_MAC" ;;
all|"")
    wake "f0" "$F0_MAC"
    wake "f1" "$F1_MAC"
    wake "f2" "$F2_MAC"
    ;;
shutdown|poweroff|down)
    shutdown_host "f0" "$F0_IP"
    shutdown_host "f1" "$F1_IP"
    shutdown_host "f2" "$F2_IP"
    echo ""
    echo "✓ Shutdown commands sent to all machines."
    exit 0
    ;;
*)
    # Fix: advertise the poweroff/down aliases accepted by the case above.
    echo "Usage: $0 [f0|f1|f2|all|shutdown|poweroff|down]"
    exit 1
    ;;
esac

echo ""
echo "✓ WoL packets sent. Machines should boot in a few seconds."
[paul@earth]~% wol-f3s # Wake all three [paul@earth]~% wol-f3s f0 # Wake only f0 [paul@earth]~% wol-f3s shutdown # Shutdown all three via SSH
[paul@earth]~% wol-f3s shutdown Shutting down f0 (192.168.1.130)... ✓ Shutdown command sent to f0 Shutting down f1 (192.168.1.131)... ✓ Shutdown command sent to f1 Shutting down f2 (192.168.1.132)... ✓ Shutdown command sent to f2 ✓ Shutdown commands sent to all machines.
[paul@earth]~% wol-f3s Sending WoL packet to f0 (e8:ff:1e:d7:1c:ac)... Waking up e8:ff:1e:d7:1c:ac... Sending WoL packet to f1 (e8:ff:1e:d7:1e:44)... Waking up e8:ff:1e:d7:1e:44... Sending WoL packet to f2 (e8:ff:1e:d7:1c:a0)... Waking up e8:ff:1e:d7:1c:a0... ✓ WoL packets sent. Machines should boot in a few seconds.
[paul@earth]~% wol-f3s shutdown



,.......... ..........,
,..,' '.' ',..,
,' ,' : ', ',
,' ,' : ', ',
,' ,' : ', ',
,' ,'............., : ,.............', ',
,' '............ '.' ............' ',
'''''''''''''''''';''';''''''''''''''''''
'''
-=[ typewriters ]=- 1/98
.-------.
.-------. _|~~ ~~ |_
_|~~ ~~ |_ .-------. =(_|_______|_)
=(_|_______|_)= _|~~ ~~ |_ |:::::::::| .-------.
|:::::::::| =(_|_______|_) |:::::::[]| _|~~ ~~ |_
|:::::::[]| |:::::::::| |o=======.| =(_|_______|_)
|o=======.| |:::::::[]| `"""""""""` |:::::::::|
jgs `"""""""""` |o=======.| |:::::::[]|
mod. by Paul Buetow `"""""""""` |o=======.|
`"""""""""`
<< template::inline::toc
declare -xr HTML_THEME_DIR=./extras/html/themes/simple
__..._ _...__
_..-" `Y` "-._
\ Once upon | /
\\ a time..| //
\\\ | ///
\\\ _..---.|.---.._ ///
jgs \\`_..---.Y.---.._`//
||====================================================================|| ||//$\\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\//$\\|| ||(100)==================| FEDERAL SPONSOR NOTE |================(100)|| ||\\$// ~ '------========--------' \\$//|| ||<< / /$\ // ____ \\ \ >>|| ||>>| 12 //L\\ // ///..) \\ L38036133B 12 |<<|| ||<<| \\ // || <|| >\ || |>>|| ||>>| \$/ || $$ --/ || One Hundred |<<|| ||<<| L38036133B *\\ |\_/ //* series |>>|| ||>>| 12 *\\/___\_//* 1989 |<<|| ||<<\ Open Source ______/Franklin\________ Supporting />>|| ||//$\ ~| SPONSORING AND FUNDING |~ /$\\|| ||(100)=================== AWESOME OPEN SOURCE =================(100)|| ||\\$//\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\\$//|| ||====================================================================||
,---,---,---,---,---,---,---,---,---,---,---,---,---,-------,
|1/2| 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9 | 0 | + | ' | <- |
|---'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-----|
| ->| | Q | W | E | R | T | Y | U | I | O | P | ] | ^ | |
|-----',--',--',--',--',--',--',--',--',--',--',--',--'| |
| Caps | A | S | D | F | G | H | J | K | L | \ | [ | * | |
|----,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'-,-'---'----|
| | < | Z | X | C | V | B | N | M | , | . | - | |
|----'-,-',--'--,'---'---'---'---'---'---'-,-'---',--,------|
| ctrl | | alt | |altgr | | ctrl |
'------' '-----'--------------------------'------' '------'
Nieminen Mika





,.......... ..........,
,..,' '.' ',..,
,' ,' : ', ',
,' ,' : ', ',
,' ,' : ', ',
,' ,'............., : ,.............', ',
,' '............ '.' ............' ',
'''''''''''''''''';''';''''''''''''''''''
'''
/\_/\ WHOA!! ( o.o ) > ^ < / - \ / \ /______\ \
❯ traceroute -m 60 bad.horse traceroute to bad.horse (162.252.205.157), 60 hops max, 60 byte packets 1 _gateway (192.168.1.1) 5.237 ms 5.264 ms 6.009 ms 2 77-85-0-2.ip.btc-net.bg (77.85.0.2) 8.753 ms 7.112 ms 8.336 ms 3 212-39-69-103.ip.btc-net.bg (212.39.69.103) 9.434 ms 9.268 ms 9.986 ms 4 * * * 5 xe-1-2-0.mpr1.fra4.de.above.net (80.81.194.26) 39.812 ms 39.030 ms 39.772 ms 6 * ae12.cs1.fra6.de.eth.zayo.com (64.125.26.172) 123.576 ms * 7 * * * 8 * * * 9 ae10.cr1.lhr15.uk.eth.zayo.com (64.125.29.17) 119.097 ms 119.478 ms 120.767 ms 10 ae2.cr1.lhr11.uk.zip.zayo.com (64.125.24.140) 120.398 ms 121.147 ms 120.948 ms 11 * * * 12 ae25.mpr1.yyz1.ca.zip.zayo.com (64.125.23.117) 145.072 ms * 181.773 ms 13 ae5.mpr1.tor3.ca.zip.zayo.com (64.125.23.118) 168.239 ms 168.158 ms 168.137 ms 14 64.124.217.237.IDIA-265104-ZYO.zip.zayo.com (64.124.217.237) 168.026 ms 167.999 ms 165.451 ms 15 * * * 16 t00.toroc1.on.ca.sn11.net (162.252.204.2) 131.598 ms 131.308 ms 131.482 ms 17 bad.horse (162.252.205.130) 131.430 ms 145.914 ms 130.514 ms 18 bad.horse (162.252.205.131) 136.634 ms 145.295 ms 135.631 ms 19 bad.horse (162.252.205.132) 139.158 ms 148.363 ms 138.934 ms 20 bad.horse (162.252.205.133) 145.395 ms 148.054 ms 147.140 ms 21 he.rides.across.the.nation (162.252.205.134) 149.687 ms 147.731 ms 150.135 ms 22 the.thoroughbred.of.sin (162.252.205.135) 156.644 ms 155.155 ms 156.447 ms 23 he.got.the.application (162.252.205.136) 161.187 ms 162.318 ms 162.674 ms 24 that.you.just.sent.in (162.252.205.137) 166.763 ms 166.675 ms 164.243 ms 25 it.needs.evaluation (162.252.205.138) 172.073 ms 171.919 ms 171.390 ms 26 so.let.the.games.begin (162.252.205.139) 175.386 ms 174.180 ms 175.965 ms 27 a.heinous.crime (162.252.205.140) 180.857 ms 180.766 ms 180.192 ms 28 a.show.of.force (162.252.205.141) 187.942 ms 186.669 ms 186.986 ms 29 a.murder.would.be.nice.of.course (162.252.205.142) 191.349 ms 191.939 ms 190.740 ms 30 bad.horse (162.252.205.143) 195.425 ms 195.716 ms 196.186 ms 31 bad.horse 
(162.252.205.144) 199.238 ms 200.620 ms 200.318 ms 32 bad.horse (162.252.205.145) 207.554 ms 206.729 ms 205.201 ms 33 he-s.bad (162.252.205.146) 211.087 ms 211.649 ms 211.712 ms 34 the.evil.league.of.evil (162.252.205.147) 212.657 ms 216.777 ms 216.589 ms 35 is.watching.so.beware (162.252.205.148) 220.911 ms 220.326 ms 221.961 ms 36 the.grade.that.you.receive (162.252.205.149) 225.384 ms 225.696 ms 225.640 ms 37 will.be.your.last.we.swear (162.252.205.150) 232.312 ms 230.989 ms 230.919 ms 38 so.make.the.bad.horse.gleeful (162.252.205.151) 235.761 ms 235.291 ms 235.585 ms 39 or.he-ll.make.you.his.mare (162.252.205.152) 241.350 ms 239.407 ms 238.394 ms 40 o_o (162.252.205.153) 246.154 ms 247.650 ms 247.110 ms 41 you-re.saddled.up (162.252.205.154) 250.925 ms 250.401 ms 250.619 ms 42 there-s.no.recourse (162.252.205.155) 256.071 ms 251.154 ms 255.340 ms 43 it-s.hi-ho.silver (162.252.205.156) 260.152 ms 261.775 ms 261.544 ms 44 signed.bad.horse (162.252.205.157) 262.430 ms 261.410 ms 261.365 ms
#include <stdio.h>
/*
 * Demonstrates that array[i], i[array] and *(i + array) are the same
 * expression: subscription is defined as *(a + b), and addition commutes.
 * All three loops print 1..5.
 */
int main(void) {
int array[5] = { 1, 2, 3, 4, 5 };
for (int i = 0; i < 5; i++)
printf("%d\n", array[i]);
/* i[array] is legal C: a[b] == *(a + b) == *(b + a) == b[a]. */
for (int i = 0; i < 5; i++)
printf("%d\n", i[array]);
for (int i = 0; i < 5; i++)
printf("%d\n", *(i + array));
}
#include <stdio.h>
/*
 * Same program as above, but every identifier starts with '$'.
 * Accepting '$' in identifiers is a common compiler extension
 * (implementation-defined), not portable ISO C.
 */
int main(void) {
int $array[5] = { 1, 2, 3, 4, 5 };
for (int $i = 0; $i < 5; $i++)
printf("%d\n", $array[$i]);
for (int $i = 0; $i < 5; $i++)
printf("%d\n", $i[$array]);
for (int $i = 0; $i < 5; $i++)
printf("%d\n", *($i + $array));
}
#!/usr/bin/ksh93
# Demonstrates ksh93 user-defined types: typeset -T declares a new type
# (Point_t) with fields (-h attaches a help string) and member functions.
typeset -T Point_t=(
integer -h 'x coordinate' x=0
integer -h 'y coordinate' y=0
typeset -h 'point color' color="red"
# Inside member functions, `_` refers to the current instance.
function getcolor {
print -r ${_.color}
}
function setcolor {
_.color=$1
}
# POSIX-style function definitions work as members too.
setxy() {
_.x=$1; _.y=$2
}
getxy() {
print -r "(${_.x},${_.y})"
}
)
# Instantiate the type and exercise fields and member functions.
Point_t point
echo "Initial coordinates are (${point.x},${point.y}). Color is ${point.color}"
point.setxy 5 6
point.setcolor blue
echo "New coordinates are ${point.getxy}. Color is ${point.getcolor}"
exit 0
package main
import "fmt"
func main() {
var i int
// f returns a pointer to the enclosing i.
f := func() *int {
return &i
}
// `*f()` is a pointer indirection, which is addressable, so the ++
// statement is legal and increments i through the returned pointer.
*f()++
fmt.Println(i) // prints 1
}
def _token:
def _re($re; f):
( . as {$remain, $string_stack}
| $remain
| match($re; "m").string
| f as $token
| { result: ($token | del(.string_stack))
, remain: $remain[length:]
, string_stack:
( if $token.string_stack == null then $string_stack
else $token.string_stack
end
)
}
);
if .remain == "" then empty
else
( . as {$string_stack}
| _re("^\\s+"; {whitespace: .})
// _re("^#[^\n]*"; {comment: .})
// _re("^\\.[_a-zA-Z][_a-zA-Z0-9]*"; {index: .[1:]})
// _re("^[_a-zA-Z][_a-zA-Z0-9]*"; {ident: .})
// _re("^@[_a-zA-Z][_a-zA-Z0-9]*"; {at_ident: .})
// _re("^\\$[_a-zA-Z][_a-zA-Z0-9]*"; {binding: .})
# 1.23, .123, 123e2, 1.23e2, 123E2, 1.23e+2, 1.23E-2 or 123
// _re("^(?:[0-9]*\\.[0-9]+|[0-9]+)(?:[eE][-\\+]?[0-9]+)?"; {number: .})
// _re("^\"(?:[^\"\\\\]|\\\\.)*?\\\\\\(";
( .[1:-2]
| _unescape
| {string_start: ., string_stack: ($string_stack+["\\("])}
)
)
.
.
.
(?:(?:\r\n)?[ \t])*(?:(?:(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t] )+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?: \r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:( ?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*))*@(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\0 31]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\ ](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+ (?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?: (?:\r\n)?[ \t])*))*|(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z |(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n) ?[ \t])*)*\<(?:(?:\r\n)?[ \t])*(?:@(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\ r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n) ?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t] )*))*(?:,@(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])* )(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t] )+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*) *:(?:(?:\r\n)?[ \t])*)?(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+ |\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r \n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?: \r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t ]))*"(?:(?:\r\n)?[ \t])*))*@(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031 ]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\]( ?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(? 
:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(? :\r\n)?[ \t])*))*\>(?:(?:\r\n)?[ \t])*)|(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(? :(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)? [ \t]))*"(?:(?:\r\n)?[ \t])*)*:(?:(?:\r\n)?[ \t])*(?:(?:(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]| \\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<> @,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|" (?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*))*@(?:(?:\r\n)?[ \t] )*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\ ".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(? :[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[ \]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*|(?:[^()<>@,;:\\".\[\] \000- \031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|( ?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*)*\<(?:(?:\r\n)?[ \t])*(?:@(?:[^()<>@,; :\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([ ^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\" .\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\ ]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*(?:,@(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\ [\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\ r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\] |\\.)*\](?:(?:\r\n)?[ \t])*))*)*:(?:(?:\r\n)?[ \t])*)?(?:[^()<>@,;:\\".\[\] \0 00-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\ .|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[^()<>@, ;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|"(? 
:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*))*@(?:(?:\r\n)?[ \t])* (?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\". \[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t])*(?:[ ^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\] ]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*\>(?:(?:\r\n)?[ \t])*)(?:,\s*( ?:(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\ ".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*)(?:\.(?:( ?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[ \["()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t ])*))*@(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t ])+|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(? :\.(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+| \Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*|(?: [^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\".\[\ ]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*)*\<(?:(?:\r\n) ?[ \t])*(?:@(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\[" ()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n) ?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<> @,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*(?:,@(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@, ;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\.(?:(?:\r\n)?[ \t] )*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\ ".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*)*:(?:(?:\r\n)?[ \t])*)? (?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\["()<>@,;:\\". 
\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t])*)(?:\.(?:(?: \r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z|(?=[\[ "()<>@,;:\\".\[\]]))|"(?:[^\"\r\\]|\\.|(?:(?:\r\n)?[ \t]))*"(?:(?:\r\n)?[ \t]) *))*@(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t]) +|\Z|(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*)(?:\ .(?:(?:\r\n)?[ \t])*(?:[^()<>@,;:\\".\[\] \000-\031]+(?:(?:(?:\r\n)?[ \t])+|\Z |(?=[\["()<>@,;:\\".\[\]]))|\[([^\[\]\r\\]|\\.)*\](?:(?:\r\n)?[ \t])*))*\>(?:( ?:\r\n)?[ \t])*))*)?;\s*)
_______
|.-----.|
|| Tmux||
||_.-._||
`--)-(--`
__[=== o]___
|:::::::::::|\
jgs `-=========-`()
mod. by Paul B.
alias tm=tmux alias tl='tmux list-sessions' alias tn=tmux::new alias ta=tmux::attach alias tx=tmux::remote alias ts=tmux::search alias tssh=tmux::cluster_ssh
# Create a new session and attach to it; if it already exists, just
# attach.  With no argument, a throw-away session named T<epoch> is
# created instead.
tmux::new () {
    # BUG FIX: was "readonly session=$1" — a readonly GLOBAL, so the
    # recursive call below (and any second invocation in the same
    # shell) failed with "read-only variable".  A plain local gets a
    # fresh binding per call.
    local session=$1
    local date=date
    # Prefer GNU date when available (e.g. coreutils on macOS).
    if where gdate &>/dev/null; then
        date=gdate
    fi
    # Reap old unattached throw-away sessions first.
    tmux::cleanup_default
    if [ -z "$session" ]; then
        # No name given: recurse with a unique generated name.
        tmux::new "T$($date +%s)"
    else
        tmux new-session -d -s "$session"
        # Attach from outside tmux, or switch when already inside.
        tmux -2 attach-session -t "$session" || tmux -2 switch-client -t "$session"
    fi
}
alias tn=tmux::new
# Kill every unattached session whose name starts with "T" — the
# throw-away sessions generated by tmux::new.
tmux::cleanup_default () {
local s
# list-sessions prints "name: ..."; keep T* sessions, drop the ones
# marked "attached", then kill each remaining session by name.
# (The while loop runs in a pipeline subshell — fine here, since no
# state needs to survive the loop.)
tmux list-sessions | grep '^T.*: ' | grep -F -v attached |
cut -d: -f1 | while read -r s; do
echo "Killing $s"
tmux kill-session -t "$s"
done
}
# Attach to the named session, or to the most recent one when no name
# is given; fall back to creating it via tmux::new.
tmux::attach () {
    # BUG FIX: was "readonly session=$1" — a readonly GLOBAL, so any
    # second call in the same shell failed with "read-only variable".
    local session=$1
    if [ -z "$session" ]; then
        tmux attach-session || tmux::new
    else
        tmux attach-session -t "$session" || tmux::new "$session"
    fi
}
alias ta=tmux::attach
# Open (or re-attach to) a local session that ssh-es to $1 and runs
# tmux on the remote host.
tmux::remote () {
    # BUG FIX: was "readonly server=$1" — a readonly GLOBAL that broke
    # every call after the first.
    local server=$1
    tmux new -s "$server" "ssh -t $server 'tmux attach-session || tmux'" || \
        tmux attach-session -d -t "$server"
}
# NOTE(review): this shadows the standard tr(1) utility in interactive
# shells; the tx alias defined above already covers this function.
alias tr=tmux::remote
set-option -g prefix C-g
# Fuzzy-pick a session with fzf, then attach (outside tmux) or
# switch (inside tmux) to it.
tmux::search () {
    local -r session=$(tmux list-sessions | fzf | cut -d: -f1)
    # fzf prints nothing when aborted (Esc/Ctrl-C): bail out instead
    # of running attach/switch with an empty target.
    [ -z "$session" ] && return
    if [ -z "$TMUX" ]; then
        tmux attach-session -t "$session"
    else
        tmux switch -t "$session"
    fi
}
alias ts=tmux::search

# Cluster-ssh entry point: one pane per host with synchronized input.
# Usage: tssh <session> <host>...    or    tssh <serverlist-file>
tmux::cluster_ssh () {
    # A single existing-file argument means "read hosts from a file".
    if [ -f "$1" ]; then
        tmux::tssh_from_file "$1"
        return
    fi
    # BUG FIX: $@ was unquoted, so arguments containing whitespace
    # were re-split; "$@" preserves each argument intact.
    tmux::tssh_from_argument "$@"
}
alias tssh=tmux::cluster_ssh
# Build a synchronized multi-pane session: $1 = session name, the
# remaining arguments are hosts to ssh into (one pane each).
tmux::tssh_from_argument () {
    local -r session=$1; shift
    local first_server=$1; shift
    tmux new-session -d -s "$session" "ssh -t $first_server"
    if ! tmux list-session | grep "^$session:"; then
        echo "Could not create session $session"
        return 2
    fi
    # "$@" is portable and, in zsh, equivalent to the zsh-only
    # "${@[@]}" the original used.
    for server in "$@"; do
        tmux split-window -t "$session" "tmux select-layout tiled; ssh -t $server"
    done
    # Type once, send keystrokes to every pane.
    tmux setw -t "$session" synchronize-panes on
    # BUG FIX: this line used a pipe (|), which unconditionally ran
    # switch-client and fed it attach-session's stdout.  "||" is the
    # intended attach-or-switch fallback (matches tmux::new above).
    tmux -2 attach-session -t "$session" || tmux -2 switch-client -t "$session"
}
bind-key p setw synchronize-panes off bind-key P setw synchronize-panes on
# Read hosts (first column) from a file; the session is named after
# the file's basename without its extension.
tmux::tssh_from_file () {
    local -r serverlist=$1; shift
    local -r session=$(basename "$serverlist" | cut -d. -f1)
    # BUG FIX: the sed pattern had unescaped dots ('s/.lan./.lan/g'),
    # and "." matches ANY character, so e.g. "Xlany" substrings were
    # mangled too; escape them to match the literal ".lan.".
    # The command substitution is deliberately unquoted: each host
    # must become a separate argument.
    tmux::tssh_from_argument "$session" $(awk '{ print $1 }' "$serverlist" | sed 's/\.lan\./.lan/g')
}
$ tssh fish blowfish.buetow.org fishfinger.buetow.org \
fishbone.buetow.org user@octopus.buetow.org
$ tssh manyservers.txt
# tmux.conf fragment — one command per line (several commands had
# been fused onto single lines, which tmux rejects).
# Vi-style copy-mode selection and yank.
bind-key -T copy-mode-vi 'v' send -X begin-selection
bind-key -T copy-mode-vi 'y' send -X copy-selection-and-cancel
# Machine-local overrides, then general options.
source ~/.config/tmux/tmux.local.conf
set-option -g allow-rename off
set-option -g history-limit 100000
set-option -g status-bg '#444444'
set-option -g status-fg '#ffa500'
set-option -s escape-time 0
set-window-option -g mode-keys vi
# NOTE(review): these two copy-mode-vi bindings duplicate the pair
# above; harmless, but one pair could be dropped.
bind-key -T copy-mode-vi 'v' send -X begin-selection
bind-key -T copy-mode-vi 'y' send -X copy-selection-and-cancel
# Vim-style pane navigation (hjkl) and resizing (HJKL).
bind-key h select-pane -L
bind-key j select-pane -D
bind-key k select-pane -U
bind-key l select-pane -R
bind-key H resize-pane -L 5
bind-key J resize-pane -D 5
bind-key K resize-pane -U 5
bind-key L resize-pane -R 5
# New windows inherit the current pane's working directory.
bind-key c new-window -c '#{pane_current_path}'
# Fuzzy session switcher in a throw-away window.
bind-key F new-window -n "session-switcher" "tmux list-sessions | fzf | cut -d: -f1 | xargs tmux switch-client -t"
bind-key T choose-tree

# Pane-synchronization toggles and config reload.
bind-key p setw synchronize-panes off
bind-key P setw synchronize-panes on
bind-key r source-file ~/.config/tmux/tmux.conf \; display-message "tmux.conf reloaded"
Art by Laura Brown .'`~~~~~~~~~~~`'. ( .'11 12 1'. ) | :10 \ 2: | | :9 @-> 3: | | :8 4; | '. '..7 6 5..' .' ~-------------~ ldb
# DSL example: inspect every volume of customer C1A1 on cluster
# :UK / :uk01, report its usage, and evacuate over-subscribed ones.
Cluster :UK, :uk01 do
  all_volumes = Customer.C1A1.segments.volumes
  all_volumes.each do |vol|
    # Print the stats first, then move data off crowded volumes.
    puts vol.usage_stats
    vol.move_off! if vol.over_subscribed?
  end
end