summaryrefslogtreecommitdiff
path: root/gemfeed/examples/conf/frontends
diff options
context:
space:
mode:
authorPaul Buetow <paul@buetow.org>2025-10-02 11:28:53 +0300
committerPaul Buetow <paul@buetow.org>2025-10-02 11:28:53 +0300
commitc0f9ecf5e0b075db8e54ef1235ec80878e418398 (patch)
treed729aef5835fdfa173277c4189342976e33c6446 /gemfeed/examples/conf/frontends
parenta96adfd84d903c50d75c8771cdcc78dd5e942618 (diff)
Update content for html
Diffstat (limited to 'gemfeed/examples/conf/frontends')
-rw-r--r--gemfeed/examples/conf/frontends/README.md3
-rw-r--r--gemfeed/examples/conf/frontends/Rexfile648
-rw-r--r--gemfeed/examples/conf/frontends/etc/acme-client.conf.tpl41
-rw-r--r--gemfeed/examples/conf/frontends/etc/dserver/dtail.json.tpl127
-rw-r--r--gemfeed/examples/conf/frontends/etc/gogios.cron.tpl3
-rw-r--r--gemfeed/examples/conf/frontends/etc/gogios.json.tpl98
-rw-r--r--gemfeed/examples/conf/frontends/etc/gorum.json.tpl18
-rw-r--r--gemfeed/examples/conf/frontends/etc/httpd.conf.tpl184
-rw-r--r--gemfeed/examples/conf/frontends/etc/inetd.conf2
-rw-r--r--gemfeed/examples/conf/frontends/etc/login.conf.d/inetd3
-rw-r--r--gemfeed/examples/conf/frontends/etc/mail/aliases103
-rw-r--r--gemfeed/examples/conf/frontends/etc/mail/smtpd.conf.tpl23
-rw-r--r--gemfeed/examples/conf/frontends/etc/mail/virtualdomains20
-rw-r--r--gemfeed/examples/conf/frontends/etc/mail/virtualusers5
-rw-r--r--gemfeed/examples/conf/frontends/etc/myname.tpl1
-rw-r--r--gemfeed/examples/conf/frontends/etc/newsyslog.conf14
-rw-r--r--gemfeed/examples/conf/frontends/etc/rc.conf.local5
-rwxr-xr-xgemfeed/examples/conf/frontends/etc/rc.d/dserver.tpl16
-rwxr-xr-xgemfeed/examples/conf/frontends/etc/rc.d/gorum.tpl16
-rw-r--r--gemfeed/examples/conf/frontends/etc/relayd.conf.tpl86
-rw-r--r--gemfeed/examples/conf/frontends/etc/rsyncd.conf.tpl28
-rw-r--r--gemfeed/examples/conf/frontends/etc/taskrc.tpl40
-rw-r--r--gemfeed/examples/conf/frontends/etc/tmux.conf24
-rw-r--r--gemfeed/examples/conf/frontends/scripts/acme.sh.tpl68
-rw-r--r--gemfeed/examples/conf/frontends/scripts/dns-failover.ksh133
-rw-r--r--gemfeed/examples/conf/frontends/scripts/dserver-update-key-cache.sh.tpl34
-rw-r--r--gemfeed/examples/conf/frontends/scripts/fooodds.txt191
-rw-r--r--gemfeed/examples/conf/frontends/scripts/foostats.pl1910
-rw-r--r--gemfeed/examples/conf/frontends/scripts/gemtexter.sh.tpl65
-rw-r--r--gemfeed/examples/conf/frontends/scripts/rsync.sh.tpl8
-rw-r--r--gemfeed/examples/conf/frontends/scripts/taskwarrior.sh.tpl5
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/etc/key.conf.tpl4
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.master.tpl17
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.slave.tpl17
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/zones/master/buetow.org.zone.tpl124
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/zones/master/dtail.dev.zone.tpl21
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/zones/master/foo.zone.zone.tpl34
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/zones/master/irregular.ninja.zone.tpl23
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/zones/master/paul.cyou.zone.tpl20
-rw-r--r--gemfeed/examples/conf/frontends/var/nsd/zones/master/snonux.foo.zone.tpl20
-rw-r--r--gemfeed/examples/conf/frontends/var/www/htdocs/buetow.org/self/index.txt.tpl1
41 files changed, 4203 insertions, 0 deletions
diff --git a/gemfeed/examples/conf/frontends/README.md b/gemfeed/examples/conf/frontends/README.md
new file mode 100644
index 00000000..e2d59d95
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/README.md
@@ -0,0 +1,3 @@
+# Frontends
+
+Rexify my internet facing frontend servers!
diff --git a/gemfeed/examples/conf/frontends/Rexfile b/gemfeed/examples/conf/frontends/Rexfile
new file mode 100644
index 00000000..0079387e
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/Rexfile
@@ -0,0 +1,648 @@
+# How to use:
+#
+# rex commons
+#
+# Why use Rex to automate my servers? Because Rex is KISS, Puppet, SALT and Chef
+# are not. So, why not use Ansible then? To use Ansible correctly you should also
+# install Python on the target machines (not mandatory, though. But better).
+# Rex is programmed in Perl and there is already Perl in the base system of OpenBSD.
+# Also, I find Perl > Python (my personal opinion).
+
+use Rex -feature => [ '1.14', 'exec_autodie' ];
+use Rex::Logger;
+use File::Slurp;
+
+# REX CONFIG SECTION
+
+group frontends => 'blowfish.buetow.org:2', 'fishfinger.buetow.org:2';
+our $ircbouncer_server = 'fishfinger.buetow.org:2';
+group ircbouncer => $ircbouncer_server;
+group openbsd_canary => 'fishfinger.buetow.org:2';
+
+user 'rex';
+sudo TRUE;
+
+parallelism 5;
+
+# CUSTOM (PERL-ish) CONFIG SECTION (what Rex can't do by itself)
+# Note we are using anonymous subs here. This is so we can pass the subs as
+# Rex template variables too.
+
+our %ips = (
+ 'fishfinger' => {
+ 'ipv4' => '46.23.94.99',
+ 'ipv6' => '2a03:6000:6f67:624::99',
+ },
+ 'blowfish' => {
+ 'ipv4' => '23.88.35.144',
+ 'ipv6' => '2a01:4f8:c17:20f1::42',
+ },
+ 'domain' => 'buetow.org',
+);
+
+$ips{current_master} = $ips{fishfinger};
+$ips{current_master}{fqdn} = 'fishfinger.' . $ips{domain};
+
+$ips{current_standby} = $ips{blowfish};
+$ips{current_standby}{fqdn} = 'blowfish.' . $ips{domain};
+
+# Gather IPv6 addresses based on hostname.
+our $ipv6address = sub {
+ my $hostname = shift;
+ my $ip = $ips{$hostname}{ipv6};
+ unless ( defined $ip ) {
+ Rex::Logger::info( "Unable to determine IPv6 address for $hostname", 'error' );
+ return '::1';
+ }
+ return $ip;
+};
+
+# Bootstrapping the FQDN based on the server IP as the hostname and domain
+# facts aren't set yet due to the myname file in the first place.
+our $fqdns = sub {
+    my $ipv4 = shift;
+    for my $hostname ( grep { ref $ips{$_} && !/^current_/ } keys %ips ) {
+        return "$hostname." . $ips{domain} if $ips{$hostname}{ipv4} eq $ipv4;
+    }
+    Rex::Logger::info( "Unable to determine hostname for $ipv4", 'error' );
+    return 'HOSTNAME-UNKNOWN.' . $ips{domain};
+};
+
+# TODO: Rename rexfilesecrets.txt to confsecrets.txt?! Or wait for RCM migration.
+# The secret store. Note to myself: "geheim cat rexfilesecrets.txt"
+our $secrets = sub { read_file './secrets/' . shift };
+
+our @dns_zones = qw/buetow.org dtail.dev foo.zone irregular.ninja snonux.foo paul.cyou/;
+our @dns_zones_remove = qw//;
+
+# k3s cluster running on FreeBSD in my LAN
+our @f3s_hosts =
+ qw/f3s.buetow.org anki.f3s.buetow.org bag.f3s.buetow.org flux.f3s.buetow.org audiobookshelf.f3s.buetow.org gpodder.f3s.buetow.org radicale.f3s.buetow.org vault.f3s.buetow.org syncthing.f3s.buetow.org uprecords.f3s.buetow.org/;
+
+# optionally, only enable manually for temp time, as no password protection yet
+# push @f3s_hosts, 'registry.f3s.buetow.org';
+
+our @acme_hosts =
+ qw/buetow.org git.buetow.org paul.buetow.org joern.buetow.org dory.buetow.org ecat.buetow.org blog.buetow.org fotos.buetow.org znc.buetow.org dtail.dev foo.zone stats.foo.zone irregular.ninja alt.irregular.ninja snonux.foo/;
+push @acme_hosts, @f3s_hosts;
+
+# UTILITY TASKS
+
+task 'id', group => 'frontends', sub { say run 'id' };
+task 'dump_info', group => 'frontends', sub { dump_system_information };
+
+# OPENBSD TASKS SECTION
+
+desc 'Install base stuff';
+task 'base',
+ group => 'frontends',
+ sub {
+ pkg 'figlet', ensure => present;
+ pkg 'tig', ensure => present;
+ pkg 'vger', ensure => present;
+ pkg 'zsh', ensure => present;
+ pkg 'bash', ensure => present;
+ pkg 'helix', ensure => present;
+
+ my @pkg_scripts = qw/uptimed httpd dserver icinga2/;
+ push @pkg_scripts, 'znc' if connection->server eq $ircbouncer_server;
+ my $pkg_scripts = join ' ', @pkg_scripts;
+ append_if_no_such_line '/etc/rc.conf.local', "pkg_scripts=\"$pkg_scripts\"";
+ run 'touch /etc/rc.local';
+
+ file '/etc/myname',
+ content => template( './etc/myname.tpl', fqdns => $fqdns ),
+ owner => 'root',
+ group => 'wheel',
+ mode => '644';
+ };
+
+desc 'Setup uptimed';
+task 'uptimed',
+ group => 'frontends',
+ sub {
+ pkg 'uptimed', ensure => present;
+ service 'uptimed', ensure => 'started';
+ };
+
+desc 'Setup rsync';
+task 'rsync',
+ group => 'frontends',
+ sub {
+ pkg 'rsync', ensure => present;
+
+ # Not required, as we use rsyncd via inetd
+ # append_if_no_such_line '/etc/rc.conf.local', 'rsyncd_flags=';
+
+ file '/etc/rsyncd.conf',
+ content => template('./etc/rsyncd.conf.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '644';
+
+ file '/usr/local/bin/rsync.sh',
+ content => template('./scripts/rsync.sh.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '755';
+
+ file '/tmp/rsync.cron',
+ ensure => 'file',
+ content => "*/5\t*\t*\t*\t*\t-ns /usr/local/bin/rsync.sh",
+ mode => '600';
+
+ run '{ crontab -l -u root ; cat /tmp/rsync.cron; } | uniq | crontab -u root -';
+ run 'rm /tmp/rsync.cron';
+ };
+
+desc 'Configure the gemtexter sites';
+task 'gemtexter',
+ group => 'frontends',
+ sub {
+ file '/usr/local/bin/gemtexter.sh',
+ content => template('./scripts/gemtexter.sh.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '744';
+
+ file '/etc/daily.local',
+ ensure => 'present',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644';
+
+ append_if_no_such_line '/etc/daily.local', '/usr/local/bin/gemtexter.sh';
+ };
+
+desc 'Configure taskwarrior reminder';
+task 'taskwarrior',
+ group => 'frontends',
+ sub {
+ pkg 'taskwarrior', ensure => present;
+
+ file '/usr/local/bin/taskwarrior.sh',
+ content => template('./scripts/taskwarrior.sh.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '500';
+
+ file '/etc/taskrc',
+ content => template('./etc/taskrc.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '600';
+
+ append_if_no_such_line '/etc/daily.local', '/usr/local/bin/taskwarrior.sh';
+ };
+
+desc 'Configure ACME client';
+task 'acme',
+ group => 'frontends',
+ sub {
+ file '/etc/acme-client.conf',
+ content => template( './etc/acme-client.conf.tpl', acme_hosts => \@acme_hosts ),
+ owner => 'root',
+ group => 'wheel',
+ mode => '644';
+
+ file '/usr/local/bin/acme.sh',
+ content => template( './scripts/acme.sh.tpl', acme_hosts => \@acme_hosts ),
+ owner => 'root',
+ group => 'wheel',
+ mode => '744';
+
+ file '/etc/daily.local',
+ ensure => 'present',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644';
+
+ append_if_no_such_line '/etc/daily.local', '/usr/local/bin/acme.sh';
+ };
+
+desc 'Invoke ACME client';
+task 'acme_invoke',
+ group => 'frontends',
+ sub {
+ say run '/usr/local/bin/acme.sh';
+ };
+
+desc 'Setup httpd';
+task 'httpd',
+ group => 'frontends',
+ sub {
+ append_if_no_such_line '/etc/rc.conf.local', 'httpd_flags=';
+
+ file '/etc/httpd.conf',
+ content => template( './etc/httpd.conf.tpl', acme_hosts => \@acme_hosts ),
+ owner => 'root',
+ group => 'wheel',
+ mode => '644',
+ on_change => sub { service 'httpd' => 'restart' };
+
+ file '/var/www/htdocs/buetow.org', ensure => 'directory';
+ file '/var/www/htdocs/buetow.org/self', ensure => 'directory';
+
+ # For failover health-check.
+ file '/var/www/htdocs/buetow.org/self/index.txt',
+ ensure => 'file',
+ content => template('./var/www/htdocs/buetow.org/self/index.txt.tpl');
+
+ service 'httpd', ensure => 'started';
+ };
+
+desc 'Setup inetd';
+task 'inetd',
+ group => 'frontends',
+ sub {
+ append_if_no_such_line '/etc/rc.conf.local', 'inetd_flags=';
+
+ file '/etc/login.conf.d/inetd',
+ source => './etc/login.conf.d/inetd',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644';
+
+ file '/etc/inetd.conf',
+ source => './etc/inetd.conf',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644',
+ on_change => sub { service 'inetd' => 'restart' };
+
+ service 'inetd', ensure => 'started';
+ };
+
+desc 'Setup relayd';
+task 'relayd',
+ group => 'frontends',
+ sub {
+ append_if_no_such_line '/etc/rc.conf.local', 'relayd_flags=';
+
+ file '/etc/relayd.conf',
+ content => template(
+ './etc/relayd.conf.tpl',
+ ipv6address => $ipv6address,
+ f3s_hosts => \@f3s_hosts,
+ acme_hosts => \@acme_hosts
+ ),
+ owner => 'root',
+ group => 'wheel',
+ mode => '600',
+ on_change => sub { service 'relayd' => 'restart' };
+
+ service 'relayd', ensure => 'started';
+ append_if_no_such_line '/etc/daily.local', '/usr/sbin/rcctl start relayd';
+ };
+
+desc 'Setup OpenSMTPD';
+task 'smtpd',
+ group => 'frontends',
+ sub {
+ Rex::Logger::info('Dealing with mail aliases');
+ file '/etc/mail/aliases',
+ source => './etc/mail/aliases',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644',
+ on_change => sub { say run 'newaliases' };
+
+ Rex::Logger::info('Dealing with mail virtual domains');
+ file '/etc/mail/virtualdomains',
+ source => './etc/mail/virtualdomains',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644',
+ on_change => sub { service 'smtpd' => 'restart' };
+
+ Rex::Logger::info('Dealing with mail virtual users');
+ file '/etc/mail/virtualusers',
+ source => './etc/mail/virtualusers',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644',
+ on_change => sub { service 'smtpd' => 'restart' };
+
+ Rex::Logger::info('Dealing with smtpd.conf');
+ file '/etc/mail/smtpd.conf',
+ content => template('./etc/mail/smtpd.conf.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '644',
+ on_change => sub { service 'smtpd' => 'restart' };
+
+ service 'smtpd', ensure => 'started';
+ };
+
+desc 'Setup DNS server(s)';
+task 'nsd',
+ group => 'frontends',
+ sub {
+ my $restart = FALSE;
+ append_if_no_such_line '/etc/rc.conf.local', 'nsd_flags=';
+
+ Rex::Logger::info('Dealing with master DNS key');
+ file '/var/nsd/etc/key.conf',
+ content => template( './var/nsd/etc/key.conf.tpl', nsd_key => $secrets->('/var/nsd/etc/nsd_key.txt') ),
+ owner => 'root',
+ group => '_nsd',
+ mode => '640',
+ on_change => sub { $restart = TRUE };
+
+ Rex::Logger::info('Dealing with master DNS config');
+ file '/var/nsd/etc/nsd.conf',
+ content => template( './var/nsd/etc/nsd.conf.master.tpl', dns_zones => \@dns_zones, ),
+ owner => 'root',
+ group => '_nsd',
+ mode => '640',
+ on_change => sub { $restart = TRUE };
+
+ for my $zone (@dns_zones) {
+ Rex::Logger::info("Dealing with DNS zone $zone");
+ file "/var/nsd/zones/master/$zone.zone",
+ content => template(
+ "./var/nsd/zones/master/$zone.zone.tpl",
+ ips => \%ips,
+ f3s_hosts => \@f3s_hosts
+ ),
+ owner => 'root',
+ group => 'wheel',
+ mode => '644',
+ on_change => sub { $restart = TRUE };
+ }
+
+ for my $zone (@dns_zones_remove) {
+ Rex::Logger::info("Dealing with DNS zone removal $zone");
+ file "/var/nsd/zones/master/$zone.zone", ensure => 'absent';
+ }
+
+ service 'nsd' => 'restart' if $restart;
+ service 'nsd', ensure => 'started';
+ };
+
+desc 'Setup DNS failover script(s)';
+task 'nsd_failover',
+ group => 'frontends',
+ sub {
+ file '/usr/local/bin/dns-failover.ksh',
+ source => './scripts/dns-failover.ksh',
+ owner => 'root',
+ group => 'wheel',
+ mode => '500';
+
+ file '/tmp/root.cron',
+ ensure => 'file',
+ content => "*\t*\t*\t*\t*\t-ns /usr/local/bin/dns-failover.ksh",
+ mode => '600';
+
+ run '{ crontab -l -u root ; cat /tmp/root.cron; } | uniq | crontab -u root -';
+ run 'rm /tmp/root.cron';
+ };
+
+desc 'Setup DTail';
+task 'dtail',
+ group => 'frontends',
+ sub {
+ my $restart = FALSE;
+
+ run 'adduser -class nologin -group _dserver -batch _dserver', unless => 'id _dserver';
+ run 'usermod -d /var/run/dserver _dserver';
+
+ file '/etc/rc.d/dserver',
+ content => template('./etc/rc.d/dserver.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '755',
+ on_change => sub { $restart = TRUE };
+
+ file '/etc/dserver',
+ ensure => 'directory',
+ owner => 'root',
+ group => 'wheel',
+ mode => '755';
+
+ file '/etc/dserver/dtail.json',
+ content => template('./etc/dserver/dtail.json.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '755',
+ on_change => sub { $restart = TRUE };
+
+ file '/usr/local/bin/dserver-update-key-cache.sh',
+ content => template('./scripts/dserver-update-key-cache.sh.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '500';
+
+ append_if_no_such_line '/etc/daily.local', '/usr/local/bin/dserver-update-key-cache.sh';
+
+ service 'dserver' => 'restart' if $restart;
+ service 'dserver', ensure => 'started';
+ };
+
+desc 'Installing Gogios binary';
+task 'gogios_install',
+ group => 'frontends',
+ sub {
+ file '/usr/local/bin/gogios',
+ source => 'usr/local/bin/gogios',
+ mode => '0755',
+ owner => 'root',
+ group => 'wheel';
+ };
+
+desc 'Setup Gogios monitoring system';
+task 'gogios',
+ group => 'frontends',
+ sub {
+ pkg 'monitoring-plugins', ensure => present;
+ pkg 'nrpe', ensure => present;
+
+ my $gogios_path = '/usr/local/bin/gogios';
+
+ unless ( is_file($gogios_path) ) {
+ Rex::Logger::info( "Gogios not installed to $gogios_path! Run task 'gogios_install'", 'error' );
+ }
+
+ run 'adduser -group _gogios -batch _gogios', unless => 'id _gogios';
+ run 'usermod -d /var/run/gogios _gogios';
+
+ file '/etc/gogios.json',
+ content => template( './etc/gogios.json.tpl', acme_hosts => \@acme_hosts ),
+ owner => 'root',
+ group => 'wheel',
+ mode => '744';
+
+ file '/var/run/gogios',
+ ensure => 'directory',
+ owner => '_gogios',
+ group => '_gogios',
+ mode => '755';
+
+ file '/tmp/gogios.cron',
+ ensure => 'file',
+ content => template( './etc/gogios.cron.tpl', gogios_path => $gogios_path ),
+ mode => '600';
+
+ run 'cat /tmp/gogios.cron | crontab -u _gogios -';
+ run 'rm /tmp/gogios.cron';
+
+ append_if_no_such_line '/etc/rc.local', 'if [ ! -d /var/run/gogios ]; then mkdir /var/run/gogios; fi';
+ append_if_no_such_line '/etc/rc.local', 'chown _gogios /var/run/gogios';
+ };
+
+use Rex::Commands::Cron;
+
+desc 'Cron test';
+task 'cron_test',
+ group => 'openbsd_canary',
+ sub {
+ cron
+ add => '_gogios',
+ {
+ minute => '5',
+ hour => '*',
+ command => '/bin/ls',
+ };
+ };
+
+desc 'Installing Gorum binary';
+task 'gorum_install',
+ group => 'frontends',
+ sub {
+ file '/usr/local/bin/gorum',
+ source => 'usr/local/bin/gorum',
+ mode => '0755',
+ owner => 'root',
+ group => 'wheel';
+ };
+
+desc 'Setup Gorum quorum system';
+task 'gorum',
+ group => 'frontends',
+ sub {
+ my $restart = FALSE;
+ my $gorum_path = '/usr/local/bin/gorum';
+
+ unless ( is_file($gorum_path) ) {
+ Rex::Logger::info( "gorum not installed to $gorum_path! Run task 'gorum_install'", 'error' );
+ }
+
+ run 'adduser -class nologin -group _gorum -batch _gorum', unless => 'id _gorum';
+ run 'usermod -d /var/run/gorum _gorum';
+
+ file '/etc/gorum.json',
+ content => template('./etc/gorum.json.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '744',
+ on_change => sub { $restart = TRUE };
+
+ file '/var/run/gorum',
+ ensure => 'directory',
+ owner => '_gorum',
+ group => '_gorum',
+ mode => '755';
+
+ file '/etc/rc.d/gorum',
+ content => template('./etc/rc.d/gorum.tpl'),
+ owner => 'root',
+ group => 'wheel',
+ mode => '755',
+ on_change => sub { $restart = TRUE };
+
+ service 'gorum' => 'restart' if $restart;
+ service 'gorum', ensure => 'started';
+ };
+
+desc 'Setup Foostats';
+task 'foostats',
+ group => 'frontends',
+ sub {
+ use File::Copy;
+ for my $file (qw/foostats.pl fooodds.txt/) {
+ Rex::Logger::info("Dealing with $file");
+ my $git_script_path = $ENV{HOME} . '/git/foostats/' . $file;
+ copy( $git_script_path, './scripts/' . $file ) if -f $git_script_path;
+ }
+
+ file '/usr/local/bin/foostats.pl',
+ source => './scripts/foostats.pl',
+ owner => 'root',
+ group => 'wheel',
+ mode => '500';
+
+ file '/var/www/htdocs/buetow.org/self/foostats/fooodds.txt',
+ source => './scripts/fooodds.txt',
+ owner => 'root',
+ group => 'wheel',
+ mode => '440';
+
+ file '/var/www/htdocs/gemtexter/stats.foo.zone',
+ ensure => 'directory',
+ owner => 'root',
+ group => 'wheel',
+ mode => '755';
+
+ file '/var/gemini/stats.foo.zone',
+ ensure => 'directory',
+ owner => 'root',
+ group => 'wheel',
+ mode => '755';
+
+ append_if_no_such_line '/etc/daily.local', 'perl /usr/local/bin/foostats.pl --parse-logs --replicate --report';
+
+ my @deps = qw(p5-Digest-SHA3 p5-PerlIO-gzip p5-JSON p5-String-Util p5-LWP-Protocol-https);
+ pkg $_, ensure => present for @deps;
+
+ # For now, custom syslog config only required for foostats (to keep some logs for longer)
+ # Later, could move out to a separate task here in the Rexfile.
+ file '/etc/newsyslog.conf',
+ source => './etc/newsyslog.conf',
+ owner => 'root',
+ group => 'wheel',
+ mode => '644';
+ };
+
+desc 'Setup IRC bouncer';
+task 'ircbouncer',
+ group => 'ircbouncer',
+ sub {
+ pkg 'znc', ensure => present;
+
+ # Requires runtime config in /var/znc before it can start.
+ # => geheim search znc.conf
+ service 'znc', ensure => 'started';
+ };
+
+# COMBINED TASKS SECTION
+
+desc 'Common configs of all hosts';
+task 'commons',
+ group => 'frontends',
+ sub {
+ run_task 'base';
+ run_task 'nsd';
+ run_task 'nsd_failover';
+ run_task 'uptimed';
+ run_task 'httpd';
+ run_task 'gemtexter';
+ run_task 'taskwarrior';
+ run_task 'acme';
+ run_task 'acme_invoke';
+ run_task 'inetd';
+ run_task 'relayd';
+ run_task 'smtpd';
+ run_task 'rsync';
+ run_task 'gogios';
+
+ # run_task 'gorum';
+ run_task 'foostats';
+
+ # Requires installing the binaries first!
+ #run_task 'dtail';
+ };
+
+1;
+
+# vim: syntax=perl
diff --git a/gemfeed/examples/conf/frontends/etc/acme-client.conf.tpl b/gemfeed/examples/conf/frontends/etc/acme-client.conf.tpl
new file mode 100644
index 00000000..b52f5b0e
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/acme-client.conf.tpl
@@ -0,0 +1,41 @@
+#
+# $OpenBSD: acme-client.conf,v 1.4 2020/09/17 09:13:06 florian Exp $
+#
+authority letsencrypt {
+ api url "https://acme-v02.api.letsencrypt.org/directory"
+ account key "/etc/acme/letsencrypt-privkey.pem"
+}
+
+authority letsencrypt-staging {
+ api url "https://acme-staging-v02.api.letsencrypt.org/directory"
+ account key "/etc/acme/letsencrypt-staging-privkey.pem"
+}
+
+authority buypass {
+ api url "https://api.buypass.com/acme/directory"
+ account key "/etc/acme/buypass-privkey.pem"
+ contact "mailto:me@example.com"
+}
+
+authority buypass-test {
+ api url "https://api.test4.buypass.no/acme/directory"
+ account key "/etc/acme/buypass-test-privkey.pem"
+ contact "mailto:me@example.com"
+}
+
+<% for my $host (@$acme_hosts) { -%>
+<% for my $prefix ('', 'www.', 'standby.') { -%>
+domain <%= $prefix.$host %> {
+ domain key "/etc/ssl/private/<%= $prefix.$host %>.key"
+ domain full chain certificate "/etc/ssl/<%= $prefix.$host %>.fullchain.pem"
+ sign with letsencrypt
+}
+<% } -%>
+<% } -%>
+
+# For the server itself (e.g. TLS, or monitoring)
+domain <%= "$hostname.$domain" %> {
+ domain key "/etc/ssl/private/<%= "$hostname.$domain" %>.key"
+ domain full chain certificate "/etc/ssl/<%= "$hostname.$domain" %>.fullchain.pem"
+ sign with letsencrypt
+}
diff --git a/gemfeed/examples/conf/frontends/etc/dserver/dtail.json.tpl b/gemfeed/examples/conf/frontends/etc/dserver/dtail.json.tpl
new file mode 100644
index 00000000..6b96fbad
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/dserver/dtail.json.tpl
@@ -0,0 +1,127 @@
+{
+ "Client": {
+ "TermColorsEnable": true,
+ "TermColors": {
+ "Remote": {
+ "DelimiterAttr": "Dim",
+ "DelimiterBg": "Blue",
+ "DelimiterFg": "Cyan",
+ "RemoteAttr": "Dim",
+ "RemoteBg": "Blue",
+ "RemoteFg": "White",
+ "CountAttr": "Dim",
+ "CountBg": "Blue",
+ "CountFg": "White",
+ "HostnameAttr": "Bold",
+ "HostnameBg": "Blue",
+ "HostnameFg": "White",
+ "IDAttr": "Dim",
+ "IDBg": "Blue",
+ "IDFg": "White",
+ "StatsOkAttr": "None",
+ "StatsOkBg": "Green",
+ "StatsOkFg": "Black",
+ "StatsWarnAttr": "None",
+ "StatsWarnBg": "Red",
+ "StatsWarnFg": "White",
+ "TextAttr": "None",
+ "TextBg": "Black",
+ "TextFg": "White"
+ },
+ "Client": {
+ "DelimiterAttr": "Dim",
+ "DelimiterBg": "Yellow",
+ "DelimiterFg": "Black",
+ "ClientAttr": "Dim",
+ "ClientBg": "Yellow",
+ "ClientFg": "Black",
+ "HostnameAttr": "Dim",
+ "HostnameBg": "Yellow",
+ "HostnameFg": "Black",
+ "TextAttr": "None",
+ "TextBg": "Black",
+ "TextFg": "White"
+ },
+ "Server": {
+ "DelimiterAttr": "AttrDim",
+ "DelimiterBg": "BgCyan",
+ "DelimiterFg": "FgBlack",
+ "ServerAttr": "AttrDim",
+ "ServerBg": "BgCyan",
+ "ServerFg": "FgBlack",
+ "HostnameAttr": "AttrBold",
+ "HostnameBg": "BgCyan",
+ "HostnameFg": "FgBlack",
+ "TextAttr": "AttrNone",
+ "TextBg": "BgBlack",
+ "TextFg": "FgWhite"
+ },
+ "Common": {
+ "SeverityErrorAttr": "AttrBold",
+ "SeverityErrorBg": "BgRed",
+ "SeverityErrorFg": "FgWhite",
+ "SeverityFatalAttr": "AttrBold",
+ "SeverityFatalBg": "BgMagenta",
+ "SeverityFatalFg": "FgWhite",
+ "SeverityWarnAttr": "AttrBold",
+ "SeverityWarnBg": "BgBlack",
+ "SeverityWarnFg": "FgWhite"
+ },
+ "MaprTable": {
+ "DataAttr": "AttrNone",
+ "DataBg": "BgBlue",
+ "DataFg": "FgWhite",
+ "DelimiterAttr": "AttrDim",
+ "DelimiterBg": "BgBlue",
+ "DelimiterFg": "FgWhite",
+ "HeaderAttr": "AttrBold",
+ "HeaderBg": "BgBlue",
+ "HeaderFg": "FgWhite",
+ "HeaderDelimiterAttr": "AttrDim",
+ "HeaderDelimiterBg": "BgBlue",
+ "HeaderDelimiterFg": "FgWhite",
+ "HeaderSortKeyAttr": "AttrUnderline",
+ "HeaderGroupKeyAttr": "AttrReverse",
+ "RawQueryAttr": "AttrDim",
+ "RawQueryBg": "BgBlack",
+ "RawQueryFg": "FgCyan"
+ }
+ }
+ },
+ "Server": {
+ "SSHBindAddress": "0.0.0.0",
+ "HostKeyFile": "cache/ssh_host_key",
+ "HostKeyBits": 2048,
+ "MapreduceLogFormat": "default",
+ "MaxConcurrentCats": 2,
+ "MaxConcurrentTails": 50,
+ "MaxConnections": 50,
+ "MaxLineLength": 1048576,
+ "Permissions": {
+ "Default": [
+ "readfiles:^/.*$"
+ ],
+ "Users": {
+ "paul": [
+ "readfiles:^/.*$"
+ ],
+ "pbuetow": [
+ "readfiles:^/.*$"
+ ],
+ "jamesblake": [
+ "readfiles:^/tmp/foo.log$",
+ "readfiles:^/.*$",
+ "readfiles:!^/tmp/bar.log$"
+ ]
+ }
+ }
+ },
+ "Common": {
+ "LogDir": "/var/log/dserver",
+ "Logger": "Fout",
+ "LogRotation": "Daily",
+ "CacheDir": "cache",
+ "SSHPort": 2222,
+ "LogLevel": "Info"
+ }
+}
diff --git a/gemfeed/examples/conf/frontends/etc/gogios.cron.tpl b/gemfeed/examples/conf/frontends/etc/gogios.cron.tpl
new file mode 100644
index 00000000..fc6299c3
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/gogios.cron.tpl
@@ -0,0 +1,3 @@
+0 7 * * * <%= $gogios_path %> -renotify >/dev/null
+*/5 8-22 * * * -s <%= $gogios_path %> >/dev/null
+0 3 * * 0 <%= $gogios_path %> -force >/dev/null
diff --git a/gemfeed/examples/conf/frontends/etc/gogios.json.tpl b/gemfeed/examples/conf/frontends/etc/gogios.json.tpl
new file mode 100644
index 00000000..683f9de8
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/gogios.json.tpl
@@ -0,0 +1,98 @@
+<% our $plugin_dir = '/usr/local/libexec/nagios'; -%>
+{
+ "EmailTo": "paul",
+ "EmailFrom": "gogios@mx.buetow.org",
+ "CheckTimeoutS": 10,
+ "CheckConcurrency": 3,
+ "StateDir": "/var/run/gogios",
+ "Checks": {
+ <% for my $host (qw(master standby)) { -%>
+ <% for my $proto (4, 6) { -%>
+ "Check Ping<%= $proto %> <%= $host %>.buetow.org": {
+ "Plugin": "<%= $plugin_dir %>/check_ping",
+ "Args": ["-H", "<%= $host %>.buetow.org", "-<%= $proto %>", "-w", "100,10%", "-c", "200,15%"],
+ "Retries": 3,
+ "RetryInterval": 3
+ },
+ <% } -%>
+ <% } -%>
+ <% for my $host (qw(fishfinger blowfish)) { -%>
+ "Check DTail <%= $host %>.buetow.org": {
+ "Plugin": "/usr/local/bin/dtailhealth",
+ "Args": ["--server", "<%= $host %>.buetow.org:2222"],
+ "DependsOn": ["Check Ping4 <%= $host %>.buetow.org", "Check Ping6 <%= $host %>.buetow.org"]
+ },
+ <% } -%>
+ <% for my $host (qw(fishfinger blowfish)) { -%>
+ <% for my $proto (4, 6) { -%>
+ "Check Ping<%= $proto %> <%= $host %>.buetow.org": {
+ "Plugin": "<%= $plugin_dir %>/check_ping",
+ "Args": ["-H", "<%= $host %>.buetow.org", "-<%= $proto %>", "-w", "100,10%", "-c", "200,15%"],
+ "Retries": 3,
+ "RetryInterval": 3
+ },
+ <% } -%>
+ "Check TLS Certificate <%= $host %>.buetow.org": {
+ "Plugin": "<%= $plugin_dir %>/check_http",
+ "Args": ["--sni", "-H", "<%= $host %>.buetow.org", "-C", "20" ],
+ "DependsOn": ["Check Ping4 <%= $host %>.buetow.org", "Check Ping6 <%= $host %>.buetow.org"]
+ },
+ <% } -%>
+ <% for my $host (@$acme_hosts) { -%>
+ <% for my $prefix ('', 'standby.', 'www.') { -%>
+ <% my $depends_on = $prefix eq 'standby.' ? 'standby.buetow.org' : 'master.buetow.org'; -%>
+ "Check TLS Certificate <%= $prefix . $host %>": {
+ "Plugin": "<%= $plugin_dir %>/check_http",
+ "Args": ["--sni", "-H", "<%= $prefix . $host %>", "-C", "20" ],
+ "DependsOn": ["Check Ping4 <%= $depends_on %>", "Check Ping6 <%= $depends_on %>"]
+ },
+ <% for my $proto (4, 6) { -%>
+ "Check HTTP IPv<%= $proto %> <%= $prefix . $host %>": {
+ "Plugin": "<%= $plugin_dir %>/check_http",
+ "Args": ["<%= $prefix . $host %>", "-<%= $proto %>"],
+ "DependsOn": ["Check Ping<%= $proto %> <%= $depends_on %>"]
+ },
+ <% } -%>
+ <% } -%>
+ <% } -%>
+ <% for my $host (qw(fishfinger blowfish)) { -%>
+ <% for my $proto (4, 6) { -%>
+ "Check Dig <%= $host %>.buetow.org IPv<%= $proto %>": {
+ "Plugin": "<%= $plugin_dir %>/check_dig",
+ "Args": ["-H", "<%= $host %>.buetow.org", "-l", "buetow.org", "-<%= $proto %>"],
+ "DependsOn": ["Check Ping<%= $proto %> <%= $host %>.buetow.org"]
+ },
+ "Check SMTP <%= $host %>.buetow.org IPv<%= $proto %>": {
+ "Plugin": "<%= $plugin_dir %>/check_smtp",
+ "Args": ["-H", "<%= $host %>.buetow.org", "-<%= $proto %>"],
+ "DependsOn": ["Check Ping<%= $proto %> <%= $host %>.buetow.org"]
+ },
+ "Check Gemini TCP <%= $host %>.buetow.org IPv<%= $proto %>": {
+ "Plugin": "<%= $plugin_dir %>/check_tcp",
+ "Args": ["-H", "<%= $host %>.buetow.org", "-p", "1965", "-<%= $proto %>"],
+ "DependsOn": ["Check Ping<%= $proto %> <%= $host %>.buetow.org"]
+ },
+ <% } -%>
+ <% } -%>
+ "Check Users <%= $hostname %>": {
+ "Plugin": "<%= $plugin_dir %>/check_users",
+ "Args": ["-w", "2", "-c", "3"]
+ },
+ "Check SWAP <%= $hostname %>": {
+ "Plugin": "<%= $plugin_dir %>/check_swap",
+ "Args": ["-w", "95%", "-c", "90%"]
+ },
+ "Check Procs <%= $hostname %>": {
+ "Plugin": "<%= $plugin_dir %>/check_procs",
+ "Args": ["-w", "80", "-c", "100"]
+ },
+ "Check Disk <%= $hostname %>": {
+ "Plugin": "<%= $plugin_dir %>/check_disk",
+ "Args": ["-w", "30%", "-c", "10%"]
+ },
+ "Check Load <%= $hostname %>": {
+ "Plugin": "<%= $plugin_dir %>/check_load",
+ "Args": ["-w", "2,1,1", "-c", "4,3,3"]
+ }
+ }
+}
diff --git a/gemfeed/examples/conf/frontends/etc/gorum.json.tpl b/gemfeed/examples/conf/frontends/etc/gorum.json.tpl
new file mode 100644
index 00000000..247a9dbf
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/gorum.json.tpl
@@ -0,0 +1,18 @@
+{
+ "StateDir": "/var/run/gorum",
+ "Address": "<%= $hostname.'.'.$domain %>:4321",
+ "EmailTo": "",
+ "EmailFrom": "gorum@mx.buetow.org",
+ "Nodes": {
+ "Blowfish": {
+ "Hostname": "blowfish.buetow.org",
+ "Port": 4321,
+ "Priority": 100
+ },
+ "Fishfinger": {
+ "Hostname": "fishfinger.buetow.org",
+ "Port": 4321,
+ "Priority": 50
+ }
+ }
+}
diff --git a/gemfeed/examples/conf/frontends/etc/httpd.conf.tpl b/gemfeed/examples/conf/frontends/etc/httpd.conf.tpl
new file mode 100644
index 00000000..c3a2764e
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/httpd.conf.tpl
@@ -0,0 +1,184 @@
+<% our @prefixes = ('', 'www.', 'standby.'); -%>
+# Plain HTTP for ACME and HTTPS redirect
+<% for my $host (@$acme_hosts) { for my $prefix (@prefixes) { -%>
+server "<%= $prefix.$host %>" {
+ listen on * port 80
+ log style forwarded
+ location "/.well-known/acme-challenge/*" {
+ root "/acme"
+ request strip 2
+ }
+ location * {
+ block return 302 "https://$HTTP_HOST$REQUEST_URI"
+ }
+}
+<% } } -%>
+
+# Current server's FQDN (e.g. for mail server ACME cert requests)
+server "<%= "$hostname.$domain" %>" {
+ listen on * port 80
+ log style forwarded
+ location "/.well-known/acme-challenge/*" {
+ root "/acme"
+ request strip 2
+ }
+ location * {
+ block return 302 "https://<%= "$hostname.$domain" %>"
+ }
+}
+
+server "<%= "$hostname.$domain" %>" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ root "/htdocs/buetow.org/self"
+ directory auto index
+ }
+}
+
+# Gemtexter hosts
+<% for my $host (qw/foo.zone stats.foo.zone/) { for my $prefix (@prefixes) { -%>
+server "<%= $prefix.$host %>" {
+ listen on * port 8080
+ log style forwarded
+ location "/.git*" {
+ block return 302 "https://<%= $prefix.$host %>"
+ }
+ location * {
+ <% if ($prefix eq 'www.') { -%>
+ block return 302 "https://<%= $host %>$REQUEST_URI"
+ <% } else { -%>
+ root "/htdocs/gemtexter/<%= $host %>"
+ directory auto index
+ <% } -%>
+ }
+}
+<% } } -%>
+
+# Redirect to paul.buetow.org
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>buetow.org" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ block return 302 "https://paul.buetow.org$REQUEST_URI"
+ }
+}
+
+# Redirect blog to foo.zone
+server "<%= $prefix %>blog.buetow.org" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ block return 302 "https://foo.zone$REQUEST_URI"
+ }
+}
+
+server "<%= $prefix %>snonux.foo" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ block return 302 "https://foo.zone/about$REQUEST_URI"
+ }
+}
+
+server "<%= $prefix %>paul.buetow.org" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ block return 302 "https://foo.zone/about$REQUEST_URI"
+ }
+}
+<% } -%>
+
+# Redirect to github.dtail.dev
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>dtail.dev" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ block return 302 "https://github.dtail.dev$REQUEST_URI"
+ }
+}
+<% } -%>
+
+# Irregular Ninja special hosts
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>irregular.ninja" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ root "/htdocs/irregular.ninja"
+ directory auto index
+ }
+}
+<% } -%>
+
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>alt.irregular.ninja" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ root "/htdocs/alt.irregular.ninja"
+ directory auto index
+ }
+}
+<% } -%>
+
+# joern special host
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>joern.buetow.org" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ root "/htdocs/joern/"
+ directory auto index
+ }
+}
+<% } -%>
+
+# Dory special host
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>dory.buetow.org" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ root "/htdocs/joern/dory.buetow.org"
+ directory auto index
+ }
+}
+<% } -%>
+
+# ecat special host
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>ecat.buetow.org" {
+ listen on * port 8080
+ log style forwarded
+ location * {
+ root "/htdocs/joern/ecat.buetow.org"
+ directory auto index
+ }
+}
+<% } -%>
+
+<% for my $prefix (@prefixes) { -%>
+server "<%= $prefix %>fotos.buetow.org" {
+ listen on * port 8080
+ log style forwarded
+ root "/htdocs/buetow.org/fotos"
+ directory auto index
+}
+<% } -%>
+
+# Defaults
+server "default" {
+ listen on * port 80
+ log style forwarded
+ block return 302 "https://foo.zone$REQUEST_URI"
+}
+
+server "default" {
+ listen on * port 8080
+ log style forwarded
+ block return 302 "https://foo.zone$REQUEST_URI"
+}
diff --git a/gemfeed/examples/conf/frontends/etc/inetd.conf b/gemfeed/examples/conf/frontends/etc/inetd.conf
new file mode 100644
index 00000000..13163877
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/inetd.conf
@@ -0,0 +1,2 @@
+127.0.0.1:11965 stream tcp nowait www /usr/local/bin/vger vger -v
+rsync stream tcp nowait root /usr/local/bin/rsync rsyncd --daemon
diff --git a/gemfeed/examples/conf/frontends/etc/login.conf.d/inetd b/gemfeed/examples/conf/frontends/etc/login.conf.d/inetd
new file mode 100644
index 00000000..c8620c41
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/login.conf.d/inetd
@@ -0,0 +1,3 @@
+inetd:\
+ :maxproc=10:\
+ :tc=daemon:
diff --git a/gemfeed/examples/conf/frontends/etc/mail/aliases b/gemfeed/examples/conf/frontends/etc/mail/aliases
new file mode 100644
index 00000000..91bf1d06
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/mail/aliases
@@ -0,0 +1,103 @@
+#
+# $OpenBSD: aliases,v 1.68 2020/01/24 06:17:37 tedu Exp $
+#
+# Aliases in this file will NOT be expanded in the header from
+# Mail, but WILL be visible over networks or from /usr/libexec/mail.local.
+#
+# >>>>>>>>>> The program "newaliases" must be run after
+# >> NOTE >> this file is updated for any changes to
+# >>>>>>>>>> show through to smtpd.
+#
+
+# Basic system aliases -- these MUST be present
+MAILER-DAEMON: postmaster
+postmaster: root
+
+# General redirections for important pseudo accounts
+daemon: root
+ftp-bugs: root
+operator: root
+www: root
+admin: root
+
+# Redirections for pseudo accounts that should not receive mail
+_bgpd: /dev/null
+_dhcp: /dev/null
+_dpb: /dev/null
+_dvmrpd: /dev/null
+_eigrpd: /dev/null
+_file: /dev/null
+_fingerd: /dev/null
+_ftp: /dev/null
+_hostapd: /dev/null
+_identd: /dev/null
+_iked: /dev/null
+_isakmpd: /dev/null
+_iscsid: /dev/null
+_ldapd: /dev/null
+_ldpd: /dev/null
+_mopd: /dev/null
+_nsd: /dev/null
+_ntp: /dev/null
+_ospfd: /dev/null
+_ospf6d: /dev/null
+_pbuild: /dev/null
+_pfetch: /dev/null
+_pflogd: /dev/null
+_ping: /dev/null
+_pkgfetch: /dev/null
+_pkguntar: /dev/null
+_portmap: /dev/null
+_ppp: /dev/null
+_rad: /dev/null
+_radiusd: /dev/null
+_rbootd: /dev/null
+_relayd: /dev/null
+_ripd: /dev/null
+_rstatd: /dev/null
+_rusersd: /dev/null
+_rwalld: /dev/null
+_smtpd: /dev/null
+_smtpq: /dev/null
+_sndio: /dev/null
+_snmpd: /dev/null
+_spamd: /dev/null
+_switchd: /dev/null
+_syslogd: /dev/null
+_tcpdump: /dev/null
+_traceroute: /dev/null
+_tftpd: /dev/null
+_unbound: /dev/null
+_unwind: /dev/null
+_vmd: /dev/null
+_x11: /dev/null
+_ypldap: /dev/null
+bin: /dev/null
+build: /dev/null
+nobody: /dev/null
+_tftp_proxy: /dev/null
+_ftp_proxy: /dev/null
+_sndiop: /dev/null
+_syspatch: /dev/null
+_slaacd: /dev/null
+sshd: /dev/null
+
+# Well-known aliases -- these should be filled in!
+root: paul
+manager: root
+dumper: root
+
+# RFC 2142: NETWORK OPERATIONS MAILBOX NAMES
+abuse: root
+noc: root
+security: root
+
+# RFC 2142: SUPPORT MAILBOX NAMES FOR SPECIFIC INTERNET SERVICES
+hostmaster: root
+# usenet: root
+# news: usenet
+webmaster: root
+# ftp: root
+
+paul: paul.buetow@protonmail.com
+albena: albena.buetow@protonmail.com
diff --git a/gemfeed/examples/conf/frontends/etc/mail/smtpd.conf.tpl b/gemfeed/examples/conf/frontends/etc/mail/smtpd.conf.tpl
new file mode 100644
index 00000000..7764b345
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/mail/smtpd.conf.tpl
@@ -0,0 +1,23 @@
+# This is the smtpd server system-wide configuration file.
+# See smtpd.conf(5) for more information.
+
+# I used https://www.checktls.com/TestReceiver for testing.
+
+pki "buetow_org_tls" cert "/etc/ssl/<%= "$hostname.$domain" %>.fullchain.pem"
+pki "buetow_org_tls" key "/etc/ssl/private/<%= "$hostname.$domain" %>.key"
+
+table aliases file:/etc/mail/aliases
+table virtualdomains file:/etc/mail/virtualdomains
+table virtualusers file:/etc/mail/virtualusers
+
+listen on socket
+listen on all tls pki "buetow_org_tls" hostname "<%= "$hostname.$domain" %>"
+#listen on all
+
+action localmail mbox alias <aliases>
+action receive mbox virtual <virtualusers>
+action outbound relay
+
+match from any for domain <virtualdomains> action receive
+match from local for local action localmail
+match from local for any action outbound
diff --git a/gemfeed/examples/conf/frontends/etc/mail/virtualdomains b/gemfeed/examples/conf/frontends/etc/mail/virtualdomains
new file mode 100644
index 00000000..b59554ac
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/mail/virtualdomains
@@ -0,0 +1,20 @@
+buetow.org
+paul.buetow.org
+mx.buetow.org
+de.buetow.org
+bg.buetow.org
+uk.buetow.org
+us.buetow.org
+es.buetow.org
+dev.buetow.org
+oss.buetow.org
+ex.buetow.org
+xxx.buetow.org
+newsletter.buetow.org
+gadgets.buetow.org
+orders.buetow.org
+nospam.buetow.org
+snonux.foo
+dtail.dev
+foo.zone
+paul.cyou
diff --git a/gemfeed/examples/conf/frontends/etc/mail/virtualusers b/gemfeed/examples/conf/frontends/etc/mail/virtualusers
new file mode 100644
index 00000000..6cfac58b
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/mail/virtualusers
@@ -0,0 +1,5 @@
+albena@buetow.org albena.buetow@protonmail.com
+joern@buetow.org df2hbradio@gmail.com
+dory@buetow.org df2hbradio@gmail.com
+ecat@buetow.org df2hbradio@gmail.com
+@ paul.buetow@protonmail.com
diff --git a/gemfeed/examples/conf/frontends/etc/myname.tpl b/gemfeed/examples/conf/frontends/etc/myname.tpl
new file mode 100644
index 00000000..dcd4ca04
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/myname.tpl
@@ -0,0 +1 @@
+<%= $fqdns->($vio0_ip) %>
diff --git a/gemfeed/examples/conf/frontends/etc/newsyslog.conf b/gemfeed/examples/conf/frontends/etc/newsyslog.conf
new file mode 100644
index 00000000..bbd1aa55
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/newsyslog.conf
@@ -0,0 +1,14 @@
+# logfile_name owner:group mode count size when flags
+/var/cron/log root:wheel 600 3 10 * Z
+/var/log/authlog root:wheel 640 7 * 168 Z
+/var/log/daemon 640 14 300 * Z
+/var/log/lpd-errs 640 7 10 * Z
+/var/log/maillog 640 7 * 24 Z
+/var/log/messages 644 5 300 * Z
+/var/log/secure 600 7 * 168 Z
+/var/log/wtmp 644 7 * $M1D4 B ""
+/var/log/xferlog 640 7 250 * Z
+/var/log/pflog 600 3 250 * ZB "pkill -HUP -u root -U root -t - -x pflogd"
+/var/www/logs/access.log 644 14 * $W0 Z "pkill -USR1 -u root -U root -x httpd"
+/var/www/logs/error.log 644 7 250 * Z "pkill -USR1 -u root -U root -x httpd"
+/var/log/fooodds 640 7 300 * Z
diff --git a/gemfeed/examples/conf/frontends/etc/rc.conf.local b/gemfeed/examples/conf/frontends/etc/rc.conf.local
new file mode 100644
index 00000000..842f16d7
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/rc.conf.local
@@ -0,0 +1,5 @@
+httpd_flags=
+inetd_flags=
+nsd_flags=
+pkg_scripts="uptimed httpd"
+relayd_flags=
diff --git a/gemfeed/examples/conf/frontends/etc/rc.d/dserver.tpl b/gemfeed/examples/conf/frontends/etc/rc.d/dserver.tpl
new file mode 100755
index 00000000..aec80f54
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/rc.d/dserver.tpl
@@ -0,0 +1,16 @@
+#!/bin/ksh
+
+daemon="/usr/local/bin/dserver"
+daemon_flags="-cfg /etc/dserver/dtail.json"
+daemon_user="_dserver"
+
+. /etc/rc.d/rc.subr
+
+rc_reload=NO
+
+rc_pre() {
+ install -d -o _dserver /var/log/dserver
+ install -d -o _dserver /var/run/dserver/cache
+}
+
+rc_cmd $1 &
diff --git a/gemfeed/examples/conf/frontends/etc/rc.d/gorum.tpl b/gemfeed/examples/conf/frontends/etc/rc.d/gorum.tpl
new file mode 100755
index 00000000..3b4f403d
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/rc.d/gorum.tpl
@@ -0,0 +1,16 @@
+#!/bin/ksh
+
+daemon="/usr/local/bin/gorum"
+daemon_flags="-cfg /etc/gorum.json"
+daemon_user="_gorum"
+daemon_logger="daemon.info"
+
+. /etc/rc.d/rc.subr
+
+rc_reload=NO
+
+rc_pre() {
+ install -d -o _gorum /var/log/gorum
+}
+
+rc_cmd $1 &
diff --git a/gemfeed/examples/conf/frontends/etc/relayd.conf.tpl b/gemfeed/examples/conf/frontends/etc/relayd.conf.tpl
new file mode 100644
index 00000000..1900c0bf
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/relayd.conf.tpl
@@ -0,0 +1,86 @@
+<% our @prefixes = ('', 'www.', 'standby.'); -%>
+log connection
+
+# Wireguard endpoints of the k3s cluster nodes running in FreeBSD bhyve Linux VMs via Wireguard tunnels
+table <f3s> {
+ 192.168.2.120
+ 192.168.2.121
+ 192.168.2.122
+}
+
+# Same backends, separate table for registry service on port 30001
+table <f3s_registry> {
+ 192.168.2.120
+ 192.168.2.121
+ 192.168.2.122
+}
+
+# Local OpenBSD httpd
+table <localhost> {
+ 127.0.0.1
+ ::1
+}
+
+http protocol "https" {
+ <% for my $host (@$acme_hosts) { for my $prefix (@prefixes) { -%>
+ tls keypair <%= $prefix.$host -%>
+ <% } } -%>
+ tls keypair <%= $hostname.'.'.$domain -%>
+
+ match request header set "X-Forwarded-For" value "$REMOTE_ADDR"
+ match request header set "X-Forwarded-Proto" value "https"
+
+ # WebSocket support for audiobookshelf
+ pass header "Connection"
+ pass header "Upgrade"
+ pass header "Sec-WebSocket-Key"
+ pass header "Sec-WebSocket-Version"
+ pass header "Sec-WebSocket-Extensions"
+ pass header "Sec-WebSocket-Protocol"
+
+ <% for my $host (@$f3s_hosts) { for my $prefix (@prefixes) { -%>
+ <% if ($host eq 'registry.f3s.buetow.org') { -%>
+ match request quick header "Host" value "<%= $prefix.$host -%>" forward to <f3s_registry>
+ <% } else { -%>
+ match request quick header "Host" value "<%= $prefix.$host -%>" forward to <f3s>
+ <% } } } -%>
+}
+
+relay "https4" {
+ listen on <%= $vio0_ip %> port 443 tls
+ protocol "https"
+ forward to <localhost> port 8080
+ forward to <f3s_registry> port 30001 check tcp
+ forward to <f3s> port 80 check tcp
+}
+
+relay "https6" {
+ listen on <%= $ipv6address->($hostname) %> port 443 tls
+ protocol "https"
+ forward to <localhost> port 8080
+ forward to <f3s_registry> port 30001 check tcp
+ forward to <f3s> port 80 check tcp
+}
+
+tcp protocol "gemini" {
+ tls keypair foo.zone
+ tls keypair stats.foo.zone
+ tls keypair snonux.foo
+ tls keypair paul.buetow.org
+ tls keypair standby.foo.zone
+ tls keypair standby.stats.foo.zone
+ tls keypair standby.snonux.foo
+ tls keypair standby.paul.buetow.org
+}
+
+relay "gemini4" {
+ listen on <%= $vio0_ip %> port 1965 tls
+ protocol "gemini"
+ forward to 127.0.0.1 port 11965
+}
+
+relay "gemini6" {
+ listen on <%= $ipv6address->($hostname) %> port 1965 tls
+ protocol "gemini"
+ forward to 127.0.0.1 port 11965
+}
diff --git a/gemfeed/examples/conf/frontends/etc/rsyncd.conf.tpl b/gemfeed/examples/conf/frontends/etc/rsyncd.conf.tpl
new file mode 100644
index 00000000..e9fe3cf8
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/rsyncd.conf.tpl
@@ -0,0 +1,28 @@
+<% my $allow = '*.wg0.wan.buetow.org,*.wg0,localhost'; %>
+max connections = 5
+timeout = 300
+
+[joernshtdocs]
+comment = Joerns htdocs
+path = /var/www/htdocs/joern
+read only = yes
+list = yes
+uid = www
+gid = www
+hosts allow = <%= $allow %>
+
+# [publicgemini]
+# comment = Public Gemini capsule content
+# path = /var/gemini
+# read only = yes
+# list = yes
+# uid = www
+# gid = www
+# hosts allow = <%= $allow %>
+
+# [sslcerts]
+# comment = TLS certificates
+# path = /etc/ssl
+# read only = yes
+# list = yes
+# hosts allow = <%= $allow %>
diff --git a/gemfeed/examples/conf/frontends/etc/taskrc.tpl b/gemfeed/examples/conf/frontends/etc/taskrc.tpl
new file mode 100644
index 00000000..ed97d385
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/taskrc.tpl
@@ -0,0 +1,40 @@
+# [Created by task 2.6.2 7/9/2023 20:52:31]
+# Taskwarrior program configuration file.
+# For more documentation, see https://taskwarrior.org or try 'man task', 'man task-color',
+# 'man task-sync' or 'man taskrc'
+
+# Here is an example of entries that use the default, override and blank values
+# variable=foo -- By specifying a value, this overrides the default
+# variable= -- By specifying no value, this means no default
+# #variable=foo -- By commenting out the line, or deleting it, this uses the default
+
+# You can also reference environment variables:
+# variable=$HOME/task
+# variable=$VALUE
+
+# Use the command 'task show' to see all defaults and overrides
+
+# Files
+data.location=/home/git/.task
+
+# To use the default location of the XDG directories,
+# move this configuration file from ~/.taskrc to ~/.config/task/taskrc and uncomment below
+
+#data.location=~/.local/share/task
+#hooks.location=~/.config/task/hooks
+
+# Color theme (uncomment one to use)
+#include light-16.theme
+#include light-256.theme
+#include dark-16.theme
+#include dark-256.theme
+#include dark-red-256.theme
+#include dark-green-256.theme
+#include dark-blue-256.theme
+#include dark-violets-256.theme
+#include dark-yellow-green.theme
+#include dark-gray-256.theme
+#include dark-gray-blue-256.theme
+#include solarized-dark-256.theme
+#include solarized-light-256.theme
+#include no-color.theme
diff --git a/gemfeed/examples/conf/frontends/etc/tmux.conf b/gemfeed/examples/conf/frontends/etc/tmux.conf
new file mode 100644
index 00000000..14493260
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/etc/tmux.conf
@@ -0,0 +1,24 @@
+set-option -g allow-rename off
+set-option -g default-terminal "screen-256color"
+set-option -g history-limit 100000
+set-option -g status-bg '#444444'
+set-option -g status-fg '#ffa500'
+
+set-window-option -g mode-keys vi
+
+bind-key h select-pane -L
+bind-key j select-pane -D
+bind-key k select-pane -U
+bind-key l select-pane -R
+
+bind-key H resize-pane -L 5
+bind-key J resize-pane -D 5
+bind-key K resize-pane -U 5
+bind-key L resize-pane -R 5
+
+bind-key b break-pane -d
+bind-key c new-window -c '#{pane_current_path}'
+bind-key p setw synchronize-panes off
+bind-key P setw synchronize-panes on
+bind-key r source-file ~/.tmux.conf \; display-message "~/.tmux.conf reloaded"
+bind-key T choose-tree
diff --git a/gemfeed/examples/conf/frontends/scripts/acme.sh.tpl b/gemfeed/examples/conf/frontends/scripts/acme.sh.tpl
new file mode 100644
index 00000000..8d306092
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/acme.sh.tpl
@@ -0,0 +1,68 @@
+#!/bin/sh
+
+MY_IP=`ifconfig vio0 | awk '$1 == "inet" { print $2 }'`
+
+# New hosts may not have a cert; just copy foo.zone as a
+# placeholder, so that services can at least start properly.
+# The cert will be updated by subsequent acme-client runs!
+ensure_placeholder_cert () {
+ host=$1
+ copy_from=foo.zone
+
+ if [ ! -f /etc/ssl/$host.crt ]; then
+ cp -v /etc/ssl/$copy_from.crt /etc/ssl/$host.crt
+ cp -v /etc/ssl/$copy_from.fullchain.pem /etc/ssl/$host.fullchain.pem
+ cp -v /etc/ssl/private/$copy_from.key /etc/ssl/private/$host.key
+ fi
+}
+
+handle_cert () {
+ host=$1
+ host_ip=`host $host | awk '/has address/ { print $(NF) }'`
+
+ grep -q "^server \"$host\"" /etc/httpd.conf
+ if [ $? -ne 0 ]; then
+ echo "Host $host not configured in httpd, skipping..."
+ return
+ fi
+ ensure_placeholder_cert "$host"
+
+ if [ "$MY_IP" != "$host_ip" ]; then
+ echo "Not serving $host, skipping..."
+ return
+ fi
+
+ # Create symlink, so that relayd also can read it.
+ crt_path=/etc/ssl/$host
+ if [ -e $crt_path.crt ]; then
+ rm $crt_path.crt
+ fi
+ ln -s $crt_path.fullchain.pem $crt_path.crt
+ # Requesting and renewing certificate.
+ /usr/sbin/acme-client -v $host
+}
+
+has_update=no
+<% for my $host (@$acme_hosts) { -%>
+<% for my $prefix ('', 'www.', 'standby.') { -%>
+handle_cert <%= $prefix.$host %>
+if [ $? -eq 0 ]; then
+ has_update=yes
+fi
+<% } -%>
+<% } -%>
+
+# Current server's FQDN (e.g. for mail server certs)
+handle_cert <%= "$hostname.$domain" %>
+if [ $? -eq 0 ]; then
+ has_update=yes
+fi
+
+# Pick up the new certs.
+if [ $has_update = yes ]; then
+ # TLS offloading fully moved to relayd now
+ # /usr/sbin/rcctl reload httpd
+
+ /usr/sbin/rcctl reload relayd
+ /usr/sbin/rcctl restart smtpd
+fi
diff --git a/gemfeed/examples/conf/frontends/scripts/dns-failover.ksh b/gemfeed/examples/conf/frontends/scripts/dns-failover.ksh
new file mode 100644
index 00000000..dfc24ee3
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/dns-failover.ksh
@@ -0,0 +1,133 @@
+#!/bin/ksh
+
+ZONES_DIR=/var/nsd/zones/master/
+DEFAULT_MASTER=fishfinger.buetow.org
+DEFAULT_STANDBY=blowfish.buetow.org
+
+determine_master_and_standby () {
+ local master=$DEFAULT_MASTER
+ local standby=$DEFAULT_STANDBY
+
+ # Weekly auto-failover for Let's Encrypt automation
+ local -i -r week_of_the_year=$(date +%U)
+ if [ $(( week_of_the_year % 2 )) -ne 0 ]; then
+ local tmp=$master
+ master=$standby
+ standby=$tmp
+ fi
+
+ local -i health_ok=1
+ if ! ftp -4 -o - https://$master/index.txt | grep -q "Welcome to $master"; then
+ echo "https://$master/index.txt IPv4 health check failed"
+ health_ok=0
+ elif ! ftp -6 -o - https://$master/index.txt | grep -q "Welcome to $master"; then
+ echo "https://$master/index.txt IPv6 health check failed"
+ health_ok=0
+ fi
+
+ if [ $health_ok -eq 0 ]; then
+ local tmp=$master
+ master=$standby
+ standby=$tmp
+ fi
+
+ echo "Master is $master, standby is $standby"
+
+ host $master | awk '/has address/ { print $(NF) }' >/var/nsd/run/master_a
+ host $master | awk '/has IPv6 address/ { print $(NF) }' >/var/nsd/run/master_aaaa
+ host $standby | awk '/has address/ { print $(NF) }' >/var/nsd/run/standby_a
+ host $standby | awk '/has IPv6 address/ { print $(NF) }' >/var/nsd/run/standby_aaaa
+}
+
+transform () {
+ sed -E '
+ /IN A .*; Enable failover/ {
+ /^standby/! {
+ s/^(.*) 300 IN A (.*) ; (.*)/\1 300 IN A '$(cat /var/nsd/run/master_a)' ; \3/;
+ }
+ /^standby/ {
+ s/^(.*) 300 IN A (.*) ; (.*)/\1 300 IN A '$(cat /var/nsd/run/standby_a)' ; \3/;
+ }
+ }
+ /IN AAAA .*; Enable failover/ {
+ /^standby/! {
+ s/^(.*) 300 IN AAAA (.*) ; (.*)/\1 300 IN AAAA '$(cat /var/nsd/run/master_aaaa)' ; \3/;
+ }
+ /^standby/ {
+ s/^(.*) 300 IN AAAA (.*) ; (.*)/\1 300 IN AAAA '$(cat /var/nsd/run/standby_aaaa)' ; \3/;
+ }
+ }
+ / ; serial/ {
+ s/^( +) ([0-9]+) .*; (.*)/\1 '$(date +%s)' ; \3/;
+ }
+ '
+}
+
+zone_is_ok () {
+ local -r zone=$1
+ local -r domain=${zone%.zone}
+ dig $domain @localhost | grep -q "$domain.*IN.*NS"
+}
+
+failover_zone () {
+ local -r zone_file=$1
+ local -r zone=$(basename $zone_file)
+
+    # Race condition (e.g. script execution aborted in the middle of a previous run)
+ if [ -f $zone_file.bak ]; then
+ mv $zone_file.bak $zone_file
+ fi
+
+ cat $zone_file | transform > $zone_file.new.tmp
+
+ grep -v ' ; serial' $zone_file.new.tmp > $zone_file.new.noserial.tmp
+ grep -v ' ; serial' $zone_file > $zone_file.old.noserial.tmp
+
+ echo "Has zone $zone_file changed?"
+ if diff -u $zone_file.old.noserial.tmp $zone_file.new.noserial.tmp; then
+ echo "The zone $zone_file hasn't changed"
+ rm $zone_file.*.tmp
+ return 0
+ fi
+
+ cp $zone_file $zone_file.bak
+ mv $zone_file.new.tmp $zone_file
+ rm $zone_file.*.tmp
+ echo "Reloading nsd"
+ nsd-control reload
+
+ if ! zone_is_ok $zone; then
+ echo "Rolling back $zone_file changes"
+ cp $zone_file $zone_file.invalid
+ mv $zone_file.bak $zone_file
+ echo "Reloading nsd"
+ nsd-control reload
+ zone_is_ok $zone
+ return 3
+ fi
+
+ for cleanup in invalid bak; do
+ if [ -f $zone_file.$cleanup ]; then
+ rm $zone_file.$cleanup
+ fi
+ done
+
+ echo "Failover of zone $zone to $MASTER completed"
+ return 1
+}
+
+main () {
+ determine_master_and_standby
+
+ local -i ec=0
+ for zone_file in $ZONES_DIR/*.zone; do
+ if ! failover_zone $zone_file; then
+ ec=1
+ fi
+ done
+
+ # ec other than 0: CRON will send out an E-Mail.
+ exit $ec
+}
+
+main
diff --git a/gemfeed/examples/conf/frontends/scripts/dserver-update-key-cache.sh.tpl b/gemfeed/examples/conf/frontends/scripts/dserver-update-key-cache.sh.tpl
new file mode 100644
index 00000000..86b5ecf9
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/dserver-update-key-cache.sh.tpl
@@ -0,0 +1,34 @@
+#!/bin/ksh
+
+CACHEDIR=/var/run/dserver/cache
+DSERVER_USER=_dserver
+DSERVER_GROUP=_dserver
+
+echo 'Updating SSH key cache'
+
+ls /home/ | while read remoteuser; do
+ keysfile=/home/$remoteuser/.ssh/authorized_keys
+
+ if [ -f $keysfile ]; then
+ cachefile=$CACHEDIR/$remoteuser.authorized_keys
+ echo "Caching $keysfile -> $cachefile"
+
+ cp $keysfile $cachefile
+ chown $DSERVER_USER:$DSERVER_GROUP $cachefile
+ chmod 600 $cachefile
+ fi
+done
+
+# Cleanup obsolete public SSH keys
+find $CACHEDIR -name \*.authorized_keys -type f |
+while read cachefile; do
+ remoteuser=$(basename $cachefile | cut -d. -f1)
+ keysfile=/home/$remoteuser/.ssh/authorized_keys
+
+ if [ ! -f $keysfile ]; then
+ echo 'Deleting obsolete cache file $cachefile'
+ rm $cachefile
+ fi
+done
+
+echo 'All set...'
diff --git a/gemfeed/examples/conf/frontends/scripts/fooodds.txt b/gemfeed/examples/conf/frontends/scripts/fooodds.txt
new file mode 100644
index 00000000..0e08bdd1
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/fooodds.txt
@@ -0,0 +1,191 @@
+%
++
+..
+/actuator
+/actuator/health
+/admin
+/ajax
+alfacgiapi
+/ALFA_DATA
+/api
+/apply.cgi
+/ARest1.exe
+.asp
+/aspera
+/assets
+/audiobookshelf
+/auth
+/autodiscover
+/.aws
+/bac
+/back
+/backup
+/bak
+/base
+/.bash_history
+/bf
+/bin
+/bin/sh
+/bk
+/bkp
+/blog
+/blurs
+/boaform
+/boafrm
+/.bod
+/Br7q
+/british-airways
+/buetow.org.zip
+/buetow.zip
+/burodecredito
+/c
+/.cache
+/ccaguardians
+/cdn-cgi
+/centralbankthailand
+/cfdump.packetsdatabase.com
+/charlesbridge
+/check.txt
+/cimtechsolutions
+/.circleci
+/c/k2
+/ckfinder
+/client.zip
+/cloud-config.yml
+/cloudflare.com
+/clssettlement
+/cmd,/simZysh/register_main/setCookie
+/cn/cmd
+/codeberg
+/CODE_OF_CONDUCT.md
+/columbiagas
+/common_page
+/comp
+/concerto
+/config
+/config.json
+/config.xml
+/Config.xml
+/config.yaml
+/config.yml
+/connectivitycheck.gstatic.com
+/connector.sds
+/console
+/contact-information.html
+/contact-us
+/containers
+/CONTRIBUTING.md
+/credentials.txt
+/crivo
+/current_config
+/cwservices
+/daAV
+/dana-cached
+/dana-na
+/database_backup.sql
+/.database.bak
+/database.sql
+/data.zip
+/db
+/debug
+/debug.cgi
+/decoherence-is-just-realizing-this
+/demo
+/developmentserver
+/directory.gz
+/directory.tar
+/directory.zip
+/dir.html
+/DnHb
+/dns-query
+docker-compose
+/docker-compose.yml
+/?document=images
+/Dorybau2.html
+/Dorybau.html
+/dory.buetow.org
+/download
+/DpbF
+/druid
+/dtail.dev.gz
+/dtail.dev.sql
+/dtail.dev.tar.gz
+/dtail.dev.zip
+/dtail.html
+/dtail.zip
+/dump.sql
+/dvQ1
+/dvr/cmd
+/edualy-shammin
+/ekggho
+.env
+/epa
+/etc
+/eW9h
+/ews
+/F3to
+/f3Yk
+/fahrzeugtechnik.fh-joanneum.at
+/failedbythefos
+/features
+/federalhomeloanbankofdesmoines
+/fhir
+/fhir-server
+/file-manager
+/files
+/files.zip
+/firstfinancial
+/flash
+/flower
+/foostats
+/footlocker
+/foo.zip
+/foo.zone.bz2
+/foozone.webp
+/foo.zone.zip
+/form.html
+/freeze.na4u.ru
+/frontend.zip
+/ftpsync.settings
+/full_backup.zip
+/FvwmRearrange.png
+/gdb.pdf
+/geoserver
+.git
+/git-guides
+/global-protect
+/gm-donate.net
+/GMUs
+/goform
+/google.com
+/GoRU
+/GponForm
+/helpdesk
+/high-noise-level-for-that-earth-day-with-colors-gay
+/his-viewpoint-is-not-economics-until-they-harden
+/hN6p
+HNAP1
+/hp
+/_ignition
+jndi:ldap
+.js
+.lua
+microsoft.exchange
+/owa/
+.php
+/phpinfo
+phpunit
+/portal/redlion
+/_profiler
+.rar
+/RDWeb
+robots.txt
+/SDK
+/sitemap.xml
+/sites
+.sql
+/ueditor
+/vendor
+@vite
+wordpress
+/wp
diff --git a/gemfeed/examples/conf/frontends/scripts/foostats.pl b/gemfeed/examples/conf/frontends/scripts/foostats.pl
new file mode 100644
index 00000000..a440d941
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/foostats.pl
@@ -0,0 +1,1910 @@
+#!/usr/bin/perl
+
+use v5.38;
+
+# Those are enabled automatically now w/ this version of Perl
+# use strict;
+# use warnings;
+
+use builtin qw(true false);
+use experimental qw(builtin);
+
+use feature qw(refaliasing);
+no warnings qw(experimental::refaliasing);
+
+# Debugging aids like diagnostics are noisy in production.
+# Removed per review: enable locally when debugging only.
+
+use constant VERSION => 'v0.1.0';
+
+# Package: FileHelper — small file/JSON helpers
+# - Purpose: Atomic writes, gzip JSON read/write, and line reading.
+# - Notes: Dies on I/O errors; JSON encoding uses core JSON.
+package FileHelper {
+    use JSON;
+
+    # Sub: write
+    # - Purpose: Atomic write to a file via "$path.tmp" and rename.
+    # - Params: $path (str) destination; $content (str) contents to write.
+    # - Return: undef; dies on failure.
+    sub write ($path, $content) {
+        open my $fh, '>', "$path.tmp" or die "\nCannot open file: $!";
+        print $fh $content;
+
+        # Buffered write errors only surface at close time, so check it.
+        close $fh or die "$path.tmp: $!";
+
+        # The rename is the atomic "commit"; a silent failure here would leave
+        # a stale destination file behind, so check it as well.
+        rename "$path.tmp", $path or die "$path.tmp: $!";
+    }
+
+    # Sub: write_json_gz
+    # - Purpose: JSON-encode $data and write it gzipped atomically.
+    # - Params: $path (str) destination path; $data (ref/scalar) Perl data.
+    # - Return: undef; dies on failure.
+    sub write_json_gz ($path, $data) {
+        my $json = encode_json $data;
+
+        say "Writing $path";
+        open my $fd, '>:gzip', "$path.tmp" or die "$path.tmp: $!";
+        print $fd $json;
+        close $fd or die "$path.tmp: $!";
+
+        rename "$path.tmp", $path or die "$path.tmp: $!";
+    }
+
+    # Sub: read_json_gz
+    # - Purpose: Read a gzipped JSON file and decode to Perl data.
+    # - Params: $path (str) path to .json.gz file.
+    # - Return: Perl data structure; dies on I/O or parse errors.
+    sub read_json_gz ($path) {
+        say "Reading $path";
+        open my $fd, '<:gzip', $path or die "$path: $!";
+
+        # Slurp the whole file: decode_json imposes scalar context, so a bare
+        # <$fd> would only read the first line. That happened to work because
+        # write_json_gz emits single-line JSON, but slurping is also robust
+        # against multi-line (e.g. pretty-printed) input.
+        my $json = decode_json do { local $/; <$fd> };
+        close $fd;
+        return $json;
+    }
+
+    # Sub: read_lines
+    # - Purpose: Slurp file lines and chomp newlines.
+    # - Params: $path (str) file path.
+    # - Return: list of lines (no trailing newlines).
+    sub read_lines ($path) {
+        open my $fh, '<', $path or die "$path: $!";
+        chomp(my @lines = <$fh>);
+        close $fh;
+        return @lines;
+    }
+}
+
+# Package: DateHelper — date range helpers
+# - Purpose: Produce date strings used for report windows.
+# - Format: Dates are returned as YYYYMMDD strings.
+package DateHelper {
+    use Time::Piece;
+
+    # Sub: last_month_dates
+    # - Purpose: Dates from yesterday back to 31 days ago (inclusive).
+    # - Params: none.
+    # - Return: list of YYYYMMDD strings, newest first.
+    sub last_month_dates () {
+        my $now = localtime;
+
+        # One day in seconds; Time::Piece arithmetic works on seconds.
+        my $one_day = 24 * 60 * 60;
+
+        return map { ($now - $_ * $one_day)->strftime('%Y%m%d') } 1 .. 31;
+    }
+}
+
+# Package: Foostats::Logreader — parse and normalize logs
+# - Purpose: Read web and gemini logs, anonymize IPs, and emit normalized events.
+# - Output Event: { proto, host, ip_hash, ip_proto, date, time, uri_path, status }
+package Foostats::Logreader {
+ use Digest::SHA3 'sha3_512_base64';
+ use File::stat;
+ use PerlIO::gzip;
+ use Time::Piece;
+ use String::Util qw(contains startswith endswith);
+
+ # Make log locations configurable (env overrides) to enable testing.
+ # Sub: gemini_logs_glob
+ # - Purpose: Glob for gemini-related logs; env override for testing.
+ # - Return: glob pattern string.
+ sub gemini_logs_glob { $ENV{FOOSTATS_GEMINI_LOGS_GLOB} // '/var/log/daemon*' }
+
+ # Sub: web_logs_glob
+ # - Purpose: Glob for web access logs; env override for testing.
+ # - Return: glob pattern string.
+ sub web_logs_glob { $ENV{FOOSTATS_WEB_LOGS_GLOB} // '/var/www/logs/access.log*' }
+
+ # Sub: anonymize_ip
+ # - Purpose: Classify IPv4/IPv6 and map IP to a stable SHA3-512 base64 hash.
+ # - Params: $ip (str) source IP.
+ # - Return: ($hash, $proto) where $proto is 'IPv4' or 'IPv6'.
+ # - Note: any ':' in the address is taken to mean IPv6; the hash is one-way,
+ #   so raw addresses are never stored anywhere downstream.
+ sub anonymize_ip ($ip) {
+ my $ip_proto = contains($ip, ':') ? 'IPv6' : 'IPv4';
+ my $ip_hash = sha3_512_base64 $ip;
+ return ($ip_hash, $ip_proto);
+ }
+
+ # Sub: read_lines
+ # - Purpose: Iterate files matching glob by age; invoke $cb for each line.
+ # - Params: $glob (str) file glob; $cb (code) callback ($year, @fields).
+ # - Return: undef; stops early if callback returns undef for a file.
+ sub read_lines ($glob, $cb) {
+ # NOTE(review): year() is invoked below with the open filehandle, not a
+ # path string; Perl's stat() accepts a handle too, so this still yields
+ # the file's mtime year.
+ my sub year ($path) {
+ localtime((stat $path)->mtime)->strftime('%Y');
+ }
+
+ my sub open_file ($path) {
+ # Rotated logs are gzipped; pick the matching PerlIO layer.
+ my $flag = $path =~ /\.gz$/ ? '<:gzip' : '<';
+ open my $fd, $flag, $path or die "$path: $!";
+ return $fd;
+ }
+
+ my $last = false;
+ say 'File path glob matches: ' . join(' ', glob $glob);
+
+ LAST:
+ # Newest files first: -M is the file age in days, ascending age order.
+ for my $path (sort { -M $a <=> -M $b } glob $glob) {
+ say "Processing $path";
+
+ my $file = open_file $path;
+ my $year = year $file;
+
+ while (<$file>) {
+ # Skip newsyslog rotation marker lines.
+ next if contains($_, 'logfile turned over');
+
+ # last == true means: After this file, don't process more
+ # (the current file is still read through to its end, though).
+ $last = true unless defined $cb->($year, split / +/);
+ }
+
+ say "Closing $path (last:$last)";
+ close $file;
+ last LAST if $last;
+ }
+ }
+
+ # Sub: parse_web_logs
+ # - Purpose: Parse web log lines into normalized events and pass to callback.
+ # - Params: $last_processed_date (YYYYMMDD int) lower bound; $cb (code) event consumer.
+ # - Return: undef.
+ sub parse_web_logs ($last_processed_date, $cb) {
+ my sub parse_date ($date) {
+ # The field includes the leading '[' of '[DD/Mon/YYYY:HH:MM:SS'.
+ my $t = Time::Piece->strptime($date, '[%d/%b/%Y:%H:%M:%S');
+ return ($t->strftime('%Y%m%d'), $t->strftime('%H%M%S'));
+ }
+
+ my sub parse_web_line (@line) {
+ my ($date, $time) = parse_date $line [4];
+ # YYYYMMDD strings compare correctly as numbers. Returning undef also
+ # signals read_lines to stop after finishing the current file.
+ return undef if $date < $last_processed_date;
+
+ # X-Forwarded-For?
+ # NOTE(review): assumes the configured log format places the
+ # forwarded-for address second-to-last and the peer address in field 1
+ # — confirm against the httpd log style in use.
+ my $ip = $line[-2] eq '-' ? $line[1] : $line[-2];
+ my ($ip_hash, $ip_proto) = anonymize_ip $ip;
+
+ return {
+ proto => 'web',
+ host => $line[0],
+ ip_hash => $ip_hash,
+ ip_proto => $ip_proto,
+ date => $date,
+ time => $time,
+ uri_path => $line[7],
+ status => $line[9],
+ };
+ }
+
+ read_lines web_logs_glob(), sub ($year, @line) {
+ $cb->(parse_web_line @line);
+ };
+ }
+
+ # Sub: parse_gemini_logs
+ # - Purpose: Parse vger/relayd lines, merge paired entries, and emit events.
+ # - Params: $last_processed_date (YYYYMMDD int); $cb (code) event consumer.
+ # - Return: undef.
+ sub parse_gemini_logs ($last_processed_date, $cb) {
+ my sub parse_date ($year, @line) {
+ # syslog lines carry no year, so it is taken from the file's mtime.
+ # NOTE(review): the format only names '%b %d'; presumably strptime
+ # ignores the trailing time-of-day in $timestr — confirm.
+ my $timestr = "$line[0] $line[1]";
+ return Time::Piece->strptime($timestr, '%b %d')->strftime("$year%m%d");
+ }
+
+ my sub parse_vger_line ($year, @line) {
+ # Quoted request URL like "gemini://host/path": strip the quotes, then
+ # split into scheme, empty segment, host and the remaining path.
+ my $full_path = $line[5];
+ $full_path =~ s/"//g;
+ my ($proto, undef, $host, $uri_path) = split '/', $full_path, 4;
+ $uri_path = '' unless defined $uri_path;
+
+ return {
+ proto => 'gemini',
+ host => $host,
+ uri_path => "/$uri_path",
+ status => $line[6],
+ date => int(parse_date($year, @line)),
+ time => $line[2],
+ };
+ }
+
+ my sub parse_relayd_line ($year, @line) {
+ my $date = int(parse_date($year, @line));
+
+ # Only relayd sees the client address; vger does not log it.
+ my ($ip_hash, $ip_proto) = anonymize_ip $line [12];
+ return {
+ ip_hash => $ip_hash,
+ ip_proto => $ip_proto,
+ date => $date,
+ time => $line[2],
+ };
+ }
+
+ # Expect one vger and one relayd log line per event! So collect
+ # both events (one from one log line each) and then merge the result hash!
+ my ($vger, $relayd);
+ read_lines gemini_logs_glob(), sub ($year, @line) {
+ if ($line[4] eq 'vger:') {
+ $vger = parse_vger_line $year, @line;
+ }
+ elsif ($line[5] eq 'relay' and startswith($line[6], 'gemini')) {
+ $relayd = parse_relayd_line $year, @line;
+ # Older than the last run: tell read_lines to stop after this file.
+ return undef
+ if $relayd->{date} < $last_processed_date;
+ }
+
+ # Pair the two halves by their HH:MM:SS timestamp. On key collisions
+ # (e.g. 'date') the relayd values win in the merged hash.
+ if (defined $vger and defined $relayd and $vger->{time} eq $relayd->{time}) {
+ $cb->({ %$vger, %$relayd });
+ $vger = $relayd = undef;
+ }
+
+ true;
+ };
+ }
+
+ # Sub: parse_logs
+ # - Purpose: Coordinate parsing for both web and gemini, aggregating into stats.
+ # - Params: $last_web_date, $last_gemini_date (YYYYMMDD int), $odds_file, $odds_log.
+ # - Return: stats hashref keyed by "proto_YYYYMMDD".
+ sub parse_logs ($last_web_date, $last_gemini_date, $odds_file, $odds_log) {
+ my $agg = Foostats::Aggregator->new($odds_file, $odds_log);
+
+ say "Last web date: $last_web_date";
+ say "Last gemini date: $last_gemini_date";
+
+ parse_web_logs $last_web_date, sub ($event) {
+ $agg->add($event);
+ };
+ parse_gemini_logs $last_gemini_date, sub ($event) {
+ $agg->add($event);
+ };
+
+ return $agg->{stats};
+ }
+}
+
+# Package: Foostats::Filter — request filtering and logging
+# - Purpose: Identify odd URI patterns and excessive requests per second per IP.
+# - Notes: Maintains an in-process blocklist for the current run.
+package Foostats::Filter {
+ use String::Util qw(contains startswith endswith);
+
+ # Sub: new
+ # - Purpose: Construct a filter with odd patterns and a log path.
+ # - Params: $odds_file (str) pattern list, one substring per line;
+ #   $log_path (str) append-only log file.
+ # - Return: blessed Foostats::Filter instance.
+ sub new ($class, $odds_file, $log_path) {
+ say "Logging filter to $log_path";
+ my @odds = FileHelper::read_lines($odds_file);
+ bless { odds => \@odds, log_path => $log_path }, $class;
+ }
+
+ # Sub: ok
+ # - Purpose: Check if an event passes filters; updates block state/logging.
+ # - Params: $event (hashref) normalized request.
+ # - Return: true if allowed; false if blocked.
+ # - Note: %blocked is a 'state' hash, so the blocklist lives for the whole
+ #   process run and is shared by every Filter instance in it.
+ sub ok ($self, $event) {
+ state %blocked = ();
+ # Once blocked, an IP hash stays blocked for the rest of this run.
+ return false if exists $blocked{ $event->{ip_hash} };
+
+ if ($self->odd($event) or $self->excessive($event)) {
+ ($blocked{ $event->{ip_hash} } //= 0)++;
+ return false;
+ }
+ else {
+ return true;
+ }
+ }
+
+ # Sub: odd
+ # - Purpose: Match URI path against user-provided odd patterns (substring match).
+ # - Params: $event (hashref) with uri_path.
+ # - Return: true if odd (blocked), false otherwise.
+ sub odd ($self, $event) {
+ \my $uri_path = \$event->{uri_path};
+
+ for ($self->{odds}->@*) {
+ # Skip blank lines and '#' comment lines from the patterns file.
+ next if !defined $_ || $_ eq '' || /^\s*#/;
+ # Plain substring containment — patterns are not regexes.
+ next unless contains($uri_path, $_);
+ $self->log('WARN', $uri_path, "contains $_ and is odd and will therefore be blocked!");
+ return true;
+ }
+
+ $self->log('OK', $uri_path, "appears fine...");
+ return false;
+ }
+
+ # Sub: log
+ # - Purpose: Deduplicated append-only logging for filter decisions.
+ # - Params: $severity (OK|WARN), $subject (str), $message (str).
+ # - Return: undef.
+ # - Note: %dedup is process-lifetime state, so each subject is logged at
+ #   most once per run; the handle is (re)opened in append mode per call.
+ sub log ($self, $severity, $subject, $message) {
+ state %dedup;
+
+ # Don't log if path was already logged
+ return if exists $dedup{$subject};
+ $dedup{$subject} = 1;
+
+ open(my $fh, '>>', $self->{log_path}) or die $self->{log_path} . ": $!";
+ print $fh "$severity: $subject $message\n";
+ close($fh);
+ }
+
+ # Sub: excessive
+ # - Purpose: Block if an IP makes more than one request within the same second.
+ # - Params: $event (hashref) with time and ip_hash.
+ # - Return: true if blocked; false otherwise.
+ # - Note: only detects consecutively processed events: as soon as an event
+ #   with a different timestamp arrives, the per-second counters reset.
+ sub excessive ($self, $event) {
+ \my $time = \$event->{time};
+ \my $ip_hash = \$event->{ip_hash};
+
+ # 'state' initializers run once: seeded from the very first event seen.
+ state $last_time = $time; # Time with second: 'HH:MM:SS'
+ state %count = (); # IPs accessing within the same second!
+
+ if ($last_time ne $time) {
+ $last_time = $time;
+ %count = ();
+ return false;
+ }
+
+ # IP requested site more than once within the same second!?
+ if (1 < ++($count{$ip_hash} //= 0)) {
+ $self->log('WARN', $ip_hash, "blocked due to excessive requesting...");
+ return true;
+ }
+
+ return false;
+ }
+}
+
+# Package: Foostats::Aggregator — in-memory stats builder
+# - Purpose: Apply filters and accumulate counts, unique IPs per feed/page.
+package Foostats::Aggregator {
+    use String::Util qw(contains startswith endswith);
+
+    use constant {
+        ATOM_FEED_URI => '/gemfeed/atom.xml',
+        GEMFEED_URI   => '/gemfeed/index.gmi',
+        GEMFEED_URI_2 => '/gemfeed/',
+    };
+
+    # Sub: new
+    # - Purpose: Construct aggregator with a filter and empty stats store.
+    # - Params: $odds_file (str), $odds_log (str).
+    # - Return: Foostats::Aggregator instance.
+    sub new ($class, $odds_file, $odds_log) {
+        my $filter = Foostats::Filter->new($odds_file, $odds_log);
+        return bless { filter => $filter, stats => {} }, $class;
+    }
+
+    # Sub: add
+    # - Purpose: Apply filter, update counts and unique-IP sets, and return event.
+    # - Params: $event (hashref) normalized event; ignored if undef.
+    # - Return: $event; filtered events increment the filtered count only.
+    sub add ($self, $event) {
+        return undef unless defined $event;
+
+        my $bucket_key = $event->{proto} . '_' . $event->{date};
+
+        # Stats data model per protocol+day (key: "proto_YYYYMMDD"):
+        # - count: per-proto request count, per IP version, and filtered count
+        # - feed_ips: unique IPs per feed type (atom_feed, gemfeed)
+        # - page_ips: unique IPs per host and per URL
+        my $bucket = $self->{stats}{$bucket_key} //= {
+            count    => { filtered => 0 },
+            feed_ips => { atom_feed => {}, gemfeed => {} },
+            page_ips => { hosts => {}, urls => {} },
+        };
+
+        unless ($self->{filter}->ok($event)) {
+            $bucket->{count}{filtered}++;
+            return $event;
+        }
+
+        $self->add_count($bucket, $event);
+        $self->add_page_ips($bucket, $event) unless $self->add_feed_ips($bucket, $event);
+        return $event;
+    }
+
+    # Sub: add_count
+    # - Purpose: Increment totals by protocol and IP version.
+    # - Params: $stats (hashref) date bucket; $event (hashref).
+    # - Return: undef.
+    sub add_count ($self, $stats, $event) {
+        my $count = $stats->{count};
+        $count->{ $event->{proto} }++;
+        $count->{ $event->{ip_proto} }++;
+    }
+
+    # Sub: add_feed_ips
+    # - Purpose: If event hits feed endpoints, add unique IP and short-circuit.
+    # - Params: $stats (hashref), $event (hashref).
+    # - Return: 1 if a feed matched; 0 otherwise.
+    sub add_feed_ips ($self, $stats, $event) {
+        my $feeds   = $stats->{feed_ips};
+        my $path    = $event->{uri_path};
+        my $ip_hash = $event->{ip_hash};
+
+        # Atom feed (exact path match, allow optional query string)
+        if ($path =~ m{^/gemfeed/atom\.xml(?:[?#].*)?$}) {
+            $feeds->{atom_feed}{$ip_hash}++;
+            return 1;
+        }
+
+        # Gemfeed index: '/gemfeed/' or '/gemfeed/index.gmi' (optionally with query)
+        if ($path =~ m{^/gemfeed/(?:index\.gmi)?(?:[?#].*)?$}) {
+            $feeds->{gemfeed}{$ip_hash}++;
+            return 1;
+        }
+
+        return 0;
+    }
+
+    # Sub: add_page_ips
+    # - Purpose: Track unique IPs per host and per URL for .html/.gmi pages.
+    # - Params: $stats (hashref), $event (hashref).
+    # - Return: undef.
+    sub add_page_ips ($self, $stats, $event) {
+        my $pages = $stats->{page_ips};
+        my $path  = $event->{uri_path};
+
+        # Only .html and .gmi documents count as pages.
+        return unless endswith($path, '.html') or endswith($path, '.gmi');
+
+        $pages->{hosts}{ $event->{host} }{ $event->{ip_hash} }++;
+        $pages->{urls}{ $event->{host} . $path }{ $event->{ip_hash} }++;
+    }
+}
+
+# Package: Foostats::FileOutputter — write per-day stats to disk
+# - Purpose: Persist aggregated stats to gzipped JSON files under a stats dir.
+package Foostats::FileOutputter {
+    use JSON;
+    use Sys::Hostname;
+    use PerlIO::gzip;
+
+    # Sub: new
+    # - Purpose: Create outputter with stats_dir; ensures directory exists.
+    # - Params: %args (hash) must include stats_dir.
+    # - Return: Foostats::FileOutputter instance; dies if mkdir fails.
+    sub new ($class, %args) {
+        my $self = bless \%args, $class;
+        mkdir $self->{stats_dir} or die $self->{stats_dir} . ": $!" unless -d $self->{stats_dir};
+        return $self;
+    }
+
+    # Sub: last_processed_date
+    # - Purpose: Determine the most recent processed date for a protocol for this host.
+    # - Params: $proto (str) 'web' or 'gemini'.
+    # - Return: YYYYMMDD int (0 if none found).
+    sub last_processed_date ($self, $proto) {
+        my $hostname = hostname();
+
+        # Glob results sort lexically, so the last entry carries the newest
+        # YYYYMMDD stamp.
+        my @processed = glob $self->{stats_dir} . "/${proto}_????????.$hostname.json.gz";
+        return 0 unless @processed;
+
+        # \Q...\E quotes the dots in the hostname and the literal suffix dots
+        # are escaped; previously both acted as regex wildcards.
+        my ($date) = $processed[-1] =~ /_(\d{8})\.\Q$hostname\E\.json\.gz/;
+
+        # Defensive: if the newest file name does not match the expected
+        # pattern, report "nothing processed yet" instead of warning on undef.
+        return defined $date ? int($date) : 0;
+    }
+
+    # Sub: write
+    # - Purpose: Write one gzipped JSON file per date bucket to stats_dir.
+    # - Params: none (uses $self->{stats}).
+    # - Return: undef.
+    sub write ($self) {
+        $self->for_dates(
+            sub ($self, $date_key, $stats) {
+                my $hostname = hostname();
+                my $path = $self->{stats_dir} . "/${date_key}.$hostname.json.gz";
+                FileHelper::write_json_gz $path, $stats;
+            }
+        );
+    }
+
+    # Sub: for_dates
+    # - Purpose: Iterate date-keyed stats in sorted order and call $cb.
+    # - Params: $cb (code) receives ($self, $date_key, $stats).
+    # - Return: undef.
+    sub for_dates ($self, $cb) {
+        $cb->($self, $_, $self->{stats}{$_}) for sort keys $self->{stats}->%*;
+    }
+}
+
+# Package: Foostats::Replicator — pull partner stats files over HTTP(S)
+# - Purpose: Fetch recent partner node stats into local stats dir.
+package Foostats::Replicator {
+    use JSON;
+    use File::Basename;
+    use LWP::UserAgent;
+    use String::Util qw(endswith);
+
+    # Sub: replicate
+    # - Purpose: For each proto and the last 31 days, fetch the partner's files.
+    # - Params: $stats_dir (str) local dir; $partner_node (str) hostname.
+    # - Return: undef (fetches are best-effort).
+    sub replicate ($stats_dir, $partner_node) {
+        say "Replicating from $partner_node";
+
+        for my $proto (qw(gemini web)) {
+            my $index = 0;
+
+            for my $date (DateHelper::last_month_dates) {
+                my $file_name = "${proto}_${date}.$partner_node.json.gz";
+
+                replicate_file(
+                    "https://$partner_node/foostats/$file_name",
+                    "$stats_dir/$file_name",
+                    $index++ < 3, # Always replicate the newest 3 files.
+                );
+            }
+        }
+    }
+
+    # Sub: replicate_file
+    # - Purpose: Download one URL to a destination unless already present (or forced).
+    # - Params: $remote_url (str) source; $dest_path (str) destination; $force (bool).
+    # - Return: undef; failures are logged, not fatal.
+    sub replicate_file ($remote_url, $dest_path, $force) {
+        # Skip the download when the file already exists locally, unless forced.
+        return if -f $dest_path and not $force;
+
+        say "Replicating $remote_url to $dest_path (force:$force)... ";
+        my $response = LWP::UserAgent->new->get($remote_url);
+
+        if ($response->is_success) {
+            FileHelper::write $dest_path, $response->decoded_content;
+            say 'done';
+        }
+        else {
+            say "\nFailed to fetch the file: " . $response->status_line;
+        }
+    }
+}
+
+# Package: Foostats::Merger — merge per-host daily stats into a single view
+# - Purpose: Merge multiple node files per day into totals and unique counts.
+package Foostats::Merger {
+
+    # Sub: merge
+    # - Purpose: Produce merged stats for the last month (date => stats hashref).
+    # - Params: $stats_dir (str) directory with daily gz JSON files.
+    # - Return: hash (not ref) of date => merged stats.
+    sub merge ($stats_dir) {
+        my %merge;
+        $merge{$_} = merge_for_date($stats_dir, $_) for DateHelper::last_month_dates;
+        return %merge;
+    }
+
+    # Sub: merge_for_date
+    # - Purpose: Merge all node files for a specific date into one stats hashref.
+    # - Params: $stats_dir (str), $date (YYYYMMDD str/int).
+    # - Return: { feed_ips => {...}, count => {...}, page_ips => {...} }.
+    sub merge_for_date ($stats_dir, $date) {
+        printf "Merging for date %s\n", $date;
+        my @stats = stats_for_date($stats_dir, $date);
+        return {
+            feed_ips => feed_ips(@stats),
+            count    => count(@stats),
+            page_ips => page_ips(@stats),
+        };
+    }
+
+    # Sub: merge_ips
+    # - Purpose: Merge helper: sums numbers, merges hash-of-hash counters.
+    # - Params: $dst (hashref target), $src (hashref source), $key_transform (code|undef).
+    # - Return: undef; updates $dst in place; dies on incompatible types.
+    # - Note: parameters renamed from $a/$b, which shadow Perl's sort globals.
+    sub merge_ips ($dst, $src, $key_transform = undef) {
+        # Sum the leaf counters of one nested hash into another.
+        my sub merge ($into, $from) {
+            while (my ($key, $val) = each %$from) {
+                $into->{$key} //= 0;
+                $into->{$key} += $val;
+            }
+        }
+
+        my $is_num = qr/^\d+(\.\d+)?$/;
+
+        while (my ($key, $val) = each %$src) {
+            $key = $key_transform->($key) if defined $key_transform;
+
+            if (not exists $dst->{$key}) {
+                $dst->{$key} = $val;
+            }
+            elsif (ref($dst->{$key}) eq 'HASH' && ref($val) eq 'HASH') {
+                merge($dst->{$key}, $val);
+            }
+            elsif ($dst->{$key} =~ $is_num && $val =~ $is_num) {
+                $dst->{$key} += $val;
+            }
+            else {
+                # Bug fix: die() concatenates its LIST and does not interpolate
+                # printf-style formats, so the message previously came out with
+                # raw '%s' placeholders. Feed it through sprintf instead.
+                die sprintf(
+                    "Not merging key '%s' (ref:%s): '%s' (ref:%s) with '%s' (ref:%s)\n",
+                    $key, ref($key), $dst->{$key}, ref($dst->{$key}), $val, ref($val)
+                );
+            }
+        }
+    }
+
+    # Sub: feed_ips
+    # - Purpose: Merge feed unique-IP sets from per-proto stats into totals.
+    # - Params: @stats (list of stats hashrefs) each with {proto, feed_ips}.
+    # - Return: hashref with Total and per-proto unique feed IP counts.
+    sub feed_ips (@stats) {
+        my (%gemini, %web);
+
+        for my $stats (@stats) {
+            my $merge = $stats->{proto} eq 'web' ? \%web : \%gemini;
+            printf "Merging proto %s feed IPs\n", $stats->{proto};
+            merge_ips($merge, $stats->{feed_ips});
+        }
+
+        my %total;
+        merge_ips(\%total, $web{$_}) for keys %web;
+        merge_ips(\%total, $gemini{$_}) for keys %gemini;
+
+        # Guard with // {}: on days where only one protocol produced stats the
+        # other side's buckets do not exist, and keys() on undef would die.
+        my %merge = (
+            'Total' => scalar keys %total,
+            'Gemini Gemfeed' => scalar keys %{ $gemini{gemfeed} // {} },
+            'Gemini Atom' => scalar keys %{ $gemini{atom_feed} // {} },
+            'Web Gemfeed' => scalar keys %{ $web{gemfeed} // {} },
+            'Web Atom' => scalar keys %{ $web{atom_feed} // {} },
+        );
+
+        return \%merge;
+    }
+
+    # Sub: count
+    # - Purpose: Sum request counters across stats for the day.
+    # - Params: @stats (list of stats hashrefs) each with {count}.
+    # - Return: hashref of summed counters.
+    sub count (@stats) {
+        my %merge;
+
+        for my $stats (@stats) {
+            while (my ($key, $val) = each $stats->{count}->%*) {
+                $merge{$key} //= 0;
+                $merge{$key} += $val;
+            }
+        }
+
+        return \%merge;
+    }
+
+    # Sub: page_ips
+    # - Purpose: Merge unique IPs per host and per URL; .gmi URLs are folded
+    #   into their .html counterparts so both renderings count as one page.
+    # - Params: @stats (list of stats hashrefs) with {page_ips}{urls,hosts}.
+    # - Return: hashref with urls/hosts each mapping => unique IP counts.
+    sub page_ips (@stats) {
+        my %merge = (
+            urls  => {},
+            hosts => {}
+        );
+
+        for my $key (keys %merge) {
+            merge_ips(
+                $merge{$key},
+                $_->{page_ips}->{$key} // {}, # Guard: tolerate missing buckets
+                sub ($key) {
+                    $key =~ s/\.gmi$/\.html/;
+                    $key;
+                }
+            ) for @stats;
+
+            # Keep only the unique IP count per entry
+            $merge{$key}->{$_} = scalar keys $merge{$key}->{$_}->%* for keys $merge{$key}->%*;
+        }
+
+        return \%merge;
+    }
+
+    # Sub: stats_for_date
+    # - Purpose: Load all stats files for a date across protos; tag proto/path.
+    # - Params: $stats_dir (str), $date (YYYYMMDD).
+    # - Return: list of stats hashrefs.
+    sub stats_for_date ($stats_dir, $date) {
+        my @stats;
+
+        for my $proto (qw(gemini web)) {
+            for my $path (<$stats_dir/${proto}_${date}.*.json.gz>) {
+                printf "Reading %s\n", $path;
+                push @stats, FileHelper::read_json_gz($path);
+                @{ $stats[-1] }{qw(proto path)} = ($proto, $path);
+            }
+        }
+
+        return @stats;
+    }
+}
+
+# Package: Foostats::Reporter — build gemtext/HTML daily and summary reports
+# - Purpose: Render daily reports and rolling summaries (30/365), and index pages.
+package Foostats::Reporter {
+ use Time::Piece;
+ use HTML::Entities qw(encode_entities);
+
+    our @TRUNCATED_URL_MAPPINGS;
+
+    # Drop every recorded truncation mapping (fresh state per report run).
+    sub reset_truncated_url_mappings { @TRUNCATED_URL_MAPPINGS = (); }
+
+    # Remember that $truncated was derived from $original, so linkification
+    # can later point the shortened display text at the full URL.
+    sub _record_truncated_url_mapping {
+        my ($truncated, $original) = @_;
+        push @TRUNCATED_URL_MAPPINGS, { truncated => $truncated, original => $original };
+    }
+
+    # Find and consume the original URL recorded for a truncated candidate.
+    # Returns the original URL, or undef when no mapping exists.
+    sub _lookup_full_url_for {
+        my ($candidate) = @_;
+        my $idx = 0;
+        for my $entry (@TRUNCATED_URL_MAPPINGS) {
+            if ($entry->{truncated} eq $candidate) {
+                # Each mapping is single-use: remove it once resolved.
+                splice @TRUNCATED_URL_MAPPINGS, $idx, 1;
+                return $entry->{original};
+            }
+            $idx++;
+        }
+        return undef;
+    }
+
+    # Sub: truncate_url
+    # - Purpose: Middle-ellipsize long URLs to fit within a target length.
+    # - Params: $url (str), $max_length (int, default 100).
+    # - Return: the URL, shortened with '...' in the middle when too long.
+    sub truncate_url {
+        my ($url, $limit) = @_;
+        $limit //= 100;    # Default to 100 characters
+
+        return $url if length($url) <= $limit;
+
+        my $ellipsis = '...';
+        my $budget   = $limit - length($ellipsis);
+
+        # Keep 40% of the budget from the head and 60% from the tail, since
+        # the end of a URL usually carries the interesting part.
+        my $head_len = int($budget * 0.4);
+        my $tail_len = $budget - $head_len;
+
+        return substr($url, 0, $head_len) . $ellipsis . substr($url, -$tail_len);
+    }
+
+    # Sub: truncate_urls_for_table
+    # - Purpose: Truncate URL cells in place so table rows fit the target width.
+    # - Params: $url_rows (arrayref of [url, count]), $count_column_header (str).
+    # - Return: undef; mutates $url_rows and records truncation mappings.
+    sub truncate_urls_for_table {
+        my ($url_rows, $count_column_header) = @_;
+
+        # The count column is as wide as its widest value or its header.
+        my $count_width = length($count_column_header);
+        for my $row (@$url_rows) {
+            my $width = length($row->[1]);
+            $count_width = $width if $width > $count_width;
+        }
+
+        # Row format: "| URL... | count |" with padding
+        # Calculate: "| " (2) + URL + " | " (3) + count_with_padding + " |" (2)
+        my $url_budget = 100 - 7 - $count_width;
+        $url_budget = 70 if $url_budget > 70;    # Cap at reasonable length
+
+        for my $row (@$url_rows) {
+            my $url       = $row->[0];
+            my $truncated = truncate_url($url, $url_budget);
+
+            # Remember the mapping so linkification can restore the full URL.
+            _record_truncated_url_mapping($truncated, $url) if $truncated ne $url;
+            $row->[0] = $truncated;
+        }
+    }
+
+    # Sub: format_table
+    # - Purpose: Render a simple monospace table from headers and rows.
+    # - Params: $headers (arrayref), $rows (arrayref of arrayrefs).
+    # - Return: the table as a single string, lines joined with \n.
+    sub format_table {
+        my ($headers, $rows) = @_;
+
+        # Column widths: the widest of the header and all row cells per column.
+        my @widths;
+        for my $col (0 .. $#{$headers}) {
+            my $width = length($headers->[$col]);
+            for my $row (@$rows) {
+                my $cell_len = length($row->[$col]);
+                $width = $cell_len if $cell_len > $width;
+            }
+            $widths[$col] = $width;
+        }
+
+        # Render one table row, padding each cell to its column width.
+        my sub render_row {
+            my ($cells) = @_;
+            my $line = '|';
+            $line .= sprintf(" %-*s |", $widths[$_], $cells->[$_]) for 0 .. $#{$cells};
+            return $line;
+        }
+
+        my $separator = '|';
+        $separator .= '-' x ($widths[$_] + 2) . '|' for 0 .. $#{$headers};
+
+        # Top terminator, header, separator, data rows, bottom terminator.
+        my @lines = ($separator, render_row($headers), $separator);
+        push @lines, render_row($_) for @$rows;
+        push @lines, $separator;
+
+        return join("\n", @lines);
+    }
+
+ # Convert gemtext to HTML
+ # Sub: gemtext_to_html
+ # - Purpose: Convert a subset of Gemtext to compact HTML, incl. code blocks and lists.
+ # - Params: $content (str) Gemtext.
+ # - Return: HTML string (fragment).
+ # - Note: blank lines emit nothing; an unterminated ``` fence consumes the
+ #   remainder of the input as code.
+ sub gemtext_to_html {
+ my ($content) = @_;
+ my $html = "";
+ my @lines = split /\n/, $content;
+ my $i = 0;
+
+ while ($i < @lines) {
+ my $line = $lines[$i];
+
+ if ($line =~ /^```/) {
+ # Collect everything up to (excluding) the closing fence; the closing
+ # ``` itself is then skipped by the $i++ at the bottom of the loop.
+ my @block_lines;
+ $i++; # Move past the opening ```
+ while ($i < @lines && $lines[$i] !~ /^```/) {
+ push @block_lines, $lines[$i];
+ $i++;
+ }
+ $html .= _gemtext_to_html_code_block(\@block_lines);
+ }
+ elsif ($line =~ /^### /) {
+ $html .= _gemtext_to_html_heading($line);
+ }
+ elsif ($line =~ /^## /) {
+ $html .= _gemtext_to_html_heading($line);
+ }
+ elsif ($line =~ /^# /) {
+ $html .= _gemtext_to_html_heading($line);
+ }
+ elsif ($line =~ /^=> /) {
+ $html .= _gemtext_to_html_link($line);
+ }
+ elsif ($line =~ /^\* /) {
+ # Greedily gather the whole run of bullet lines into a single <ul>.
+ my @list_items;
+ while ($i < @lines && $lines[$i] =~ /^\* /) {
+ push @list_items, $lines[$i];
+ $i++;
+ }
+ $html .= _gemtext_to_html_list(\@list_items);
+ $i--; # Decrement to re-evaluate the current line in the outer loop
+ }
+ elsif ($line !~ /^\s*$/) {
+ $html .= _gemtext_to_html_paragraph($line);
+ }
+
+ # Else, it's a blank line, which we skip for compact output.
+ $i++;
+ }
+
+ return $html;
+ }
+
+    # Render a fenced code block: ASCII tables become real HTML tables, any
+    # other content is emitted verbatim (entity-encoded) inside <pre>.
+    sub _gemtext_to_html_code_block {
+        my ($lines) = @_;
+
+        return convert_ascii_table_to_html($lines) if is_ascii_table($lines);
+
+        my $body = join '', map { encode_entities($_) . "\n" } @$lines;
+        return "<pre>\n" . $body . "</pre>\n";
+    }
+
+    # Render a gemtext heading ('#', '##' or '###') as <h1>/<h2>/<h3>.
+    # Most specific prefix is tried first, since '###' also matches '#'.
+    # Returns '' for lines that are not headings.
+    sub _gemtext_to_html_heading {
+        my ($line) = @_;
+
+        return "<h3>" . encode_entities($1) . "</h3>\n" if $line =~ /^### (.*)/;
+        return "<h2>" . encode_entities($1) . "</h2>\n" if $line =~ /^## (.*)/;
+        return "<h1>" . encode_entities($1) . "</h1>\n" if $line =~ /^# (.*)/;
+        return '';
+    }
+
+    # Render a gemtext link line ('=> URL text') as a paragraph-wrapped anchor.
+    # 365-day summary links are dropped from the HTML output entirely, and
+    # .gmi targets are rewritten to .html. Returns '' for non-link lines.
+    sub _gemtext_to_html_link {
+        my ($line) = @_;
+
+        return '' unless $line =~ /^=> (\S+)\s+(.*)/;
+        my ($url, $text) = ($1, $2);
+
+        # Drop 365-day summary links from HTML output
+        return '' if $url =~ /(?:^|[\/.])365day_summary_\d{8}\.gmi$/;
+
+        # Convert .gmi links to .html
+        $url =~ s/\.gmi$/\.html/;
+
+        my $href   = encode_entities($url);
+        my $anchor = encode_entities($text);
+        return "<p><a href=\"$href\">$anchor</a></p>\n";
+    }
+
+    # Render consecutive gemtext bullet lines ('* item') as one <ul>, with
+    # URLs inside each item turned into anchors.
+    sub _gemtext_to_html_list {
+        my ($lines) = @_;
+
+        my @items = map { /^\* (.*)/ ? "<li>" . linkify_text($1) . "</li>\n" : () } @$lines;
+        return "<ul>\n" . join('', @items) . "</ul>\n";
+    }
+
+    # Render a plain gemtext line as a paragraph, linkifying URLs within.
+    sub _gemtext_to_html_paragraph {
+        my ($line) = @_;
+        my $body = linkify_text($line);
+        return "<p>$body</p>\n";
+    }
+
+    # Check if the lines form an ASCII table
+    # Sub: is_ascii_table
+    # - Purpose: Heuristically detect whether a code block is an ASCII table.
+    # - Params: $lines (arrayref of strings).
+    # - Return: 1 if a separator row is found; 0 otherwise.
+    sub is_ascii_table {
+        my ($lines) = @_;
+
+        # A table needs at least a header, a separator, and one data row.
+        return 0 if @$lines < 3;
+
+        # A run of dashes/spaces followed by a pipe marks a separator row.
+        for my $candidate (@$lines) {
+            return 1 if $candidate =~ /^\|?[\s\-]+\|/;
+        }
+        return 0;
+    }
+
+ # Convert ASCII table to HTML table
+ # Sub: convert_ascii_table_to_html
+ # - Purpose: Convert simple ASCII table lines to an HTML <table>.
+ # - Params: $lines (arrayref of strings).
+ # - Return: HTML string.
+ # - Note: the first non-separator line is treated as the header row; cells
+ #   of a 'Total' row and of the 'Total' header column are rendered bold.
+ sub convert_ascii_table_to_html {
+ my ($lines) = @_;
+ my $html = "<table>\n";
+ my $row_count = 0;
+ my $total_col_idx = -1; # Index of the 'Total' header column, if any
+
+ for my $line (@$lines) {
+
+ # Skip separator lines
+ next if $line =~ /^\|?[\s\-]+\|/ && $line =~ /\-/;
+
+ # Parse table row
+ my @cells = split /\s*\|\s*/, $line;
+ # This also drops the empty first cell produced by a leading '|'.
+ @cells = grep { length($_) > 0 } @cells; # Remove empty cells
+
+ if (@cells) {
+ my $is_total_row = (trim($cells[0]) eq 'Total');
+ $html .= "<tr>\n";
+
+ if ($row_count == 0) { # Header row
+ # Remember which column is headed 'Total' so it can be bolded below.
+ for my $i (0 .. $#cells) {
+ if (trim($cells[$i]) eq 'Total') {
+ $total_col_idx = $i;
+ last;
+ }
+ }
+ }
+
+ my $tag = ($row_count == 0) ? "th" : "td";
+ for my $i (0 .. $#cells) {
+ my $val = trim($cells[$i]);
+ my $cell_content = linkify_text($val);
+
+ # Bold the whole 'Total' row, and the 'Total' column in data rows.
+ if ($is_total_row || ($i == $total_col_idx && $row_count > 0)) {
+ $html .= " <$tag><b>" . $cell_content . "</b></$tag>\n";
+ }
+ else {
+ $html .= " <$tag>" . $cell_content . "</$tag>\n";
+ }
+ }
+ $html .= "</tr>\n";
+ $row_count++;
+ }
+ }
+
+ $html .= "</table>\n";
+ return $html;
+ }
+
+    # Trim whitespace from string
+    # Sub: trim
+    # - Purpose: Strip leading and trailing whitespace.
+    # - Params: $str (str).
+    # - Return: the trimmed copy (the argument is not modified).
+    sub trim {
+        my ($text) = @_;
+        $text =~ s/^\s+//;
+        $text =~ s/\s+$//;
+        return $text;
+    }
+
+    # Build an href for a token that looks like a URL or FQDN
+    # Sub: _guess_href
+    # - Purpose: Infer an absolute https href for a token found in text.
+    # - Params: $token (str) token from text.
+    # - Return: href string (trailing punctuation re-appended) or undef.
+    sub _guess_href {
+        my ($token) = @_;
+        my $t = $token;
+        $t =~ s/^\s+//;
+        $t =~ s/\s+$//;
+
+        # Already absolute http(s)
+        return $t if $t =~ m{^https?://}i;
+
+        # Extract trailing punctuation to avoid including it in href
+        my $trail = '';
+        if ($t =~ s{([)\]\}.,;:!?]+)$}{}) { $trail = $1; }
+
+        # host[/path] — always linked over https: .gmi paths are rewritten to
+        # .html by the callers, so a gemini:// scheme is never needed here.
+        # (An unused $is_gemini flag suggesting otherwise was removed.)
+        if ($t =~ m{^([A-Za-z0-9.-]+\.[A-Za-z]{2,})(/[^\s<]*)?$}) {
+            my ($host, $path) = ($1, $2 // '');
+
+            # A bare host links to its root.
+            my $href = sprintf('https://%s%s', $host, ($path eq '' ? '/' : $path));
+            return $href . $trail;
+        }
+
+        return undef;
+    }
+
+ # Turn any URLs/FQDNs in the provided text into anchors
+ # Sub: linkify_text
+ # - Purpose: Replace URL/FQDN tokens in text with HTML anchors.
+ # - Params: $text (str) input text.
+ # - Return: HTML string with entities encoded.
+ sub linkify_text {
+ my ($text) = @_;
+ return '' unless defined $text;
+
+ my $out = '';
+ my $pos = 0; # End offset of the last segment already emitted
+ while ($text =~ m{((?:https?://)?[A-Za-z0-9.-]+\.[A-Za-z]{2,}(?:/[^\s<]*)?)}g) {
+ my $match = $1;
+ # @-/@+ hold the capture's start/end offsets within $text.
+ my $start = $-[1];
+ my $end = $+[1];
+
+ # Emit preceding text
+ $out .= encode_entities(substr($text, $pos, $start - $pos));
+
+ # Separate trailing punctuation from the match
+ my ($core, $trail) = ($match, '');
+ if ($core =~ s{([)\]\}.,;:!?]+)$}{}) { $trail = $1; }
+
+ # If this token is a previously truncated URL, display/link the original.
+ my $display = $core;
+ if (my $full = _lookup_full_url_for($core)) {
+ $display = $full;
+ }
+
+ # Prefer an href built from the restored URL; fall back to the token.
+ my $href = _guess_href($display);
+ if (!$href) {
+ $href = _guess_href($core);
+ }
+
+ if ($href) {
+ # Link to the HTML rendering of gemtext documents.
+ $href =~ s/\.gmi$/\.html/i;
+ $out .= sprintf(
+ '<a href="%s">%s</a>%s',
+ encode_entities($href), encode_entities($display),
+ encode_entities($trail)
+ );
+ }
+ else {
+ # Not a linkable token after all
+ $out .= encode_entities($match);
+ }
+ $pos = $end;
+ }
+
+ # Remainder
+ $out .= encode_entities(substr($text, $pos));
+ return $out;
+ }
+
+ # Use HTML::Entities::encode_entities imported above
+
+ # Generate HTML wrapper
+ # Sub: generate_html_page
+ # - Purpose: Wrap content in a minimal HTML5 page with a title and CSS reset.
+ # - Params: $title (str), $content (str) HTML fragment.
+ # - Return: full HTML page string.
+ sub generate_html_page {
+ my ($title, $content) = @_;
+ # NOTE(review): $title and $content are interpolated verbatim (no entity
+ # encoding here); callers must pass trusted or pre-escaped HTML.
+ return qq{<!DOCTYPE html>
+<html lang="en">
+<head>
+ <meta charset="UTF-8">
+ <meta name="viewport" content="width=device-width, initial-scale=1.0">
+ <title>$title</title>
+ <style>
+ /* Compact, full-width layout */
+ :root {
+ --pad: 8px;
+ }
+ html, body {
+ height: 100%;
+ }
+ body {
+ font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
+ line-height: 1.3;
+ margin: 0;
+ padding: var(--pad);
+ background: #fff;
+ color: #000;
+ }
+ /* Headings: smaller and tighter */
+ h1, h2, h3 { margin: 0.5em 0 0.25em; font-weight: 600; }
+ h1 { font-size: 1em; }
+ h2 { font-size: 0.95em; }
+ h3 { font-size: 0.9em; }
+ /* Paragraphs and lists: minimal vertical rhythm */
+ p { margin: 0.2em 0; }
+ ul { margin: 0.3em 0; padding-left: 1.2em; }
+ li { margin: 0.1em 0; }
+ /* Code blocks and tables */
+ pre {
+ overflow-x: auto;
+ white-space: pre;
+ margin: 0.3em 0;
+ }
+ table {
+ border-collapse: collapse;
+ table-layout: auto; /* size columns by content */
+ width: auto; /* do not stretch to full width */
+ max-width: 100%;
+ margin: 0.5em 0;
+ font-size: 0.95em;
+ display: inline-table; /* keep as compact as content allows */
+ }
+ th, td {
+ padding: 0.1em 0.3em;
+ text-align: left;
+ white-space: nowrap; /* avoid wide columns caused by wrapping */
+ }
+ /* Links */
+ a { color: #06c; text-decoration: underline; }
+ a:visited { color: #639; }
+ /* Rules */
+ hr { border: none; border-top: 1px solid #ccc; margin: 0.5em 0; }
+ </style>
+</head>
+<body>
+$content
+</body>
+</html>
+};
+ }
+
+ # Sub: should_generate_daily_report
+ # - Purpose: Check if a daily report should be generated based on file existence and age.
+ # - Params: $date (YYYYMMDD), $report_path (str), $html_report_path (str).
+ # - Return: 1 if report should be generated, 0 otherwise.
+ sub should_generate_daily_report {
+ my ($date, $report_path, $html_report_path) = @_;
+
+ # NOTE(review): the capture is not checked; a malformed $date leaves
+ # $year/$month/$day undef and only surfaces as warnings in the log
+ # messages below.
+ my ($year, $month, $day) = $date =~ /(\d{4})(\d{2})(\d{2})/;
+
+ # Calculate age of the data based on date in filename
+ my $today = Time::Piece->new();
+ # NOTE(review): Time::Piece->new() is local time while strptime() parses
+ # without a zone, so $age_days can be off by the TZ offset — harmless
+ # for a 3-day threshold, but confirm.
+ my $file_date = Time::Piece->strptime($date, '%Y%m%d');
+ my $age_days = ($today - $file_date) / (24 * 60 * 60);
+
+ if (-e $report_path && -e $html_report_path) {
+
+ # Files exist
+ if ($age_days <= 3) {
+
+ # Data is recent (within 3 days), regenerate it
+ say "Regenerating daily report for $year-$month-$day (data age: "
+ . sprintf("%.1f", $age_days)
+ . " days)";
+ return 1;
+ }
+ else {
+ # Data is old (older than 3 days), skip if files exist
+ say "Skipping daily report for $year-$month-$day (files exist, data age: "
+ . sprintf("%.1f", $age_days)
+ . " days)";
+ return 0;
+ }
+ }
+ else {
+ # File doesn't exist, generate it
+ say "Generating new daily report for $year-$month-$day (file doesn't exist, data age: "
+ . sprintf("%.1f", $age_days)
+ . " days)";
+ return 1;
+ }
+ }
+
+ # Sub: generate_feed_stats_section
+ # - Purpose: Render per-feed unique-visitor counts as a fenced gemtext table.
+ # - Params: $stats (hashref) providing $stats->{feed_ips}{<feed name>} counts.
+ # - Return: gemtext string ("### Feed Statistics" section).
+ sub generate_feed_stats_section {
+ my ($stats) = @_;
+ my $report_content = "### Feed Statistics\n\n";
+ my @feed_rows;
+ push @feed_rows, [ 'Total', $stats->{feed_ips}{'Total'} // 0 ];
+ push @feed_rows, [ 'Gemini Gemfeed', $stats->{feed_ips}{'Gemini Gemfeed'} // 0 ];
+ push @feed_rows, [ 'Gemini Atom', $stats->{feed_ips}{'Gemini Atom'} // 0 ];
+ push @feed_rows, [ 'Web Gemfeed', $stats->{feed_ips}{'Web Gemfeed'} // 0 ];
+ push @feed_rows, [ 'Web Atom', $stats->{feed_ips}{'Web Atom'} // 0 ];
+ $report_content .= "```\n";
+ $report_content .= format_table([ 'Feed Type', 'Count' ], \@feed_rows);
+ $report_content .= "\n```\n\n";
+ return $report_content;
+ }
+
+ # Sub: generate_top_n_table
+ # - Purpose: Render a "top N" two-column table for a key => count hashref,
+ #   sorted by count descending.
+ # - Params (named): title (str), data (hashref), headers (arrayref of 2
+ #   column titles), limit (int, default 50), is_url (bool, default 0:
+ #   shorten long URLs via truncate_urls_for_table).
+ # - Return: gemtext string (### section; adds a note when the list was cut).
+ sub generate_top_n_table {
+ my (%args) = @_;
+ my $title = $args{title};
+ my $data = $args{data};
+ my $headers = $args{headers};
+ my $limit = $args{limit} // 50;
+ my $is_url = $args{is_url} // 0;
+
+ my $report_content = "### $title\n\n";
+ my @rows;
+ my @sorted_keys =
+ sort { ($data->{$b} // 0) <=> ($data->{$a} // 0) }
+ keys %$data;
+ my $truncated = @sorted_keys > $limit;
+ @sorted_keys = @sorted_keys[ 0 .. $limit - 1 ] if $truncated;
+
+ for my $key (@sorted_keys) {
+ push @rows, [ $key, $data->{$key} // 0 ];
+ }
+
+ if ($is_url) {
+ truncate_urls_for_table(\@rows, $headers->[1]);
+ }
+
+ $report_content .= "```\n";
+ $report_content .= format_table($headers, \@rows);
+ $report_content .= "\n```\n";
+ if ($truncated) {
+ $report_content .= "\n... and more (truncated to $limit entries).\n";
+ }
+ $report_content .= "\n";
+ return $report_content;
+ }
+
+ # Sub: generate_top_urls_section
+ # - Purpose: Top-50 URLs by unique visitors for one day.
+ # - Params: $stats (hashref) with $stats->{page_ips}{urls}.
+ # - Return: gemtext string.
+ sub generate_top_urls_section {
+ my ($stats) = @_;
+ return generate_top_n_table(
+ title => 'Top 50 URLs',
+ data => $stats->{page_ips}{urls},
+ headers => [ 'URL', 'Unique Visitors' ],
+ is_url => 1,
+ );
+ }
+
+ # Sub: generate_top_hosts_section
+ # - Purpose: Top hosts by unique visitors for one day.
+ # - Params: $stats (hashref) with $stats->{page_ips}{hosts}.
+ # - Return: gemtext string.
+ sub generate_top_hosts_section {
+ my ($stats) = @_;
+ return generate_top_n_table(
+ title => 'Page Statistics (by Host)',
+ data => $stats->{page_ips}{hosts},
+ headers => [ 'Host', 'Unique Visitors' ],
+ );
+ }
+
+ # Sub: generate_summary_section
+ # - Purpose: Bullet-list summary of request counters for one day.
+ # - Params: $stats (hashref) with $stats->{count}{...} counters.
+ # - Return: gemtext string ("### Summary" section).
+ sub generate_summary_section {
+ my ($stats) = @_;
+ my $report_content = "### Summary\n\n";
+ # Total is gemini + web; filtered requests are reported separately.
+ my $total_requests =
+ ($stats->{count}{gemini} // 0) + ($stats->{count}{web} // 0);
+ $report_content .= "* Total requests: $total_requests\n";
+ $report_content .=
+ "* Filtered requests: " . ($stats->{count}{filtered} // 0) . "\n";
+ $report_content .=
+ "* Gemini requests: " . ($stats->{count}{gemini} // 0) . "\n";
+ $report_content .=
+ "* Web requests: " . ($stats->{count}{web} // 0) . "\n";
+ $report_content .=
+ "* IPv4 requests: " . ($stats->{count}{IPv4} // 0) . "\n";
+ $report_content .=
+ "* IPv6 requests: " . ($stats->{count}{IPv6} // 0) . "\n\n";
+ return $report_content;
+ }
+
+ # Sub: report
+ # - Purpose: Generate daily .gmi and .html reports per date, then summaries and index.
+ # - Params: $stats_dir, $output_dir, $html_output_dir, %merged (date => stats).
+ # - Return: undef.
+ sub report {
+ my ($stats_dir, $output_dir, $html_output_dir, %merged) = @_;
+ # Newest dates first.
+ for my $date (sort { $b cmp $a } keys %merged) {
+ my $stats = $merged{$date};
+ next unless $stats->{count};
+
+ my ($year, $month, $day) = $date =~ /(\d{4})(\d{2})(\d{2})/;
+
+ my $report_path = "$output_dir/$date.gmi";
+ my $html_report_path = "$html_output_dir/$date.html";
+
+ next unless should_generate_daily_report($date, $report_path, $html_report_path);
+
+ reset_truncated_url_mappings();
+ my $report_content = "## Stats for $year-$month-$day\n\n";
+ $report_content .= generate_feed_stats_section($stats);
+ $report_content .= generate_top_urls_section($stats);
+ $report_content .= generate_top_hosts_section($stats);
+ $report_content .= generate_summary_section($stats);
+
+ # Add links to summary reports (only monthly)
+ $report_content .= "## Related Reports\n\n";
+ my $now = localtime;
+ my $current_date = $now->strftime('%Y%m%d');
+ $report_content .= "=> ./30day_summary_$current_date.gmi 30-Day Summary Report\n\n";
+
+ # Ensure output directory exists
+ mkdir $output_dir unless -d $output_dir;
+
+ # $report_path already defined above
+ say "Writing report to $report_path";
+ FileHelper::write($report_path, $report_content);
+
+ # Also write HTML version
+ mkdir $html_output_dir unless -d $html_output_dir;
+ # NOTE(review): $html_path duplicates $html_report_path computed above.
+ my $html_path = "$html_output_dir/$date.html";
+ my $html_content = gemtext_to_html($report_content);
+ my $html_page = generate_html_page("Stats for $year-$month-$day", $html_content);
+ say "Writing HTML report to $html_path";
+ FileHelper::write($html_path, $html_page);
+ reset_truncated_url_mappings();
+ }
+
+ # Generate summary reports
+ generate_summary_report(30, $stats_dir, $output_dir, $html_output_dir, %merged);
+
+ # Generate index.gmi and index.html
+ generate_index($output_dir, $html_output_dir);
+ }
+
+ # Sub: generate_summary_report
+ # - Purpose: Generate N-day rolling summary in .gmi (+.html except 365-day).
+ # - Params: $days (int), $stats_dir, $output_dir, $html_output_dir, %merged.
+ # - Return: undef.
+ sub generate_summary_report {
+ my ($days, $stats_dir, $output_dir, $html_output_dir, %merged) = @_;
+
+ # Get the last N days of dates
+ my @dates = sort { $b cmp $a } keys %merged;
+ my $max_index = $days - 1;
+ # Keep the newest $days dates (list is sorted newest-first above).
+ @dates = @dates[ 0 .. $max_index ] if @dates > $days;
+
+ my $today = localtime;
+ my $report_date = $today->strftime('%Y%m%d');
+
+ # Build report content
+ reset_truncated_url_mappings();
+ my $report_content = build_report_header($today, $days);
+
+ # Order: feed counts -> Top URLs -> daily top 3 for last 30 days -> other tables
+ $report_content .= build_feed_statistics_section(\@dates, \%merged);
+ $report_content .= build_feed_statistics_daily_average_section(\@dates, \%merged);
+
+ # Aggregate and add top lists
+ my ($all_hosts, $all_urls) = aggregate_hosts_and_urls(\@dates, \%merged);
+ $report_content .= build_top_urls_section($all_urls, $days);
+ $report_content .= build_top3_urls_last_n_days_per_day($stats_dir, 30, \%merged);
+ $report_content .= build_top_hosts_section($all_hosts, $days);
+ $report_content .= build_daily_summary_section(\@dates, \%merged);
+
+ # Add links to other summary reports
+ $report_content .= build_summary_links($days, $report_date);
+
+ # Ensure output directory exists and write the summary report
+ mkdir $output_dir unless -d $output_dir;
+
+ my $report_path = "$output_dir/${days}day_summary_$report_date.gmi";
+ say "Writing $days-day summary report to $report_path";
+ FileHelper::write($report_path, $report_content);
+
+ # Also write HTML version, except for 365-day summaries (HTML suppressed)
+ if ($days != 365) {
+ mkdir $html_output_dir unless -d $html_output_dir;
+ my $html_path = "$html_output_dir/${days}day_summary_$report_date.html";
+ my $html_content = gemtext_to_html($report_content);
+ my $html_page = generate_html_page("$days-Day Summary Report", $html_content);
+ say "Writing HTML $days-day summary report to $html_path";
+ FileHelper::write($html_path, $html_page);
+ }
+ else {
+ say "Skipping HTML generation for 365-day summary (Gemtext only)";
+ }
+
+ reset_truncated_url_mappings();
+ }
+
+ # Sub: build_feed_statistics_daily_average_section
+ # - Purpose: Per-feed daily averages over the supplied dates.
+ # - Params: $dates (arrayref YYYYMMDD), $merged (hashref date => stats).
+ # - Return: gemtext string; empty string when no date had feed stats.
+ # NOTE(review): the heading hard-codes "Last 30 Days" but the average
+ # covers whatever dates are passed in — confirm callers always pass 30.
+ sub build_feed_statistics_daily_average_section {
+ my ($dates, $merged) = @_;
+
+ my %totals;
+ my $days_with_stats = 0;
+
+ for my $date (@$dates) {
+ my $stats = $merged->{$date};
+ next unless $stats->{feed_ips};
+ $days_with_stats++;
+
+ for my $key (keys %{ $stats->{feed_ips} }) {
+ $totals{$key} += $stats->{feed_ips}{$key};
+ }
+ }
+
+ return "" unless $days_with_stats > 0;
+
+ my @avg_rows;
+ my $total_avg = 0;
+ my $has_total = 0;
+
+ # Separate 'Total' from other keys
+ my @other_keys;
+ for my $key (keys %totals) {
+ if ($key eq 'Total') {
+ $total_avg = sprintf("%.2f", $totals{$key} / $days_with_stats);
+ $has_total = 1;
+ }
+ else {
+ push @other_keys, $key;
+ }
+ }
+
+ # Sort other keys and create rows
+ for my $key (sort @other_keys) {
+ my $avg = sprintf("%.2f", $totals{$key} / $days_with_stats);
+ push @avg_rows, [ $key, $avg ];
+ }
+
+ # Add Total row at the end
+ push @avg_rows, [ 'Total', $total_avg ] if $has_total;
+
+ my $content = "### Feed Statistics Daily Average (Last 30 Days)\n\n```\n";
+ $content .= format_table([ 'Feed Type', 'Daily Average' ], \@avg_rows);
+ $content .= "\n```\n\n";
+
+ return $content;
+ }
+
+ # Sub: build_report_header
+ # - Purpose: Header section for summary reports.
+ # - Params: $today (Time::Piece), $days (int default 30).
+ # - Return: gemtext string.
+ sub build_report_header {
+ my ($today, $days) = @_;
+ $days //= 30; # Default to 30 days for backward compatibility
+
+ # H1 heading plus generation date; $today must respond to strftime.
+ my $content = "# $days-Day Summary Report\n\n";
+ $content .= "Generated on " . $today->strftime('%Y-%m-%d') . "\n\n";
+ return $content;
+ }
+
+ # Sub: build_daily_summary_section
+ # - Purpose: Table of daily total counts over a period.
+ # - Params: $dates (arrayref YYYYMMDD), $merged (hashref date=>stats).
+ # - Return: gemtext string.
+ # NOTE(review): heading hard-codes "Last 30 Days" regardless of the
+ # actual period passed in — confirm callers always pass 30 dates.
+ sub build_daily_summary_section {
+ my ($dates, $merged) = @_;
+
+ my $content = "## Daily Summary Evolution (Last 30 Days)\n\n";
+ $content .= "### Total Requests by Day\n\n```\n";
+
+ my @summary_rows;
+ # Dates arrive newest-first (see generate_summary_report); reverse
+ # renders the table in chronological order.
+ for my $date (reverse @$dates) {
+ my $stats = $merged->{$date};
+ next unless $stats->{count};
+
+ push @summary_rows, build_daily_summary_row($date, $stats);
+ }
+
+ $content .= format_table([ 'Date', 'Filtered', 'Gemini', 'Web', 'IPv4', 'IPv6', 'Total' ],
+ \@summary_rows);
+ $content .= "\n```\n\n";
+
+ return $content;
+ }
+
+ # Sub: build_daily_summary_row
+ # - Purpose: Build one table row with counts for a date.
+ # - Params: $date (YYYYMMDD), $stats (hashref).
+ # - Return: arrayref of cell strings.
+ sub build_daily_summary_row {
+ my ($date, $stats) = @_;
+
+ my ($year, $month, $day) = $date =~ /(\d{4})(\d{2})(\d{2})/;
+ my $formatted_date = "$year-$month-$day";
+
+ # 'Total' counts gemini + web only; filtered requests are a separate column.
+ my $total_requests = ($stats->{count}{gemini} // 0) + ($stats->{count}{web} // 0);
+ my $filtered = $stats->{count}{filtered} // 0;
+ my $gemini = $stats->{count}{gemini} // 0;
+ my $web = $stats->{count}{web} // 0;
+ my $ipv4 = $stats->{count}{IPv4} // 0;
+ my $ipv6 = $stats->{count}{IPv6} // 0;
+
+ # Column order must match build_daily_summary_section's header row.
+ return [ $formatted_date, $filtered, $gemini, $web, $ipv4, $ipv6, $total_requests ];
+ }
+
+ # Sub: build_feed_statistics_section
+ # - Purpose: Table of feed unique counts by day over a period.
+ # - Params: $dates (arrayref), $merged (hashref).
+ # - Return: gemtext string.
+ sub build_feed_statistics_section {
+ my ($dates, $merged) = @_;
+
+ my $content = "### Feed Statistics Evolution\n\n```\n";
+
+ my @feed_rows;
+ # Dates arrive newest-first; reverse renders rows chronologically.
+ for my $date (reverse @$dates) {
+ my $stats = $merged->{$date};
+ next unless $stats->{feed_ips};
+
+ push @feed_rows, build_feed_statistics_row($date, $stats);
+ }
+
+ $content .=
+ format_table([ 'Date', 'Gem Feed', 'Gem Atom', 'Web Feed', 'Web Atom', 'Total' ],
+ \@feed_rows);
+ $content .= "\n```\n\n";
+
+ return $content;
+ }
+
+ # Sub: build_feed_statistics_row
+ # - Purpose: Build one row of feed unique counts for a date.
+ # - Params: $date (YYYYMMDD), $stats (hashref).
+ # - Return: arrayref of cell strings.
+ sub build_feed_statistics_row {
+ my ($date, $stats) = @_;
+
+ my ($year, $month, $day) = $date =~ /(\d{4})(\d{2})(\d{2})/;
+ my $formatted_date = "$year-$month-$day";
+
+ # Column order must match build_feed_statistics_section's header row.
+ return [
+ $formatted_date,
+ $stats->{feed_ips}{'Gemini Gemfeed'} // 0,
+ $stats->{feed_ips}{'Gemini Atom'} // 0,
+ $stats->{feed_ips}{'Web Gemfeed'} // 0,
+ $stats->{feed_ips}{'Web Atom'} // 0,
+ $stats->{feed_ips}{'Total'} // 0
+ ];
+ }
+
+ # Sub: aggregate_hosts_and_urls
+ # - Purpose: Sum hosts and URLs across multiple days.
+ # - Params: $dates (arrayref), $merged (hashref).
+ # - Return: (\%all_hosts, \%all_urls).
+ sub aggregate_hosts_and_urls {
+ my ($dates, $merged) = @_;
+
+ my %all_hosts;
+ my %all_urls;
+
+ for my $date (@$dates) {
+ my $stats = $merged->{$date};
+ next unless $stats->{page_ips};
+
+ # //= 0 below keeps 'uninitialized value' warnings away on the
+ # first addition for a key.
+ # Aggregate hosts
+ while (my ($host, $count) = each %{ $stats->{page_ips}{hosts} }) {
+ $all_hosts{$host} //= 0;
+ $all_hosts{$host} += $count;
+ }
+
+ # Aggregate URLs
+ while (my ($url, $count) = each %{ $stats->{page_ips}{urls} }) {
+ $all_urls{$url} //= 0;
+ $all_urls{$url} += $count;
+ }
+ }
+
+ return (\%all_hosts, \%all_urls);
+ }
+
+ # Sub: build_top_hosts_section
+ # - Purpose: Top-50 hosts table for the aggregated period.
+ # - Params: $all_hosts (hashref host => visitors), $days (int default 30).
+ # - Return: gemtext string.
+ sub build_top_hosts_section {
+ my ($all_hosts, $days) = @_;
+ $days //= 30;
+
+ return generate_top_n_table(
+ title => "Top 50 Hosts (${days}-Day Total)",
+ data => $all_hosts,
+ headers => [ 'Host', 'Visitors' ],
+ );
+ }
+
+ # Sub: build_top_urls_section
+ # - Purpose: Build Top-50 URLs table for the aggregated period (with truncation).
+ # - Params: $all_urls (hashref), $days (int default 30).
+ # - Return: gemtext string.
+ sub build_top_urls_section {
+ my ($all_urls, $days) = @_;
+ $days //= 30;
+
+ # is_url enables URL shortening inside generate_top_n_table.
+ return generate_top_n_table(
+ title => "Top 50 URLs (${days}-Day Total)",
+ data => $all_urls,
+ headers => [ 'URL', 'Visitors' ],
+ is_url => 1,
+ );
+ }
+
+ # Sub: build_summary_links
+ # - Purpose: Links to other summary reports (30-day when not already on it).
+ # - Params: $current_days (int), $report_date (YYYYMMDD).
+ # - Return: gemtext string (possibly empty).
+ sub build_summary_links {
+ my ($current_days, $report_date) = @_;
+
+ my $content = '';
+
+ # Only add link to 30-day summary when not on the 30-day report itself
+ # (a self-link would be redundant).
+ if ($current_days != 30) {
+ $content .= "## Other Summary Reports\n\n";
+ $content .= "=> ./30day_summary_$report_date.gmi 30-Day Summary Report\n\n";
+ }
+
+ return $content;
+ }
+
+ # Sub: build_top3_urls_last_n_days_per_day
+ # - Purpose: For each of last N days, render the top 5 URLs table.
+ # - Params: $stats_dir (str), $days (int default 30), $merged (hashref).
+ # - Return: gemtext string.
+ sub build_top3_urls_last_n_days_per_day {
+ my ($stats_dir, $days, $merged) = @_;
+ $days //= 30;
+ my $content = "## Top 5 URLs Per Day (Last ${days} Days)\n\n";
+
+ my @all = DateHelper::last_month_dates();
+ my @dates = @all;
+ @dates = @all[ 0 .. $days - 1 ] if @all > $days;
+ return $content . "(no data)\n\n" unless @dates;
+
+ for my $date (@dates) {
+
+ # Prefer in-memory merged stats if available; otherwise merge from disk
+ my $stats = $merged->{$date};
+ if (!$stats || !($stats->{page_ips} && $stats->{page_ips}{urls})) {
+ $stats = Foostats::Merger::merge_for_date($stats_dir, $date);
+ }
+ next unless $stats && $stats->{page_ips} && $stats->{page_ips}{urls};
+
+ my ($y, $m, $d) = $date =~ /(\d{4})(\d{2})(\d{2})/;
+ $content .= "### $y-$m-$d\n\n";
+
+ my $urls = $stats->{page_ips}{urls};
+ my @sorted = sort { ($urls->{$b} // 0) <=> ($urls->{$a} // 0) } keys %$urls;
+ next unless @sorted;
+ my $limit = @sorted < 5 ? @sorted : 5;
+ @sorted = @sorted[ 0 .. $limit - 1 ];
+
+ my @rows;
+ for my $u (@sorted) {
+
+ # BUGFIX: look up the count BEFORE rewriting the key. The previous
+ # code rewrote .gmi -> .html in $u first, so the $urls->{$u}
+ # lookup missed and every .gmi URL was reported with a count of 0.
+ my $count = $urls->{$u} // 0;
+ (my $display = $u) =~ s/\.gmi$/\.html/;
+ push @rows, [ $display, $count ];
+ }
+ truncate_urls_for_table(\@rows, 'Visitors');
+ $content .= "```\n" . format_table([ 'URL', 'Visitors' ], \@rows) . "\n```\n\n";
+ }
+
+ return $content;
+ }
+
+ # Sub: generate_index
+ # - Purpose: Create index.gmi/.html using the latest 30-day summary as content.
+ # - Params: $output_dir (str), $html_output_dir (str).
+ # - Return: undef.
+ sub generate_index {
+ my ($output_dir, $html_output_dir) = @_;
+
+ # Find latest 30-day summary
+ opendir(my $dh, $output_dir) or die "Cannot open directory $output_dir: $!";
+ my @gmi_files = grep { /\.gmi$/ && $_ ne 'index.gmi' } readdir($dh);
+ closedir($dh);
+
+ # Filenames embed YYYYMMDD, so a descending string sort yields the newest.
+ my @summaries_30day = sort { $b cmp $a } grep { /^30day_summary_/ } @gmi_files;
+ my $latest_30 = $summaries_30day[0];
+
+ my $index_path = "$output_dir/index.gmi";
+ mkdir $html_output_dir unless -d $html_output_dir;
+ my $html_path = "$html_output_dir/index.html";
+
+ if ($latest_30) {
+
+ # Read 30-day summary content and use it as index
+ my $summary_path = "$output_dir/$latest_30";
+ open my $sfh, '<', $summary_path or die "$summary_path: $!";
+ # Slurp mode; local confines the change to this sub's runtime scope.
+ local $/ = undef;
+ my $content = <$sfh>;
+ close $sfh;
+
+ say "Writing index to $index_path (using $latest_30)";
+ FileHelper::write($index_path, $content);
+
+ # HTML: use existing 30-day summary HTML if present, else convert
+ (my $latest_html = $latest_30) =~ s/\.gmi$/.html/;
+ my $summary_html_path = "$html_output_dir/$latest_html";
+ if (-e $summary_html_path) {
+ open my $hh, '<', $summary_html_path or die "$summary_html_path: $!";
+ local $/ = undef;
+ my $html_page = <$hh>;
+ close $hh;
+ say "Writing HTML index to $html_path (copy of $latest_html)";
+ FileHelper::write($html_path, $html_page);
+ }
+ else {
+ my $html_content = gemtext_to_html($content);
+ my $html_page = generate_html_page("30-Day Summary Report", $html_content);
+ say "Writing HTML index to $html_path (from gemtext)";
+ FileHelper::write($html_path, $html_page);
+ }
+ return;
+ }
+
+ # Fallback: minimal index if no 30-day summary found
+ my $fallback = "# Foostats Reports Index\n\n30-day summary not found.\n";
+ say "Writing fallback index to $index_path";
+ FileHelper::write($index_path, $fallback);
+
+ my $html_content = gemtext_to_html($fallback);
+ my $html_page = generate_html_page("Foostats Reports Index", $html_content);
+ say "Writing fallback HTML index to $html_path";
+ FileHelper::write($html_path, $html_page);
+ }
+}
+
+package main;
+
+# Package: main — CLI entrypoint and orchestration
+# - Purpose: Parse options and invoke parse/replicate/report flows.
+use Getopt::Long;
+use Sys::Hostname;
+
+# Sub: usage
+# - Purpose: Print usage and exit 0.
+# - Params: none.
+# - Return: never (exits).
+sub usage {
+    # <<~ (indented heredoc) requires Perl 5.26+ — presumably guaranteed by
+    # the file's version pragma (not visible in this hunk); confirm.
+    print <<~"USAGE";
+        Usage: $0 [options]
+
+        Options:
+          --parse-logs              Parse web and gemini logs.
+          --replicate               Replicate stats from partner node.
+          --report                  Generate a report from the stats.
+          --all                     Perform all of the above actions (parse, replicate, report).
+          --stats-dir <path>        Directory to store stats files.
+                                    Default: /var/www/htdocs/buetow.org/self/foostats
+          --output-dir <path>       Directory to write .gmi report files.
+                                    Default: /var/gemini/stats.foo.zone
+          --html-output-dir <path>  Directory to write .html report files.
+                                    Default: /var/www/htdocs/gemtexter/stats.foo.zone
+          --odds-file <path>        File with odd URI patterns to filter.
+                                    Default: <stats-dir>/fooodds.txt
+          --filter-log <path>       Log file for filtered requests.
+                                    Default: /var/log/fooodds
+          --partner-node <hostname> Hostname of the partner node for replication.
+                                    Default: fishfinger.buetow.org or blowfish.buetow.org
+          --version                 Show version information.
+          --help                    Show this help message.
+        USAGE
+    exit 0;
+}
+
+# Sub: parse_logs
+# - Purpose: Parse logs and persist aggregated stats files under $stats_dir.
+# - Params: $stats_dir (str), $odds_file (str), $odds_log (str).
+# - Return: undef.
+# NOTE(review): uses subroutine signatures — relies on a 'use v5.36' /
+# 'use feature signatures' earlier in the file (not visible in this hunk).
+sub parse_logs ($stats_dir, $odds_file, $odds_log) {
+ my $out = Foostats::FileOutputter->new(stats_dir => $stats_dir);
+
+ # Resume from the last processed date per protocol to avoid re-parsing.
+ $out->{stats} = Foostats::Logreader::parse_logs(
+ $out->last_processed_date('web'),
+ $out->last_processed_date('gemini'),
+ $odds_file, $odds_log
+ );
+
+ $out->write;
+}
+
+# Sub: foostats_main
+# - Purpose: Option parsing and execution of requested actions.
+# - Params: none (reads @ARGV).
+# - Return: exit code via program termination.
+sub foostats_main {
+ # NOTE(review): the $parse_logs flag (scalar) and the parse_logs() sub
+ # share a name but live in different namespaces — intentional, if a
+ # little confusing.
+ my ($parse_logs, $replicate, $report, $all, $help, $version);
+
+ # With default values
+ my $stats_dir = '/var/www/htdocs/buetow.org/self/foostats';
+ my $odds_file = $stats_dir . '/fooodds.txt';
+ my $odds_log = '/var/log/fooodds';
+ my $output_dir; # Will default to $stats_dir/gemtext if not specified
+ my $html_output_dir; # Will default to /var/www/htdocs/gemtexter/stats.foo.zone if not specified
+ # Each node replicates from the other one of the pair.
+ my $partner_node =
+ hostname eq 'fishfinger.buetow.org'
+ ? 'blowfish.buetow.org'
+ : 'fishfinger.buetow.org';
+
+ # NOTE(review): GetOptions' return value is not checked, so unknown
+ # options only warn and execution continues.
+ GetOptions
+ 'parse-logs!' => \$parse_logs,
+ 'filter-log=s' => \$odds_log,
+ 'odds-file=s' => \$odds_file,
+ 'replicate!' => \$replicate,
+ 'report!' => \$report,
+ 'all!' => \$all,
+ 'stats-dir=s' => \$stats_dir,
+ 'output-dir=s' => \$output_dir,
+ 'html-output-dir=s' => \$html_output_dir,
+ 'partner-node=s' => \$partner_node,
+ 'version' => \$version,
+ 'help|?' => \$help;
+
+ if ($version) {
+ print "foostats " . VERSION . "\n";
+ exit 0;
+ }
+
+ usage() if $help;
+
+ parse_logs($stats_dir, $odds_file, $odds_log) if $parse_logs or $all;
+ Foostats::Replicator::replicate($stats_dir, $partner_node) if $replicate or $all;
+
+ # Set default output directories if not specified
+ $output_dir //= '/var/gemini/stats.foo.zone';
+ $html_output_dir //= '/var/www/htdocs/gemtexter/stats.foo.zone';
+
+ Foostats::Reporter::report($stats_dir, $output_dir, $html_output_dir,
+ Foostats::Merger::merge($stats_dir))
+ if $report
+ or $all;
+}
+
+# Only run main flow when executed as a script, not when required (e.g., tests)
+foostats_main() unless caller;
diff --git a/gemfeed/examples/conf/frontends/scripts/gemtexter.sh.tpl b/gemfeed/examples/conf/frontends/scripts/gemtexter.sh.tpl
new file mode 100644
index 00000000..2bba20c7
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/gemtexter.sh.tpl
@@ -0,0 +1,65 @@
+#!/bin/sh
+
+PATH=$PATH:/usr/local/bin
+
+# Clone (or update) a site checkout and keep www./standby. symlinks in place.
+#   $1 = checkout dir, $2 = git repo URL, $3 = branch to track
+# NOTE(review): 'function' is a ksh/bash-ism under a /bin/sh shebang (works
+# on OpenBSD's ksh-based sh, not POSIX); expansions are unquoted and
+# cd/git failures are unchecked — confirm paths never contain whitespace.
+function ensure_site {
+ dir=$1
+ repo=$2
+ branch=$3
+
+ basename=$(basename $dir)
+ parent=$(dirname $dir)
+
+ if [ ! -d $parent ]; then
+ mkdir -p $parent
+ fi
+
+ cd $parent
+ if [ ! -e www.$basename ]; then
+ ln -s $basename www.$basename
+ fi
+
+ if [ ! -e standby.$basename ]; then
+ ln -s $basename standby.$basename
+ fi
+
+ if [ ! -d $basename ]; then
+ git clone $repo -b $branch --single-branch $basename
+ else
+ cd $basename
+ git pull
+ fi
+}
+
+# Create $target, www.$target and standby.$target symlinks pointing at a dir.
+#   $1 = source dir, $2 = link name
+# NOTE(review): defined but never invoked in this script — dead code or
+# kept for future sites; confirm before removing.
+function ensure_links {
+ dir=$1
+ target=$2
+
+ basename=$(basename $dir)
+ parent=$(dirname $dir)
+
+ cd $parent
+
+ if [ ! -e $target ]; then
+ ln -s $basename $target
+ fi
+
+ if [ ! -e www.$target ]; then
+ ln -s $basename www.$target
+ fi
+
+ if [ ! -e standby.$target ]; then
+ ln -s $basename standby.$target
+ fi
+}
+
+# Keep both the gemtext and the HTML checkout of each site up to date.
+for site in foo.zone; do
+ ensure_site \
+ /var/gemini/$site \
+ https://codeberg.org/snonux/$site \
+ content-gemtext
+ ensure_site \
+ /var/www/htdocs/gemtexter/$site \
+ https://codeberg.org/snonux/$site \
+ content-html
+done
diff --git a/gemfeed/examples/conf/frontends/scripts/rsync.sh.tpl b/gemfeed/examples/conf/frontends/scripts/rsync.sh.tpl
new file mode 100644
index 00000000..c8d7b004
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/rsync.sh.tpl
@@ -0,0 +1,8 @@
+#!/bin/sh
+
+PATH=$PATH:/usr/local/bin
+
+# Sync Joern's content over to Fishfinger!
+# NOTE(review): backticks are legacy command substitution; $(hostname -s)
+# is the modern equivalent (behavior identical here).
+if [ `hostname -s` = fishfinger ]; then
+ rsync -av --delete rsync://blowfish.wg0.wan.buetow.org/joernshtdocs/ /var/www/htdocs/joern/
+fi
diff --git a/gemfeed/examples/conf/frontends/scripts/taskwarrior.sh.tpl b/gemfeed/examples/conf/frontends/scripts/taskwarrior.sh.tpl
new file mode 100644
index 00000000..aaafbe98
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/scripts/taskwarrior.sh.tpl
@@ -0,0 +1,5 @@
+# NOTE(review): no shebang line — this snippet relies on the invoking shell.
+PATH=$PATH:/usr/local/bin
+
+echo "Any tasks due before the next 14 days?"
+# Using git user, as ssh keys are already there to sync the task db!
+su - git -c '/usr/local/bin/task rc:/etc/taskrc due.before:14day minimal 2>/dev/null'
diff --git a/gemfeed/examples/conf/frontends/var/nsd/etc/key.conf.tpl b/gemfeed/examples/conf/frontends/var/nsd/etc/key.conf.tpl
new file mode 100644
index 00000000..d8d6c76d
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/etc/key.conf.tpl
@@ -0,0 +1,4 @@
+key:
+ name: blowfish.buetow.org
+ algorithm: hmac-sha256
+ secret: "<%= $nsd_key %>"
diff --git a/gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.master.tpl b/gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.master.tpl
new file mode 100644
index 00000000..7f5ba56f
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.master.tpl
@@ -0,0 +1,17 @@
+include: "/var/nsd/etc/key.conf"
+
+server:
+ hide-version: yes
+ verbosity: 1
+ database: "" # disable database
+ debug-mode: no
+
+remote-control:
+ control-enable: yes
+ control-interface: /var/run/nsd.sock
+
+<% for my $zone (@$dns_zones) { %>
+zone:
+ name: "<%= $zone %>"
+ zonefile: "master/<%= $zone %>.zone"
+<% } %>
diff --git a/gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.slave.tpl b/gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.slave.tpl
new file mode 100644
index 00000000..d9d93fe6
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/etc/nsd.conf.slave.tpl
@@ -0,0 +1,17 @@
+include: "/var/nsd/etc/key.conf"
+
+server:
+ hide-version: yes
+ verbosity: 1
+ database: "" # disable database
+
+remote-control:
+ control-enable: yes
+ control-interface: /var/run/nsd.sock
+
+<% for my $zone (@$dns_zones) { %>
+zone:
+ name: "<%= $zone %>"
+ allow-notify: 23.88.35.144 blowfish.buetow.org
+ request-xfr: 23.88.35.144 blowfish.buetow.org
+<% } %>
diff --git a/gemfeed/examples/conf/frontends/var/nsd/zones/master/buetow.org.zone.tpl b/gemfeed/examples/conf/frontends/var/nsd/zones/master/buetow.org.zone.tpl
new file mode 100644
index 00000000..0a0fb36f
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/zones/master/buetow.org.zone.tpl
@@ -0,0 +1,124 @@
+$ORIGIN buetow.org.
+$TTL 4h
+@ IN SOA fishfinger.buetow.org. hostmaster.buetow.org. (
+ <%= time() %> ; serial
+ 1h ; refresh
+ 30m ; retry
+ 7d ; expire
+ 1h ) ; negative
+ IN NS fishfinger.buetow.org.
+ IN NS blowfish.buetow.org.
+
+ 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+ 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+master 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+master 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+
+ IN MX 10 fishfinger.buetow.org.
+ IN MX 20 blowfish.buetow.org.
+
+cool IN NS ns-75.awsdns-09.com.
+cool IN NS ns-707.awsdns-24.net.
+cool IN NS ns-1081.awsdns-07.org.
+cool IN NS ns-1818.awsdns-35.co.uk.
+
+paul 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+paul 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.paul 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.paul 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.paul 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.paul 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+blog 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+blog 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.blog 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.blog 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.blog 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.blog 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+tmp 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+tmp 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.tmp 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.tmp 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.tmp 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.tmp 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+<% for my $host (@$f3s_hosts) { -%>
+<%= $host %>. 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+<%= $host %>. 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.<%= $host %>. 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.<%= $host %>. 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.<%= $host %>. 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.<%= $host %>. 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+<% } -%>
+
+; So joern can directly preview the content before rsync happens from blowfish to fishfinger
+joern IN CNAME blowfish
+www.joern IN CNAME blowfish
+standby.joern IN CNAME fishfinger
+
+dory 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+dory 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.dory 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.dory 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.dory 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.dory 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+ecat 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+ecat 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.ecat 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.ecat 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.ecat 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.ecat 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+fotos 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+fotos 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.fotos 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.fotos 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.fotos 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.fotos 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+git 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+git 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.git 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.git 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.git 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.git 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+blowfish 14400 IN A 23.88.35.144
+blowfish 14400 IN AAAA 2a01:4f8:c17:20f1::42
+blowfish IN MX 10 fishfinger.buetow.org.
+blowfish IN MX 20 blowfish.buetow.org.
+fishfinger 14400 IN A 46.23.94.99
+fishfinger 14400 IN AAAA 2a03:6000:6f67:624::99
+fishfinger IN MX 10 fishfinger.buetow.org.
+fishfinger IN MX 20 blowfish.buetow.org.
+
+git1 1800 IN CNAME blowfish.buetow.org.
+git2 1800 IN CNAME fishfinger.buetow.org.
+
+zapad.sofia 14400 IN CNAME 79-100-3-54.ip.btc-net.bg.
+www2 14400 IN CNAME snonux.codeberg.page.
+znc 1800 IN CNAME fishfinger.buetow.org.
+www.znc 1800 IN CNAME fishfinger.buetow.org.
+standby.znc 1800 IN CNAME fishfinger.buetow.org.
+bnc 1800 IN CNAME fishfinger.buetow.org.
+www.bnc 1800 IN CNAME fishfinger.buetow.org.
+
+protonmail._domainkey.paul IN CNAME protonmail.domainkey.d4xua2siwqfhvecokhuacmyn5fyaxmjk6q3hu2omv2z43zzkl73yq.domains.proton.ch.
+protonmail2._domainkey.paul IN CNAME protonmail2.domainkey.d4xua2siwqfhvecokhuacmyn5fyaxmjk6q3hu2omv2z43zzkl73yq.domains.proton.ch.
+protonmail3._domainkey.paul IN CNAME protonmail3.domainkey.d4xua2siwqfhvecokhuacmyn5fyaxmjk6q3hu2omv2z43zzkl73yq.domains.proton.ch.
+paul IN TXT "protonmail-verification=a42447901e320064d13e536db4d73ce600d715b7"
+paul IN TXT "v=spf1 include:_spf.protonmail.ch mx ~all"
+_dmarc.paul IN TXT "v=DMARC1; p=none"
+paul IN MX 10 mail.protonmail.ch.
+paul IN MX 20 mailsec.protonmail.ch.
+paul IN MX 42 blowfish.buetow.org.
+paul IN MX 42 fishfinger.buetow.org.
+
+* IN MX 10 fishfinger.buetow.org.
+* IN MX 20 blowfish.buetow.org.
diff --git a/gemfeed/examples/conf/frontends/var/nsd/zones/master/dtail.dev.zone.tpl b/gemfeed/examples/conf/frontends/var/nsd/zones/master/dtail.dev.zone.tpl
new file mode 100644
index 00000000..d5196e04
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/zones/master/dtail.dev.zone.tpl
@@ -0,0 +1,21 @@
+$ORIGIN dtail.dev.
+$TTL 4h
+@ IN SOA fishfinger.buetow.org. hostmaster.buetow.org. (
+ <%= time() %> ; serial
+ 1h ; refresh
+ 30m ; retry
+ 7d ; expire
+ 1h ) ; negative
+ IN NS fishfinger.buetow.org.
+ IN NS blowfish.buetow.org.
+
+ IN MX 10 fishfinger.buetow.org.
+ IN MX 20 blowfish.buetow.org.
+
+ 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+ 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+github 86400 IN CNAME mimecast.github.io.
diff --git a/gemfeed/examples/conf/frontends/var/nsd/zones/master/foo.zone.zone.tpl b/gemfeed/examples/conf/frontends/var/nsd/zones/master/foo.zone.zone.tpl
new file mode 100644
index 00000000..d0755c91
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/zones/master/foo.zone.zone.tpl
@@ -0,0 +1,34 @@
+$ORIGIN foo.zone.
+$TTL 4h
+@ IN SOA fishfinger.buetow.org. hostmaster.buetow.org. (
+ <%= time() %> ; serial
+ 1h ; refresh
+ 30m ; retry
+ 7d ; expire
+ 1h ) ; negative
+ IN NS fishfinger.buetow.org.
+ IN NS blowfish.buetow.org.
+
+ IN MX 10 fishfinger.buetow.org.
+ IN MX 20 blowfish.buetow.org.
+
+ 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+ 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+f3s 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+f3s 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.f3s 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.f3s 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.f3s 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.f3s 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+
+stats 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+stats 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www.stats 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.stats 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.stats 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.stats 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
diff --git a/gemfeed/examples/conf/frontends/var/nsd/zones/master/irregular.ninja.zone.tpl b/gemfeed/examples/conf/frontends/var/nsd/zones/master/irregular.ninja.zone.tpl
new file mode 100644
index 00000000..d4f3d622
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/zones/master/irregular.ninja.zone.tpl
@@ -0,0 +1,23 @@
+$ORIGIN irregular.ninja.
+$TTL 4h
+@ IN SOA fishfinger.buetow.org. hostmaster.buetow.org. (
+ <%= time() %> ; serial
+ 1h ; refresh
+ 30m ; retry
+ 7d ; expire
+ 1h ) ; negative
+ IN NS fishfinger.buetow.org.
+ IN NS blowfish.buetow.org.
+
+ 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+ 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
+www.alt 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www.alt 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+alt 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+alt 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby.alt 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby.alt 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
diff --git a/gemfeed/examples/conf/frontends/var/nsd/zones/master/paul.cyou.zone.tpl b/gemfeed/examples/conf/frontends/var/nsd/zones/master/paul.cyou.zone.tpl
new file mode 100644
index 00000000..fdffef4f
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/zones/master/paul.cyou.zone.tpl
@@ -0,0 +1,20 @@
+$ORIGIN paul.cyou.
+$TTL 4h
+@ IN SOA fishfinger.buetow.org. hostmaster.buetow.org. (
+ <%= time() %> ; serial
+ 1h ; refresh
+ 30m ; retry
+ 7d ; expire
+ 1h ) ; negative
+ IN NS fishfinger.buetow.org.
+ IN NS blowfish.buetow.org.
+
+ IN MX 10 fishfinger.buetow.org.
+ IN MX 20 blowfish.buetow.org.
+
+ 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+ 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
diff --git a/gemfeed/examples/conf/frontends/var/nsd/zones/master/snonux.foo.zone.tpl b/gemfeed/examples/conf/frontends/var/nsd/zones/master/snonux.foo.zone.tpl
new file mode 100644
index 00000000..a9d002ae
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/nsd/zones/master/snonux.foo.zone.tpl
@@ -0,0 +1,20 @@
+$ORIGIN snonux.foo.
+$TTL 4h
+@ IN SOA fishfinger.buetow.org. hostmaster.buetow.org. (
+ <%= time() %> ; serial
+ 1h ; refresh
+ 30m ; retry
+ 7d ; expire
+ 1h ) ; negative
+ IN NS fishfinger.buetow.org.
+ IN NS blowfish.buetow.org.
+
+ IN MX 10 fishfinger.buetow.org.
+ IN MX 20 blowfish.buetow.org.
+
+ 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+ 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+www 300 IN A <%= $ips->{current_master}{ipv4} %> ; Enable failover
+www 300 IN AAAA <%= $ips->{current_master}{ipv6} %> ; Enable failover
+standby 300 IN A <%= $ips->{current_standby}{ipv4} %> ; Enable failover
+standby 300 IN AAAA <%= $ips->{current_standby}{ipv6} %> ; Enable failover
diff --git a/gemfeed/examples/conf/frontends/var/www/htdocs/buetow.org/self/index.txt.tpl b/gemfeed/examples/conf/frontends/var/www/htdocs/buetow.org/self/index.txt.tpl
new file mode 100644
index 00000000..6b8979da
--- /dev/null
+++ b/gemfeed/examples/conf/frontends/var/www/htdocs/buetow.org/self/index.txt.tpl
@@ -0,0 +1 @@
+Welcome to <%= $hostname.'.'.$domain %>!