summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--.gitignore5
-rw-r--r--Magefile.go65
-rw-r--r--cmd/goprecords/main.go312
-rw-r--r--go.mod22
-rw-r--r--go.sum41
-rw-r--r--internal/goprecords/aggregate.go147
-rw-r--r--internal/goprecords/db.go175
-rw-r--r--internal/goprecords/order.go93
-rw-r--r--internal/goprecords/order_test.go72
-rw-r--r--internal/goprecords/parse_test.go73
-rw-r--r--internal/goprecords/report.go265
-rw-r--r--internal/goprecords/types.go287
-rw-r--r--internal/version/version.go4
-rwxr-xr-xscripts/compare-with-raku.sh72
14 files changed, 1633 insertions, 0 deletions
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..83dd821
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,5 @@
+# SQLite databases
+*.db
+
+# Built binary
+/goprecords
diff --git a/Magefile.go b/Magefile.go
new file mode 100644
index 0000000..07bfe1c
--- /dev/null
+++ b/Magefile.go
@@ -0,0 +1,65 @@
+//go:build mage
+
+package main
+
+// Magefile for goprecords. Targets follow the same style as other Go projects (e.g. hexai).
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+
+ "github.com/magefile/mage/mg"
+ "github.com/magefile/mage/sh"
+)
+
+const binaryName = "goprecords"
+
+// Default builds the binary.
+func Default() { mg.Deps(Build) }
+
+// Build builds the goprecords binary.
+func Build() error {
+ return sh.RunV("go", "build", "-o", binaryName, "./cmd/goprecords")
+}
+
+// Test runs all tests.
+func Test() error {
+ return sh.RunV("go", "test", "./...")
+}
+
+// Install builds and installs the binary to GOPATH/bin.
+func Install() error {
+ mg.Deps(Build)
+ gopath := os.Getenv("GOPATH")
+ if gopath == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return fmt.Errorf("GOPATH unset and home: %w", err)
+ }
+ gopath = filepath.Join(home, "go")
+ }
+ binDir := filepath.Join(gopath, "bin")
+ if err := os.MkdirAll(binDir, 0755); err != nil {
+ return fmt.Errorf("mkdir %s: %w", binDir, err)
+ }
+ dest := filepath.Join(binDir, binaryName)
+ return sh.RunV("cp", "-v", binaryName, dest)
+}
+
+// Uninstall removes the binary from GOPATH/bin.
+func Uninstall() error {
+ gopath := os.Getenv("GOPATH")
+ if gopath == "" {
+ home, err := os.UserHomeDir()
+ if err != nil {
+ return fmt.Errorf("GOPATH unset and home: %w", err)
+ }
+ gopath = filepath.Join(home, "go")
+ }
+ dest := filepath.Join(gopath, "bin", binaryName)
+ if err := os.Remove(dest); err != nil && !os.IsNotExist(err) {
+ return err
+ }
+ return nil
+}
diff --git a/cmd/goprecords/main.go b/cmd/goprecords/main.go
new file mode 100644
index 0000000..7807d1f
--- /dev/null
+++ b/cmd/goprecords/main.go
@@ -0,0 +1,312 @@
+// Program goprecords generates uptime reports from uptimed record files or a SQLite database.
+package main
+
+import (
+ "context"
+ "flag"
+ "fmt"
+ "os"
+
+ "github.com/goprecords/internal/goprecords"
+ "github.com/goprecords/internal/version"
+)
+
+const defaultDB = "goprecords.db"
+
+func main() {
+ for _, arg := range os.Args[1:] {
+ if arg == "-version" || arg == "--version" {
+ fmt.Println(version.Version)
+ os.Exit(0)
+ }
+ }
+
+ if len(os.Args) >= 2 {
+ switch os.Args[1] {
+ case "import":
+ runImport(os.Args[2:])
+ return
+ case "query":
+ runQuery(os.Args[2:])
+ return
+ case "test":
+ runTests()
+ return
+ }
+ }
+
+ runReportFromFiles(os.Args[1:])
+}
+
+func runImport(args []string) {
+ fs := flag.NewFlagSet("import", flag.ExitOnError)
+ statsDir := fs.String("stats-dir", "", "Directory containing .records files (required)")
+ dbPath := fs.String("db", defaultDB, "SQLite database path")
+ fs.Parse(args)
+
+ if *statsDir == "" {
+ fmt.Fprintln(os.Stderr, "import: missing required flag: -stats-dir")
+ fs.Usage()
+ os.Exit(1)
+ }
+ db, err := goprecords.OpenDB(*dbPath)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "open db:", err)
+ os.Exit(1)
+ }
+ defer db.Close()
+ ctx := context.Background()
+ if err := goprecords.CreateSchema(ctx, db); err != nil {
+ fmt.Fprintln(os.Stderr, "schema:", err)
+ os.Exit(1)
+ }
+ if err := goprecords.ImportFromDir(ctx, db, *statsDir); err != nil {
+ fmt.Fprintln(os.Stderr, "import:", err)
+ os.Exit(1)
+ }
+ fmt.Fprintf(os.Stderr, "imported %s into %s\n", *statsDir, *dbPath)
+}
+
+func runQuery(args []string) {
+ fs := flag.NewFlagSet("query", flag.ExitOnError)
+ dbPath := fs.String("db", defaultDB, "SQLite database path")
+ category := fs.String("category", "Host", "Category: Host, Kernel, KernelMajor, KernelName")
+ metric := fs.String("metric", "Uptime", "Metric: Boots, Uptime, Score, Downtime, Lifespan")
+ limit := fs.Uint("limit", 20, "Limit output to num of entries")
+ outputFormat := fs.String("output-format", "Plaintext", "Output format: Plaintext, Markdown, Gemtext")
+ all := fs.Bool("all", false, "Generate all possible stats but Kernel")
+ includeKernel := fs.Bool("include-kernel", false, "Also include Kernel when using -all")
+ statsOrder := fs.String("stats-order", "", "Comma-separated Category:Metric order for -all")
+ fs.Parse(args)
+
+ db, err := goprecords.OpenDB(*dbPath)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "open db:", err)
+ os.Exit(1)
+ }
+ defer db.Close()
+
+ ctx := context.Background()
+ aggregates, err := goprecords.LoadAggregates(ctx, db)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, "load:", err)
+ os.Exit(1)
+ }
+
+ cat, err := goprecords.ParseCategory(*category)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ met, err := goprecords.ParseMetric(*metric)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ outFmt, err := goprecords.ParseOutputFormat(*outputFormat)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ if !*all {
+ if cat != goprecords.CategoryHost && (met == goprecords.MetricDowntime || met == goprecords.MetricLifespan) {
+ fmt.Fprintf(os.Stderr, "Category %s only supports: Boots, Uptime, Score\n", *category)
+ os.Exit(1)
+ }
+ if cat == goprecords.CategoryHost {
+ os.Stdout.WriteString(goprecords.NewHostReporter(aggregates, *limit, met, outFmt, 1).Report())
+ } else {
+ os.Stdout.WriteString(goprecords.NewReporter(aggregates, cat, *limit, met, outFmt, 1).Report())
+ }
+ return
+ }
+
+ order, err := goprecords.StatsOrderList(*statsOrder)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ headerIndent := uint(2)
+ for _, pair := range order {
+ c, m := pair.Category, pair.Metric
+ if !*includeKernel && c == goprecords.CategoryKernel {
+ continue
+ }
+ if c != goprecords.CategoryHost && (m == goprecords.MetricDowntime || m == goprecords.MetricLifespan) {
+ continue
+ }
+ if c == goprecords.CategoryHost {
+ os.Stdout.WriteString(goprecords.NewHostReporter(aggregates, *limit, m, outFmt, headerIndent).Report())
+ } else {
+ os.Stdout.WriteString(goprecords.NewReporter(aggregates, c, *limit, m, outFmt, headerIndent).Report())
+ }
+ os.Stdout.WriteString("\n")
+ }
+}
+
+func runReportFromFiles(args []string) {
+ fs := flag.NewFlagSet("goprecords", flag.ExitOnError)
+ statsDir := fs.String("stats-dir", "", "The uptimed raw record input dir (required)")
+ category := fs.String("category", "Host", "Category: Host, Kernel, KernelMajor, KernelName")
+ metric := fs.String("metric", "Uptime", "Metric: Boots, Uptime, Score, Downtime, Lifespan")
+ limit := fs.Uint("limit", 20, "Limit output to num of entries")
+ outputFormat := fs.String("output-format", "Plaintext", "Output format: Plaintext, Markdown, Gemtext")
+ all := fs.Bool("all", false, "Generate all possible stats but Kernel")
+ includeKernel := fs.Bool("include-kernel", false, "Also include Kernel when using -all")
+ statsOrder := fs.String("stats-order", "", "Comma-separated Category:Metric order for -all")
+ fs.Parse(args)
+
+ if *statsDir == "" {
+ fmt.Fprintln(os.Stderr, "missing required flag: -stats-dir")
+ fs.Usage()
+ os.Exit(1)
+ }
+
+ cat, err := goprecords.ParseCategory(*category)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ met, err := goprecords.ParseMetric(*metric)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ outFmt, err := goprecords.ParseOutputFormat(*outputFormat)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ ctx := context.Background()
+ aggr := goprecords.NewAggregator(*statsDir)
+ aggregates, err := aggr.Aggregate(ctx)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+
+ if !*all {
+ if cat != goprecords.CategoryHost && (met == goprecords.MetricDowntime || met == goprecords.MetricLifespan) {
+ fmt.Fprintf(os.Stderr, "Category %s only supports: Boots, Uptime, Score\n", *category)
+ os.Exit(1)
+ }
+ if cat == goprecords.CategoryHost {
+ os.Stdout.WriteString(goprecords.NewHostReporter(aggregates, *limit, met, outFmt, 1).Report())
+ } else {
+ os.Stdout.WriteString(goprecords.NewReporter(aggregates, cat, *limit, met, outFmt, 1).Report())
+ }
+ return
+ }
+
+ order, err := goprecords.StatsOrderList(*statsOrder)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ headerIndent := uint(2)
+ for _, pair := range order {
+ c, m := pair.Category, pair.Metric
+ if !*includeKernel && c == goprecords.CategoryKernel {
+ continue
+ }
+ if c != goprecords.CategoryHost && (m == goprecords.MetricDowntime || m == goprecords.MetricLifespan) {
+ continue
+ }
+ if c == goprecords.CategoryHost {
+ os.Stdout.WriteString(goprecords.NewHostReporter(aggregates, *limit, m, outFmt, headerIndent).Report())
+ } else {
+ os.Stdout.WriteString(goprecords.NewReporter(aggregates, c, *limit, m, outFmt, headerIndent).Report())
+ }
+ os.Stdout.WriteString("\n")
+ }
+}
+
+func runTests() {
+ ctx := context.Background()
+ aggr := goprecords.NewAggregator("./fixtures")
+ aggregates, err := aggr.Aggregate(ctx)
+ if err != nil {
+ fmt.Fprintln(os.Stderr, err)
+ os.Exit(1)
+ }
+ limit := uint(3)
+ categories := []goprecords.Category{goprecords.CategoryHost, goprecords.CategoryKernel, goprecords.CategoryKernelMajor, goprecords.CategoryKernelName}
+ metrics := []goprecords.Metric{goprecords.MetricBoots, goprecords.MetricUptime, goprecords.MetricScore, goprecords.MetricDowntime, goprecords.MetricLifespan}
+ formats := []goprecords.OutputFormat{goprecords.FormatPlaintext, goprecords.FormatMarkdown, goprecords.FormatGemtext}
+ failed := 0
+ for _, cat := range categories {
+ for _, met := range metrics {
+ if cat != goprecords.CategoryHost && (met == goprecords.MetricDowntime || met == goprecords.MetricLifespan) {
+ continue
+ }
+ for _, outFmt := range formats {
+ var report string
+ if cat == goprecords.CategoryHost {
+ report = goprecords.NewHostReporter(aggregates, limit, met, outFmt, 1).Report()
+ } else {
+ report = goprecords.NewReporter(aggregates, cat, limit, met, outFmt, 1).Report()
+ }
+ expectedPath := fmt.Sprintf("./fixtures/%s.%s.%s.expected", cat, met, outFmt)
+ expected, err := os.ReadFile(expectedPath)
+ if err != nil {
+ fmt.Printf("FAIL: read %s: %v\n", expectedPath, err)
+ failed++
+ continue
+ }
+ if report != string(expected) {
+ fmt.Printf("FAIL: %s\n--- got:\n%s--- expected:\n%s\n", expectedPath, report, string(expected))
+ failed++
+ }
+ }
+ }
+ }
+ if _, err := goprecords.ParseStatsOrder("Host:Uptime,Host:Boots"); err != nil {
+ fmt.Printf("FAIL: parse Host:Uptime,Host:Boots: %v\n", err)
+ failed++
+ }
+ merged, _ := goprecords.StatsOrderList("Host:Uptime")
+ if len(merged) == 0 || merged[0].Category != goprecords.CategoryHost || merged[0].Metric != goprecords.MetricUptime {
+ fmt.Printf("FAIL: stats-order custom first entry\n")
+ failed++
+ }
+ for _, bad := range []string{"Host", "Bad:Uptime", "Kernel:Downtime", "Host:Nope"} {
+ if _, err := goprecords.ParseStatsOrder(bad); err == nil {
+ fmt.Printf("FAIL: parse %q should error\n", bad)
+ failed++
+ }
+ }
+ tmpDB := "./fixtures/test_import.db"
+ os.Remove(tmpDB)
+ db, err := goprecords.OpenDB(tmpDB)
+ if err != nil {
+ fmt.Printf("FAIL: open tmp db: %v\n", err)
+ failed++
+ } else {
+ goprecords.CreateSchema(ctx, db)
+ if err := goprecords.ImportFromDir(ctx, db, "./fixtures"); err != nil {
+ fmt.Printf("FAIL: import: %v\n", err)
+ failed++
+ } else {
+ aggFromDB, err := goprecords.LoadAggregates(ctx, db)
+ if err != nil {
+ fmt.Printf("FAIL: load: %v\n", err)
+ failed++
+ } else {
+ reportFromDB := goprecords.NewHostReporter(aggFromDB, limit, goprecords.MetricUptime, goprecords.FormatPlaintext, 1).Report()
+ reportFromMem := goprecords.NewHostReporter(aggregates, limit, goprecords.MetricUptime, goprecords.FormatPlaintext, 1).Report()
+ if reportFromDB != reportFromMem {
+ fmt.Printf("FAIL: import/query report differs from in-memory\n--- from DB:\n%s--- from memory:\n%s\n", reportFromDB, reportFromMem)
+ failed++
+ }
+ }
+ }
+ db.Close()
+ os.Remove(tmpDB)
+ }
+ if failed > 0 {
+ os.Exit(1)
+ }
+ fmt.Println("ok")
+}
diff --git a/go.mod b/go.mod
new file mode 100644
index 0000000..b8513c2
--- /dev/null
+++ b/go.mod
@@ -0,0 +1,22 @@
+module github.com/goprecords
+
+go 1.21
+
+require modernc.org/sqlite v1.29.1
+
+require (
+ github.com/dustin/go-humanize v1.0.1 // indirect
+ github.com/google/uuid v1.3.0 // indirect
+ github.com/hashicorp/golang-lru/v2 v2.0.7 // indirect
+ github.com/magefile/mage v1.15.0 // indirect
+ github.com/mattn/go-isatty v0.0.16 // indirect
+ github.com/ncruces/go-strftime v0.1.9 // indirect
+ github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect
+ golang.org/x/sys v0.16.0 // indirect
+ modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 // indirect
+ modernc.org/libc v1.41.0 // indirect
+ modernc.org/mathutil v1.6.0 // indirect
+ modernc.org/memory v1.7.2 // indirect
+ modernc.org/strutil v1.2.0 // indirect
+ modernc.org/token v1.1.0 // indirect
+)
diff --git a/go.sum b/go.sum
new file mode 100644
index 0000000..83dc90d
--- /dev/null
+++ b/go.sum
@@ -0,0 +1,41 @@
+github.com/dustin/go-humanize v1.0.1 h1:GzkhY7T5VNhEkwH0PVJgjz+fX1rhBrR7pRT3mDkpeCY=
+github.com/dustin/go-humanize v1.0.1/go.mod h1:Mu1zIs6XwVuF/gI1OepvI0qD18qycQx+mFykh5fBlto=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26 h1:Xim43kblpZXfIBQsbuBVKCudVG457BR2GZFIz3uw3hQ=
+github.com/google/pprof v0.0.0-20221118152302-e6195bd50e26/go.mod h1:dDKJzRmX4S37WGHujM7tX//fmj1uioxKzKxz3lo4HJo=
+github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
+github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/hashicorp/golang-lru/v2 v2.0.7 h1:a+bsQ5rvGLjzHuww6tVxozPZFVghXaHOwFs4luLUK2k=
+github.com/hashicorp/golang-lru/v2 v2.0.7/go.mod h1:QeFd9opnmA6QUJc5vARoKUSoFhyfM2/ZepoAG6RGpeM=
+github.com/magefile/mage v1.15.0 h1:BvGheCMAsG3bWUDbZ8AyXXpCNwU9u5CB6sM+HNb9HYg=
+github.com/magefile/mage v1.15.0/go.mod h1:z5UZb/iS3GoOSn0JgWuiw7dxlurVYTu+/jHXqQg881A=
+github.com/mattn/go-isatty v0.0.16 h1:bq3VjFmv/sOjHtdEhmkEV4x1AJtvUvOJ2PFAZ5+peKQ=
+github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
+github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwpU1Y=
+github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
+github.com/ncruces/go-strftime v0.1.9 h1:bY0MQC28UADQmHmaF5dgpLmImcShSi2kHU9XLdhx/f4=
+github.com/ncruces/go-strftime v0.1.9/go.mod h1:Fwc5htZGVVkseilnfgOVb9mKy6w1naJmn9CehxcKcls=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec h1:W09IVJc94icq4NjY3clb7Lk8O1qJ8BdBEF8z0ibU0rE=
+github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
+golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
+golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.16.0 h1:xWw16ngr6ZMtmxDyKyIgsE93KNKz5HKmMa3b8ALHidU=
+golang.org/x/sys v0.16.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/tools v0.17.0 h1:FvmRgNOcs3kOa+T20R1uhfP9F6HgG2mfxDv1vrx1Htc=
+golang.org/x/tools v0.17.0/go.mod h1:xsh6VxdV005rRVaS6SSAf9oiAqljS7UZUacMZ8Bnsps=
+modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6 h1:5D53IMaUuA5InSeMu9eJtlQXS2NxAhyWQvkKEgXZhHI=
+modernc.org/gc/v3 v3.0.0-20240107210532-573471604cb6/go.mod h1:Qz0X07sNOR1jWYCrJMEnbW/X55x206Q7Vt4mz6/wHp4=
+modernc.org/libc v1.41.0 h1:g9YAc6BkKlgORsUWj+JwqoB1wU3o4DE3bM3yvA3k+Gk=
+modernc.org/libc v1.41.0/go.mod h1:w0eszPsiXoOnoMJgrXjglgLuDy/bt5RR4y3QzUUeodY=
+modernc.org/mathutil v1.6.0 h1:fRe9+AmYlaej+64JsEEhoWuAYBkOtQiMEU7n/XgfYi4=
+modernc.org/mathutil v1.6.0/go.mod h1:Ui5Q9q1TR2gFm0AQRqQUaBWFLAhQpCwNcuhBOSedWPo=
+modernc.org/memory v1.7.2 h1:Klh90S215mmH8c9gO98QxQFsY+W451E8AnzjoE2ee1E=
+modernc.org/memory v1.7.2/go.mod h1:NO4NVCQy0N7ln+T9ngWqOQfi7ley4vpwvARR+Hjw95E=
+modernc.org/sqlite v1.29.1 h1:19GY2qvWB4VPw0HppFlZCPAbmxFU41r+qjKZQdQ1ryA=
+modernc.org/sqlite v1.29.1/go.mod h1:hG41jCYxOAOoO6BRK66AdRlmOcDzXf7qnwlwjUIOqa0=
+modernc.org/strutil v1.2.0 h1:agBi9dp1I+eOnxXeiZawM8F4LawKv4NzGWSaLfyeNZA=
+modernc.org/strutil v1.2.0/go.mod h1:/mdcBmfOibveCTBxUl5B5l6W+TTH1FXPLHZE6bTosX0=
+modernc.org/token v1.1.0 h1:Xl7Ap9dKaEs5kLoOQeQmPWevfnk/DM5qcLcYlA8ys6Y=
+modernc.org/token v1.1.0/go.mod h1:UGzOrNV1mAFSEB63lOFHIpNRUVMvYTc6yu1SMY/XTDM=
diff --git a/internal/goprecords/aggregate.go b/internal/goprecords/aggregate.go
new file mode 100644
index 0000000..3fb9144
--- /dev/null
+++ b/internal/goprecords/aggregate.go
@@ -0,0 +1,147 @@
+package goprecords
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+// Aggregates holds all category maps. Host uses HostAggregate; others use Aggregate.
+type Aggregates struct {
+ Host map[string]*HostAggregate
+ Kernel map[string]*Aggregate
+ KernelMajor map[string]*Aggregate
+ KernelName map[string]*Aggregate
+}
+
+// Aggregator reads .records files from a directory and builds Aggregates.
+type Aggregator struct {
+ statsDir string
+}
+
+// NewAggregator returns an Aggregator for the given stats directory.
+func NewAggregator(statsDir string) *Aggregator {
+ return &Aggregator{statsDir: statsDir}
+}
+
+// Aggregate reads all .records files and returns aggregated data.
+func (ag *Aggregator) Aggregate(ctx context.Context) (*Aggregates, error) {
+ out := &Aggregates{
+ Host: make(map[string]*HostAggregate),
+ Kernel: make(map[string]*Aggregate),
+ KernelMajor: make(map[string]*Aggregate),
+ KernelName: make(map[string]*Aggregate),
+ }
+ entries, err := os.ReadDir(ag.statsDir)
+ if err != nil {
+ return nil, fmt.Errorf("read stats dir: %w", err)
+ }
+ for _, e := range entries {
+ if e.IsDir() || !strings.HasSuffix(e.Name(), ".records") {
+ continue
+ }
+ path := filepath.Join(ag.statsDir, e.Name())
+ info, err := os.Stat(path)
+ if err != nil || info.Size() == 0 {
+ continue
+ }
+ host := strings.TrimSuffix(e.Name(), filepath.Ext(e.Name()))
+ if idx := strings.Index(host, "."); idx > 0 {
+ host = host[:idx]
+ }
+ if _, exists := out.Host[host]; exists {
+ return nil, fmt.Errorf("record file for %s already processed - duplicate inputs?", host)
+ }
+ lastKernel, err := lastKernelFromFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("last kernel %s: %w", path, err)
+ }
+ out.Host[host] = NewHostAggregate(host, lastKernel)
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("open %s: %w", path, err)
+ }
+ defer f.Close()
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ line := strings.TrimSpace(sc.Text())
+ if line == "" {
+ continue
+ }
+ parts := strings.SplitN(line, ":", 3)
+ if len(parts) != 3 {
+ continue
+ }
+ uptime, _ := strconv.ParseUint(parts[0], 10, 64)
+ bootTime, _ := strconv.ParseUint(parts[1], 10, 64)
+ osStr := parts[2]
+ uname := osStr
+ if i := strings.Index(osStr, " "); i > 0 {
+ uname = osStr[:i]
+ }
+ osMajor := uname + " "
+ rest := osStr
+ if i := strings.Index(osStr, " "); i >= 0 {
+ rest = osStr[i+1:]
+ }
+ if j := strings.Index(rest, "."); j >= 0 {
+ osMajor += rest[:j] + "..."
+ } else {
+ osMajor += rest + "..."
+ }
+ out.Host[host].AddRecord(uptime, bootTime)
+ getOrNewAggregate(out.Kernel, osStr).AddRecord(uptime, bootTime)
+ getOrNewAggregate(out.KernelName, uname).AddRecord(uptime, bootTime)
+ getOrNewAggregate(out.KernelMajor, osMajor).AddRecord(uptime, bootTime)
+ }
+ if err := sc.Err(); err != nil {
+ return nil, fmt.Errorf("scan %s: %w", path, err)
+ }
+ }
+ return out, nil
+}
+
+func getOrNewAggregate(m map[string]*Aggregate, name string) *Aggregate {
+ if a, ok := m[name]; ok {
+ return a
+ }
+ a := NewAggregate(name)
+ m[name] = a
+ return a
+}
+
+func lastKernelFromFile(path string) (string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ var maxBoot uint64
+ var lastOS string
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if line == "" {
+ continue
+ }
+ parts := strings.SplitN(line, ":", 3)
+ if len(parts) != 3 {
+ continue
+ }
+ bootTime, _ := strconv.ParseUint(parts[1], 10, 64)
+ if bootTime >= maxBoot {
+ maxBoot = bootTime
+ lastOS = parts[2]
+ }
+ }
+ return lastOS, sc.Err()
+}
diff --git a/internal/goprecords/db.go b/internal/goprecords/db.go
new file mode 100644
index 0000000..3afe970
--- /dev/null
+++ b/internal/goprecords/db.go
@@ -0,0 +1,175 @@
+package goprecords
+
+import (
+ "bufio"
+ "context"
+ "database/sql"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ _ "modernc.org/sqlite"
+)
+
+const schemaSQL = `
+CREATE TABLE IF NOT EXISTS record (
+ host TEXT NOT NULL,
+ uptime_sec INTEGER NOT NULL,
+ boot_time INTEGER NOT NULL,
+ os TEXT NOT NULL,
+ os_kernel_name TEXT NOT NULL,
+ os_kernel_major TEXT NOT NULL
+);
+CREATE INDEX IF NOT EXISTS idx_record_host ON record(host);
+CREATE INDEX IF NOT EXISTS idx_record_os ON record(os);
+CREATE INDEX IF NOT EXISTS idx_record_os_kernel_name ON record(os_kernel_name);
+CREATE INDEX IF NOT EXISTS idx_record_os_kernel_major ON record(os_kernel_major);
+`
+
+// OpenDB opens the SQLite database at path, creating the file if needed.
+func OpenDB(path string) (*sql.DB, error) {
+ db, err := sql.Open("sqlite", path)
+ if err != nil {
+ return nil, err
+ }
+ if _, err := db.Exec("PRAGMA foreign_keys = OFF"); err != nil {
+ db.Close()
+ return nil, err
+ }
+ return db, nil
+}
+
+// CreateSchema creates the record table and indexes (idempotent).
+func CreateSchema(ctx context.Context, db *sql.DB) error {
+ _, err := db.ExecContext(ctx, schemaSQL)
+ return err
+}
+
+// ResetRecords removes all rows so import is repeatable.
+func ResetRecords(ctx context.Context, db *sql.DB) error {
+ _, err := db.ExecContext(ctx, "DELETE FROM record")
+ return err
+}
+
+// ImportFromDir reads all .records files from statsDir and inserts into the DB.
+// Resets the record table first so the run is repeatable.
+func ImportFromDir(ctx context.Context, db *sql.DB, statsDir string) error {
+ if err := ResetRecords(ctx, db); err != nil {
+ return fmt.Errorf("reset records: %w", err)
+ }
+ entries, err := os.ReadDir(statsDir)
+ if err != nil {
+ return fmt.Errorf("read dir: %w", err)
+ }
+ insert, err := db.PrepareContext(ctx, "INSERT INTO record (host, uptime_sec, boot_time, os, os_kernel_name, os_kernel_major) VALUES (?, ?, ?, ?, ?, ?)")
+ if err != nil {
+ return fmt.Errorf("prepare insert: %w", err)
+ }
+ defer insert.Close()
+
+ for _, e := range entries {
+ if e.IsDir() || !strings.HasSuffix(e.Name(), ".records") {
+ continue
+ }
+ path := filepath.Join(statsDir, e.Name())
+ info, err := os.Stat(path)
+ if err != nil || info.Size() == 0 {
+ continue
+ }
+ host := strings.TrimSuffix(e.Name(), filepath.Ext(e.Name()))
+ if idx := strings.Index(host, "."); idx > 0 {
+ host = host[:idx]
+ }
+ f, err := os.Open(path)
+ if err != nil {
+ return fmt.Errorf("open %s: %w", path, err)
+ }
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if line == "" {
+ continue
+ }
+ parts := strings.SplitN(line, ":", 3)
+ if len(parts) != 3 {
+ continue
+ }
+ uptimeSec, _ := strconv.ParseInt(parts[0], 10, 64)
+ bootTime, _ := strconv.ParseInt(parts[1], 10, 64)
+ osStr := parts[2]
+ osKernelName := osStr
+ if i := strings.Index(osStr, " "); i > 0 {
+ osKernelName = osStr[:i]
+ }
+ osMajor := osKernelName + " "
+ rest := osStr
+ if i := strings.Index(osStr, " "); i >= 0 {
+ rest = osStr[i+1:]
+ }
+ if j := strings.Index(rest, "."); j >= 0 {
+ osMajor += rest[:j] + "..."
+ } else {
+ osMajor += rest + "..."
+ }
+ _, err := insert.ExecContext(ctx, host, uptimeSec, bootTime, osStr, osKernelName, osMajor)
+ if err != nil {
+ f.Close()
+ return fmt.Errorf("insert: %w", err)
+ }
+ }
+ f.Close()
+ if err := sc.Err(); err != nil {
+ return fmt.Errorf("scan %s: %w", path, err)
+ }
+ }
+ return nil
+}
+
+// LoadAggregates reads all rows from the DB and builds Aggregates (same shape as file-based aggregation).
+func LoadAggregates(ctx context.Context, db *sql.DB) (*Aggregates, error) {
+ rows, err := db.QueryContext(ctx, "SELECT host, uptime_sec, boot_time, os, os_kernel_name, os_kernel_major FROM record ORDER BY host, boot_time")
+ if err != nil {
+ return nil, fmt.Errorf("query: %w", err)
+ }
+ defer rows.Close()
+
+ out := &Aggregates{
+ Host: make(map[string]*HostAggregate),
+ Kernel: make(map[string]*Aggregate),
+ KernelMajor: make(map[string]*Aggregate),
+ KernelName: make(map[string]*Aggregate),
+ }
+ hostMaxBoot := make(map[string]int64)
+ hostLastKernel := make(map[string]string)
+
+ for rows.Next() {
+ var host string
+ var uptimeSec, bootTime int64
+ var osStr, osKernelName, osKernelMajor string
+ if err := rows.Scan(&host, &uptimeSec, &bootTime, &osStr, &osKernelName, &osKernelMajor); err != nil {
+ return nil, fmt.Errorf("scan row: %w", err)
+ }
+ uptime := uint64(uptimeSec)
+ boot := uint64(bootTime)
+ if boot >= uint64(hostMaxBoot[host]) {
+ hostMaxBoot[host] = int64(boot)
+ hostLastKernel[host] = osStr
+ }
+ if _, ok := out.Host[host]; !ok {
+ out.Host[host] = NewHostAggregate(host, "")
+ }
+ out.Host[host].AddRecord(uptime, boot)
+ getOrNewAggregate(out.Kernel, osStr).AddRecord(uptime, boot)
+ getOrNewAggregate(out.KernelName, osKernelName).AddRecord(uptime, boot)
+ getOrNewAggregate(out.KernelMajor, osKernelMajor).AddRecord(uptime, boot)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("rows: %w", err)
+ }
+ for host, h := range out.Host {
+ h.LastKernel = hostLastKernel[host]
+ }
+ return out, nil
+}
diff --git a/internal/goprecords/order.go b/internal/goprecords/order.go
new file mode 100644
index 0000000..a20e5f9
--- /dev/null
+++ b/internal/goprecords/order.go
@@ -0,0 +1,93 @@
+package goprecords
+
+import (
+ "fmt"
+ "strings"
+)
+
+// CategoryMetric pairs a category with a metric for stats order.
+type CategoryMetric struct {
+ Category Category
+ Metric Metric
+}
+
+// ParseStatsOrder parses a comma-separated "Category:Metric" list.
+func ParseStatsOrder(s string) ([]CategoryMetric, error) {
+ parts := strings.Split(s, ",")
+ var entries []string
+ for _, p := range parts {
+ p = strings.TrimSpace(p)
+ if p != "" {
+ entries = append(entries, p)
+ }
+ }
+ if len(entries) == 0 {
+ return nil, fmt.Errorf("invalid -stats-order: empty list")
+ }
+ var order []CategoryMetric
+ seen := make(map[string]bool)
+ for _, entry := range entries {
+ idx := strings.Index(entry, ":")
+ if idx <= 0 || idx == len(entry)-1 {
+ return nil, fmt.Errorf("invalid -stats-order entry %q (expected Category:Metric)", entry)
+ }
+ catName := strings.TrimSpace(entry[:idx])
+ metName := strings.TrimSpace(entry[idx+1:])
+ if catName == "" || metName == "" {
+ return nil, fmt.Errorf("invalid -stats-order entry %q (expected Category:Metric)", entry)
+ }
+ cat, err := ParseCategory(catName)
+ if err != nil {
+ return nil, fmt.Errorf("invalid -stats-order category %q", catName)
+ }
+ met, err := ParseMetric(metName)
+ if err != nil {
+ return nil, fmt.Errorf("invalid -stats-order metric %q", metName)
+ }
+ if cat != CategoryHost && (met == MetricDowntime || met == MetricLifespan) {
+ return nil, fmt.Errorf("invalid -stats-order entry %q (metric %s not supported for category %s)", entry, metName, catName)
+ }
+ key := cat.String() + ":" + met.String()
+ if seen[key] {
+ continue
+ }
+ seen[key] = true
+ order = append(order, CategoryMetric{cat, met})
+ }
+ return order, nil
+}
+
+// StatsOrderList returns the full order (custom entries first, then default remainder).
+func StatsOrderList(statsOrder string) ([]CategoryMetric, error) {
+ defaultOrder := defaultStatsOrder()
+ if statsOrder == "" {
+ return defaultOrder, nil
+ }
+ order, err := ParseStatsOrder(statsOrder)
+ if err != nil {
+ return nil, err
+ }
+ seen := make(map[string]bool)
+ for _, p := range order {
+ seen[p.Category.String()+":"+p.Metric.String()] = true
+ }
+ for _, p := range defaultOrder {
+ key := p.Category.String() + ":" + p.Metric.String()
+ if seen[key] {
+ continue
+ }
+ seen[key] = true
+ order = append(order, p)
+ }
+ return order, nil
+}
+
+func defaultStatsOrder() []CategoryMetric {
+ var out []CategoryMetric
+ for _, c := range []Category{CategoryHost, CategoryKernel, CategoryKernelMajor, CategoryKernelName} {
+ for _, m := range []Metric{MetricBoots, MetricUptime, MetricScore, MetricDowntime, MetricLifespan} {
+ out = append(out, CategoryMetric{c, m})
+ }
+ }
+ return out
+}
diff --git a/internal/goprecords/order_test.go b/internal/goprecords/order_test.go
new file mode 100644
index 0000000..c085723
--- /dev/null
+++ b/internal/goprecords/order_test.go
@@ -0,0 +1,72 @@
+package goprecords
+
+import (
+ "testing"
+)
+
+func TestParseStatsOrder(t *testing.T) {
+ tests := []struct {
+ in string
+ want []CategoryMetric
+ valid bool
+ }{
+ {
+ in: "Host:Uptime,Host:Boots",
+ want: []CategoryMetric{{CategoryHost, MetricUptime}, {CategoryHost, MetricBoots}},
+ valid: true,
+ },
+ {
+ in: "Host:Uptime",
+ want: []CategoryMetric{{CategoryHost, MetricUptime}},
+ valid: true,
+ },
+ {in: "Host", valid: false},
+ {in: "Bad:Uptime", valid: false},
+ {in: "Kernel:Downtime", valid: false},
+ {in: "Host:Nope", valid: false},
+ {in: "", valid: false},
+ {in: " , ", valid: false},
+ }
+ for _, tt := range tests {
+ got, err := ParseStatsOrder(tt.in)
+ valid := err == nil
+ if valid != tt.valid {
+ t.Errorf("ParseStatsOrder(%q) err=%v; valid=%v want %v", tt.in, err, valid, tt.valid)
+ continue
+ }
+ if !tt.valid {
+ continue
+ }
+ if len(got) != len(tt.want) {
+ t.Errorf("ParseStatsOrder(%q) len=%d want %d", tt.in, len(got), len(tt.want))
+ continue
+ }
+ for i := range got {
+ if got[i].Category != tt.want[i].Category || got[i].Metric != tt.want[i].Metric {
+ t.Errorf("ParseStatsOrder(%q)[%d] = %v; want %v", tt.in, i, got[i], tt.want[i])
+ }
+ }
+ }
+}
+
+func TestStatsOrderList(t *testing.T) {
+ // Empty string returns default order (all category×metric pairs).
+ got, err := StatsOrderList("")
+ if err != nil {
+ t.Fatalf("StatsOrderList(\"\"): %v", err)
+ }
+ if len(got) == 0 {
+ t.Error("StatsOrderList(\"\"): got empty order")
+ }
+ // Custom order: Host:Uptime first, then rest of default.
+ got, err = StatsOrderList("Host:Uptime")
+ if err != nil {
+ t.Fatalf("StatsOrderList(\"Host:Uptime\"): %v", err)
+ }
+ if len(got) == 0 {
+ t.Fatal("StatsOrderList(\"Host:Uptime\"): got empty")
+ }
+ if got[0].Category != CategoryHost || got[0].Metric != MetricUptime {
+ t.Errorf("StatsOrderList(\"Host:Uptime\")[0] = %v; want Host:Uptime", got[0])
+ }
+}
diff --git a/internal/goprecords/parse_test.go b/internal/goprecords/parse_test.go
new file mode 100644
index 0000000..304b06e
--- /dev/null
+++ b/internal/goprecords/parse_test.go
@@ -0,0 +1,73 @@
+package goprecords
+
+import (
+ "testing"
+)
+
+// TestParseCategory exercises ParseCategory with every valid name plus a
+// few rejects (empty, lowercase, unknown).
+func TestParseCategory(t *testing.T) {
+	cases := []struct {
+		input   string
+		want    Category
+		wantErr bool
+	}{
+		{"Host", CategoryHost, false},
+		{"Kernel", CategoryKernel, false},
+		{"KernelMajor", CategoryKernelMajor, false},
+		{"KernelName", CategoryKernelName, false},
+		{"", 0, true},
+		{"host", 0, true},
+		{"Bad", 0, true},
+	}
+	for _, tc := range cases {
+		got, err := ParseCategory(tc.input)
+		if gotErr := err != nil; gotErr == tc.wantErr && (tc.wantErr || got == tc.want) {
+			continue
+		}
+		t.Errorf("ParseCategory(%q) = %v, %v; want %v, ok=%v", tc.input, got, err, tc.want, !tc.wantErr)
+	}
+}
+
+// TestParseMetric exercises ParseMetric with every valid name plus a few
+// rejects (empty, lowercase, unknown).
+func TestParseMetric(t *testing.T) {
+	cases := []struct {
+		input   string
+		want    Metric
+		wantErr bool
+	}{
+		{"Boots", MetricBoots, false},
+		{"Uptime", MetricUptime, false},
+		{"Score", MetricScore, false},
+		{"Downtime", MetricDowntime, false},
+		{"Lifespan", MetricLifespan, false},
+		{"", 0, true},
+		{"uptime", 0, true},
+		{"Nope", 0, true},
+	}
+	for _, tc := range cases {
+		got, err := ParseMetric(tc.input)
+		if gotErr := err != nil; gotErr == tc.wantErr && (tc.wantErr || got == tc.want) {
+			continue
+		}
+		t.Errorf("ParseMetric(%q) = %v, %v; want %v, ok=%v", tc.input, got, err, tc.want, !tc.wantErr)
+	}
+}
+
+// TestParseOutputFormat exercises ParseOutputFormat with every valid name
+// plus rejects (empty, unknown/lowercase).
+func TestParseOutputFormat(t *testing.T) {
+	cases := []struct {
+		input   string
+		want    OutputFormat
+		wantErr bool
+	}{
+		{"Plaintext", FormatPlaintext, false},
+		{"Markdown", FormatMarkdown, false},
+		{"Gemtext", FormatGemtext, false},
+		{"", 0, true},
+		{"html", 0, true},
+	}
+	for _, tc := range cases {
+		got, err := ParseOutputFormat(tc.input)
+		if gotErr := err != nil; gotErr == tc.wantErr && (tc.wantErr || got == tc.want) {
+			continue
+		}
+		t.Errorf("ParseOutputFormat(%q) = %v, %v; want %v, ok=%v", tc.input, got, err, tc.want, !tc.wantErr)
+	}
+}
diff --git a/internal/goprecords/report.go b/internal/goprecords/report.go
new file mode 100644
index 0000000..af61b29
--- /dev/null
+++ b/internal/goprecords/report.go
@@ -0,0 +1,265 @@
+package goprecords
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
+// Reporter builds a single report (category + metric + format).
+type Reporter struct {
+	aggregates   *Aggregates  // source data to rank
+	limit        uint         // maximum number of table rows
+	category     Category     // grouping: Host, Kernel, KernelMajor, KernelName
+	metric       Metric       // value the rows are ranked by
+	outputFormat OutputFormat // Plaintext, Markdown or Gemtext
+	headerIndent uint         // number of '#' in Markdown/Gemtext headings
+}
+
+// NewReporter returns a Reporter for the given category and metric.
+func NewReporter(aggregates *Aggregates, category Category, limit uint, metric Metric, outputFormat OutputFormat, headerIndent uint) *Reporter {
+	r := new(Reporter)
+	r.aggregates = aggregates
+	r.limit = limit
+	r.category = category
+	r.metric = metric
+	r.outputFormat = outputFormat
+	r.headerIndent = headerIndent
+	return r
+}
+
+// NewHostReporter returns a Reporter for the Host category; it is a
+// convenience wrapper around NewReporter.
+func NewHostReporter(aggregates *Aggregates, limit uint, metric Metric, outputFormat OutputFormat, headerIndent uint) *Reporter {
+	return NewReporter(aggregates, CategoryHost, limit, metric, outputFormat, headerIndent)
+}
+
+// Report returns the formatted report string.
+func (r *Reporter) Report() string {
+ var rows []tableRow
+ var hasLastKernel bool
+ if r.category == CategoryHost {
+ rows, hasLastKernel = r.buildHostTable()
+ } else {
+ rows, hasLastKernel = r.buildCategoryTable()
+ }
+ if len(rows) == 0 {
+ return ""
+ }
+ return r.formatReport(rows, hasLastKernel)
+}
+
+// buildHostTable ranks host aggregates by the configured metric, descending,
+// and returns up to limit rows. The second result is always true because
+// host rows carry a "Last Kernel" column.
+func (r *Reporter) buildHostTable() ([]tableRow, bool) {
+	// metricOf picks the sort key; unknown metrics fall back to uptime.
+	metricOf := func(h *HostAggregate) uint64 {
+		switch r.metric {
+		case MetricBoots:
+			return h.Boots
+		case MetricScore:
+			return h.MetaScore()
+		case MetricDowntime:
+			return h.Downtime()
+		case MetricLifespan:
+			return h.Lifespan()
+		default: // MetricUptime and anything unrecognized
+			return h.Uptime
+		}
+	}
+	type ranked struct {
+		agg *HostAggregate
+		key uint64
+	}
+	entries := make([]ranked, 0, len(r.aggregates.Host))
+	for _, h := range r.aggregates.Host {
+		entries = append(entries, ranked{agg: h, key: metricOf(h)})
+	}
+	sort.Slice(entries, func(a, b int) bool { return entries[a].key > entries[b].key })
+	var rows []tableRow
+	for i, e := range entries {
+		if uint(i) >= r.limit {
+			break
+		}
+		// Hosts seen within the last 90 days are flagged with '*'.
+		marker := " "
+		if e.agg.IsActive(90) {
+			marker = "*"
+		}
+		rows = append(rows, tableRow{
+			Pos:        fmt.Sprintf("%d.", i+1),
+			Name:       marker + e.agg.Name,
+			Value:      r.humanStrHost(e.agg),
+			LastKernel: e.agg.LastKernel,
+		})
+	}
+	return rows, true
+}
+
+// buildCategoryTable ranks kernel-style aggregates (Kernel, KernelMajor or
+// KernelName) by the configured metric, descending, and returns up to limit
+// rows. These tables have no last-kernel column, so the second result is
+// always false.
+func (r *Reporter) buildCategoryTable() ([]tableRow, bool) {
+	src := r.aggregates.Kernel
+	switch r.category {
+	case CategoryKernelMajor:
+		src = r.aggregates.KernelMajor
+	case CategoryKernelName:
+		src = r.aggregates.KernelName
+	}
+	// metricOf picks the sort key; metrics without a per-aggregate value
+	// (Downtime, Lifespan) fall back to uptime, as do unknown metrics.
+	metricOf := func(a *Aggregate) uint64 {
+		switch r.metric {
+		case MetricBoots:
+			return a.Boots
+		case MetricScore:
+			return a.MetaScore()
+		default:
+			return a.Uptime
+		}
+	}
+	type ranked struct {
+		agg *Aggregate
+		key uint64
+	}
+	var entries []ranked
+	for _, a := range src {
+		entries = append(entries, ranked{agg: a, key: metricOf(a)})
+	}
+	sort.Slice(entries, func(a, b int) bool { return entries[a].key > entries[b].key })
+	var rows []tableRow
+	for i, e := range entries {
+		if uint(i) >= r.limit {
+			break
+		}
+		// Entries seen within the last 90 days are flagged with '*'.
+		marker := " "
+		if e.agg.IsActive(90) {
+			marker = "*"
+		}
+		rows = append(rows, tableRow{
+			Pos:   fmt.Sprintf("%d.", i+1),
+			Name:  marker + e.agg.Name,
+			Value: r.humanStrAgg(e.agg),
+		})
+	}
+	return rows, false
+}
+
+// humanStrHost renders the configured metric of h as a display string:
+// durations via formatDuration, counts via formatInt.
+func (r *Reporter) humanStrHost(h *HostAggregate) string {
+	switch r.metric {
+	case MetricBoots:
+		return formatInt(h.Boots)
+	case MetricScore:
+		return formatInt(h.MetaScore())
+	case MetricDowntime:
+		return formatDuration(h.Downtime())
+	case MetricLifespan:
+		return formatDuration(h.Lifespan())
+	}
+	// MetricUptime and any unknown metric render the uptime.
+	return formatDuration(h.Uptime)
+}
+
+// humanStrAgg renders the configured metric of a as a display string.
+func (r *Reporter) humanStrAgg(a *Aggregate) string {
+	switch r.metric {
+	case MetricBoots:
+		return formatInt(a.Boots)
+	case MetricScore:
+		return formatInt(a.MetaScore())
+	}
+	// MetricUptime — and metrics with no per-aggregate value — show uptime.
+	return formatDuration(a.Uptime)
+}
+
+func (r *Reporter) formatReport(rows []tableRow, hasLastKernel bool) string {
+ cW, nW, vW, lkW := r.reportWidths(rows, hasLastKernel)
+ border := r.buildBorder(cW, nW, vW, lkW, hasLastKernel)
+ header := r.buildReportHeader(cW, nW, vW, lkW, hasLastKernel, border)
+ fmtStr := r.buildFormatStr(cW, nW, vW, lkW, hasLastKernel)
+ body := r.buildReportBody(rows, fmtStr, hasLastKernel)
+ out := header + body + border
+ if r.outputFormat == FormatMarkdown || r.outputFormat == FormatGemtext {
+ out += "```\n"
+ }
+ return out
+}
+
+// reportWidths returns the column widths needed to fit both the headings
+// and every row value. countW starts at 3 to fit the "Pos" heading; the
+// last-kernel column is only seeded when that column exists.
+func (r *Reporter) reportWidths(rows []tableRow, hasLastKernel bool) (countW, nameW, valueW, lastKernelW int) {
+	grow := func(w *int, s string) {
+		if len(s) > *w {
+			*w = len(s)
+		}
+	}
+	countW = 3
+	nameW = len(r.category.String())
+	valueW = len(r.metric.String())
+	if hasLastKernel {
+		lastKernelW = len("Last Kernel")
+	}
+	for _, row := range rows {
+		grow(&countW, row.Pos)
+		grow(&nameW, row.Name)
+		grow(&valueW, row.Value)
+		grow(&lastKernelW, row.LastKernel)
+	}
+	return countW, nameW, valueW, lastKernelW
+}
+
+func (r *Reporter) buildBorder(countW, nameW, valueW, lastKernelW int, hasLastKernel bool) string {
+ parts := []string{
+ "+" + strings.Repeat("-", 2+countW),
+ "+" + strings.Repeat("-", 2+nameW),
+ "+" + strings.Repeat("-", 2+valueW),
+ }
+ if hasLastKernel {
+ parts = append(parts, "+"+strings.Repeat("-", 2+lastKernelW))
+ }
+ return strings.Join(parts, "") + "+\n"
+}
+
+// buildReportHeader renders everything above the table body: the title, the
+// metric description, the opening ``` fence (Markdown/Gemtext only) and the
+// heading row framed by two borders.
+func (r *Reporter) buildReportHeader(countW, nameW, valueW, lastKernelW int, hasLastKernel bool, border string) string {
+	var h string
+	// Markdown/Gemtext titles get a "#..." heading prefix; plaintext does not.
+	if r.outputFormat == FormatMarkdown || r.outputFormat == FormatGemtext {
+		h = strings.Repeat("#", int(r.headerIndent)) + " "
+	}
+	h += fmt.Sprintf("Top %d %s's by %s\n\n", r.limit, r.metric, r.category)
+	desc := MetricDescription(r.metric)
+	// NOTE(review): len(border) includes the trailing '\n', so the wrap limit
+	// is one wider than the visible table — presumably kept to byte-match the
+	// Raku guprecords output (see scripts/compare-with-raku.sh); confirm
+	// before changing.
+	lineLimit := len(border)
+	if r.outputFormat == FormatPlaintext && lineLimit > 0 && len(desc) > lineLimit {
+		// Only the first wrapped line gets the leading space; continuation
+		// lines start at column 0.
+		desc = " " + wordWrap(desc, lineLimit)
+	}
+	h += desc + "\n\n"
+	if r.outputFormat == FormatMarkdown || r.outputFormat == FormatGemtext {
+		h += "```\n"
+	}
+	h += border
+	fmtStr := r.buildFormatStr(countW, nameW, valueW, lastKernelW, hasLastKernel)
+	if hasLastKernel {
+		h += fmt.Sprintf(fmtStr+"\n", "Pos", r.category.String(), r.metric.String(), "Last Kernel")
+	} else {
+		h += fmt.Sprintf(fmtStr+"\n", "Pos", r.category.String(), r.metric.String())
+	}
+	h += border
+	return h
+}
+
+func (r *Reporter) buildFormatStr(countW, nameW, valueW, lastKernelW int, hasLastKernel bool) string {
+ if hasLastKernel {
+ return fmt.Sprintf("| %%%ds | %%%ds | %%%ds | %%%ds |", countW, nameW, valueW, lastKernelW)
+ }
+ return fmt.Sprintf("| %%%ds | %%%ds | %%%ds |", countW, nameW, valueW)
+}
+
+// buildReportBody renders each row through fmtStr, one line per row.
+func (r *Reporter) buildReportBody(rows []tableRow, fmtStr string, hasLastKernel bool) string {
+	line := fmtStr + "\n"
+	var b strings.Builder
+	for _, row := range rows {
+		if hasLastKernel {
+			fmt.Fprintf(&b, line, row.Pos, row.Name, row.Value, row.LastKernel)
+		} else {
+			fmt.Fprintf(&b, line, row.Pos, row.Name, row.Value)
+		}
+	}
+	return b.String()
+}
diff --git a/internal/goprecords/types.go b/internal/goprecords/types.go
new file mode 100644
index 0000000..ce44df8
--- /dev/null
+++ b/internal/goprecords/types.go
@@ -0,0 +1,287 @@
+package goprecords
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
+const (
+	// Day is seconds in 24 hours.
+	Day = 24 * 3600
+	// Month is 30 days in seconds (fixed-length month, used by MetaScore).
+	Month = 30 * Day
+)
+
+// Category is the grouping for reports (Host, Kernel, etc.).
+type Category int
+
+const (
+	CategoryHost        Category = iota // rank hosts (Aggregates.Host)
+	CategoryKernel                      // rank kernels (Aggregates.Kernel)
+	CategoryKernelMajor                 // rank by major version (Aggregates.KernelMajor)
+	CategoryKernelName                  // rank by kernel name (Aggregates.KernelName)
+)
+
+// String returns the category name, or "?" for values outside the known set.
+func (c Category) String() string {
+	names := [...]string{
+		CategoryHost:        "Host",
+		CategoryKernel:      "Kernel",
+		CategoryKernelMajor: "KernelMajor",
+		CategoryKernelName:  "KernelName",
+	}
+	if c < 0 || int(c) >= len(names) {
+		return "?"
+	}
+	return names[c]
+}
+
+// Metric is the value to rank by (Boots, Uptime, etc.).
+type Metric int
+
+const (
+	MetricBoots    Metric = iota // number of boots
+	MetricUptime                 // accumulated uptime
+	MetricScore                  // combined score, see MetaScore
+	MetricDowntime               // lifespan minus uptime (host reports only)
+	MetricLifespan               // last seen minus first boot (host reports only)
+)
+
+// String returns the metric name, or "?" for values outside the known set.
+func (m Metric) String() string {
+	names := [...]string{
+		MetricBoots:    "Boots",
+		MetricUptime:   "Uptime",
+		MetricScore:    "Score",
+		MetricDowntime: "Downtime",
+		MetricLifespan: "Lifespan",
+	}
+	if m < 0 || int(m) >= len(names) {
+		return "?"
+	}
+	return names[m]
+}
+
+// OutputFormat is the report output format.
+type OutputFormat int
+
+const (
+	FormatPlaintext OutputFormat = iota // bare tables, wrapped description
+	FormatMarkdown                      // '#' heading plus ``` fenced table
+	FormatGemtext                       // same framing as Markdown
+)
+
+// String returns the format name, or "?" for values outside the known set.
+func (f OutputFormat) String() string {
+	names := [...]string{
+		FormatPlaintext: "Plaintext",
+		FormatMarkdown:  "Markdown",
+		FormatGemtext:   "Gemtext",
+	}
+	if f < 0 || int(f) >= len(names) {
+		return "?"
+	}
+	return names[f]
+}
+
+// Epoch is a Unix timestamp for duration/date formatting.
+type Epoch uint64
+
+// HumanDuration returns a human-readable duration from epoch (e.g. "1 years, 2 months, 3 days").
+//
+// NOTE(review): this converts the raw second count into a calendar date and
+// reads off year/month/day, so month and day are 1-based (a zero duration
+// renders as "0 years, 1 months, 1 days") and month lengths follow the
+// calendar starting at 1970 — presumably intentional to byte-match the Raku
+// guprecords output; confirm before "fixing".
+func (e Epoch) HumanDuration() string {
+	t := time.Unix(int64(e), 0).UTC()
+	y := t.Year() - 1970
+	m := int(t.Month())
+	d := t.Day()
+	return fmt.Sprintf("%d years, %d months, %d days", y, m, d)
+}
+
+// NewerThan reports whether the epoch lies within the last limitDays days.
+func (e Epoch) NewerThan(limitDays uint) bool {
+	cutoff := time.Duration(limitDays) * 24 * time.Hour
+	return time.Since(time.Unix(int64(e), 0)) < cutoff
+}
+
+// Aggregate holds per-entity stats (Host, Kernel, etc.).
+type Aggregate struct {
+	Name      string // entity name (host name, kernel string, ...)
+	Uptime    uint64 // summed uptime over all records, in seconds
+	FirstBoot uint64 // earliest boot time seen, Unix seconds
+	LastSeen  uint64 // latest boot+uptime seen, Unix seconds
+	Boots     uint64 // number of records folded in via AddRecord
+}
+
+// NewAggregate constructs an Aggregate with the given name; all counters
+// start at zero and are populated by AddRecord.
+func NewAggregate(name string) *Aggregate {
+	return &Aggregate{Name: name}
+}
+
+// AddRecord folds one uptime record into the aggregate: it adds the uptime,
+// bumps the boot counter and widens the FirstBoot/LastSeen window.
+func (a *Aggregate) AddRecord(uptimeSec, bootTime uint64) {
+	a.Boots++
+	a.Uptime += uptimeSec
+	if a.FirstBoot == 0 || bootTime < a.FirstBoot {
+		a.FirstBoot = bootTime
+	}
+	if seen := bootTime + uptimeSec; seen > a.LastSeen {
+		a.LastSeen = seen
+	}
+}
+
+// IsActive reports whether the entity was seen within limitDays days.
+func (a *Aggregate) IsActive(limitDays uint) bool {
+	return Epoch(a.LastSeen).NewerThan(limitDays)
+}
+
+// MetaScore returns the computed score for this aggregate: doubled uptime,
+// plus one Day per boot, plus a one-Month bonus when seen within the last 90
+// days, all scaled down by 1e6.
+// NOTE(review): the weights appear chosen to match the Raku guprecords
+// scoring — confirm against that implementation before tuning.
+func (a *Aggregate) MetaScore() uint64 {
+	activeBonus := uint64(0)
+	if a.IsActive(90) {
+		activeBonus = Month
+	}
+	return ((a.Uptime*2 + a.Boots*uint64(Day) + activeBonus) / 1000000)
+}
+
+// HostAggregate adds last-kernel and lifespan/downtime for host reports.
+type HostAggregate struct {
+	Aggregate
+	LastKernel string // kernel string reported for the host
+}
+
+// NewHostAggregate constructs a HostAggregate.
+func NewHostAggregate(name, lastKernel string) *HostAggregate {
+	return &HostAggregate{
+		Aggregate: Aggregate{Name: name},
+		LastKernel: lastKernel,
+	}
+}
+
+// Lifespan returns last-seen minus first-boot, i.e. the wall-clock span the
+// host has existed in the data set.
+func (h *HostAggregate) Lifespan() uint64 { return h.LastSeen - h.FirstBoot }
+
+// Downtime returns lifespan minus uptime.
+// NOTE(review): unsigned subtraction — if overlapping records ever make
+// Uptime exceed Lifespan this wraps to a huge value; verify the ingest side
+// rules that out.
+func (h *HostAggregate) Downtime() uint64 { return h.Lifespan() - h.Uptime }
+
+// MetaScore returns the host-specific score: the base Aggregate score plus a
+// downtime component scaled down by 2e6.
+func (h *HostAggregate) MetaScore() uint64 {
+	return uint64(h.Downtime()/2000000) + h.Aggregate.MetaScore()
+}
+
+// tableRow is one row in the report table.
+type tableRow struct {
+	Pos        string // rank column, e.g. "1."
+	Name       string // active marker ("*" or " ") followed by entity name
+	Value      string // formatted metric value
+	LastKernel string // populated for Host reports only
+}
+
+// MetricDescription returns the description text for a metric.
+func MetricDescription(m Metric) string {
+ switch m {
+ case MetricBoots:
+ return "Boots is the total number of host boots over the entire lifespan."
+ case MetricUptime:
+ return "Uptime is the total uptime of a host over the entire lifespan."
+ case MetricDowntime:
+ return "Downtime is the total downtime of a host over the entire lifespan."
+ case MetricLifespan:
+ return "Lifespan is the total uptime + the total downtime of a host."
+ case MetricScore:
+ return "Score is calculated by combining all other metrics."
+ default:
+ return ""
+ }
+}
+
+// ParseCategory parses a category string ("Host", "Kernel", "KernelMajor" or
+// "KernelName"); matching is case-sensitive.
+func ParseCategory(s string) (Category, error) {
+	categories := map[string]Category{
+		"Host":        CategoryHost,
+		"Kernel":      CategoryKernel,
+		"KernelMajor": CategoryKernelMajor,
+		"KernelName":  CategoryKernelName,
+	}
+	c, ok := categories[s]
+	if !ok {
+		return 0, fmt.Errorf("invalid category %q", s)
+	}
+	return c, nil
+}
+
+// ParseMetric parses a metric string ("Boots", "Uptime", "Score", "Downtime"
+// or "Lifespan"); matching is case-sensitive.
+func ParseMetric(s string) (Metric, error) {
+	metrics := map[string]Metric{
+		"Boots":    MetricBoots,
+		"Uptime":   MetricUptime,
+		"Score":    MetricScore,
+		"Downtime": MetricDowntime,
+		"Lifespan": MetricLifespan,
+	}
+	m, ok := metrics[s]
+	if !ok {
+		return 0, fmt.Errorf("invalid metric %q", s)
+	}
+	return m, nil
+}
+
+// ParseOutputFormat parses an output format string ("Plaintext", "Markdown"
+// or "Gemtext"); matching is case-sensitive.
+func ParseOutputFormat(s string) (OutputFormat, error) {
+	formats := map[string]OutputFormat{
+		"Plaintext": FormatPlaintext,
+		"Markdown":  FormatMarkdown,
+		"Gemtext":   FormatGemtext,
+	}
+	f, ok := formats[s]
+	if !ok {
+		return 0, fmt.Errorf("invalid output-format %q", s)
+	}
+	return f, nil
+}
+
+// wordWrap re-flows s so no line exceeds lineLimit characters, breaking only
+// at whitespace. A single word longer than lineLimit is emitted on its own
+// line, unbroken. Returns s unchanged when it already fits or lineLimit <= 0.
+func wordWrap(s string, lineLimit int) string {
+	if lineLimit <= 0 || len(s) <= lineLimit {
+		return s
+	}
+	var out strings.Builder
+	lineLen := 0
+	for _, word := range strings.Fields(s) {
+		need := len(word)
+		if lineLen > 0 {
+			need++ // account for the joining space
+		}
+		if lineLen+need <= lineLimit {
+			if lineLen > 0 {
+				out.WriteByte(' ')
+			}
+			out.WriteString(word)
+			lineLen += need
+		} else {
+			if lineLen > 0 {
+				out.WriteByte('\n')
+			}
+			out.WriteString(word)
+			lineLen = len(word)
+		}
+	}
+	return out.String()
+}
+
+// formatDuration renders a second count as a human duration via Epoch.
+func formatDuration(sec uint64) string {
+	return Epoch(sec).HumanDuration()
+}
+
+// formatInt renders n in base 10.
+func formatInt(n uint64) string {
+	return strconv.FormatUint(n, 10)
+}
diff --git a/internal/version/version.go b/internal/version/version.go
new file mode 100644
index 0000000..3178b50
--- /dev/null
+++ b/internal/version/version.go
@@ -0,0 +1,4 @@
+// Package version holds the application version string.
+package version
+
+// Version is the application version.
+const Version = "0.1.0"
diff --git a/scripts/compare-with-raku.sh b/scripts/compare-with-raku.sh
new file mode 100755
index 0000000..924cc85
--- /dev/null
+++ b/scripts/compare-with-raku.sh
@@ -0,0 +1,72 @@
+#!/bin/sh
+# Compare goprecords (Go) output with guprecords (Raku) on the same stats dir.
+# Usage: ./scripts/compare-with-raku.sh [stats-dir]
+# Default stats-dir: ../uprecords/stats (relative to repo root) or set UPRECORDS_STATS.
+
+set -e
+SCRIPT_DIR="$(cd "$(dirname "$0")" && pwd)"
+REPO_ROOT="$(cd "$SCRIPT_DIR/.." && pwd)"
+# Precedence: CLI argument > UPRECORDS_STATS env var > sibling checkout.
+STATS="${1:-${UPRECORDS_STATS:-$REPO_ROOT/../uprecords/stats}}"
+RAKU_REPO="${GUPRECORDS_RAKU:-$REPO_ROOT/../guprecords}"
+
+if [ ! -d "$STATS" ]; then
+	echo "Stats dir not found: $STATS" >&2
+	echo "Usage: $0 [stats-dir]" >&2
+	echo " or set UPRECORDS_STATS" >&2
+	exit 1
+fi
+if [ ! -f "$RAKU_REPO/guprecords.raku" ]; then
+	echo "Raku guprecords not found: $RAKU_REPO/guprecords.raku" >&2
+	echo "Set GUPRECORDS_RAKU to the guprecords (Raku) repo." >&2
+	exit 1
+fi
+
+GO_BIN="$REPO_ROOT/goprecords"
+if [ ! -x "$GO_BIN" ]; then
+	echo "Build goprecords first (e.g. mage build)" >&2
+	exit 1
+fi
+
+# Output file prefixes: $R.*.txt for Raku results, $G.*.txt for Go results.
+mkdir -p /tmp/goprecords-compare
+R=/tmp/goprecords-compare/raku
+G=/tmp/goprecords-compare/go
+
+echo "Stats dir: $STATS"
+echo "Raku repo: $RAKU_REPO"
+echo ""
+
+# Single report: Host Uptime
+echo "=== Host Uptime (limit 10) ==="
+raku "$RAKU_REPO/guprecords.raku" --stats-dir="$STATS" --category=Host --metric=Uptime --limit=10 --output-format=Plaintext 2>/dev/null > "$R.host_uptime.txt"
+"$GO_BIN" -stats-dir="$STATS" -category=Host -metric=Uptime -limit=10 -output-format=Plaintext 2>/dev/null > "$G.host_uptime.txt"
+# "|| true" keeps set -e from aborting when diff reports differences.
+if diff -q "$R.host_uptime.txt" "$G.host_uptime.txt" >/dev/null; then
+	echo "OK (identical)"
+else
+	diff -u "$R.host_uptime.txt" "$G.host_uptime.txt" || true
+fi
+
+# Single report: Host Boots, Markdown
+echo ""
+echo "=== Host Boots Markdown (limit 5) ==="
+raku "$RAKU_REPO/guprecords.raku" --stats-dir="$STATS" --category=Host --metric=Boots --limit=5 --output-format=Markdown 2>/dev/null > "$R.host_boots_md.txt"
+"$GO_BIN" -stats-dir="$STATS" -category=Host -metric=Boots -limit=5 -output-format=Markdown 2>/dev/null > "$G.host_boots_md.txt"
+if diff -q "$R.host_boots_md.txt" "$G.host_boots_md.txt" >/dev/null; then
+	echo "OK (identical)"
+else
+	diff -u "$R.host_boots_md.txt" "$G.host_boots_md.txt" || true
+fi
+
+# --all (known: description word-wrap may differ in one section)
+echo ""
+echo "=== --all limit 5 ==="
+raku "$RAKU_REPO/guprecords.raku" --stats-dir="$STATS" --all --limit=5 --output-format=Plaintext 2>/dev/null > "$R.all.txt"
+"$GO_BIN" -stats-dir="$STATS" -all -limit=5 -output-format=Plaintext 2>/dev/null > "$G.all.txt"
+if diff -q "$R.all.txt" "$G.all.txt" >/dev/null; then
+	echo "OK (identical)"
+else
+	echo "Differences (often only description word-wrap):"
+	# Cap the dump at 40 lines; full outputs remain in /tmp for inspection.
+	diff -u "$R.all.txt" "$G.all.txt" | head -40 || true
+fi
+
+echo ""
+echo "Done. Raku output: $R.*.txt Go output: $G.*.txt"