summaryrefslogtreecommitdiff
path: root/internal
diff options
context:
space:
mode:
authorPaul Buetow <paul@buetow.org>2026-02-20 21:06:35 +0200
committerPaul Buetow <paul@buetow.org>2026-02-20 21:06:35 +0200
commit59e86c9fd39308bc6b632e02ecf4d37265dabc91 (patch)
tree20e865fa680df4cff10056fa238ad07d7c58f458 /internal
parent0441f47fd585812b9c1f98b8a3dbeee03aa70b03 (diff)
Add Go implementation (goprecords)v0.1.0
- cmd/goprecords: CLI with import, query, report-from-files, test - internal/goprecords: types, order, aggregate, db, report - internal/version: version constant (0.1.0) - SQLite import (repeatable: clears record table then inserts) - Magefile: Build, Test, Install, Uninstall - Table-driven unit tests; comparison script vs Raku guprecords - .gitignore: *.db, /goprecords binary Co-authored-by: Cursor <cursoragent@cursor.com>
Diffstat (limited to 'internal')
-rw-r--r--internal/goprecords/aggregate.go147
-rw-r--r--internal/goprecords/db.go175
-rw-r--r--internal/goprecords/order.go93
-rw-r--r--internal/goprecords/order_test.go72
-rw-r--r--internal/goprecords/parse_test.go73
-rw-r--r--internal/goprecords/report.go265
-rw-r--r--internal/goprecords/types.go287
-rw-r--r--internal/version/version.go4
8 files changed, 1116 insertions, 0 deletions
diff --git a/internal/goprecords/aggregate.go b/internal/goprecords/aggregate.go
new file mode 100644
index 0000000..3fb9144
--- /dev/null
+++ b/internal/goprecords/aggregate.go
@@ -0,0 +1,147 @@
+package goprecords
+
+import (
+ "bufio"
+ "context"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
// Aggregates holds all category maps keyed by display name. Host entries
// use HostAggregate (which carries the last-seen kernel); the kernel
// categories use the plain Aggregate.
type Aggregates struct {
	Host        map[string]*HostAggregate // keyed by short host name
	Kernel      map[string]*Aggregate     // keyed by full os string ("name version ...")
	KernelMajor map[string]*Aggregate     // keyed by "name major..." string
	KernelName  map[string]*Aggregate     // keyed by kernel name only
}
+
// Aggregator reads .records files from a directory and builds Aggregates.
type Aggregator struct {
	statsDir string // directory containing per-host .records files
}

// NewAggregator returns an Aggregator for the given stats directory.
// The directory is not validated until Aggregate is called.
func NewAggregator(statsDir string) *Aggregator {
	return &Aggregator{statsDir: statsDir}
}
+
+// Aggregate reads all .records files and returns aggregated data.
+func (ag *Aggregator) Aggregate(ctx context.Context) (*Aggregates, error) {
+ out := &Aggregates{
+ Host: make(map[string]*HostAggregate),
+ Kernel: make(map[string]*Aggregate),
+ KernelMajor: make(map[string]*Aggregate),
+ KernelName: make(map[string]*Aggregate),
+ }
+ entries, err := os.ReadDir(ag.statsDir)
+ if err != nil {
+ return nil, fmt.Errorf("read stats dir: %w", err)
+ }
+ for _, e := range entries {
+ if e.IsDir() || !strings.HasSuffix(e.Name(), ".records") {
+ continue
+ }
+ path := filepath.Join(ag.statsDir, e.Name())
+ info, err := os.Stat(path)
+ if err != nil || info.Size() == 0 {
+ continue
+ }
+ host := strings.TrimSuffix(e.Name(), filepath.Ext(e.Name()))
+ if idx := strings.Index(host, "."); idx > 0 {
+ host = host[:idx]
+ }
+ if _, exists := out.Host[host]; exists {
+ return nil, fmt.Errorf("record file for %s already processed - duplicate inputs?", host)
+ }
+ lastKernel, err := lastKernelFromFile(path)
+ if err != nil {
+ return nil, fmt.Errorf("last kernel %s: %w", path, err)
+ }
+ out.Host[host] = NewHostAggregate(host, lastKernel)
+ f, err := os.Open(path)
+ if err != nil {
+ return nil, fmt.Errorf("open %s: %w", path, err)
+ }
+ defer f.Close()
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ select {
+ case <-ctx.Done():
+ return nil, ctx.Err()
+ default:
+ }
+ line := strings.TrimSpace(sc.Text())
+ if line == "" {
+ continue
+ }
+ parts := strings.SplitN(line, ":", 3)
+ if len(parts) != 3 {
+ continue
+ }
+ uptime, _ := strconv.ParseUint(parts[0], 10, 64)
+ bootTime, _ := strconv.ParseUint(parts[1], 10, 64)
+ osStr := parts[2]
+ uname := osStr
+ if i := strings.Index(osStr, " "); i > 0 {
+ uname = osStr[:i]
+ }
+ osMajor := uname + " "
+ rest := osStr
+ if i := strings.Index(osStr, " "); i >= 0 {
+ rest = osStr[i+1:]
+ }
+ if j := strings.Index(rest, "."); j >= 0 {
+ osMajor += rest[:j] + "..."
+ } else {
+ osMajor += rest + "..."
+ }
+ out.Host[host].AddRecord(uptime, bootTime)
+ getOrNewAggregate(out.Kernel, osStr).AddRecord(uptime, bootTime)
+ getOrNewAggregate(out.KernelName, uname).AddRecord(uptime, bootTime)
+ getOrNewAggregate(out.KernelMajor, osMajor).AddRecord(uptime, bootTime)
+ }
+ if err := sc.Err(); err != nil {
+ return nil, fmt.Errorf("scan %s: %w", path, err)
+ }
+ }
+ return out, nil
+}
+
+func getOrNewAggregate(m map[string]*Aggregate, name string) *Aggregate {
+ if a, ok := m[name]; ok {
+ return a
+ }
+ a := NewAggregate(name)
+ m[name] = a
+ return a
+}
+
+func lastKernelFromFile(path string) (string, error) {
+ f, err := os.Open(path)
+ if err != nil {
+ return "", err
+ }
+ defer f.Close()
+ var maxBoot uint64
+ var lastOS string
+ sc := bufio.NewScanner(f)
+ for sc.Scan() {
+ line := strings.TrimSpace(sc.Text())
+ if line == "" {
+ continue
+ }
+ parts := strings.SplitN(line, ":", 3)
+ if len(parts) != 3 {
+ continue
+ }
+ bootTime, _ := strconv.ParseUint(parts[1], 10, 64)
+ if bootTime >= maxBoot {
+ maxBoot = bootTime
+ lastOS = parts[2]
+ }
+ }
+ return lastOS, sc.Err()
+}
diff --git a/internal/goprecords/db.go b/internal/goprecords/db.go
new file mode 100644
index 0000000..3afe970
--- /dev/null
+++ b/internal/goprecords/db.go
@@ -0,0 +1,175 @@
+package goprecords
+
+import (
+ "bufio"
+ "context"
+ "database/sql"
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+
+ _ "modernc.org/sqlite"
+)
+
// schemaSQL creates the denormalized record table (one row per boot
// record) plus one lookup index per report category. All statements are
// idempotent (IF NOT EXISTS), so CreateSchema can run on every start.
const schemaSQL = `
CREATE TABLE IF NOT EXISTS record (
    host TEXT NOT NULL,
    uptime_sec INTEGER NOT NULL,
    boot_time INTEGER NOT NULL,
    os TEXT NOT NULL,
    os_kernel_name TEXT NOT NULL,
    os_kernel_major TEXT NOT NULL
);
CREATE INDEX IF NOT EXISTS idx_record_host ON record(host);
CREATE INDEX IF NOT EXISTS idx_record_os ON record(os);
CREATE INDEX IF NOT EXISTS idx_record_os_kernel_name ON record(os_kernel_name);
CREATE INDEX IF NOT EXISTS idx_record_os_kernel_major ON record(os_kernel_major);
`
+
// OpenDB opens the SQLite database at path, creating the file if needed.
// Foreign keys are explicitly disabled (the schema declares none).
// The caller owns the returned handle and must Close it.
func OpenDB(path string) (*sql.DB, error) {
	db, err := sql.Open("sqlite", path)
	if err != nil {
		return nil, err
	}
	// Run one statement eagerly; sql.Open alone does not touch the file,
	// so this also surfaces open errors early.
	if _, err := db.Exec("PRAGMA foreign_keys = OFF"); err != nil {
		db.Close()
		return nil, err
	}
	return db, nil
}
+
// CreateSchema creates the record table and indexes. Safe to call on
// every start: all schema statements use IF NOT EXISTS.
func CreateSchema(ctx context.Context, db *sql.DB) error {
	_, err := db.ExecContext(ctx, schemaSQL)
	return err
}
+
// ResetRecords removes all rows from the record table so an import can be
// repeated from scratch. The schema itself is left intact.
func ResetRecords(ctx context.Context, db *sql.DB) error {
	_, err := db.ExecContext(ctx, "DELETE FROM record")
	return err
}
+
// ImportFromDir reads all .records files from statsDir and inserts them
// into the DB. The whole import (table reset plus all inserts) runs inside
// a single transaction, so a failure leaves the previous contents untouched
// and a successful run is repeatable. Previously each statement
// autocommitted, so a mid-import error left a partially reset table.
func ImportFromDir(ctx context.Context, db *sql.DB, statsDir string) error {
	entries, err := os.ReadDir(statsDir)
	if err != nil {
		return fmt.Errorf("read dir: %w", err)
	}
	tx, err := db.BeginTx(ctx, nil)
	if err != nil {
		return fmt.Errorf("begin tx: %w", err)
	}
	// Rollback is a no-op after a successful Commit.
	defer tx.Rollback()

	if _, err := tx.ExecContext(ctx, "DELETE FROM record"); err != nil {
		return fmt.Errorf("reset records: %w", err)
	}
	insert, err := tx.PrepareContext(ctx, "INSERT INTO record (host, uptime_sec, boot_time, os, os_kernel_name, os_kernel_major) VALUES (?, ?, ?, ?, ?, ?)")
	if err != nil {
		return fmt.Errorf("prepare insert: %w", err)
	}
	defer insert.Close()

	for _, e := range entries {
		if e.IsDir() || !strings.HasSuffix(e.Name(), ".records") {
			continue
		}
		path := filepath.Join(statsDir, e.Name())
		info, err := os.Stat(path)
		if err != nil || info.Size() == 0 {
			// Unreadable or empty files are silently skipped.
			continue
		}
		// Short host name: strip the .records extension and any domain part.
		host := strings.TrimSuffix(e.Name(), filepath.Ext(e.Name()))
		if idx := strings.Index(host, "."); idx > 0 {
			host = host[:idx]
		}
		if err := importFile(ctx, insert, host, path); err != nil {
			return err
		}
	}
	return tx.Commit()
}

// importFile inserts every valid "uptime:boottime:os" line of one file
// using the prepared insert statement. The file is closed before return.
func importFile(ctx context.Context, insert *sql.Stmt, host, path string) error {
	f, err := os.Open(path)
	if err != nil {
		return fmt.Errorf("open %s: %w", path, err)
	}
	defer f.Close()
	sc := bufio.NewScanner(f)
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" {
			continue
		}
		parts := strings.SplitN(line, ":", 3)
		if len(parts) != 3 {
			continue
		}
		// Unparseable numbers fall back to 0, matching the file-based
		// aggregator so both pipelines produce the same data.
		uptimeSec, _ := strconv.ParseInt(parts[0], 10, 64)
		bootTime, _ := strconv.ParseInt(parts[1], 10, 64)
		osStr := parts[2]
		// Kernel name is everything before the first space.
		osKernelName := osStr
		if i := strings.Index(osStr, " "); i > 0 {
			osKernelName = osStr[:i]
		}
		// KernelMajor key: "<name> <major>..." (version cut at first dot).
		osMajor := osKernelName + " "
		rest := osStr
		if i := strings.Index(osStr, " "); i >= 0 {
			rest = osStr[i+1:]
		}
		if j := strings.Index(rest, "."); j >= 0 {
			osMajor += rest[:j] + "..."
		} else {
			osMajor += rest + "..."
		}
		if _, err := insert.ExecContext(ctx, host, uptimeSec, bootTime, osStr, osKernelName, osMajor); err != nil {
			return fmt.Errorf("insert: %w", err)
		}
	}
	if err := sc.Err(); err != nil {
		return fmt.Errorf("scan %s: %w", path, err)
	}
	return nil
}
+
+// LoadAggregates reads all rows from the DB and builds Aggregates (same shape as file-based aggregation).
+func LoadAggregates(ctx context.Context, db *sql.DB) (*Aggregates, error) {
+ rows, err := db.QueryContext(ctx, "SELECT host, uptime_sec, boot_time, os, os_kernel_name, os_kernel_major FROM record ORDER BY host, boot_time")
+ if err != nil {
+ return nil, fmt.Errorf("query: %w", err)
+ }
+ defer rows.Close()
+
+ out := &Aggregates{
+ Host: make(map[string]*HostAggregate),
+ Kernel: make(map[string]*Aggregate),
+ KernelMajor: make(map[string]*Aggregate),
+ KernelName: make(map[string]*Aggregate),
+ }
+ hostMaxBoot := make(map[string]int64)
+ hostLastKernel := make(map[string]string)
+
+ for rows.Next() {
+ var host string
+ var uptimeSec, bootTime int64
+ var osStr, osKernelName, osKernelMajor string
+ if err := rows.Scan(&host, &uptimeSec, &bootTime, &osStr, &osKernelName, &osKernelMajor); err != nil {
+ return nil, fmt.Errorf("scan row: %w", err)
+ }
+ uptime := uint64(uptimeSec)
+ boot := uint64(bootTime)
+ if boot >= uint64(hostMaxBoot[host]) {
+ hostMaxBoot[host] = int64(boot)
+ hostLastKernel[host] = osStr
+ }
+ if _, ok := out.Host[host]; !ok {
+ out.Host[host] = NewHostAggregate(host, "")
+ }
+ out.Host[host].AddRecord(uptime, boot)
+ getOrNewAggregate(out.Kernel, osStr).AddRecord(uptime, boot)
+ getOrNewAggregate(out.KernelName, osKernelName).AddRecord(uptime, boot)
+ getOrNewAggregate(out.KernelMajor, osKernelMajor).AddRecord(uptime, boot)
+ }
+ if err := rows.Err(); err != nil {
+ return nil, fmt.Errorf("rows: %w", err)
+ }
+ for host, h := range out.Host {
+ h.LastKernel = hostLastKernel[host]
+ }
+ return out, nil
+}
diff --git a/internal/goprecords/order.go b/internal/goprecords/order.go
new file mode 100644
index 0000000..a20e5f9
--- /dev/null
+++ b/internal/goprecords/order.go
@@ -0,0 +1,93 @@
+package goprecords
+
+import (
+ "fmt"
+ "strings"
+)
+
// CategoryMetric pairs a category with a metric; a slice of these defines
// the order in which report tables are emitted.
type CategoryMetric struct {
	Category Category
	Metric   Metric
}
+
+// ParseStatsOrder parses a comma-separated "Category:Metric" list.
+func ParseStatsOrder(s string) ([]CategoryMetric, error) {
+ parts := strings.Split(s, ",")
+ var entries []string
+ for _, p := range parts {
+ p = strings.TrimSpace(p)
+ if p != "" {
+ entries = append(entries, p)
+ }
+ }
+ if len(entries) == 0 {
+ return nil, fmt.Errorf("invalid -stats-order: empty list")
+ }
+ var order []CategoryMetric
+ seen := make(map[string]bool)
+ for _, entry := range entries {
+ idx := strings.Index(entry, ":")
+ if idx <= 0 || idx == len(entry)-1 {
+ return nil, fmt.Errorf("invalid -stats-order entry %q (expected Category:Metric)", entry)
+ }
+ catName := strings.TrimSpace(entry[:idx])
+ metName := strings.TrimSpace(entry[idx+1:])
+ if catName == "" || metName == "" {
+ return nil, fmt.Errorf("invalid -stats-order entry %q (expected Category:Metric)", entry)
+ }
+ cat, err := ParseCategory(catName)
+ if err != nil {
+ return nil, fmt.Errorf("invalid -stats-order category %q", catName)
+ }
+ met, err := ParseMetric(metName)
+ if err != nil {
+ return nil, fmt.Errorf("invalid -stats-order metric %q", metName)
+ }
+ if cat != CategoryHost && (met == MetricDowntime || met == MetricLifespan) {
+ return nil, fmt.Errorf("invalid -stats-order entry %q (metric %s not supported for category %s)", entry, metName, catName)
+ }
+ key := cat.String() + ":" + met.String()
+ if seen[key] {
+ continue
+ }
+ seen[key] = true
+ order = append(order, CategoryMetric{cat, met})
+ }
+ return order, nil
+}
+
+// StatsOrderList returns the full order (custom entries first, then default remainder).
+func StatsOrderList(statsOrder string) ([]CategoryMetric, error) {
+ defaultOrder := defaultStatsOrder()
+ if statsOrder == "" {
+ return defaultOrder, nil
+ }
+ order, err := ParseStatsOrder(statsOrder)
+ if err != nil {
+ return nil, err
+ }
+ seen := make(map[string]bool)
+ for _, p := range order {
+ seen[p.Category.String()+":"+p.Metric.String()] = true
+ }
+ for _, p := range defaultOrder {
+ key := p.Category.String() + ":" + p.Metric.String()
+ if seen[key] {
+ continue
+ }
+ seen[key] = true
+ order = append(order, p)
+ }
+ return order, nil
+}
+
+func defaultStatsOrder() []CategoryMetric {
+ var out []CategoryMetric
+ for _, c := range []Category{CategoryHost, CategoryKernel, CategoryKernelMajor, CategoryKernelName} {
+ for _, m := range []Metric{MetricBoots, MetricUptime, MetricScore, MetricDowntime, MetricLifespan} {
+ out = append(out, CategoryMetric{c, m})
+ }
+ }
+ return out
+}
diff --git a/internal/goprecords/order_test.go b/internal/goprecords/order_test.go
new file mode 100644
index 0000000..c085723
--- /dev/null
+++ b/internal/goprecords/order_test.go
@@ -0,0 +1,72 @@
+package goprecords
+
+import (
+ "testing"
+)
+
// TestParseStatsOrder covers accepted and rejected -stats-order strings,
// comparing the parsed Category/Metric pairs for the valid inputs.
func TestParseStatsOrder(t *testing.T) {
	tests := []struct {
		in    string
		want  []CategoryMetric // expected pairs when valid
		valid bool             // whether parsing should succeed
	}{
		{
			in:    "Host:Uptime,Host:Boots",
			want:  []CategoryMetric{{CategoryHost, MetricUptime}, {CategoryHost, MetricBoots}},
			valid: true,
		},
		{
			in:    "Host:Uptime",
			want:  []CategoryMetric{{CategoryHost, MetricUptime}},
			valid: true,
		},
		{in: "Host", valid: false},            // missing ":Metric"
		{in: "Bad:Uptime", valid: false},      // unknown category
		{in: "Kernel:Downtime", valid: false}, // Downtime is Host-only
		{in: "Host:Nope", valid: false},       // unknown metric
		{in: "", valid: false},                // empty list
		{in: " , ", valid: false},             // only blank entries
	}
	for _, tt := range tests {
		got, err := ParseStatsOrder(tt.in)
		valid := err == nil
		if valid != tt.valid {
			t.Errorf("ParseStatsOrder(%q) err=%v; valid=%v want %v", tt.in, err, valid, tt.valid)
			continue
		}
		if !tt.valid {
			continue
		}
		if len(got) != len(tt.want) {
			t.Errorf("ParseStatsOrder(%q) len=%d want %d", tt.in, len(got), len(tt.want))
			continue
		}
		for i := range got {
			if got[i].Category != tt.want[i].Category || got[i].Metric != tt.want[i].Metric {
				t.Errorf("ParseStatsOrder(%q)[%d] = %v; want %v", tt.in, i, got[i], tt.want[i])
			}
		}
	}
}
+
// TestStatsOrderList checks the two entry paths: an empty string yields
// the non-empty default order, and a custom entry is placed first.
func TestStatsOrderList(t *testing.T) {
	// Empty string returns default order (all category×metric pairs).
	got, err := StatsOrderList("")
	if err != nil {
		t.Fatalf("StatsOrderList(\"\"): %v", err)
	}
	if len(got) == 0 {
		t.Error("StatsOrderList(\"\"): got empty order")
	}
	// Custom order: Host:Uptime first, then rest of default.
	got, err = StatsOrderList("Host:Uptime")
	if err != nil {
		t.Fatalf("StatsOrderList(\"Host:Uptime\"): %v", err)
	}
	if len(got) == 0 {
		t.Fatal("StatsOrderList(\"Host:Uptime\"): got empty")
	}
	if got[0].Category != CategoryHost || got[0].Metric != MetricUptime {
		t.Errorf("StatsOrderList(\"Host:Uptime\")[0] = %v; want Host:Uptime", got[0])
	}
}
diff --git a/internal/goprecords/parse_test.go b/internal/goprecords/parse_test.go
new file mode 100644
index 0000000..304b06e
--- /dev/null
+++ b/internal/goprecords/parse_test.go
@@ -0,0 +1,73 @@
+package goprecords
+
+import (
+ "testing"
+)
+
// TestParseCategory verifies that every valid (case-sensitive) category
// name parses and that unknown or wrongly-cased names are rejected.
func TestParseCategory(t *testing.T) {
	tests := []struct {
		in   string
		want Category
		ok   bool // whether parsing should succeed
	}{
		{"Host", CategoryHost, true},
		{"Kernel", CategoryKernel, true},
		{"KernelMajor", CategoryKernelMajor, true},
		{"KernelName", CategoryKernelName, true},
		{"", 0, false},
		{"host", 0, false}, // parsing is case-sensitive
		{"Bad", 0, false},
	}
	for _, tt := range tests {
		got, err := ParseCategory(tt.in)
		ok := err == nil
		if ok != tt.ok || (tt.ok && got != tt.want) {
			t.Errorf("ParseCategory(%q) = %v, %v; want %v, ok=%v", tt.in, got, err, tt.want, tt.ok)
		}
	}
}
+
// TestParseMetric verifies that every valid (case-sensitive) metric name
// parses and that unknown or wrongly-cased names are rejected.
func TestParseMetric(t *testing.T) {
	tests := []struct {
		in   string
		want Metric
		ok   bool // whether parsing should succeed
	}{
		{"Boots", MetricBoots, true},
		{"Uptime", MetricUptime, true},
		{"Score", MetricScore, true},
		{"Downtime", MetricDowntime, true},
		{"Lifespan", MetricLifespan, true},
		{"", 0, false},
		{"uptime", 0, false}, // parsing is case-sensitive
		{"Nope", 0, false},
	}
	for _, tt := range tests {
		got, err := ParseMetric(tt.in)
		ok := err == nil
		if ok != tt.ok || (tt.ok && got != tt.want) {
			t.Errorf("ParseMetric(%q) = %v, %v; want %v, ok=%v", tt.in, got, err, tt.want, tt.ok)
		}
	}
}
+
// TestParseOutputFormat verifies the three supported format names parse
// and that unknown names are rejected.
func TestParseOutputFormat(t *testing.T) {
	tests := []struct {
		in   string
		want OutputFormat
		ok   bool // whether parsing should succeed
	}{
		{"Plaintext", FormatPlaintext, true},
		{"Markdown", FormatMarkdown, true},
		{"Gemtext", FormatGemtext, true},
		{"", 0, false},
		{"html", 0, false},
	}
	for _, tt := range tests {
		got, err := ParseOutputFormat(tt.in)
		ok := err == nil
		if ok != tt.ok || (tt.ok && got != tt.want) {
			t.Errorf("ParseOutputFormat(%q) = %v, %v; want %v, ok=%v", tt.in, got, err, tt.want, tt.ok)
		}
	}
}
diff --git a/internal/goprecords/report.go b/internal/goprecords/report.go
new file mode 100644
index 0000000..af61b29
--- /dev/null
+++ b/internal/goprecords/report.go
@@ -0,0 +1,265 @@
+package goprecords
+
+import (
+ "fmt"
+ "sort"
+ "strings"
+)
+
// Reporter builds a single report (category + metric + format).
type Reporter struct {
	aggregates   *Aggregates  // source data
	limit        uint         // max rows ("Top N")
	category     Category     // grouping (Host, Kernel, ...)
	metric       Metric       // ranking metric
	outputFormat OutputFormat // Plaintext, Markdown, or Gemtext
	headerIndent uint         // heading depth ('#' count) for Markdown/Gemtext
}

// NewReporter returns a Reporter for the given category and metric.
func NewReporter(aggregates *Aggregates, category Category, limit uint, metric Metric, outputFormat OutputFormat, headerIndent uint) *Reporter {
	return &Reporter{
		aggregates:   aggregates,
		limit:        limit,
		category:     category,
		metric:       metric,
		outputFormat: outputFormat,
		headerIndent: headerIndent,
	}
}
+
// NewHostReporter returns a Reporter preset to the Host category — the
// only category whose table carries the Last Kernel column.
func NewHostReporter(aggregates *Aggregates, limit uint, metric Metric, outputFormat OutputFormat, headerIndent uint) *Reporter {
	return NewReporter(aggregates, CategoryHost, limit, metric, outputFormat, headerIndent)
}
+
+// Report returns the formatted report string.
+func (r *Reporter) Report() string {
+ var rows []tableRow
+ var hasLastKernel bool
+ if r.category == CategoryHost {
+ rows, hasLastKernel = r.buildHostTable()
+ } else {
+ rows, hasLastKernel = r.buildCategoryTable()
+ }
+ if len(rows) == 0 {
+ return ""
+ }
+ return r.formatReport(rows, hasLastKernel)
+}
+
+func (r *Reporter) buildHostTable() ([]tableRow, bool) {
+ type keyVal struct {
+ agg *HostAggregate
+ key uint64
+ }
+ var list []keyVal
+ for _, h := range r.aggregates.Host {
+ var k uint64
+ switch r.metric {
+ case MetricUptime:
+ k = h.Uptime
+ case MetricBoots:
+ k = h.Boots
+ case MetricScore:
+ k = h.MetaScore()
+ case MetricDowntime:
+ k = h.Downtime()
+ case MetricLifespan:
+ k = h.Lifespan()
+ default:
+ k = h.Uptime
+ }
+ list = append(list, keyVal{h, k})
+ }
+ sort.Slice(list, func(i, j int) bool { return list[i].key > list[j].key })
+ var rows []tableRow
+ for i, kv := range list {
+ if uint(i) >= r.limit {
+ break
+ }
+ h := kv.agg
+ active := " "
+ if h.IsActive(90) {
+ active = "*"
+ }
+ rows = append(rows, tableRow{
+ Pos: fmt.Sprintf("%d.", i+1),
+ Name: active + h.Name,
+ Value: r.humanStrHost(h),
+ LastKernel: h.LastKernel,
+ })
+ }
+ return rows, true
+}
+
+func (r *Reporter) buildCategoryTable() ([]tableRow, bool) {
+ m := r.aggregates.Kernel
+ switch r.category {
+ case CategoryKernelMajor:
+ m = r.aggregates.KernelMajor
+ case CategoryKernelName:
+ m = r.aggregates.KernelName
+ }
+ type keyVal struct {
+ agg *Aggregate
+ key uint64
+ }
+ var list []keyVal
+ for _, a := range m {
+ var k uint64
+ switch r.metric {
+ case MetricUptime:
+ k = a.Uptime
+ case MetricBoots:
+ k = a.Boots
+ case MetricScore:
+ k = a.MetaScore()
+ default:
+ k = a.Uptime
+ }
+ list = append(list, keyVal{agg: a, key: k})
+ }
+ sort.Slice(list, func(i, j int) bool { return list[i].key > list[j].key })
+ var rows []tableRow
+ for i, kv := range list {
+ if uint(i) >= r.limit {
+ break
+ }
+ a := kv.agg
+ active := " "
+ if a.IsActive(90) {
+ active = "*"
+ }
+ rows = append(rows, tableRow{
+ Pos: fmt.Sprintf("%d.", i+1),
+ Name: active + a.Name,
+ Value: r.humanStrAgg(a),
+ })
+ }
+ return rows, false
+}
+
+func (r *Reporter) humanStrHost(h *HostAggregate) string {
+ switch r.metric {
+ case MetricUptime:
+ return formatDuration(h.Uptime)
+ case MetricBoots:
+ return formatInt(h.Boots)
+ case MetricScore:
+ return formatInt(h.MetaScore())
+ case MetricDowntime:
+ return formatDuration(h.Downtime())
+ case MetricLifespan:
+ return formatDuration(h.Lifespan())
+ default:
+ return formatDuration(h.Uptime)
+ }
+}
+
+func (r *Reporter) humanStrAgg(a *Aggregate) string {
+ switch r.metric {
+ case MetricUptime:
+ return formatDuration(a.Uptime)
+ case MetricBoots:
+ return formatInt(a.Boots)
+ case MetricScore:
+ return formatInt(a.MetaScore())
+ default:
+ return formatDuration(a.Uptime)
+ }
+}
+
// formatReport assembles the full report: header + data rows + closing
// border. For Markdown/Gemtext it also closes the ``` code fence whose
// opening is emitted by buildReportHeader.
func (r *Reporter) formatReport(rows []tableRow, hasLastKernel bool) string {
	cW, nW, vW, lkW := r.reportWidths(rows, hasLastKernel)
	border := r.buildBorder(cW, nW, vW, lkW, hasLastKernel)
	header := r.buildReportHeader(cW, nW, vW, lkW, hasLastKernel, border)
	fmtStr := r.buildFormatStr(cW, nW, vW, lkW, hasLastKernel)
	body := r.buildReportBody(rows, fmtStr, hasLastKernel)
	out := header + body + border
	if r.outputFormat == FormatMarkdown || r.outputFormat == FormatGemtext {
		out += "```\n"
	}
	return out
}
+
+func (r *Reporter) reportWidths(rows []tableRow, hasLastKernel bool) (countW, nameW, valueW, lastKernelW int) {
+ countW = 3
+ nameW = len(r.category.String())
+ valueW = len(r.metric.String())
+ if hasLastKernel {
+ lastKernelW = len("Last Kernel")
+ }
+ for _, row := range rows {
+ if len(row.Pos) > countW {
+ countW = len(row.Pos)
+ }
+ if len(row.Name) > nameW {
+ nameW = len(row.Name)
+ }
+ if len(row.Value) > valueW {
+ valueW = len(row.Value)
+ }
+ if len(row.LastKernel) > lastKernelW {
+ lastKernelW = len(row.LastKernel)
+ }
+ }
+ return countW, nameW, valueW, lastKernelW
+}
+
+func (r *Reporter) buildBorder(countW, nameW, valueW, lastKernelW int, hasLastKernel bool) string {
+ parts := []string{
+ "+" + strings.Repeat("-", 2+countW),
+ "+" + strings.Repeat("-", 2+nameW),
+ "+" + strings.Repeat("-", 2+valueW),
+ }
+ if hasLastKernel {
+ parts = append(parts, "+"+strings.Repeat("-", 2+lastKernelW))
+ }
+ return strings.Join(parts, "") + "+\n"
+}
+
// buildReportHeader renders the title, the metric description, and the
// table header row. For Markdown/Gemtext it prefixes a '#' heading and
// opens a ``` code fence (closed later by formatReport).
func (r *Reporter) buildReportHeader(countW, nameW, valueW, lastKernelW int, hasLastKernel bool, border string) string {
	var h string
	if r.outputFormat == FormatMarkdown || r.outputFormat == FormatGemtext {
		// headerIndent is the heading depth: number of leading '#'.
		h = strings.Repeat("#", int(r.headerIndent)) + " "
	}
	h += fmt.Sprintf("Top %d %s's by %s\n\n", r.limit, r.metric, r.category)
	desc := MetricDescription(r.metric)
	// Wrap the description to the table width for plaintext output.
	// NOTE(review): len(border) includes the trailing '\n', so the wrap
	// limit is one column wider than the visible table — confirm intended.
	lineLimit := len(border)
	if r.outputFormat == FormatPlaintext && lineLimit > 0 && len(desc) > lineLimit {
		desc = " " + wordWrap(desc, lineLimit)
	}
	h += desc + "\n\n"
	if r.outputFormat == FormatMarkdown || r.outputFormat == FormatGemtext {
		h += "```\n"
	}
	h += border
	fmtStr := r.buildFormatStr(countW, nameW, valueW, lastKernelW, hasLastKernel)
	if hasLastKernel {
		h += fmt.Sprintf(fmtStr+"\n", "Pos", r.category.String(), r.metric.String(), "Last Kernel")
	} else {
		h += fmt.Sprintf(fmtStr+"\n", "Pos", r.category.String(), r.metric.String())
	}
	h += border
	return h
}
+
+func (r *Reporter) buildFormatStr(countW, nameW, valueW, lastKernelW int, hasLastKernel bool) string {
+ if hasLastKernel {
+ return fmt.Sprintf("| %%%ds | %%%ds | %%%ds | %%%ds |", countW, nameW, valueW, lastKernelW)
+ }
+ return fmt.Sprintf("| %%%ds | %%%ds | %%%ds |", countW, nameW, valueW)
+}
+
+func (r *Reporter) buildReportBody(rows []tableRow, fmtStr string, hasLastKernel bool) string {
+ var b strings.Builder
+ for _, row := range rows {
+ if hasLastKernel {
+ b.WriteString(fmt.Sprintf(fmtStr+"\n", row.Pos, row.Name, row.Value, row.LastKernel))
+ } else {
+ b.WriteString(fmt.Sprintf(fmtStr+"\n", row.Pos, row.Name, row.Value))
+ }
+ }
+ return b.String()
+}
diff --git a/internal/goprecords/types.go b/internal/goprecords/types.go
new file mode 100644
index 0000000..ce44df8
--- /dev/null
+++ b/internal/goprecords/types.go
@@ -0,0 +1,287 @@
+package goprecords
+
+import (
+ "fmt"
+ "strconv"
+ "strings"
+ "time"
+)
+
const (
	// Day is seconds in 24 hours.
	Day = 24 * 3600
	// Month is 30 days in seconds (used as the activity bonus in MetaScore).
	Month = 30 * Day
)
+
// Category is the grouping for reports (Host, Kernel, etc.).
type Category int

const (
	CategoryHost Category = iota
	CategoryKernel
	CategoryKernelMajor
	CategoryKernelName
)

// String returns the category's display name, or "?" for an unknown value.
func (c Category) String() string {
	names := [...]string{
		CategoryHost:        "Host",
		CategoryKernel:      "Kernel",
		CategoryKernelMajor: "KernelMajor",
		CategoryKernelName:  "KernelName",
	}
	if int(c) >= 0 && int(c) < len(names) {
		return names[c]
	}
	return "?"
}
+
// Metric is the value to rank by (Boots, Uptime, etc.).
type Metric int

const (
	MetricBoots Metric = iota
	MetricUptime
	MetricScore
	MetricDowntime
	MetricLifespan
)

// String returns the metric's display name, or "?" for an unknown value.
func (m Metric) String() string {
	names := [...]string{
		MetricBoots:    "Boots",
		MetricUptime:   "Uptime",
		MetricScore:    "Score",
		MetricDowntime: "Downtime",
		MetricLifespan: "Lifespan",
	}
	if int(m) >= 0 && int(m) < len(names) {
		return names[m]
	}
	return "?"
}
+
// OutputFormat is the report output format.
type OutputFormat int

const (
	FormatPlaintext OutputFormat = iota
	FormatMarkdown
	FormatGemtext
)

// String returns the format's display name, or "?" for an unknown value.
func (f OutputFormat) String() string {
	names := [...]string{
		FormatPlaintext: "Plaintext",
		FormatMarkdown:  "Markdown",
		FormatGemtext:   "Gemtext",
	}
	if int(f) >= 0 && int(f) < len(names) {
		return names[f]
	}
	return "?"
}
+
// Epoch is a Unix timestamp for duration/date formatting.
type Epoch uint64

// HumanDuration renders the value (interpreted as a duration in seconds)
// as "Y years, M months, D days" via a calendar conversion. Month and day
// are 1-based, so 0 seconds yields "0 years, 1 months, 1 days"; units are
// always plural — presumably to match the Raku guprecords output (confirm
// against the comparison script before changing).
func (e Epoch) HumanDuration() string {
	t := time.Unix(int64(e), 0).UTC()
	return fmt.Sprintf("%d years, %d months, %d days",
		t.Year()-1970, int(t.Month()), t.Day())
}

// NewerThan reports whether the epoch lies within the last limitDays days.
func (e Epoch) NewerThan(limitDays uint) bool {
	cutoff := time.Duration(limitDays) * 24 * time.Hour
	return time.Since(time.Unix(int64(e), 0)) < cutoff
}
+
// Aggregate holds per-entity stats (Host, Kernel, etc.).
type Aggregate struct {
	Name      string
	Uptime    uint64 // summed uptime seconds over all records
	FirstBoot uint64 // earliest boot time seen (unix seconds, 0 = unset)
	LastSeen  uint64 // latest boot+uptime seen (unix seconds)
	Boots     uint64 // number of records folded in
}

// NewAggregate constructs an Aggregate with the given name and zeroed
// counters.
func NewAggregate(name string) *Aggregate {
	return &Aggregate{Name: name}
}

// AddRecord folds one uptime record into the running totals.
func (a *Aggregate) AddRecord(uptimeSec, bootTime uint64) {
	a.Boots++
	a.Uptime += uptimeSec
	// FirstBoot == 0 means "unset"; a genuine boot time of exactly 0 is
	// treated the same way, which is harmless for real data.
	if a.FirstBoot == 0 || bootTime < a.FirstBoot {
		a.FirstBoot = bootTime
	}
	if seen := bootTime + uptimeSec; seen > a.LastSeen {
		a.LastSeen = seen
	}
}
+
+// IsActive reports whether the entity was seen within limitDays days.
+func (a *Aggregate) IsActive(limitDays uint) bool {
+ return Epoch(a.LastSeen).NewerThan(limitDays)
+}
+
+// MetaScore returns the computed score for this aggregate.
+func (a *Aggregate) MetaScore() uint64 {
+ activeBonus := uint64(0)
+ if a.IsActive(90) {
+ activeBonus = Month
+ }
+ return ((a.Uptime*2 + a.Boots*uint64(Day) + activeBonus) / 1000000)
+}
+
// HostAggregate extends Aggregate with the most recently booted kernel
// string; it also gains lifespan/downtime metrics (defined on this type
// only, since they need per-host boot history).
type HostAggregate struct {
	Aggregate
	LastKernel string // os string of the record with the highest boot time
}

// NewHostAggregate constructs a HostAggregate with zeroed counters.
func NewHostAggregate(name, lastKernel string) *HostAggregate {
	return &HostAggregate{
		Aggregate:  Aggregate{Name: name},
		LastKernel: lastKernel,
	}
}
+
+// Lifespan returns last-seen minus first-boot.
+func (h *HostAggregate) Lifespan() uint64 { return h.LastSeen - h.FirstBoot }
+
+// Downtime returns lifespan minus uptime.
+func (h *HostAggregate) Downtime() uint64 { return h.Lifespan() - h.Uptime }
+
+// MetaScore returns the host-specific score (includes downtime component).
+func (h *HostAggregate) MetaScore() uint64 {
+ return uint64(h.Downtime()/2000000) + h.Aggregate.MetaScore()
+}
+
// tableRow is one rendered row of the report table; every field is
// already formatted as a display string.
type tableRow struct {
	Pos        string // rank, e.g. "1."
	Name       string // activity marker ("*" or " ") + entity name
	Value      string // human-readable metric value
	LastKernel string // populated only for Host rows
}
+
+// MetricDescription returns the description text for a metric.
+func MetricDescription(m Metric) string {
+ switch m {
+ case MetricBoots:
+ return "Boots is the total number of host boots over the entire lifespan."
+ case MetricUptime:
+ return "Uptime is the total uptime of a host over the entire lifespan."
+ case MetricDowntime:
+ return "Downtime is the total downtime of a host over the entire lifespan."
+ case MetricLifespan:
+ return "Lifespan is the total uptime + the total downtime of a host."
+ case MetricScore:
+ return "Score is calculated by combining all other metrics."
+ default:
+ return ""
+ }
+}
+
+// ParseCategory parses a category string.
+func ParseCategory(s string) (Category, error) {
+ switch s {
+ case "Host":
+ return CategoryHost, nil
+ case "Kernel":
+ return CategoryKernel, nil
+ case "KernelMajor":
+ return CategoryKernelMajor, nil
+ case "KernelName":
+ return CategoryKernelName, nil
+ default:
+ return 0, fmt.Errorf("invalid category %q", s)
+ }
+}
+
+// ParseMetric parses a metric string.
+func ParseMetric(s string) (Metric, error) {
+ switch s {
+ case "Boots":
+ return MetricBoots, nil
+ case "Uptime":
+ return MetricUptime, nil
+ case "Score":
+ return MetricScore, nil
+ case "Downtime":
+ return MetricDowntime, nil
+ case "Lifespan":
+ return MetricLifespan, nil
+ default:
+ return 0, fmt.Errorf("invalid metric %q", s)
+ }
+}
+
+// ParseOutputFormat parses an output format string.
+func ParseOutputFormat(s string) (OutputFormat, error) {
+ switch s {
+ case "Plaintext":
+ return FormatPlaintext, nil
+ case "Markdown":
+ return FormatMarkdown, nil
+ case "Gemtext":
+ return FormatGemtext, nil
+ default:
+ return 0, fmt.Errorf("invalid output-format %q", s)
+ }
+}
+
// wordWrap greedily wraps s at word boundaries so no line exceeds
// lineLimit characters; a single word longer than the limit stays on its
// own line. Runs of whitespace collapse to single spaces. Returns s
// unchanged when it already fits or lineLimit is non-positive.
func wordWrap(s string, lineLimit int) string {
	if lineLimit <= 0 || len(s) <= lineLimit {
		return s
	}
	var out strings.Builder
	lineLen := 0
	for _, word := range strings.Fields(s) {
		need := len(word)
		if lineLen > 0 {
			need++ // joining space
		}
		if lineLen+need > lineLimit {
			if lineLen > 0 {
				out.WriteByte('\n')
			}
			out.WriteString(word)
			lineLen = len(word)
			continue
		}
		if lineLen > 0 {
			out.WriteByte(' ')
		}
		out.WriteString(word)
		lineLen += need
	}
	return out.String()
}
+
// formatDuration renders a duration in seconds using Epoch's
// calendar-based "Y years, M months, D days" formatting.
func formatDuration(sec uint64) string {
	return Epoch(sec).HumanDuration()
}

// formatInt renders an unsigned counter in base 10.
func formatInt(n uint64) string {
	return strconv.FormatUint(n, 10)
}
diff --git a/internal/version/version.go b/internal/version/version.go
new file mode 100644
index 0000000..3178b50
--- /dev/null
+++ b/internal/version/version.go
@@ -0,0 +1,4 @@
// Package version exposes the application's version string.
package version

// Version is the application version (keep in sync with release tags).
const Version = "0.1.0"