diff options
| author | Paul Buetow <paul@buetow.org> | 2026-03-23 09:04:17 +0200 |
|---|---|---|
| committer | Paul Buetow <paul@buetow.org> | 2026-03-23 09:04:17 +0200 |
| commit | 462184dff3eef32f01f06634305da1355ac1bec2 (patch) | |
| tree | 026ffaaeacfe152957298c985e1df77ff661b723 | |
| parent | 667f2d3384643aa877de2eefcbad3923965bad09 (diff) | |
chore: bump version to v0.25.9
Code quality fixes from audit:
- Log silently discarded errors in status sinks and stats.Update call sites
- Fix json.Marshal errors silently ignored in LSP handlers
- Replace time.Sleep in tests with channel signaling (mcp) and fake clock (stats)
- Make context cancellation work in production time.Sleep sites (handlers_document, cmdentry)
- Remove init()-based provider registration from llm package; use explicit RegisterAllProviders()
- Add WaitGroup goroutine tracking to MCP server Run()
Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
| -rw-r--r-- | internal/hexaiaction/cmdentry.go | 21 | ||||
| -rw-r--r-- | internal/hexaiaction/cmdentry_test.go | 8 | ||||
| -rw-r--r-- | internal/hexaicli/run.go | 1 | ||||
| -rw-r--r-- | internal/hexailsp/run.go | 1 | ||||
| -rw-r--r-- | internal/hexailsp/run_test.go | 7 | ||||
| -rw-r--r-- | internal/llm/anthropic.go | 4 | ||||
| -rw-r--r-- | internal/llm/ollama.go | 4 | ||||
| -rw-r--r-- | internal/llm/openai.go | 4 | ||||
| -rw-r--r-- | internal/llm/openrouter.go | 4 | ||||
| -rw-r--r-- | internal/llm/provider.go | 29 | ||||
| -rw-r--r-- | internal/llm/test_helpers_test.go | 12 | ||||
| -rw-r--r-- | internal/llmutils/client_test.go | 7 | ||||
| -rw-r--r-- | internal/lsp/handlers.go | 6 | ||||
| -rw-r--r-- | internal/lsp/handlers_completion.go | 22 | ||||
| -rw-r--r-- | internal/lsp/handlers_document.go | 22 | ||||
| -rw-r--r-- | internal/lsp/handlers_utils.go | 6 | ||||
| -rw-r--r-- | internal/lsp/server.go | 8 | ||||
| -rw-r--r-- | internal/mcp/server.go | 13 | ||||
| -rw-r--r-- | internal/mcp/server_test.go | 9 | ||||
| -rw-r--r-- | internal/stats/stats.go | 8 | ||||
| -rw-r--r-- | internal/stats/stats_test.go | 9 | ||||
| -rw-r--r-- | internal/version.go | 2 |
22 files changed, 143 insertions, 64 deletions
diff --git a/internal/hexaiaction/cmdentry.go b/internal/hexaiaction/cmdentry.go index 7d91ab2..78c315b 100644 --- a/internal/hexaiaction/cmdentry.go +++ b/internal/hexaiaction/cmdentry.go @@ -8,6 +8,7 @@ import ( "path/filepath" "time" + "codeberg.org/snonux/hexai/internal/llm" "codeberg.org/snonux/hexai/internal/tmux" "golang.org/x/term" ) @@ -25,11 +26,12 @@ type Options struct { // RunCommand is the CLI orchestrator used by cmd/hexai-tmux-action. It runs in tmux // split-pane mode by default, or child mode when -ui-child is set. func RunCommand(ctx context.Context, opts Options, stdin io.Reader, stdout, stderr io.Writer) error { + llm.RegisterAllProviders() if opts.UIChild { return runChild(ctx, opts.Infile, opts.Outfile, stdout, stderr) } // Always use tmux path - return runInTmuxParent(stdin, stdout, opts.TmuxTarget, opts.TmuxSplit, opts.TmuxPercent) + return runInTmuxParent(ctx, stdin, stdout, opts.TmuxTarget, opts.TmuxSplit, opts.TmuxPercent) } // seams for unit tests @@ -97,7 +99,7 @@ func runChild(ctx context.Context, infile, outfile string, stdout, stderr io.Wri return os.Rename(tmp, outfile) } -func runInTmuxParent(stdin io.Reader, stdout io.Writer, target, split string, percent int) error { +func runInTmuxParent(ctx context.Context, stdin io.Reader, stdout io.Writer, target, split string, percent int) error { dir, err := os.MkdirTemp("", "hexai-tmux-action-") if err != nil { return err @@ -117,7 +119,7 @@ func runInTmuxParent(stdin io.Reader, stdout io.Writer, target, split string, pe if err := splitRunFn(opts, argv); err != nil { return err } - if err := waitForFile(outPath, 60*time.Second); err != nil { + if err := waitForFile(ctx, outPath, 60*time.Second); err != nil { return err } return catFileTo(stdout, outPath) @@ -135,8 +137,13 @@ func persistStdin(path string, stdin io.Reader) error { return f.Sync() } -func waitForFile(path string, timeout time.Duration) error { +// waitForFile polls for the existence of path until it appears, the deadline 
+// expires, or ctx is cancelled. Uses a ticker instead of time.Sleep so the +// context is honoured without blocking the full poll interval. +func waitForFile(ctx context.Context, path string, timeout time.Duration) error { deadline := time.Now().Add(timeout) + ticker := time.NewTicker(200 * time.Millisecond) + defer ticker.Stop() for { if _, err := os.Stat(path); err == nil { return nil @@ -144,7 +151,11 @@ func waitForFile(path string, timeout time.Duration) error { if time.Now().After(deadline) { return fmt.Errorf("hexai-tmux-action: timeout waiting for reply file") } - time.Sleep(200 * time.Millisecond) + select { + case <-ticker.C: + case <-ctx.Done(): + return ctx.Err() + } } } diff --git a/internal/hexaiaction/cmdentry_test.go b/internal/hexaiaction/cmdentry_test.go index 71ed9db..054c78c 100644 --- a/internal/hexaiaction/cmdentry_test.go +++ b/internal/hexaiaction/cmdentry_test.go @@ -90,7 +90,7 @@ func TestRunInTmuxParent_Stubbed(t *testing.T) { return nil } t.Cleanup(func() { osExecutableFn = oldExec; splitRunFn = oldSplit }) - if err := runInTmuxParent(r, wout, "", "v", 33); err != nil { + if err := runInTmuxParent(context.Background(), r, wout, "", "v", 33); err != nil { t.Fatalf("runInTmuxParent: %v", err) } _ = wout.Close() @@ -108,7 +108,7 @@ func TestRunInTmuxParent_ExecutableError(t *testing.T) { r, w, _ := os.Pipe() _, _ = w.Write([]byte("x")) _ = w.Close() - if err := runInTmuxParent(r, io.Discard, "", "v", 33); err == nil { + if err := runInTmuxParent(context.Background(), r, io.Discard, "", "v", 33); err == nil { t.Fatal("expected error from missing executable") } } @@ -122,7 +122,7 @@ func TestRunInTmuxParent_SplitError(t *testing.T) { r, w, _ := os.Pipe() _, _ = w.Write([]byte("x")) _ = w.Close() - if err := runInTmuxParent(r, io.Discard, "", "v", 33); err == nil { + if err := runInTmuxParent(context.Background(), r, io.Discard, "", "v", 33); err == nil { t.Fatal("expected split error") } } @@ -161,7 +161,7 @@ func 
TestRunChild_StdoutAndOutfile(t *testing.T) { func TestWaitForFile_Timeout(t *testing.T) { dir := t.TempDir() p := filepath.Join(dir, "nope") - if err := waitForFile(p, 10*time.Millisecond); err == nil { + if err := waitForFile(context.Background(), p, 10*time.Millisecond); err == nil { t.Fatal("expected timeout error") } } diff --git a/internal/hexaicli/run.go b/internal/hexaicli/run.go index 0da1a5f..df37f82 100644 --- a/internal/hexaicli/run.go +++ b/internal/hexaicli/run.go @@ -92,6 +92,7 @@ func cliTemperatureFromEntry(cfg appconfig.App, provider string, entry appconfig // Run executes the Hexai CLI behavior given arguments and I/O streams. // It assumes flags have already been parsed by the caller. func Run(ctx context.Context, args []string, stdin io.Reader, stdout, stderr io.Writer) error { + llm.RegisterAllProviders() return NewRunner().Run(ctx, args, stdin, stdout, stderr) } diff --git a/internal/hexailsp/run.go b/internal/hexailsp/run.go index 57e7476..242a013 100644 --- a/internal/hexailsp/run.go +++ b/internal/hexailsp/run.go @@ -54,6 +54,7 @@ func Run(logPath string, stdin io.Reader, stdout io.Writer, stderr io.Writer) er // RunWithConfig is like Run but accepts an explicit config file path. func RunWithConfig(logPath string, configPath string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + llm.RegisterAllProviders() return runWithConfigDependencies(logPath, configPath, stdin, stdout, stderr, defaultRunDependencies()) } diff --git a/internal/hexailsp/run_test.go b/internal/hexailsp/run_test.go index 583c6c8..badb27c 100644 --- a/internal/hexailsp/run_test.go +++ b/internal/hexailsp/run_test.go @@ -15,6 +15,13 @@ import ( "codeberg.org/snonux/hexai/internal/lsp" ) +// TestMain registers all built-in LLM providers before tests run, mirroring +// the explicit registration done in production binaries via RunWithConfig. 
+func TestMain(m *testing.M) { + llm.RegisterAllProviders() + os.Exit(m.Run()) +} + // fake server capturing options and recording run calls type fakeServer struct { ran bool diff --git a/internal/llm/anthropic.go b/internal/llm/anthropic.go index 7da72b3..17c40ad 100644 --- a/internal/llm/anthropic.go +++ b/internal/llm/anthropic.go @@ -85,10 +85,6 @@ var ( _ Streamer = (*anthropicClient)(nil) ) -func init() { - RegisterProvider("anthropic", anthropicProviderFactory) -} - func anthropicProviderFactory(cfg Config, keys ProviderKeys) (Client, error) { if strings.TrimSpace(keys.AnthropicAPIKey) == "" { return nil, missingAPIKeyError("anthropic", "ANTHROPIC_API_KEY", "HEXAI_ANTHROPIC_API_KEY") diff --git a/internal/llm/ollama.go b/internal/llm/ollama.go index e212466..b2cecfa 100644 --- a/internal/llm/ollama.go +++ b/internal/llm/ollama.go @@ -45,10 +45,6 @@ type ollamaChatResponse struct { Error string `json:"error,omitempty"` } -func init() { - RegisterProvider("ollama", ollamaProviderFactory) -} - func ollamaProviderFactory(cfg Config, _ ProviderKeys) (Client, error) { return newOllamaWithTimeout( cfg.OllamaBaseURL, diff --git a/internal/llm/openai.go b/internal/llm/openai.go index cf18d9b..a119fe7 100644 --- a/internal/llm/openai.go +++ b/internal/llm/openai.go @@ -78,10 +78,6 @@ type oaStreamChunk struct { } `json:"error,omitempty"` } -func init() { - RegisterProvider("openai", openAIProviderFactory) -} - func openAIProviderFactory(cfg Config, keys ProviderKeys) (Client, error) { if strings.TrimSpace(keys.OpenAIAPIKey) == "" { return nil, missingAPIKeyError("openai", "OPENAI_API_KEY", "HEXAI_OPENAI_API_KEY") diff --git a/internal/llm/openrouter.go b/internal/llm/openrouter.go index 451e9ad..aa8a4d4 100644 --- a/internal/llm/openrouter.go +++ b/internal/llm/openrouter.go @@ -27,10 +27,6 @@ var ( _ Streamer = openRouterClient{} ) -func init() { - RegisterProvider("openrouter", openRouterProviderFactory) -} - func openRouterProviderFactory(cfg Config, keys 
ProviderKeys) (Client, error) { if strings.TrimSpace(keys.OpenRouterAPIKey) == "" { return nil, missingAPIKeyError("openrouter", "OPENROUTER_API_KEY", "HEXAI_OPENROUTER_API_KEY") diff --git a/internal/llm/provider.go b/internal/llm/provider.go index 3c72181..6c0c04b 100644 --- a/internal/llm/provider.go +++ b/internal/llm/provider.go @@ -103,20 +103,17 @@ type ProviderKeys struct { // ProviderFactory builds an LLM client for a named provider. type ProviderFactory func(cfg Config, keys ProviderKeys) (Client, error) -// providerRegistry is a package-level singleton populated by init() calls in -// each provider file (anthropic.go, openai.go, etc.). It must be a -// package-level var — rather than a constructor argument — because Go's -// init() mechanism runs before any application code, and the alternative -// (an explicit RegisterAll() in main) would require every binary that uses -// the llm package to manually enumerate all providers. The RWMutex makes the -// map safe for the rare case where RegisterProvider is called from a test -// goroutine after init() has completed. +// providerRegistry is a package-level singleton populated via RegisterAllProviders. +// Callers (binaries and tests) must call RegisterAllProviders before creating any +// clients. The RWMutex makes the map safe for concurrent reads once populated. var ( - providerRegistryMu sync.RWMutex - providerRegistry = map[string]ProviderFactory{} + providerRegistryMu sync.RWMutex + providerRegistry = map[string]ProviderFactory{} + registerProvidersOnce sync.Once ) // RegisterProvider registers a provider factory by normalized name. +// Panics on empty name, nil factory, or duplicate registration. 
func RegisterProvider(name string, factory ProviderFactory) { normalized := normalizeProvider(name) if normalized == "" { @@ -133,6 +130,18 @@ func RegisterProvider(name string, factory ProviderFactory) { providerRegistry[normalized] = factory } +// RegisterAllProviders registers all built-in LLM providers (anthropic, openai, +// openrouter, ollama). It is safe to call from multiple entry points because the +// actual registration runs only once via sync.Once. +func RegisterAllProviders() { + registerProvidersOnce.Do(func() { + RegisterProvider("anthropic", anthropicProviderFactory) + RegisterProvider("openai", openAIProviderFactory) + RegisterProvider("openrouter", openRouterProviderFactory) + RegisterProvider("ollama", ollamaProviderFactory) + }) +} + // NewFromConfig creates an LLM client using only the supplied configuration. // The OpenAI API key is supplied separately and may be read from the environment // by the caller; other environment-based configuration is not used. diff --git a/internal/llm/test_helpers_test.go b/internal/llm/test_helpers_test.go index 051747a..b6553bf 100644 --- a/internal/llm/test_helpers_test.go +++ b/internal/llm/test_helpers_test.go @@ -1,3 +1,15 @@ package llm +import ( + "os" + "testing" +) + +// TestMain registers all built-in providers before any test runs, mirroring +// the explicit registration that happens in production binaries. +func TestMain(m *testing.M) { + RegisterAllProviders() + os.Exit(m.Run()) +} + func f64p(v float64) *float64 { return &v } diff --git a/internal/llmutils/client_test.go b/internal/llmutils/client_test.go index 0cbd26d..c688213 100644 --- a/internal/llmutils/client_test.go +++ b/internal/llmutils/client_test.go @@ -5,8 +5,15 @@ import ( "testing" "codeberg.org/snonux/hexai/internal/appconfig" + "codeberg.org/snonux/hexai/internal/llm" ) +// TestMain registers all built-in LLM providers before tests run. 
+func TestMain(m *testing.M) { + llm.RegisterAllProviders() + os.Exit(m.Run()) +} + func TestNewClientFromApp_Ollama(t *testing.T) { cfg := appconfig.App{CoreConfig: appconfig.CoreConfig{Provider: "ollama"}} c, err := NewClientFromApp(cfg) diff --git a/internal/lsp/handlers.go b/internal/lsp/handlers.go index 0f98715..b06b5ea 100644 --- a/internal/lsp/handlers.go +++ b/internal/lsp/handlers.go @@ -158,8 +158,10 @@ func (s *Server) checkTriggerFromContext(p CompletionParams, current string, ope logging.Logf("lsp ", "handleCompletion: unmarshal raw context: %v", err) } } else { - b, _ := json.Marshal(p.Context) - if err := json.Unmarshal(b, &ctx); err != nil { + b, err := json.Marshal(p.Context) + if err != nil { + logging.Logf("lsp ", "handleCompletion: marshal context: %v", err) + } else if err := json.Unmarshal(b, &ctx); err != nil { logging.Logf("lsp ", "handleCompletion: unmarshal context: %v", err) } } diff --git a/internal/lsp/handlers_completion.go b/internal/lsp/handlers_completion.go index d6529de..e6d8951 100644 --- a/internal/lsp/handlers_completion.go +++ b/internal/lsp/handlers_completion.go @@ -93,8 +93,10 @@ func extractTriggerInfo(p CompletionParams) (kind int, ch string) { logging.Logf("lsp ", "extractTriggerInfo: unmarshal raw context: %v", err) } } else { - b, _ := json.Marshal(p.Context) - if err := json.Unmarshal(b, &ctx); err != nil { + b, err := json.Marshal(p.Context) + if err != nil { + logging.Logf("lsp ", "extractTriggerInfo: marshal context: %v", err) + } else if err := json.Unmarshal(b, &ctx); err != nil { logging.Logf("lsp ", "extractTriggerInfo: unmarshal context: %v", err) } } @@ -325,7 +327,10 @@ func (s *Server) executeChatCompletion(ctx context.Context, plan completionPlan, } s.incRecvCounters(len(text)) modelUsed := spec.effectiveModel(client.DefaultModel()) - _ = stats.Update(ctx, client.Name(), modelUsed, sentSize, len(text)) + // Update global stats cache; log but don't fail on stats errors + if err := stats.Update(ctx, 
client.Name(), modelUsed, sentSize, len(text)); err != nil { + logging.Logf("lsp ", "stats update error: %v", err) + } s.logLLMStats(modelUsed) trimmed := strings.TrimSpace(text) cursorByte := utf16OffsetToByteOffset(plan.current, plan.params.Position.Character) @@ -357,8 +362,10 @@ func parseManualInvoke(ctx any) bool { logging.Logf("lsp ", "parseManualInvoke: unmarshal raw context: %v", err) } } else { - b, _ := json.Marshal(ctx) - if err := json.Unmarshal(b, &c); err != nil { + b, err := json.Marshal(ctx) + if err != nil { + logging.Logf("lsp ", "parseManualInvoke: marshal context: %v", err) + } else if err := json.Unmarshal(b, &c); err != nil { logging.Logf("lsp ", "parseManualInvoke: unmarshal context: %v", err) } } @@ -501,7 +508,10 @@ func (s *Server) tryProviderNativeCompletion(ctx context.Context, plan completio } s.incSentCounters(sentBytes) s.incRecvCounters(len(suggestions[0])) - _ = stats.Update(ctx2, client.Name(), modelUsed, sentBytes, len(suggestions[0])) + // Update global stats cache; log but don't fail on stats errors + if err := stats.Update(ctx2, client.Name(), modelUsed, sentBytes, len(suggestions[0])); err != nil { + logging.Logf("lsp ", "stats update error: %v", err) + } s.logLLMStats(modelUsed) cleaned := s.postProcessNativeCompletion(suggestions[0], current, p.Position.Character) if cleaned == "" { diff --git a/internal/lsp/handlers_document.go b/internal/lsp/handlers_document.go index f69c626..7c59aa8 100644 --- a/internal/lsp/handlers_document.go +++ b/internal/lsp/handlers_document.go @@ -2,6 +2,7 @@ package lsp import ( + "context" "encoding/json" "strings" "time" @@ -400,9 +401,13 @@ func (s *Server) buildChatMessages(uri string, pos Position, prompt string) []ll // clientApplyEdit sends a workspace/applyEdit request to the client. 
func (s *Server) clientApplyEdit(label string, edit WorkspaceEdit) { params := ApplyWorkspaceEditParams{Label: label, Edit: edit} + b, err := json.Marshal(params) + if err != nil { + logging.Logf("lsp ", "clientApplyEdit: marshal error: %v", err) + return + } id := s.nextReqID() req := Request{JSONRPC: "2.0", ID: id, Method: "workspace/applyEdit"} - b, _ := json.Marshal(params) req.Params = b s.writeMessage(req) } @@ -428,9 +433,13 @@ func (s *Server) clientShowDocument(uri string, sel *Range) { params.URI = uri params.TakeFocus = true params.Selection = sel + b, err := json.Marshal(params) + if err != nil { + logging.Logf("lsp ", "clientShowDocument: marshal error: %v", err) + return + } id := s.nextReqID() req := Request{JSONRPC: "2.0", ID: id, Method: "window/showDocument"} - b, _ := json.Marshal(params) req.Params = b s.writeMessage(req) } @@ -440,14 +449,13 @@ func (s *Server) clientShowDocument(uri string, sel *Range) { // The goroutine respects s.serverCtx so it won't write after shutdown. func (s *Server) deferShowDocument(uri string, sel Range) { ctx := s.serverCtx + if ctx == nil { + // Fallback for tests that don't set a server context. 
+ ctx = context.Background() + } s.inflight.Add(1) go func() { defer s.inflight.Done() - if ctx == nil { - time.Sleep(120 * time.Millisecond) - s.clientShowDocument(uri, &sel) - return - } select { case <-time.After(120 * time.Millisecond): s.clientShowDocument(uri, &sel) diff --git a/internal/lsp/handlers_utils.go b/internal/lsp/handlers_utils.go index e3c65a5..048c2fd 100644 --- a/internal/lsp/handlers_utils.go +++ b/internal/lsp/handlers_utils.go @@ -278,8 +278,10 @@ func (s *Server) chatWithStats(ctx context.Context, surface surfaceKind, spec re return "", err } s.incRecvCounters(len(txt)) - // Update global stats cache - _ = stats.Update(ctx, client.Name(), modelUsed, sent, len(txt)) + // Update global stats cache; log but don't fail on stats errors + if err := stats.Update(ctx, client.Name(), modelUsed, sent, len(txt)); err != nil { + logging.Logf("lsp ", "stats update error: %v", err) + } s.logLLMStats(modelUsed) return txt, nil } diff --git a/internal/lsp/server.go b/internal/lsp/server.go index 25c5e5c..7675d34 100644 --- a/internal/lsp/server.go +++ b/internal/lsp/server.go @@ -344,13 +344,17 @@ func (s *Server) cancelRequests() { func (s *Server) emitLLMStartStatus(provider, model string) { if s.statusSink != nil { - _ = s.statusSink.SetLLMStart(provider, model) + if err := s.statusSink.SetLLMStart(provider, model); err != nil { + logging.Logf("lsp ", "status sink SetLLMStart error: %v", err) + } } } func (s *Server) emitGlobalStatus(gs GlobalStatus) { if s.statusSink != nil { - _ = s.statusSink.SetGlobal(gs) + if err := s.statusSink.SetGlobal(gs); err != nil { + logging.Logf("lsp ", "status sink SetGlobal error: %v", err) + } } } diff --git a/internal/mcp/server.go b/internal/mcp/server.go index 645c0cf..f8042ac 100644 --- a/internal/mcp/server.go +++ b/internal/mcp/server.go @@ -32,6 +32,7 @@ type Server struct { syncer SlashCommandSyncer initialized bool mu sync.RWMutex + inflight sync.WaitGroup // tracks handler goroutines; Run waits before returning 
// Dispatch table for JSON-RPC methods handlers map[string]func(Request) @@ -66,14 +67,16 @@ func NewServer(r io.Reader, w io.Writer, logger *log.Logger, store promptstore.P } // Run starts the server main loop, reading and dispatching requests. -// Returns on EOF or fatal error. +// Returns on EOF or fatal error, after waiting for all in-flight handlers. func (s *Server) Run() error { for { body, err := s.readMessage() if errors.Is(err, io.EOF) { + s.inflight.Wait() // drain handlers before signalling callers return nil } if err != nil { + s.inflight.Wait() return fmt.Errorf("read message: %w", err) } @@ -89,8 +92,12 @@ func (s *Server) Run() error { continue } - // Dispatch request - go s.handle(req) + // Dispatch request in a goroutine, tracked so Run can wait on completion. + s.inflight.Add(1) + go func(r Request) { + defer s.inflight.Done() + s.handle(r) + }(req) } } diff --git a/internal/mcp/server_test.go b/internal/mcp/server_test.go index 256b324..00a7823 100644 --- a/internal/mcp/server_test.go +++ b/internal/mcp/server_test.go @@ -430,14 +430,17 @@ func TestServer_Run(t *testing.T) { t.Fatalf("sendRequest() error = %v", err) } - // Run in background + // Run in background; bytes.Buffer returns EOF after the single request, + // so Run() will complete naturally once it has written the response. done := make(chan error, 1) go func() { done <- server.Run() }() - // Give time for processing (server will block waiting for more input) - time.Sleep(50 * time.Millisecond) + // Wait for Run() to return (signalled by EOF on the input buffer). 
+ if err := <-done; err != nil { + t.Fatalf("Run() error = %v", err) + } // Read response resp, err := readResponse(outBuf) diff --git a/internal/stats/stats.go b/internal/stats/stats.go index d79025a..bd91e20 100644 --- a/internal/stats/stats.go +++ b/internal/stats/stats.go @@ -29,6 +29,10 @@ var windowSeconds int64 = int64(defaultWindow.Seconds()) var errLockWouldBlock = errors.New("stats: lock would block") +// nowFunc is the clock source for event timestamps and pruning cutoffs. +// Replaced in tests to control time without sleeping. +var nowFunc = time.Now + // SetWindow sets the sliding window used for pruning and aggregation. func SetWindow(d time.Duration) { if d < time.Second { @@ -117,7 +121,7 @@ func Update(ctx context.Context, provider, model string, sentBytes, recvBytes in path := filepath.Join(dir, fileName) sf := readStatsFile(path) - now := time.Now() + now := nowFunc() win := Window() sf.WindowSeconds = int(win.Seconds()) sf.Events = append(sf.Events, Event{ @@ -260,7 +264,7 @@ func TakeSnapshot() (Snapshot, error) { if win <= 0 { win = Window() } - cutoff := time.Now().Add(-win) + cutoff := nowFunc().Add(-win) snap := Snapshot{Providers: make(map[string]ProviderEntry), Window: win} for _, ev := range sf.Events { if ev.TS.Before(cutoff) { diff --git a/internal/stats/stats_test.go b/internal/stats/stats_test.go index a9b3d22..47e3068 100644 --- a/internal/stats/stats_test.go +++ b/internal/stats/stats_test.go @@ -33,10 +33,17 @@ func TestUpdate_PrunesOld_ByWindow(t *testing.T) { t.Setenv("XDG_CACHE_HOME", t.TempDir()) SetWindow(2 * time.Second) ctx := context.Background() + + // Inject a fake clock so we can advance time without sleeping. + fakeNow := time.Now() + nowFunc = func() time.Time { return fakeNow } + defer func() { nowFunc = time.Now }() + if err := Update(ctx, "p", "m", 1, 1); err != nil { t.Fatal(err) } - time.Sleep(2200 * time.Millisecond) + // Advance fake time past the 2-second window so the first event is pruned. 
+ fakeNow = fakeNow.Add(3 * time.Second) if err := Update(ctx, "p", "m", 2, 2); err != nil { t.Fatal(err) } diff --git a/internal/version.go b/internal/version.go index 49e4e02..a9fcd29 100644 --- a/internal/version.go +++ b/internal/version.go @@ -1,4 +1,4 @@ // Package internal provides the Hexai semantic version identifier used by CLI and LSP binaries. package internal -const Version = "0.25.8" +const Version = "0.25.9" |
