author    Paul Buetow <paul@buetow.org>    2025-08-16 16:26:12 +0300
committer Paul Buetow <paul@buetow.org>    2025-08-16 16:26:12 +0300
commit    833bb66706dd991ecd3973da360c472d818e970a (patch)
tree      62d8465b305883af1002063eb54ef38a08de299d
parent    148cda5f7ed4513528e3a46164b990708eeb1bc6 (diff)
logging: migrate LSP logs to global singleton (internal/logging); use consistent colors/prefix; refactor LLM provider to use global logger and remove per-client loggers
-rw-r--r--  README.md                                                            32
-rw-r--r--  internal/llm/openai.go                                               27
-rw-r--r--  internal/llm/provider.go                                              7
-rw-r--r--  internal/logging/logging.go (renamed from internal/llm/logging.go)   26
-rw-r--r--  internal/lsp/context.go                                               9
-rw-r--r--  internal/lsp/handlers.go                                             29
-rw-r--r--  internal/lsp/server.go                                               25
-rw-r--r--  internal/lsp/transport.go                                            37
8 files changed, 100 insertions, 92 deletions
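Note: the hexai/internal/logging singleton introduced in this commit is bound once at startup and then used package-wide via logging.Logf. A minimal sketch of that wiring, assuming a main.go (not part of this diff) that opens the documented default log path /tmp/hexai.log:

package main

import (
	"log"
	"os"

	"hexai/internal/logging"
)

func main() {
	// Hypothetical startup shape; the real main and its flag handling
	// are not shown in this diff.
	f, err := os.OpenFile("/tmp/hexai.log", os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0o644)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// Bind the singleton once; every package then logs with its own prefix.
	logging.Bind(log.New(f, "", log.LstdFlags))
	logging.Logf("main ", "hexai starting pid=%d", os.Getpid())
}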
diff --git a/README.md b/README.md
index 22a7821..c3c77c2 100644
--- a/README.md
+++ b/README.md
@@ -12,7 +12,7 @@ Hexai exposes a simple LLM provider interface and uses OpenAI by default for
code completion when `OPENAI_API_KEY` is present in the environment.
- Required: set `OPENAI_API_KEY` to your OpenAI API key.
-- Optional: set `OPENAI_MODEL` (default: `gpt-4o-mini`).
+- Optional: set `OPENAI_MODEL`.
- Optional: set `OPENAI_BASE_URL` to point at a compatible endpoint.
If no key is configured, Hexai will fall back to a basic, local completion.
@@ -20,17 +20,17 @@ If no key is configured, Hexai will fall back to a basic, local completion.
## CLI usage and configuration
- Run LSP server over stdio:
- - `hexai -stdio`
+ - `hexai`
- Completion settings:
- - `-max-tokens`: maximum tokens for LLM completions (default `500`). If the flag isn’t provided, `HEXAI_MAX_TOKENS` is used when set.
+ - `-max-tokens`: maximum tokens for LLM completions. If the flag isn’t provided, `HEXAI_MAX_TOKENS` is used when set.
- `-context-mode`: how much additional context to include with completion prompts (If the flag isn’t provided, `HEXAI_CONTEXT_MODE` is used when set). One of:
- `minimal`: no extra context
- `window`: include a sliding window around the cursor
- - `file-on-new-func` (default): include the full file only when defining a new function (cursor before the opening `{`)
+ - `file-on-new-func`: include the full file only when defining a new function (cursor before the opening `{`)
- `always-full`: always include the full file (may be slower/costly)
- - `-context-window-lines`: line count for the sliding window when `context-mode=window` (default `120`).
- - `-max-context-tokens`: budget for additional context tokens (default `2000`). If the flag isn’t provided, `HEXAI_MAX_CONTEXT_TOKENS` is used when set.
+ - `-context-window-lines`: line count for the sliding window when `context-mode=window`.
+ - `-max-context-tokens`: budget for additional context tokens. If the flag isn’t provided, `HEXAI_MAX_CONTEXT_TOKENS` is used when set.
Notes:
- Token estimation for truncation uses a simple 4 chars/token heuristic.
@@ -38,13 +38,13 @@ Notes:
### Flags quick reference
-| Flag | Default | Env override | Description |
-|-------------------------|--------------------|----------------------------|----------------------------------------------------|
-| `-stdio` | `true` | — | Run as LSP over stdio (only supported mode). |
-| `-log` | `/tmp/hexai.log` | — | Path to log file (optional). |
-| `-max-tokens` | `500` | `HEXAI_MAX_TOKENS` | Max tokens for LLM completions. |
-| `-context-mode` | `file-on-new-func` | `HEXAI_CONTEXT_MODE` | `minimal` `window` `file-on-new-func` `always-full` |
-| `-context-window-lines` | `120` | `HEXAI_CONTEXT_WINDOW_LINES` | Lines around cursor when using `window` mode. |
-| `-max-context-tokens` | `2000` | `HEXAI_MAX_CONTEXT_TOKENS` | Token budget for additional context. |
-| `-log-preview-limit` | `0` (unlimited) | `HEXAI_LOG_PREVIEW_LIMIT` | Limit characters shown in LLM preview logs. |
-| `-no-disk-io` | `true` | `HEXAI_NO_DISK_IO` | Disallow any disk reads for context. |
+| Flag | Env override | Description |
+|-------------------------|----------------------------|----------------------------------------------------|
+| `-stdio` | — | Run as LSP over stdio (only supported mode). |
+| `-log` | — | Path to log file (optional). |
+| `-max-tokens` | `HEXAI_MAX_TOKENS` | Max tokens for LLM completions. |
+| `-context-mode` | `HEXAI_CONTEXT_MODE` | `minimal` `window` `file-on-new-func` `always-full` |
+| `-context-window-lines` | `HEXAI_CONTEXT_WINDOW_LINES` | Lines around cursor when using `window` mode. |
+| `-max-context-tokens` | `HEXAI_MAX_CONTEXT_TOKENS` | Token budget for additional context. |
+| `-log-preview-limit` | `HEXAI_LOG_PREVIEW_LIMIT` | Limit characters shown in LLM preview logs. |
+| `-no-disk-io` | `HEXAI_NO_DISK_IO` | Disallow any disk reads for context. |
diff --git a/internal/llm/openai.go b/internal/llm/openai.go
index 9b48782..279eca4 100644
--- a/internal/llm/openai.go
+++ b/internal/llm/openai.go
@@ -6,10 +6,11 @@ import (
"encoding/json"
"errors"
"fmt"
- "log"
"net/http"
"os"
"time"
+
+ "hexai/internal/logging"
)
// openAIClient implements Client against OpenAI's Chat Completions API.
@@ -18,12 +19,11 @@ type openAIClient struct {
apiKey string
baseURL string
defaultModel string
- logger *log.Logger
}
// Colors and base styling are provided by logging.go
-func newOpenAIFromEnv(apiKey string, logger *log.Logger) Client {
+func newOpenAIFromEnv(apiKey string) Client {
base := os.Getenv("OPENAI_BASE_URL")
if base == "" {
base = "https://api.openai.com/v1"
@@ -37,7 +37,6 @@ func newOpenAIFromEnv(apiKey string, logger *log.Logger) Client {
apiKey: apiKey,
baseURL: base,
defaultModel: model,
- logger: logger,
}
}
@@ -83,10 +82,10 @@ func (c *openAIClient) Chat(ctx context.Context, messages []Message, opts ...Req
o.Model = c.defaultModel
}
start := time.Now()
- LogPrintf(c.logger, "llm/openai ", "chat start model=%s temp=%.2f max_tokens=%d stop=%d messages=%d", o.Model, o.Temperature, o.MaxTokens, len(o.Stop), len(messages))
+ logging.Logf("llm/openai ", "chat start model=%s temp=%.2f max_tokens=%d stop=%d messages=%d", o.Model, o.Temperature, o.MaxTokens, len(o.Stop), len(messages))
for i, m := range messages {
// Sending context (cyan)
- LogPrintf(c.logger, "llm/openai ", "msg[%d] role=%s size=%d preview=%s%s%s", i, m.Role, len(m.Content), AnsiCyan, previewForLog(m.Content), AnsiBase)
+ logging.Logf("llm/openai ", "msg[%d] role=%s size=%d preview=%s%s%s", i, m.Role, len(m.Content), logging.AnsiCyan, logging.PreviewForLog(m.Content), logging.AnsiBase)
}
req := oaChatRequest{Model: o.Model}
req.Messages = make([]oaMessage, len(messages))
@@ -109,7 +108,7 @@ func (c *openAIClient) Chat(ctx context.Context, messages []Message, opts ...Req
return "", err
}
endpoint := c.baseURL + "/chat/completions"
- LogPrintf(c.logger, "llm/openai ", "POST %s", endpoint)
+ logging.Logf("llm/openai ", "POST %s", endpoint)
httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
if err != nil {
c.logf("new request error: %v", err)
@@ -120,7 +119,7 @@ func (c *openAIClient) Chat(ctx context.Context, messages []Message, opts ...Req
resp, err := c.httpClient.Do(httpReq)
if err != nil {
- LogPrintf(c.logger, "llm/openai ", "%shttp error after %s: %v%s", AnsiRed, time.Since(start), err, AnsiBase)
+ logging.Logf("llm/openai ", "%shttp error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase)
return "", err
}
defer resp.Body.Close()
@@ -128,31 +127,31 @@ func (c *openAIClient) Chat(ctx context.Context, messages []Message, opts ...Req
var apiErr oaChatResponse
_ = json.NewDecoder(resp.Body).Decode(&apiErr)
if apiErr.Error != nil && apiErr.Error.Message != "" {
- LogPrintf(c.logger, "llm/openai ", "%sapi error status=%d type=%s msg=%s duration=%s%s", AnsiRed, resp.StatusCode, apiErr.Error.Type, apiErr.Error.Message, time.Since(start), AnsiBase)
+ logging.Logf("llm/openai ", "%sapi error status=%d type=%s msg=%s duration=%s%s", logging.AnsiRed, resp.StatusCode, apiErr.Error.Type, apiErr.Error.Message, time.Since(start), logging.AnsiBase)
return "", fmt.Errorf("openai error: %s (status %d)", apiErr.Error.Message, resp.StatusCode)
}
- LogPrintf(c.logger, "llm/openai ", "%shttp non-2xx status=%d duration=%s%s", AnsiRed, resp.StatusCode, time.Since(start), AnsiBase)
+ logging.Logf("llm/openai ", "%shttp non-2xx status=%d duration=%s%s", logging.AnsiRed, resp.StatusCode, time.Since(start), logging.AnsiBase)
return "", fmt.Errorf("openai http error: status %d", resp.StatusCode)
}
var out oaChatResponse
if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
- LogPrintf(c.logger, "llm/openai ", "%sdecode error after %s: %v%s", AnsiRed, time.Since(start), err, AnsiBase)
+ logging.Logf("llm/openai ", "%sdecode error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase)
return "", err
}
if len(out.Choices) == 0 {
- LogPrintf(c.logger, "llm/openai ", "%sno choices returned duration=%s%s", AnsiRed, time.Since(start), AnsiBase)
+ logging.Logf("llm/openai ", "%sno choices returned duration=%s%s", logging.AnsiRed, time.Since(start), logging.AnsiBase)
return "", errors.New("openai: no choices returned")
}
content := out.Choices[0].Message.Content
// Received context (green)
- LogPrintf(c.logger, "llm/openai ", "success choice=0 finish=%s size=%d preview=%s%s%s duration=%s", out.Choices[0].FinishReason, len(content), AnsiGreen, previewForLog(content), AnsiBase, time.Since(start))
+ logging.Logf("llm/openai ", "success choice=0 finish=%s size=%d preview=%s%s%s duration=%s", out.Choices[0].FinishReason, len(content), logging.AnsiGreen, logging.PreviewForLog(content), logging.AnsiBase, time.Since(start))
return content, nil
}
// small helper to keep return type consistent
func nilStringErr(msg string) (string, error) { return "", errors.New(msg) }
-func (c *openAIClient) logf(format string, args ...any) { LogPrintf(c.logger, "llm/openai ", format, args...) }
+func (c *openAIClient) logf(format string, args ...any) { logging.Logf("llm/openai ", format, args...) }
func trimPreview(s string, n int) string {
if n <= 0 || len(s) <= n {
diff --git a/internal/llm/provider.go b/internal/llm/provider.go
index e83d1e2..a87d815 100644
--- a/internal/llm/provider.go
+++ b/internal/llm/provider.go
@@ -3,8 +3,7 @@ package llm
import (
"context"
"errors"
- "log"
- "os"
+ "os"
)
// Message represents a chat-style prompt message.
@@ -40,10 +39,10 @@ func WithStop(stop ...string) RequestOption {
// NewDefault returns the default provider using environment configuration.
// Currently this is the OpenAI provider using OPENAI_API_KEY.
-func NewDefault(logger *log.Logger) (Client, error) {
+func NewDefault() (Client, error) {
apiKey := os.Getenv("OPENAI_API_KEY")
if apiKey == "" {
return nil, errors.New("OPENAI_API_KEY is not set")
}
- return newOpenAIFromEnv(apiKey, logger), nil
+ return newOpenAIFromEnv(apiKey), nil
}
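Note: with the logger parameter gone, constructing the provider is a one-liner. A small usage sketch, under the assumption that llm.Message carries the Role/Content fields iterated over in the openai.go hunk above:

package main

import (
	"context"
	"fmt"
	"log"

	"hexai/internal/llm"
)

func main() {
	client, err := llm.NewDefault() // errors if OPENAI_API_KEY is not set
	if err != nil {
		log.Fatal(err)
	}
	// Chat accepts the same options the LSP handler uses (WithMaxTokens,
	// WithTemperature); all provider logging now goes through the singleton.
	text, err := client.Chat(context.Background(),
		[]llm.Message{{Role: "user", Content: "Say hello"}},
		llm.WithMaxTokens(50), llm.WithTemperature(0.2))
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(text)
}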
diff --git a/internal/llm/logging.go b/internal/logging/logging.go
index a6a6e51..2e4bbc8 100644
--- a/internal/llm/logging.go
+++ b/internal/logging/logging.go
@@ -1,11 +1,11 @@
-package llm
+package logging
import (
"fmt"
"log"
)
-// ANSI color utilities shared across LLM providers.
+// ANSI color utilities shared across Hexai.
const (
AnsiBgBlack = "\x1b[40m"
AnsiGrey = "\x1b[90m"
@@ -18,13 +18,19 @@ const (
// AnsiBase is the default style: black background + grey foreground.
const AnsiBase = AnsiBgBlack + AnsiGrey
-// LogPrintf wraps a formatted message with a base style and prints with a prefix.
-func LogPrintf(logger *log.Logger, prefix, format string, args ...any) {
- if logger == nil {
+// singleton logger used across the codebase
+var std *log.Logger
+
+// Bind sets the underlying standard logger to use for Logf.
+func Bind(l *log.Logger) { std = l }
+
+// Logf prints a formatted message with a module prefix and base ANSI style.
+func Logf(prefix, format string, args ...any) {
+ if std == nil {
return
}
msg := fmt.Sprintf(format, args...)
- logger.Print(AnsiBase + prefix + msg + AnsiReset)
+ std.Print(AnsiBase + prefix + msg + AnsiReset)
}
// Logging configuration for previews (shared)
@@ -34,9 +40,13 @@ var logPreviewLimit int // 0 means unlimited
// request/response previews. Set to 0 for unlimited.
func SetLogPreviewLimit(n int) { logPreviewLimit = n }
-func previewForLog(s string) string {
+// PreviewForLog returns the string truncated to the configured preview limit.
+func PreviewForLog(s string) string {
if logPreviewLimit > 0 {
- return trimPreview(s, logPreviewLimit)
+ if len(s) <= logPreviewLimit {
+ return s
+ }
+ return s[:logPreviewLimit] + "…"
}
return s
}
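Note: PreviewForLog now appends a "…" marker when it truncates, instead of delegating to the unexported trimPreview. A short sketch of the exported preview API as changed above:

package main

import (
	"fmt"

	"hexai/internal/logging"
)

func main() {
	logging.SetLogPreviewLimit(8)
	fmt.Println(logging.PreviewForLog("short"))                // within the limit: returned unchanged
	fmt.Println(logging.PreviewForLog("a much longer prompt")) // "a much l…": first 8 bytes plus ellipsis
	// Truncation slices bytes (s[:limit]), so a multi-byte rune at the
	// boundary could be split; a limit of 0 means unlimited.
	logging.SetLogPreviewLimit(0)
}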
diff --git a/internal/lsp/context.go b/internal/lsp/context.go
index 8b7ed67..8f345df 100644
--- a/internal/lsp/context.go
+++ b/internal/lsp/context.go
@@ -2,6 +2,7 @@ package lsp
import (
"strings"
+ "hexai/internal/logging"
)
// buildAdditionalContext builds extra context messages based on the configured mode.
@@ -33,9 +34,7 @@ func (s *Server) buildAdditionalContext(newFunc bool, uri string, pos Position)
func (s *Server) windowContext(uri string, pos Position) string {
d := s.getDocument(uri)
if d == nil || len(d.lines) == 0 {
- if s.logger != nil {
- s.logger.Printf("context: window requested but document not open; skipping uri=%s", uri)
- }
+ logging.Logf("lsp ", "context: window requested but document not open; skipping uri=%s", uri)
return ""
}
n := len(d.lines)
@@ -55,9 +54,7 @@ func (s *Server) windowContext(uri string, pos Position) string {
func (s *Server) fullFileContext(uri string) string {
d := s.getDocument(uri)
if d == nil {
- if s.logger != nil {
- s.logger.Printf("context: full-file requested but document not open; skipping uri=%s", uri)
- }
+ logging.Logf("lsp ", "context: full-file requested but document not open; skipping uri=%s", uri)
return ""
}
return truncateToApproxTokens(d.text, s.maxContextTokens)
diff --git a/internal/lsp/handlers.go b/internal/lsp/handlers.go
index 8a782c4..1e77141 100644
--- a/internal/lsp/handlers.go
+++ b/internal/lsp/handlers.go
@@ -1,14 +1,15 @@
package lsp
import (
- "context"
- "encoding/json"
- "fmt"
- "hexai/internal"
- "hexai/internal/llm"
- "os"
- "strings"
- "time"
+ "context"
+ "encoding/json"
+ "fmt"
+ "hexai/internal"
+ "hexai/internal/llm"
+ "hexai/internal/logging"
+ "os"
+ "strings"
+ "time"
)
func (s *Server) handle(req Request) {
@@ -52,7 +53,7 @@ func (s *Server) handleInitialize(req Request) {
}
func (s *Server) handleInitialized() {
- s.logger.Println("client initialized")
+ logging.Logf("lsp ", "client initialized")
}
func (s *Server) handleShutdown(req Request) {
@@ -126,7 +127,7 @@ func (s *Server) buildDocString(p CompletionParams, above, current, below, funcC
}
func (s *Server) logCompletionContext(p CompletionParams, above, current, below, funcCtx string) {
- s.logger.Printf("completion ctx uri=%s line=%d char=%d above=%q current=%q below=%q function=%q",
+ logging.Logf("lsp ", "completion ctx uri=%s line=%d char=%d above=%q current=%q below=%q function=%q",
p.TextDocument.URI, p.Position.Line, p.Position.Character, trimLen(above), trimLen(current), trimLen(below), trimLen(funcCtx))
}
@@ -145,10 +146,10 @@ func (s *Server) tryLLMCompletion(p CompletionParams, above, current, below, fun
}
text, err := s.llmClient.Chat(ctx, messages, llm.WithMaxTokens(s.maxTokens), llm.WithTemperature(0.2))
- if err != nil {
- s.logger.Printf("llm completion error: %v", err)
- return nil, false
- }
+ if err != nil {
+ logging.Logf("lsp ", "llm completion error: %v", err)
+ return nil, false
+ }
cleaned := strings.TrimSpace(text)
if cleaned == "" {
return nil, false
diff --git a/internal/lsp/server.go b/internal/lsp/server.go
index 865d033..65d0b95 100644
--- a/internal/lsp/server.go
+++ b/internal/lsp/server.go
@@ -1,13 +1,14 @@
package lsp
import (
- "bufio"
- "encoding/json"
- "hexai/internal/llm"
- "io"
- "log"
- "sync"
- "time"
+ "bufio"
+ "encoding/json"
+ "hexai/internal/llm"
+ "hexai/internal/logging"
+ "io"
+ "log"
+ "sync"
+ "time"
)
// Server implements a minimal LSP over stdio.
@@ -47,8 +48,8 @@ func NewServer(r io.Reader, w io.Writer, logger *log.Logger, logContext bool, ma
s.windowLines = windowLines
s.maxContextTokens = maxContextTokens
s.noDiskIO = noDiskIO
- if c, err := llm.NewDefault(logger); err != nil {
- s.logger.Printf("llm disabled: %v", err)
+ if c, err := llm.NewDefault(); err != nil {
+ logging.Logf("lsp ", "llm disabled: %v", err)
} else {
s.llmClient = c
}
@@ -66,9 +67,9 @@ func (s *Server) Run() error {
}
var req Request
if err := json.Unmarshal(body, &req); err != nil {
- s.logger.Printf("invalid JSON: %v", err)
- continue
- }
+ logging.Logf("lsp ", "invalid JSON: %v", err)
+ continue
+ }
if req.Method == "" {
// A response from client; ignore
continue
diff --git a/internal/lsp/transport.go b/internal/lsp/transport.go
index 671d69b..dfdb5fc 100644
--- a/internal/lsp/transport.go
+++ b/internal/lsp/transport.go
@@ -1,12 +1,13 @@
package lsp
import (
- "encoding/json"
- "fmt"
- "io"
- "net/textproto"
- "strconv"
- "strings"
+ "encoding/json"
+ "fmt"
+ "hexai/internal/logging"
+ "io"
+ "net/textproto"
+ "strconv"
+ "strings"
)
func (s *Server) readMessage() ([]byte, error) {
@@ -47,17 +48,17 @@ func (s *Server) readMessage() ([]byte, error) {
func (s *Server) writeMessage(v any) {
data, err := json.Marshal(v)
- if err != nil {
- s.logger.Printf("marshal error: %v", err)
- return
- }
+ if err != nil {
+ logging.Logf("lsp ", "marshal error: %v", err)
+ return
+ }
header := fmt.Sprintf("Content-Length: %d\r\n\r\n", len(data))
- if _, err := io.WriteString(s.out, header); err != nil {
- s.logger.Printf("write header error: %v", err)
- return
- }
- if _, err := s.out.Write(data); err != nil {
- s.logger.Printf("write body error: %v", err)
- return
- }
+ if _, err := io.WriteString(s.out, header); err != nil {
+ logging.Logf("lsp ", "write header error: %v", err)
+ return
+ }
+ if _, err := s.out.Write(data); err != nil {
+ logging.Logf("lsp ", "write body error: %v", err)
+ return
+ }
}