From 35e1de6f975088ade5dbf0af533fe6fdac8fcc94 Mon Sep 17 00:00:00 2001 From: Paul Buetow Date: Sun, 2 Nov 2025 23:42:15 +0200 Subject: some linter fixes --- SCRATCHPAD.md | 2 - cmd/hexai/main.go | 2 +- cmd/hexai/main_test.go | 4 +- config.toml.example | 7 +- docs/coverage.html | 3593 +- docs/coverage.out | 52027 +++++++++++++++++------------ internal/hexaiaction/cmdentry.go | 4 +- internal/hexaiaction/cmdentry_test.go | 6 +- internal/hexaiaction/run.go | 12 +- internal/hexaiaction/tui_delegate.go | 2 +- internal/hexaicli/run.go | 35 +- internal/hexaicli/run_test.go | 2 +- internal/hexaicli/testhelpers_test.go | 10 +- internal/hexailsp/run.go | 6 +- internal/llm/copilot.go | 20 +- internal/llm/copilot_http_test.go | 16 +- internal/llm/ollama.go | 14 +- internal/llm/openai.go | 14 +- internal/llm/openai_http_test.go | 14 +- internal/llm/openai_sse_negative_test.go | 4 +- internal/llm/openrouter.go | 12 +- internal/llm/openrouter_test.go | 4 +- internal/lsp/codeaction_prompts_test.go | 2 +- internal/lsp/completion_messages_test.go | 2 +- internal/stats/stats.go | 14 +- internal/tmux/status_more_test.go | 10 +- internal/version.go | 2 +- 27 files changed, 34119 insertions(+), 21721 deletions(-) diff --git a/SCRATCHPAD.md b/SCRATCHPAD.md index 803b1a3..c046e79 100644 --- a/SCRATCHPAD.md +++ b/SCRATCHPAD.md @@ -2,8 +2,6 @@ This document shows future items and items in progress. Already completed ones are deleted from this document as updates occur. * [ ] hexai cli to keep context for the follow-up question/prompt? -* [/] configure multiple models for cli and code completion * [ ] Exclude the test coverage files from git and wipe them from the history * [/] Review documentation -* [/] Manual review the code * [ ] ASCIInema: Record and share terminal sessions for demos and bug reports diff --git a/cmd/hexai/main.go b/cmd/hexai/main.go index 7caedc6..d0508e7 100644 --- a/cmd/hexai/main.go +++ b/cmd/hexai/main.go @@ -44,7 +44,7 @@ func main() { } _ = fs.Parse(remaining) if *showVersion { - fmt.Fprintln(os.Stdout, internal.Version) + _, _ = fmt.Fprintln(os.Stdout, internal.Version) return } var selection []int diff --git a/cmd/hexai/main_test.go b/cmd/hexai/main_test.go index 70c844f..797584f 100644 --- a/cmd/hexai/main_test.go +++ b/cmd/hexai/main_test.go @@ -15,7 +15,9 @@ func TestMain_Version(t *testing.T) { os.Stdout = w defer func() { os.Stdout = old }() main() - w.Close() + if err := w.Close(); err != nil { + t.Fatalf("failed to close pipe: %v", err) + } b, _ := io.ReadAll(r) if len(b) == 0 { t.Fatalf("expected version output") diff --git a/config.toml.example b/config.toml.example index cd10e73..84f716e 100644 --- a/config.toml.example +++ b/config.toml.example @@ -3,7 +3,12 @@ [general] max_tokens = 4000 max_context_tokens = 4000 -context_mode = "always-full" # minimal | window | file-on-new-func | always-full +# context_mode controls how much of the current document is sent as extra context: +# - minimal: no additional context beyond the request payload. +# - window: include a sliding window of ~context_window_lines around the cursor. +# - file-on-new-func: include the full file only when starting a new function. +# - always-full: always include the entire open file. 
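# For example, to send only a sliding window around the cursor instead of the
# whole file (same keys as above; the values below are illustrative, not defaults):
#   context_mode = "window"
#   context_window_lines = 80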
+context_mode = "always-full" context_window_lines = 120 coding_temperature = 0.2 # single knob for LSP calls (optional) diff --git a/docs/coverage.html b/docs/coverage.html index 36775ce..4526ad1 100644 --- a/docs/coverage.html +++ b/docs/coverage.html @@ -55,13 +55,13 @@ @@ -165,15 +167,20 @@ package main import ( "flag" + "fmt" "log" "os" + "strings" "codeberg.org/snonux/hexai/internal" + "codeberg.org/snonux/hexai/internal/appconfig" "codeberg.org/snonux/hexai/internal/hexailsp" ) func main() { logPath := flag.String("log", "/tmp/hexai-lsp.log", "path to log file (optional)") + defaultCfg := defaultConfigPath() + configPath := flag.String("config", "", fmt.Sprintf("path to config file (default: %s)", defaultCfg)) showVersion := flag.Bool("version", false, "print version and exit") flag.Parse() if *showVersion { @@ -181,10 +188,19 @@ func main() { return } - if err := hexailsp.Run(*logPath, os.Stdin, os.Stdout, os.Stderr); err != nil { + path := strings.TrimSpace(*configPath) + if err := hexailsp.RunWithConfig(*logPath, path, os.Stdin, os.Stdout, os.Stderr); err != nil { log.Fatalf("server error: %v", err) } } + +func defaultConfigPath() string { + path, err := appconfig.ConfigPath() + if err != nil { + return "$XDG_CONFIG_HOME/hexai/config.toml" + } + return path +} @@ -1659,6 +2055,11 @@ type chatDoer interface { type providerNamer interface{ Name() string } +type requestArgs struct { + model string + options []llm.RequestOption +} + func providerOf(c any) string { if n, ok := c.(providerNamer); ok { return n.Name() @@ -1666,6 +2067,42 @@ func providerOf(c any) string { return "llm" } +func canonicalProvider(name string) string { + p := strings.ToLower(strings.TrimSpace(name)) + if p == "" { + return "openai" + } + return p +} + +func defaultModelForProvider(cfg appconfig.App, provider string) string { + switch provider { + case "ollama": + return cfg.OllamaModel + case "copilot": + return cfg.CopilotModel + default: + return cfg.OpenAIModel + } +} + +func selectActionTemperature(cfg appconfig.App, provider string, entry appconfig.SurfaceConfig, model string) (float64, bool) { + if entry.Temperature != nil { + return *entry.Temperature, true + } + if cfg.CodingTemperature != nil { + temp := *cfg.CodingTemperature + if provider == "openai" && strings.HasPrefix(strings.ToLower(model), "gpt-5") && temp == 0.2 { + temp = 1.0 + } + return temp, true + } + if provider == "openai" && strings.HasPrefix(strings.ToLower(model), "gpt-5") { + return 1.0, true + } + return 0, false +} + func runRewrite(ctx context.Context, cfg appconfig.App, client chatDoer, instruction, selection string) (string, error) { sys := cfg.PromptCodeActionRewriteSystem user := Render(cfg.PromptCodeActionRewriteUser, map[string]string{"instruction": instruction, "selection": selection}) @@ -1752,9 +2189,9 @@ func runOnce(ctx context.Context, client chatDoer, sys, user string) (string, er return out, nil } -func runOnceWithOpts(ctx context.Context, client chatDoer, sys, user string, opts []llm.RequestOption) (string, error) { +func runOnceWithOpts(ctx context.Context, client chatDoer, sys, user string, req requestArgs) (string, error) { msgs := []llm.Message{{Role: "system", Content: sys}, {Role: "user", Content: user}} - txt, err := client.Chat(ctx, msgs, opts...) + txt, err := client.Chat(ctx, msgs, req.options...) 
if err != nil { return "", err } @@ -1765,7 +2202,11 @@ func runOnceWithOpts(ctx context.Context, client chatDoer, sys, user string, opt sent += len(m.Content) } recv := len(out) - _ = stats.Update(ctx, providerOf(client), client.DefaultModel(), sent, recv) + model := strings.TrimSpace(req.model) + if model == "" { + model = client.DefaultModel() + } + _ = stats.Update(ctx, providerOf(client), model, sent, recv) if snap, err := stats.TakeSnapshot(); err == nil { minsWin := snap.Window.Minutes() if minsWin <= 0 { @@ -1773,30 +2214,42 @@ func runOnceWithOpts(ctx context.Context, client chatDoer, sys, user string, opt } scopeReqs := int64(0) if pe, ok := snap.Providers[providerOf(client)]; ok { - if mc, ok2 := pe.Models[client.DefaultModel()]; ok2 { + if mc, ok2 := pe.Models[model]; ok2 { scopeReqs = mc.Reqs } } scopeRPM := float64(scopeReqs) / minsWin - _ = tmux.SetStatus(tmux.FormatGlobalStatusColored(snap.Global.Reqs, snap.RPM, snap.Global.Sent, snap.Global.Recv, providerOf(client), client.DefaultModel(), scopeRPM, scopeReqs, snap.Window)) + _ = tmux.SetStatus(tmux.FormatGlobalStatusColored(snap.Global.Reqs, snap.RPM, snap.Global.Sent, snap.Global.Recv, providerOf(client), model, scopeRPM, scopeReqs, snap.Window)) } return out, nil } // reqOptsFrom builds LLM request options similar to LSP behavior. -func reqOptsFrom(cfg appconfig.App) []llm.RequestOption { - opts := []llm.RequestOption{llm.WithMaxTokens(cfg.MaxTokens)} - // Apply temperature, with special-case for gpt-5 (default temp must be 1.0) - if cfg.CodingTemperature != nil { - temp := *cfg.CodingTemperature - prov := strings.ToLower(strings.TrimSpace(cfg.Provider)) - model := strings.ToLower(strings.TrimSpace(cfg.OpenAIModel)) - if prov == "openai" && strings.HasPrefix(model, "gpt-5") { - temp = 1.0 - } - opts = append(opts, llm.WithTemperature(temp)) - } - return opts +func reqOptsFrom(cfg appconfig.App) requestArgs { + opts := make([]llm.RequestOption, 0, 3) + if cfg.MaxTokens > 0 { + opts = append(opts, llm.WithMaxTokens(cfg.MaxTokens)) + } + provider := canonicalProvider(cfg.Provider) + entries := cfg.CodeActionConfigs + if len(entries) == 0 { + entries = []appconfig.SurfaceConfig{{Provider: cfg.Provider, Model: strings.TrimSpace(defaultModelForProvider(cfg, provider))}} + } + primary := entries[0] + if strings.TrimSpace(primary.Provider) != "" { + provider = canonicalProvider(primary.Provider) + } + model := strings.TrimSpace(primary.Model) + if model == "" { + model = strings.TrimSpace(defaultModelForProvider(cfg, provider)) + } + if strings.TrimSpace(primary.Model) != "" { + opts = append(opts, llm.WithModel(strings.TrimSpace(primary.Model))) + } + if temp, ok := selectActionTemperature(cfg, provider, primary, model); ok { + opts = append(opts, llm.WithTemperature(temp)) + } + return requestArgs{model: model, options: opts} } // Timeout helpers to mirror LSP behavior. @@ -1834,13 +2287,15 @@ var ( newClientFromApp = llmutils.NewClientFromApp ) +type configPathKey struct{} + // selectedCustom carries the chosen custom action (if any) from the TUI submenu // to the executor. Cleared after use. 
var selectedCustom *appconfig.CustomAction func Run(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer) error { logger := log.New(stderr, "hexai-tmux-action ", log.LstdFlags|log.Lmsgprefix) - cfg := appconfig.Load(logger) + cfg := appconfig.LoadWithOptions(logger, appconfig.LoadOptions{ConfigPath: configPathFromContext(ctx)}) if cfg.StatsWindowMinutes > 0 { stats.SetWindow(time.Duration(cfg.StatsWindowMinutes) * time.Minute) } @@ -1849,15 +2304,24 @@ func Run(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer) error < return err } // Enable custom action submenu with configurable hotkey - if len(cfg.CustomActions) > 0 { + if len(cfg.CustomActions) > 0 { chooseActionFn = func() (ActionKind, error) { return RunTUIWithCustom(cfg.CustomActions, cfg.TmuxCustomMenuHotkey) } } + if len(cfg.CodeActionConfigs) > 0 { + if provider := strings.TrimSpace(cfg.CodeActionConfigs[0].Provider); provider != "" { + cfg.Provider = provider + } + } cli, err := newClientFromApp(cfg) if err != nil { fmt.Fprintf(stderr, logging.AnsiBase+"hexai-tmux-action: LLM disabled: %v"+logging.AnsiReset+"\n", err) return err } - _ = tmux.SetStatus(tmux.FormatLLMStartStatus(cli.Name(), cli.DefaultModel())) + primaryModel := strings.TrimSpace(reqOptsFrom(cfg).model) + if primaryModel == "" { + primaryModel = cli.DefaultModel() + } + _ = tmux.SetStatus(tmux.FormatLLMStartStatus(cli.Name(), primaryModel)) var client chatDoer = cli parts, err := ParseInput(stdin) if err != nil { @@ -1879,6 +2343,24 @@ func Run(ctx context.Context, stdin io.Reader, stdout, stderr io.Writer) error < return nil } +// WithConfigPath attaches a config path override to the context for Run/RunCommand. +func WithConfigPath(ctx context.Context, path string) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, configPathKey{}, strings.TrimSpace(path)) +} + +func configPathFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + if v, ok := ctx.Value(configPathKey{}).(string); ok { + return strings.TrimSpace(v) + } + return "" +} + func executeAction(ctx context.Context, kind ActionKind, parts InputParts, cfg appconfig.App, client chatDoer, stderr io.Writer) (string, error) { switch kind { case ActionSkip: @@ -2215,12 +2697,14 @@ func (oneLineDelegate) Render(w io.Writer, m list.Model, index int, listItem lis package hexaicli import ( + "bytes" "context" "fmt" "io" "log" "os" "strings" + "sync" "time" "codeberg.org/snonux/hexai/internal/appconfig" @@ -2230,22 +2714,169 @@ import ( "codeberg.org/snonux/hexai/internal/logging" "codeberg.org/snonux/hexai/internal/stats" "codeberg.org/snonux/hexai/internal/tmux" + "github.com/mattn/go-runewidth" + "golang.org/x/term" ) +type requestArgs struct { + model string + options []llm.RequestOption +} + +type cliJob struct { + index int + provider string + entry appconfig.SurfaceConfig + client llm.Client + req requestArgs +} + +type columnPrinter struct { + mu sync.Mutex + stdout io.Writer + columns int + colWidth int + partial []string + providers []string + models []string +} + +type columnWriter struct { + printer *columnPrinter + index int +} + +type ( + selectionContextKey struct{} + configPathContextKey struct{} +) + +func buildCLIJobs(cfg appconfig.App) ([]cliJob, error) { + entries := cfg.CLIConfigs + if len(entries) == 0 { + entries = []appconfig.SurfaceConfig{{}} + } + jobs := make([]cliJob, 0, len(entries)) + for i, raw := range entries { + entry := appconfig.SurfaceConfig{Provider: strings.TrimSpace(raw.Provider), 
Model: strings.TrimSpace(raw.Model), Temperature: raw.Temperature} + provider := entry.Provider + if provider == "" { + provider = cfg.Provider + } + provider = canonicalProvider(provider) + derived := cfg + derived.Provider = provider + switch provider { + case "openai": + if entry.Model != "" { + derived.OpenAIModel = entry.Model + } + case "copilot": + if entry.Model != "" { + derived.CopilotModel = entry.Model + } + case "ollama": + if entry.Model != "" { + derived.OllamaModel = entry.Model + } + } + client, err := newClientFromApp(derived) + if err != nil { + return nil, err + } + req := buildCLIRequest(entry, provider, cfg, client) + if strings.TrimSpace(req.model) == "" { + req.model = strings.TrimSpace(client.DefaultModel()) + } + jobs = append(jobs, cliJob{index: i, provider: provider, entry: entry, client: client, req: req}) + } + return jobs, nil +} + +func buildCLIRequest(entry appconfig.SurfaceConfig, provider string, cfg appconfig.App, client llm.Client) requestArgs { + opts := make([]llm.RequestOption, 0, 2) + if cfg.MaxTokens > 0 { + opts = append(opts, llm.WithMaxTokens(cfg.MaxTokens)) + } + model := strings.TrimSpace(entry.Model) + if model == "" { + if client != nil { + model = strings.TrimSpace(client.DefaultModel()) + } + if model == "" { + model = strings.TrimSpace(defaultModelForProvider(cfg, provider)) + } + } + if entry.Model != "" { + opts = append(opts, llm.WithModel(entry.Model)) + } + if temp, ok := cliTemperatureFromEntry(cfg, provider, entry, model); ok { + opts = append(opts, llm.WithTemperature(temp)) + } + return requestArgs{model: model, options: opts} +} + +func cliTemperatureFromEntry(cfg appconfig.App, provider string, entry appconfig.SurfaceConfig, model string) (float64, bool) { + if entry.Temperature != nil { + return *entry.Temperature, true + } + if cfg.CodingTemperature != nil { + temp := *cfg.CodingTemperature + if provider == "openai" && strings.HasPrefix(strings.ToLower(model), "gpt-5") && temp == 0.2 { + temp = 1.0 + } + return temp, true + } + if provider == "openai" && strings.HasPrefix(strings.ToLower(model), "gpt-5") { + return 1.0, true + } + return 0, false +} + +func canonicalProvider(name string) string { + p := strings.ToLower(strings.TrimSpace(name)) + if p == "" { + return "openai" + } + return p +} + +func defaultModelForProvider(cfg appconfig.App, provider string) string { + switch provider { + case "ollama": + return cfg.OllamaModel + case "copilot": + return cfg.CopilotModel + default: + return cfg.OpenAIModel + } +} + // Run executes the Hexai CLI behavior given arguments and I/O streams. // It assumes flags have already been parsed by the caller. func Run(ctx context.Context, args []string, stdin io.Reader, stdout, stderr io.Writer) error { // Load configuration with a logger so file-based config is respected. 
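 // A config path attached to the context via WithCLIConfigPath (defined
 // later in this file) takes precedence over the default config file lookup.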
logger := log.New(stderr, "hexai ", log.LstdFlags|log.Lmsgprefix) - cfg := appconfig.Load(logger) + configPath := configPathFromContext(ctx) + cfg := appconfig.LoadWithOptions(logger, appconfig.LoadOptions{ConfigPath: configPath}) if cfg.StatsWindowMinutes > 0 { stats.SetWindow(time.Duration(cfg.StatsWindowMinutes) * time.Minute) } - client, err := newClientFromApp(cfg) + jobs, err := buildCLIJobs(cfg) if err != nil { fmt.Fprintf(stderr, logging.AnsiBase+"hexai: LLM disabled: %v"+logging.AnsiReset+"\n", err) return err } + if selected := selectionFromContext(ctx); len(selected) > 0 { + jobs, err = filterJobsBySelection(jobs, selected) + if err != nil { + fmt.Fprintf(stderr, logging.AnsiBase+"hexai: %v"+logging.AnsiReset+"\n", err) + return err + } + } + if len(jobs) == 0 { + return fmt.Errorf("hexai: no CLI providers configured") + } // Prefer piped stdin when present; only open the editor when there are no args // and no stdin content available. input, rerr := readInput(stdin, args) @@ -2259,9 +2890,8 @@ func Run(ctx context.Context, args []string, stdin io.Reader, stdout, stderr io. fmt.Fprintln(stderr, logging.AnsiBase+rerr.Error()+logging.AnsiReset) return rerr } - printProviderInfo(stderr, client) - msgs := buildMessagesFromConfig(cfg, input) - if err := runChat(ctx, client, msgs, input, stdout, stderr); err != nil { + msgs := buildMessagesFromConfig(cfg, input) + if err := runCLIJobs(ctx, jobs, msgs, input, stdout, stderr); err != nil { fmt.Fprintf(stderr, logging.AnsiBase+"hexai: error: %v"+logging.AnsiReset+"\n", err) return err } @@ -2276,15 +2906,348 @@ func RunWithClient(ctx context.Context, args []string, stdin io.Reader, stdout, fmt.Fprintln(stderr, logging.AnsiBase+err.Error()+logging.AnsiReset) return err } - printProviderInfo(stderr, client) + req := requestArgs{model: strings.TrimSpace(client.DefaultModel())} + printProviderInfo(stderr, client, req.model) msgs := buildMessages(input) - if err := runChat(ctx, client, msgs, input, stdout, stderr); err != nil { + if err := runChat(ctx, client, req, msgs, input, stdout, stderr); err != nil { fmt.Fprintf(stderr, logging.AnsiBase+"hexai: error: %v"+logging.AnsiReset+"\n", err) return err } return nil } +type cliJobResult struct { + provider string + model string + output string + summary string + err error +} + +func runCLIJobs(ctx context.Context, jobs []cliJob, msgs []llm.Message, input string, stdout, stderr io.Writer) error { + results := make([]*cliJobResult, len(jobs)) + var wg sync.WaitGroup + var printer *columnPrinter + if len(jobs) > 0 { + printer = newColumnPrinter(stdout, jobs) + printer.PrintHeader() + } + for _, job := range jobs { + job := job + wg.Add(1) + printProviderInfo(stderr, job.client, job.req.model) + go func() { + defer wg.Done() + var errBuf bytes.Buffer + var outBuf bytes.Buffer + jobMsgs := make([]llm.Message, len(msgs)) + copy(jobMsgs, msgs) + writer := io.Writer(&outBuf) + if printer != nil { + writer = printer.Writer(job.index) + } + err := runChat(ctx, job.client, job.req, jobMsgs, input, writer, &errBuf) + if printer != nil { + printer.Flush(job.index) + } + results[job.index] = &cliJobResult{ + provider: job.client.Name(), + model: job.req.model, + output: outBuf.String(), + summary: errBuf.String(), + err: err, + } + }() + } + wg.Wait() + var firstErr error + if printer == nil { + printed := false + for _, res := range results { + if res == nil { + continue + } + if printed { + if _, err := io.WriteString(stdout, "\n"); err != nil { + return err + } + } + heading := fmt.Sprintf("=== %s:%s 
===\n", res.provider, res.model) + if _, err := io.WriteString(stdout, heading); err != nil { + return err + } + if res.output != "" { + if _, err := io.WriteString(stdout, res.output); err != nil { + return err + } + if !strings.HasSuffix(res.output, "\n") { + if _, err := io.WriteString(stdout, "\n"); err != nil { + return err + } + } + } + printed = true + } + } + for _, res := range results { + if res == nil { + continue + } + if res.summary != "" { + summary := strings.TrimLeft(res.summary, "\n") + if summary != "" { + if _, err := io.WriteString(stderr, summary); err != nil { + return err + } + } + } + if res.err != nil { + if _, err := fmt.Fprintf(stderr, logging.AnsiBase+"hexai: provider=%s model=%s error: %v"+logging.AnsiReset+"\n", res.provider, res.model, res.err); err != nil { + return err + } + } + if firstErr == nil && res.err != nil { + firstErr = res.err + } + } + return firstErr +} + +func newColumnPrinter(stdout io.Writer, jobs []cliJob) *columnPrinter { + cols := len(jobs) + width := detectTerminalWidth(stdout) + if width <= 0 { + width = 100 + } + sepWidth := (cols - 1) * 3 + colWidth := (width - sepWidth) / cols + if colWidth < 20 { + colWidth = 20 + } + providers := make([]string, cols) + models := make([]string, cols) + for _, job := range jobs { + providers[job.index] = job.client.Name() + models[job.index] = job.req.model + } + return &columnPrinter{ + stdout: stdout, + columns: cols, + colWidth: colWidth, + partial: make([]string, cols), + providers: providers, + models: models, + } +} + +func detectTerminalWidth(w io.Writer) int { + type fder interface{ Fd() uintptr } + if f, ok := w.(*os.File); ok { + if width, _, err := term.GetSize(int(f.Fd())); err == nil { + return width + } + } + if f, ok := w.(fder); ok { + if width, _, err := term.GetSize(int(f.Fd())); err == nil { + return width + } + } + return 0 +} + +func (cp *columnPrinter) Writer(idx int) io.Writer { + return columnWriter{printer: cp, index: idx} +} + +func (cp *columnPrinter) PrintHeader() { + cp.mu.Lock() + defer cp.mu.Unlock() + combo := make([]string, cp.columns) + for i := 0; i < cp.columns; i++ { + provider := strings.TrimSpace(cp.providers[i]) + model := strings.TrimSpace(cp.models[i]) + switch { + case provider != "" && model != "": + combo[i] = provider + ":" + model + case provider != "": + combo[i] = provider + case model != "": + combo[i] = model + default: + combo[i] = "" + } + } + cp.writeLine(combo) + divider := make([]string, cp.columns) + line := strings.Repeat("─", cp.colWidth) + for i := range divider { + divider[i] = line + } + cp.writeLine(divider) +} + +func (cp *columnPrinter) Flush(idx int) { + cp.mu.Lock() + defer cp.mu.Unlock() + if idx < 0 || idx >= len(cp.partial) { + return + } + if cp.partial[idx] == "" { + return + } + cp.emitJobLine(idx, cp.partial[idx]) + cp.partial[idx] = "" +} + +func (w columnWriter) Write(p []byte) (int, error) { + return w.printer.write(w.index, string(p)) +} + +func (cp *columnPrinter) write(idx int, data string) (int, error) { + cp.mu.Lock() + defer cp.mu.Unlock() + if idx < 0 || idx >= len(cp.partial) { + return len(data), nil + } + data = strings.ReplaceAll(data, "\r", "") + cp.partial[idx] += data + for strings.Contains(cp.partial[idx], "\n") { + line, rest, _ := strings.Cut(cp.partial[idx], "\n") + cp.partial[idx] = rest + cp.emitJobLine(idx, line) + } + return len(data), nil +} + +func (cp *columnPrinter) emitJobLine(idx int, line string) { + segments := cp.wrap(line) + for _, seg := range segments { + cells := make([]string, cp.columns) 
+ if idx >= 0 && idx < len(cells) { + cells[idx] = seg + } + cp.writeLine(cells) + } +} + +func (cp *columnPrinter) wrap(text string) []string { + text = strings.ReplaceAll(text, "\t", " ") + if runewidth.StringWidth(text) <= cp.colWidth { + return []string{text} + } + var lines []string + var current strings.Builder + width := 0 + for _, r := range text { + rw := runewidth.RuneWidth(r) + if width+rw > cp.colWidth && current.Len() > 0 { + lines = append(lines, current.String()) + current.Reset() + width = 0 + } + current.WriteRune(r) + width += rw + } + if current.Len() > 0 { + lines = append(lines, current.String()) + } + if len(lines) == 0 { + lines = append(lines, "") + } + return lines +} + +func (cp *columnPrinter) writeLine(cells []string) { + if len(cells) < cp.columns { + extra := make([]string, cp.columns-len(cells)) + cells = append(cells, extra...) + } + var builder strings.Builder + for i := 0; i < cp.columns; i++ { + cell := cells[i] + width := runewidth.StringWidth(cell) + if width > cp.colWidth { + cell = runewidth.Truncate(cell, cp.colWidth, "…") + width = runewidth.StringWidth(cell) + } + builder.WriteString(cell) + if pad := cp.colWidth - width; pad > 0 { + builder.WriteString(strings.Repeat(" ", pad)) + } + if i != cp.columns-1 { + builder.WriteString(" │ ") + } + } + builder.WriteByte('\n') + _, _ = cp.stdout.Write([]byte(builder.String())) +} + +// WithCLISelection injects provider indices into the context so Run only executes those jobs. +func WithCLISelection(ctx context.Context, indices []int) context.Context { + if ctx == nil { + ctx = context.Background() + } + cpy := make([]int, len(indices)) + copy(cpy, indices) + return context.WithValue(ctx, selectionContextKey{}, cpy) +} + +// WithCLIConfigPath returns a context that carries the config file path override. +func WithCLIConfigPath(ctx context.Context, path string) context.Context { + if ctx == nil { + ctx = context.Background() + } + return context.WithValue(ctx, configPathContextKey{}, strings.TrimSpace(path)) +} + +func configPathFromContext(ctx context.Context) string { + if ctx == nil { + return "" + } + if v, ok := ctx.Value(configPathContextKey{}).(string); ok { + return strings.TrimSpace(v) + } + return "" +} + +func selectionFromContext(ctx context.Context) []int { + if ctx == nil { + return nil + } + if v, ok := ctx.Value(selectionContextKey{}).([]int); ok { + cpy := make([]int, len(v)) + copy(cpy, v) + return cpy + } + return nil +} + +func filterJobsBySelection(jobs []cliJob, indices []int) ([]cliJob, error) { + if len(indices) == 0 { + return jobs, nil + } + filtered := make([]cliJob, 0, len(indices)) + seen := make(map[int]struct{}, len(indices)) + for _, idx := range indices { + if idx < 0 || idx >= len(jobs) { + return nil, fmt.Errorf("provider index %d out of range (0-%d)", idx, len(jobs)-1) + } + if _, ok := seen[idx]; ok { + continue + } + clone := jobs[idx] + filtered = append(filtered, clone) + seen[idx] = struct{}{} + } + for i := range filtered { + filtered[i].index = i + } + if len(filtered) == 0 { + return nil, fmt.Errorf("no CLI providers matched selection") + } + return filtered, nil +} + // readInput reads from stdin and args, then combines them per CLI rules. 
func readInput(stdin io.Reader, args []string) (string, error) {
 var stdinData string
@@ -2340,22 +3303,26 @@ func buildMessagesFromConfig(cfg appconfig.App, input string) []llm.Message {

+func runChat(ctx context.Context, client llm.Client, req requestArgs, msgs []llm.Message, input string, out io.Writer, errw io.Writer) error {
 start := time.Now()
 // Best-effort tmux status update (colored start heartbeat)
- _ = tmux.SetStatus(tmux.FormatLLMStartStatus(client.Name(), client.DefaultModel()))
+ model := strings.TrimSpace(req.model)
+ if model == "" {
+ model = client.DefaultModel()
+ }
+ _ = tmux.SetStatus(tmux.FormatLLMStartStatus(client.Name(), model))
 var output string
 if s, ok := client.(llm.Streamer); ok {
 var b strings.Builder
 if err := s.ChatStream(ctx, msgs, func(chunk string) {
 b.WriteString(chunk)
 fmt.Fprint(out, chunk)
- }); err != nil {
+ }, req.options...); err != nil {
 return err
 }
 output = b.String()
 } else {
- txt, err := client.Chat(ctx, msgs)
+ txt, err := client.Chat(ctx, msgs, req.options...)
 if err != nil {
 return err
 }
@@ -2369,7 +3336,7 @@ func runChat(ctx context.Context, client llm.Client, msgs []llm.Message, input s
 sent += len(m.Content)
 }
 recv := len(output)
- _ = stats.Update(ctx, client.Name(), client.DefaultModel(), sent, recv)
+ _ = stats.Update(ctx, client.Name(), model, sent, recv)
 snap, _ := stats.TakeSnapshot()
 minsWin := snap.Window.Minutes()
 if minsWin <= 0 {
@@ -2377,21 +3344,24 @@ func runChat(ctx context.Context, client llm.Client, msgs []llm.Message, input s
 scopeReqs := int64(0)
 if pe, ok := snap.Providers[client.Name()]; ok {
- if mc, ok2 := pe.Models[client.DefaultModel()]; ok2 {
+ if mc, ok2 := pe.Models[model]; ok2 {
 scopeReqs = mc.Reqs
 }
 }
 scopeRPM := float64(scopeReqs) / minsWin
 fmt.Fprintf(errw, "\n"+logging.AnsiBase+"done provider=%s model=%s time=%s in_bytes=%d out_bytes=%d | global Σ reqs=%d rpm=%.2f"+logging.AnsiReset+"\n",
- client.Name(), client.DefaultModel(), dur.Round(time.Millisecond), sent, recv, snap.Global.Reqs, snap.RPM)
- _ = tmux.SetStatus(tmux.FormatGlobalStatusColored(snap.Global.Reqs, snap.RPM, snap.Global.Sent, snap.Global.Recv, client.Name(), client.DefaultModel(), scopeRPM, scopeReqs, snap.Window))
+ client.Name(), model, dur.Round(time.Millisecond), sent, recv, snap.Global.Reqs, snap.RPM)
+ _ = tmux.SetStatus(tmux.FormatGlobalStatusColored(snap.Global.Reqs, snap.RPM, snap.Global.Sent, snap.Global.Recv, client.Name(), model, scopeRPM, scopeReqs, snap.Window))
 return nil
 }

 // printProviderInfo writes the provider/model line to stderr.
-func printProviderInfo(errw io.Writer, client llm.Client) {
- fmt.Fprintf(errw, logging.AnsiBase+"provider=%s model=%s"+logging.AnsiReset+"\n", client.Name(), client.DefaultModel())
-}
+func printProviderInfo(errw io.Writer, client llm.Client, model string) {
+ if strings.TrimSpace(model) == "" {
+ model = client.DefaultModel()
+ }
+ fmt.Fprintf(errw, logging.AnsiBase+"provider=%s model=%s"+logging.AnsiReset+"\n", client.Name(), model)
+}

 // newClientFromApp is kept for tests; delegates to llmutils.
 var newClientFromApp = llmutils.NewClientFromApp
@@ -2427,7 +3397,12 @@ type ServerFactory func(r io.Reader, w io.Writer, logger *log.Logger, opts lsp.S
// Run configures logging, loads config, builds the LLM client and runs the LSP server.
// It is thin and delegates to RunWithFactory for testability.
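// Run passes an empty configPath, which falls back to the default config
// file lookup.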
+ func Run(logPath string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { + return RunWithConfig(logPath, "", stdin, stdout, stderr) +} + +func RunWithConfig(logPath string, configPath string, stdin io.Reader, stdout io.Writer, stderr io.Writer) error { logger := log.New(stderr, "hexai-lsp ", log.LstdFlags|log.Lmsgprefix) if strings.TrimSpace(logPath) != "" { f, err := os.OpenFile(logPath, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0o644) @@ -2438,19 +3413,20 @@ func Run(logPath string, stdin io.Reader, stdout io.Writer, stderr io.Writer) er logger.SetOutput(f) } logging.Bind(logger) - cfg := appconfig.Load(logger) + loadOpts := appconfig.LoadOptions{ConfigPath: configPath} + cfg := appconfig.LoadWithOptions(logger, loadOpts) if err := cfg.Validate(); err != nil { logger.Fatalf("invalid config: %v", err) } if cfg.StatsWindowMinutes > 0 { stats.SetWindow(time.Duration(cfg.StatsWindowMinutes) * time.Minute) } - return RunWithFactory(logPath, stdin, stdout, logger, cfg, nil, nil) + return RunWithFactory(logPath, configPath, stdin, stdout, logger, cfg, nil, nil) } // RunWithFactory is the testable entrypoint. When client is nil, it is built from cfg+env. // When factory is nil, lsp.NewServer is used. -func RunWithFactory(logPath string, stdin io.Reader, stdout io.Writer, logger *log.Logger, cfg appconfig.App, client llm.Client, factory ServerFactory) error { +func RunWithFactory(logPath string, configPath string, stdin io.Reader, stdout io.Writer, logger *log.Logger, cfg appconfig.App, client llm.Client, factory ServerFactory) error { normalizeLoggingConfig(&cfg) if err := cfg.Validate(); err != nil { logger.Fatalf("invalid config: %v", err) @@ -2460,7 +3436,9 @@ func RunWithFactory(logPath string, stdin io.Reader, stdout io.Writer, logger *l store := runtimeconfig.New(cfg) logContext := strings.TrimSpace(logPath) != "" - opts := makeServerOptions(cfg, logContext, client) + loadOpts := appconfig.LoadOptions{ConfigPath: strings.TrimSpace(configPath)} + opts := makeServerOptions(cfg, logContext, client, loadOpts) + opts.ConfigLoadOptions = loadOpts opts.ConfigStore = store server := factory(stdin, stdout, logger, opts) if configurable, ok := server.(interface{ ApplyOptions(lsp.ServerOptions) }); ok { @@ -2470,10 +3448,10 @@ func RunWithFactory(logPath string, stdin io.Reader, stdout io.Writer, logger *l if updated.StatsWindowMinutes > 0 { stats.SetWindow(time.Duration(updated.StatsWindowMinutes) * time.Minute) } - if newClient := buildClientIfNil(updated, nil); newClient != nil { + if newClient := buildClientIfNil(updated, nil); newClient != nil { client = newClient } - opts := makeServerOptions(updated, logContext, client) + opts := makeServerOptions(updated, logContext, client, loadOpts) opts.ConfigStore = store configurable.ApplyOptions(opts) }) @@ -2498,31 +3476,39 @@ func buildClientIfNil(cfg appconfig.App, client llm.Client) llm.Client llmCfg := llm.Config{ - Provider: cfg.Provider, - OpenAIBaseURL: cfg.OpenAIBaseURL, - OpenAIModel: cfg.OpenAIModel, - OpenAITemperature: cfg.OpenAITemperature, - OllamaBaseURL: cfg.OllamaBaseURL, - OllamaModel: cfg.OllamaModel, - OllamaTemperature: cfg.OllamaTemperature, - CopilotBaseURL: cfg.CopilotBaseURL, - CopilotModel: cfg.CopilotModel, - CopilotTemperature: cfg.CopilotTemperature, + Provider: cfg.Provider, + OpenAIBaseURL: cfg.OpenAIBaseURL, + OpenAIModel: cfg.OpenAIModel, + OpenAITemperature: cfg.OpenAITemperature, + OpenRouterBaseURL: cfg.OpenRouterBaseURL, + OpenRouterModel: cfg.OpenRouterModel, + OpenRouterTemperature: 
cfg.OpenRouterTemperature, + OllamaBaseURL: cfg.OllamaBaseURL, + OllamaModel: cfg.OllamaModel, + OllamaTemperature: cfg.OllamaTemperature, + CopilotBaseURL: cfg.CopilotBaseURL, + CopilotModel: cfg.CopilotModel, + CopilotTemperature: cfg.CopilotTemperature, } // Prefer HEXAI_OPENAI_API_KEY; fall back to OPENAI_API_KEY oaKey := os.Getenv("HEXAI_OPENAI_API_KEY") if strings.TrimSpace(oaKey) == "" { oaKey = os.Getenv("OPENAI_API_KEY") } + // Prefer HEXAI_OPENROUTER_API_KEY; fall back to OPENROUTER_API_KEY + orKey := os.Getenv("HEXAI_OPENROUTER_API_KEY") + if strings.TrimSpace(orKey) == "" { + orKey = os.Getenv("OPENROUTER_API_KEY") + } // Prefer HEXAI_COPILOT_API_KEY; fall back to COPILOT_API_KEY cpKey := os.Getenv("HEXAI_COPILOT_API_KEY") if strings.TrimSpace(cpKey) == "" { cpKey = os.Getenv("COPILOT_API_KEY") } - if c, err := llm.NewFromConfig(llmCfg, oaKey, cpKey); err != nil { + if c, err := llm.NewFromConfig(llmCfg, oaKey, orKey, cpKey); err != nil { logging.Logf("lsp ", "llm disabled: %v", err) return nil - } else { + } else { logging.Logf("lsp ", "llm enabled provider=%s model=%s", c.Name(), c.DefaultModel()) return c } @@ -2537,12 +3523,12 @@ func ensureFactory(factory ServerFactory) ServerFactory } -func makeServerOptions(cfg appconfig.App, logContext bool, client llm.Client) lsp.ServerOptions { +func makeServerOptions(cfg appconfig.App, logContext bool, client llm.Client, loadOpts appconfig.LoadOptions) lsp.ServerOptions { // Map custom actions from appconfig to lsp type var customs []lsp.CustomAction - if len(cfg.CustomActions) > 0 { + if len(cfg.CustomActions) > 0 { customs = make([]lsp.CustomAction, 0, len(cfg.CustomActions)) - for _, ca := range cfg.CustomActions { + for _, ca := range cfg.CustomActions { customs = append(customs, lsp.CustomAction{ ID: ca.ID, Title: ca.Title, @@ -2555,6 +3541,7 @@ func makeServerOptions(cfg appconfig.App, logContext bool, client llm.Client) ls } } return lsp.ServerOptions{ + ConfigLoadOptions: loadOpts, LogContext: logContext, ConfigStore: nil, Config: &cfg, @@ -3288,14 +4275,14 @@ type oaStreamChunk struct { // Constructor (kept among the first functions by convention) // newOpenAI constructs an OpenAI client using explicit configuration values. // The apiKey may be empty; calls will fail until a valid key is supplied. 
-func newOpenAI(baseURL, model, apiKey string, defaultTemp *float64) Client { - if strings.TrimSpace(baseURL) == "" { +func newOpenAI(baseURL, model, apiKey string, defaultTemp *float64) Client { + if strings.TrimSpace(baseURL) == "" { baseURL = "https://api.openai.com/v1" } - if strings.TrimSpace(model) == "" { + if strings.TrimSpace(model) == "" { model = "gpt-4.1" } - return openAIClient{ + return openAIClient{ httpClient: &http.Client{Timeout: 30 * time.Second}, apiKey: apiKey, baseURL: baseURL, @@ -3305,26 +4292,26 @@ func newOpenAI(baseURL, model, apiKey string, defaultTemp *float64) Client } -func (c openAIClient) Chat(ctx context.Context, messages []Message, opts ...RequestOption) (string, error) { +func (c openAIClient) Chat(ctx context.Context, messages []Message, opts ...RequestOption) (string, error) { if c.apiKey == "" { return nilStringErr("missing OpenAI API key") } - o := Options{Model: c.defaultModel} - for _, opt := range opts { + o := Options{Model: c.defaultModel} + for _, opt := range opts { opt(&o) } - if o.Model == "" { + if o.Model == "" { o.Model = c.defaultModel } - start := time.Now() + start := time.Now() c.logStart(false, o, messages) - req := buildOAChatRequest(o, messages, c.defaultTemperature, false) + req := buildOAChatRequest(o, messages, c.defaultTemperature, false, "llm/openai ") body, err := json.Marshal(req) if err != nil { c.logf("marshal error: %v", err) return "", err } - endpoint := c.baseURL + "/chat/completions" + endpoint := c.baseURL + "/chat/completions" logging.Logf("llm/openai ", "POST %s", endpoint) resp, err := c.doJSON(ctx, endpoint, body, map[string]string{ "Authorization": "Bearer " + c.apiKey, @@ -3333,49 +4320,49 @@ func (c openAIClient) Chat(ctx context.Context, messages []Message, opts ...Requ logging.Logf("llm/openai ", "%shttp error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase) return "", err } - defer resp.Body.Close() - if err := handleOpenAINon2xx(resp, start); err != nil { + defer resp.Body.Close() + if err := handleOpenAINon2xx(resp, start, "llm/openai ", "openai"); err != nil { return "", err } - out, err := decodeOpenAIChat(resp, start) + out, err := decodeOpenAIChat(resp, start, "llm/openai ") if err != nil { return "", err } - if len(out.Choices) == 0 { + if len(out.Choices) == 0 { logging.Logf("llm/openai ", "%sno choices returned duration=%s%s", logging.AnsiRed, time.Since(start), logging.AnsiBase) return "", errors.New("openai: no choices returned") } - content := out.Choices[0].Message.Content + content := out.Choices[0].Message.Content logging.Logf("llm/openai ", "success choice=0 finish=%s size=%d preview=%s%s%s duration=%s", out.Choices[0].FinishReason, len(content), logging.AnsiGreen, logging.PreviewForLog(content), logging.AnsiBase, time.Since(start)) return content, nil } // Provider metadata -func (c openAIClient) Name() string { return "openai" } -func (c openAIClient) DefaultModel() string { return c.defaultModel } +func (c openAIClient) Name() string { return "openai" } +func (c openAIClient) DefaultModel() string { return c.defaultModel } // Streaming support (optional) -func (c openAIClient) ChatStream(ctx context.Context, messages []Message, onDelta func(string), opts ...RequestOption) error { +func (c openAIClient) ChatStream(ctx context.Context, messages []Message, onDelta func(string), opts ...RequestOption) error { if c.apiKey == "" { return errors.New("missing OpenAI API key") } - o := Options{Model: c.defaultModel} + o := Options{Model: c.defaultModel} for _, opt := 
range opts { opt(&o) } - if o.Model == "" { + if o.Model == "" { o.Model = c.defaultModel } - start := time.Now() + start := time.Now() c.logStart(true, o, messages) - req := buildOAChatRequest(o, messages, c.defaultTemperature, true) + req := buildOAChatRequest(o, messages, c.defaultTemperature, true, "llm/openai ") body, err := json.Marshal(req) if err != nil { c.logf("marshal error: %v", err) return err } - endpoint := c.baseURL + "/chat/completions" + endpoint := c.baseURL + "/chat/completions" logging.Logf("llm/openai ", "POST %s (stream)", endpoint) resp, err := c.doJSONWithAccept(ctx, endpoint, body, map[string]string{ "Authorization": "Bearer " + c.apiKey, @@ -3384,15 +4371,15 @@ func (c openAIClient) ChatStream(ctx context.Context, messages []Message, onDelt logging.Logf("llm/openai ", "%shttp error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase) return err } - defer resp.Body.Close() - if err := handleOpenAINon2xx(resp, start); err != nil { + defer resp.Body.Close() + if err := handleOpenAINon2xx(resp, start, "llm/openai ", "openai"); err != nil { return err } - if err := parseOpenAIStream(resp, start, onDelta); err != nil { + if err := parseOpenAIStream(resp, start, onDelta, "llm/openai ", "openai"); err != nil { return err } - logging.Logf("llm/openai ", "stream end duration=%s", time.Since(start)) + logging.Logf("llm/openai ", "stream end duration=%s", time.Since(start)) return nil } @@ -3400,141 +4387,311 @@ func (c openAIClient) ChatStream(ctx context.Context, messages []Message, onDelt func (c openAIClient) logf(format string, args ...any) { logging.Logf("llm/openai ", format, args...) } // helpers extracted to keep methods small -func (c openAIClient) logStart(stream bool, o Options, messages []Message) { +func (c openAIClient) logStart(stream bool, o Options, messages []Message) { logMessages := make([]struct{ Role, Content string }, len(messages)) - for i, m := range messages { + for i, m := range messages { logMessages[i] = struct{ Role, Content string }{m.Role, m.Content} } - c.chatLogger.LogStart(stream, o.Model, o.Temperature, o.MaxTokens, o.Stop, logMessages) + c.chatLogger.LogStart(stream, o.Model, o.Temperature, o.MaxTokens, o.Stop, logMessages) } -func buildOAChatRequest(o Options, messages []Message, defaultTemp *float64, stream bool) oaChatRequest { +func buildOAChatRequest(o Options, messages []Message, defaultTemp *float64, stream bool, logPrefix string) oaChatRequest { req := oaChatRequest{Model: o.Model, Stream: stream} req.Messages = make([]oaMessage, len(messages)) - for i, m := range messages { + for i, m := range messages { req.Messages[i] = oaMessage{Role: m.Role, Content: m.Content} } - if o.Temperature != 0 { + if o.Temperature != 0 { req.Temperature = &o.Temperature - } else if defaultTemp != nil { + } else if defaultTemp != nil { t := *defaultTemp req.Temperature = &t } - if o.MaxTokens > 0 { - if requiresMaxCompletionTokens(o.Model) { + if o.MaxTokens > 0 { + if requiresMaxCompletionTokens(o.Model) { req.MaxCompletionTokens = &o.MaxTokens - } else { + } else { req.MaxTokens = &o.MaxTokens } } - if len(o.Stop) > 0 { + if len(o.Stop) > 0 { req.Stop = o.Stop } // Enforce gpt-5 temperature constraints: only default (1.0) is supported. 
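 // For example, a request built with llm.WithTemperature(0.2) for a gpt-5
 // model goes out with temperature 1.0 instead.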
- if requiresMaxCompletionTokens(o.Model) { - if req.Temperature == nil || *req.Temperature != 1.0 { + if requiresMaxCompletionTokens(o.Model) { + if req.Temperature == nil || *req.Temperature != 1.0 { t := 1.0 req.Temperature = &t - logging.Logf("llm/openai ", "forcing temperature=1.0 for model=%s (gpt-5 constraint)", o.Model) + logging.Logf(logPrefix, "forcing temperature=1.0 for model=%s (gpt-5 constraint)", o.Model) } } - return req + return req } // requiresMaxCompletionTokens reports whether the given model prefers the // new parameter name "max_completion_tokens" instead of "max_tokens". Newer // models (e.g., gpt-5 family) expect this per OpenAI's API error guidance. -func requiresMaxCompletionTokens(model string) bool { +func requiresMaxCompletionTokens(model string) bool { m := strings.ToLower(strings.TrimSpace(model)) return strings.HasPrefix(m, "gpt-5") } -func (c openAIClient) doJSON(ctx context.Context, url string, body []byte, headers map[string]string) (*http.Response, error) { +func (c openAIClient) doJSON(ctx context.Context, url string, body []byte, headers map[string]string) (*http.Response, error) { req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") - for k, v := range headers { + req.Header.Set("Content-Type", "application/json") + for k, v := range headers { req.Header.Set(k, v) } - return c.httpClient.Do(req) + return c.httpClient.Do(req) } -func (c openAIClient) doJSONWithAccept(ctx context.Context, url string, body []byte, headers map[string]string, accept string) (*http.Response, error) { +func (c openAIClient) doJSONWithAccept(ctx context.Context, url string, body []byte, headers map[string]string, accept string) (*http.Response, error) { req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, bytes.NewReader(body)) if err != nil { return nil, err } - req.Header.Set("Content-Type", "application/json") + req.Header.Set("Content-Type", "application/json") req.Header.Set("Accept", accept) - for k, v := range headers { + for k, v := range headers { req.Header.Set(k, v) } - return c.httpClient.Do(req) + return c.httpClient.Do(req) } -func handleOpenAINon2xx(resp *http.Response, start time.Time) error { - if resp.StatusCode >= 200 && resp.StatusCode < 300 { +func handleOpenAINon2xx(resp *http.Response, start time.Time, logPrefix, provider string) error { + if resp.StatusCode >= 200 && resp.StatusCode < 300 { return nil } - var apiErr oaChatResponse + var apiErr oaChatResponse _ = json.NewDecoder(resp.Body).Decode(&apiErr) if apiErr.Error != nil && apiErr.Error.Message != "" { - logging.Logf("llm/openai ", "%sapi error status=%d type=%s msg=%s duration=%s%s", logging.AnsiRed, resp.StatusCode, apiErr.Error.Type, apiErr.Error.Message, time.Since(start), logging.AnsiBase) - return fmt.Errorf("openai error: %s (status %d)", apiErr.Error.Message, resp.StatusCode) + logging.Logf(logPrefix, "%sapi error status=%d type=%s msg=%s duration=%s%s", logging.AnsiRed, resp.StatusCode, apiErr.Error.Type, apiErr.Error.Message, time.Since(start), logging.AnsiBase) + return fmt.Errorf("%s error: %s (status %d)", provider, apiErr.Error.Message, resp.StatusCode) } - logging.Logf("llm/openai ", "%shttp non-2xx status=%d duration=%s%s", logging.AnsiRed, resp.StatusCode, time.Since(start), logging.AnsiBase) - return fmt.Errorf("openai http error: status %d", resp.StatusCode) + logging.Logf(logPrefix, "%shttp non-2xx status=%d duration=%s%s", logging.AnsiRed, 
resp.StatusCode, time.Since(start), logging.AnsiBase) + return fmt.Errorf("%s http error: status %d", provider, resp.StatusCode) } -func decodeOpenAIChat(resp *http.Response, start time.Time) (oaChatResponse, error) { +func decodeOpenAIChat(resp *http.Response, start time.Time, logPrefix string) (oaChatResponse, error) { var out oaChatResponse if err := json.NewDecoder(resp.Body).Decode(&out); err != nil { - logging.Logf("llm/openai ", "%sdecode error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase) + logging.Logf(logPrefix, "%sdecode error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase) return oaChatResponse{}, err } - return out, nil + return out, nil } -func parseOpenAIStream(resp *http.Response, start time.Time, onDelta func(string)) error { +func parseOpenAIStream(resp *http.Response, start time.Time, onDelta func(string), logPrefix, provider string) error { // Parse SSE: lines starting with "data: " containing JSON or [DONE] scanner := bufio.NewScanner(resp.Body) const maxBuf = 1024 * 1024 buf := make([]byte, 0, 64*1024) scanner.Buffer(buf, maxBuf) - for scanner.Scan() { + for scanner.Scan() { line := scanner.Text() - if !strings.HasPrefix(line, "data: ") { + if !strings.HasPrefix(line, "data: ") { continue } - payload := strings.TrimPrefix(line, "data: ") - if strings.TrimSpace(payload) == "[DONE]" { + payload := strings.TrimPrefix(line, "data: ") + if strings.TrimSpace(payload) == "[DONE]" { break } - var chunk oaStreamChunk - if err := json.Unmarshal([]byte(payload), &chunk); err != nil { + var chunk oaStreamChunk + if err := json.Unmarshal([]byte(payload), &chunk); err != nil { continue } - if chunk.Error != nil && chunk.Error.Message != "" { - logging.Logf("llm/openai ", "%sstream error: %s%s", logging.AnsiRed, chunk.Error.Message, logging.AnsiBase) - return fmt.Errorf("openai stream error: %s", chunk.Error.Message) + if chunk.Error != nil && chunk.Error.Message != "" { + logging.Logf(logPrefix, "%sstream error: %s%s", logging.AnsiRed, chunk.Error.Message, logging.AnsiBase) + return fmt.Errorf("%s stream error: %s", provider, chunk.Error.Message) } - for _, ch := range chunk.Choices { - if ch.Delta.Content != "" { + for _, ch := range chunk.Choices { + if ch.Delta.Content != "" { onDelta(ch.Delta.Content) } } } - if err := scanner.Err(); err != nil { - logging.Logf("llm/openai ", "%sstream read error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase) + if err := scanner.Err(); err != nil { + logging.Logf(logPrefix, "%sstream read error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase) return err } - return nil + return nil +} + + + -
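For reference, a minimal sketch of the SSE wire format parseOpenAIStream
consumes (content values are illustrative; only the fields the parser actually
reads are shown):

    data: {"choices":[{"delta":{"content":"Hel"}}]}
    data: {"choices":[{"delta":{"content":"lo"}}]}
    data: [DONE]

Each "data: " payload is unmarshalled into oaStreamChunk; non-empty
Delta.Content values are forwarded to onDelta, and "[DONE]" terminates the
stream.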