author    Paul Buetow <paul@buetow.org>  2025-08-16 17:19:46 +0300
committer Paul Buetow <paul@buetow.org>  2025-08-16 17:19:46 +0300
commit    e62c851109892ed60a60926904b03bb4d2378fe4
tree      7325477c1f7223de76545d47731a85c5c47a3f5c
parent    778a3591bd27ce49acb6f8596f3c714351c412dc
feat(llm): add Ollama provider + provider selection and CLI override; update README and logs (tag: v0.0.1)
 README.md                |  40
 internal/llm/ollama.go   | 124
 internal/llm/openai.go   |  12
 internal/llm/provider.go |  39
 internal/lsp/handlers.go |  35
 internal/lsp/server.go   |   1
 6 files changed, 222 insertions(+), 29 deletions(-)
diff --git a/README.md b/README.md
index c3c77c2..1649e1b 100644
--- a/README.md
+++ b/README.md
@@ -8,14 +8,34 @@ At the moment this project is only in the proof of concept phase.
## LLM provider
-Hexai exposes a simple LLM provider interface and uses OpenAI by default for
-code completion when `OPENAI_API_KEY` is present in the environment.
+Hexai exposes a simple LLM provider interface. It supports OpenAI and a local
+Ollama server. Provider selection and models are configured via environment
+variables.
-- Required: set `OPENAI_API_KEY` to your OpenAI API key.
-- Optional: set `OPENAI_MODEL`.
-- Optional: set `OPENAI_BASE_URL` to point at a compatible endpoint.
+### Selecting a provider
-If no key is configured, Hexai will fall back to a basic, local completion.
+- Set `HEXAI_LLM_PROVIDER` to `openai` or `ollama` to force a provider.
+- If not set, Hexai auto‑detects:
+ - Uses OpenAI when `OPENAI_API_KEY` is present.
+ - Uses Ollama when any `OLLAMA_*` variables are present.
+ - Otherwise, Hexai falls back to a basic, local completion.
+
+### OpenAI configuration
+
+- Required: `OPENAI_API_KEY` — your OpenAI API key.
+- Optional: `OPENAI_MODEL` — model name (default: `gpt-4o-mini`).
+- Optional: `OPENAI_BASE_URL` — override the API base URL (e.g., an OpenAI‑compatible endpoint).
+
+### Ollama configuration (local)
+
+- Optional: `OLLAMA_MODEL` — model name/tag (default: `qwen2.5-coder:latest`).
+- Optional: `OLLAMA_BASE_URL` or `OLLAMA_HOST` — base URL to Ollama
+ (default: `http://localhost:11434`).
+
+Notes:
+- For Ollama, ensure the model is available locally (e.g., `ollama pull qwen2.5-coder:latest`).
+- If you run Ollama in OpenAI‑compatible mode, you may alternatively use the
+ OpenAI provider with `OPENAI_BASE_URL` pointing to your local endpoint.
## CLI usage and configuration
@@ -31,6 +51,7 @@ If no key is configured, Hexai will fall back to a basic, local completion.
- `always-full`: always include the full file (may be slower/costly)
- `-context-window-lines`: line count for the sliding window when `context-mode=window`.
- `-max-context-tokens`: budget for additional context tokens. If the flag isn’t provided, `HEXAI_MAX_CONTEXT_TOKENS` is used when set.
+ - `-provider`: force the LLM provider, `openai` or `ollama` (takes precedence over `HEXAI_LLM_PROVIDER`).
Notes:
- Token estimation for truncation uses a simple 4 chars/token heuristic.
@@ -48,3 +69,10 @@ Notes:
| `-max-context-tokens` | `HEXAI_MAX_CONTEXT_TOKENS` | Token budget for additional context. |
| `-log-preview-limit` | `HEXAI_LOG_PREVIEW_LIMIT` | Limit characters shown in LLM preview logs. |
| `-no-disk-io` | `HEXAI_NO_DISK_IO` | Disallow any disk reads for context. |
+| `-provider` | `HEXAI_LLM_PROVIDER` | Force LLM provider: `openai` or `ollama`. |
+
+### Environment quick reference (providers)
+
+- `HEXAI_LLM_PROVIDER`: `openai` | `ollama` (optional; otherwise auto‑detect).
+- OpenAI: `OPENAI_API_KEY` (required), `OPENAI_MODEL`, `OPENAI_BASE_URL`.
+- Ollama: `OLLAMA_MODEL`, `OLLAMA_BASE_URL` or `OLLAMA_HOST`.
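
To illustrate the provider interface end to end, here is a minimal sketch, assuming it is built inside the hexai module itself (the `llm` package is `internal`): it picks a provider via `llm.NewDefault()` using the environment rules above, then requests one completion.

```go
package main

import (
	"context"
	"fmt"
	"log"

	"hexai/internal/llm"
)

func main() {
	// Provider chosen per the rules above: HEXAI_LLM_PROVIDER first,
	// then OPENAI_API_KEY, then any OLLAMA_* variable.
	client, err := llm.NewDefault()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("using %s (%s)\n", client.Name(), client.DefaultModel())

	reply, err := client.Chat(context.Background(),
		[]llm.Message{{Role: "user", Content: "Complete: func add(a, b int) int {"}},
		llm.WithStop("\n\n"), // stop sequences are part of the provider API
	)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(reply)
}
```
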
diff --git a/internal/llm/ollama.go b/internal/llm/ollama.go
new file mode 100644
index 0000000..495b5c2
--- /dev/null
+++ b/internal/llm/ollama.go
@@ -0,0 +1,124 @@
+package llm
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "os"
+ "strings"
+ "time"
+
+ "hexai/internal/logging"
+)
+
+// ollamaClient implements Client against a local Ollama server.
+type ollamaClient struct {
+ httpClient *http.Client
+ baseURL string
+ defaultModel string
+}
+
+func newOllamaFromEnv() Client {
+ // Prefer OLLAMA_BASE_URL, fall back to OLLAMA_HOST, then default.
+ base := strings.TrimSpace(os.Getenv("OLLAMA_BASE_URL"))
+ if base == "" {
+ base = strings.TrimSpace(os.Getenv("OLLAMA_HOST"))
+ }
+ if base == "" {
+ base = "http://localhost:11434"
+ }
+ model := strings.TrimSpace(os.Getenv("OLLAMA_MODEL"))
+ if model == "" {
+ model = "qwen2.5-coder:latest"
+ }
+ return &ollamaClient{
+ httpClient: &http.Client{Timeout: 30 * time.Second},
+ baseURL: strings.TrimRight(base, "/"),
+ defaultModel: model,
+ }
+}
+
+type ollamaChatRequest struct {
+ Model string `json:"model"`
+ Messages []oaMessage `json:"messages"`
+ Stream bool `json:"stream"`
+ Options any `json:"options,omitempty"`
+}
+
+type ollamaChatResponse struct {
+ Message struct {
+ Role string `json:"role"`
+ Content string `json:"content"`
+ } `json:"message"`
+ Done bool `json:"done"`
+ Error string `json:"error,omitempty"`
+}
+
+func (c *ollamaClient) Chat(ctx context.Context, messages []Message, opts ...RequestOption) (string, error) {
+ o := Options{Model: c.defaultModel}
+ for _, opt := range opts {
+   opt(&o)
+ }
+ if o.Model == "" {
+   o.Model = c.defaultModel
+ }
+
+ start := time.Now()
+ logging.Logf("llm/ollama ", "chat start model=%s temp=%.2f max_tokens=%d stop=%d messages=%d", o.Model, o.Temperature, o.MaxTokens, len(o.Stop), len(messages))
+ for i, m := range messages {
+ logging.Logf("llm/ollama ", "msg[%d] role=%s size=%d preview=%s%s%s", i, m.Role, len(m.Content), logging.AnsiCyan, logging.PreviewForLog(m.Content), logging.AnsiBase)
+ }
+
+ req := ollamaChatRequest{Model: o.Model, Stream: false}
+ req.Messages = make([]oaMessage, len(messages))
+ for i, m := range messages {
+   req.Messages[i] = oaMessage{Role: m.Role, Content: m.Content}
+ }
+
+ // Build options map only if any option is set
+ optsMap := map[string]any{}
+ if o.Temperature != 0 {
+   optsMap["temperature"] = o.Temperature
+ }
+ if o.MaxTokens > 0 {
+   optsMap["num_predict"] = o.MaxTokens
+ }
+ if len(o.Stop) > 0 {
+   optsMap["stop"] = o.Stop
+ }
+ if len(optsMap) > 0 {
+   req.Options = optsMap
+ }
+
+ body, err := json.Marshal(req)
+ if err != nil {
+   return "", err
+ }
+
+ endpoint := c.baseURL + "/api/chat"
+ logging.Logf("llm/ollama ", "POST %s", endpoint)
+ httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
+ if err != nil {
+   return "", err
+ }
+ httpReq.Header.Set("Content-Type", "application/json")
+
+ resp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ logging.Logf("llm/ollama ", "%shttp error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase)
+ return "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ var apiErr ollamaChatResponse
+ _ = json.NewDecoder(resp.Body).Decode(&apiErr)
+ if strings.TrimSpace(apiErr.Error) != "" {
+ logging.Logf("llm/ollama ", "%sapi error status=%d msg=%s duration=%s%s", logging.AnsiRed, resp.StatusCode, apiErr.Error, time.Since(start), logging.AnsiBase)
+ return "", fmt.Errorf("ollama error: %s (status %d)", apiErr.Error, resp.StatusCode)
+ }
+ logging.Logf("llm/ollama ", "%shttp non-2xx status=%d duration=%s%s", logging.AnsiRed, resp.StatusCode, time.Since(start), logging.AnsiBase)
+ return "", fmt.Errorf("ollama http error: status %d", resp.StatusCode)
+ }
+
+ var out ollamaChatResponse
+ if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+ logging.Logf("llm/ollama ", "%sdecode error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase)
+ return "", err
+ }
+ if strings.TrimSpace(out.Message.Content) == "" {
+ logging.Logf("llm/ollama ", "%sempty content returned duration=%s%s", logging.AnsiRed, time.Since(start), logging.AnsiBase)
+ return "", errors.New("ollama: empty content")
+ }
+ content := out.Message.Content
+ logging.Logf("llm/ollama ", "success size=%d preview=%s%s%s duration=%s", len(content), logging.AnsiGreen, logging.PreviewForLog(content), logging.AnsiBase, time.Since(start))
+ return content, nil
+}
+
+// Provider metadata
+func (c *ollamaClient) Name() string { return "ollama" }
+func (c *ollamaClient) DefaultModel() string { return c.defaultModel }
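
For reference, a standalone sketch of the wire format this client speaks, assuming a local Ollama server at the default address; the request and response shapes mirror `ollamaChatRequest` and `ollamaChatResponse` above.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Non-streaming chat request against Ollama's /api/chat endpoint.
	body, _ := json.Marshal(map[string]any{
		"model":  "qwen2.5-coder:latest",
		"stream": false,
		"messages": []map[string]string{
			{"role": "user", "content": "Say hello in Go."},
		},
		// MaxTokens maps to num_predict, as in the client above.
		"options": map[string]any{"num_predict": 64},
	})
	resp, err := http.Post("http://localhost:11434/api/chat", "application/json", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Message struct {
			Role    string `json:"role"`
			Content string `json:"content"`
		} `json:"message"`
		Done bool `json:"done"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Message.Content)
}
```
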
diff --git a/internal/llm/openai.go b/internal/llm/openai.go
index 279eca4..dbcee4d 100644
--- a/internal/llm/openai.go
+++ b/internal/llm/openai.go
@@ -15,10 +15,10 @@ import (
// openAIClient implements Client against OpenAI's Chat Completions API.
type openAIClient struct {
- httpClient *http.Client
- apiKey string
- baseURL string
- defaultModel string
+ httpClient *http.Client
+ apiKey string
+ baseURL string
+ defaultModel string
}
// Colors and base styling are provided by logging.go
@@ -159,3 +159,7 @@ func trimPreview(s string, n int) string {
}
return s[:n] + "…"
}
+
+// Provider metadata
+func (c *openAIClient) Name() string { return "openai" }
+func (c *openAIClient) DefaultModel() string { return c.defaultModel }
diff --git a/internal/llm/provider.go b/internal/llm/provider.go
index a87d815..f7dad31 100644
--- a/internal/llm/provider.go
+++ b/internal/llm/provider.go
@@ -3,7 +3,8 @@ package llm
import (
"context"
"errors"
- "os"
+ "os"
+ "strings"
)
// Message represents a chat-style prompt message.
@@ -17,6 +18,10 @@ type Message struct {
type Client interface {
// Chat sends chat messages and returns the assistant text.
Chat(ctx context.Context, messages []Message, opts ...RequestOption) (string, error)
+ // Name returns the provider's short name (e.g., "openai", "ollama").
+ Name() string
+ // DefaultModel returns the configured default model name.
+ DefaultModel() string
}
// Options for a request. Providers may ignore unsupported fields.
@@ -38,11 +43,33 @@ func WithStop(stop ...string) RequestOption {
}
// NewDefault returns the default provider using environment configuration.
-// Currently this is the OpenAI provider using OPENAI_API_KEY.
+// Selection order:
+// 1) HEXAI_LLM_PROVIDER=openai|ollama
+// 2) If OPENAI_API_KEY is set -> OpenAI
+// 3) If any OLLAMA_* vars are set -> Ollama
func NewDefault() (Client, error) {
- apiKey := os.Getenv("OPENAI_API_KEY")
- if apiKey == "" {
- return nil, errors.New("OPENAI_API_KEY is not set")
+ // Explicit provider selection
+ if p := strings.ToLower(strings.TrimSpace(os.Getenv("HEXAI_LLM_PROVIDER"))); p != "" {
+ switch p {
+ case "openai":
+ apiKey := os.Getenv("OPENAI_API_KEY")
+ if apiKey == "" {
+ return nil, errors.New("OPENAI_API_KEY is not set")
+ }
+ return newOpenAIFromEnv(apiKey), nil
+ case "ollama":
+ return newOllamaFromEnv(), nil
+ default:
+ return nil, errors.New("unknown HEXAI_LLM_PROVIDER: " + p)
+ }
}
- return newOpenAIFromEnv(apiKey), nil
+
+ // Auto-detect
+ if apiKey := os.Getenv("OPENAI_API_KEY"); apiKey != "" {
+ return newOpenAIFromEnv(apiKey), nil
+ }
+ if os.Getenv("OLLAMA_BASE_URL") != "" || os.Getenv("OLLAMA_HOST") != "" || os.Getenv("OLLAMA_MODEL") != "" {
+ return newOllamaFromEnv(), nil
+ }
+ return nil, errors.New("no LLM provider configured (set OPENAI_API_KEY or HEXAI_LLM_PROVIDER/OLLAMA_*)")
}
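
The explicit setting always wins over auto-detection. A small sketch of that precedence, again assuming code inside the hexai module; the key value is a hypothetical placeholder:

```go
package main

import (
	"fmt"
	"os"

	"hexai/internal/llm"
)

func main() {
	// Even with an OpenAI key present, the explicit override selects Ollama.
	os.Setenv("OPENAI_API_KEY", "sk-example") // hypothetical placeholder key
	os.Setenv("HEXAI_LLM_PROVIDER", "ollama")

	client, err := llm.NewDefault()
	if err != nil {
		fmt.Println("no provider configured:", err)
		return
	}
	fmt.Println(client.Name()) // ollama

	// An unrecognized value is rejected rather than silently ignored.
	os.Setenv("HEXAI_LLM_PROVIDER", "claude")
	if _, err := llm.NewDefault(); err != nil {
		fmt.Println(err) // unknown HEXAI_LLM_PROVIDER: claude
	}
}
```
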
diff --git a/internal/lsp/handlers.go b/internal/lsp/handlers.go
index 565be64..40ae143 100644
--- a/internal/lsp/handlers.go
+++ b/internal/lsp/handlers.go
@@ -38,18 +38,22 @@ func (s *Server) handle(req Request) {
}
func (s *Server) handleInitialize(req Request) {
- res := InitializeResult{
- Capabilities: ServerCapabilities{
- TextDocumentSync: 1, // 1 = TextDocumentSyncKindFull
- CompletionProvider: &CompletionOptions{
- ResolveProvider: false,
- // TODO: Make the trigger characters configurable
- TriggerCharacters: []string{".", ":", "/", "_"},
- },
- },
- ServerInfo: &ServerInfo{Name: "hexai", Version: internal.Version},
- }
- s.reply(req.ID, res, nil)
+ version := internal.Version
+ if s.llmClient != nil {
+ version = version + " [" + s.llmClient.Name() + ":" + s.llmClient.DefaultModel() + "]"
+ }
+ res := InitializeResult{
+ Capabilities: ServerCapabilities{
+ TextDocumentSync: 1, // 1 = TextDocumentSyncKindFull
+ CompletionProvider: &CompletionOptions{
+ ResolveProvider: false,
+ // TODO: Make the trigger characters configurable
+ TriggerCharacters: []string{".", ":", "/", "_"},
+ },
+ },
+ ServerInfo: &ServerInfo{Name: "hexai", Version: version},
+ }
+ s.reply(req.ID, res, nil)
}
func (s *Server) handleInitialized() {
@@ -158,10 +162,15 @@ func (s *Server) tryLLMCompletion(p CompletionParams, above, current, below, fun
te, filter := computeTextEditAndFilter(cleaned, inParams, current, p)
rm := s.collectPromptRemovalEdits(p.TextDocument.URI)
label := labelForCompletion(cleaned, filter)
+ // Detail shows provider/model for visibility in client UI
+ detail := "Hexai LLM completion"
+ if s.llmClient != nil {
+ detail = "Hexai " + s.llmClient.Name() + ":" + s.llmClient.DefaultModel()
+ }
items := []CompletionItem{{
Label: label,
Kind: 1,
- Detail: "OpenAI through Hexai completion",
+ Detail: detail,
InsertTextFormat: 1,
FilterText: strings.TrimLeft(filter, " \t"),
TextEdit: te,
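
With the Ollama defaults above, the strings surfaced to the editor client would look roughly like this (illustrative; the exact version prefix comes from `internal.Version`):

```
serverInfo.version: "0.0.1 [ollama:qwen2.5-coder:latest]"
completion detail:  "Hexai ollama:qwen2.5-coder:latest"
```
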
diff --git a/internal/lsp/server.go b/internal/lsp/server.go
index 65d0b95..4e077a4 100644
--- a/internal/lsp/server.go
+++ b/internal/lsp/server.go
@@ -52,6 +52,7 @@ func NewServer(r io.Reader, w io.Writer, logger *log.Logger, logContext bool, ma
logging.Logf("lsp ", "llm disabled: %v", err)
} else {
s.llmClient = c
+ logging.Logf("lsp ", "llm enabled provider=%s model=%s", c.Name(), c.DefaultModel())
}
return s
}
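
With the defaults above, this startup line would read roughly as follows (illustrative; exact prefix formatting depends on `logging.Logf`):

```
lsp llm enabled provider=ollama model=qwen2.5-coder:latest
```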