summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorPaul Buetow <paul@buetow.org>2025-08-17 08:43:53 +0300
committerPaul Buetow <paul@buetow.org>2025-08-17 08:43:53 +0300
commit07d02d93dbb7a8167758f678c68b5a1a520167c8 (patch)
treeffa0698323175c2c5fa591420f2c33d1bcb9e069
parentd44ae13e97eff75704b0fbd90814811dcc98eff5 (diff)
llm: add GitHub Copilot provider
- Implement copilot client reading COPILOT_API_KEY
- Wire copilot_base_url and copilot_model config
- Update README and config example; defaults to gpt-4.1
- Keep OpenAI default at gpt-4.1 for consistency
-rw-r--r--README.md20
-rw-r--r--cmd/hexai/main.go13
-rw-r--r--config.json.example5
-rw-r--r--internal/llm/copilot.go153
-rw-r--r--internal/llm/provider.go54
5 files changed, 215 insertions, 30 deletions
diff --git a/README.md b/README.md
index bffb1d6..fad4e81 100644
--- a/README.md
+++ b/README.md
@@ -8,20 +8,27 @@ At the moment this project is only in the PoC (proof of concept) phase.
## LLM provider
-Hexai exposes a simple LLM provider interface. It supports OpenAI and a local Ollama server. Provider selection and models are configured via a JSON configuration file.
+Hexai exposes a simple LLM provider interface. It supports OpenAI, GitHub Copilot, and a local Ollama server. Provider selection and models are configured via a JSON configuration file.
### Selecting a provider
-- Set `provider` in the config file to `openai` or `ollama`.
+- Set `provider` in the config file to `openai`, `copilot`, or `ollama`.
- If omitted, Hexai defaults to `openai`.
### OpenAI configuration
- Required: `OPENAI_API_KEY` — provided via environment variable only.
- In config file:
- - `openai_model` — model name (default: `gpt-4o-mini`).
+ - `openai_model` — model name (default: `gpt-4.1`).
- `openai_base_url` — API base (default: `https://api.openai.com/v1`).
+### Copilot configuration
+
+- Required: `COPILOT_API_KEY` — provided via environment variable only.
+- In config file:
+ - `copilot_model` — model name (default: `gpt-4.1`).
+ - `copilot_base_url` — API base (default: `https://api.githubcopilot.com`).
+
### Ollama configuration (local)
- In config file:
@@ -59,6 +66,8 @@ Notes:
"no_disk_io": true,
"trigger_characters": [".", ":", "/", "_", ";", "?"],
"provider": "ollama",
+ "copilot_model": "gpt-4.1",
+ "copilot_base_url": "https://api.githubcopilot.com",
"openai_model": "gpt-4.1",
"openai_base_url": "https://api.openai.com/v1",
"ollama_model": "qwen2.5-coder:latest",
@@ -67,8 +76,9 @@ Notes:
```
* context_mode: minimal | window | file-on-new-func | always-full
-* provider: ollama or openai
+* provider: openai | copilot | ollama
* openai_model, openai_base_url: OpenAI-only options
+* copilot_model, copilot_base_url: Copilot-only options
* ollama_model, ollama_base_url: Ollama-only options
Minimal config (defaults to OpenAI):
@@ -76,7 +86,7 @@ Minimal config (defaults to OpenAI):
{}
```
-Ensure `OPENAI_API_KEY` is set in your environment.
+Ensure `OPENAI_API_KEY` or `COPILOT_API_KEY` is set in your environment according to your chosen provider.
## Inline triggers
diff --git a/cmd/hexai/main.go b/cmd/hexai/main.go
index 941460e..25b5281 100644
--- a/cmd/hexai/main.go
+++ b/cmd/hexai/main.go
@@ -53,9 +53,12 @@ func main() {
OpenAIModel: cfg.OpenAIModel,
OllamaBaseURL: cfg.OllamaBaseURL,
OllamaModel: cfg.OllamaModel,
+ CopilotBaseURL: cfg.CopilotBaseURL,
+ CopilotModel: cfg.CopilotModel,
}
oaKey := os.Getenv("OPENAI_API_KEY")
- if c, err := llm.NewFromConfig(llmCfg, oaKey); err != nil {
+ cpKey := os.Getenv("COPILOT_API_KEY")
+ if c, err := llm.NewFromConfig(llmCfg, oaKey, cpKey); err != nil {
logging.Logf("lsp ", "llm disabled: %v", err)
} else {
client = c
@@ -93,6 +96,8 @@ type appConfig struct {
OpenAIModel string `json:"openai_model"`
OllamaBaseURL string `json:"ollama_base_url"`
OllamaModel string `json:"ollama_model"`
+ CopilotBaseURL string `json:"copilot_base_url"`
+ CopilotModel string `json:"copilot_model"`
}
func loadConfig(logger *log.Logger) appConfig {
@@ -157,5 +162,11 @@ func loadConfig(logger *log.Logger) appConfig {
if strings.TrimSpace(fileCfg.OllamaModel) != "" {
cfg.OllamaModel = fileCfg.OllamaModel
}
+ if strings.TrimSpace(fileCfg.CopilotBaseURL) != "" {
+ cfg.CopilotBaseURL = fileCfg.CopilotBaseURL
+ }
+ if strings.TrimSpace(fileCfg.CopilotModel) != "" {
+ cfg.CopilotModel = fileCfg.CopilotModel
+ }
return cfg
}
diff --git a/config.json.example b/config.json.example
index ca97076..359e862 100644
--- a/config.json.example
+++ b/config.json.example
@@ -13,5 +13,8 @@
"openai_base_url": "https://api.openai.com/v1",
"ollama_model": "qwen2.5-coder:latest",
- "ollama_base_url": "http://localhost:11434"
+ "ollama_base_url": "http://localhost:11434",
+
+ "copilot_model": "gpt-4.1",
+ "copilot_base_url": "https://api.githubcopilot.com"
}
diff --git a/internal/llm/copilot.go b/internal/llm/copilot.go
new file mode 100644
index 0000000..a31022f
--- /dev/null
+++ b/internal/llm/copilot.go
@@ -0,0 +1,153 @@
+package llm
+
+import (
+ "bytes"
+ "context"
+ "encoding/json"
+ "errors"
+ "fmt"
+ "net/http"
+ "strings"
+ "time"
+
+ "hexai/internal/logging"
+)
+
+// copilotClient implements Client against GitHub Copilot's Chat Completions API.
+type copilotClient struct {
+ httpClient *http.Client
+ apiKey string
+ baseURL string
+ defaultModel string
+}
+
+func newCopilot(baseURL, model, apiKey string) Client {
+ if strings.TrimSpace(baseURL) == "" {
+ baseURL = "https://api.githubcopilot.com"
+ }
+ if strings.TrimSpace(model) == "" {
+ model = "gpt-4.1"
+ }
+ return &copilotClient{
+ httpClient: &http.Client{Timeout: 30 * time.Second},
+ apiKey: apiKey,
+ baseURL: strings.TrimRight(baseURL, "/"),
+ defaultModel: model,
+ }
+}
+
+type copilotChatRequest struct {
+ Model string `json:"model"`
+ Messages []copilotMessage `json:"messages"`
+ Temperature *float64 `json:"temperature,omitempty"`
+ MaxTokens *int `json:"max_tokens,omitempty"`
+ Stop []string `json:"stop,omitempty"`
+}
+
+type copilotMessage struct {
+ Role string `json:"role"`
+ Content string `json:"content"`
+}
+
+type copilotChatResponse struct {
+ Choices []struct {
+ Index int `json:"index"`
+ Message struct {
+ Role string `json:"role"`
+ Content string `json:"content"`
+ } `json:"message"`
+ FinishReason string `json:"finish_reason"`
+ } `json:"choices"`
+ Error *struct {
+ Message string `json:"message"`
+ Type string `json:"type"`
+ Param any `json:"param"`
+ Code any `json:"code"`
+ } `json:"error,omitempty"`
+}
+
+func (c *copilotClient) Chat(ctx context.Context, messages []Message, opts ...RequestOption) (string, error) {
+ if strings.TrimSpace(c.apiKey) == "" {
+ return nilStringErr("missing Copilot API key")
+ }
+ o := Options{Model: c.defaultModel}
+ for _, opt := range opts {
+ opt(&o)
+ }
+ if o.Model == "" {
+ o.Model = c.defaultModel
+ }
+
+ start := time.Now()
+ logging.Logf("llm/copilot ", "chat start model=%s temp=%.2f max_tokens=%d stop=%d messages=%d", o.Model, o.Temperature, o.MaxTokens, len(o.Stop), len(messages))
+ for i, m := range messages {
+ logging.Logf("llm/copilot ", "msg[%d] role=%s size=%d preview=%s%s%s", i, m.Role, len(m.Content), logging.AnsiCyan, logging.PreviewForLog(m.Content), logging.AnsiBase)
+ }
+
+ req := copilotChatRequest{Model: o.Model}
+ req.Messages = make([]copilotMessage, len(messages))
+ for i, m := range messages {
+ req.Messages[i] = copilotMessage{Role: m.Role, Content: m.Content}
+ }
+ if o.Temperature != 0 {
+ req.Temperature = &o.Temperature
+ }
+ if o.MaxTokens > 0 {
+ req.MaxTokens = &o.MaxTokens
+ }
+ if len(o.Stop) > 0 {
+ req.Stop = o.Stop
+ }
+
+ body, err := json.Marshal(req)
+ if err != nil {
+ logging.Logf("llm/copilot ", "marshal error: %v", err)
+ return "", err
+ }
+
+ endpoint := c.baseURL + "/chat/completions"
+ logging.Logf("llm/copilot ", "POST %s", endpoint)
+ httpReq, err := http.NewRequestWithContext(ctx, http.MethodPost, endpoint, bytes.NewReader(body))
+ if err != nil {
+ logging.Logf("llm/copilot ", "new request error: %v", err)
+ return "", err
+ }
+ httpReq.Header.Set("Content-Type", "application/json")
+ httpReq.Header.Set("Authorization", "Bearer "+c.apiKey)
+ // Some Copilot deployments expect a version header; optional here.
+ // httpReq.Header.Set("X-GitHub-Api-Version", "2023-12-07")
+
+ resp, err := c.httpClient.Do(httpReq)
+ if err != nil {
+ logging.Logf("llm/copilot ", "%shttp error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase)
+ return "", err
+ }
+ defer resp.Body.Close()
+ if resp.StatusCode < 200 || resp.StatusCode >= 300 {
+ var apiErr copilotChatResponse
+ _ = json.NewDecoder(resp.Body).Decode(&apiErr)
+ if apiErr.Error != nil && strings.TrimSpace(apiErr.Error.Message) != "" {
+ logging.Logf("llm/copilot ", "%sapi error status=%d type=%s msg=%s duration=%s%s", logging.AnsiRed, resp.StatusCode, apiErr.Error.Type, apiErr.Error.Message, time.Since(start), logging.AnsiBase)
+ return "", fmt.Errorf("copilot error: %s (status %d)", apiErr.Error.Message, resp.StatusCode)
+ }
+ logging.Logf("llm/copilot ", "%shttp non-2xx status=%d duration=%s%s", logging.AnsiRed, resp.StatusCode, time.Since(start), logging.AnsiBase)
+ return "", fmt.Errorf("copilot http error: status %d", resp.StatusCode)
+ }
+
+ var out copilotChatResponse
+ if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
+ logging.Logf("llm/copilot ", "%sdecode error after %s: %v%s", logging.AnsiRed, time.Since(start), err, logging.AnsiBase)
+ return "", err
+ }
+ if len(out.Choices) == 0 {
+ logging.Logf("llm/copilot ", "%sno choices returned duration=%s%s", logging.AnsiRed, time.Since(start), logging.AnsiBase)
+ return "", errors.New("copilot: no choices returned")
+ }
+ content := out.Choices[0].Message.Content
+ logging.Logf("llm/copilot ", "success choice=0 finish=%s size=%d preview=%s%s%s duration=%s", out.Choices[0].FinishReason, len(content), logging.AnsiGreen, logging.PreviewForLog(content), logging.AnsiBase, time.Since(start))
+ return content, nil
+}
+
+// Provider metadata
+func (c *copilotClient) Name() string { return "copilot" }
+func (c *copilotClient) DefaultModel() string { return c.defaultModel }
diff --git a/internal/llm/provider.go b/internal/llm/provider.go
index 6c6cf04..dda3d16 100644
--- a/internal/llm/provider.go
+++ b/internal/llm/provider.go
@@ -43,32 +43,40 @@ func WithStop(stop ...string) RequestOption {
// Config defines provider configuration read from the Hexai config file.
type Config struct {
- Provider string
- // OpenAI options
- OpenAIBaseURL string
- OpenAIModel string
- // Ollama options
- OllamaBaseURL string
- OllamaModel string
+ Provider string
+ // OpenAI options
+ OpenAIBaseURL string
+ OpenAIModel string
+ // Ollama options
+ OllamaBaseURL string
+ OllamaModel string
+ // Copilot options
+ CopilotBaseURL string
+ CopilotModel string
}
// NewFromConfig creates an LLM client using only the supplied configuration.
// The OpenAI and Copilot API keys are supplied separately and may be read from
// the environment by the caller; other environment-based configuration is not used.
-func NewFromConfig(cfg Config, openAIAPIKey string) (Client, error) {
- p := strings.ToLower(strings.TrimSpace(cfg.Provider))
- if p == "" {
- p = "openai"
- }
- switch p {
- case "openai":
- if strings.TrimSpace(openAIAPIKey) == "" {
- return nil, errors.New("missing OPENAI_API_KEY for provider openai")
- }
- return newOpenAI(cfg.OpenAIBaseURL, cfg.OpenAIModel, openAIAPIKey), nil
- case "ollama":
- return newOllama(cfg.OllamaBaseURL, cfg.OllamaModel), nil
- default:
- return nil, errors.New("unknown LLM provider: " + p)
- }
+func NewFromConfig(cfg Config, openAIAPIKey, copilotAPIKey string) (Client, error) {
+ p := strings.ToLower(strings.TrimSpace(cfg.Provider))
+ if p == "" {
+ p = "openai"
+ }
+ switch p {
+ case "openai":
+ if strings.TrimSpace(openAIAPIKey) == "" {
+ return nil, errors.New("missing OPENAI_API_KEY for provider openai")
+ }
+ return newOpenAI(cfg.OpenAIBaseURL, cfg.OpenAIModel, openAIAPIKey), nil
+ case "ollama":
+ return newOllama(cfg.OllamaBaseURL, cfg.OllamaModel), nil
+ case "copilot":
+ if strings.TrimSpace(copilotAPIKey) == "" {
+ return nil, errors.New("missing COPILOT_API_KEY for provider copilot")
+ }
+ return newCopilot(cfg.CopilotBaseURL, cfg.CopilotModel, copilotAPIKey), nil
+ default:
+ return nil, errors.New("unknown LLM provider: " + p)
+ }
}