1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
|
// Summary: Minimal LSP server over stdio; manages documents, dispatches requests, and tracks stats.
package lsp
import (
"bufio"
"encoding/json"
"codeberg.org/snonux/hexai/internal/llm"
"codeberg.org/snonux/hexai/internal/logging"
"io"
"log"
"sync"
"time"
)
// Server implements a minimal LSP over stdio.
//
// It reads Content-Length-framed JSON-RPC messages from in, dispatches
// them through the handlers table (see Run), and writes replies to out.
// Handlers run on their own goroutines, so mutable state is intended to
// be guarded by mu.
type Server struct {
	in     *bufio.Reader // framed JSON-RPC input stream
	out    io.Writer     // framed JSON-RPC output stream
	logger *log.Logger
	exited bool          // set once the client requested exit; Run stops when true
	mu     sync.RWMutex  // guards mutable state shared with handler goroutines
	docs   map[string]*document // open documents keyed by URI — TODO confirm key is the URI
	logContext bool      // when true, log gathered completion context
	llmClient  llm.Client
	lastInput  time.Time // NOTE(review): not set in this chunk — confirm where it is updated
	maxTokens  int       // completion token budget; defaults to 500 (see NewServer)
	contextMode string   // context strategy; defaults to "file-on-new-func" (see NewServer)
	windowLines int      // context window size in lines; defaults to 120 (see NewServer)
	maxContextTokens int // cap on context tokens sent to the LLM; defaults to 2000 (see NewServer)
	noDiskIO    bool     // NOTE(review): not set in this chunk — presumably disables file reads; verify
	triggerChars []string // completion trigger characters advertised to the client
	// If set, used as the LSP coding temperature for all LLM calls
	codingTemperature *float64
	// LLM request stats
	llmReqTotal       int64
	llmSentBytesTotal int64
	llmRespTotal      int64
	llmRespBytesTotal int64
	startTime         time.Time // server start time, recorded in NewServer
	// Small LRU cache for recent code completion outputs (keyed by context)
	compCache      map[string]string
	compCacheOrder []string // most-recent at end; cap ~10
	// Outgoing JSON-RPC id counter for server-initiated requests
	nextID int64
	// Minimum identifier chars required for manual invoke to bypass prefix checks
	manualInvokeMinPrefix int
	// LLM concurrency guard: allow at most one in-flight request
	llmBusy bool
	// Dispatch table for JSON-RPC methods → handler functions
	handlers map[string]func(Request)
}
// ServerOptions collects configuration for NewServer to avoid long parameter lists.
//
// Zero values select built-in defaults where noted below; see NewServer
// for the exact fallback logic.
type ServerOptions struct {
	LogContext        bool    // log gathered completion context
	MaxTokens         int     // completion token budget; <= 0 means 500
	ContextMode       string  // context strategy; "" means "file-on-new-func"
	WindowLines       int     // context window in lines; <= 0 means 120
	MaxContextTokens  int     // context token cap; <= 0 means 2000
	Client            llm.Client
	TriggerCharacters []string // completion trigger chars; empty selects built-in defaults
	CodingTemperature *float64 // optional LLM temperature override for all calls
	ManualInvokeMinPrefix int  // min identifier chars for manual invoke to bypass prefix checks
}
// NewServer builds a Server reading from r and writing to w, applying
// built-in defaults for any ServerOptions fields left at their zero
// value (MaxTokens 500, ContextMode "file-on-new-func", WindowLines 120,
// MaxContextTokens 2000, and a default trigger-character set).
func NewServer(r io.Reader, w io.Writer, logger *log.Logger, opts ServerOptions) *Server {
	// intOr substitutes def when v is non-positive.
	intOr := func(v, def int) int {
		if v <= 0 {
			return def
		}
		return v
	}

	s := &Server{
		in:         bufio.NewReader(r),
		out:        w,
		logger:     logger,
		docs:       make(map[string]*document),
		logContext: opts.LogContext,
	}

	s.maxTokens = intOr(opts.MaxTokens, 500)

	mode := opts.ContextMode
	if mode == "" {
		mode = "file-on-new-func"
	}
	s.contextMode = mode

	s.windowLines = intOr(opts.WindowLines, 120)
	s.maxContextTokens = intOr(opts.MaxContextTokens, 2000)
	s.startTime = time.Now()
	s.llmClient = opts.Client

	if len(opts.TriggerCharacters) == 0 {
		// Defaults (no space to avoid auto-trigger after whitespace)
		s.triggerChars = []string{".", ":", "/", "_", ")", "{"}
	} else {
		// Copy so later mutation of the caller's slice cannot affect us.
		s.triggerChars = append([]string{}, opts.TriggerCharacters...)
	}

	s.codingTemperature = opts.CodingTemperature
	s.compCache = make(map[string]string)
	s.manualInvokeMinPrefix = opts.ManualInvokeMinPrefix

	// Dispatch table: JSON-RPC method name → handler.
	s.handlers = map[string]func(Request){
		"initialize":              s.handleInitialize,
		"initialized":             func(_ Request) { s.handleInitialized() },
		"shutdown":                s.handleShutdown,
		"exit":                    func(_ Request) { s.handleExit() },
		"textDocument/didOpen":    s.handleDidOpen,
		"textDocument/didChange":  s.handleDidChange,
		"textDocument/didClose":   s.handleDidClose,
		"textDocument/completion": s.handleCompletion,
		"textDocument/codeAction": s.handleCodeAction,
		"codeAction/resolve":      s.handleCodeActionResolve,
	}
	return s
}
// Run is the server's main loop. It reads framed JSON-RPC messages from
// the input stream, dispatches each request to its handler on a fresh
// goroutine, and returns nil on clean shutdown (EOF or an observed exit)
// or the first read error otherwise.
func (s *Server) Run() error {
	for {
		body, err := s.readMessage()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		var req Request
		if err := json.Unmarshal(body, &req); err != nil {
			logging.Logf("lsp ", "invalid JSON: %v", err)
			continue
		}
		if req.Method == "" {
			// A response from the client (no method); nothing to dispatch.
			continue
		}
		// Dispatch concurrently so a slow handler (e.g. an LLM call)
		// cannot stall the read loop.
		go s.handle(req)
		// s.exited is written by the exit handler on another goroutine;
		// read it under the lock to avoid a data race. NOTE(review):
		// because dispatch is asynchronous, the flag may only be observed
		// on the next incoming message — confirm the exit handler also
		// writes under s.mu and that this latency is acceptable.
		s.mu.RLock()
		exited := s.exited
		s.mu.RUnlock()
		if exited {
			return nil
		}
	}
}
|