summaryrefslogtreecommitdiff
path: root/internal/lsp/server.go
blob: 7773dd14b3985edfd3501cc8113d7035fd3dbe9c (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
// Summary: Minimal LSP server over stdio; manages documents, dispatches requests, and tracks stats.
// Not yet reviewed by a human
package lsp

import (
	"bufio"
	"encoding/json"
	"errors"
	"io"
	"log"
	"sync"
	"time"

	"hexai/internal/llm"
	"hexai/internal/logging"
)

// Server implements a minimal LSP over stdio. All mutable state shared
// between the read loop and handler goroutines is guarded by mu.
type Server struct {
	in     *bufio.Reader // buffered LSP input stream
	out    io.Writer     // LSP output stream
	logger *log.Logger
	exited bool // set once the client has requested exit
	mu     sync.RWMutex
	docs   map[string]*document // open documents keyed by URI
	// Configuration (fixed after NewServer).
	logContext       bool
	llmClient        llm.Client
	lastInput        time.Time
	maxTokens        int
	contextMode      string
	windowLines      int
	maxContextTokens int
	noDiskIO         bool
	triggerChars     []string
	// If set, used as the LSP coding temperature for all LLM calls.
	codingTemperature *float64
	// Concurrency guard: prevent overlapping LLM requests (esp. completions).
	llmBusy bool
	// LLM request stats.
	llmReqTotal       int64
	llmSentBytesTotal int64
	llmRespTotal      int64
	llmRespBytesTotal int64
	startTime         time.Time
}

// ServerOptions collects configuration for NewServer to avoid long parameter
// lists. Zero values are replaced with sensible defaults by NewServer.
type ServerOptions struct {
	LogContext        bool       // log assembled LLM context when true
	MaxTokens         int        // response token cap (default 500)
	ContextMode       string     // context strategy (default "file-on-new-func")
	WindowLines       int        // context window size in lines (default 120)
	MaxContextTokens  int        // context token budget (default 2000)
	Client            llm.Client // LLM backend; may be nil
	TriggerCharacters []string   // completion trigger chars (defaulted if empty)
	CodingTemperature *float64   // optional override temperature for LLM calls
}

// NewServer constructs a Server that reads LSP messages from r and writes
// responses to w. Zero/empty fields in opts are replaced with conservative
// defaults; the TriggerCharacters slice is copied so the caller's slice is
// not aliased.
func NewServer(r io.Reader, w io.Writer, logger *log.Logger, opts ServerOptions) *Server {
	s := &Server{
		in:         bufio.NewReader(r),
		out:        w,
		logger:     logger,
		docs:       make(map[string]*document),
		logContext: opts.LogContext,
		llmClient:  opts.Client,
		startTime:  time.Now(),
	}
	s.maxTokens = opts.MaxTokens
	if s.maxTokens <= 0 {
		s.maxTokens = 500
	}
	s.contextMode = opts.ContextMode
	if s.contextMode == "" {
		s.contextMode = "file-on-new-func"
	}
	s.windowLines = opts.WindowLines
	if s.windowLines <= 0 {
		s.windowLines = 120
	}
	s.maxContextTokens = opts.MaxContextTokens
	if s.maxContextTokens <= 0 {
		s.maxContextTokens = 2000
	}
	if len(opts.TriggerCharacters) == 0 {
		// Conservative defaults to reduce early triggers and API usage.
		s.triggerChars = []string{".", ":", "/", "_"}
	} else {
		s.triggerChars = append([]string{}, opts.TriggerCharacters...)
	}
	s.codingTemperature = opts.CodingTemperature
	return s
}

// tryStartLLM attempts to mark the LLM as busy. Returns true when it acquired
// the guard; false if another LLM request is already running. Pair every
// successful call with endLLM.
func (s *Server) tryStartLLM() bool {
	s.mu.Lock()
	defer s.mu.Unlock()
	if s.llmBusy {
		return false
	}
	s.llmBusy = true
	return true
}

// endLLM releases the busy guard for LLM requests acquired via tryStartLLM.
func (s *Server) endLLM() {
	s.mu.Lock()
	s.llmBusy = false
	s.mu.Unlock()
}

// Run reads and dispatches LSP messages until EOF, a read error, or an exit
// request. Each request is handled on its own goroutine, so responses may be
// written out of order relative to arrival.
func (s *Server) Run() error {
	for {
		body, err := s.readMessage()
		// errors.Is also matches a wrapped io.EOF from readMessage.
		if errors.Is(err, io.EOF) {
			return nil
		}
		if err != nil {
			return err
		}
		var req Request
		if err := json.Unmarshal(body, &req); err != nil {
			logging.Logf("lsp ", "invalid JSON: %v", err)
			continue
		}
		if req.Method == "" {
			// A response from the client; ignore.
			continue
		}
		go s.handle(req)
		// exited is set by handler goroutines; read it under the lock to
		// avoid a data race with the concurrent handle above.
		// NOTE(review): verify handle writes exited while holding s.mu.
		s.mu.RLock()
		exited := s.exited
		s.mu.RUnlock()
		if exited {
			return nil
		}
	}
}