1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
|
// Summary: Hexai CLI runner; reads input, creates an LLM client, builds messages,
// streams or collects the model output, and prints a short summary to stderr.
package hexaicli
import (
"context"
"fmt"
"io"
"log"
"os"
"strings"
"time"
"codeberg.org/snonux/hexai/internal/appconfig"
"codeberg.org/snonux/hexai/internal/editor"
"codeberg.org/snonux/hexai/internal/llm"
"codeberg.org/snonux/hexai/internal/llmutils"
"codeberg.org/snonux/hexai/internal/logging"
"codeberg.org/snonux/hexai/internal/stats"
"codeberg.org/snonux/hexai/internal/tmux"
)
// requestArgs bundles the resolved model name and the per-request
// options to pass to the LLM client for a single CLI invocation.
type requestArgs struct {
	model   string              // effective model (explicit override or provider fallback)
	options []llm.RequestOption // request options: model override and/or temperature
}
// buildCLIRequestArgs resolves the model and request options for a CLI
// chat call. Provider precedence: config provider, then the CLI-specific
// provider override, then (highest) the live client's reported name.
// Model precedence: an explicit CLI model override wins over the
// provider's fallback default.
func buildCLIRequestArgs(cfg appconfig.App, client llm.Client) requestArgs {
	// Resolve the provider name, lowest precedence first.
	provider := canonicalProvider(cfg.Provider)
	if p := strings.TrimSpace(cfg.CLIProvider); p != "" {
		provider = canonicalProvider(p)
	}
	if client != nil {
		provider = strings.ToLower(strings.TrimSpace(client.Name()))
	}

	// Resolve the model: explicit override beats the provider fallback;
	// a live client's default model beats the configured fallback.
	override := strings.TrimSpace(cfg.CLIModel)
	fallback := strings.TrimSpace(defaultModelForProvider(cfg, provider))
	if client != nil {
		if dm := strings.TrimSpace(client.DefaultModel()); dm != "" {
			fallback = dm
		}
	}
	effective := fallback
	if override != "" {
		effective = override
	}

	// Assemble request options; at most a model override and a temperature.
	opts := make([]llm.RequestOption, 0, 2)
	if override != "" {
		opts = append(opts, llm.WithModel(override))
	}
	if temp, ok := cliTemperature(cfg, provider, effective); ok {
		opts = append(opts, llm.WithTemperature(temp))
	}
	return requestArgs{model: effective, options: opts}
}
// defaultRequestArgs resolves the model when only minimal configuration
// is available: the configured CLI model if set, otherwise the client's
// default model. No request options are produced.
func defaultRequestArgs(cfg appconfig.App, client llm.Client) requestArgs {
	if m := strings.TrimSpace(cfg.CLIModel); m != "" {
		return requestArgs{model: m}
	}
	if client == nil {
		return requestArgs{}
	}
	return requestArgs{model: strings.TrimSpace(client.DefaultModel())}
}
// cliTemperature picks the sampling temperature for a CLI request.
// Precedence: explicit CLI temperature, then the coding temperature,
// then a GPT-5-only default of 1.0. When the coding temperature is
// exactly the stock 0.2 and the target is an OpenAI GPT-5 model, it is
// remapped to 1.0 (assumption preserved from the original: GPT-5
// appears to require temperature 1 — confirm against provider docs).
// The second return value reports whether a temperature was chosen.
func cliTemperature(cfg appconfig.App, provider, model string) (float64, bool) {
	isGPT5 := provider == "openai" && strings.HasPrefix(strings.ToLower(model), "gpt-5")
	switch {
	case cfg.CLITemperature != nil:
		return *cfg.CLITemperature, true
	case cfg.CodingTemperature != nil:
		t := *cfg.CodingTemperature
		if isGPT5 && t == 0.2 {
			t = 1.0
		}
		return t, true
	case isGPT5:
		return 1.0, true
	}
	return 0, false
}
// canonicalProvider normalizes a provider name: trimmed of surrounding
// whitespace and lowercased. An empty result defaults to "openai".
func canonicalProvider(name string) string {
	p := strings.TrimSpace(name)
	p = strings.ToLower(p)
	if len(p) == 0 {
		p = "openai"
	}
	return p
}
// defaultModelForProvider returns the configured default model for the
// given provider; any unrecognized provider falls back to the OpenAI model.
func defaultModelForProvider(cfg appconfig.App, provider string) string {
	if provider == "ollama" {
		return cfg.OllamaModel
	}
	if provider == "copilot" {
		return cfg.CopilotModel
	}
	return cfg.OpenAIModel
}
// Run executes the Hexai CLI behavior given arguments and I/O streams.
// It assumes flags have already been parsed by the caller.
//
// Flow: load config → construct the LLM client → gather input (args,
// piped stdin, or an interactive editor as a last resort) → send the
// chat request and print the result to stdout. Errors are echoed to
// stderr in the CLI's ANSI style and also returned to the caller.
func Run(ctx context.Context, args []string, stdin io.Reader, stdout, stderr io.Writer) error {
	// Load configuration with a logger so file-based config is respected.
	logger := log.New(stderr, "hexai ", log.LstdFlags|log.Lmsgprefix)
	cfg := appconfig.Load(logger)
	// Narrow the global stats window when configured (value is in minutes).
	if cfg.StatsWindowMinutes > 0 {
		stats.SetWindow(time.Duration(cfg.StatsWindowMinutes) * time.Minute)
	}
	// A CLI-specific provider overrides the global provider before client
	// construction so the right backend is chosen.
	providerOverride := strings.TrimSpace(cfg.CLIProvider)
	if providerOverride != "" {
		cfg.Provider = providerOverride
	}
	client, err := newClientFromApp(cfg)
	if err != nil {
		fmt.Fprintf(stderr, logging.AnsiBase+"hexai: LLM disabled: %v"+logging.AnsiReset+"\n", err)
		return err
	}
	req := buildCLIRequestArgs(cfg, client)
	// Prefer piped stdin when present; only open the editor when there are no args
	// and no stdin content available.
	input, rerr := readInput(stdin, args)
	if rerr != nil && len(args) == 0 {
		// Last resort: let the user compose the prompt in an editor.
		if prompt, eerr := editor.OpenTempAndEdit(nil); eerr == nil && strings.TrimSpace(prompt) != "" {
			args = []string{prompt}
			input, rerr = readInput(stdin, args)
		}
	}
	if rerr != nil {
		fmt.Fprintln(stderr, logging.AnsiBase+rerr.Error()+logging.AnsiReset)
		return rerr
	}
	printProviderInfo(stderr, client, req.model)
	msgs := buildMessagesFromConfig(cfg, input)
	if err := runChat(ctx, client, req, msgs, input, stdout, stderr); err != nil {
		fmt.Fprintf(stderr, logging.AnsiBase+"hexai: error: %v"+logging.AnsiReset+"\n", err)
		return err
	}
	return nil
}
// RunWithClient executes the CLI flow using an already-constructed
// client, skipping config loading. Useful for testing and embedding.
// Errors are echoed to stderr in the CLI's ANSI style and returned.
func RunWithClient(ctx context.Context, args []string, stdin io.Reader, stdout, stderr io.Writer, client llm.Client) error {
	input, err := readInput(stdin, args)
	if err != nil {
		fmt.Fprintln(stderr, logging.AnsiBase+err.Error()+logging.AnsiReset)
		return err
	}
	// No loaded config here: a zero App means the client's default model is used.
	req := defaultRequestArgs(appconfig.App{}, client)
	printProviderInfo(stderr, client, req.model)
	if err = runChat(ctx, client, req, buildMessages(input), input, stdout, stderr); err != nil {
		fmt.Fprintf(stderr, logging.AnsiBase+"hexai: error: %v"+logging.AnsiReset+"\n", err)
		return err
	}
	return nil
}
// readInput reads from stdin and args, then combines them per CLI rules.
func readInput(stdin io.Reader, args []string) (string, error) {
var stdinData string
if fi, err := os.Stdin.Stat(); err == nil && (fi.Mode()&os.ModeCharDevice) == 0 {
data, readErr := io.ReadAll(stdin)
if readErr != nil {
return "", fmt.Errorf("hexai: failed to read stdin: %w", readErr)
}
stdinData = strings.TrimSpace(string(data))
}
argData := strings.TrimSpace(strings.Join(args, " "))
switch {
case stdinData != "" && argData != "":
return fmt.Sprintf("%s:\n\n%s", argData, stdinData), nil
case stdinData != "":
return stdinData, nil
case argData != "":
return argData, nil
default:
return "", fmt.Errorf("hexai: no input provided; pass text as an argument or via stdin")
}
}
// LLM client construction lives in internal/llmutils; this package only
// keeps the newClientFromApp indirection (see bottom of file) for tests.
// buildMessages builds the system and user messages for a prompt. The
// default system prompt demands terse, command-only answers; when the
// word "explain" appears anywhere in the input (case-insensitive), a
// verbose explanatory system prompt is substituted.
func buildMessages(input string) []llm.Message {
	system := "You are Hexai CLI. Default to very short, concise answers. If the user asks for commands, output only the commands (one per line) with no commentary or explanation. Only when the word 'explain' appears in the prompt, produce a verbose explanation."
	if strings.Contains(strings.ToLower(input), "explain") {
		system = "You are Hexai CLI. The user requested an explanation. Provide a clear, verbose explanation with reasoning and details. If commands are needed, include them with brief context."
	}
	msgs := []llm.Message{{Role: "system", Content: system}}
	return append(msgs, llm.Message{Role: "user", Content: input})
}
// buildMessagesFromConfig builds the system and user messages using the
// configured CLI system prompts. When the input contains "explain"
// (case-insensitive) and a non-blank explain prompt is configured, that
// prompt replaces the default system prompt.
func buildMessagesFromConfig(cfg appconfig.App, input string) []llm.Message {
	system := cfg.PromptCLIDefaultSystem
	wantsExplain := strings.Contains(strings.ToLower(input), "explain")
	if wantsExplain && strings.TrimSpace(cfg.PromptCLIExplainSystem) != "" {
		system = cfg.PromptCLIExplainSystem
	}
	return []llm.Message{
		{Role: "system", Content: system},
		{Role: "user", Content: input},
	}
}
// runChat executes the chat request, handling streaming and summary output.
//
// It prefers the streaming path when the client implements llm.Streamer,
// echoing chunks to out as they arrive; otherwise it performs a blocking
// Chat call and prints the full response. Afterwards it records byte
// counts in the global stats, prints a one-line summary to errw, and
// best-effort updates the tmux status line (tmux/stats errors ignored).
// Returns the first error from the chat call, if any.
func runChat(ctx context.Context, client llm.Client, req requestArgs, msgs []llm.Message, input string, out io.Writer, errw io.Writer) error {
	start := time.Now()
	// Best-effort tmux status update (colored start heartbeat)
	model := strings.TrimSpace(req.model)
	if model == "" {
		model = client.DefaultModel()
	}
	_ = tmux.SetStatus(tmux.FormatLLMStartStatus(client.Name(), model))
	var output string
	if s, ok := client.(llm.Streamer); ok {
		// Streaming path: accumulate chunks for the stats while echoing
		// them to the output writer as they arrive.
		var b strings.Builder
		if err := s.ChatStream(ctx, msgs, func(chunk string) {
			b.WriteString(chunk)
			fmt.Fprint(out, chunk)
		}, req.options...); err != nil {
			return err
		}
		output = b.String()
	} else {
		// Non-streaming path: print the whole response once it arrives.
		txt, err := client.Chat(ctx, msgs, req.options...)
		if err != nil {
			return err
		}
		output = txt
		fmt.Fprint(out, output)
	}
	dur := time.Since(start)
	// Contribute to global stats and update tmux status
	sent := 0
	for _, m := range msgs {
		sent += len(m.Content)
	}
	recv := len(output)
	_ = stats.Update(ctx, client.Name(), model, sent, recv)
	snap, _ := stats.TakeSnapshot()
	minsWin := snap.Window.Minutes()
	if minsWin <= 0 {
		// Guard against division by zero for a degenerate stats window.
		minsWin = 0.001
	}
	// Per-provider/per-model request count within the window (0 if absent).
	scopeReqs := int64(0)
	if pe, ok := snap.Providers[client.Name()]; ok {
		if mc, ok2 := pe.Models[model]; ok2 {
			scopeReqs = mc.Reqs
		}
	}
	scopeRPM := float64(scopeReqs) / minsWin
	fmt.Fprintf(errw, "\n"+logging.AnsiBase+"done provider=%s model=%s time=%s in_bytes=%d out_bytes=%d | global Σ reqs=%d rpm=%.2f"+logging.AnsiReset+"\n",
		client.Name(), model, dur.Round(time.Millisecond), sent, recv, snap.Global.Reqs, snap.RPM)
	_ = tmux.SetStatus(tmux.FormatGlobalStatusColored(snap.Global.Reqs, snap.RPM, snap.Global.Sent, snap.Global.Recv, client.Name(), model, scopeRPM, scopeReqs, snap.Window))
	return nil
}
// printProviderInfo writes the provider/model banner line to errw,
// substituting the client's default model when the given one is blank.
func printProviderInfo(errw io.Writer, client llm.Client, model string) {
	name := model
	if strings.TrimSpace(name) == "" {
		name = client.DefaultModel()
	}
	fmt.Fprintf(errw, logging.AnsiBase+"provider=%s model=%s"+logging.AnsiReset+"\n", client.Name(), name)
}
// newClientFromApp is a package-level indirection over
// llmutils.NewClientFromApp so tests can stub out client construction.
var newClientFromApp = llmutils.NewClientFromApp

// newClientFromConfig is a backward-compatibility alias for tests that
// reference the older helper name; it delegates to newClientFromApp.
func newClientFromConfig(cfg appconfig.App) (llm.Client, error) { return newClientFromApp(cfg) }
|