# Hexai sectioned config example
[general]
max_tokens = 4000
max_context_tokens = 4000
# context_mode controls how much of the current document is sent as extra context:
# - minimal: no additional context beyond the request payload.
# - window: include a sliding window of ~context_window_lines around the cursor.
# - file-on-new-func: include the full file only when starting a new function.
# - always-full: always include the entire open file.
context_mode = "always-full"
context_window_lines = 120
coding_temperature = 0.2 # single knob for LSP calls (optional)
[logging]
# Truncation settings for values echoed into log output.
log_preview_limit = 100 # chars shown in log previews
[completion]
completion_debounce_ms = 800 # idle ms before sending a request
completion_throttle_ms = 0 # min ms between requests (0 disables)
manual_invoke_min_prefix = 0 # required identifier chars for manual invoke
[triggers]
trigger_characters = [".", ":", "/", "_", " "]
[inline]
inline_open = ">!" # marker prefix for inline prompts
inline_close = ">" # single-character
[chat]
chat_suffix = ">" # single-character
chat_prefixes = ["?", "!", ":", ";"] # single-character items
[models]
# Shorthand string form per surface (single entry)
# completion = "gpt-4o-mini"
# chat = "gpt-4.1"
# Full array form for multiple entries
# [[models.completion]]
# provider = "openai"
# model = "gpt-4o-mini"
# temperature = 0.2
#
# [[models.completion]]
# provider = "ollama"
# model = "mistral"
# temperature = 0.2
# [[models.code_action]]
# # Only the first entry is used; extras are ignored with a warning.
# provider = "copilot"
# model = "gpt-4o"
# temperature = 0.4
# [[models.cli]]
# provider = "openai"
# model = "gpt-4.1"
# temperature = 0.6
[provider]
name = "openai" # openai | openrouter | copilot | ollama | anthropic
[openai]
model = "gpt-4.1"
base_url = "https://api.openai.com/v1"
temperature = 0.2
[openrouter]
model = "openrouter/auto"
base_url = "https://openrouter.ai/api/v1"
temperature = 0.2
[copilot]
model = "gpt-4o-mini"
base_url = "https://api.githubcopilot.com"
temperature = 0.2
[ollama]
model = "qwen3-coder:30b-a3b-q4_K_M"
base_url = "http://localhost:11434"
temperature = 0.2
[anthropic]
model = "claude-3-5-sonnet-20241022"
base_url = "https://api.anthropic.com/v1"
temperature = 0.2
# Prompt templates (optional). Leave commented to use defaults.
[prompts]
[prompts.completion]
# Templates support {{file}}, {{function}}, {{above}}, {{current}}, {{below}}, {{char}}
# and for additional context: {{context}}
# system_general = "You are a terse code completion engine. Return only the code to insert, no surrounding prose or backticks. Only continue from the cursor; never repeat characters already present to the left of the cursor on the current line (e.g., if 'name :=' is already typed, only return the right-hand side expression)."
# system_params = "You are a code completion engine for function signatures. Return only the parameter list contents (without parentheses), no braces, no prose. Prefer idiomatic names and types."
# system_inline = "You are a precise code completion/refactoring engine. Output only the code to insert with no prose, no comments, and no backticks. Return raw code only."
# user_general = "Provide the next likely code to insert at the cursor.\nFile: {{file}}\nFunction/context: {{function}}\nAbove line: {{above}}\nCurrent line (cursor at character {{char}}): {{current}}\nBelow line: {{below}}\nOnly return the completion snippet."
# user_params = "Cursor is inside the function parameter list. Suggest only the parameter list (no parentheses).\nFunction line: {{function}}\nCurrent line (cursor at {{char}}): {{current}}"
# additional_context = "Additional context:\n{{context}}"
[prompts.provider_native]
# completion = "// Path: {{path}}\n{{before}}"
[prompts.chat]
# system = "You are a helpful coding assistant. Answer concisely and clearly."
[prompts.code_action]
# rewrite_system = "You are a precise code refactoring engine. Rewrite the given code strictly according to the instruction. Return only the updated code with no prose or backticks. Preserve formatting where reasonable."
# diagnostics_system = "You are a precise code fixer. Resolve the given diagnostics by editing only the selected code. Return only the corrected code with no prose or backticks. Keep behavior and style, and avoid unrelated changes."
# document_system = "You are a precise code documentation engine. Add idiomatic documentation comments to the given code. Preserve exact behavior and formatting as much as possible. Return only the updated code with comments, no prose or backticks."
# rewrite_user = "Instruction: {{instruction}}\n\nSelected code to transform:\n{{selection}}"
# diagnostics_user = "Diagnostics to resolve (selection only):\n{{diagnostics}}\n\nSelected code:\n{{selection}}"
# document_user = "Add documentation comments to this code:\n{{selection}}"
# go_test_system = "You are a precise Go unit test generator. Given a Go function, write one or more Test* functions using the testing package. Do NOT include package or imports, only the test function(s). Prefer table-driven tests. Keep it minimal and idiomatic."
# go_test_user = "Function under test:\n{{function}}"
# simplify_system = "You are a precise code improvement engine. Simplify and improve the given code while preserving behavior. Return only the improved code with no prose or backticks."
# simplify_user = "Improve this code:\n{{selection}}"
# Define additional custom code actions (optional)
# [[prompts.code_action.custom]]
# id = "extract-function" # required, unique slug (case-insensitive)
# title = "Extract function" # required, appears in LSP and tmux
# kind = "refactor.extract" # optional (default: "refactor")
# scope = "selection" # optional: selection | diagnostics (default: selection)
# hotkey = "e" # optional, single character for tmux submenu
# instruction = "Extract selected code into a new function named 'extracted' and replace with a call. Return only code, no backticks."
# [[prompts.code_action.custom]]
# id = "fix-lints"
# title = "Fix linters"
# kind = "quickfix"
# scope = "diagnostics"
# hotkey = "l"
# system = "You are a precise code fixer. Only change selected code."
# user = "Diagnostics to resolve (selection only):\n{{diagnostics}}\n\nSelected code:\n{{selection}}"
[prompts.cli]
# default_system = "You are Hexai CLI. Default to very short, concise answers. If the user asks for commands, output only the commands (one per line) with no commentary or explanation. Only when the word 'explain' appears in the prompt, produce a verbose explanation."
# explain_system = "You are Hexai CLI. The user requested an explanation. Provide a clear, verbose explanation with reasoning and details. If commands are needed, include them with brief context."
[tmux]
# custom_menu_hotkey = "a" # hotkey to open the custom actions submenu in hexai-tmux-action
[stats]
# window_minutes = 60 # sliding window for global stats (Σ@window); min 1, max 1440