| Field | Value | Timestamp |
|---|---|---|
| author | Paul Buetow <paul@buetow.org> | 2026-04-26 09:06:07 +0300 |
| committer | Paul Buetow <paul@buetow.org> | 2026-04-26 09:06:07 +0300 |
| commit | 3d3c56f40d9b37afda4d3d5b0ecf10aab190ddfc | |
| tree | ffbe835ee9ffa26d0b320946fa63809c6f34c87c | |
| parent | e917a81fd555668431c8d9b415cf6237d681df53 | |
Add hexai config to dotfiles (Linux only)
Manage ~/.config/hexai/config.toml from the dotfiles repo, mirroring how
helix, lazygit, and opencode are handled. The home_hexai task is
gated to Linux because hexai is currently only used on the Linux
workstation.
The deployed config sets Ollama Cloud (kimi-k2.6 at https://ollama.com)
as the default provider and uses gemma4:31b for in-code completion.
The API key is read from $HEXAI_OLLAMA_API_KEY (or $OLLAMA_API_KEY).
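The key itself is deliberately not part of the repo. As a hedged sketch of how it might be provided, assuming it is exported from a local shell profile (the file it lives in and the placeholder value are hypothetical; only the variable names come from the config):

```sh
# Hypothetical example: where this export lives and the actual key value are
# left to the local machine and are not tracked in the dotfiles repo.
export HEXAI_OLLAMA_API_KEY="<ollama-cloud-api-key>"
# OLLAMA_API_KEY is used as a fallback if the hexai-specific variable is unset:
# export OLLAMA_API_KEY="<ollama-cloud-api-key>"
```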
Co-Authored-By: Claude Opus 4.7 (1M context) <noreply@anthropic.com>
| Mode | File | Lines changed |
|---|---|---|
| -rw-r--r-- | Rexfile | 10 |
| -rw-r--r-- | hexai/config.toml | 29 |

2 files changed, 39 insertions(+), 0 deletions(-)
```diff
diff --git a/Rexfile b/Rexfile
--- a/Rexfile
+++ b/Rexfile
@@ -146,6 +146,16 @@ task 'home_helix', sub { ensure "$DOT/helix/*" => "$HOME/.config/helix/" };
 
 desc 'Install ~/.config/ghostty';
 task 'home_ghostty', sub { ensure "$DOT/ghostty/*" => "$HOME/.config/ghostty/" };
 
+desc 'Install ~/.config/hexai (Linux only)';
+task 'home_hexai', sub {
+    if ( $^O eq 'linux' ) {
+        ensure "$DOT/hexai/*" => "$HOME/.config/hexai/";
+    }
+    else {
+        Rex::Logger::info( 'Skipping hexai configuration (not on Linux)', 'warn' );
+    }
+};
+
 desc 'Install ~/.config/lazygit';
 task 'home_lazygit', sub { ensure "$DOT/lazygit/*" => "$HOME/.config/lazygit/" };
diff --git a/hexai/config.toml b/hexai/config.toml
new file mode 100644
index 0000000..388faf2
--- /dev/null
+++ b/hexai/config.toml
@@ -0,0 +1,29 @@
+# Hexai Configuration File
+
+[mcp]
+# Directory where MCP prompts are stored
+prompts_dir = "~/.local/hexai/data/prompts"
+
+# Enable automatic syncing of MCP prompts to slash command files
+slashcommand_sync = true
+
+# Directory where slash command files will be written
+slashcommand_dir = "~/.cursor/commands"
+
+# Default LLM provider for chat / CLI / code actions. The API key is read from
+# the environment (HEXAI_OLLAMA_API_KEY, falling back to OLLAMA_API_KEY).
+[provider]
+name = "ollama"
+
+[ollama]
+model = "kimi-k2.6"
+base_url = "https://ollama.com"
+temperature = 0.2
+
+# In-code auto-completion uses gemma4:31b (the dense larger Gemma 4 on Ollama
+# Cloud). It's faster and tighter than kimi-k2.6 for short, latency-sensitive
+# completions while everything else still defaults to kimi-k2.6 above.
+[[models.completion]]
+provider = "ollama"
+model = "gemma4:31b"
+temperature = 0.2
```
