# nix-system-configurations/home-manager/ai.nix

# SPDX-FileCopyrightText: 2025 Ethan Reece <contact@ethanreece.com>
#
# SPDX-License-Identifier: MIT
{
  config,
  inputs,
  lib,
  pkgs,
  ...
}:
let
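  # Shared catalog of remote LLM settings: the OpenRouter model id (`key`), a
  # display name, the upstream provider(s) to pin, and capability flags that
  # the per-tool configs below consume.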
  llm = {
    remote = {
      key = "openrouter";
      name = "OpenRouter";
      url = "https://openrouter.ai/api/v1";
      env = "OPENROUTER_API_KEY";
      models = {
        fast = {
          key = "moonshotai/kimi-k2:free";
          name = "Kimi K2 (free)";
          provider = [ "chutes/fp8" ];
          tools = true;
          tool_call = false;
          reasoning = false;
          temperature = true;
          vision = true;
        };
        coding = {
          key = "moonshotai/kimi-k2";
          name = "Kimi K2";
          provider = [ "groq" ];
          tools = true;
          tool_call = false;
          reasoning = false;
          temperature = true;
          vision = true;
        };
        reasoning = {
          key = "deepseek/deepseek-r1-0528:free";
          name = "R1 (free)";
          provider = [ "chutes" ];
          tools = true;
          tool_call = false;
          reasoning = true;
          temperature = true;
          vision = true;
        };
      };
    };
  };
in
{
  imports = [ ./secrets.nix ];
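  # sops-nix decrypts the OpenRouter API key to a file at activation; the
  # secret name must stay in the "${llm.remote.key}_api_key" form referenced
  # below.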
  sops.secrets = {
    openrouter_api_key = { };
  };
  home = {
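    # Convenience aliases that pin a provider:model pair per task, e.g.
    #   aichat_fast_remote "explain this error"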
    shellAliases = {
      aichat_reasoning_remote = "${config.programs.aichat.package}/bin/aichat --model ${llm.remote.key}:${llm.remote.models.reasoning.key}";
      aichat_fast_remote = "${config.programs.aichat.package}/bin/aichat --model ${llm.remote.key}:${llm.remote.models.fast.key}";
      codex_remote = "${config.programs.codex.package}/bin/codex --profile fast_remote";
    };
  };
  programs = {
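    # Export the API key for Nushell by reading the decrypted secret file at
    # shell startup; mkNushellInline emits the value as Nushell code rather
    # than a quoted literal.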
    nushell.environmentVariables = {
      ${llm.remote.env} = lib.hm.nushell.mkNushellInline "cat ${
        config.sops.secrets."${llm.remote.key}_api_key".path
      }";
    };
    codex = {
      enable = true;
      # custom-instructions = ''
      # ## 10. Applying Patch Files with patch
      # When the built-in `apply_patch` tool or `git apply` fails to apply a diff/patch file (especially if the file being patched contains special characters that might confuse simpler patch tools), the standard `patch` utility can be a more robust alternative.
      # - **Patch File Format**: Ensure your patch file is in a standard unified diff format. Typically, these patches are generated with `git diff > my_feature.patch` or manually crafted. If the patch refers to files with `a/` and `b/` prefixes (e.g., `--- a/file.txt`, `+++ b/file.txt`), you'll use the `-p1` option.
      # - **Creating the Patch File**: You can create a patch file using shell redirection, for example:
      # ```bash
      # cat <<'EOF' > fix_descriptive_name.patch
      # --- a/path/to/your/file.ext
      # +++ b/path/to/your/file.ext
      # @@ -line_num,num_lines +line_num,num_lines @@ context_or_change
      # -old_line_content
      # +new_line_content
      # EOF
      # ```
      # *Important*: Ensure the `EOF` marker is on its own line with no trailing spaces.
      # - **Applying the Patch**: Use the `patch` command via the `shell` tool. The `-p1` option strips the leading component from file paths in the patch file (`a/`, `b/`).
      # ```
      # # Example: Apply a patch file
      # default_api.shell(command=["sh", "-c", "patch -p1 < fix_descriptive_name.patch"])
      # ```
      # - **Verification**: After applying, always verify that the target file has been changed as expected (e.g., using `cat` or `git diff`).
      # - **Cleanup**: Remove the patch file if it's no longer needed:
      # ```
      # default_api.shell(command=["rm", "fix_descriptive_name.patch"])
      # ```
      # '';
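      # Codex talks to OpenRouter as a generic OpenAI-compatible backend;
      # env_key names the environment variable it reads the API key from.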
      settings = {
        model = llm.remote.models.fast.key;
        model_provider = llm.remote.key;
        model_providers = {
          ${llm.remote.key} = {
            name = llm.remote.name;
            base_url = llm.remote.url;
            env_key = llm.remote.env;
          };
        };
      };
    };
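    # opencode registers OpenRouter via the AI SDK's generic OpenAI-compatible
    # provider; the {env:...} placeholder defers API-key lookup to runtime.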
    opencode = {
      enable = true;
      settings = {
        "$schema" = "https://opencode.ai/config.json";
        provider = {
          ${llm.remote.key} = {
            npm = "@ai-sdk/openai-compatible";
            name = llm.remote.name;
            options = {
              baseURL = llm.remote.url;
              apiKey = "{env:${llm.remote.env}}";
            };
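            # One opencode entry per catalog model; the OpenRouter provider
            # pin routes requests to the listed backend(s) with fallbacks
            # disabled.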
            models = lib.mapAttrs' (
              _role: model:
              lib.nameValuePair model.key {
                id = model.key;
                name = model.name;
                options = {
                  tools = model.tools;
                  ${llm.remote.name} = {
                    provider = {
                      order = model.provider;
                      allow_fallbacks = false;
                    };
                  };
                };
                tool_call = model.tool_call;
                reasoning = model.reasoning;
                temperature = model.temperature;
              }
            ) llm.remote.models;
          };
        };
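        # Default model, in opencode's "provider:model" form.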
        model = "${llm.remote.key}:${llm.remote.models.fast.key}";
      };
    };
    aichat = {
      enable = true;
      settings = {
        model = "${llm.remote.key}:${llm.remote.models.fast.key}";
        clients = [
          # {
          #   type = "openai-compatible";
          #   name = "ollama";
          #   api_base = "http://localhost:11434/v1";
          #   models = [
          #     {
          #       name = "${localFastModel}";
          #       supports_function_calling = true;
          #       supports_vision = true;
          #     }
          #     {
          #       name = "${localReasoningModel}";
          #       supports_function_calling = true;
          #       supports_vision = true;
          #     }
          #   ];
          # }
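          # NB: localFastModel/localReasoningModel are not defined in this
          # file; define them before re-enabling the Ollama client above.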
          {
            type = "openai-compatible";
            name = llm.remote.key;
            api_base = llm.remote.url;
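            # Derive the model list from the shared catalog so capability
            # flags stay in sync with the opencode config above.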
            models = map (model: {
              name = model.key;
              supports_function_calling = model.tools;
              supports_vision = model.vision;
            }) (lib.attrValues llm.remote.models);
          }
        ];
      };
    };
  };
}