|
@@ -6,7 +6,11 @@
 }:
 let
   proxyConfig = import ../../../lib/proxy.nix { inherit lib pkgs; };
+  mcp = import ../../../modules/ai/mcp.nix { inherit pkgs lib config; };
   codex_home = "${config.xdg.configHome}/codex";
+  codexMcpToml = builtins.readFile (
+    (pkgs.formats.toml { }).generate "codex-mcp.toml" { mcp_servers = mcp.clients.codex; }
+  );
   # codex_config_file = "${codex_home}/config.toml";
   # like commands in other agents
   # prompts_dir = "${codex_home}/prompts";
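The MCP server list is no longer written inline in the TOML string below; it is pulled from the shared modules/ai/mcp.nix and rendered with pkgs.formats.toml. That module is not part of this diff, so the shape of mcp.clients.codex is an assumption here, illustrated with the chromedev server that the old inline config (removed further down in this diff) declared. A minimal sketch of what the generator consumes and roughly what codexMcpToml ends up holding:

    # Hypothetical shape of mcp.clients.codex; the real definition lives in
    # modules/ai/mcp.nix, which this commit does not show.
    {
      chromedev = {
        command = "bunx";
        args = [ "chrome-devtools-mcp@latest" "--browser-url=http://127.0.0.1:9222" ];
      };
    }

    # (pkgs.formats.toml { }).generate "codex-mcp.toml" { mcp_servers = <attrset above>; }
    # then renders roughly:
    #
    #   [mcp_servers.chromedev]
    #   args = ["chrome-devtools-mcp@latest", "--browser-url=http://127.0.0.1:9222"]
    #   command = "bunx"

Note that builtins.readFile on the generated file is import-from-derivation: the TOML is built during evaluation so that it is available as a plain string for the ${codexMcpToml} interpolation further down.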
|
@@ -30,15 +34,34 @@
       source = ./instructions;
       recursive = true;
     };
+    "codex/skills" = {
+      source = ../../../../conf/claude-local-marketplace/skills;
+      recursive = true;
+    };
     # toml
-    "codex/config.toml".text = ''
-      model = "gpt-5"
-      model_provider = "litellm"
+    "codex/config-generated.toml".text = ''
+      model = "gpt-5.2-medium"
+      model_provider = "packy"
       approval_policy = "untrusted"
-      model_reasoning_effort = "low"
+      model_reasoning_effort = "medium"
       # the AGENTS.md contains instructions for using codex mcp, do not use it
       # experimental_instructions_file = "${config.xdg.configHome}/AGENTS.md"
-      sandbox_mode = "read-only"
+      project_doc_fallback_filenames = ["CLAUDE.md"]
+      sandbox_mode = "workspace-write"
+
+      [features]
+      tui2 = true
+      skills = true
+      unified_exec = true
+      apply_patch_freeform = true
+      view_image_tool = false
+      ghost_commit = false
+
+      [model_providers.packy]
+      name = "packy"
+      wire_api = "responses"
+      base_url = "https://www.packyapi.com/v1"
+      env_key = "PACKYCODE_CODEX_API_KEY"

       [model_providers.litellm]
       name = "litellm"
|
@@ -104,31 +127,17 @@
       hide_agent_reasoning = true
       model_verbosity = "low"

-      [profiles.sage_slow]
-      model = "glm-4.6"
-      model_provider = "zhipuai-coding-plan"
-      sandbox_mode = "read-only"
-      experimental_instructions_file = "${codex_home}/instructions/sage-role.md"
-      approval_policy = "never"
-      model_reasoning_effort = "medium"
-      model_reasoning_summary = "concise"
-      hide_agent_reasoning = true
-      model_verbosity = "low"
-
-      [profiles.sage]
-      model = "kimi-k2-turbo-preview"
-      model_provider = "moonshot"
-      sandbox_mode = "read-only"
-      experimental_instructions_file = "${codex_home}/instructions/sage-role.md"
-      approval_policy = "never"
-      model_reasoning_effort = "low"
-      model_reasoning_summary = "concise"
-      hide_agent_reasoning = true
-      model_verbosity = "medium"
-
       [tui]
       # notifications = [ "agent-turn-complete", "approval-requested" ]
       notifications = true
+      animations = false
+      scroll_events_per_tick = 3
+      scroll_wheel_lines = 3
+      scroll_mode = "auto"
+
+      [sandbox_workspace_write]
+      network_access = true
+      writable_roots = ["${config.home.homeDirectory}/workspace/work"]

       [shell_environment_policy]
       inherit = "core"
|
@@ -140,26 +149,18 @@
       set = { HTTP_PROXY = "${proxyConfig.proxies.http}", HTTPS_PROXY = "${proxyConfig.proxies.https}" }

       ## MCP
-      [mcp_servers.chromedev]
-      command = "bunx"
-      args = ["chrome-devtools-mcp@latest", "--browser-url=http://127.0.0.1:9222"]
-
-      # [mcp_servers.context7]
-      # command = "bunx"
-      # args = ["@upstash/context7-mcp"]
-
-      # [mcp_servers.mermaid]
-      # command = "bunx"
-      # args = ["@devstefancho/mermaid-mcp"]
-
-      # [mcp_servers.sequentialthinking]
-      # command = "bunx"
-      # args = ["@modelcontextprotocol/server-sequential-thinking"]
-
-      # [mcp_servers.github]
-      # command = "github-mcp-server"
-      # args = ["stdio", "--dynamic-toolsets"]
-      # env = { GITHUB_PERSONAL_ACCESS_TOKEN = "${pkgs.nix-priv.keys.github.accessToken}" }
+      ${codexMcpToml}
+    '';
+  };
+
+  home.activation = {
+    setupCodexConfig = lib.hm.dag.entryAfter [ "writeBoundary" ] ''
+      CODEX_HOME="${codex_home}"
+
+      cp -f "${codex_home}/config-generated.toml" "${codex_home}/config.toml"
+      chmod u+w "${codex_home}/config.toml"
+
+      cat ${../../../../conf/llm/docs/coding-rules.md} > "${codex_home}/AGENTS.md"
     '';
   };
 }
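Codex's live config.toml is deliberately not a store symlink: home-manager manages codex/config-generated.toml, and the activation hook copies it into a writable config.toml (the chmod suggests the codex CLI is expected to modify the file at runtime) and regenerates AGENTS.md from the shared coding-rules document on every activation. For comparison, a sketch of doing the copy only when the generated file actually changes, using home-manager's onChange hook on the file entry (an alternative wiring, not what this commit does; it assumes the TOML string above is bound to a hypothetical codexConfigToml variable):

    # Sketch only: re-copy the config when the generated file changes rather than
    # on every activation.
    xdg.configFile."codex/config-generated.toml" = {
      text = codexConfigToml; # hypothetical binding holding the TOML built above
      onChange = ''
        cp -f "${codex_home}/config-generated.toml" "${codex_home}/config.toml"
        chmod u+w "${codex_home}/config.toml"
      '';
    };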