Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions codex-rs/Cargo.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

2 changes: 2 additions & 0 deletions codex-rs/common/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,3 +29,5 @@ mod config_summary;
pub use config_summary::create_config_summary_entries;
// Shared fuzzy matcher (used by TUI selection popups and other UI filtering)
pub mod fuzzy_match;
// Shared model presets used by TUI and MCP server
pub mod model_presets;
54 changes: 54 additions & 0 deletions codex-rs/common/src/model_presets.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
use codex_core::protocol_config_types::ReasoningEffort;

/// A simple preset pairing a model slug with a reasoning effort.
///
/// All fields are `&'static str` / `Copy` data so presets can live in a
/// `const` table and be handed out by reference without allocation.
#[derive(Debug, Clone, Copy)]
pub struct ModelPreset {
    /// Stable identifier for the preset. Used programmatically (e.g. to
    /// round-trip a selection); must not change once shipped.
    pub id: &'static str,
    /// Display label shown in UIs.
    pub label: &'static str,
    /// Short human description shown next to the label in UIs.
    pub description: &'static str,
    /// Model slug (e.g., "gpt-5") sent to the backend.
    pub model: &'static str,
    /// Reasoning effort to apply for this preset.
    pub effort: ReasoningEffort,
}

/// Built-in list of model presets that pair a model with a reasoning effort.
///
/// Keep this UI-agnostic so it can be reused by both TUI and MCP server.
/// The returned slice is static data; callers may hold it for the program's
/// lifetime and index into it freely.
pub fn builtin_model_presets() -> &'static [ModelPreset] {
    // Presets are ordered by increasing reasoning effort (minimal -> high).
    const MODEL_PRESETS: [ModelPreset; 4] = [
        ModelPreset {
            id: "gpt-5-minimal",
            label: "gpt-5 minimal",
            description: "— Fastest responses with very limited reasoning; ideal for coding, instructions, or lightweight tasks.",
            model: "gpt-5",
            effort: ReasoningEffort::Minimal,
        },
        ModelPreset {
            id: "gpt-5-low",
            label: "gpt-5 low",
            description: "— Balances speed with some reasoning; useful for straightforward queries and short explanations.",
            model: "gpt-5",
            effort: ReasoningEffort::Low,
        },
        ModelPreset {
            id: "gpt-5-medium",
            label: "gpt-5 medium",
            description: "— Default setting; provides a solid balance of reasoning depth and latency for general-purpose tasks.",
            model: "gpt-5",
            effort: ReasoningEffort::Medium,
        },
        ModelPreset {
            id: "gpt-5-high",
            label: "gpt-5 high",
            description: "— Maximizes reasoning depth for complex or ambiguous problems.",
            model: "gpt-5",
            effort: ReasoningEffort::High,
        },
    ];
    &MODEL_PRESETS
}
9 changes: 3 additions & 6 deletions codex-rs/config.md
Original file line number Diff line number Diff line change
Expand Up @@ -217,17 +217,14 @@ Users can specify config values at multiple levels. Order of precedence is as fo

## model_reasoning_effort

If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:
If the selected model is known to support reasoning (for example: `o3`, `o4-mini`, `codex-*`, `gpt-5`), reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning), this can be set to:

- `"minimal"`
- `"low"`
- `"medium"` (default)
- `"high"`

To disable reasoning, set `model_reasoning_effort` to `"none"` in your config:

```toml
model_reasoning_effort = "none" # disable reasoning
```
Note: to minimize reasoning, choose `"minimal"`.

## model_reasoning_summary

Expand Down
4 changes: 2 additions & 2 deletions codex-rs/core/src/config.rs
Original file line number Diff line number Diff line change
Expand Up @@ -140,8 +140,8 @@ pub struct Config {
/// When this program is invoked, arg0 will be set to `codex-linux-sandbox`.
pub codex_linux_sandbox_exe: Option<PathBuf>,

/// If not "none", the value to use for `reasoning.effort` when making a
/// request using the Responses API.
/// Value to use for `reasoning.effort` when making a request using the
/// Responses API.
pub model_reasoning_effort: ReasoningEffort,

/// If not "none", the value to use for `reasoning.summary` when making a
Expand Down
1 change: 1 addition & 0 deletions codex-rs/mcp-server/Cargo.toml
Original file line number Diff line number Diff line change
Expand Up @@ -17,6 +17,7 @@ workspace = true
[dependencies]
anyhow = "1"
codex-arg0 = { path = "../arg0" }
codex-common = { path = "../common" }
codex-core = { path = "../core" }
codex-login = { path = "../login" }
codex-protocol = { path = "../protocol" }
Expand Down
7 changes: 4 additions & 3 deletions codex-rs/protocol/src/config_types.rs
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
use serde::Deserialize;
use serde::Serialize;
use strum_macros::Display;
use strum_macros::EnumIter;
use ts_rs::TS;

/// See https://platform.openai.com/docs/guides/reasoning?api-mode=responses#get-started-with-reasoning
#[derive(Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display, TS)]
#[derive(
Debug, Serialize, Deserialize, Default, Clone, Copy, PartialEq, Eq, Display, TS, EnumIter,
)]
#[serde(rename_all = "lowercase")]
#[strum(serialize_all = "lowercase")]
pub enum ReasoningEffort {
Expand All @@ -13,8 +16,6 @@ pub enum ReasoningEffort {
#[default]
Medium,
High,
/// Option to disable reasoning.
Copy link
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Need to update config.md to reflect this change.

None,
}

/// A summary of the reasoning performed by the model. This can be useful for
Expand Down
15 changes: 15 additions & 0 deletions codex-rs/tui/src/app.rs
Original file line number Diff line number Diff line change
Expand Up @@ -382,6 +382,11 @@ impl App<'_> {
self.app_event_tx.send(AppEvent::CodexOp(Op::Compact));
}
}
SlashCommand::Model => {
if let AppState::Chat { widget } = &mut self.app_state {
widget.open_model_popup();
}
}
SlashCommand::Quit => {
break;
}
Expand Down Expand Up @@ -499,6 +504,16 @@ impl App<'_> {
widget.apply_file_search_result(query, matches);
}
}
AppEvent::UpdateReasoningEffort(effort) => {
if let AppState::Chat { widget } = &mut self.app_state {
widget.set_reasoning_effort(effort);
}
}
AppEvent::UpdateModel(model) => {
if let AppState::Chat { widget } = &mut self.app_state {
widget.set_model(model);
}
}
}
}
terminal.clear()?;
Expand Down
7 changes: 7 additions & 0 deletions codex-rs/tui/src/app_event.rs
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,7 @@ use std::time::Duration;

use crate::app::ChatWidgetArgs;
use crate::slash_command::SlashCommand;
use codex_core::protocol_config_types::ReasoningEffort;

#[allow(clippy::large_enum_variant)]
#[derive(Debug)]
Expand Down Expand Up @@ -63,4 +64,10 @@ pub(crate) enum AppEvent {
/// Onboarding: result of login_with_chatgpt.
OnboardingAuthComplete(Result<(), String>),
OnboardingComplete(ChatWidgetArgs),

/// Update the current reasoning effort in the running app and widget.
UpdateReasoningEffort(ReasoningEffort),

/// Update the current model slug in the running app and widget.
UpdateModel(String),
}
5 changes: 4 additions & 1 deletion codex-rs/tui/src/bottom_pane/command_popup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -71,13 +71,16 @@ impl CommandPopup {
for (_, cmd) in self.all_commands.iter() {
out.push((cmd, None, 0));
}
// Keep the original presentation order when no filter is applied.
return out;
} else {
for (_, cmd) in self.all_commands.iter() {
if let Some((indices, score)) = fuzzy_match(cmd.command(), filter) {
out.push((cmd, Some(indices), score));
}
}
}
// When filtering, sort by ascending score and then by command for stability.
out.sort_by(|a, b| a.2.cmp(&b.2).then_with(|| a.0.command().cmp(b.0.command())));
out
}
Expand Down Expand Up @@ -128,7 +131,7 @@ impl WidgetRef for CommandPopup {
})
.collect()
};
render_rows(area, buf, &rows_all, &self.state, MAX_POPUP_ROWS);
render_rows(area, buf, &rows_all, &self.state, MAX_POPUP_ROWS, false);
}
}

Expand Down
4 changes: 2 additions & 2 deletions codex-rs/tui/src/bottom_pane/file_search_popup.rs
Original file line number Diff line number Diff line change
Expand Up @@ -134,9 +134,9 @@ impl WidgetRef for &FileSearchPopup {

if self.waiting && rows_all.is_empty() {
// Render a minimal waiting stub using the shared renderer (no rows -> "no matches").
render_rows(area, buf, &[], &self.state, MAX_POPUP_ROWS);
render_rows(area, buf, &[], &self.state, MAX_POPUP_ROWS, false);
} else {
render_rows(area, buf, &rows_all, &self.state, MAX_POPUP_ROWS);
render_rows(area, buf, &rows_all, &self.state, MAX_POPUP_ROWS, false);
}
}
}
Loading
Loading