Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions codex-rs/app-server/tests/common/models_cache.rs
Original file line number Diff line number Diff line change
Expand Up @@ -42,6 +42,7 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo {
}
}

// todo(aibrahim): fix the priorities to be the opposite here.
/// Write a models_cache.json file to the codex home directory.
/// This prevents ModelsManager from making network requests to refresh models.
/// The cache will be treated as fresh (within TTL) and used instead of fetching from the network.
Expand Down
51 changes: 50 additions & 1 deletion codex-rs/app-server/tests/suite/v2/model_list.rs
Original file line number Diff line number Diff line change
Expand Up @@ -176,6 +176,33 @@ async fn list_models_returns_all_models_with_large_limit() -> Result<()> {
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
Model {
id: "caribou".to_string(),
model: "caribou".to_string(),
display_name: "caribou".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
supported_reasoning_efforts: vec![
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks"
.to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex problems".to_string(),
},
ReasoningEffortOption {
reasoning_effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
default_reasoning_effort: ReasoningEffort::Medium,
is_default: false,
},
];

assert_eq!(items, expected_models);
Expand Down Expand Up @@ -299,7 +326,29 @@ async fn list_models_pagination_works() -> Result<()> {

assert_eq!(fifth_items.len(), 1);
assert_eq!(fifth_items[0].id, "gpt-5.1-codex-max");
assert!(fifth_cursor.is_none());
let sixth_cursor = fifth_cursor.ok_or_else(|| anyhow!("cursor for sixth page"))?;

let sixth_request = mcp
.send_list_models_request(ModelListParams {
limit: Some(1),
cursor: Some(sixth_cursor.clone()),
})
.await?;

let sixth_response: JSONRPCResponse = timeout(
DEFAULT_TIMEOUT,
mcp.read_stream_until_response_message(RequestId::Integer(sixth_request)),
)
.await??;

let ModelListResponse {
data: sixth_items,
next_cursor: sixth_cursor,
} = to_response::<ModelListResponse>(sixth_response)?;

assert_eq!(sixth_items.len(), 1);
assert_eq!(sixth_items[0].id, "caribou");
assert!(sixth_cursor.is_none());
Ok(())
}

Expand Down
14 changes: 14 additions & 0 deletions codex-rs/core/src/openai_models/model_family.rs
Original file line number Diff line number Diff line change
Expand Up @@ -294,6 +294,20 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily {
)

// Production models.
} else if slug.starts_with("caribou") {
// Same as gpt-5.1-codex-max.
model_family!(
slug, slug,
supports_reasoning_summaries: true,
reasoning_summary_format: ReasoningSummaryFormat::Experimental,
base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(),
apply_patch_tool_type: Some(ApplyPatchToolType::Freeform),
shell_type: ConfigShellToolType::ShellCommand,
supports_parallel_tool_calls: true,
support_verbosity: false,
truncation_policy: TruncationPolicy::Tokens(10_000),
context_window: Some(CONTEXT_WINDOW_272K),
)
} else if slug.starts_with("gpt-5.1-codex-max") {
model_family!(
slug, slug,
Expand Down
73 changes: 57 additions & 16 deletions codex-rs/core/src/openai_models/model_presets.rs
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,35 @@ pub const HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG: &str =

static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
vec![
ModelPreset {
id: "caribou".to_string(),
model: "caribou".to_string(),
display_name: "caribou".to_string(),
description: "Latest Codex-optimized flagship for deep and fast reasoning.".to_string(),
default_reasoning_effort: ReasoningEffort::Medium,
supported_reasoning_efforts: vec![
ReasoningEffortPreset {
effort: ReasoningEffort::Low,
description: "Fast responses with lighter reasoning".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::Medium,
description: "Balances speed and reasoning depth for everyday tasks".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::High,
description: "Greater reasoning depth for complex problems".to_string(),
},
ReasoningEffortPreset {
effort: ReasoningEffort::XHigh,
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
is_default: true,
upgrade: None,
show_in_picker: true,
supported_in_api: false,
},
ModelPreset {
id: "gpt-5.1-codex-max".to_string(),
model: "gpt-5.1-codex-max".to_string(),
Expand All @@ -35,9 +64,14 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
description: "Extra high reasoning depth for complex problems".to_string(),
},
],
is_default: true,
upgrade: None,
is_default: false,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1-codex".to_string(),
Expand All @@ -62,11 +96,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1-codex-mini".to_string(),
Expand All @@ -86,12 +121,9 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
}),
upgrade: None,
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.2".to_string(),
Expand All @@ -118,8 +150,13 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
},
],
is_default: false,
upgrade: None,
upgrade: Some(ModelUpgrade {
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5.1".to_string(),
Expand All @@ -143,11 +180,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: true,
supported_in_api: true,
},
// Deprecated models.
ModelPreset {
Expand All @@ -172,11 +210,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: false,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5-codex-mini".to_string(),
Expand All @@ -201,6 +240,7 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG.to_string(),
}),
show_in_picker: false,
supported_in_api: true,
},
ModelPreset {
id: "gpt-5".to_string(),
Expand Down Expand Up @@ -228,11 +268,12 @@ static PRESETS: Lazy<Vec<ModelPreset>> = Lazy::new(|| {
],
is_default: false,
upgrade: Some(ModelUpgrade {
id: "gpt-5.1-codex-max".to_string(),
id: "caribou".to_string(),
reasoning_effort_mapping: None,
migration_config_key: HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG.to_string(),
migration_config_key: "caribou".to_string(),
}),
show_in_picker: false,
supported_in_api: true,
},
]
});
Expand Down
52 changes: 36 additions & 16 deletions codex-rs/core/src/openai_models/models_manager.rs
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,8 @@ use crate::openai_models::model_presets::builtin_model_presets;

const MODEL_CACHE_FILE: &str = "models_cache.json";
const DEFAULT_MODEL_CACHE_TTL: Duration = Duration::from_secs(300);
const OPENAI_DEFAULT_MODEL: &str = "gpt-5.1-codex-max";
const OPENAI_DEFAULT_API_MODEL: &str = "gpt-5.1-codex-max";
const OPENAI_DEFAULT_CHATGPT_MODEL: &str = "caribou";
const CODEX_AUTO_BALANCED_MODEL: &str = "codex-auto-balanced";

/// Coordinates remote model discovery plus cached metadata on disk.
Expand Down Expand Up @@ -110,12 +111,12 @@ impl ModelsManager {
if let Err(err) = self.refresh_available_models(config).await {
error!("failed to refresh available models: {err}");
}
let remote_models = self.remote_models.read().await.clone();
let remote_models = self.remote_models(config).await;
self.build_available_models(remote_models)
}

pub fn try_list_models(&self) -> Result<Vec<ModelPreset>, TryLockError> {
let remote_models = self.remote_models.try_read()?.clone();
pub fn try_list_models(&self, config: &Config) -> Result<Vec<ModelPreset>, TryLockError> {
let remote_models = self.try_get_remote_models(config)?;
Ok(self.build_available_models(remote_models))
}

Expand All @@ -126,7 +127,7 @@ impl ModelsManager {
/// Look up the requested model family while applying remote metadata overrides.
pub async fn construct_model_family(&self, model: &str, config: &Config) -> ModelFamily {
Self::find_family_for_model(model)
.with_remote_overrides(self.remote_models.read().await.clone())
.with_remote_overrides(self.remote_models(config).await)
.with_config_overrides(config)
}

Expand All @@ -139,21 +140,23 @@ impl ModelsManager {
}
// if codex-auto-balanced exists & signed in with chatgpt mode, return it, otherwise return the default model
let auth_mode = self.auth_manager.get_auth_mode();
let remote_models = self.remote_models.read().await.clone();
let remote_models = self.remote_models(config).await;
if auth_mode == Some(AuthMode::ChatGPT)
&& self
.build_available_models(remote_models)
.iter()
.any(|m| m.model == CODEX_AUTO_BALANCED_MODEL)
{
return CODEX_AUTO_BALANCED_MODEL.to_string();
} else if auth_mode == Some(AuthMode::ChatGPT) {
return OPENAI_DEFAULT_CHATGPT_MODEL.to_string();
}
OPENAI_DEFAULT_MODEL.to_string()
OPENAI_DEFAULT_API_MODEL.to_string()
}

#[cfg(any(test, feature = "test-support"))]
pub fn get_model_offline(model: Option<&str>) -> String {
model.unwrap_or(OPENAI_DEFAULT_MODEL).to_string()
model.unwrap_or(OPENAI_DEFAULT_CHATGPT_MODEL).to_string()
}

#[cfg(any(test, feature = "test-support"))]
Expand Down Expand Up @@ -217,7 +220,7 @@ impl ModelsManager {
let remote_presets: Vec<ModelPreset> = remote_models.into_iter().map(Into::into).collect();
let existing_presets = self.local_models.clone();
let mut merged_presets = Self::merge_presets(remote_presets, existing_presets);
merged_presets = Self::filter_visible_models(merged_presets);
merged_presets = self.filter_visible_models(merged_presets);

let has_default = merged_presets.iter().any(|preset| preset.is_default);
if let Some(default) = merged_presets.first_mut()
Expand All @@ -229,10 +232,11 @@ impl ModelsManager {
merged_presets
}

fn filter_visible_models(models: Vec<ModelPreset>) -> Vec<ModelPreset> {
fn filter_visible_models(&self, models: Vec<ModelPreset>) -> Vec<ModelPreset> {
let chatgpt_mode = self.auth_manager.get_auth_mode() == Some(AuthMode::ChatGPT);
models
.into_iter()
.filter(|model| model.show_in_picker)
.filter(|model| model.show_in_picker && (chatgpt_mode || model.supported_in_api))
.collect()
}

Expand Down Expand Up @@ -261,6 +265,22 @@ impl ModelsManager {
merged_presets
}

async fn remote_models(&self, config: &Config) -> Vec<ModelInfo> {
    // Remote model metadata is only consulted when the RemoteModels feature
    // flag is enabled; otherwise behave as if nothing was ever fetched.
    if !config.features.enabled(Feature::RemoteModels) {
        return Vec::new();
    }
    self.remote_models.read().await.clone()
}

/// Non-blocking counterpart of `remote_models`: returns `Err(TryLockError)`
/// when the lock is currently contended instead of awaiting it.
fn try_get_remote_models(&self, config: &Config) -> Result<Vec<ModelInfo>, TryLockError> {
    // With the feature flag off, the remote cache is treated as empty and the
    // lock is never touched, so this path cannot fail.
    if !config.features.enabled(Feature::RemoteModels) {
        return Ok(Vec::new());
    }
    let models = self.remote_models.try_read()?.clone();
    Ok(models)
}

fn cache_path(&self) -> PathBuf {
self.codex_home.join(MODEL_CACHE_FILE)
}
Expand Down Expand Up @@ -393,7 +413,7 @@ mod tests {
.refresh_available_models(&config)
.await
.expect("refresh succeeds");
let cached_remote = manager.remote_models.read().await.clone();
let cached_remote = manager.remote_models(&config).await;
assert_eq!(cached_remote, remote_models);

let available = manager.list_models(&config).await;
Expand Down Expand Up @@ -455,7 +475,7 @@ mod tests {
.await
.expect("first refresh succeeds");
assert_eq!(
*manager.remote_models.read().await,
manager.remote_models(&config).await,
remote_models,
"remote cache should store fetched models"
);
Expand All @@ -466,7 +486,7 @@ mod tests {
.await
.expect("cached refresh succeeds");
assert_eq!(
*manager.remote_models.read().await,
manager.remote_models(&config).await,
remote_models,
"cache path should not mutate stored models"
);
Expand Down Expand Up @@ -537,7 +557,7 @@ mod tests {
.await
.expect("second refresh succeeds");
assert_eq!(
*manager.remote_models.read().await,
manager.remote_models(&config).await,
updated_models,
"stale cache should trigger refetch"
);
Expand Down Expand Up @@ -602,7 +622,7 @@ mod tests {
.expect("second refresh succeeds");

let available = manager
.try_list_models()
.try_list_models(&config)
.expect("models should be available");
assert!(
available.iter().any(|preset| preset.model == "remote-new"),
Expand Down
Loading
Loading