Skip to content

Commit 9d02ec1

Browse files
committed
Merge remote-tracking branch 'origin/main' into copilot/fix-93d3c3b0-2ef0-424a-98a4-0f5a935d0181
# Conflicts:
#   src/config.rs
#   src/model.rs
2 parents 9e2f9dd + 401ede3 commit 9d02ec1

File tree

8 files changed

+221
-74
lines changed

8 files changed

+221
-74
lines changed

.github/workflows/copilot-setup-steps.yml

Lines changed: 20 additions & 10 deletions
Original file line number | Diff line number | Diff line change
@@ -2,10 +2,6 @@
22
name: "Copilot Setup Steps"
33

44
on:
5-
workflow_dispatch:
6-
push:
7-
paths:
8-
- .github/workflows/copilot-setup-steps.yml
95
pull_request:
106
paths:
117
- .github/workflows/copilot-setup-steps.yml
@@ -33,15 +29,29 @@ jobs:
3329
- name: Checkout code
3430
uses: actions/checkout@v5
3531

32+
- name: Setup Rust nightly toolchain
33+
uses: actions-rust-lang/setup-rust-toolchain@v1
34+
with:
35+
toolchain: nightly
36+
components: rustfmt, clippy
37+
3638
- name: Cache Rust dependencies
3739
uses: Swatinem/rust-cache@v2
3840
with:
3941
cache-on-failure: true
4042

41-
- name: Setup Rust nightly toolchain
42-
uses: actions-rust-lang/setup-rust-toolchain@v1
43+
# Fish shell is required for integration tests in tests/ directory
44+
- name: Install fish shell (for integration tests)
45+
run: sudo apt-get update && sudo apt-get install -y --no-install-recommends fish
46+
47+
- name: Install git-ai binary
48+
run: cargo install --path . --debug
49+
50+
- name: Setup git-ai configuration
51+
run: |
52+
git ai hook install
53+
git ai config set openai-api-key ${{ secrets.OPENAI_API_KEY }}
54+
git ai config set model gpt-4.1-nano
4355
44-
- run: cargo install --path . --debug
45-
- run: git ai hook install
46-
- run: git ai config set openai-api-key ${{ secrets.OPENAI_API_KEY }}
47-
- run: git ai config set model gpt-4.1
56+
- name: Verify installation
57+
run: git ai --version

src/commit.rs

Lines changed: 2 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -235,7 +235,7 @@ mod tests {
235235
let result = generate(
236236
"diff --git a/test.txt b/test.txt\n+Hello World".to_string(),
237237
1024,
238-
Model::GPT4oMini,
238+
Model::GPT41Mini,
239239
Some(&settings)
240240
)
241241
.await;
@@ -270,7 +270,7 @@ mod tests {
270270
let result = generate(
271271
"diff --git a/test.txt b/test.txt\n+Hello World".to_string(),
272272
1024,
273-
Model::GPT4oMini,
273+
Model::GPT41Mini,
274274
Some(&settings)
275275
)
276276
.await;

src/config.rs

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -12,7 +12,7 @@ use console::Emoji;
1212
const DEFAULT_TIMEOUT: i64 = 30;
1313
const DEFAULT_MAX_COMMIT_LENGTH: i64 = 72;
1414
const DEFAULT_MAX_TOKENS: i64 = 2024;
15-
pub const DEFAULT_MODEL: &str = "gpt-4o-mini";
15+
pub const DEFAULT_MODEL: &str = "gpt-4.1"; // Matches Model::default()
1616
pub const DEFAULT_TEMPERATURE: f64 = 0.7;
1717
const DEFAULT_API_KEY: &str = "<PLACE HOLDER FOR YOUR API KEY>";
1818

src/model.rs

Lines changed: 65 additions & 30 deletions
Original file line number | Diff line number | Diff line change
@@ -17,25 +17,25 @@ use crate::config::AppConfig;
1717
static TOKENIZER: OnceLock<CoreBPE> = OnceLock::new();
1818

1919
// Model identifiers - using screaming case for constants
20-
const MODEL_GPT4: &str = "gpt-4";
21-
const MODEL_GPT4_OPTIMIZED: &str = "gpt-4o";
22-
const MODEL_GPT4_MINI: &str = "gpt-4o-mini";
2320
const MODEL_GPT4_1: &str = "gpt-4.1";
21+
const MODEL_GPT4_1_MINI: &str = "gpt-4.1-mini";
22+
const MODEL_GPT4_1_NANO: &str = "gpt-4.1-nano";
23+
const MODEL_GPT4_5: &str = "gpt-4.5";
2424
const DEFAULT_MODEL_NAME: &str = crate::config::DEFAULT_MODEL;
2525

2626
/// Represents the available AI models for commit message generation.
2727
/// Each model has different capabilities and token limits.
2828
#[derive(Debug, PartialEq, Eq, Hash, Copy, Clone, Serialize, Deserialize, Default)]
2929
pub enum Model {
30-
/// Standard GPT-4 model
31-
GPT4,
32-
/// Optimized GPT-4 model for better performance
33-
GPT4o,
34-
/// Mini version of optimized GPT-4 for faster processing
35-
GPT4oMini,
3630
/// Default model - GPT-4.1 latest version
3731
#[default]
38-
GPT41
32+
GPT41,
33+
/// Mini version of GPT-4.1 for faster processing
34+
GPT41Mini,
35+
/// Nano version of GPT-4.1 for very fast processing
36+
GPT41Nano,
37+
/// GPT-4.5 model for advanced capabilities
38+
GPT45
3939
}
4040

4141
impl Model {
@@ -57,10 +57,7 @@ impl Model {
5757

5858
// Always use the proper tokenizer for accurate counts
5959
// We cannot afford to underestimate tokens as it may cause API failures
60-
let tokenizer = TOKENIZER.get_or_init(|| {
61-
let model_str: &str = self.into();
62-
get_tokenizer(model_str)
63-
});
60+
let tokenizer = TOKENIZER.get_or_init(|| get_tokenizer(self.as_ref()));
6461

6562
// Use direct tokenization for accurate token count
6663
let tokens = tokenizer.encode_ordinary(text);
@@ -73,8 +70,7 @@ impl Model {
7370
/// * `usize` - The maximum number of tokens the model can process
7471
pub fn context_size(&self) -> usize {
7572
profile!("Get context size");
76-
let model_str: &str = self.into();
77-
get_context_size(model_str)
73+
get_context_size(self.as_ref())
7874
}
7975

8076
/// Truncates the given text to fit within the specified token limit.
@@ -165,41 +161,80 @@ impl Model {
165161
}
166162
}
167163

168-
impl From<&Model> for &str {
169-
fn from(model: &Model) -> Self {
170-
match model {
171-
Model::GPT4o => MODEL_GPT4_OPTIMIZED,
172-
Model::GPT4 => MODEL_GPT4,
173-
Model::GPT4oMini => MODEL_GPT4_MINI,
174-
Model::GPT41 => MODEL_GPT4_1
164+
impl AsRef<str> for Model {
165+
fn as_ref(&self) -> &str {
166+
match self {
167+
Model::GPT41 => MODEL_GPT4_1,
168+
Model::GPT41Mini => MODEL_GPT4_1_MINI,
169+
Model::GPT41Nano => MODEL_GPT4_1_NANO,
170+
Model::GPT45 => MODEL_GPT4_5
175171
}
176172
}
177173
}
178174

175+
// Keep conversion to String for cases that need owned strings
176+
impl From<&Model> for String {
177+
fn from(model: &Model) -> Self {
178+
model.as_ref().to_string()
179+
}
180+
}
181+
182+
// Keep the old impl for backwards compatibility where possible
183+
impl Model {
184+
pub fn as_str(&self) -> &str {
185+
self.as_ref()
186+
}
187+
}
188+
179189
impl FromStr for Model {
180190
type Err = anyhow::Error;
181191

182192
fn from_str(s: &str) -> Result<Self> {
183-
match s.trim().to_lowercase().as_str() {
184-
"gpt-4o" => Ok(Model::GPT4o),
185-
"gpt-4" => Ok(Model::GPT4),
186-
"gpt-4o-mini" => Ok(Model::GPT4oMini),
193+
let normalized = s.trim().to_lowercase();
194+
match normalized.as_str() {
187195
"gpt-4.1" => Ok(Model::GPT41),
188-
model => bail!("Invalid model name: {}", model)
196+
"gpt-4.1-mini" => Ok(Model::GPT41Mini),
197+
"gpt-4.1-nano" => Ok(Model::GPT41Nano),
198+
"gpt-4.5" => Ok(Model::GPT45),
199+
// Backward compatibility for deprecated models - map to closest GPT-4.1 equivalent
200+
"gpt-4" | "gpt-4o" => {
201+
log::warn!(
202+
"Model '{}' is deprecated. Mapping to 'gpt-4.1'. \
203+
Please update your configuration with: git ai config set model gpt-4.1",
204+
s
205+
);
206+
Ok(Model::GPT41)
207+
}
208+
"gpt-4o-mini" | "gpt-3.5-turbo" => {
209+
log::warn!(
210+
"Model '{}' is deprecated. Mapping to 'gpt-4.1-mini'. \
211+
Please update your configuration with: git ai config set model gpt-4.1-mini",
212+
s
213+
);
214+
Ok(Model::GPT41Mini)
215+
}
216+
model =>
217+
bail!(
218+
"Invalid model name: '{}'. Supported models: gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-4.5",
219+
model
220+
),
189221
}
190222
}
191223
}
192224

193225
impl Display for Model {
194226
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
195-
write!(f, "{}", <&str>::from(self))
227+
write!(f, "{}", self.as_ref())
196228
}
197229
}
198230

199231
// Implement conversion from string types to Model with fallback to default
200232
impl From<&str> for Model {
201233
fn from(s: &str) -> Self {
202-
s.parse().unwrap_or_default()
234+
s.parse().unwrap_or_else(|e| {
235+
log::error!("Failed to parse model '{}': {}. Falling back to default model 'gpt-4.1'.", s, e);
236+
Model::default()
237+
})
203238
}
204239
}
205240

src/openai.rs

Lines changed: 1 addition & 1 deletion
Original file line number | Diff line number | Diff line change
@@ -37,7 +37,7 @@ pub async fn generate_commit_message(diff: &str) -> Result<String> {
3737
if let Ok(api_key) = std::env::var("OPENAI_API_KEY") {
3838
if !api_key.is_empty() {
3939
// Use the commit function directly without parsing
40-
match commit::generate(diff.to_string(), 256, Model::GPT4oMini, None).await {
40+
match commit::generate(diff.to_string(), 256, Model::GPT41Mini, None).await {
4141
Ok(response) => return Ok(response.response.trim().to_string()),
4242
Err(e) => {
4343
log::warn!("Direct generation failed, falling back to local: {e}");

0 commit comments

Comments (0)