Commit 7b6cdf4

Merge pull request #876 from ysimonson/fix-cap
Fix capitalization of LlamaCppError
2 parents 813e4be + 1d33652 commit 7b6cdf4

File tree: 3 files changed (+11, -11 lines)


llama-cpp-2/src/lib.rs

Lines changed: 2 additions & 2 deletions
@@ -35,11 +35,11 @@ pub mod token;
 pub mod token_type;
 
 /// A failable result from a llama.cpp function.
-pub type Result<T> = std::result::Result<T, LLamaCppError>;
+pub type Result<T> = std::result::Result<T, LlamaCppError>;
 
 /// All errors that can occur in the llama-cpp crate.
 #[derive(Debug, Eq, PartialEq, thiserror::Error)]
-pub enum LLamaCppError {
+pub enum LlamaCppError {
     /// The backend was already initialized. This can generally be ignored as initializing the backend
     /// is idempotent.
     #[error("BackendAlreadyInitialized")]
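
For orientation, a minimal sketch of downstream code written against the renamed type, assuming llama-cpp-2 as a dependency; only names visible in this diff (the crate's Result<T> alias, LlamaCppError, and its BackendAlreadyInitialized variant) are relied on, and the helper name init_backend_lenient is illustrative, not part of the crate:

use llama_cpp_2::llama_backend::LlamaBackend;
use llama_cpp_2::LlamaCppError;

// Hypothetical helper: initialize the backend, but swallow a repeat
// initialization, since the doc comment above says it can generally be
// ignored (initialization is idempotent).
fn init_backend_lenient() -> llama_cpp_2::Result<Option<LlamaBackend>> {
    match LlamaBackend::init() {
        Ok(backend) => Ok(Some(backend)),
        Err(LlamaCppError::BackendAlreadyInitialized) => Ok(None),
        Err(e) => Err(e),
    }
}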

llama-cpp-2/src/llama_backend.rs

Lines changed: 4 additions & 4 deletions
@@ -1,6 +1,6 @@
 //! Representation of an initialized llama backend
 
-use crate::LLamaCppError;
+use crate::LlamaCppError;
 use llama_cpp_sys_2::ggml_log_level;
 use std::sync::atomic::AtomicBool;
 use std::sync::atomic::Ordering::SeqCst;
@@ -18,7 +18,7 @@ impl LlamaBackend {
     fn mark_init() -> crate::Result<()> {
         match LLAMA_BACKEND_INITIALIZED.compare_exchange(false, true, SeqCst, SeqCst) {
             Ok(_) => Ok(()),
-            Err(_) => Err(LLamaCppError::BackendAlreadyInitialized),
+            Err(_) => Err(LlamaCppError::BackendAlreadyInitialized),
         }
     }
 
@@ -28,15 +28,15 @@ impl LlamaBackend {
     ///
     /// ```
     ///# use llama_cpp_2::llama_backend::LlamaBackend;
-    ///# use llama_cpp_2::LLamaCppError;
+    ///# use llama_cpp_2::LlamaCppError;
     ///# use std::error::Error;
     ///
     ///# fn main() -> Result<(), Box<dyn Error>> {
     ///
     ///
     /// let backend = LlamaBackend::init()?;
     /// // the llama backend can only be initialized once
-    /// assert_eq!(Err(LLamaCppError::BackendAlreadyInitialized), LlamaBackend::init());
+    /// assert_eq!(Err(LlamaCppError::BackendAlreadyInitialized), LlamaBackend::init());
     ///
     ///# Ok(())
     ///# }
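
The mark_init guard above uses a standard compare_exchange init-once pattern. A self-contained sketch of that pattern with only the standard library, so it can be run in isolation; the names INITIALIZED, InitError, and init_once are illustrative, not from the crate:

use std::sync::atomic::{AtomicBool, Ordering::SeqCst};

static INITIALIZED: AtomicBool = AtomicBool::new(false);

#[derive(Debug, Eq, PartialEq)]
enum InitError {
    AlreadyInitialized,
}

fn init_once() -> Result<(), InitError> {
    // compare_exchange flips the flag exactly once; any later caller sees
    // the already-set flag and gets the "already initialized" error.
    match INITIALIZED.compare_exchange(false, true, SeqCst, SeqCst) {
        Ok(_) => Ok(()),
        Err(_) => Err(InitError::AlreadyInitialized),
    }
}

fn main() {
    assert_eq!(Ok(()), init_once());
    assert_eq!(Err(InitError::AlreadyInitialized), init_once());
}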

llama-cpp-2/src/model/params.rs

Lines changed: 5 additions & 5 deletions
@@ -1,7 +1,7 @@
 //! A safe wrapper around `llama_model_params`.
 
 use crate::model::params::kv_overrides::KvOverrides;
-use crate::LLamaCppError;
+use crate::LlamaCppError;
 use std::ffi::{c_char, CStr};
 use std::fmt::{Debug, Formatter};
 use std::pin::Pin;
@@ -375,19 +375,19 @@ impl LlamaModelParams {
     /// You don't need to specify CPU or ACCEL devices.
     ///
     /// # Errors
-    /// Returns `LLamaCppError::BackendDeviceNotFound` if any device index is invalid.
-    pub fn with_devices(mut self, devices: &[usize]) -> Result<Self, LLamaCppError> {
+    /// Returns `LlamaCppError::BackendDeviceNotFound` if any device index is invalid.
+    pub fn with_devices(mut self, devices: &[usize]) -> Result<Self, LlamaCppError> {
         for dev in self.devices.iter_mut() {
             *dev = std::ptr::null_mut();
         }
         // Check device count
         let max_devices = crate::max_devices().min(LLAMA_CPP_MAX_DEVICES);
         if devices.len() > max_devices {
-            return Err(LLamaCppError::MaxDevicesExceeded(max_devices));
+            return Err(LlamaCppError::MaxDevicesExceeded(max_devices));
         }
         for (i, &dev) in devices.iter().enumerate() {
             if dev >= unsafe { llama_cpp_sys_2::ggml_backend_dev_count() } {
-                return Err(LLamaCppError::BackendDeviceNotFound(dev));
+                return Err(LlamaCppError::BackendDeviceNotFound(dev));
             }
             let backend_dev = unsafe { llama_cpp_sys_2::ggml_backend_dev_get(dev) };
             self.devices[i] = backend_dev;
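
A hedged sketch of calling with_devices and handling the two error variants touched by this diff; it assumes llama-cpp-2 as a dependency and that LlamaModelParams::default() is available (the default constructor is not part of this diff), and the helper name params_for_devices is illustrative:

use llama_cpp_2::model::params::LlamaModelParams;
use llama_cpp_2::LlamaCppError;

fn params_for_devices(devices: &[usize]) -> Result<LlamaModelParams, LlamaCppError> {
    match LlamaModelParams::default().with_devices(devices) {
        Ok(params) => Ok(params),
        // Both variants now use the corrected LlamaCppError spelling.
        Err(LlamaCppError::MaxDevicesExceeded(max)) => {
            eprintln!("at most {max} devices are supported");
            Err(LlamaCppError::MaxDevicesExceeded(max))
        }
        Err(LlamaCppError::BackendDeviceNotFound(idx)) => {
            eprintln!("no backend device at index {idx}");
            Err(LlamaCppError::BackendDeviceNotFound(idx))
        }
        Err(e) => Err(e),
    }
}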
