Commit 84c8cef

move fname to impl

1 parent 52eab6a

File tree

3 files changed: +6, -4 lines

src/llama-mmap.cpp

Lines changed: 4 additions & 2 deletions

@@ -159,7 +159,7 @@ struct llama_file::impl {
         }
     }
 #else
-    impl(const char * fname, const char * mode) {
+    impl(const char * fname, const char * mode) : fname(fname) {
         fp = ggml_fopen(fname, mode);
         if (fp == NULL) {
             throw std::runtime_error(format("failed to open %s: %s", fname, strerror(errno)));
@@ -237,15 +237,17 @@ struct llama_file::impl {
     }
 #endif
 
+    const char * fname;
     FILE * fp;
     size_t size;
 };
 
-llama_file::llama_file(const char * fname, const char * mode) : fname(fname), pimpl(std::make_unique<impl>(fname, mode)) {}
+llama_file::llama_file(const char * fname, const char * mode) : pimpl(std::make_unique<impl>(fname, mode)) {}
 llama_file::~llama_file() = default;
 
 size_t llama_file::tell() const { return pimpl->tell(); }
 size_t llama_file::size() const { return pimpl->size; }
+const char * llama_file::fname() const { return pimpl->fname; }
 
 int llama_file::file_id() const {
 #ifdef _WIN32

src/llama-mmap.h

Lines changed: 1 addition & 1 deletion

@@ -19,6 +19,7 @@ struct llama_file {
     size_t tell() const;
     size_t size() const;
 
+    const char * fname() const;
     int file_id() const; // fileno overload
 
     void seek(size_t offset, int whence) const;
@@ -28,7 +29,6 @@ struct llama_file {
 
     void write_raw(const void * ptr, size_t len) const;
     void write_u32(uint32_t val) const;
-    const char * fname;
 
 private:
     struct impl;

src/llama-model-loader.cpp

Lines changed: 1 addition & 1 deletion

@@ -1039,7 +1039,7 @@ bool llama_model_loader::load_all_data(
                 mmap_used.second = std::max(mmap_used.second, weight->offs + n_size);
             } else {
                 const auto & file = files.at(weight->idx);
-                if (!load_tensor(cur, file->fname, weight->offs, 0, n_size)) {
+                if (!load_tensor(cur, file->fname(), weight->offs, 0, n_size)) {
                     ggml_backend_tensor_set(cur, data, 0, n_size);
                 }
             }
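
Net effect of the commit: the fname pointer moves out of the public llama_file struct into its hidden impl, and a const accessor replaces the raw data member. Below is a minimal, self-contained sketch of that pimpl-plus-accessor shape, assuming simplified names (file_handle instead of llama_file, plain std::fopen instead of ggml_fopen); it mirrors the structure of the change, not the actual llama.cpp sources.

// Illustrative sketch of the pattern applied by this commit; not llama.cpp code.
#include <cstdio>
#include <memory>
#include <stdexcept>
#include <string>

struct file_handle {
    struct impl;                         // definition hidden from users

    file_handle(const char * fname, const char * mode);
    ~file_handle();

    const char * fname() const;          // accessor replaces a public data member

private:
    std::unique_ptr<impl> pimpl;
};

struct file_handle::impl {
    // fname now lives behind the pimpl; the constructor captures it,
    // just like the change in src/llama-mmap.cpp.
    impl(const char * fname, const char * mode) : fname(fname) {
        fp = std::fopen(fname, mode);
        if (fp == nullptr) {
            throw std::runtime_error(std::string("failed to open ") + fname);
        }
    }
    ~impl() {
        if (fp != nullptr) {
            std::fclose(fp);
        }
    }

    const char * fname;
    std::FILE * fp = nullptr;
};

file_handle::file_handle(const char * fname, const char * mode)
    : pimpl(std::make_unique<impl>(fname, mode)) {}
file_handle::~file_handle() = default;

// forwards to the member that moved into impl
const char * file_handle::fname() const { return pimpl->fname; }

Call sites then switch from the raw member to the accessor, which is exactly the one-line change in src/llama-model-loader.cpp: file->fname becomes file->fname().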
