
Commit e6ddb15
cleanup
LostRuins committed Jun 22, 2023
1 parent 1b71752 commit e6ddb15
Showing 4 changed files with 10 additions and 2 deletions.
1 change: 1 addition & 0 deletions otherarch/gpt2_v3.cpp
@@ -347,6 +347,7 @@ ModelLoadResult gpt2_model_load(const std::string & fname, gpt2_model & model, g

//gpu offload
#if defined(GGML_USE_CLBLAST)
+if(gpulayers>0)
{
const auto & hparams = model.hparams;
size_t vram_total = 0;
…
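For context, the hunk above cuts off right after the new guard. Below is a minimal sketch of how such a CLBlast offload block typically continues, modeled on the analogous loaders in llama.cpp/koboldcpp of that era. The tensor name c_attn_attn_w, the GGML_BACKEND_GPU backend flag, and the ggml_cl_transform_tensor(tensor) call are assumptions about the period API, not the verbatim contents of gpt2_v3.cpp:

// Sketch only: what the newly guarded block plausibly does when gpulayers > 0.
// Assumes ggml-opencl.h of mid-2023 exposing ggml_cl_transform_tensor(tensor).
#if defined(GGML_USE_CLBLAST)
    if (gpulayers > 0)
    {
        const auto & hparams = model.hparams;
        size_t vram_total = 0;
        // Never offload more layers than the model has (assumed clamp).
        const int n_gpu = std::min(gpulayers, int(hparams.n_layer));
        fprintf(stderr, "%s: [opencl] offloading %d layers to GPU\n", __func__, n_gpu);
        for (int i = 0; i < n_gpu; ++i) {
            const auto & layer = model.layers[i];
            // Mark one weight tensor as GPU-resident, copy it to the OpenCL
            // device, and account for the VRAM it occupies.
            layer.c_attn_attn_w->backend = GGML_BACKEND_GPU;
            ggml_cl_transform_tensor(layer.c_attn_attn_w);
            vram_total += ggml_nbytes(layer.c_attn_attn_w);
        }
        fprintf(stderr, "%s: [opencl] total VRAM used: %zu MB\n", __func__, vram_total / 1024 / 1024);
    }
#endif

With gpulayers == 0 the new guard skips this whole block, so a CPU-only load no longer enters the OpenCL offload path at all.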
1 change: 1 addition & 0 deletions otherarch/gptj_v3.cpp
@@ -335,6 +335,7 @@ ModelLoadResult gptj_model_load(const std::string & fname, gptj_model & model, g

//gpu offload
#if defined(GGML_USE_CLBLAST)
+if(gpulayers>0)
{
const auto & hparams = model.hparams;
size_t vram_total = 0;
…
5 changes: 4 additions & 1 deletion otherarch/mpt_v3.cpp
@@ -15,7 +15,9 @@

#include "model_adapter.h"


#if defined(GGML_USE_CLBLAST)
#include "ggml-opencl.h"
#endif

// load the model's weights from a file
bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vocab, int gpulayers) {
@@ -280,6 +282,7 @@ bool mpt_model_load(const std::string & fname, mpt_model & model, gpt_vocab & vo

//gpu offload
#if defined(GGML_USE_CLBLAST)
+if(gpulayers>0)
{
const auto & hparams = model.hparams;
size_t vram_total = 0;
…
5 changes: 4 additions & 1 deletion otherarch/neox_v3.cpp
@@ -13,7 +13,9 @@
#include <vector>
#include <iostream>

-
+#if defined(GGML_USE_CLBLAST)
+#include "ggml-opencl.h"
+#endif

// load the model's weights from a file
ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model & model, gpt_vocab & vocab, FileFormat file_format, int gpulayers) {
@@ -320,6 +322,7 @@ ModelLoadResult gpt_neox_model_load(const std::string & fname, gpt_neox_model &

//gpu offload
#if defined(GGML_USE_CLBLAST)
+if(gpulayers>0)
{
const auto & hparams = model.hparams;
size_t vram_total = 0;
…
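Taken together, the four diffs apply the same two fixes: the ggml-opencl.h include is compiled only when CLBlast is enabled, and the offload block is skipped outright when no layers are requested. A hypothetical call site illustrating the effect, using the MPT loader as the example — the model path is made up, and whether model_adapter.h declares mpt_model_load is an assumption; only the signature matches the hunk headers above:

#include "model_adapter.h"  // assumed to declare mpt_model_load and the types below

int main() {
    mpt_model model;
    gpt_vocab vocab;

    // CPU-only load: after this commit the CLBlast offload block is not
    // entered at all, instead of running with zero layers to offload.
    bool ok_cpu = mpt_model_load("models/mpt-7b-q4_0.bin", model, vocab, /*gpulayers=*/0);

    // Offload the first 20 layers via CLBlast (effective only in a build
    // compiled with GGML_USE_CLBLAST).
    bool ok_gpu = mpt_model_load("models/mpt-7b-q4_0.bin", model, vocab, /*gpulayers=*/20);

    return (ok_cpu && ok_gpu) ? 0 : 1;
}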
