@@ -509,15 +509,15 @@ struct llama_server_context
     bool load_model(const common_params &params_)
     {
         params = params_;
-        if (!params.mmproj.empty()) {
+        if (!params.mmproj.path.empty()) {
             multimodal = true;
             LOG_INFO("Multi Modal Mode Enabled", {});
-            clp_ctx = clip_init(params.mmproj.c_str(), clip_context_params {
+            clp_ctx = clip_init(params.mmproj.path.c_str(), clip_context_params {
                 /* use_gpu */ has_gpu,
                 /*verbosity=*/ 1,
             });
             if (clp_ctx == nullptr) {
-                LOG_ERR("unable to load clip model: %s", params.mmproj.c_str());
+                LOG_ERR("unable to load clip model: %s", params.mmproj.path.c_str());
                 return false;
             }

@@ -531,7 +531,7 @@ struct llama_server_context
         ctx = common_init.context.release();
         if (model == nullptr)
         {
-            LOG_ERR("unable to load model: %s", params.model.c_str());
+            LOG_ERR("unable to load model: %s", params.model.path.c_str());
             return false;
         }

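The mechanical `.path` insertions above track an upstream llama.cpp refactor: `common_params::model` and `common_params::mmproj` are no longer plain `std::string` paths but a small struct. A minimal sketch of the assumed shape follows; the real definition lives in llama.cpp's common/common.h, and the field names beyond `path` are illustrative:

// Assumed shape of the upstream model-reference struct. Every former
// string call site therefore gains a ".path", e.g.
//   params.model.c_str()  ->  params.model.path.c_str()
struct common_params_model {
    std::string path;    // local filesystem path, the only field this server sets
    std::string url;     // optional remote URL (unused here)
    std::string hf_repo; // optional Hugging Face repo (unused here)
    std::string hf_file; // optional file within that repo (unused here)
};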
@@ -2326,11 +2326,11 @@ static void params_parse(const backend::ModelOptions* request,

     // this is comparable to: https://github.com/ggerganov/llama.cpp/blob/d9b33fe95bd257b36c84ee5769cc048230067d6f/examples/server/server.cpp#L1809

-    params.model = request->modelfile();
+    params.model.path = request->modelfile();
     if (!request->mmproj().empty()) {
     // get the directory of modelfile
-      std::string model_dir = params.model.substr(0, params.model.find_last_of("/\\"));
-      params.mmproj = model_dir + "/" + request->mmproj();
+      std::string model_dir = params.model.path.substr(0, params.model.path.find_last_of("/\\"));
+      params.mmproj.path = model_dir + "/" + request->mmproj();
     }
     // params.model_alias ??
     params.model_alias = request->modelfile();
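For reference, the directory-splitting idiom used twice in `params_parse` works as sketched below; `dir_of` is a hypothetical helper written for illustration, not part of the patch:

#include <string>

// Hypothetical helper mirroring the inline logic above: trim everything
// after the last path separator ('/' or '\\') to get the directory.
static std::string dir_of(const std::string &path) {
    // If no separator exists, find_last_of returns npos and substr(0, npos)
    // yields the whole string, so a bare filename is treated as its own "dir".
    return path.substr(0, path.find_last_of("/\\"));
}

// dir_of("/models/llava/model.gguf") == "/models/llava", so the mmproj file
// is resolved as "/models/llava/" + request->mmproj().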
@@ -2405,7 +2405,7 @@ static void params_parse(const backend::ModelOptions* request,
         scale_factor = request->lorascale();
      }
      // get the directory of modelfile
-      std::string model_dir = params.model.substr(0, params.model.find_last_of("/\\"));
+      std::string model_dir = params.model.path.substr(0, params.model.path.find_last_of("/\\"));
      params.lora_adapters.push_back({ model_dir + "/" + request->loraadapter(), scale_factor });
     }
     params.use_mlock = request->mlock();
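The brace-initialized element pushed into `params.lora_adapters` assumes the upstream entry type pairs a path with a scale, roughly as below; the struct name and exact fields are upstream llama.cpp's, sketched here from how the call site uses them:

// Assumed shape of a lora adapter entry (upstream names it along the
// lines of common_adapter_lora_info); aggregate init at the call site
// fills these two fields in order.
struct common_adapter_lora_info {
    std::string path; // model_dir + "/" + request->loraadapter()
    float scale;      // scale_factor, defaulting to 1.0f when lorascale() == 0
};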