# Custom APIs
To configure vim-ai to use a different API, it has to be OpenAI-compatible. Below are two examples of projects providing such an API: llama.cpp (served through llama-cpp-python) and Ollama.
## llama.cpp

Build llama-cpp-python with cuBLAS support, download a GGUF model, and start the bundled OpenAI-compatible server:

```sh
CMAKE_ARGS="-DLLAMA_CUBLAS=on" pip install 'llama-cpp-python[server]'
wget https://huggingface.co/TheBloke/CodeLlama-13B-Instruct-GGUF/resolve/main/codellama-13b-instruct.Q5_K_M.gguf
python3 -m llama_cpp.server --n_gpu_layers 100 --model codellama-13b-instruct.Q5_K_M.gguf
```
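A quick way to check that the server speaks the OpenAI chat protocol is a direct request. This is a minimal sketch: port 8000 is llama_cpp.server's default, and since the server hosts a single loaded model, the `model` field is effectively informational here.

```sh
# hypothetical smoke test against the server's OpenAI-compatible endpoint
curl http://localhost:8000/v1/chat/completions \
  -H "Content-Type: application/json" \
  -d '{"model": "codellama-13b-instruct", "messages": [{"role": "user", "content": "Say hello."}]}'
```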
Then point vim-ai at the local server:

```vim
let g:vim_ai_chat = {
\  "options": {
\    "endpoint_url": "http://localhost:8000/v1/chat/completions",
\    "enable_auth": 0,
\  },
\}
```
## Ollama

Ollama's OpenAI compatibility is still experimental; check the status in their documentation.
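Assuming Ollama is installed and its server is running (it listens on port 11434 by default), pull the model referenced in the configuration below:

```sh
# fetch the model used in the vim-ai config below
ollama pull mistral-nemo:12b-instruct-2407-q6_K
```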
A more complete configuration, sharing one endpoint, model and temperature across vim-ai's chat, complete and edit commands:

```vim
let s:vim_ai_endpoint_url = "http://localhost:11434/v1/chat/completions"
let s:vim_ai_model = "mistral-nemo:12b-instruct-2407-q6_K"
let s:vim_ai_temperature = 0.3

let s:vim_ai_chat_config = #{
\  engine: "chat",
\  options: #{
\    model: s:vim_ai_model,
\    temperature: s:vim_ai_temperature,
\    endpoint_url: s:vim_ai_endpoint_url,
\    enable_auth: 0,
\    max_tokens: 0,
\    request_timeout: 60,
\  },
\  ui: #{
\    code_syntax_enabled: 1,
\  },
\}

let s:vim_ai_edit_config = #{
\  engine: "chat",
\  options: #{
\    model: s:vim_ai_model,
\    temperature: s:vim_ai_temperature,
\    endpoint_url: s:vim_ai_endpoint_url,
\    enable_auth: 0,
\    max_tokens: 0,
\    request_timeout: 60,
\  },
\  ui: #{
\    paste_mode: 1,
\  },
\}

" :AIChat uses the chat config; :AI and :AIEdit share the edit config
let g:vim_ai_chat = s:vim_ai_chat_config
let g:vim_ai_complete = s:vim_ai_edit_config
let g:vim_ai_edit = s:vim_ai_edit_config
```
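With either setup in place, you can verify the whole chain from inside Vim; `:AIChat` accepts an optional instruction, so a one-liner is enough (the prompt text is arbitrary):

```vim
" sends the prompt to the configured endpoint and opens a chat buffer
:AIChat say hello
```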