make server run
localagi committed May 24, 2023
1 parent 817e4bb commit 579b02c
Showing 2 changed files with 4 additions and 4 deletions.
alias.llama.cpp (2 changes: 1 addition & 1 deletion)

@@ -1,4 +1,4 @@
 # Alias helper
 source .env
 GPU_ARGS="--gpus all"
-alias llama.cpp="docker run ${GPU_ARGS} -v ${LOCAL_MODEL_DIR}:${LOCAL_MODEL_DIR} localagi/llama.cpp:${LLAMA_CPP_VERSION}${LLAMA_CPP_FLAVOR}"
+alias llama.cpp="docker run --init ${GPU_ARGS} -v ${LOCAL_MODEL_DIR}:${LOCAL_MODEL_DIR} localagi/llama.cpp:${LLAMA_CPP_VERSION}${LLAMA_CPP_FLAVOR}"
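
For context, a minimal usage sketch (not part of the commit): `--init` makes Docker run a small init process as PID 1 inside the container, so signals such as Ctrl-C are forwarded and the process is reaped cleanly. Assuming `.env` defines LOCAL_MODEL_DIR, LLAMA_CPP_VERSION, and LLAMA_CPP_FLAVOR, the alias would be used roughly like this; the model path is illustrative, not taken from the repository:

    # hypothetical usage sketch; "some-model" is a placeholder filename
    source alias.llama.cpp    # defines the llama.cpp alias using values from .env
    llama.cpp --model ${LOCAL_MODEL_DIR}/some-model.ggmlv3.q5_1.bin -p "Hello"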
docker-compose.yaml (6 changes: 3 additions & 3 deletions)

@@ -4,9 +4,9 @@ services:
   # GPU Inference
   llama.cpp-server:
     image: localagi/llama.cpp:${LLAMA_CPP_VERSION}${LLAMA_CPP_FLAVOR}
-    command: ["--model", "/models/Wizard-Vicuna-13B-Uncensored-GGML/Wizard-Vicuna-13B-Uncensored.ggml.q5_1.bin" ]
+    init: true
+    tty: true
+    command: "server --model ${LOCAL_MODEL_DIR}/Wizard-Vicuna-7B-Uncensored-GGML/Wizard-Vicuna-7B-Uncensored.ggmlv3.q5_1.bin --host 0.0.0.0"
     ports:
       - 8080:8080
     volumes:
       - $LOCAL_MODEL_DIR:$LOCAL_MODEL_DIR
     deploy:
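
A quick smoke test, assuming this compose file and that the image wraps the upstream llama.cpp HTTP server (whose completion endpoint may differ across versions); the prompt and parameters below are illustrative only:

    # hypothetical smoke test; endpoint shape follows the upstream
    # llama.cpp server API and may differ for this image/version
    docker compose up -d llama.cpp-server
    curl -s http://localhost:8080/completion \
      -H 'Content-Type: application/json' \
      -d '{"prompt": "Hello", "n_predict": 16}'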
