.\server.exe -c 4096 --host 0.0.0.0 -t 16 --mlock -m models\meta\llama\codellama-7b-instruct.Q8_0.gguf
~/.continue/config.json
models:
  - name: Llama CPP
    provider: llama.cpp
    model: MODEL_NAME
    apiBase: http://localhost:8080
Was this page helpful?