 continuedev/src/continuedev/plugins/steps/core/core.py |  2 +-
 docs/docs/customization.md                              | 24 +++++++++++++++++++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)
diff --git a/continuedev/src/continuedev/plugins/steps/core/core.py b/continuedev/src/continuedev/plugins/steps/core/core.py
index 212746f4..17b325ab 100644
--- a/continuedev/src/continuedev/plugins/steps/core/core.py
+++ b/continuedev/src/continuedev/plugins/steps/core/core.py
@@ -334,7 +334,7 @@ class DefaultModelEditCodeStep(Step):
self, file_prefix: str, contents: str, file_suffix: str, sdk: ContinueSDK
) -> str:
if contents.strip() == "":
- # Seperate prompt for insertion at the cursor, the other tends to cause it to repeat whole file
+ # Separate prompt for insertion at the cursor; the other tends to cause it to repeat the whole file
prompt = dedent(
f"""\
<file_prefix>
diff --git a/docs/docs/customization.md b/docs/docs/customization.md
index 096b42b2..74149ff9 100644
--- a/docs/docs/customization.md
+++ b/docs/docs/customization.md
@@ -14,7 +14,7 @@ Local Models
- [Ollama](#run-llama-2-locally-with-ollama) - If you have a Mac, Ollama is the simplest way to run open-source models like Code Llama.
- [GGML](#local-models-with-ggml) - Use llama-cpp-python to run a local server with any open-source model.
-- [LlamaCpp](#llama-cpp) - Use llama.cpp directly instead of llama-cpp-python.
+- [LlamaCpp](#llamacpp) - Use llama.cpp directly instead of llama-cpp-python.
Open-Source Models (not local)
@@ -117,6 +117,28 @@ config = ContinueConfig(
)
```
+### Llama.cpp
+
+Run the llama.cpp server binary to start the API server. If it is running on a remote machine, be sure to set `--host` to `0.0.0.0` so it accepts outside connections:
+```shell
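+# -c: context size, --host: bind address, -t: thread count, --mlock: lock model in RAM, -m: model path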
+.\server.exe -c 4096 --host 0.0.0.0 -t 16 --mlock -m models\meta\llama\codellama-7b-instruct.Q8_0.gguf
+```
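+
+Before pointing Continue at the server, you can sanity-check that it's reachable (a minimal check, assuming llama.cpp's default `/completion` endpoint on port 8080):
+
+```shell
+# Ask the server for a short completion; any JSON response means it's up
+curl http://localhost:8080/completion -H "Content-Type: application/json" -d '{"prompt": "def fib(n):", "n_predict": 16}'
+```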
+
+After it's up and running, change `~/.continue/config.py` to look like this:
+
+```python
+from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp
+
+config = ContinueConfig(
+ ...
+ models=Models(
+ default=LlamaCpp(
+ max_context_length=4096,
+ server_url="http://localhost:8080")
+ )
+)
+```
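+
+Note that `max_context_length` matches the `-c 4096` context size passed to the server above; if you change one, keep the other in sync.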
+
### Together
The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this: