From 705324ed2ef588b2885c0b03107b9e30ae358dae Mon Sep 17 00:00:00 2001
From: Ikko Eltociear Ashimine
Date: Wed, 30 Aug 2023 13:16:04 +0900
Subject: fix: 🐛 typo in core.py (#429)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Seperate -> Separate
---
 continuedev/src/continuedev/plugins/steps/core/core.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/continuedev/src/continuedev/plugins/steps/core/core.py b/continuedev/src/continuedev/plugins/steps/core/core.py
index 212746f4..17b325ab 100644
--- a/continuedev/src/continuedev/plugins/steps/core/core.py
+++ b/continuedev/src/continuedev/plugins/steps/core/core.py
@@ -334,7 +334,7 @@ class DefaultModelEditCodeStep(Step):
         self, file_prefix: str, contents: str, file_suffix: str, sdk: ContinueSDK
     ) -> str:
         if contents.strip() == "":
-            # Seperate prompt for insertion at the cursor, the other tends to cause it to repeat whole file
+            # Separate prompt for insertion at the cursor, the other tends to cause it to repeat whole file
             prompt = dedent(
                 f"""\
-- 
cgit v1.2.3-70-g09d2

From 489bc9d8d85ab46c1e3b9c092e5e3678e40671d3 Mon Sep 17 00:00:00 2001
From: Dennis McDonald <91274821+CambridgeComputing@users.noreply.github.com>
Date: Wed, 30 Aug 2023 22:11:51 -0500
Subject: Include llama.cpp documentation (#435)

* Update customization.md

Created section for the new llama.cpp implementation

* Update customization.md

Fixed link to llama.cpp

* Update customization.md

Fixed link for llama.cpp again; made a markdown error initially.
---
 docs/docs/customization.md | 24 +++++++++++++++++++++++-
 1 file changed, 23 insertions(+), 1 deletion(-)

diff --git a/docs/docs/customization.md b/docs/docs/customization.md
index 096b42b2..74149ff9 100644
--- a/docs/docs/customization.md
+++ b/docs/docs/customization.md
@@ -14,7 +14,7 @@ Local Models
 
 - [Ollama](#run-llama-2-locally-with-ollama) - If you have a Mac, Ollama is the simplest way to run open-source models like Code Llama.
 - [GGML](#local-models-with-ggml) - Use llama-cpp-python to run a local server with any open-source model.
-- [LlamaCpp](#llama-cpp) - Use llama.cpp directly instead of llama-cpp-python.
+- [LlamaCpp](#llamacpp) - Use llama.cpp directly instead of llama-cpp-python.
 
 Open-Source Models (not local)
 
@@ -117,6 +117,28 @@ config = ContinueConfig(
 )
 ```
 
+### Llama.cpp
+
+Run the llama.cpp server binary to start the API server. If it is running on a remote machine, be sure to set `--host` to `0.0.0.0`:
+```shell
+.\server.exe -c 4096 --host 0.0.0.0 -t 16 --mlock -m models\meta\llama\codellama-7b-instruct.Q8_0.gguf
+```
+
+After it's up and running, change `~/.continue/config.py` to look like this:
+
+```python
+from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=LlamaCpp(
+            max_context_length=4096,
+            server_url="http://localhost:8080")
+    )
+)
+```
+
 ### Together
 
 The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this:
-- 
cgit v1.2.3-70-g09d2