From 331b2adcb6f8d962e4ed19292fd2ab5838ba479e Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Tue, 12 Sep 2023 00:59:20 -0700 Subject: docs: :memo: major docs improvements --- docs/docs/collecting-data.md | 5 - docs/docs/concepts/ide.md | 4 +- docs/docs/customization.md | 375 ---------------------- docs/docs/customization/intro.md | 10 - docs/docs/customization/models.md | 92 ++++++ docs/docs/customization/other-configuration.md | 51 +++ docs/docs/customization/overview.md | 10 + docs/docs/customization/slash-commands.md | 72 +++++ docs/docs/development-data.md | 5 + docs/docs/getting-started.md | 9 - docs/docs/how-continue-works.md | 5 +- docs/docs/how-to-use-continue.md | 13 +- docs/docs/quickstart.md | 9 + docs/docs/reference/Context Providers/diff.md | 17 + docs/docs/reference/Context Providers/file.md | 14 + docs/docs/reference/Context Providers/filetree.md | 17 + docs/docs/reference/Context Providers/github.md | 15 + docs/docs/reference/Context Providers/google.md | 17 + docs/docs/reference/Context Providers/intro.md | 1 - docs/docs/reference/Context Providers/search.md | 17 + docs/docs/reference/Context Providers/terminal.md | 17 + docs/docs/reference/Context Providers/url.md | 17 + docs/docs/reference/Models/anthropic.md | 19 +- docs/docs/reference/Models/ggml.md | 21 +- docs/docs/reference/Models/hf_inference_api.md | 29 ++ docs/docs/reference/Models/hf_tgi.md | 15 + docs/docs/reference/Models/llamacpp.md | 25 +- docs/docs/reference/Models/maybe_proxy_openai.md | 36 +++ docs/docs/reference/Models/ollama.md | 17 +- docs/docs/reference/Models/openai.md | 30 +- docs/docs/reference/Models/queued.md | 19 +- docs/docs/reference/Models/replicate.md | 22 +- docs/docs/reference/Models/text_gen_interface.md | 19 +- docs/docs/reference/Models/together.md | 21 +- docs/docs/reference/config.md | 14 + docs/docs/telemetry.md | 4 +- docs/docs/troubleshooting.md | 2 +- docs/sidebars.js | 15 +- docs/src/components/ClassPropertyRef.tsx | 34 +- docs/src/css/custom.css | 16 +- 40 files changed, 712 insertions(+), 438 deletions(-) delete mode 100644 docs/docs/collecting-data.md delete mode 100644 docs/docs/customization.md delete mode 100644 docs/docs/customization/intro.md create mode 100644 docs/docs/customization/overview.md create mode 100644 docs/docs/development-data.md delete mode 100644 docs/docs/getting-started.md create mode 100644 docs/docs/quickstart.md create mode 100644 docs/docs/reference/Context Providers/diff.md create mode 100644 docs/docs/reference/Context Providers/file.md create mode 100644 docs/docs/reference/Context Providers/filetree.md create mode 100644 docs/docs/reference/Context Providers/github.md create mode 100644 docs/docs/reference/Context Providers/google.md delete mode 100644 docs/docs/reference/Context Providers/intro.md create mode 100644 docs/docs/reference/Context Providers/search.md create mode 100644 docs/docs/reference/Context Providers/terminal.md create mode 100644 docs/docs/reference/Context Providers/url.md create mode 100644 docs/docs/reference/Models/hf_inference_api.md create mode 100644 docs/docs/reference/Models/hf_tgi.md create mode 100644 docs/docs/reference/Models/maybe_proxy_openai.md create mode 100644 docs/docs/reference/config.md (limited to 'docs') diff --git a/docs/docs/collecting-data.md b/docs/docs/collecting-data.md deleted file mode 100644 index 95beeee7..00000000 --- a/docs/docs/collecting-data.md +++ /dev/null @@ -1,5 +0,0 @@ -# Collecting data - -When you use Continue, you automatically collect data on how you build software. 
By default, this development data is saved to `.continue/dev_data` on your local machine. When combined with the code that you ultimately commit, it can be used to improve the LLM that you or your team use (if you allow). - -You can read more about how development data is generated as a byproduct of LLM-aided development and why we believe that you should start collecting it now: [It’s time to collect data on how you build software](https://blog.continue.dev/its-time-to-collect-data-on-how-you-build-software) diff --git a/docs/docs/concepts/ide.md b/docs/docs/concepts/ide.md index bd31481b..d4b48f0a 100644 --- a/docs/docs/concepts/ide.md +++ b/docs/docs/concepts/ide.md @@ -17,11 +17,11 @@ SDK provides "IDEProtocol" class so that steps can interact with VS Code, etc... ### VS Code -You can install the VS Code extension [here](../getting-started.md) +You can install the VS Code extension [here](../quickstart.md) ### GitHub Codespaces -You can install the GitHub Codespaces extension [here](../getting-started.md) +You can install the GitHub Codespaces extension [here](../quickstart.md) ## IDE Protocol methods diff --git a/docs/docs/customization.md b/docs/docs/customization.md deleted file mode 100644 index fb7dc0c5..00000000 --- a/docs/docs/customization.md +++ /dev/null @@ -1,375 +0,0 @@ -# Customization - -Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\.continue\config.py` for Windows) on your machine. This file is created the first time you run Continue. - -## Summary of Models - -Commercial Models - -- [MaybeProxyOpenAI](#adding-an-openai-api-key) (default) - Use gpt-4 or gpt-3.5-turbo free with our API key, or with your API key. gpt-4 is probably the most capable model of all options. -- [OpenAI](#azure-openai-service) - Use any OpenAI model with your own key. Can also change the base URL if you have a server that uses the OpenAI API format, including using the Azure OpenAI service, LocalAI, etc. -- [AnthropicLLM](#claude-2) - Use claude-2 with your Anthropic API key. Claude 2 is also highly capable, and has a 100,000 token context window. - -Local Models - -- [Ollama](#run-llama-2-locally-with-ollama) - If you have a Mac, Ollama is the simplest way to run open-source models like Code Llama. -- [OpenAI](#local-models-with-openai-compatible-server) - If you have access to an OpenAI-compatible server (e.g. llama-cpp-python, LocalAI, FastChat, TextGenWebUI, etc.), you can use the `OpenAI` class and just change the base URL. -- [GGML](#local-models-with-ggml) - An alternative way to connect to OpenAI-compatible servers. Will use `aiohttp` directly instead of the `openai` Python package. -- [LlamaCpp](#llamacpp) - Build llama.cpp from source and use its built-in API server. - -Open-Source Models (not local) - -- [TogetherLLM](#together) - Use any model from the [Together Models list](https://docs.together.ai/docs/models-inference) with your Together API key. -- [ReplicateLLM](#replicate) - Use any open-source model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models) with your Replicate API key. -- [HuggingFaceInferenceAPI](#huggingface) - Use any open-source model from the [Hugging Face Inference API](https://huggingface.co/inference-api) with your Hugging Face token. - -## Change the default LLM - -In `config.py`, you'll find the `models` property: - -```python -from continuedev.src.continuedev.core.models import Models - -config = ContinueConfig( - ... 
- models=Models( - default=MaybeProxyOpenAI(model="gpt-4"), - medium=MaybeProxyOpenAI(model="gpt-3.5-turbo") - ) -) -``` - -The `default` and `medium` properties are different _model roles_. This allows different models to be used for different tasks. The available roles are `default`, `small`, `medium`, `large`, `edit`, and `chat`. `edit` is used when you use the '/edit' slash command, `chat` is used for all chat responses, and `medium` is used for summarizing. If not set, all roles will fall back to `default`. The values of these fields must be of the [`LLM`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/__init__.py) class, which implements methods for retrieving and streaming completions from an LLM. - -Below, we describe the `LLM` classes available in the Continue core library, and how they can be used. - -### Adding an OpenAI API key - -With the `MaybeProxyOpenAI` `LLM`, new users can try out Continue with GPT-4 using a proxy server that securely makes calls to OpenAI using our API key. Continue should just work the first time you install the extension in VS Code. - -Once you are using Continue regularly though, you will need to add an OpenAI API key that has access to GPT-4 by following these steps: - -1. Copy your API key from https://platform.openai.com/account/api-keys -2. Open `~/.continue/config.py`. You can do this by using the '/config' command in Continue -3. Change the default LLMs to look like this: - -```python -API_KEY = "" -config = ContinueConfig( - ... - models=Models( - default=MaybeProxyOpenAI(model="gpt-4", api_key=API_KEY), - medium=MaybeProxyOpenAI(model="gpt-3.5-turbo", api_key=API_KEY) - ) -) -``` - -The `MaybeProxyOpenAI` class will automatically switch to using your API key instead of ours. If you'd like to explicitly use one or the other, you can use the `ProxyServer` or `OpenAI` classes instead. - -These classes support any models available through the OpenAI API, assuming your API key has access, including "gpt-4", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", and "gpt-4-32k". - -### claude-2 - -Import the `AnthropicLLM` LLM class and set it as the default model: - -```python -from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM - -config = ContinueConfig( - ... - models=Models( - default=AnthropicLLM(api_key="", model="claude-2") - ) -) -``` - -Continue will automatically prompt you for your Anthropic API key, which must have access to Claude 2. You can request early access [here](https://www.anthropic.com/earlyaccess). - -### Run Llama-2 locally with Ollama - -[Ollama](https://ollama.ai/) is a Mac application that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class: - -```python -from continuedev.src.continuedev.libs.llm.ollama import Ollama - -config = ContinueConfig( - ... - models=Models( - default=Ollama(model="llama2") - ) -) -``` - -### Local models with OpenAI-compatible server - -If you are locally serving a model that uses an OpenAI-compatible server, you can simply change the `api_base` in the `OpenAI` class like this: - -```python -from continuedev.src.continuedev.libs.llm.openai import OpenAI - -config = ContinueConfig( - ... 
- models=Models( - default=OpenAI( - api_key="EMPTY", - model="", - api_base="http://localhost:8000", # change to your server - ) - ) -) -``` - -Options for serving models locally with an OpenAI-compatible server include: - -- [text-gen-webui](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai#setup--installation) -- [FastChat](https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md) -- [LocalAI](https://localai.io/basics/getting_started/) -- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python#web-server) - -### Local models with ggml - -See our [5 minute quickstart](https://github.com/continuedev/ggml-server-example) to run any model locally with ggml. While these models don't yet perform as well, they are free, entirely private, and run offline. - -Once the model is running on localhost:8000, change `~/.continue/config.py` to look like this: - -```python -from continuedev.src.continuedev.libs.llm.ggml import GGML - -config = ContinueConfig( - ... - models=Models( - default=GGML( - max_context_length=2048, - server_url="http://localhost:8000") - ) -) -``` - -### Llama.cpp - -Run the llama.cpp server binary to start the API server. If running on a remote server, be sure to set host to 0.0.0.0: - -```shell -.\server.exe -c 4096 --host 0.0.0.0 -t 16 --mlock -m models\meta\llama\codellama-7b-instruct.Q8_0.gguf -``` - -After it's up and running, change `~/.continue/config.py` to look like this: - -```python -from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp - -config = ContinueConfig( - ... - models=Models( - default=LlamaCpp( - max_context_length=4096, - server_url="http://localhost:8080") - ) -) -``` - -### Together - -The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this: - -```python -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.together import TogetherLLM - -config = ContinueConfig( - ... - models=Models( - default=TogetherLLM( - api_key="", - model="togethercomputer/llama-2-13b-chat" - ) - ) -) -``` - -### Replicate - -Replicate is a great option for newly released language models or models that you've deployed through their platform. Sign up for an account [here](https://replicate.ai/), copy your API key, and then select any model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models). Change `~/.continue/config.py` to look like this: - -```python -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM - -config = ContinueConfig( - ... - models=Models( - default=ReplicateLLM( - model="replicate/codellama-13b-instruct:da5676342de1a5a335b848383af297f592b816b950a43d251a0a9edd0113604b", - api_key="my-replicate-api-key") - ) -) -``` - -If you don't specify the `model` parameter, it will default to `replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781`. - -### Hugging Face - -Hugging Face Inference API is a great option for newly released language models. 
Sign up for an account and add billing [here](https://huggingface.co/settings/billing), access the Inference Endpoints [here](https://ui.endpoints.huggingface.co), click on “New endpoint”, and fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and then deploy your model by clicking “Create Endpoint”. Change `~/.continue/config.py` to look like this: - -```python -from continuedev.src.continuedev.core.models import Models -from continuedev.src.continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI - -config = ContinueConfig( - ... - models=Models( - default=HuggingFaceInferenceAPI( - endpoint_url: "", - hf_token: "", - ) -) -``` - -### Self-hosting an open-source model - -If you want to self-host on Colab, RunPod, HuggingFace, Haven, or another hosting provider you will need to wire up a new LLM class. It only needs to implement 3 primary methods: `stream_complete`, `complete`, and `stream_chat`, and you can see examples in `continuedev/src/continuedev/libs/llm`. - -If by chance the provider has the exact same API interface as OpenAI, the `GGML` class will work for you out of the box, after changing the endpoint at the top of the file. - -### Azure OpenAI Service - -If you'd like to use OpenAI models but are concerned about privacy, you can use the Azure OpenAI service, which is GDPR and HIPAA compliant. After applying for access [here](https://azure.microsoft.com/en-us/products/ai-services/openai-service), you will typically hear back within only a few days. Once you have access, instantiate the model like so: - -```python -from continuedev.src.continuedev.libs.llm.openai import OpenAI - -config = ContinueConfig( - ... - models=Models( - default=OpenAI( - api_key="my-api-key", - model="gpt-3.5-turbo", - api_base="https://my-azure-openai-instance.openai.azure.com/", - engine="my-azure-openai-deployment", - api_version="2023-03-15-preview", - api_type="azure" - ) - ) -) -``` - -The easiest way to find this information is from the chat playground in the Azure OpenAI portal. Under the "Chat Session" section, click "View Code" to see each of these parameters. Finally, find one of your Azure OpenAI keys and enter it in the VS Code settings under `continue.OPENAI_API_KEY`. - -Note that you can also use these parameters for uses other than Azure, such as self-hosting a model. - -## Customize System Message - -You can write your own system message, a set of instructions that will always be top-of-mind for the LLM, by setting the `system_message` property to any string. For example, you might request "Please make all responses as concise as possible and never repeat something you have already explained." - -System messages can also reference files. For example, if there is a markdown file (e.g. at `/Users/nate/Documents/docs/reference.md`) you'd like the LLM to know about, you can reference it with [Mustache](http://mustache.github.io/mustache.5.html) templating like this: "Please reference this documentation: {{ Users/nate/Documents/docs/reference.md }}". As of now, you must use an absolute path. - -## Custom Commands with Natural Language Prompts - -You can add custom slash commands by adding a `CustomCommand` object to the `custom_commands` property. 
Each `CustomCommand` has - -- `name`: the name of the command, which will be invoked with `/name` -- `description`: a short description of the command, which will appear in the dropdown -- `prompt`: a set of instructions to the LLM, which will be shown in the prompt - -Custom commands are great when you are frequently reusing a prompt. For example, if you've crafted a great prompt and frequently ask the LLM to check for mistakes in your code, you could add a command like this: - -```python -config = ContinueConfig( - ... - custom_commands=[ - CustomCommand( - name="check", - description="Check for mistakes in my code", - prompt=dedent("""\ - Please read the highlighted code and check for any mistakes. You should look for the following, and be extremely vigilant: - - Syntax errors - - Logic errors - - Security vulnerabilities - - Performance issues - - Anything else that looks wrong - - Once you find an error, please explain it as clearly as possible, but without using extra words. For example, instead of saying "I think there is a syntax error on line 5", you should say "Syntax error on line 5". Give your answer as one bullet point per mistake found.""") - ) - ] -) -``` - -## Custom Slash Commands - -If you want to go a step further than writing custom commands with natural language, you can use a `SlashCommand` to run an arbitrary Python function, with access to the Continue SDK. To do this, create a subclass of `Step` with the `run` method implemented, and this is the code that will run when you call the command. For example, here is a step that generates a commit message: - -```python -class CommitMessageStep(Step): - async def run(self, sdk: ContinueSDK): - - # Get the root directory of the workspace - dir = sdk.ide.workspace_directory - - # Run git diff in that directory - diff = subprocess.check_output( - ["git", "diff"], cwd=dir).decode("utf-8") - - # Ask the LLM to write a commit message, - # and set it as the description of this step - self.description = await sdk.models.default.complete( - f"{diff}\n\nWrite a short, specific (less than 50 chars) commit message about the above changes:") - -config=ContinueConfig( - ... - slash_commands=[ - ... - SlashCommand( - name="commit", - description="Generate a commit message for the current changes", - step=CommitMessageStep, - ) - ] -) -``` - -## Temperature - -Set `temperature` to any value between 0 and 1. Higher values will make the LLM more creative, while lower values will make it more predictable. The default is 0.5. - -## Context Providers - -When you type '@' in the Continue text box, it will display a dropdown of items that can be selected to include in your message as context. For example, you might want to reference a GitHub Issue, file, or Slack thread. All of these options are provided by a `ContextProvider` class, and we make it easy to write your own or use our builtin options. See the [Context Providers](./context-providers.md) page for more info. - -## Custom Policies - -Policies can be used to deeply change the behavior of Continue, or to build agents that take longer sequences of actions on their own. The [`DefaultPolicy`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/plugins/policies/default.py) handles the parsing of slash commands, and otherwise always chooses the `SimpleChatStep`, but you could customize by for example always taking a "review" step after editing code. 
To do so, create a new `Policy` subclass that implements the `next` method: - -```python -class ReviewEditsPolicy(Policy): - - default_step: Step = SimpleChatStep() - - def next(self, config: ContinueConfig, history: History) -> Step: - # Get the last step - last_step = history.get_current() - - # If it edited code, then review the changes - if isinstance(last_step, EditHighlightedCodeStep): - return ReviewStep() # Not implemented - - # Otherwise, choose between EditHighlightedCodeStep and SimpleChatStep based on slash command - if observation is not None and isinstance(last_step.observation, UserInputObservation): - if user_input.startswith("/edit"): - return EditHighlightedCodeStep(user_input=user_input[5:]) - else: - return SimpleChatStep() - - return self.default_step.copy() - - # Don't do anything until the user enters something else - return None -``` - -Then, in `~/.continue/config.py`, override the default policy: - -```python -config=ContinueConfig( - ... - policy_override=ReviewEditsPolicy() -) -``` diff --git a/docs/docs/customization/intro.md b/docs/docs/customization/intro.md deleted file mode 100644 index a82b5dbf..00000000 --- a/docs/docs/customization/intro.md +++ /dev/null @@ -1,10 +0,0 @@ -# Customizing Continue - -Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\.continue\config.py` for Windows) on your machine. This file is created the first time you run Continue. - -Currently, you can customize the following: - -- [Models](./models.md) - Use Continue with any LLM, including local models, Azure OpenAI service, and any OpenAI-compatible API. -- [Context Providers](./context-providers.md) - Define which sources you want to collect context from to share with the LLM. Just type '@' to easily add attachments to your prompt. -- [Slash Commands](./slash-commands.md) - Call custom prompts or programs written with our SDK by typing `/` in the prompt. -- [Other Configuration](./other-configuration.md) - Configure other settings like the system message, temperature, and more. diff --git a/docs/docs/customization/models.md b/docs/docs/customization/models.md index e69de29b..93ea2a57 100644 --- a/docs/docs/customization/models.md +++ b/docs/docs/customization/models.md @@ -0,0 +1,92 @@ +# Models + +Continue makes it easy to swap out different LLM providers. Once you've added any of these to your `config.py`, you will be able to switch between them with the model selection dropdown. + +Commercial Models + +- [MaybeProxyOpenAI](#adding-an-openai-api-key) (default) - Use gpt-4 or gpt-3.5-turbo free with our API key, or with your API key. gpt-4 is probably the most capable model of all options. +- [OpenAI](#azure-openai-service) - Use any OpenAI model with your own key. Can also change the base URL if you have a server that uses the OpenAI API format, including using the Azure OpenAI service, LocalAI, etc. +- [AnthropicLLM](#claude-2) - Use claude-2 with your Anthropic API key. Claude 2 is also highly capable, and has a 100,000 token context window. + +Local Models + +- [Ollama](#run-llama-2-locally-with-ollama) - If you have a Mac, Ollama is the simplest way to run open-source models like Code Llama. +- [OpenAI](#local-models-with-openai-compatible-server) - If you have access to an OpenAI-compatible server (e.g. llama-cpp-python, LocalAI, FastChat, TextGenWebUI, etc.), you can use the `OpenAI` class and just change the base URL. 
+- [GGML](#local-models-with-ggml) - An alternative way to connect to OpenAI-compatible servers. Will use `aiohttp` directly instead of the `openai` Python package. +- [LlamaCpp](#llamacpp) - Build llama.cpp from source and use its built-in API server. + +Open-Source Models (not local) + +- [TogetherLLM](#together) - Use any model from the [Together Models list](https://docs.together.ai/docs/models-inference) with your Together API key. +- [ReplicateLLM](#replicate) - Use any open-source model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models) with your Replicate API key. +- [HuggingFaceInferenceAPI](#huggingface) - Use any open-source model from the [Hugging Face Inference API](https://huggingface.co/inference-api) with your Hugging Face token. + +## Change the default LLM + +In `config.py`, you'll find the `models` property: + +```python +from continuedev.src.continuedev.core.models import Models + +config = ContinueConfig( + ... + models=Models( + default=MaybeProxyOpenAI(model="gpt-4"), + medium=MaybeProxyOpenAI(model="gpt-3.5-turbo") + ) +) +``` + +The `default` and `medium` properties are different _model roles_. This allows different models to be used for different tasks. The available roles are `default`, `small`, `medium`, `large`, `edit`, and `chat`. `edit` is used when you use the '/edit' slash command, `chat` is used for all chat responses, and `medium` is used for summarizing. If not set, all roles will fall back to `default`. The values of these fields must be of the [`LLM`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/llm/__init__.py) class, which implements methods for retrieving and streaming completions from an LLM. + +Below, we describe the `LLM` classes available in the Continue core library, and how they can be used. + +## Adding an OpenAI API key + +## claude-2 + +## Run Llama-2 locally with Ollama + +## Local models with OpenAI-compatible server + +## Local models with ggml + +## Llama.cpp + +## Together + +## Replicate + +## Hugging Face + +## Self-hosting an open-source model + +If you want to self-host on Colab, RunPod, HuggingFace, Haven, or another hosting provider you will need to wire up a new LLM class. It only needs to implement 3 primary methods: `stream_complete`, `complete`, and `stream_chat`, and you can see examples in `continuedev/src/continuedev/libs/llm`. + +If by chance the provider has the exact same API interface as OpenAI, the `GGML` class will work for you out of the box, after changing the endpoint at the top of the file. + +## Azure OpenAI Service + +If you'd like to use OpenAI models but are concerned about privacy, you can use the Azure OpenAI service, which is GDPR and HIPAA compliant. After applying for access [here](https://azure.microsoft.com/en-us/products/ai-services/openai-service), you will typically hear back within only a few days. Once you have access, instantiate the model like so: + +```python +from continuedev.src.continuedev.libs.llm.openai import OpenAI + +config = ContinueConfig( + ... + models=Models( + default=OpenAI( + api_key="my-api-key", + model="gpt-3.5-turbo", + api_base="https://my-azure-openai-instance.openai.azure.com/", + engine="my-azure-openai-deployment", + api_version="2023-03-15-preview", + api_type="azure" + ) + ) +) +``` + +The easiest way to find this information is from the chat playground in the Azure OpenAI portal. Under the "Chat Session" section, click "View Code" to see each of these parameters. 
Finally, find one of your Azure OpenAI keys and enter it in the VS Code settings under `continue.OPENAI_API_KEY`. + +Note that you can also use these parameters for uses other than Azure, such as self-hosting a model. diff --git a/docs/docs/customization/other-configuration.md b/docs/docs/customization/other-configuration.md index 088b2aac..8049e8d6 100644 --- a/docs/docs/customization/other-configuration.md +++ b/docs/docs/customization/other-configuration.md @@ -1 +1,52 @@ # Other Configuration + +See the [ContinueConfig Reference](../reference/config) for the full list of configuration options. + +## Customize System Message + +You can write your own system message, a set of instructions that will always be top-of-mind for the LLM, by setting the `system_message` property to any string. For example, you might request "Please make all responses as concise as possible and never repeat something you have already explained." + +System messages can also reference files. For example, if there is a markdown file (e.g. at `/Users/nate/Documents/docs/reference.md`) you'd like the LLM to know about, you can reference it with [Mustache](http://mustache.github.io/mustache.5.html) templating like this: "Please reference this documentation: {{ Users/nate/Documents/docs/reference.md }}". As of now, you must use an absolute path. + +## Temperature + +Set `temperature` to any value between 0 and 1. Higher values will make the LLM more creative, while lower values will make it more predictable. The default is 0.5. + +## Custom Policies + +Policies can be used to deeply change the behavior of Continue, or to build agents that take longer sequences of actions on their own. The [`DefaultPolicy`](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/plugins/policies/default.py) handles the parsing of slash commands, and otherwise always chooses the `SimpleChatStep`, but you could customize by for example always taking a "review" step after editing code. To do so, create a new `Policy` subclass that implements the `next` method: + +```python +class ReviewEditsPolicy(Policy): + + default_step: Step = SimpleChatStep() + + def next(self, config: ContinueConfig, history: History) -> Step: + # Get the last step + last_step = history.get_current() + + # If it edited code, then review the changes + if isinstance(last_step, EditHighlightedCodeStep): + return ReviewStep() # Not implemented + + # Otherwise, choose between EditHighlightedCodeStep and SimpleChatStep based on slash command + if observation is not None and isinstance(last_step.observation, UserInputObservation): + if user_input.startswith("/edit"): + return EditHighlightedCodeStep(user_input=user_input[5:]) + else: + return SimpleChatStep() + + return self.default_step.copy() + + # Don't do anything until the user enters something else + return None +``` + +Then, in `~/.continue/config.py`, override the default policy: + +```python +config=ContinueConfig( + ... + policy_override=ReviewEditsPolicy() +) +``` diff --git a/docs/docs/customization/overview.md b/docs/docs/customization/overview.md new file mode 100644 index 00000000..0d433cd6 --- /dev/null +++ b/docs/docs/customization/overview.md @@ -0,0 +1,10 @@ +# Overview + +Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\.continue\config.py` for Windows) on your machine. This file is created the first time you run Continue. 
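+
+As a rough sketch (every property shown here is optional, and the generated `config.py` already contains the imports it needs), a lightly customized `config.py` might look something like this:
+
+```python
+from continuedev.src.continuedev.core.models import Models
+from continuedev.src.continuedev.libs.llm.maybe_proxy_openai import MaybeProxyOpenAI
+
+config = ContinueConfig(
+    # Which LLM to use for each model role
+    models=Models(
+        default=MaybeProxyOpenAI(model="gpt-4"),
+        medium=MaybeProxyOpenAI(model="gpt-3.5-turbo")
+    ),
+    # Instructions kept top-of-mind for the LLM in every request
+    system_message="Please make all responses as concise as possible.",
+    # Between 0 and 1: higher values make completions more creative
+    temperature=0.5,
+)
+```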
+ +Currently, you can customize the following: + +- [Models](./models.md) - Use Continue with any LLM, including local models, Azure OpenAI service, any OpenAI-compatible API, and more. +- [Context Providers](./context-providers.md) - Just type '@' to easily add attachments to your prompt. Define which sources you want to easily reference, including GitHub Issues, terminal output, and preset URLs. +- [Slash Commands](./slash-commands.md) - Call custom prompts or programs written with our SDK by typing `/`. +- [Other Configuration](./other-configuration.md) - Configure other settings like the system message and temperature. diff --git a/docs/docs/customization/slash-commands.md b/docs/docs/customization/slash-commands.md index e69de29b..17f07075 100644 --- a/docs/docs/customization/slash-commands.md +++ b/docs/docs/customization/slash-commands.md @@ -0,0 +1,72 @@ +# Slash Commands + +Slash commands are shortcuts that can be activated by prefacing your input with '/'. For example, the built-in '/edit' slash command let you stream edits directly into your editor. + +There are two ways to add custom slash commands: + +1. With natural language prompts - this is simpler and only requires writing a string or string template. +2. With a custom `Step` - this gives you full access to the Continue SDK and allows you to write arbitrary Python code. + +## "Custom Commands" (Use Natural Language) + +You can add custom slash commands by adding a `CustomCommand` object to the `custom_commands` property. Each `CustomCommand` has + +- `name`: the name of the command, which will be invoked with `/name` +- `description`: a short description of the command, which will appear in the dropdown +- `prompt`: a set of instructions to the LLM, which will be shown in the prompt + +Custom commands are great when you are frequently reusing a prompt. For example, if you've crafted a great prompt and frequently ask the LLM to check for mistakes in your code, you could add a command like this: + +```python +config = ContinueConfig( + ... + custom_commands=[ + CustomCommand( + name="check", + description="Check for mistakes in my code", + prompt=dedent("""\ + Please read the highlighted code and check for any mistakes. You should look for the following, and be extremely vigilant: + - Syntax errors + - Logic errors + - Security vulnerabilities + - Performance issues + - Anything else that looks wrong + + Once you find an error, please explain it as clearly as possible, but without using extra words. For example, instead of saying "I think there is a syntax error on line 5", you should say "Syntax error on line 5". Give your answer as one bullet point per mistake found.""") + ) + ] +) +``` + +## Custom Slash Commands + +If you want to go a step further than writing custom commands with natural language, you can use a `SlashCommand` to run an arbitrary Python function, with access to the Continue SDK. To do this, create a subclass of `Step` with the `run` method implemented, and this is the code that will run when you call the command. 
For example, here is a step that generates a commit message: + +```python +class CommitMessageStep(Step): + async def run(self, sdk: ContinueSDK): + + # Get the root directory of the workspace + dir = sdk.ide.workspace_directory + + # Run git diff in that directory + diff = subprocess.check_output( + ["git", "diff"], cwd=dir).decode("utf-8") + + # Ask the LLM to write a commit message, + # and set it as the description of this step + self.description = await sdk.models.default.complete( + f"{diff}\n\nWrite a short, specific (less than 50 chars) commit message about the above changes:") + +config=ContinueConfig( + ... + slash_commands=[ + ... + SlashCommand( + name="commit", + description="Generate a commit message for the current changes", + step=CommitMessageStep, + ) + ] +) +``` diff --git a/docs/docs/development-data.md b/docs/docs/development-data.md new file mode 100644 index 00000000..267a746e --- /dev/null +++ b/docs/docs/development-data.md @@ -0,0 +1,5 @@ +# 🧑‍💻 Development Data + +When you use Continue, you automatically collect data on how you build software. By default, this development data is saved to `.continue/dev_data` on your local machine. When combined with the code that you ultimately commit, it can be used to improve the LLM that you or your team use (if you allow). + +You can read more about how development data is generated as a byproduct of LLM-aided development and why we believe that you should start collecting it now: [It’s time to collect data on how you build software](https://blog.continue.dev/its-time-to-collect-data-on-how-you-build-software) diff --git a/docs/docs/getting-started.md b/docs/docs/getting-started.md deleted file mode 100644 index 18d99f08..00000000 --- a/docs/docs/getting-started.md +++ /dev/null @@ -1,9 +0,0 @@ -# Getting started - -1. Click `Install` on the **[Continue extension in the Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=Continue.continue)** - -2. This will open the Continue extension page in VS Code, where you will need to click `Install` again - -3. Once you do this, you will see the Continue logo show up on the left side bar. If you click it, then the Continue extension will then open up: - -![vscode-install](/img/continue-screenshot.png) \ No newline at end of file diff --git a/docs/docs/how-continue-works.md b/docs/docs/how-continue-works.md index 06aada52..07d16474 100644 --- a/docs/docs/how-continue-works.md +++ b/docs/docs/how-continue-works.md @@ -1,4 +1,4 @@ -# How Continue works +# ⚙️ How Continue works ![Continue Architecture Diagram](/img/continue-architecture.png) @@ -10,7 +10,6 @@ The `Continue` library consists of an **SDK**, a **GUI**, and a **Server** that 3. The **Server** is responsible for connecting the GUI and SDK to the IDE as well as deciding which steps to take next. - ## Running the server manually If you would like to run the Continue server manually, rather than allowing the VS Code to set it up, you can follow these steps: @@ -25,7 +24,7 @@ If you would like to run the Continue server manually, rather than allowing the (official instructions [here](https://python-poetry.org/docs/#installing-with-the-official-installer)) 4. `poetry shell` to activate the virtual environment 5. Either: - + a) To run without the debugger: `cd ..` and `python3 -m continuedev.src.continuedev.server.main` b) To run with the debugger: Open a VS Code window with `continue` as the root folder. 
Ensure that you have selected the Python interpreter from virtual environment, then use the `.vscode/launch.json` we have provided to start the debugger. diff --git a/docs/docs/how-to-use-continue.md b/docs/docs/how-to-use-continue.md index 1fd8e99c..bf61a033 100644 --- a/docs/docs/how-to-use-continue.md +++ b/docs/docs/how-to-use-continue.md @@ -1,4 +1,4 @@ -# How to use Continue +# 🧑‍🎓 How to use Continue :::info **TL;DR: Using LLMs as you code can accelerate you if you leverage them in the right situations. However, they can also cause you to get lost and confused if you trust them when you should not. This page outlines when and where we think you should and should not use Continue.** @@ -36,6 +36,7 @@ Here are tasks that Continue excels at helping you complete: Continue works well in situations where find and replace does not work (i.e. “/edit change all of these to be like that”) Examples + - "/edit Use 'Union' instead of a vertical bar here" - “/edit Make this use more descriptive variable names” @@ -44,6 +45,7 @@ Examples Continue can help you get started building React components, Python scripts, Shell scripts, Makefiles, unit tests, etc. Examples + - “/edit write a python script to get posthog events" - “/edit add a react component for syntax highlighted code" @@ -52,6 +54,7 @@ Examples Continue can go even further. For example, it can help build the scaffolding for a Python package, which includes a typer cli app to sort the arguments and print them back out. Examples + - “/edit use this schema to write me a SQL query that gets recently churned users” - “/edit create a shell script to back up my home dir to /tmp/" @@ -60,6 +63,7 @@ Examples After selecting the code section(s), try to refactor it with Continue (e.g “/edit change the function to work like this” or “/edit do this everywhere”) Examples + - “/edit migrate this digital ocean terraform file into one that works for GCP” - “/edit rewrite this function to be async” @@ -68,6 +72,7 @@ Examples If you don't understand how some code works, highlight it and ask "how does this code work?" Examples + - “where in the page should I be making this request to the backend?” - “how can I communicate between these iframes?” @@ -80,6 +85,7 @@ Continue can also help explain errors / exceptions and offer possible solutions. Instead of switching windows and getting distracted, you can ask things like "How do I find running process on port 8000?" Examples + - "what is the load_dotenv library name?" - "how do I find running process on port 8000?" @@ -88,6 +94,7 @@ Examples Instead of leaving your IDE, you can ask open-ended questions that you don't expect to turn into multi-turn conversations. Examples + - “how can I set up a Prisma schema that cascades deletes?” - "what is the difference between dense and sparse embeddings?" @@ -96,6 +103,7 @@ Examples You can highlight an entire file and ask Continue to improve it as long as the file is not too large. Examples + - “/edit here is a connector for postgres, now write one for kafka” - "/edit Rewrite this API call to grab all pages" @@ -108,6 +116,7 @@ Similar to how you would make changes manually, focus on one file at a time. But There are many more tasks that Continue can help you complete. Typically, these will be tasks that don't involve too many steps to complete. Examples + - “/edit make an IAM policy that creates a user with read-only access to S3” - “/edit change this plot into a bar chart in this dashboard component” @@ -137,4 +146,4 @@ If you highlight very long lines (e.g. 
a complex SVG), you might also run into i ### Tasks with many steps -There are other tasks that Continue won't be able to take on entirely at once. However, typically, if you figure out how to break the task into sub-tasks, you can get help from Continue with those. \ No newline at end of file +There are other tasks that Continue won't be able to take on entirely at once. However, typically, if you figure out how to break the task into sub-tasks, you can get help from Continue with those. diff --git a/docs/docs/quickstart.md b/docs/docs/quickstart.md new file mode 100644 index 00000000..af2cd29d --- /dev/null +++ b/docs/docs/quickstart.md @@ -0,0 +1,9 @@ +# ⚡️ Quickstart + +1. Click `Install` on the **[Continue extension in the Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=Continue.continue)** + +2. This will open the Continue extension page in VS Code, where you will need to click `Install` again + +3. Once you do this, you will see the Continue logo show up on the left side bar. If you click it, then the Continue extension will then open up: + +![vscode-install](/img/continue-screenshot.png) diff --git a/docs/docs/reference/Context Providers/diff.md b/docs/docs/reference/Context Providers/diff.md new file mode 100644 index 00000000..a0aaedcf --- /dev/null +++ b/docs/docs/reference/Context Providers/diff.md @@ -0,0 +1,17 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# DiffContextProvider + +The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'. +When you type '@', the context provider will be asked to populate a list of options. +These options will be updated on each keystroke. +When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object). + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/diff.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Context Providers/file.md b/docs/docs/reference/Context Providers/file.md new file mode 100644 index 00000000..d1ef0761 --- /dev/null +++ b/docs/docs/reference/Context Providers/file.md @@ -0,0 +1,14 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# FileContextProvider + +The FileContextProvider is a ContextProvider that allows you to search files in the open workspace. + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/file.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Context Providers/filetree.md b/docs/docs/reference/Context Providers/filetree.md new file mode 100644 index 00000000..07c39630 --- /dev/null +++ b/docs/docs/reference/Context Providers/filetree.md @@ -0,0 +1,17 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# FileTreeContextProvider + +The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'. +When you type '@', the context provider will be asked to populate a list of options. +These options will be updated on each keystroke. +When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object). 
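+
+A context provider is enabled by adding it to the `context_providers` list in `~/.continue/config.py`. Below is a rough sketch of how this one might be registered; the import path is an assumption and may differ in your version:
+
+```python
+# NOTE: assumed import path; adjust to match your installation
+from continuedev.src.continuedev.plugins.context_providers.filetree import FileTreeContextProvider
+
+config = ContinueConfig(
+    ...
+    context_providers=[
+        FileTreeContextProvider()
+    ]
+)
+```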
+ +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/filetree.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Context Providers/github.md b/docs/docs/reference/Context Providers/github.md new file mode 100644 index 00000000..45482957 --- /dev/null +++ b/docs/docs/reference/Context Providers/github.md @@ -0,0 +1,15 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# GitHubIssuesContextProvider + +The GitHubIssuesContextProvider is a ContextProvider +that allows you to search GitHub issues in a repo. + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/github.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Context Providers/google.md b/docs/docs/reference/Context Providers/google.md new file mode 100644 index 00000000..6538802e --- /dev/null +++ b/docs/docs/reference/Context Providers/google.md @@ -0,0 +1,17 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# GoogleContextProvider + +The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'. +When you type '@', the context provider will be asked to populate a list of options. +These options will be updated on each keystroke. +When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object). + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/google.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Context Providers/intro.md b/docs/docs/reference/Context Providers/intro.md deleted file mode 100644 index 1e0981f1..00000000 --- a/docs/docs/reference/Context Providers/intro.md +++ /dev/null @@ -1 +0,0 @@ -# Intro diff --git a/docs/docs/reference/Context Providers/search.md b/docs/docs/reference/Context Providers/search.md new file mode 100644 index 00000000..5276daa2 --- /dev/null +++ b/docs/docs/reference/Context Providers/search.md @@ -0,0 +1,17 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# SearchContextProvider + +The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'. +When you type '@', the context provider will be asked to populate a list of options. +These options will be updated on each keystroke. +When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object). + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/search.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Context Providers/terminal.md b/docs/docs/reference/Context Providers/terminal.md new file mode 100644 index 00000000..37c70ab4 --- /dev/null +++ b/docs/docs/reference/Context Providers/terminal.md @@ -0,0 +1,17 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# TerminalContextProvider + +The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'. +When you type '@', the context provider will be asked to populate a list of options. +These options will be updated on each keystroke. 
+When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object). + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/terminal.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Context Providers/url.md b/docs/docs/reference/Context Providers/url.md new file mode 100644 index 00000000..b0cfac07 --- /dev/null +++ b/docs/docs/reference/Context Providers/url.md @@ -0,0 +1,17 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# URLContextProvider + +The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'. +When you type '@', the context provider will be asked to populate a list of options. +These options will be updated on each keystroke. +When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object). + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/url.py) + +## Properties + + + +### Inherited Properties + diff --git a/docs/docs/reference/Models/anthropic.md b/docs/docs/reference/Models/anthropic.md index 1aa31324..8fec179a 100644 --- a/docs/docs/reference/Models/anthropic.md +++ b/docs/docs/reference/Models/anthropic.md @@ -2,10 +2,27 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; # AnthropicLLM +Import the `AnthropicLLM` class and set it as the default model: +```python +from continuedev.src.continuedev.libs.llm.anthropic import AnthropicLLM + +config = ContinueConfig( + ... + models=Models( + default=AnthropicLLM(api_key="", model="claude-2") + ) +) +``` + +Claude 2 is not yet publicly released. You can request early access [here](https://www.anthropic.com/earlyaccess). [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/anthropic.py) ## Properties - \ No newline at end of file + + +### Inherited Properties + + \ No newline at end of file diff --git a/docs/docs/reference/Models/ggml.md b/docs/docs/reference/Models/ggml.md index dafc8870..fbaf12d0 100644 --- a/docs/docs/reference/Models/ggml.md +++ b/docs/docs/reference/Models/ggml.md @@ -2,10 +2,29 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; # GGML +See our [5 minute quickstart](https://github.com/continuedev/ggml-server-example) to run any model locally with ggml. While these models don't yet perform as well, they are free, entirely private, and run offline. +Once the model is running on localhost:8000, change `~/.continue/config.py` to look like this: + +```python +from continuedev.src.continuedev.libs.llm.ggml import GGML + +config = ContinueConfig( + ... 
+    models=Models(
+        default=GGML(
+            max_context_length=2048,
+            server_url="http://localhost:8000")
+    )
+)
+```
 
 [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/ggml.py)
 
 ## Properties
 
-
\ No newline at end of file
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/hf_inference_api.md b/docs/docs/reference/Models/hf_inference_api.md
new file mode 100644
index 00000000..605813be
--- /dev/null
+++ b/docs/docs/reference/Models/hf_inference_api.md
@@ -0,0 +1,30 @@
+import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
+
+# HuggingFaceInferenceAPI
+
+Hugging Face Inference API is a great option for newly released language models. Sign up for an account and add billing [here](https://huggingface.co/settings/billing). Then open the Inference Endpoints page [here](https://ui.endpoints.huggingface.co), click on “New endpoint”, fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and deploy your model by clicking “Create Endpoint”. Finally, change `~/.continue/config.py` to look like this:
+
+```python
+from continuedev.src.continuedev.core.models import Models
+from continuedev.src.continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=HuggingFaceInferenceAPI(
+            endpoint_url="",
+            hf_token="",
+        )
+    )
+)
+```
+
+[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/hf_inference_api.py)
+
+## Properties
+
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/hf_tgi.md b/docs/docs/reference/Models/hf_tgi.md
new file mode 100644
index 00000000..b6eb61d7
--- /dev/null
+++ b/docs/docs/reference/Models/hf_tgi.md
@@ -0,0 +1,15 @@
+import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
+
+# HuggingFaceTGI
+
+
+
+[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/hf_tgi.py)
+
+## Properties
+
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/llamacpp.md b/docs/docs/reference/Models/llamacpp.md
index 7ce75574..0bb06e74 100644
--- a/docs/docs/reference/Models/llamacpp.md
+++ b/docs/docs/reference/Models/llamacpp.md
@@ -2,10 +2,33 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 # LlamaCpp
 
+Run the llama.cpp server binary to start the API server. If running on a remote server, be sure to set host to 0.0.0.0:
+
+```shell
+.\server.exe -c 4096 --host 0.0.0.0 -t 16 --mlock -m models\meta\llama\codellama-7b-instruct.Q8_0.gguf
+```
+
+After it's up and running, change `~/.continue/config.py` to look like this:
+
+```python
+from continuedev.src.continuedev.libs.llm.llamacpp import LlamaCpp
+
+config = ContinueConfig(
+    ...
+ models=Models( + default=LlamaCpp( + max_context_length=4096, + server_url="http://localhost:8080") + ) +) +``` [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/llamacpp.py) ## Properties - \ No newline at end of file + + +### Inherited Properties + + \ No newline at end of file diff --git a/docs/docs/reference/Models/maybe_proxy_openai.md b/docs/docs/reference/Models/maybe_proxy_openai.md new file mode 100644 index 00000000..22ac2382 --- /dev/null +++ b/docs/docs/reference/Models/maybe_proxy_openai.md @@ -0,0 +1,36 @@ +import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; + +# MaybeProxyOpenAI + +With the `MaybeProxyOpenAI` `LLM`, new users can try out Continue with GPT-4 using a proxy server that securely makes calls to OpenAI using our API key. Continue should just work the first time you install the extension in VS Code. + +Once you are using Continue regularly though, you will need to add an OpenAI API key that has access to GPT-4 by following these steps: + +1. Copy your API key from https://platform.openai.com/account/api-keys +2. Open `~/.continue/config.py`. You can do this by using the '/config' command in Continue +3. Change the default LLMs to look like this: + +```python +API_KEY = "" +config = ContinueConfig( + ... + models=Models( + default=MaybeProxyOpenAI(model="gpt-4", api_key=API_KEY), + medium=MaybeProxyOpenAI(model="gpt-3.5-turbo", api_key=API_KEY) + ) +) +``` + +The `MaybeProxyOpenAI` class will automatically switch to using your API key instead of ours. If you'd like to explicitly use one or the other, you can use the `ProxyServer` or `OpenAI` classes instead. + +These classes support any models available through the OpenAI API, assuming your API key has access, including "gpt-4", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", and "gpt-4-32k". + +[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/maybe_proxy_openai.py) + +## Properties + + + +### Inherited Properties + + \ No newline at end of file diff --git a/docs/docs/reference/Models/ollama.md b/docs/docs/reference/Models/ollama.md index ef058119..9792ee52 100644 --- a/docs/docs/reference/Models/ollama.md +++ b/docs/docs/reference/Models/ollama.md @@ -2,10 +2,25 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; # Ollama +[Ollama](https://ollama.ai/) is a Mac application that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class: +```python +from continuedev.src.continuedev.libs.llm.ollama import Ollama + +config = ContinueConfig( + ... + models=Models( + default=Ollama(model="llama2") + ) +) +``` [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/ollama.py) ## Properties - \ No newline at end of file + + +### Inherited Properties + + \ No newline at end of file diff --git a/docs/docs/reference/Models/openai.md b/docs/docs/reference/Models/openai.md index d325ca2f..0ade1a8f 100644 --- a/docs/docs/reference/Models/openai.md +++ b/docs/docs/reference/Models/openai.md @@ -4,10 +4,36 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx'; The OpenAI class can be used to access OpenAI models like gpt-4 and gpt-3.5-turbo. 
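+
+To use your own OpenAI account, a minimal configuration (assuming an API key with access to the chosen model) might look like this:
+
+```python
+from continuedev.src.continuedev.libs.llm.openai import OpenAI
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=OpenAI(api_key="my-api-key", model="gpt-4")
+    )
+)
+```
+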
diff --git a/docs/docs/reference/Models/ollama.md b/docs/docs/reference/Models/ollama.md
index ef058119..9792ee52 100644
--- a/docs/docs/reference/Models/ollama.md
+++ b/docs/docs/reference/Models/ollama.md
@@ -2,10 +2,25 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 # Ollama
 
+[Ollama](https://ollama.ai/) is a Mac application that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class:
+
+```python
+from continuedev.src.continuedev.libs.llm.ollama import Ollama
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=Ollama(model="llama2")
+    )
+)
+```

 [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/ollama.py)

 ## Properties

-
\ No newline at end of file
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/openai.md b/docs/docs/reference/Models/openai.md
index d325ca2f..0ade1a8f 100644
--- a/docs/docs/reference/Models/openai.md
+++ b/docs/docs/reference/Models/openai.md
@@ -4,10 +4,36 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 The OpenAI class can be used to access OpenAI models like gpt-4 and gpt-3.5-turbo.
 
-If you are running a local model with an OpenAI-compatible API, you can also use the OpenAI class by changing the `api_base` argument.
+If you are locally serving a model that uses an OpenAI-compatible server, you can simply change the `api_base` in the `OpenAI` class like this:
+
+```python
+from continuedev.src.continuedev.libs.llm.openai import OpenAI
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=OpenAI(
+            api_key="EMPTY",
+            model="",
+            api_base="http://localhost:8000",  # change to your server
+        )
+    )
+)
+```
+
+Options for serving models locally with an OpenAI-compatible server include:
+
+- [text-gen-webui](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai#setup--installation)
+- [FastChat](https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md)
+- [LocalAI](https://localai.io/basics/getting_started/)
+- [llama-cpp-python](https://github.com/abetlen/llama-cpp-python#web-server)

 [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/openai.py)

 ## Properties

-
\ No newline at end of file
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/queued.md b/docs/docs/reference/Models/queued.md
index 6888a4e5..e253da09 100644
--- a/docs/docs/reference/Models/queued.md
+++ b/docs/docs/reference/Models/queued.md
@@ -2,10 +2,27 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 # QueuedLLM
 
+QueuedLLM exists to make up for LLM servers that cannot handle multiple requests at once. It uses a lock to ensure that only one request is being processed at a time.
+
+If you are already using another LLM class and are experiencing this problem, you can just wrap it with the QueuedLLM class like this:
+
+```python
+from continuedev.src.continuedev.libs.llm.queued import QueuedLLM
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=QueuedLLM(llm=<LLM_TO_WRAP>)
+    )
+)
+```

 [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/queued.py)

 ## Properties

-
\ No newline at end of file
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/replicate.md b/docs/docs/reference/Models/replicate.md
index 4f05cdfa..0c93a758 100644
--- a/docs/docs/reference/Models/replicate.md
+++ b/docs/docs/reference/Models/replicate.md
@@ -2,10 +2,30 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 # ReplicateLLM
 
+Replicate is a great option for newly released language models or models that you've deployed through their platform. Sign up for an account [here](https://replicate.ai/), copy your API key, and then select any model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models). Change `~/.continue/config.py` to look like this:
+
+```python
+from continuedev.src.continuedev.core.models import Models
+from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=ReplicateLLM(
+            model="replicate/codellama-13b-instruct:da5676342de1a5a335b848383af297f592b816b950a43d251a0a9edd0113604b",
+            api_key="my-replicate-api-key")
+    )
+)
+```
+
+If you don't specify the `model` parameter, it will default to `replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781`.
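To illustrate the default just mentioned, omitting `model` entirely would look roughly like this sketch (not part of the patch; it assumes `ReplicateLLM` falls back to the default listed above when `model` is left out):

```python
# Sketch: rely on ReplicateLLM's default model by omitting the `model` argument.
from continuedev.src.continuedev.core.models import Models
from continuedev.src.continuedev.libs.llm.replicate import ReplicateLLM

config = ContinueConfig(
    ...
    models=Models(
        default=ReplicateLLM(api_key="my-replicate-api-key")
    )
)
```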

 [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/replicate.py)

 ## Properties

-
\ No newline at end of file
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/text_gen_interface.md b/docs/docs/reference/Models/text_gen_interface.md
index a59a4166..21404960 100644
--- a/docs/docs/reference/Models/text_gen_interface.md
+++ b/docs/docs/reference/Models/text_gen_interface.md
@@ -2,10 +2,27 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 # TextGenUI
 
+TextGenUI is a comprehensive, open-source language model UI and local server. You can set it up with an OpenAI-compatible server plugin, but if for some reason that doesn't work, you can use this class like so:
+
+```python
+from continuedev.src.continuedev.libs.llm.text_gen_interface import TextGenUI
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=TextGenUI(
+            model="",
+        )
+    )
+)
+```

 [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/text_gen_interface.py)

 ## Properties

-
\ No newline at end of file
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/Models/together.md b/docs/docs/reference/Models/together.md
index e436644c..ec1ebb9c 100644
--- a/docs/docs/reference/Models/together.md
+++ b/docs/docs/reference/Models/together.md
@@ -2,10 +2,29 @@ import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
 
 # TogetherLLM
 
+The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this:
+
+```python
+from continuedev.src.continuedev.core.models import Models
+from continuedev.src.continuedev.libs.llm.together import TogetherLLM
+
+config = ContinueConfig(
+    ...
+    models=Models(
+        default=TogetherLLM(
+            api_key="",
+            model="togethercomputer/llama-2-13b-chat"
+        )
+    )
+)
+```

 [View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/together.py)

 ## Properties

-
\ No newline at end of file
+
+
+### Inherited Properties
+
+
\ No newline at end of file
diff --git a/docs/docs/reference/config.md b/docs/docs/reference/config.md
new file mode 100644
index 00000000..dbcfc4c6
--- /dev/null
+++ b/docs/docs/reference/config.md
@@ -0,0 +1,14 @@
+import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
+
+# ContinueConfig
+
+Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\.continue\config.py` for Windows) on your machine. This class is instantiated from the config file for every new session.
+
+[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/libs/llm/config.py)
+
+## Properties
+
+
+
+### Inherited Properties
+
diff --git a/docs/docs/telemetry.md b/docs/docs/telemetry.md
index e0ea2158..2202aa92 100644
--- a/docs/docs/telemetry.md
+++ b/docs/docs/telemetry.md
@@ -1,4 +1,4 @@
-# Telemetry
+# 🦔 Telemetry
 
 ## Overview
 
@@ -27,4 +27,4 @@ config = ContinueConfig(
 )
 ```
 
-You can turn off anonymous telemetry by changing the value of `allow_anonymous_telemetry` to `false`.
+You can turn off anonymous telemetry by changing the value of `allow_anonymous_telemetry` to `False`.
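Concretely, the opt-out described in the line above would sit in `~/.continue/config.py` roughly as follows (a sketch, assuming `allow_anonymous_telemetry` is a top-level `ContinueConfig` field, as the surrounding telemetry.md example suggests):

```python
# Sketch: disable anonymous telemetry; other fields such as `models` stay unchanged.
config = ContinueConfig(
    ...
    allow_anonymous_telemetry=False,
)
```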
diff --git a/docs/docs/troubleshooting.md b/docs/docs/troubleshooting.md
index 722c5d1b..46845c55 100644
--- a/docs/docs/troubleshooting.md
+++ b/docs/docs/troubleshooting.md
@@ -1,4 +1,4 @@
-# Troubleshooting
+# ❓ Troubleshooting
 
 The Continue VS Code extension is currently in beta. It will attempt to start the Continue Python server locally for you, but sometimes this will fail, causing the "Starting Continue server..." message not to disappear, or other hangups. While we are working on fixes to all of these problems, there are a few things you can do to temporarily troubleshoot:
 
diff --git a/docs/sidebars.js b/docs/sidebars.js
index 2121fea6..47e0baf7 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -15,34 +15,35 @@ const sidebars = {
   docsSidebar: [
     "intro",
-    "getting-started",
+    "quickstart",
     "how-to-use-continue",
     "how-continue-works",
     {
       type: "category",
-      label: "Customization",
+      label: "🎨 Customization",
       collapsible: true,
       collapsed: false,
       items: [
+        "customization/overview",
         "customization/models",
         "customization/context-providers",
         "customization/slash-commands",
         "customization/other-configuration",
       ],
     },
-    "collecting-data",
-    "telemetry",
-    "troubleshooting",
     {
       type: "category",
-      label: "Walkthroughs",
+      label: "🚶 Walkthroughs",
       collapsible: true,
       collapsed: false,
       items: ["walkthroughs/codellama"],
     },
+    "development-data",
+    "telemetry",
+    "troubleshooting",
     {
       type: "category",
-      label: "Reference",
+      label: "📖 Reference",
       collapsible: true,
       collapsed: false,
       items: [
diff --git a/docs/src/components/ClassPropertyRef.tsx b/docs/src/components/ClassPropertyRef.tsx
index 46664c4c..7246663b 100644
--- a/docs/src/components/ClassPropertyRef.tsx
+++ b/docs/src/components/ClassPropertyRef.tsx
@@ -4,8 +4,14 @@ interface ClassPropertyRefProps {
   name: string;
   details: string;
   required: boolean;
+  default: string;
 }
 
+const PYTHON_TYPES = {
+  string: "str",
+  integer: "int",
+};
+
 export default function ClassPropertyRef(props: ClassPropertyRefProps) {
   const details = JSON.parse(props.details);
 
@@ -15,10 +21,32 @@ export default function ClassPropertyRef(props: ClassPropertyRefProps) {

{props.name}

-
-          {props.required && "REQUIRED"}
+        {props.required && (
+          REQUIRED
+        )}
+
+        {details.type && `(${PYTHON_TYPES[details.type] || details.type})`}
-        {details.type && `(${details.type})`}
+
+        {props.default && (
+          {" "}
+          = {details.type === "string" && '"'}
+          {props.default}
+          {details.type === "string" && '"'}
+        )}

{details.description}

diff --git a/docs/src/css/custom.css b/docs/src/css/custom.css
index 794febaf..3a7178dd 100644
--- a/docs/src/css/custom.css
+++ b/docs/src/css/custom.css
@@ -18,13 +18,13 @@
 }
 
 /* For readability concerns, you should choose a lighter palette in dark mode. */
-[data-theme='dark'] {
-  --ifm-color-primary: #be1b55ff;
-  --ifm-color-primary-dark: #be1b55ff;
-  --ifm-color-primary-darker: #be1b55ff;
-  --ifm-color-primary-darkest: #be1b55ff;
-  --ifm-color-primary-light: #be1b55ff;
-  --ifm-color-primary-lighter: #be1b55ff;
-  --ifm-color-primary-lightest: #be1b55ff;
+[data-theme="dark"] {
+  --ifm-color-primary: #59bc89ff;
+  --ifm-color-primary-dark: #59bc89ff;
+  --ifm-color-primary-darker: #59bc89ff;
+  --ifm-color-primary-darkest: #59bc89ff;
+  --ifm-color-primary-light: #59bc89ff;
+  --ifm-color-primary-lighter: #59bc89ff;
+  --ifm-color-primary-lightest: #59bc89ff;
   --docusaurus-highlighted-code-line-bg: rgba(0, 0, 0, 0.3);
 }
-- 
cgit v1.2.3-70-g09d2