diff options
-rw-r--r-- | docs/docs/customization.md | 124 | ||||
-rw-r--r-- | docs/sidebars.js | 10 | ||||
-rw-r--r-- | extension/package-lock.json | 4 | ||||
-rw-r--r-- | extension/package.json | 2 | ||||
-rw-r--r-- | extension/react-app/src/components/TextDialog.tsx | 60 | ||||
-rw-r--r-- | extension/react-app/src/pages/gui.tsx | 40 |
6 files changed, 189 insertions, 51 deletions
diff --git a/docs/docs/customization.md b/docs/docs/customization.md new file mode 100644 index 00000000..cd306cfe --- /dev/null +++ b/docs/docs/customization.md @@ -0,0 +1,124 @@ +# Customization + +Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` on your machine. This file is created the first time you run Continue. + +## Change the default LLM + +Change the `default_model` field to any of "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "claude-2", or "ggml". + +### claude-2 and gpt-X + +If you have access, simply set `default_model` to the model you would like to use, then you will be prompted for a personal API key after reloading VS Code. If using an OpenAI model, you can press enter to try with our API key for free. + +### Local models with ggml + +See our [5 minute quickstart](https://github.com/continuedev/ggml-server-example) to run any model locally with ggml. While these models don't yet perform as well, they are free, entirely private, and run offline. + +### Azure OpenAI Service + +If you'd like to use OpenAI models but are concerned about privacy, you can use the Azure OpenAI service, which is GDPR and HIPAA compliant. After applying for access [here](https://azure.microsoft.com/en-us/products/ai-services/openai-service), you will typically hear back within only a few days. Once you have access, set `default_model` to "gpt-4", and then set the `azure_openai_info` property in the `ContinueConfig` like so: + +```python +config = ContinueConfig( + ... + azure_openai_info=AzureInfo( + endpoint="https://my-azure-openai-instance.openai.azure.com/", + engine="my-azure-openai-deployment", + api_version="2023-03-15-preview" + ) +) +``` + +The easiest way to find this information is from the chat playground in the Azure OpenAI portal. Under the "Chat Session" section, click "View Code" to see each of these parameters. 
Finally, find one of your Azure OpenAI keys and enter it in the VS Code settings under `continue.OPENAI_API_KEY`. + +## Customize System Message + +You can write your own system message, a set of instructions that will always be top-of-mind for the LLM, by setting the `system_message` property to any string. For example, you might request "Please make all responses as concise as possible and never repeat something you have already explained." + +System messages can also reference files. For example, if there is a markdown file (e.g. at `/Users/nate/Documents/docs/reference.md`) you'd like the LLM to know about, you can reference it with [Mustache](http://mustache.github.io/mustache.5.html) templating like this: "Please reference this documentation: {{ Users/nate/Documents/docs/reference.md }}". As of now, you must use an absolute path. + +## Custom Commands + +You can add custom slash commands by adding a `CustomCommand` object to the `custom_commands` property. Each `CustomCommand` has + +- `name`: the name of the command, which will be invoked with `/name` +- `description`: a short description of the command, which will appear in the dropdown +- `prompt`: a set of instructions to the LLM, which will be shown in the prompt + +Custom commands are great when you are frequently reusing a prompt. For example, if you've crafted a great prompt and frequently ask the LLM to check for mistakes in your code, you could add a command like this: + +```python +config = ContinueConfig( + ... + custom_commands=[ + CustomCommand( + name="check", + description="Check for mistakes in my code", + prompt=dedent("""\ + Please read the highlighted code and check for any mistakes. You should look for the following, and be extremely vigilant: + - Syntax errors + - Logic errors + - Security vulnerabilities + - Performance issues + - Anything else that looks wrong + + Once you find an error, please explain it as clearly as possible, but without using extra words. 
For example, instead of saying "I think there is a syntax error on line 5", you should say "Syntax error on line 5". Give your answer as one bullet point per mistake found.""") + ) + ] +) +``` + +## Temperature + +Set `temperature` to any value between 0 and 1. Higher values will make the LLM more creative, while lower values will make it more predictable. The default is 0.5. + +## Custom Context Providers + +When you type '@' in the Continue text box, it will display a dropdown of items that can be selected to include in your message as context. For example, you might want to reference a GitHub Issue, file, or Slack thread. All of these options are provided by a `ContextProvider` class, and we make it easy to write your own. As an example, here is the `GitHubIssuesContextProvider`, which lets you search all open GitHub Issues in a repo: + +```python +class GitHubIssuesContextProvider(ContextProvider): + """ + The GitHubIssuesContextProvider is a ContextProvider that allows you to search GitHub Issues in a repo. + """ + + title = "issues" + repo_name: str + auth_token: str + + async def provide_context_items(self) -> List[ContextItem]: + auth = Auth.Token(self.auth_token) + gh = Github(auth=auth) + + repo = gh.get_repo(self.repo_name) + issues = repo.get_issues().get_page(0) + + items = [ContextItem( + content=issue.body, + description=ContextItemDescription( + name=f"Issue #{issue.number}", + description=issue.title, + id=ContextItemId( + provider_title=self.title, + item_id=issue.id + ) + ) + ) for issue in issues] + self.context_items = { + item.description.id.to_string(): item for item in items} + return items +``` + +It can then be set in the `ContinueConfig` like so: + +```python +config = ContinueConfig( + ... 
+ context_providers=[ + GitHubIssuesContextProvider( + repo_name="my-github-username-or-org/my-github-repo", + auth_token="my-github-auth-token" + ) + ] +) +``` diff --git a/docs/sidebars.js b/docs/sidebars.js index 9baf1b94..83b34ee8 100644 --- a/docs/sidebars.js +++ b/docs/sidebars.js @@ -13,7 +13,15 @@ /** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */ const sidebars = { - docsSidebar: ["intro", "getting-started", "how-to-use-continue", "how-continue-works", "telemetry", "collecting-data"], + docsSidebar: [ + "intro", + "getting-started", + "how-to-use-continue", + "how-continue-works", + "telemetry", + "collecting-data", + "customization", + ], }; module.exports = sidebars; diff --git a/extension/package-lock.json b/extension/package-lock.json index 5c8e27d0..933da12b 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.188", + "version": "0.0.189", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.188", + "version": "0.0.189", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index 3d44c156..08737ff4 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": "Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.188", + "version": "0.0.189", "publisher": "Continue", "engines": { "vscode": "^1.67.0" diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx index cba3852d..9597b578 100644 --- a/extension/react-app/src/components/TextDialog.tsx +++ b/extension/react-app/src/components/TextDialog.tsx @@ -3,6 +3,7 @@ import React, { useEffect, useState } from "react"; import styled from "styled-components"; import { Button, secondaryDark, vscBackground, vscForeground } from "."; import { 
isMetaEquivalentKeyPressed } from "../util"; +import { ReactMarkdown } from "react-markdown/lib/react-markdown"; const ScreenCover = styled.div` position: absolute; @@ -56,6 +57,7 @@ const TextDialog = (props: { onEnter: (text: string) => void; onClose: () => void; message?: string; + entryOn?: boolean; }) => { const [text, setText] = useState(""); const textAreaRef = React.createRef<HTMLTextAreaElement>(); @@ -79,33 +81,37 @@ const TextDialog = (props: { }} > <Dialog> - <P>{props.message || ""}</P> - <TextArea - rows={10} - ref={textAreaRef} - onKeyDown={(e) => { - if ( - e.key === "Enter" && - isMetaEquivalentKeyPressed(e) && - textAreaRef.current - ) { - props.onEnter(textAreaRef.current.value); - setText(""); - } else if (e.key === "Escape") { - props.onClose(); - } - }} - /> - <Button - onClick={() => { - if (textAreaRef.current) { - props.onEnter(textAreaRef.current.value); - setText(""); - } - }} - > - Enter - </Button> + <ReactMarkdown>{props.message || ""}</ReactMarkdown> + {props.entryOn && ( + <> + <TextArea + rows={10} + ref={textAreaRef} + onKeyDown={(e) => { + if ( + e.key === "Enter" && + isMetaEquivalentKeyPressed(e) && + textAreaRef.current + ) { + props.onEnter(textAreaRef.current.value); + setText(""); + } else if (e.key === "Escape") { + props.onClose(); + } + }} + /> + <Button + onClick={() => { + if (textAreaRef.current) { + props.onEnter(textAreaRef.current.value); + setText(""); + } + }} + > + Enter + </Button> + </> + )} </Dialog> </DialogContainer> </ScreenCover> diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index fccc9b4b..49f41dcf 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -75,7 +75,6 @@ function GUI(props: GUIProps) { } }, [dataSwitchOn]); - const [usingFastModel, setUsingFastModel] = useState(false); const [waitingForSteps, setWaitingForSteps] = useState(false); const [userInputQueue, setUserInputQueue] = useState<string[]>([]); const 
[highlightedRanges, setHighlightedRanges] = useState< @@ -117,6 +116,7 @@ function GUI(props: GUIProps) { const [showFeedbackDialog, setShowFeedbackDialog] = useState(false); const [feedbackDialogMessage, setFeedbackDialogMessage] = useState(""); + const [feedbackEntryOn, setFeedbackEntryOn] = useState(true); const topGuiDivRef = useRef<HTMLDivElement>(null); @@ -140,12 +140,9 @@ function GUI(props: GUIProps) { }, [topGuiDivRef.current, scrollTimeout]); useEffect(() => { + // Cmd + Backspace to delete current step const listener = (e: any) => { - // Cmd + i to toggle fast model - if (e.key === "i" && isMetaEquivalentKeyPressed(e) && e.shiftKey) { - setUsingFastModel((prev) => !prev); - // Cmd + backspace to stop currently running step - } else if ( + if ( e.key === "Backspace" && isMetaEquivalentKeyPressed(e) && typeof history?.current_index !== "undefined" && @@ -164,7 +161,6 @@ function GUI(props: GUIProps) { useEffect(() => { client?.onStateUpdate((state: FullState) => { // Scroll only if user is at very bottom of the window. - setUsingFastModel(state.default_model === "gpt-3.5-turbo"); const shouldScrollToBottom = topGuiDivRef.current && topGuiDivRef.current?.offsetHeight - window.scrollY < 100; @@ -281,6 +277,7 @@ function GUI(props: GUIProps) { setShowFeedbackDialog(false); }} message={feedbackDialogMessage} + entryOn={feedbackEntryOn} /> <TopGUIDiv @@ -431,24 +428,26 @@ function GUI(props: GUIProps) { </div> <HeaderButtonWithText onClick={() => { - // client?.changeDefaultModel( - // usingFastModel ? "gpt-4" : "gpt-3.5-turbo" - // ); - if (!usingFastModel) { - // Show the dialog - setFeedbackDialogMessage( - "We don't yet support local models, but we're working on it! If privacy is a concern of yours, please write a short note to let us know." - ); - setShowFeedbackDialog(true); - } - setUsingFastModel((prev) => !prev); + // Show the dialog + setFeedbackDialogMessage( + `Continue uses GPT-4 by default, but works with any model. 
If you'd like to keep your code completely private, there are a few options: + +Run a local model with ggml: [5 minute quickstart](https://github.com/continuedev/ggml-server-example) + +Use Azure OpenAI service, which is GDPR and HIPAA compliant: [Tutorial](https://continue.dev/docs/customization#azure-openai-service) + +If you already have an LLM deployed on your own infrastructure, or would like to do so, please contact us at hi@continue.dev. + ` + ); + setFeedbackEntryOn(false); + setShowFeedbackDialog(true); }} - text={usingFastModel ? "local" : "gpt-4"} + text={"Use Private Model"} > <div style={{ fontSize: "18px", marginLeft: "2px", marginRight: "2px" }} > - {usingFastModel ? "🔒" : "🧠"} + 🔒 </div> </HeaderButtonWithText> <HeaderButtonWithText @@ -473,6 +472,7 @@ function GUI(props: GUIProps) { setFeedbackDialogMessage( "Having trouble using Continue? Want a new feature? Let us know! This box is anonymous, but we will promptly address your feedback." ); + setFeedbackEntryOn(true); setShowFeedbackDialog(true); }} text="Feedback" |