From 39cd2ef27d6ed439b00a9edec4a487343ff1c2c9 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Fri, 14 Jul 2023 03:24:46 -0700 Subject: warn of large highlighted ranges, cmd+k->m --- extension/react-app/src/pages/gui.tsx | 27 +++++++++++++-------------- 1 file changed, 13 insertions(+), 14 deletions(-) (limited to 'extension/react-app/src/pages') diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index 4ff260fa..57cebac3 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -95,11 +95,8 @@ function GUI(props: GUIProps) { name: "Welcome to Continue", hide: false, description: `- Highlight code and ask a question or give instructions -- Use \`cmd+k\` (Mac) / \`ctrl+k\` (Windows) to open Continue -- Use \`cmd+shift+e\` / \`ctrl+shift+e\` to open file Explorer -- Add your own OpenAI API key to VS Code Settings with \`cmd+,\` -- Use slash commands when you want fine-grained control -- Past steps are included as part of the context by default`, + - Use \`cmd+m\` (Mac) / \`ctrl+m\` (Windows) to open Continue + - Use \`/help\` to ask questions about how to use Continue`, system_message: null, chat_context: [], manage_own_chat_context: false, @@ -269,15 +266,17 @@ function GUI(props: GUIProps) { return ( <> - { - client?.sendMainInput(`/feedback ${text}`); - setShowFeedbackDialog(false); - }} - onClose={() => { - setShowFeedbackDialog(false); - }} - message={feedbackDialogMessage} /> + { + client?.sendMainInput(`/feedback ${text}`); + setShowFeedbackDialog(false); + }} + onClose={() => { + setShowFeedbackDialog(false); + }} + message={feedbackDialogMessage} + /> Date: Sat, 15 Jul 2023 14:30:11 -0700 Subject: ctrl shortcuts on windows, load models immediately --- continuedev/src/continuedev/core/autopilot.py | 10 ++-- continuedev/src/continuedev/core/sdk.py | 59 +++++++++++++++------- .../src/continuedev/libs/llm/hf_inference_api.py | 6 ++- continuedev/src/continuedev/server/ide.py | 4 +- .../src/continuedev/server/session_manager.py | 6 +-- .../react-app/src/components/StepContainer.tsx | 2 +- extension/react-app/src/components/TextDialog.tsx | 6 ++- extension/react-app/src/pages/gui.tsx | 6 +-- extension/react-app/src/util/index.ts | 30 +++++++++++ 9 files changed, 98 insertions(+), 31 deletions(-) create mode 100644 extension/react-app/src/util/index.ts (limited to 'extension/react-app/src/pages') diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py index 82439f49..0696c360 100644 --- a/continuedev/src/continuedev/core/autopilot.py +++ b/continuedev/src/continuedev/core/autopilot.py @@ -50,6 +50,8 @@ class Autopilot(ContinueBaseModel): full_state: Union[FullState, None] = None _on_update_callbacks: List[Callable[[FullState], None]] = [] + continue_sdk: ContinueSDK = None + _active: bool = False _should_halt: bool = False _main_user_input_queue: List[str] = [] @@ -57,9 +59,11 @@ class Autopilot(ContinueBaseModel): _user_input_queue = AsyncSubscriptionQueue() _retry_queue = AsyncSubscriptionQueue() - @cached_property - def continue_sdk(self) -> ContinueSDK: - return ContinueSDK(self) + @classmethod + async def create(cls, policy: Policy, ide: AbstractIdeProtocolServer, full_state: FullState) -> "Autopilot": + autopilot = cls(ide=ide, policy=policy) + autopilot.continue_sdk = await ContinueSDK.create(autopilot) + return autopilot class Config: arbitrary_types_allowed = True diff --git a/continuedev/src/continuedev/core/sdk.py 
b/continuedev/src/continuedev/core/sdk.py index aa2d8892..d73561d2 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/continuedev/src/continuedev/core/sdk.py @@ -1,6 +1,6 @@ import asyncio from functools import cached_property -from typing import Coroutine, Union +from typing import Coroutine, Dict, Union import os from ..steps.core.core import DefaultModelEditCodeStep @@ -13,7 +13,7 @@ from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI from ..libs.llm.openai import OpenAI from .observation import Observation from ..server.ide_protocol import AbstractIdeProtocolServer -from .main import Context, ContinueCustomException, HighlightedRangeContext, History, Step, ChatMessage, ChatMessageRole +from .main import Context, ContinueCustomException, History, Step, ChatMessage from ..steps.core.core import * from ..libs.llm.proxy_server import ProxyServer @@ -22,26 +22,46 @@ class Autopilot: pass +ModelProvider = Literal["openai", "hf_inference_api", "ggml", "anthropic"] +MODEL_PROVIDER_TO_ENV_VAR = { + "openai": "OPENAI_API_KEY", + "hf_inference_api": "HUGGING_FACE_TOKEN", + "anthropic": "ANTHROPIC_API_KEY" +} + + class Models: - def __init__(self, sdk: "ContinueSDK"): + provider_keys: Dict[ModelProvider, str] = {} + model_providers: List[ModelProvider] + + def __init__(self, sdk: "ContinueSDK", model_providers: List[ModelProvider]): self.sdk = sdk + self.model_providers = model_providers + + @classmethod + async def create(cls, sdk: "ContinueSDK", with_providers: List[ModelProvider] = ["openai"]) -> "Models": + models = Models(sdk, with_providers) + for provider in with_providers: + if provider in MODEL_PROVIDER_TO_ENV_VAR: + env_var = MODEL_PROVIDER_TO_ENV_VAR[provider] + models.provider_keys[provider] = await sdk.get_user_secret( + env_var, f'Please add your {env_var} to the .env file') + + return models def __load_openai_model(self, model: str) -> OpenAI: - async def load_openai_model(): - api_key = await self.sdk.get_user_secret( - 'OPENAI_API_KEY', 'Enter your OpenAI API key or press enter to try for free') - if api_key == "": - return ProxyServer(self.sdk.ide.unique_id, model) - return OpenAI(api_key=api_key, default_model=model) - return asyncio.get_event_loop().run_until_complete(load_openai_model()) + api_key = self.provider_keys["openai"] + if api_key == "": + return ProxyServer(self.sdk.ide.unique_id, model) + return OpenAI(api_key=api_key, default_model=model) + + def __load_hf_inference_api_model(self, model: str) -> HuggingFaceInferenceAPI: + api_key = self.provider_keys["hf_inference_api"] + return HuggingFaceInferenceAPI(api_key=api_key, model=model) @cached_property def starcoder(self): - async def load_starcoder(): - api_key = await self.sdk.get_user_secret( - 'HUGGING_FACE_TOKEN', 'Please add your Hugging Face token to the .env file') - return HuggingFaceInferenceAPI(api_key=api_key) - return asyncio.get_event_loop().run_until_complete(load_starcoder()) + return self.__load_hf_inference_api_model("bigcode/starcoder") @cached_property def gpt35(self): @@ -74,7 +94,7 @@ class Models: @property def default(self): default_model = self.sdk.config.default_model - return self.__model_from_name(default_model) if default_model is not None else self.gpt35 + return self.__model_from_name(default_model) if default_model is not None else self.gpt4 class ContinueSDK(AbstractContinueSDK): @@ -87,10 +107,15 @@ class ContinueSDK(AbstractContinueSDK): def __init__(self, autopilot: Autopilot): self.ide = autopilot.ide self.__autopilot = autopilot - self.models = Models(self) 
self.context = autopilot.context self.config = self._load_config() + @classmethod + async def create(cls, autopilot: Autopilot) -> "ContinueSDK": + sdk = ContinueSDK(autopilot) + sdk.models = await Models.create(sdk) + return sdk + config: ContinueConfig def _load_config(self) -> ContinueConfig: diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/continuedev/src/continuedev/libs/llm/hf_inference_api.py index 1586c620..803ba122 100644 --- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py +++ b/continuedev/src/continuedev/libs/llm/hf_inference_api.py @@ -9,7 +9,11 @@ DEFAULT_MAX_TIME = 120. class HuggingFaceInferenceAPI(LLM): api_key: str - model: str = "bigcode/starcoder" + model: str + + def __init__(self, api_key: str, model: str): + self.api_key = api_key + self.model = model def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs): """Return the completion of the text with the given temperature.""" diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index 7875c94d..77b13483 100644 --- a/continuedev/src/continuedev/server/ide.py +++ b/continuedev/src/continuedev/server/ide.py @@ -227,8 +227,8 @@ class IdeProtocolServer(AbstractIdeProtocolServer): }) async def getSessionId(self): - session_id = self.session_manager.new_session( - self, self.session_id).session_id + session_id = (await self.session_manager.new_session( + self, self.session_id)).session_id await self._send_json("getSessionId", { "sessionId": session_id }) diff --git a/continuedev/src/continuedev/server/session_manager.py b/continuedev/src/continuedev/server/session_manager.py index fb8ac386..6d109ca6 100644 --- a/continuedev/src/continuedev/server/session_manager.py +++ b/continuedev/src/continuedev/server/session_manager.py @@ -53,18 +53,18 @@ class SessionManager: session_files = os.listdir(sessions_folder) if f"{session_id}.json" in session_files and session_id in self.registered_ides: if self.registered_ides[session_id].session_id is not None: - return self.new_session(self.registered_ides[session_id], session_id=session_id) + return await self.new_session(self.registered_ides[session_id], session_id=session_id) raise KeyError("Session ID not recognized", session_id) return self.sessions[session_id] - def new_session(self, ide: AbstractIdeProtocolServer, session_id: Union[str, None] = None) -> Session: + async def new_session(self, ide: AbstractIdeProtocolServer, session_id: Union[str, None] = None) -> Session: full_state = None if session_id is not None and os.path.exists(getSessionFilePath(session_id)): with open(getSessionFilePath(session_id), "r") as f: full_state = FullState(**json.load(f)) - autopilot = DemoAutopilot( + autopilot = await DemoAutopilot.create( policy=DemoPolicy(), ide=ide, full_state=full_state) session_id = session_id or str(uuid4()) ide.session_id = session_id diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx index 14e9b854..7f23e333 100644 --- a/extension/react-app/src/components/StepContainer.tsx +++ b/extension/react-app/src/components/StepContainer.tsx @@ -181,7 +181,7 @@ function StepContainer(props: StepContainerProps) { } className="overflow-hidden cursor-pointer" onClick={(e) => { - if (e.metaKey) { + if (isMetaEquivalentKeyPressed(e)) { props.onToggleAll(); } else { props.onToggle(); diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx index ea5727f0..c724697d 
100644 --- a/extension/react-app/src/components/TextDialog.tsx +++ b/extension/react-app/src/components/TextDialog.tsx @@ -81,7 +81,11 @@ const TextDialog = (props: { rows={10} ref={textAreaRef} onKeyDown={(e) => { - if (e.key === "Enter" && e.metaKey && textAreaRef.current) { + if ( + e.key === "Enter" && + isMetaEquivalentKeyPressed(e) && + textAreaRef.current + ) { props.onEnter(textAreaRef.current.value); setText(""); } else if (e.key === "Escape") { diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index 57cebac3..cb0404ab 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -137,12 +137,12 @@ function GUI(props: GUIProps) { useEffect(() => { const listener = (e: any) => { // Cmd + i to toggle fast model - if (e.key === "i" && e.metaKey && e.shiftKey) { + if (e.key === "i" && isMetaEquivalentKeyPressed(e) && e.shiftKey) { setUsingFastModel((prev) => !prev); // Cmd + backspace to stop currently running step } else if ( e.key === "Backspace" && - e.metaKey && + isMetaEquivalentKeyPressed(e) && typeof history?.current_index !== "undefined" && history.timeline[history.current_index]?.active ) { @@ -220,7 +220,7 @@ function GUI(props: GUIProps) { if (mainTextInputRef.current) { let input = (mainTextInputRef.current as any).inputValue; // cmd+enter to /edit - if (event?.metaKey) { + if (isMetaEquivalentKeyPressed(event)) { input = `/edit ${input}`; } (mainTextInputRef.current as any).setInputValue(""); diff --git a/extension/react-app/src/util/index.ts b/extension/react-app/src/util/index.ts new file mode 100644 index 00000000..ad711321 --- /dev/null +++ b/extension/react-app/src/util/index.ts @@ -0,0 +1,30 @@ +type Platform = "mac" | "linux" | "windows" | "unknown"; + +function getPlatform(): Platform { + const platform = window.navigator.platform.toUpperCase(); + if (platform.indexOf("MAC") >= 0) { + return "mac"; + } else if (platform.indexOf("LINUX") >= 0) { + return "linux"; + } else if (platform.indexOf("WIN") >= 0) { + return "windows"; + } else { + return "unknown"; + } +} + +function isMetaEquivalentKeyPressed(event: { + metaKey: boolean; + ctrlKey: boolean; +}): boolean { + const platform = getPlatform(); + switch (platform) { + case "mac": + return event.metaKey; + case "linux": + case "windows": + return event.ctrlKey; + default: + return event.metaKey; + } +} -- cgit v1.2.3-70-g09d2 From 3a39b979c55b005d9bb18b88b43ca7293ee5410d Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sat, 15 Jul 2023 16:32:56 -0700 Subject: patch --- extension/package-lock.json | 4 ++-- extension/package.json | 2 +- extension/react-app/src/components/TextDialog.tsx | 1 + extension/react-app/src/pages/gui.tsx | 1 + extension/src/activation/environmentSetup.ts | 19 ++++++++----------- 5 files changed, 13 insertions(+), 14 deletions(-) (limited to 'extension/react-app/src/pages') diff --git a/extension/package-lock.json b/extension/package-lock.json index 0edd4885..e77bfac2 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.166", + "version": "0.0.167", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.166", + "version": "0.0.167", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index 7cd7b793..bbd18b12 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": 
"Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.166", + "version": "0.0.167", "publisher": "Continue", "engines": { "vscode": "^1.67.0" diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx index c724697d..646d6846 100644 --- a/extension/react-app/src/components/TextDialog.tsx +++ b/extension/react-app/src/components/TextDialog.tsx @@ -2,6 +2,7 @@ import React, { useEffect, useState } from "react"; import styled from "styled-components"; import { Button, buttonColor, secondaryDark, vscBackground } from "."; +import { isMetaEquivalentKeyPressed } from "../util"; const ScreenCover = styled.div` position: absolute; diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index cb0404ab..64207487 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -23,6 +23,7 @@ import { RootStore } from "../redux/store"; import { postVscMessage } from "../vscode"; import UserInputContainer from "../components/UserInputContainer"; import Onboarding from "../components/Onboarding"; +import { isMetaEquivalentKeyPressed } from "../util"; const TopGUIDiv = styled.div` overflow: hidden; diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts index 374c38c0..6a66532e 100644 --- a/extension/src/activation/environmentSetup.ts +++ b/extension/src/activation/environmentSetup.ts @@ -11,7 +11,8 @@ import * as os from "os"; import fkill from "fkill"; import { sendTelemetryEvent, TelemetryEvent } from "../telemetry"; -const WINDOWS_REMOTE_SIGNED_SCRIPTS_ERROR = "A Python virtual enviroment cannot be activated because running scripts is disabled for this user. Please enable signed scripts to run with this command in PowerShell: `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`, reload VS Code, and then try again."; +const WINDOWS_REMOTE_SIGNED_SCRIPTS_ERROR = + "A Python virtual enviroment cannot be activated because running scripts is disabled for this user. In order to use Continue, please enable signed scripts to run with this command in PowerShell: `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`, reload VS Code, and then try again."; const MAX_RETRIES = 3; async function retryThenFail( @@ -26,7 +27,8 @@ async function retryThenFail( } // Show corresponding error message depending on the platform - let msg = "Failed to set up Continue extension. Please email nate@continue.dev and we'll get this fixed ASAP!"; + let msg = + "Failed to set up Continue extension. 
Please email nate@continue.dev and we'll get this fixed ASAP!"; try { switch (process.platform) { case "win32": @@ -35,14 +37,14 @@ async function retryThenFail( case "darwin": break; case "linux": - const [pythonCmd,] = await getPythonPipCommands(); + const [pythonCmd] = await getPythonPipCommands(); msg = await getLinuxAptInstallError(pythonCmd); break; } } finally { vscode.window.showErrorMessage(msg); } - + sendTelemetryEvent(TelemetryEvent.ExtensionSetupError, { error: e.message, }); @@ -216,10 +218,7 @@ async function getLinuxAptInstallError(pythonCmd: string) { const version = stdout.split(" ")[1].split(".")[1]; const installVenvCommand = `apt-get install python3.${version}-venv`; await runCommand("apt-get update"); - // Ask the user to run the command to install python3-venv (requires sudo, so we can't) - // First, get the python version - const msg = `[Important] Continue needs to create a Python virtual environment, but python3.${version}-venv is not installed. Please run this command in your terminal: \`${installVenvCommand}\`, reload VS Code, and then try again.`; - return msg; + return `[Important] Continue needs to create a Python virtual environment, but python3.${version}-venv is not installed. Please run this command in your terminal: \`${installVenvCommand}\`, reload VS Code, and then try again.`; } async function setupPythonEnv() { @@ -246,9 +245,7 @@ async function setupPythonEnv() { stderr && stderr.includes("running scripts is disabled on this system") ) { - await vscode.window.showErrorMessage( - WINDOWS_REMOTE_SIGNED_SCRIPTS_ERROR - ); + await vscode.window.showErrorMessage(WINDOWS_REMOTE_SIGNED_SCRIPTS_ERROR); throw new Error(stderr); } else if ( stderr?.includes("On Debian/Ubuntu systems") || -- cgit v1.2.3-70-g09d2 From 08221a0879b4a163eab6860524f255dbcb4743ae Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Mon, 17 Jul 2023 12:05:03 -0700 Subject: match vscode color theme --- continuedev/src/continuedev/libs/util/dedent.py | 25 ----------- continuedev/src/continuedev/libs/util/strings.py | 49 ++++++++++++++++++++++ continuedev/src/continuedev/steps/chat.py | 8 ++-- continuedev/src/continuedev/steps/core/core.py | 17 ++------ extension/package-lock.json | 4 +- extension/package.json | 2 +- extension/react-app/src/components/ComboBox.tsx | 10 +++-- .../react-app/src/components/InputAndButton.tsx | 10 ++--- extension/react-app/src/components/PillButton.tsx | 9 +++- .../react-app/src/components/StepContainer.tsx | 12 ++++-- extension/react-app/src/components/TextDialog.tsx | 14 ++++--- extension/react-app/src/components/index.ts | 23 ++++++---- extension/react-app/src/index.css | 4 +- extension/react-app/src/pages/gui.tsx | 11 +++-- 14 files changed, 119 insertions(+), 79 deletions(-) delete mode 100644 continuedev/src/continuedev/libs/util/dedent.py create mode 100644 continuedev/src/continuedev/libs/util/strings.py (limited to 'extension/react-app/src/pages') diff --git a/continuedev/src/continuedev/libs/util/dedent.py b/continuedev/src/continuedev/libs/util/dedent.py deleted file mode 100644 index e59c2e97..00000000 --- a/continuedev/src/continuedev/libs/util/dedent.py +++ /dev/null @@ -1,25 +0,0 @@ -from typing import Tuple - - -def dedent_and_get_common_whitespace(s: str) -> Tuple[str, str]: - lines = s.splitlines() - if len(lines) == 0: - return "", "" - - # Longest common whitespace prefix - lcp = lines[0].split(lines[0].strip())[0] - # Iterate through the lines - for i in range(1, len(lines)): - # Empty lines are wildcards - if lines[i].strip() == "": - 
continue - # Iterate through the leading whitespace characters of the current line - for j in range(0, len(lcp)): - # If it doesn't have the same whitespace as lcp, then update lcp - if j >= len(lines[i]) or lcp[j] != lines[i][j]: - lcp = lcp[:j] - if lcp == "": - return s, "" - break - - return "\n".join(map(lambda x: x.lstrip(lcp), lines)), lcp diff --git a/continuedev/src/continuedev/libs/util/strings.py b/continuedev/src/continuedev/libs/util/strings.py new file mode 100644 index 00000000..f1fb8d0b --- /dev/null +++ b/continuedev/src/continuedev/libs/util/strings.py @@ -0,0 +1,49 @@ +from typing import Tuple + + +def dedent_and_get_common_whitespace(s: str) -> Tuple[str, str]: + lines = s.splitlines() + if len(lines) == 0: + return "", "" + + # Longest common whitespace prefix + lcp = lines[0].split(lines[0].strip())[0] + # Iterate through the lines + for i in range(1, len(lines)): + # Empty lines are wildcards + if lines[i].strip() == "": + continue + # Iterate through the leading whitespace characters of the current line + for j in range(0, len(lcp)): + # If it doesn't have the same whitespace as lcp, then update lcp + if j >= len(lines[i]) or lcp[j] != lines[i][j]: + lcp = lcp[:j] + if lcp == "": + return s, "" + break + + return "\n".join(map(lambda x: x.lstrip(lcp), lines)), lcp + + +def remove_quotes_and_escapes(output: str) -> str: + """ + Clean up the output of the completion API, removing unnecessary escapes and quotes + """ + output = output.strip() + + # Replace smart quotes + output = output.replace("“", '"') + output = output.replace("”", '"') + output = output.replace("‘", "'") + output = output.replace("’", "'") + + # Remove escapes + output = output.replace('\\"', '"') + output = output.replace("\\'", "'") + output = output.replace("\\n", "\n") + output = output.replace("\\t", "\t") + output = output.replace("\\\\", "\\") + if (output.startswith('"') and output.endswith('"')) or (output.startswith("'") and output.endswith("'")): + output = output[1:-1] + + return output diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py index 3751dec2..7c6b42db 100644 --- a/continuedev/src/continuedev/steps/chat.py +++ b/continuedev/src/continuedev/steps/chat.py @@ -3,6 +3,7 @@ from typing import Any, Coroutine, List from pydantic import Field +from ..libs.util.strings import remove_quotes_and_escapes from .main import EditHighlightedCodeStep from .core.core import MessageStep from ..core.main import FunctionCall, Models @@ -43,11 +44,8 @@ class SimpleChatStep(Step): finally: await generator.aclose() - self.name = (await sdk.models.gpt35.complete( - f"Write a short title for the following chat message: {self.description}")).strip() - - if self.name.startswith('"') and self.name.endswith('"'): - self.name = self.name[1:-1] + self.name = remove_quotes_and_escapes(await sdk.models.gpt35.complete( + f"Write a short title for the following chat message: {self.description}")) self.chat_context.append(ChatMessage( role="assistant", diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py index d5a7cd9a..5b9b9fd5 100644 --- a/continuedev/src/continuedev/steps/core/core.py +++ b/continuedev/src/continuedev/steps/core/core.py @@ -12,7 +12,7 @@ from ...models.filesystem import FileSystem, RangeInFile, RangeInFileWithContent from ...core.observation import Observation, TextObservation, TracebackObservation, UserInputObservation from ...core.main import ChatMessage, ContinueCustomException, Step, 
SequentialStep from ...libs.util.count_tokens import MAX_TOKENS_FOR_MODEL, DEFAULT_MAX_TOKENS -from ...libs.util.dedent import dedent_and_get_common_whitespace +from ...libs.util.strings import dedent_and_get_common_whitespace, remove_quotes_and_escapes import difflib @@ -157,17 +157,6 @@ class DefaultModelEditCodeStep(Step): _new_contents: str = "" _prompt_and_completion: str = "" - def _cleanup_output(self, output: str) -> str: - output = output.replace('\\"', '"') - output = output.replace("\\'", "'") - output = output.replace("\\n", "\n") - output = output.replace("\\t", "\t") - output = output.replace("\\\\", "\\") - if output.startswith('"') and output.endswith('"'): - output = output[1:-1] - - return output - async def describe(self, models: Models) -> Coroutine[str, None, None]: if self._previous_contents.strip() == self._new_contents.strip(): description = "No edits were made" @@ -183,9 +172,9 @@ class DefaultModelEditCodeStep(Step): Please give a brief description of the changes made above using markdown bullet points. Be concise:""")) name = await models.gpt3516k.complete(f"Write a very short title to describe this requested change (no quotes): '{self.user_input}'. This is the title:") - self.name = self._cleanup_output(name) + self.name = remove_quotes_and_escapes(name) - return f"{self._cleanup_output(description)}" + return f"{remove_quotes_and_escapes(description)}" async def get_prompt_parts(self, rif: RangeInFileWithContents, sdk: ContinueSDK, full_file_contents: str): # We don't know here all of the functions being passed in. diff --git a/extension/package-lock.json b/extension/package-lock.json index 33f81dec..0e0125b0 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.176", + "version": "0.0.177", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.176", + "version": "0.0.177", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index e515ed36..8462bf68 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": "Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.176", + "version": "0.0.177", "publisher": "Continue", "engines": { "vscode": "^1.67.0" diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx index dbebd534..0ea8a3e1 100644 --- a/extension/react-app/src/components/ComboBox.tsx +++ b/extension/react-app/src/components/ComboBox.tsx @@ -6,6 +6,7 @@ import { lightGray, secondaryDark, vscBackground, + vscForeground, } from "."; import CodeBlock from "./CodeBlock"; import PillButton from "./PillButton"; @@ -48,7 +49,7 @@ const MainTextInput = styled.textarea` height: auto; width: 100%; background-color: ${secondaryDark}; - color: white; + color: ${vscForeground}; z-index: 1; border: 1px solid transparent; @@ -71,7 +72,7 @@ const Ul = styled.ul<{ position: absolute; background: ${vscBackground}; background-color: ${secondaryDark}; - color: white; + color: ${vscForeground}; max-height: ${UlMaxHeight}px; width: calc(100% - 16px); overflow-y: scroll; @@ -95,6 +96,7 @@ const Li = styled.li<{ selected: boolean; isLastItem: boolean; }>` + background-color: ${secondaryDark}; ${({ highlighted }) => highlighted && "background: #ff000066;"} ${({ selected }) => selected && "font-weight: bold;"} padding: 0.5rem 0.75rem; @@ -218,7
+220,9 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { ? "Editing such a large range may be slow" : undefined } - onlyShowDelete={highlightedCodeSections.length <= 1} + onlyShowDelete={ + highlightedCodeSections.length <= 1 || section.editing + } editing={section.editing} pinned={section.pinned} index={idx} diff --git a/extension/react-app/src/components/InputAndButton.tsx b/extension/react-app/src/components/InputAndButton.tsx index 0a8592f2..8019d014 100644 --- a/extension/react-app/src/components/InputAndButton.tsx +++ b/extension/react-app/src/components/InputAndButton.tsx @@ -1,6 +1,6 @@ import React, { useRef } from "react"; import styled from "styled-components"; -import { vscBackground } from "."; +import { vscBackground, vscForeground } from "."; interface InputAndButtonProps { onUserInput: (input: string) => void; @@ -16,7 +16,7 @@ const Input = styled.input` padding: 0.5rem; border: 1px solid white; background-color: ${vscBackground}; - color: white; + color: ${vscForeground}; border-radius: 4px; border-top-right-radius: 0; border-bottom-right-radius: 0; @@ -27,7 +27,7 @@ const Button = styled.button` padding: 0.5rem; border: 1px solid white; background-color: ${vscBackground}; - color: white; + color: ${vscForeground}; border-radius: 4px; border-top-left-radius: 0; border-bottom-left-radius: 0; @@ -35,8 +35,8 @@ const Button = styled.button` cursor: pointer; &:hover { - background-color: white; - color: black; + background-color: ${vscForeground}; + color: ${vscBackground}; } `; diff --git a/extension/react-app/src/components/PillButton.tsx b/extension/react-app/src/components/PillButton.tsx index 5a16516e..eba5cf8f 100644 --- a/extension/react-app/src/components/PillButton.tsx +++ b/extension/react-app/src/components/PillButton.tsx @@ -1,6 +1,11 @@ import { useContext, useState } from "react"; import styled from "styled-components"; -import { StyledTooltip, defaultBorderRadius, secondaryDark } from "."; +import { + StyledTooltip, + defaultBorderRadius, + secondaryDark, + vscForeground, +} from "."; import { Trash, PaintBrush, @@ -10,7 +15,7 @@ import { GUIClientContext } from "../App"; const Button = styled.button` border: none; - color: white; + color: ${vscForeground}; background-color: ${secondaryDark}; border-radius: ${defaultBorderRadius}; padding: 8px; diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx index 93bdbc89..26bc8e33 100644 --- a/extension/react-app/src/components/StepContainer.tsx +++ b/extension/react-app/src/components/StepContainer.tsx @@ -6,6 +6,7 @@ import { secondaryDark, vscBackground, vscBackgroundTransparent, + vscForeground, } from "."; import { ChevronDown, @@ -120,20 +121,22 @@ const StyledMarkdownPreview = styled(MarkdownPreview)` } code { - color: #f69292; + color: #f78383; word-wrap: break-word; + border-radius: ${defaultBorderRadius}; + background-color: ${secondaryDark}; } pre > code { background-color: ${secondaryDark}; - color: white; + color: ${vscForeground}; } background-color: ${vscBackground}; font-family: "Lexend", sans-serif; font-size: 13px; padding: 8px; - color: white; + color: ${vscForeground}; `; // #endregion @@ -267,6 +270,9 @@ function StepContainer(props: StepContainerProps) { ) : ( )} diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx index 646d6846..cba3852d 100644 --- a/extension/react-app/src/components/TextDialog.tsx +++ 
b/extension/react-app/src/components/TextDialog.tsx @@ -1,7 +1,7 @@ // Write a component that displays a dialog box with a text field and a button. import React, { useEffect, useState } from "react"; import styled from "styled-components"; -import { Button, buttonColor, secondaryDark, vscBackground } from "."; +import { Button, secondaryDark, vscBackground, vscForeground } from "."; import { isMetaEquivalentKeyPressed } from "../util"; const ScreenCover = styled.div` @@ -21,13 +21,13 @@ const DialogContainer = styled.div` `; const Dialog = styled.div` - background-color: white; + color: ${vscForeground}; + background-color: ${vscBackground}; border-radius: 8px; padding: 8px; display: flex; flex-direction: column; - /* box-shadow: 0 0 10px 0 rgba(255, 255, 255, 0.5); */ - border: 2px solid ${buttonColor}; + box-shadow: 0 0 10px 0 ${vscForeground}; width: fit-content; margin: auto; `; @@ -38,14 +38,16 @@ const TextArea = styled.textarea` padding: 8px; outline: 1px solid black; resize: none; + background-color: ${secondaryDark}; + color: ${vscForeground}; &:focus { - outline: 1px solid ${buttonColor}; + outline: 1px solid ${vscForeground}; } `; const P = styled.p` - color: black; + color: ${vscForeground}; margin: 8px auto; `; diff --git a/extension/react-app/src/components/index.ts b/extension/react-app/src/components/index.ts index 9ae0f097..cb5e7915 100644 --- a/extension/react-app/src/components/index.ts +++ b/extension/react-app/src/components/index.ts @@ -3,12 +3,16 @@ import styled, { keyframes } from "styled-components"; export const defaultBorderRadius = "5px"; export const lightGray = "rgb(100 100 100)"; -export const secondaryDark = "rgb(45 45 45)"; -export const vscBackground = "rgb(30 30 30)"; +// export const secondaryDark = "rgb(45 45 45)"; +// export const vscBackground = "rgb(30 30 30)"; export const vscBackgroundTransparent = "#1e1e1ede"; export const buttonColor = "rgb(113 28 59)"; export const buttonColorHover = "rgb(113 28 59 0.67)"; +export const secondaryDark = "var(--vscode-textBlockQuote-background)"; +export const vscBackground = "var(--vscode-editor-background)"; +export const vscForeground = "var(--vscode-editor-foreground)"; + export const Button = styled.button` padding: 10px 12px; margin: 8px 0; @@ -46,8 +50,8 @@ export const TextArea = styled.textarea` resize: vertical; padding: 4px; - caret-color: white; - color: white; + caret-color: ${vscForeground}; + color: ${vscForeground}; &:focus { outline: 1px solid ${buttonColor}; @@ -120,7 +124,7 @@ export const MainTextInput = styled.textarea` border: 1px solid #ccc; margin: 8px 8px; background-color: ${vscBackground}; - color: white; + color: ${vscForeground}; outline: 1px solid orange; resize: none; `; @@ -137,8 +141,9 @@ export const appear = keyframes` `; export const HeaderButton = styled.button<{ inverted: boolean | undefined }>` - background-color: ${({ inverted }) => (inverted ? "white" : "transparent")}; - color: ${({ inverted }) => (inverted ? "black" : "white")}; + background-color: ${({ inverted }) => + inverted ? vscForeground : "transparent"}; + color: ${({ inverted }) => (inverted ? vscBackground : vscForeground)}; border: none; border-radius: ${defaultBorderRadius}; @@ -146,7 +151,9 @@ export const HeaderButton = styled.button<{ inverted: boolean | undefined }>` &:hover { background-color: ${({ inverted }) => - typeof inverted === "undefined" || inverted ? lightGray : "transparent"}; + typeof inverted === "undefined" || inverted + ?
secondaryDark + : "transparent"}; } display: flex; align-items: center; diff --git a/extension/react-app/src/index.css b/extension/react-app/src/index.css index 6e33c89c..bac7fe97 100644 --- a/extension/react-app/src/index.css +++ b/extension/react-app/src/index.css @@ -14,13 +14,13 @@ html, body, #root { height: 100%; - background-color: var(--vsc-background); + background-color: var(--vscode-editor-background); font-family: "Lexend", sans-serif; } body { padding: 0; - color: white; + color: var(--vscode-editor-foreground); padding: 0px; margin: 0px; height: 100%; diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index 64207487..c35cf21b 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -1,5 +1,9 @@ import styled from "styled-components"; -import { defaultBorderRadius } from "../components"; +import { + defaultBorderRadius, + vscBackground, + vscForeground, +} from "../components"; import Loader from "../components/Loader"; import ContinueButton from "../components/ContinueButton"; import { FullState, HighlightedRangeContext } from "../../../schema/FullState"; @@ -371,12 +375,13 @@ function GUI(props: GUIProps) { style={{ position: "fixed", bottom: "50px", - backgroundColor: "white", - color: "black", + backgroundColor: vscBackground, + color: vscForeground, borderRadius: defaultBorderRadius, padding: "16px", margin: "16px", zIndex: 100, + boxShadow: `0px 0px 10px 0px ${vscForeground}`, }} hidden={!showDataSharingInfo} > -- cgit v1.2.3-70-g09d2 From dc64c73adb8c8a2aeb3210bc9f4ff1bd82c03de2 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Mon, 17 Jul 2023 21:09:30 -0700 Subject: show exact prompt/completion logs --- continuedev/src/continuedev/core/main.py | 1 + continuedev/src/continuedev/core/sdk.py | 28 +++++++++++-- continuedev/src/continuedev/libs/llm/openai.py | 47 +++++++++++++++++----- .../src/continuedev/libs/llm/proxy_server.py | 33 +++++++++++---- .../src/continuedev/libs/util/count_tokens.py | 7 ++++ continuedev/src/continuedev/server/gui.py | 9 +++++ continuedev/src/continuedev/server/ide.py | 14 ++++++- continuedev/src/continuedev/server/ide_protocol.py | 4 ++ .../src/continuedev/server/session_manager.py | 2 +- extension/package-lock.json | 4 +- extension/package.json | 2 +- .../react-app/src/components/StepContainer.tsx | 17 +++++++- .../src/hooks/ContinueGUIClientProtocol.ts | 2 + .../react-app/src/hooks/useContinueGUIProtocol.ts | 4 ++ extension/react-app/src/pages/gui.tsx | 1 + extension/src/continueIdeClient.ts | 38 +++++++++++++++++ 16 files changed, 185 insertions(+), 28 deletions(-) (limited to 'extension/react-app/src/pages') diff --git a/continuedev/src/continuedev/core/main.py b/continuedev/src/continuedev/core/main.py index 88690c83..5931d978 100644 --- a/continuedev/src/continuedev/core/main.py +++ b/continuedev/src/continuedev/core/main.py @@ -102,6 +102,7 @@ class HistoryNode(ContinueBaseModel): depth: int deleted: bool = False active: bool = True + logs: List[str] = [] def to_chat_messages(self) -> List[ChatMessage]: if self.step.description is None or self.step.manage_own_chat_context: diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py index 280fefa8..53214384 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/continuedev/src/continuedev/core/sdk.py @@ -37,6 +37,25 @@ class Models: model_providers: List[ModelProvider] system_message: str + """ + Better to have sdk.llm.stream_chat(messages, model="claude-2"). 
+ Then you also don't care that it's async. + And it's easier to add more models. + And intermediate shared code is easier to add. + And you can make constants like ContinueModels.GPT35 = "gpt-3.5-turbo" + PromptTransformer would be a good concept: You pass a prompt or list of messages and a model, then it outputs the prompt for that model. + Easy to reason about, can place anywhere. + And you can even pass a Prompt object to sdk.llm.stream_chat maybe, and it'll automatically be transformed for the given model. + This can all happen inside of Models? + + class Prompt: + def __init__(self, ...info): + '''take whatever info is needed to describe the prompt''' + + def to_string(self, model: str) -> str: + '''depending on the model, return the single prompt string''' """ + def __init__(self, sdk: "ContinueSDK", model_providers: List[ModelProvider]): self.sdk = sdk self.model_providers = model_providers @@ -59,8 +78,8 @@ class Models: def __load_openai_model(self, model: str) -> OpenAI: api_key = self.provider_keys["openai"] if api_key == "": - return ProxyServer(self.sdk.ide.unique_id, model, system_message=self.system_message) - return OpenAI(api_key=api_key, default_model=model, system_message=self.system_message, azure_info=self.sdk.config.azure_openai_info) + return ProxyServer(self.sdk.ide.unique_id, model, system_message=self.system_message, write_log=self.sdk.write_log) + return OpenAI(api_key=api_key, default_model=model, system_message=self.system_message, azure_info=self.sdk.config.azure_openai_info, write_log=self.sdk.write_log) def __load_hf_inference_api_model(self, model: str) -> HuggingFaceInferenceAPI: api_key = self.provider_keys["hf_inference_api"] @@ -156,6 +175,9 @@ class ContinueSDK(AbstractContinueSDK): def history(self) -> History: return self.__autopilot.history + def write_log(self, message: str): + self.history.timeline[self.history.current_index].logs.append(message) + async def _ensure_absolute_path(self, path: str) -> str: if os.path.isabs(path): return path @@ -263,7 +285,7 @@ for rif in highlighted_code: msg = ChatMessage(content=f"{preface} ({rif.filepath}):\n```\n{rif.contents}\n```", - role="system", summary=f"{preface}: {rif.filepath}") + role="user", summary=f"{preface}: {rif.filepath}") # Don't insert after latest user message or function call i = -1 diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py index 33d10985..64bb39a2 100644 --- a/continuedev/src/continuedev/libs/llm/openai.py +++ b/continuedev/src/continuedev/libs/llm/openai.py @@ -1,10 +1,11 @@ from functools import cached_property -from typing import Any, Coroutine, Dict, Generator, List, Union +from typing import Any, Callable, Coroutine, Dict, Generator, List, Union from ...core.main import ChatMessage import openai from ..llm import LLM -from ..util.count_tokens import compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, prune_raw_prompt_from_top +from ..util.count_tokens import compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, format_chat_messages, prune_raw_prompt_from_top from ...core.config import AzureInfo @@ -12,11 +13,12 @@ class OpenAI(LLM): api_key: str default_model: str - def __init__(self, api_key: str, default_model: str, system_message: str = None, azure_info: AzureInfo = None): + def __init__(self, api_key: str, default_model: str, system_message: str = None, azure_info: AzureInfo = None, write_log: Callable[[str], None] = None): self.api_key =
api_key self.default_model = default_model self.system_message = system_message self.azure_info = azure_info + self.write_log = write_log openai.api_key = api_key @@ -46,18 +48,29 @@ class OpenAI(LLM): args["stream"] = True if args["model"] in CHAT_MODELS: + messages = compile_chat_messages( + args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message) + self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}") + completion = "" async for chunk in await openai.ChatCompletion.acreate( - messages=compile_chat_messages( - args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message), + messages=messages, **args, ): if "content" in chunk.choices[0].delta: yield chunk.choices[0].delta.content + completion += chunk.choices[0].delta.content else: continue + + self.write_log(f"Completion: \n\n{completion}") else: + self.write_log(f"Prompt:\n\n{prompt}") + completion = "" async for chunk in await openai.Completion.acreate(prompt=prompt, **args): yield chunk.choices[0].text + completion += chunk.choices[0].text + + self.write_log(f"Completion:\n\n{completion}") async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]: args = self.default_args.copy() @@ -67,27 +80,39 @@ class OpenAI(LLM): if not args["model"].endswith("0613") and "functions" in args: del args["functions"] + messages = compile_chat_messages( + args["model"], messages, args["max_tokens"], functions=args.get("functions", None), system_message=self.system_message) + self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}") + completion = "" async for chunk in await openai.ChatCompletion.acreate( - messages=compile_chat_messages( - args["model"], messages, args["max_tokens"], functions=args.get("functions", None), system_message=self.system_message), + messages=messages, **args, ): yield chunk.choices[0].delta + if "content" in chunk.choices[0].delta: + completion += chunk.choices[0].delta.content + self.write_log(f"Completion: \n\n{completion}") async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]: args = {**self.default_args, **kwargs} if args["model"] in CHAT_MODELS: + messages = compile_chat_messages( + args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message) + self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}") resp = (await openai.ChatCompletion.acreate( - messages=compile_chat_messages( - args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message), + messages=messages, **args, )).choices[0].message.content + self.write_log(f"Completion: \n\n{resp}") else: + prompt = prune_raw_prompt_from_top( + args["model"], prompt, args["max_tokens"]) + self.write_log(f"Prompt:\n\n{prompt}") resp = (await openai.Completion.acreate( - prompt=prune_raw_prompt_from_top( - args["model"], prompt, args["max_tokens"]), + prompt=prompt, **args, )).choices[0].text + self.write_log(f"Completion:\n\n{resp}") return resp diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py index 3ec492f3..91b5842a 100644 --- a/continuedev/src/continuedev/libs/llm/proxy_server.py +++ b/continuedev/src/continuedev/libs/llm/proxy_server.py @@ -1,10 +1,11 @@ + from functools import cached_property import json -from typing import Any, Coroutine, Dict, Generator, List, 
Literal, Union +from typing import Any, Callable, Coroutine, Dict, Generator, List, Literal, Union import aiohttp from ...core.main import ChatMessage from ..llm import LLM -from ..util.count_tokens import DEFAULT_ARGS, DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, count_tokens +from ..util.count_tokens import DEFAULT_ARGS, DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, count_tokens, format_chat_messages import certifi import ssl @@ -19,12 +20,14 @@ class ProxyServer(LLM): unique_id: str name: str default_model: Literal["gpt-3.5-turbo", "gpt-4"] + write_log: Callable[[str], None] - def __init__(self, unique_id: str, default_model: Literal["gpt-3.5-turbo", "gpt-4"], system_message: str = None): + def __init__(self, unique_id: str, default_model: Literal["gpt-3.5-turbo", "gpt-4"], system_message: str = None, write_log: Callable[[str], None] = None): self.unique_id = unique_id self.default_model = default_model self.system_message = system_message self.name = default_model + self.write_log = write_log @property def default_args(self): @@ -36,14 +39,19 @@ class ProxyServer(LLM): async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]: args = {**self.default_args, **kwargs} + messages = compile_chat_messages( + args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message) + self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}") async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: async with session.post(f"{SERVER_URL}/complete", json={ - "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message), + "messages": messages, "unique_id": self.unique_id, **args }) as resp: try: - return await resp.text() + response_text = await resp.text() + self.write_log(f"Completion: \n\n{response_text}") + return response_text except: raise Exception(await resp.text()) @@ -51,6 +59,7 @@ class ProxyServer(LLM): args = {**self.default_args, **kwargs} messages = compile_chat_messages( self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message) + self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}") async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: async with session.post(f"{SERVER_URL}/stream_chat", json={ @@ -59,6 +68,7 @@ class ProxyServer(LLM): **args }) as resp: # This is streaming application/json instaed of text/event-stream + completion = "" async for line in resp.content.iter_chunks(): if line[1]: try: @@ -67,14 +77,19 @@ class ProxyServer(LLM): chunks = json_chunk.split("\n") for chunk in chunks: if chunk.strip() != "": - yield json.loads(chunk) + loaded_chunk = json.loads(chunk) + yield loaded_chunk + if "content" in loaded_chunk: + completion += loaded_chunk["content"] except: raise Exception(str(line[0])) + self.write_log(f"Completion: \n\n{completion}") async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]: args = {**self.default_args, **kwargs} messages = compile_chat_messages( self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None), system_message=self.system_message) + self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}") async with 
aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: async with session.post(f"{SERVER_URL}/stream_complete", json={ @@ -82,9 +97,13 @@ class ProxyServer(LLM): "unique_id": self.unique_id, **args }) as resp: + completion = "" async for line in resp.content.iter_any(): if line: try: - yield line.decode("utf-8") + decoded_line = line.decode("utf-8") + yield decoded_line + completion += decoded_line except: raise Exception(str(line)) + self.write_log(f"Completion: \n\n{completion}") diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py index 1d5d6729..13de7990 100644 --- a/continuedev/src/continuedev/libs/util/count_tokens.py +++ b/continuedev/src/continuedev/libs/util/count_tokens.py @@ -107,3 +107,10 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], max_tokens: int, }) return history + + +def format_chat_messages(messages: List[ChatMessage]) -> str: + formatted = "" + for msg in messages: + formatted += f"<{msg['role'].capitalize()}>\n{msg['content']}\n\n" + return formatted diff --git a/continuedev/src/continuedev/server/gui.py b/continuedev/src/continuedev/server/gui.py index 4201353e..ae57c0b6 100644 --- a/continuedev/src/continuedev/server/gui.py +++ b/continuedev/src/continuedev/server/gui.py @@ -99,6 +99,8 @@ class GUIProtocolServer(AbstractGUIProtocolServer): self.on_set_editing_at_indices(data["indices"]) elif message_type == "set_pinned_at_indices": self.on_set_pinned_at_indices(data["indices"]) + elif message_type == "show_logs_at_index": + self.on_show_logs_at_index(data["index"]) except Exception as e: print(e) @@ -166,6 +168,13 @@ class GUIProtocolServer(AbstractGUIProtocolServer): indices), self.session.autopilot.continue_sdk.ide.unique_id ) + def on_show_logs_at_index(self, index: int): + name = f"continue_logs.txt" + logs = "\n\n############################################\n\n".join( + ["This is a log of the exact prompt/completion pairs sent/received from the LLM during this step"] + self.session.autopilot.continue_sdk.history.timeline[index].logs) + create_async_task( + self.session.autopilot.ide.showVirtualFile(name, logs)) + @router.websocket("/ws") async def websocket_endpoint(websocket: WebSocket, session: Session = Depends(websocket_session)): diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index 43538407..aeff5623 100644 --- a/continuedev/src/continuedev/server/ide.py +++ b/continuedev/src/continuedev/server/ide.py @@ -224,6 +224,12 @@ class IdeProtocolServer(AbstractIdeProtocolServer): "open": open }) + async def showVirtualFile(self, name: str, contents: str): + await self._send_json("showVirtualFile", { + "name": name, + "contents": contents + }) + async def setSuggestionsLocked(self, filepath: str, locked: bool = True): # Lock suggestions in the file so they don't ruin the offset before others are inserted await self._send_json("setSuggestionsLocked", { @@ -288,6 +294,8 @@ class IdeProtocolServer(AbstractIdeProtocolServer): pass def __get_autopilot(self): + if self.session_id not in self.session_manager.sessions: + return None return self.session_manager.sessions[self.session_id].autopilot def onFileEdits(self, edits: List[FileEditWithFullContents]): @@ -442,7 +450,8 @@ async def websocket_endpoint(websocket: WebSocket, session_id: str = None): if session_id is not None: session_manager.registered_ides[session_id] = ideProtocolServer other_msgs = await ideProtocolServer.initialize(session_id) 
- capture_event(ideProtocolServer.unique_id, "session_started", { "session_id": ideProtocolServer.session_id }) + capture_event(ideProtocolServer.unique_id, "session_started", { + "session_id": ideProtocolServer.session_id}) for other_msg in other_msgs: handle_msg(other_msg) @@ -463,5 +472,6 @@ async def websocket_endpoint(websocket: WebSocket, session_id: str = None): if websocket.client_state != WebSocketState.DISCONNECTED: await websocket.close() - capture_event(ideProtocolServer.unique_id, "session_ended", { "session_id": ideProtocolServer.session_id }) + capture_event(ideProtocolServer.unique_id, "session_ended", { + "session_id": ideProtocolServer.session_id}) session_manager.registered_ides.pop(ideProtocolServer.session_id) diff --git a/continuedev/src/continuedev/server/ide_protocol.py b/continuedev/src/continuedev/server/ide_protocol.py index d0fb0bf8..0ae7e7fa 100644 --- a/continuedev/src/continuedev/server/ide_protocol.py +++ b/continuedev/src/continuedev/server/ide_protocol.py @@ -23,6 +23,10 @@ class AbstractIdeProtocolServer(ABC): async def setFileOpen(self, filepath: str, open: bool = True): """Set whether a file is open""" + @abstractmethod + async def showVirtualFile(self, name: str, contents: str): + """Show a virtual file""" + @abstractmethod async def setSuggestionsLocked(self, filepath: str, locked: bool = True): """Set whether suggestions are locked""" diff --git a/continuedev/src/continuedev/server/session_manager.py b/continuedev/src/continuedev/server/session_manager.py index 6d109ca6..90172a4e 100644 --- a/continuedev/src/continuedev/server/session_manager.py +++ b/continuedev/src/continuedev/server/session_manager.py @@ -100,7 +100,7 @@ class SessionManager: if session_id not in self.sessions: raise SessionNotFound(f"Session {session_id} not found") if self.sessions[session_id].ws is None: - print(f"Session {session_id} has no websocket") + # print(f"Session {session_id} has no websocket") return await self.sessions[session_id].ws.send_json({ diff --git a/extension/package-lock.json b/extension/package-lock.json index e67fa950..107a7001 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.178", + "version": "0.0.179", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.178", + "version": "0.0.179", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index 121423ed..89c6daf5 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": "Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.178", + "version": "0.0.179", "publisher": "Continue", "engines": { "vscode": "^1.67.0" diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx index 93b90f0d..bc8665fd 100644 --- a/extension/react-app/src/components/StepContainer.tsx +++ b/extension/react-app/src/components/StepContainer.tsx @@ -1,4 +1,4 @@ -import { useEffect, useRef, useState } from "react"; +import { useContext, useEffect, useRef, useState } from "react"; import styled, { keyframes } from "styled-components"; import { appear, @@ -13,12 +13,14 @@ import { ChevronRight, ArrowPath, XMark, + MagnifyingGlass, } from "@styled-icons/heroicons-outline"; import { StopCircle } from "@styled-icons/heroicons-solid"; import { HistoryNode } from 
"../../../schema/HistoryNode"; import HeaderButtonWithText from "./HeaderButtonWithText"; import MarkdownPreview from "@uiw/react-markdown-preview"; import { getMetaKeyLabel, isMetaEquivalentKeyPressed } from "../util"; +import { GUIClientContext } from "../App"; interface StepContainerProps { historyNode: HistoryNode; @@ -32,6 +34,7 @@ interface StepContainerProps { onToggle: () => void; isFirst: boolean; isLast: boolean; + index: number; } // #region styled components @@ -140,6 +143,7 @@ function StepContainer(props: StepContainerProps) { const naturalLanguageInputRef = useRef(null); const userInputRef = useRef(null); const isUserInput = props.historyNode.step.name === "UserInputStep"; + const client = useContext(GUIClientContext); useEffect(() => { if (userInputRef?.current) { @@ -210,6 +214,17 @@ function StepContainer(props: StepContainerProps) { */} <> + {(props.historyNode.logs as any)?.length > 0 && ( + { + e.stopPropagation(); + client?.showLogsAtIndex(props.index); + }} + > + + + )} { e.stopPropagation(); diff --git a/extension/react-app/src/hooks/ContinueGUIClientProtocol.ts b/extension/react-app/src/hooks/ContinueGUIClientProtocol.ts index a179c2bf..6c0df8fc 100644 --- a/extension/react-app/src/hooks/ContinueGUIClientProtocol.ts +++ b/extension/react-app/src/hooks/ContinueGUIClientProtocol.ts @@ -28,6 +28,8 @@ abstract class AbstractContinueGUIClientProtocol { abstract setPinnedAtIndices(indices: number[]): void; abstract toggleAddingHighlightedCode(): void; + + abstract showLogsAtIndex(index: number): void; } export default AbstractContinueGUIClientProtocol; diff --git a/extension/react-app/src/hooks/useContinueGUIProtocol.ts b/extension/react-app/src/hooks/useContinueGUIProtocol.ts index 2060dd7f..fef5b2e1 100644 --- a/extension/react-app/src/hooks/useContinueGUIProtocol.ts +++ b/extension/react-app/src/hooks/useContinueGUIProtocol.ts @@ -86,6 +86,10 @@ class ContinueGUIClientProtocol extends AbstractContinueGUIClientProtocol { toggleAddingHighlightedCode(): void { this.messenger.send("toggle_adding_highlighted_code", {}); } + + showLogsAtIndex(index: number): void { + this.messenger.send("show_logs_at_index", { index }); + } } export default ContinueGUIClientProtocol; diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index c35cf21b..fccc9b4b 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -311,6 +311,7 @@ function GUI(props: GUIProps) { ) ) : ( (); + onDidChange = this.onDidChangeEmitter.event; + + provideTextDocumentContent(uri: vscode.Uri): string { + return uri.query; + } + })(); + context.subscriptions.push( + vscode.workspace.registerTextDocumentContentProvider( + continueVirtualDocumentScheme, + documentContentProvider + ) + ); } async handleMessage( @@ -200,6 +221,9 @@ class IdeProtocolClient { this.openFile(data.filepath); // TODO: Close file if False break; + case "showVirtualFile": + this.showVirtualFile(data.name, data.contents); + break; case "setSuggestionsLocked": this.setSuggestionsLocked(data.filepath, data.locked); break; @@ -295,6 +319,20 @@ class IdeProtocolClient { openEditorAndRevealRange(filepath, undefined, vscode.ViewColumn.One); } + showVirtualFile(name: string, contents: string) { + vscode.workspace + .openTextDocument( + vscode.Uri.parse( + `${continueVirtualDocumentScheme}:${name}?${encodeURIComponent( + contents + )}` + ) + ) + .then((doc) => { + vscode.window.showTextDocument(doc, { preview: false }); + }); + } + setSuggestionsLocked(filepath: string, 
     editorSuggestionsLocked.set(filepath, locked);
     // TODO: Rerender?
-- 
cgit v1.2.3-70-g09d2


From 0ad32bd6dfaf96af0a2db82fb2b06c200e131e62 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Sat, 22 Jul 2023 12:14:07 -0700
Subject: how to use private model docs and button

---
 docs/docs/customization.md                        | 124 ++++++++++++++++++++++
 docs/sidebars.js                                  |  10 +-
 extension/package-lock.json                       |   4 +-
 extension/package.json                            |   2 +-
 extension/react-app/src/components/TextDialog.tsx |  60 ++++++-----
 extension/react-app/src/pages/gui.tsx             |  40 +++----
 6 files changed, 189 insertions(+), 51 deletions(-)
 create mode 100644 docs/docs/customization.md

(limited to 'extension/react-app/src/pages')

diff --git a/docs/docs/customization.md b/docs/docs/customization.md
new file mode 100644
index 00000000..cd306cfe
--- /dev/null
+++ b/docs/docs/customization.md
@@ -0,0 +1,124 @@
+# Customization
+
+Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` on your machine. This file is created the first time you run Continue.
+
+## Change the default LLM
+
+Change the `default_model` field to any of "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "claude-2", or "ggml".
+
+### claude-2 and gpt-X
+
+If you have access, simply set `default_model` to the model you would like to use, and you will be prompted for a personal API key after reloading VS Code. If you are using an OpenAI model, you can press enter to try with our API key for free.
+
+### Local models with ggml
+
+See our [5 minute quickstart](https://github.com/continuedev/ggml-server-example) to run any model locally with ggml. While these models don't yet perform as well, they are free, entirely private, and run offline.
+
+### Azure OpenAI Service
+
+If you'd like to use OpenAI models but are concerned about privacy, you can use the Azure OpenAI service, which is GDPR and HIPAA compliant. After applying for access [here](https://azure.microsoft.com/en-us/products/ai-services/openai-service), you will typically hear back within a few days. Once you have access, set `default_model` to "gpt-4", and then set the `azure_openai_info` property in the `ContinueConfig` like so:
+
+```python
+config = ContinueConfig(
+    ...
+    azure_openai_info=AzureInfo(
+        endpoint="https://my-azure-openai-instance.openai.azure.com/",
+        engine="my-azure-openai-deployment",
+        api_version="2023-03-15-preview"
+    )
+)
+```
+
+The easiest way to find this information is from the chat playground in the Azure OpenAI portal. Under the "Chat Session" section, click "View Code" to see each of these parameters. Finally, find one of your Azure OpenAI keys and enter it in the VS Code settings under `continue.OPENAI_API_KEY`.
+
+## Customize System Message
+
+You can write your own system message, a set of instructions that will always be top-of-mind for the LLM, by setting the `system_message` property to any string. For example, you might request "Please make all responses as concise as possible and never repeat something you have already explained."
+
+System messages can also reference files. For example, if there is a markdown file (e.g. at `/Users/nate/Documents/docs/reference.md`) you'd like the LLM to know about, you can reference it with [Mustache](http://mustache.github.io/mustache.5.html) templating like this: "Please reference this documentation: {{ Users/nate/Documents/docs/reference.md }}". As of now, you must use an absolute path.
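+
+Putting these together, a sketch of what this might look like in `config.py` (the message and file path here are illustrative, not required values):
+
+```python
+config = ContinueConfig(
+    ...
+    # hypothetical example; point the Mustache reference at your own file
+    system_message="Be concise. Reference this documentation: {{ Users/nate/Documents/docs/reference.md }}"
+)
+```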
+
+## Custom Commands
+
+You can add custom slash commands by adding a `CustomCommand` object to the `custom_commands` property. Each `CustomCommand` has
+
+- `name`: the name of the command, which will be invoked with `/name`
+- `description`: a short description of the command, which will appear in the dropdown
+- `prompt`: the set of instructions that will be sent to the LLM whenever the command is invoked
+
+Custom commands are great when you are frequently reusing a prompt. For example, if you've crafted a great prompt and frequently ask the LLM to check for mistakes in your code, you could add a command like this:
+
+```python
+config = ContinueConfig(
+    ...
+    custom_commands=[
+        CustomCommand(
+            name="check",
+            description="Check for mistakes in my code",
+            prompt=dedent("""\
+            Please read the highlighted code and check for any mistakes. You should look for the following, and be extremely vigilant:
+            - Syntax errors
+            - Logic errors
+            - Security vulnerabilities
+            - Performance issues
+            - Anything else that looks wrong
+
+            Once you find an error, please explain it as clearly as possible, but without using extra words. For example, instead of saying "I think there is a syntax error on line 5", you should say "Syntax error on line 5". Give your answer as one bullet point per mistake found.""")
+        )
+    ]
+)
+```
+
+## Temperature
+
+Set `temperature` to any value between 0 and 1. Higher values will make the LLM more creative, while lower values will make it more predictable. The default is 0.5.
+
+## Custom Context Providers
+
+When you type '@' in the Continue text box, it will display a dropdown of items that can be selected to include in your message as context. For example, you might want to reference a GitHub Issue, file, or Slack thread. All of these options are provided by a `ContextProvider` class, and we make it easy to write your own. As an example, here is the `GitHubIssuesContextProvider`, which lets you search all open GitHub Issues in a repo:
+
+```python
+class GitHubIssuesContextProvider(ContextProvider):
+    """
+    The GitHubIssuesContextProvider is a ContextProvider that allows you to search GitHub Issues in a repo.
+    """
+
+    title = "issues"
+    repo_name: str
+    auth_token: str
+
+    async def provide_context_items(self) -> List[ContextItem]:
+        auth = Auth.Token(self.auth_token)
+        gh = Github(auth=auth)
+
+        repo = gh.get_repo(self.repo_name)
+        issues = repo.get_issues().get_page(0)
+
+        items = [ContextItem(
+            content=issue.body,
+            description=ContextItemDescription(
+                name=f"Issue #{issue.number}",
+                description=issue.title,
+                id=ContextItemId(
+                    provider_title=self.title,
+                    item_id=issue.id
+                )
+            )
+        ) for issue in issues]
+        self.context_items = {
+            item.description.id.to_string(): item for item in items}
+        return items
+```
+
+It can then be set in the `ContinueConfig` like so:
+
+```python
+config = ContinueConfig(
+    ...
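+    # The values below are placeholders; substitute your own repository and token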
+    context_providers=[
+        GitHubIssuesContextProvider(
+            repo_name="my-github-username-or-org/my-github-repo",
+            auth_token="my-github-auth-token"
+        )
+    ]
+)
+```
diff --git a/docs/sidebars.js b/docs/sidebars.js
index 9baf1b94..83b34ee8 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -13,7 +13,15 @@
 /** @type {import('@docusaurus/plugin-content-docs').SidebarsConfig} */
 const sidebars = {
-  docsSidebar: ["intro", "getting-started", "how-to-use-continue", "how-continue-works", "telemetry", "collecting-data"],
+  docsSidebar: [
+    "intro",
+    "getting-started",
+    "how-to-use-continue",
+    "how-continue-works",
+    "telemetry",
+    "collecting-data",
+    "customization",
+  ],
 };
 
 module.exports = sidebars;
diff --git a/extension/package-lock.json b/extension/package-lock.json
index 5c8e27d0..933da12b 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.188",
+  "version": "0.0.189",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.188",
+      "version": "0.0.189",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 3d44c156..08737ff4 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.188",
+  "version": "0.0.189",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx
index cba3852d..9597b578 100644
--- a/extension/react-app/src/components/TextDialog.tsx
+++ b/extension/react-app/src/components/TextDialog.tsx
@@ -3,6 +3,7 @@ import React, { useEffect, useState } from "react";
 import styled from "styled-components";
 import { Button, secondaryDark, vscBackground, vscForeground } from ".";
 import { isMetaEquivalentKeyPressed } from "../util";
+import { ReactMarkdown } from "react-markdown/lib/react-markdown";
 
 const ScreenCover = styled.div`
   position: absolute;
@@ -56,6 +57,7 @@ const TextDialog = (props: {
   onEnter: (text: string) => void;
   onClose: () => void;
   message?: string;
+  entryOn?: boolean;
 }) => {
   const [text, setText] = useState("");
   const textAreaRef = React.createRef();
@@ -79,33 +81,37 @@ const TextDialog = (props: {
         }}
       >
-        <P>{props.message || ""}</P>
-