From 699a74250fd4cf91af930ff63077aeb81f74856f Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 13 Jul 2023 09:55:09 -0700
Subject: show react immediately

---
 extension/src/activation/activate.ts | 72 ++++++++++++++++++------------------
 extension/src/bridge.ts              |  6 +--
 2 files changed, 38 insertions(+), 40 deletions(-)

diff --git a/extension/src/activation/activate.ts b/extension/src/activation/activate.ts
index cd885b12..b03282e5 100644
--- a/extension/src/activation/activate.ts
+++ b/extension/src/activation/activate.ts
@@ -35,46 +35,48 @@ export async function activateExtension(context: vscode.ExtensionContext) {
     })
     .catch((e) => console.log("Error checking for extension updates: ", e));
 
-  // Start the Python server
-  await new Promise((resolve, reject) => {
-    vscode.window.withProgress(
+  const sessionIdPromise = (async () => {
+    // Start the Python server
+    await new Promise((resolve, reject) => {
+      vscode.window.withProgress(
+        {
+          location: vscode.ProgressLocation.Notification,
+          title:
+            "Starting Continue Server... (it may take a minute to download Python packages)",
+          cancellable: false,
+        },
+        async (progress, token) => {
+          await startContinuePythonServer();
+          resolve(null);
+        }
+      );
+    });
+
+    // Initialize IDE Protocol Client
+    const serverUrl = getContinueServerUrl();
+    ideProtocolClient = new IdeProtocolClient(
+      `${serverUrl.replace("http", "ws")}/ide/ws`,
+      context
+    );
+
+    return ideProtocolClient.getSessionId();
+  })();
+
+  // Register the webview
+  const provider = new ContinueGUIWebviewViewProvider(sessionIdPromise);
+
+  context.subscriptions.push(
+    vscode.window.registerWebviewViewProvider(
+      "continue.continueGUIView",
+      provider,
       {
-        location: vscode.ProgressLocation.Notification,
-        title:
-          "Starting Continue Server... (it may take a minute to download Python packages)",
-        cancellable: false,
-      },
-      async (progress, token) => {
-        await startContinuePythonServer();
-        resolve(null);
+        webviewOptions: { retainContextWhenHidden: true },
       }
-    );
-  });
+    )
+  );
 
   // Register commands and providers
   sendTelemetryEvent(TelemetryEvent.ExtensionActivated);
   registerAllCodeLensProviders(context);
   registerAllCommands(context);
-
-  // Initialize IDE Protocol Client
-  const serverUrl = getContinueServerUrl();
-  ideProtocolClient = new IdeProtocolClient(
-    `${serverUrl.replace("http", "ws")}/ide/ws`,
-    context
-  );
-
-  {
-    const sessionIdPromise = await ideProtocolClient.getSessionId();
-    const provider = new ContinueGUIWebviewViewProvider(sessionIdPromise);
-
-    context.subscriptions.push(
-      vscode.window.registerWebviewViewProvider(
-        "continue.continueGUIView",
-        provider,
-        {
-          webviewOptions: { retainContextWhenHidden: true },
-        }
-      )
-    );
-  }
 }
diff --git a/extension/src/bridge.ts b/extension/src/bridge.ts
index 7e6398be..d614ace4 100644
--- a/extension/src/bridge.ts
+++ b/extension/src/bridge.ts
@@ -1,11 +1,7 @@
 import fetch from "node-fetch";
 import * as path from "path";
 import * as vscode from "vscode";
-import {
-  Configuration,
-  DebugApi,
-  UnittestApi,
-} from "./client";
+import { Configuration, DebugApi, UnittestApi } from "./client";
 import { convertSingleToDoubleQuoteJSON } from "./util/util";
 import { getExtensionUri } from "./util/vscode";
 import { extensionContext } from "./activation/activate";
--
cgit v1.2.3-70-g09d2

From 6bcb3dcb7158ccb9674f79978b343312f9ad8a31 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Wed, 19 Jul 2023 09:52:39 -0700
Subject: patch

---
 extension/package-lock.json | 4 ++--
 extension/package.json      | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/extension/package-lock.json b/extension/package-lock.json
index 6818857b..bc2824c4 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.181",
+  "version": "0.0.182",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.181",
+      "version": "0.0.182",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index b37bb1b6..2998b148 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.181",
+  "version": "0.0.182",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
--
cgit v1.2.3-70-g09d2

From 2f777ea933d4a41b600feedeff7d85257c5b136d Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Wed, 19 Jul 2023 18:45:46 -0700
Subject: transparent bg fix

---
 extension/package-lock.json                       | 4 ++--
 extension/package.json                            | 2 +-
 extension/react-app/src/components/ComboBox.tsx   | 3 +--
 extension/react-app/src/components/PillButton.tsx | 2 ++
 extension/src/commands.ts                         | 3 +++
 5 files changed, 9 insertions(+), 5 deletions(-)

diff --git a/extension/package-lock.json b/extension/package-lock.json
index bc2824c4..3f9ff3aa 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.182",
+  "version": "0.0.184",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.182",
+      "version": "0.0.184",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 2998b148..72afa46f 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.182",
+  "version": "0.0.184",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx
index f327e3a3..1e2ca135 100644
--- a/extension/react-app/src/components/ComboBox.tsx
+++ b/extension/react-app/src/components/ComboBox.tsx
@@ -71,7 +71,6 @@ const Ul = styled.ul<{
     : `transform: translateY(${2 * mainInputFontSize}px);`}
   position: absolute;
   background: ${vscBackground};
-  background-color: ${secondaryDark};
   color: ${vscForeground};
   max-height: ${UlMaxHeight}px;
   width: calc(100% - 16px);
@@ -96,7 +95,7 @@ const Li = styled.li<{
   selected: boolean;
   isLastItem: boolean;
 }>`
-  background-color: ${secondaryDark};
+  background-color: ${vscBackground};
   ${({ highlighted }) => highlighted && "background: #ff000066;"}
   ${({ selected }) => selected && "font-weight: bold;"}
   padding: 0.5rem 0.75rem;
diff --git a/extension/react-app/src/components/PillButton.tsx b/extension/react-app/src/components/PillButton.tsx
index c24dba83..5929d06a 100644
--- a/extension/react-app/src/components/PillButton.tsx
+++ b/extension/react-app/src/components/PillButton.tsx
@@ -4,6 +4,7 @@ import {
   StyledTooltip,
   defaultBorderRadius,
   secondaryDark,
+  vscBackground,
   vscForeground,
 } from ".";
 import {
@@ -113,6 +114,7 @@ const PillButton = (props: PillButtonProps) => {
         {props.onlyShowDelete || (
diff --git a/extension/src/commands.ts b/extension/src/commands.ts
index 2b7f4c0c..1da2f04e 100644
--- a/extension/src/commands.ts
+++ b/extension/src/commands.ts
@@ -40,6 +40,9 @@ const commandsMap: { [command: string]: (...args: any) => any } = {
         edit ? "/edit " : ""
       }${code}\n\nHow do I fix this problem in the above code?: ${message}`
     );
+    if (!edit) {
+      vscode.commands.executeCommand("continue.continueGUIView.focus");
+    }
   },
   "continue.focusContinueInput": async () => {
     if (focusedOnContinueInput) {
--
cgit v1.2.3-70-g09d2

From dc90631c443db710e1c92a556497e403d9f9b8be Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 20 Jul 2023 12:19:56 -0700
Subject: fix mutable default arg with_history bug

---
 continuedev/src/continuedev/libs/llm/__init__.py         |  6 +++---
 continuedev/src/continuedev/libs/llm/anthropic.py        |  6 +++---
 continuedev/src/continuedev/libs/llm/ggml.py             |  6 +++---
 continuedev/src/continuedev/libs/llm/hf_inference_api.py |  2 +-
 continuedev/src/continuedev/libs/llm/openai.py           |  6 +++---
 continuedev/src/continuedev/libs/llm/proxy_server.py     |  6 +++---
 continuedev/src/continuedev/libs/util/count_tokens.py    | 14 ++++++++------
 7 files changed, 24 insertions(+), 22 deletions(-)

diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 4c4de213..2766db4b 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -9,15 +9,15 @@ from pydantic import BaseModel
 class LLM(ABC):
     system_message: Union[str, None] = None
 
-    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = None, **kwargs) -> Coroutine[Any, Any, str]:
         """Return the completion of the text with the given temperature."""
         raise NotImplementedError
 
-    def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    def stream_complete(self, prompt, with_history: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         """Stream the completion through generator."""
         raise NotImplementedError
 
-    async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_chat(self, messages: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         """Stream the chat through generator."""
         raise NotImplementedError
 
diff --git a/continuedev/src/continuedev/libs/llm/anthropic.py b/continuedev/src/continuedev/libs/llm/anthropic.py
index c82895c6..625d4e57 100644
--- a/continuedev/src/continuedev/libs/llm/anthropic.py
+++ b/continuedev/src/continuedev/libs/llm/anthropic.py
@@ -54,7 +54,7 @@ class AnthropicLLM(LLM):
             prompt += AI_PROMPT
         return prompt
 
-    async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_complete(self, prompt, with_history: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         args = self.default_args.copy()
         args.update(kwargs)
         args["stream"] = True
@@ -66,7 +66,7 @@ class AnthropicLLM(LLM):
         ):
             yield chunk.completion
 
-    async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_chat(self, messages: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         args = self.default_args.copy()
         args.update(kwargs)
         args["stream"] = True
@@ -83,7 +83,7 @@ class AnthropicLLM(LLM):
                 "content": chunk.completion
             }
 
-    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = None, **kwargs) -> Coroutine[Any, Any, str]:
         args = {**self.default_args, **kwargs}
         args = self._transform_args(args)
 
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
index 6007fdb4..4889a556 100644
--- a/continuedev/src/continuedev/libs/llm/ggml.py
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -26,7 +26,7 @@ class GGML(LLM):
     def count_tokens(self, text: str):
         return count_tokens(self.name, text)
 
-    async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_complete(self, prompt, with_history: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         args = self.default_args.copy()
         args.update(kwargs)
         args["stream"] = True
@@ -47,7 +47,7 @@ class GGML(LLM):
                     except:
                         raise Exception(str(line))
 
-    async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_chat(self, messages: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         args = {**self.default_args, **kwargs}
         messages = compile_chat_messages(
             self.name, messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message)
@@ -72,7 +72,7 @@ class GGML(LLM):
                         except:
                             raise Exception(str(line[0]))
 
-    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = None, **kwargs) -> Coroutine[Any, Any, str]:
         args = {**self.default_args, **kwargs}
 
         async with aiohttp.ClientSession() as session:
diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
index 7e11fbbe..36f03270 100644
--- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py
+++ b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
@@ -16,7 +16,7 @@ class HuggingFaceInferenceAPI(LLM):
         self.model = model
         self.system_message = system_message  # TODO: Nothing being done with this
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs):
+    def complete(self, prompt: str, with_history: List[ChatMessage] = None, **kwargs):
         """Return the completion of the text with the given temperature."""
         API_URL = f"https://api-inference.huggingface.co/models/{self.model}"
         headers = {
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 64bb39a2..96a4ab71 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -42,7 +42,7 @@ class OpenAI(LLM):
     def count_tokens(self, text: str):
         return count_tokens(self.default_model, text)
 
-    async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_complete(self, prompt, with_history: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         args = self.default_args.copy()
         args.update(kwargs)
         args["stream"] = True
@@ -72,7 +72,7 @@ class OpenAI(LLM):
 
         self.write_log(f"Completion:\n\n{completion}")
 
-    async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_chat(self, messages: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         args = self.default_args.copy()
         args.update(kwargs)
         args["stream"] = True
@@ -93,7 +93,7 @@ class OpenAI(LLM):
                 completion += chunk.choices[0].delta.content
         self.write_log(f"Completion: \n\n{completion}")
 
-    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = None, **kwargs) -> Coroutine[Any, Any, str]:
         args = {**self.default_args, **kwargs}
 
         if args["model"] in CHAT_MODELS:
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index bd50fe02..b1bb8f06 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -38,7 +38,7 @@ class ProxyServer(LLM):
     def count_tokens(self, text: str):
         return count_tokens(self.default_model, text)
 
-    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = None, **kwargs) -> Coroutine[Any, Any, str]:
         args = {**self.default_args, **kwargs}
 
         messages = compile_chat_messages(
@@ -57,7 +57,7 @@ class ProxyServer(LLM):
                 except:
                     raise Exception(await resp.text())
 
-    async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, Generator[Union[Any, List, Dict], None, None]]:
+    async def stream_chat(self, messages: List[ChatMessage] = None, **kwargs) -> Coroutine[Any, Any, Generator[Union[Any, List, Dict], None, None]]:
         args = {**self.default_args, **kwargs}
         messages = compile_chat_messages(
             args["model"], messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message)
@@ -89,7 +89,7 @@ class ProxyServer(LLM):
 
         self.write_log(f"Completion: \n\n{completion}")
 
-    async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+    async def stream_complete(self, prompt, with_history: List[ChatMessage] = None, **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         args = {**self.default_args, **kwargs}
         messages = compile_chat_messages(
             self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None), system_message=self.system_message)
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 987aa722..6e0a3b88 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -101,13 +101,15 @@ def prune_chat_history(model: str, chat_history: List[ChatMessage], max_tokens:
 TOKEN_BUFFER_FOR_SAFETY = 100
 
 
-def compile_chat_messages(model: str, msgs: List[ChatMessage], max_tokens: int, prompt: Union[str, None] = None, functions: Union[List, None] = None, system_message: Union[str, None] = None) -> List[Dict]:
+def compile_chat_messages(model: str, msgs: Union[List[ChatMessage], None], max_tokens: int, prompt: Union[str, None] = None, functions: Union[List, None] = None, system_message: Union[str, None] = None) -> List[Dict]:
     """
     The total number of tokens is system_message + sum(msgs) + functions + prompt after it is converted to a message
     """
+    msgs_copy = msgs.copy() if msgs is not None else []
+
     if prompt is not None:
         prompt_msg = ChatMessage(role="user", content=prompt, summary=prompt)
-        msgs += [prompt_msg]
+        msgs_copy += [prompt_msg]
 
     if system_message is not None:
         # NOTE: System message takes second precedence to user prompt, so it is placed just before
@@ -116,7 +118,7 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], max_tokens: int,
         system_chat_msg = ChatMessage(
             role="system", content=rendered_system_message, summary=rendered_system_message)
         # insert at second-to-last position
-        msgs.insert(-1, system_chat_msg)
+        msgs_copy.insert(-1, system_chat_msg)
 
     # Add tokens from functions
     function_tokens = 0
@@ -124,11 +126,11 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], max_tokens: int,
         for function in functions:
             function_tokens += count_tokens(model, json.dumps(function))
 
-    msgs = prune_chat_history(
-        model, msgs, MAX_TOKENS_FOR_MODEL[model], function_tokens + max_tokens + TOKEN_BUFFER_FOR_SAFETY)
+    msgs_copy = prune_chat_history(
+        model, msgs_copy, MAX_TOKENS_FOR_MODEL[model], function_tokens + max_tokens + TOKEN_BUFFER_FOR_SAFETY)
 
     history = [msg.to_dict(with_functions=functions is not None)
-               for msg in msgs]
+               for msg in msgs_copy]
 
     # Move system message back to start
     if system_message is not None and len(history) >= 2 and history[-2]["role"] == "system":
--
cgit v1.2.3-70-g09d2

From 67b1e77e9dc2134e63a0e2d87524db2260ad817a Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 20 Jul 2023 12:21:07 -0700
Subject: patch

---
 extension/package-lock.json | 4 ++--
 extension/package.json      | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/extension/package-lock.json b/extension/package-lock.json
index 3f9ff3aa..d37a29a7 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.184",
+  "version": "0.0.185",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.184",
+      "version": "0.0.185",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 72afa46f..a5b0a7b6 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.184",
+  "version": "0.0.185",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
--
cgit v1.2.3-70-g09d2

From 00efacfc3df025f359a8aac86dad8b273d5fd350 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 20 Jul 2023 16:30:30 -0700
Subject: deep copy

---
 continuedev/src/continuedev/libs/llm/openai.py        | 2 +-
 continuedev/src/continuedev/libs/util/count_tokens.py | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 96a4ab71..a0773c1d 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -81,7 +81,7 @@ class OpenAI(LLM):
             del args["functions"]
 
         messages = compile_chat_messages(
-            args["model"], messages, args["max_tokens"], functions=args.get("functions", None), system_message=self.system_message)
+            args["model"], messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message)
         self.write_log(f"Prompt: \n\n{format_chat_messages(messages)}")
         completion = ""
         async for chunk in await openai.ChatCompletion.acreate(
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 6e0a3b88..cea91470 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -105,7 +105,8 @@ def compile_chat_messages(model: str, msgs: Union[List[ChatMessage], None], max_
     """
     The total number of tokens is system_message + sum(msgs) + functions + prompt after it is converted to a message
     """
-    msgs_copy = msgs.copy() if msgs is not None else []
+    msgs_copy = [msg.copy(deep=True)
+                 for msg in msgs] if msgs is not None else []
 
     if prompt is not None:
         prompt_msg = ChatMessage(role="user", content=prompt, summary=prompt)
--
cgit v1.2.3-70-g09d2

From 20b399a20ed36815e40fda292f0bfb1e1b30aed8 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 20 Jul 2023 16:40:40 -0700
Subject: don't summarize last user message

---
 continuedev/src/continuedev/libs/util/count_tokens.py | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index cea91470..c58ae499 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -73,9 +73,9 @@ def prune_chat_history(model: str, chat_history: List[ChatMessage], max_tokens:
         message = chat_history.pop(0)
         total_tokens -= count_tokens(model, message.content)
 
-    # 3. Truncate message in the last 5
+    # 3. Truncate message in the last 5, except last 1
     i = 0
-    while total_tokens > max_tokens and len(chat_history) > 0 and i < len(chat_history):
+    while total_tokens > max_tokens and len(chat_history) > 0 and i < len(chat_history) - 1:
         message = chat_history[i]
         total_tokens -= count_tokens(model, message.content)
         total_tokens += count_tokens(model, message.summary)
--
cgit v1.2.3-70-g09d2

From 883219874035f46ca3409901ea186493a2ce46a5 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 20 Jul 2023 16:47:15 -0700
Subject: patch

---
 extension/package-lock.json | 4 ++--
 extension/package.json      | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/extension/package-lock.json b/extension/package-lock.json
index d37a29a7..7ca62d4a 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.185",
+  "version": "0.0.186",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.185",
+      "version": "0.0.186",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index a5b0a7b6..76b80ed7 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.185",
+  "version": "0.0.186",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
--
cgit v1.2.3-70-g09d2

From 0cd32ba813f5506c0871159658728b8ce31825e1 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Fri, 21 Jul 2023 01:49:28 -0700
Subject: fix for top-of-file pruning in default edit step

---
 continuedev/src/continuedev/steps/core/core.py | 14 +++++++-------
 extension/package-lock.json                    |  4 ++--
 extension/package.json                         |  2 +-
 3 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 4afc36e8..98600f8b 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -220,13 +220,13 @@ class DefaultModelEditCodeStep(Step):
                 if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use.name]:
                     break
 
-            if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use.name]:
-                while cur_start_line < max_start_line:
-                    cur_start_line += 1
-                    total_tokens -= model_to_use.count_tokens(
-                        full_file_contents_lst[cur_end_line])
-                    if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use.name]:
-                        break
+        if total_tokens > MAX_TOKENS_FOR_MODEL[model_to_use.name]:
+            while cur_start_line < max_start_line:
+                cur_start_line += 1
+                total_tokens -= model_to_use.count_tokens(
+                    full_file_contents_lst[cur_start_line])
+                if total_tokens < MAX_TOKENS_FOR_MODEL[model_to_use.name]:
+                    break
 
         # Now use the found start/end lines to get the prefix and suffix strings
         file_prefix = "\n".join(
diff --git a/extension/package-lock.json b/extension/package-lock.json
index 7ca62d4a..d44b84c4 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.186",
+  "version": "0.0.187",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.186",
+      "version": "0.0.187",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 76b80ed7..34bc8bc4 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.186",
+  "version": "0.0.187",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
--
cgit v1.2.3-70-g09d2

From 607ace7321d1ccf41292665ed625b44c222ec74b Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 21 Jul 2023 16:27:25 -0500
Subject: python3 --> python

---
 extension/src/activation/environmentSetup.ts | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts
index c341db39..94481430 100644
--- a/extension/src/activation/environmentSetup.ts
+++ b/extension/src/activation/environmentSetup.ts
@@ -139,7 +139,7 @@ export async function getPythonPipCommands() {
 
   if (!versionExists) {
     vscode.window.showErrorMessage(
-      "Continue requires Python3 version 3.8 or greater. Please update your Python3 installation, reload VS Code, and try again."
+      "Continue requires Python version 3.8 or greater. Please update your Python installation, reload VS Code, and try again."
     );
     throw new Error("Python3.8 or greater is not installed.");
   }
--
cgit v1.2.3-70-g09d2

From ec1538856007fa60a778a65793f072b0d4b7d3b9 Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Fri, 21 Jul 2023 16:35:44 -0500
Subject: notice for python and to check console logs

---
 README.md                    | 10 ++++++++++
 docs/docs/getting-started.md |  4 ++++
 extension/README.md          | 10 ++++++++++
 3 files changed, 24 insertions(+)

diff --git a/README.md b/README.md
index 01462247..4337c591 100644
--- a/README.md
+++ b/README.md
@@ -44,6 +44,16 @@ Let Continue build the scaffolding of Python scripts, React components, and more
 
 ### [Download for VS Code](https://marketplace.visualstudio.com/items?itemName=Continue.continue)
 
+## Install
+
+Continue requires that you have Python 3.8 or greater. If you do not, please [install](https://python.org) it.
+
+If your Continue server fails to start, please check the console logs:
+1. `cmd+shift+p` (MacOS) / `ctrl+shift+p` (Windows)
+2. Search for and then select "Developer: Toggle Developer Tools"
+3. Select `Console`
+4. Read the console logs
+
 ## OpenAI API Key
 
 New users can try out Continue with GPT-4 using a proxy server that securely makes calls to OpenAI using our API key. Continue should just work the first time you install the extension in VS Code.
diff --git a/docs/docs/getting-started.md b/docs/docs/getting-started.md
index 753c1479..fc19552e 100644
--- a/docs/docs/getting-started.md
+++ b/docs/docs/getting-started.md
@@ -2,6 +2,10 @@
 
 ## Recommended: Install in VS Code
 
+:::note
+Continue requires that you have Python 3.8 or greater. If you do not, please [install](https://python.org) it.
+:::
+
 1. Click `Install` on the **[Continue extension in the Visual Studio Marketplace](https://marketplace.visualstudio.com/items?itemName=Continue.continue)**
 
 2. This will open the Continue extension page in VS Code, where you will need to click `Install` again
diff --git a/extension/README.md b/extension/README.md
index 2d449b92..2944325b 100644
--- a/extension/README.md
+++ b/extension/README.md
@@ -25,6 +25,16 @@ Let Continue build the scaffolding of Python scripts, React components, and more
 - “/edit make an IAM policy that creates a user with read-only access to S3”
 - “/edit use this schema to write me a SQL query that gets recently churned users”
 
+## Install
+
+Continue requires that you have Python 3.8 or greater. If you do not, please [install](https://python.org) it.
+
+If your Continue server fails to start, please check the console logs:
+1. `cmd+shift+p` (MacOS) / `ctrl+shift+p` (Windows)
+2. Search for and then select "Developer: Toggle Developer Tools"
+3. Select `Console`
+4. Read the console logs
+
 ## OpenAI API Key
 
 New users can try out Continue with GPT-4 using a proxy server that securely makes calls to OpenAI using our API key. Continue should just work the first time you install the extension in VS Code.
--
cgit v1.2.3-70-g09d2

From b97ceee5cad5f3a645f067353aa33c17dfcf0a60 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Fri, 21 Jul 2023 16:43:17 -0700
Subject: notify to reload window after changing settings

---
 extension/package-lock.json        |  4 ++--
 extension/package.json             |  2 +-
 extension/src/continueIdeClient.ts | 14 ++++++++++++++
 3 files changed, 17 insertions(+), 3 deletions(-)

diff --git a/extension/package-lock.json b/extension/package-lock.json
index d44b84c4..5c8e27d0 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.187",
+  "version": "0.0.188",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.187",
+      "version": "0.0.188",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 34bc8bc4..3d44c156 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "The open-source coding autopilot",
-  "version": "0.0.187",
+  "version": "0.0.188",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.67.0"
diff --git a/extension/src/continueIdeClient.ts b/extension/src/continueIdeClient.ts
index a1370a01..3a42e773 100644
--- a/extension/src/continueIdeClient.ts
+++ b/extension/src/continueIdeClient.ts
@@ -167,6 +167,20 @@ class IdeProtocolClient {
         documentContentProvider
       )
     );
+
+    // Listen for changes to settings.json
+    vscode.workspace.onDidChangeConfiguration((event) => {
+      if (event.affectsConfiguration("continue")) {
+        vscode.window.showInformationMessage(
+          "Please reload VS Code for changes to Continue settings to take effect.",
+          "Reload"
+        ).then((selection) => {
+          if (selection === "Reload") {
+            vscode.commands.executeCommand("workbench.action.reloadWindow");
+          }
+        });
+      }
+    });
   }
 
   async handleMessage(
--
cgit v1.2.3-70-g09d2

From a87e66758731a9e76c9c394dc2190b9882ddbceb Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Fri, 21 Jul 2023 19:51:23 -0700
Subject: clean pii from telemetry

---
 .../src/continuedev/libs/util/commonregex.py       | 138 +++++++++++++++++++++
 continuedev/src/continuedev/libs/util/telemetry.py |   7 +-
 2 files changed, 144 insertions(+), 1 deletion(-)
 create mode 100644 continuedev/src/continuedev/libs/util/commonregex.py

diff --git a/continuedev/src/continuedev/libs/util/commonregex.py b/continuedev/src/continuedev/libs/util/commonregex.py
new file mode 100644
index 00000000..55da7fc0
--- /dev/null
+++ b/continuedev/src/continuedev/libs/util/commonregex.py
@@ -0,0 +1,138 @@
+# coding: utf-8
+import json
+import re
+from typing import Any, Dict
+
+date = re.compile(
+    '(?:(?]+[^\s`!()\[\]{};:\'".,<>?\xab\xbb\u201c\u201d\u2018\u2019])?)', re.IGNORECASE)
+email = re.compile(
+    "([a-z0-9!#$%&'*+\/=?^_`{|.}~-]+@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)", re.IGNORECASE)
+ip = re.compile('(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)', re.IGNORECASE)
+ipv6 = re.compile(
+    '\s*(?!.*::.*::)(?:(?!:)|:(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?", "unix_absolute_filepath": "", "dates": "", "times": "