From a63a8aa2b228e9165359bae7f5a8e19227e9e824 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Thu, 22 Jun 2023 17:05:53 -0700
Subject: patch

---
 extension/package-lock.json | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

(limited to 'extension/package-lock.json')

diff --git a/extension/package-lock.json b/extension/package-lock.json
index 79b010cc..12b2fe13 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.60",
+  "version": "0.0.61",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.60",
+      "version": "0.0.61",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
-- 
cgit v1.2.3-70-g09d2


From 3338d05db9f5f21d8a9d440cb428f2c6a188b363 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Sun, 25 Jun 2023 15:14:56 -0700
Subject: shipped function calling

---
 continuedev/src/continuedev/core/autopilot.py  | 14 ++++++++-
 continuedev/src/continuedev/steps/chat.py      | 33 +++++++++++----------
 extension/package-lock.json                    |  4 +--
 extension/package.json                         |  2 +-
 .../src/components/HeaderButtonWithText.tsx    |  6 +++-
 extension/react-app/src/tabs/gui.tsx           |  2 --
 .../scripts/continuedev-0.1.1-py3-none-any.whl | Bin 86716 -> 89177 bytes
 7 files changed, 39 insertions(+), 22 deletions(-)

(limited to 'extension/package-lock.json')

diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index f14a4127..04f64ed8 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -15,6 +15,17 @@ from .sdk import ContinueSDK
 import asyncio
 from ..libs.util.step_name_to_steps import get_step_from_name
 from ..libs.util.traceback_parsers import get_python_traceback, get_javascript_traceback
+from openai import error as openai_errors
+
+
+def get_error_title(e: Exception) -> str:
+    if isinstance(e, openai_errors.APIError):
+        return "OpenAI is overloaded with requests. Please try again."
+    elif isinstance(e, openai_errors.RateLimitError):
+        return "This OpenAI API key has been rate limited. Please try again."
+    elif isinstance(e, openai_errors.Timeout):
+        return "OpenAI timed out. Please try again."
+    return e.__repr__()
 
 
 class Autopilot(ContinueBaseModel):
@@ -166,7 +177,8 @@ class Autopilot(ContinueBaseModel):
             error_string = e.message if is_continue_custom_exception else '\n\n'.join(
                 traceback.format_tb(e.__traceback__)) + f"\n\n{e.__repr__()}"
-            error_title = e.title if is_continue_custom_exception else e.__repr__()
+            error_title = e.title if is_continue_custom_exception else get_error_title(
+                e)
 
             # Attach an InternalErrorObservation to the step and unhide it.
             print(f"Error while running step: \n{error_string}\n{error_title}")
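The get_error_title helper added above maps raw OpenAI exceptions to friendly, user-facing titles instead of surfacing repr() output. A minimal sketch of the behavior, assuming the openai 0.x SDK (which exposes these error classes under openai.error):

# Sketch only: the same mapping as the patch, plus a usage example.
from openai import error as openai_errors

def get_error_title(e: Exception) -> str:
    if isinstance(e, openai_errors.APIError):
        return "OpenAI is overloaded with requests. Please try again."
    elif isinstance(e, openai_errors.RateLimitError):
        return "This OpenAI API key has been rate limited. Please try again."
    elif isinstance(e, openai_errors.Timeout):
        return "OpenAI timed out. Please try again."
    return e.__repr__()

# Known API failures get a friendly title; anything else falls back to repr():
print(get_error_title(openai_errors.Timeout()))  # OpenAI timed out. Please try again.
print(get_error_title(KeyError("step")))         # KeyError('step')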
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index a940c3ba..2a8ae2da 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -182,21 +182,24 @@ class ChatWithFunctions(Step):
         else:
             if func_name == "python" and "python" not in step_name_step_class_map:
                 # GPT must be fine-tuned to believe this exists, but it doesn't always
-                self.chat_context.append(ChatMessage(
-                    role="assistant",
-                    content=None,
-                    function_call=FunctionCall(
-                        name=func_name,
-                        arguments=func_args
-                    ),
-                    summary=f"Ran function {func_name}"
-                ))
-                self.chat_context.append(ChatMessage(
-                    role="user",
-                    content="The 'python' function does not exist. Don't call it. Try again to call another function.",
-                    summary="'python' function does not exist."
-                ))
-                continue
+                func_name = "EditHighlightedCodeStep"
+                func_args = json.dumps({"user_input": self.user_input})
+                # self.chat_context.append(ChatMessage(
+                #     role="assistant",
+                #     content=None,
+                #     function_call=FunctionCall(
+                #         name=func_name,
+                #         arguments=func_args
+                #     ),
+                #     summary=f"Ran function {func_name}"
+                # ))
+                # self.chat_context.append(ChatMessage(
+                #     role="user",
+                #     content="The 'python' function does not exist. Don't call it. Try again to call another function.",
+                #     summary="'python' function does not exist."
+                # ))
+                # msg_step.hide = True
+                # continue
             # Call the function, then continue to chat
             func_args = "{}" if func_args == "" else func_args
             fn_call_params = json.loads(func_args)
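The chat.py change above stops scolding the model when it hallucinates a "python" function and instead reroutes the call to EditHighlightedCodeStep. A self-contained sketch of that fallback; the registry dict here is a stand-in for the real step_name_step_class_map:

import json

# Stand-in registry: only the membership test matters for this sketch.
step_name_step_class_map = {"EditHighlightedCodeStep": object()}

def resolve_function_call(func_name: str, func_args: str, user_input: str):
    if func_name == "python" and "python" not in step_name_step_class_map:
        # GPT is fine-tuned to believe a "python" function exists; treat the
        # call as an edit request rather than bouncing an error message back.
        func_name = "EditHighlightedCodeStep"
        func_args = json.dumps({"user_input": user_input})
    func_args = "{}" if func_args == "" else func_args
    return func_name, json.loads(func_args)

print(resolve_function_call("python", "", "rename this variable"))
# ('EditHighlightedCodeStep', {'user_input': 'rename this variable'})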
diff --git a/extension/package-lock.json b/extension/package-lock.json
index 12b2fe13..71edfe8c 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.61",
+  "version": "0.0.62",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.61",
+      "version": "0.0.62",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 9fd1d7a6..9679b159 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "Accelerating software development with language models",
-  "version": "0.0.61",
+  "version": "0.0.62",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.74.0"
diff --git a/extension/react-app/src/components/HeaderButtonWithText.tsx b/extension/react-app/src/components/HeaderButtonWithText.tsx
index 5901c5d8..30931f86 100644
--- a/extension/react-app/src/components/HeaderButtonWithText.tsx
+++ b/extension/react-app/src/components/HeaderButtonWithText.tsx
@@ -15,7 +15,11 @@ const HeaderButtonWithText = (props: HeaderButtonWithTextProps) => {
-      onMouseEnter={() => setHover(true)}
+      onMouseEnter={() => {
+        if (!props.disabled) {
+          setHover(true);
+        }
+      }}
       onMouseLeave={() => {
         setHover(false);
       }}
diff --git a/extension/react-app/src/tabs/gui.tsx b/extension/react-app/src/tabs/gui.tsx
index 5001fe4b..4886bfad 100644
--- a/extension/react-app/src/tabs/gui.tsx
+++ b/extension/react-app/src/tabs/gui.tsx
@@ -491,8 +491,6 @@ function GUI(props: GUIProps) {
             onClick={() => {
               client?.sendClear();
-              // Reload the window to get completely fresh session
-              window.location.reload();
             }}
             text="Clear All"
           >
diff --git a/extension/scripts/continuedev-0.1.1-py3-none-any.whl b/extension/scripts/continuedev-0.1.1-py3-none-any.whl
index 29e41d3c..692bdfb7 100644
Binary files a/extension/scripts/continuedev-0.1.1-py3-none-any.whl and b/extension/scripts/continuedev-0.1.1-py3-none-any.whl differ
-- 
cgit v1.2.3-70-g09d2


From f00fd0c5525b7717716d535d3a783ad508a0b3a9 Mon Sep 17 00:00:00 2001
From: Nate Sesti
Date: Sun, 25 Jun 2023 22:35:37 -0700
Subject: context overflow fix

---
 continuedev/src/continuedev/libs/llm/openai.py     |  4 ++--
 .../src/continuedev/libs/util/count_tokens.py      | 14 +++++++++++--
 continuedev/src/continuedev/steps/core/core.py     | 22 ++++++---------------
 extension/package-lock.json                        |  4 ++--
 extension/package.json                             |  2 +-
 extension/react-app/src/components/TextDialog.tsx  |  2 +-
 .../scripts/continuedev-0.1.1-py3-none-any.whl     | Bin 89177 -> 89291 bytes
 7 files changed, 24 insertions(+), 24 deletions(-)

(limited to 'extension/package-lock.json')

diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index f0b2e6d8..3024ae61 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -4,7 +4,7 @@ from typing import Any, Coroutine, Dict, Generator, List, Union
 from ...core.main import ChatMessage
 import openai
 from ..llm import LLM
-from ..util.count_tokens import DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens
+from ..util.count_tokens import DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, prune_raw_prompt_from_top
 
 
 class OpenAI(LLM):
@@ -72,7 +72,7 @@ class OpenAI(LLM):
             )).choices[0].message.content
         else:
             resp = (await openai.Completion.acreate(
-                prompt=prompt,
+                prompt=prune_raw_prompt_from_top(args["model"], prompt),
                 **args,
             )).choices[0].text
 
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 6038b68d..addafcff 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -28,6 +28,16 @@ def count_tokens(model: str, text: str | None):
     return len(encoding.encode(text, disallowed_special=()))
 
 
+def prune_raw_prompt_from_top(model: str, prompt: str):
+    max_tokens = MAX_TOKENS_FOR_MODEL.get(model, DEFAULT_MAX_TOKENS)
+    encoding = encoding_for_model(model)
+    tokens = encoding.encode(prompt, disallowed_special=())
+    if len(tokens) <= max_tokens:
+        return prompt
+    else:
+        return encoding.decode(tokens[-max_tokens:])
+
+
 def prune_chat_history(model: str, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
     total_tokens = tokens_for_completion + \
         sum(count_tokens(model, message.content)
@@ -43,13 +53,13 @@ def prune_chat_history(model: str, chat_history: List[ChatMessage], max_tokens:
         i += 1
 
     # 2. Remove entire messages until the last 5
-    while len(chat_history) > 5 and total_tokens > max_tokens:
+    while len(chat_history) > 5 and total_tokens > max_tokens and len(chat_history) > 0:
        message = chat_history.pop(0)
        total_tokens -= count_tokens(model, message.content)
 
     # 3. Truncate message in the last 5
     i = 0
-    while total_tokens > max_tokens:
+    while total_tokens > max_tokens and len(chat_history) > 0:
         message = chat_history[0]
         total_tokens -= count_tokens(model, message.content)
         total_tokens += count_tokens(model, message.summary)
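Two fixes land in count_tokens.py: raw completion prompts that overflow the context window are now pruned from the top, keeping the most recent tokens, and the history-pruning loops gain len(chat_history) > 0 guards so they cannot run against an empty history. A rough, runnable illustration of the top-pruning idea; it assumes tiktoken is installed, and the tiny max_tokens stands in for the real MAX_TOKENS_FOR_MODEL lookup:

from tiktoken import encoding_for_model

def prune_from_top(model: str, prompt: str, max_tokens: int) -> str:
    # Same shape as prune_raw_prompt_from_top, with max_tokens passed in so
    # the effect is visible on small inputs.
    encoding = encoding_for_model(model)
    tokens = encoding.encode(prompt, disallowed_special=())
    if len(tokens) <= max_tokens:
        return prompt
    return encoding.decode(tokens[-max_tokens:])  # keep only the tail

long_prompt = "filler line\n" * 200 + "the instruction that actually matters"
print(prune_from_top("gpt-3.5-turbo", long_prompt, max_tokens=10))
# -> a short suffix ending in "the instruction that actually matters"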
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index f146c94a..24f00d36 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -211,26 +211,16 @@ class DefaultModelEditCodeStep(Step):
             return cur_start_line, cur_end_line
 
-        if model_to_use.name == "gpt-4":
-
-            total_tokens = model_to_use.count_tokens(
-                full_file_contents + self._prompt)
-            cur_start_line, cur_end_line = cut_context(
-                model_to_use, total_tokens, cur_start_line, cur_end_line)
-
-        elif model_to_use.name == "gpt-3.5-turbo" or model_to_use.name == "gpt-3.5-turbo-16k":
-
+        model_to_use = sdk.models.default
+        if model_to_use.name == "gpt-3.5-turbo":
             if sdk.models.gpt35.count_tokens(full_file_contents) > MAX_TOKENS_FOR_MODEL["gpt-3.5-turbo"]:
                 model_to_use = sdk.models.gpt3516k
-                total_tokens = model_to_use.count_tokens(
-                    full_file_contents + self._prompt)
-                cur_start_line, cur_end_line = cut_context(
-                    model_to_use, total_tokens, cur_start_line, cur_end_line)
-            else:
+
+        total_tokens = model_to_use.count_tokens(
+            full_file_contents + self._prompt + self.user_input)
 
-                raise Exception("Unknown default model")
+        cur_start_line, cur_end_line = cut_context(
+            model_to_use, total_tokens, cur_start_line, cur_end_line)
 
         code_before = "\n".join(
             full_file_contents_lst[cur_start_line:max_start_line])
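The rewritten edit step above drops the per-model branches (and the "Unknown default model" failure) in favor of one path: start from sdk.models.default, and upgrade gpt-3.5-turbo to its 16k variant only when the file alone overflows the smaller window. A sketch of that decision; the token limits are stand-ins for MAX_TOKENS_FOR_MODEL:

MAX_TOKENS_FOR_MODEL = {"gpt-3.5-turbo": 4096, "gpt-3.5-turbo-16k": 16384}

def pick_edit_model(default_model: str, file_token_count: int) -> str:
    model = default_model
    if model == "gpt-3.5-turbo" and file_token_count > MAX_TOKENS_FOR_MODEL["gpt-3.5-turbo"]:
        model = "gpt-3.5-turbo-16k"  # upgrade only on overflow
    return model

print(pick_edit_model("gpt-3.5-turbo", 12_000))  # gpt-3.5-turbo-16k
print(pick_edit_model("gpt-4", 12_000))          # gpt-4, left as-is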
diff --git a/extension/package-lock.json b/extension/package-lock.json
index 71edfe8c..c2f1cf18 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.62",
+  "version": "0.0.63",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.62",
+      "version": "0.0.63",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 9679b159..ba5fd5b0 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "Accelerating software development with language models",
-  "version": "0.0.62",
+  "version": "0.0.63",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.74.0"
diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx
index e50a7686..2632e572 100644
--- a/extension/react-app/src/components/TextDialog.tsx
+++ b/extension/react-app/src/components/TextDialog.tsx
@@ -15,6 +15,7 @@ const DialogContainer = styled.div`
   top: 50%;
   left: 50%;
   transform: translate(-50%, -50%);
+  width: 75%;
 `;
 
 const Dialog = styled.div`
@@ -76,7 +77,6 @@ const TextDialog = (props: {
           Thanks for your feedback. We'll get back to you soon!