From d181b1da69a43b4ee92a5822790716baa7023654 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Thu, 13 Jul 2023 14:50:10 -0700 Subject: diff editor infer filepath, codelens in middle --- continuedev/src/continuedev/steps/core/core.py | 98 ++++++++++++++------------ 1 file changed, 52 insertions(+), 46 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py index 5ea95104..787da316 100644 --- a/continuedev/src/continuedev/steps/core/core.py +++ b/continuedev/src/continuedev/steps/core/core.py @@ -486,58 +486,64 @@ Please output the code to be inserted at the cursor in order to fulfill the user completion_lines_covered = 0 repeating_file_suffix = False line_below_highlighted_range = file_suffix.lstrip().split("\n")[0] - async for chunk in model_to_use.stream_chat(messages, temperature=0, max_tokens=max_tokens): - # Stop early if it is repeating the file_suffix or the step was deleted - if repeating_file_suffix: - break - if sdk.current_step_was_deleted(): - return - # Accumulate lines - if "content" not in chunk: - continue - chunk = chunk["content"] - chunk_lines = chunk.split("\n") - chunk_lines[0] = unfinished_line + chunk_lines[0] - if chunk.endswith("\n"): - unfinished_line = "" - chunk_lines.pop() # because this will be an empty string - else: - unfinished_line = chunk_lines.pop() - - # Deal with newly accumulated lines - for i in range(len(chunk_lines)): - # Trailing whitespace doesn't matter - chunk_lines[i] = chunk_lines[i].rstrip() - chunk_lines[i] = common_whitespace + chunk_lines[i] - - # Lines that should signify the end of generation - if self.is_end_line(chunk_lines[i]): - break - # Lines that should be ignored, like the <> tags - elif self.line_to_be_ignored(chunk_lines[i], completion_lines_covered == 0): - continue - # Check if we are currently just copying the prefix - elif (lines_of_prefix_copied > 0 or completion_lines_covered == 0) and lines_of_prefix_copied < len(file_prefix.splitlines()) and chunk_lines[i] == full_file_contents_lines[lines_of_prefix_copied]: - # This is a sketchy way of stopping it from repeating the file_prefix. Is a bug if output happens to have a matching line - lines_of_prefix_copied += 1 - continue - # Because really short lines might be expected to be repeated, this is only a !heuristic! - # Stop when it starts copying the file_suffix - elif chunk_lines[i].strip() == line_below_highlighted_range.strip() and len(chunk_lines[i].strip()) > 4 and not (len(original_lines_below_previous_blocks) > 0 and chunk_lines[i].strip() == original_lines_below_previous_blocks[0].strip()): - repeating_file_suffix = True + generator = model_to_use.stream_chat( + messages, temperature=0, max_tokens=max_tokens) + + try: + async for chunk in generator: + # Stop early if it is repeating the file_suffix or the step was deleted + if repeating_file_suffix: break + if sdk.current_step_was_deleted(): + return - # If none of the above, insert the line! 
- if False: - await handle_generated_line(chunk_lines[i]) + # Accumulate lines + if "content" not in chunk: + continue + chunk = chunk["content"] + chunk_lines = chunk.split("\n") + chunk_lines[0] = unfinished_line + chunk_lines[0] + if chunk.endswith("\n"): + unfinished_line = "" + chunk_lines.pop() # because this will be an empty string + else: + unfinished_line = chunk_lines.pop() + + # Deal with newly accumulated lines + for i in range(len(chunk_lines)): + # Trailing whitespace doesn't matter + chunk_lines[i] = chunk_lines[i].rstrip() + chunk_lines[i] = common_whitespace + chunk_lines[i] + + # Lines that should signify the end of generation + if self.is_end_line(chunk_lines[i]): + break + # Lines that should be ignored, like the <> tags + elif self.line_to_be_ignored(chunk_lines[i], completion_lines_covered == 0): + continue + # Check if we are currently just copying the prefix + elif (lines_of_prefix_copied > 0 or completion_lines_covered == 0) and lines_of_prefix_copied < len(file_prefix.splitlines()) and chunk_lines[i] == full_file_contents_lines[lines_of_prefix_copied]: + # This is a sketchy way of stopping it from repeating the file_prefix. Is a bug if output happens to have a matching line + lines_of_prefix_copied += 1 + continue + # Because really short lines might be expected to be repeated, this is only a !heuristic! + # Stop when it starts copying the file_suffix + elif chunk_lines[i].strip() == line_below_highlighted_range.strip() and len(chunk_lines[i].strip()) > 4 and not (len(original_lines_below_previous_blocks) > 0 and chunk_lines[i].strip() == original_lines_below_previous_blocks[0].strip()): + repeating_file_suffix = True + break - lines.append(chunk_lines[i]) - completion_lines_covered += 1 - current_line_in_file += 1 + # If none of the above, insert the line! 
+ if False: + await handle_generated_line(chunk_lines[i]) - await sendDiffUpdate(lines + [common_whitespace if unfinished_line.startswith("<") else (common_whitespace + unfinished_line)], sdk) + lines.append(chunk_lines[i]) + completion_lines_covered += 1 + current_line_in_file += 1 + await sendDiffUpdate(lines + [common_whitespace if unfinished_line.startswith("<") else (common_whitespace + unfinished_line)], sdk) + finally: + await generator.aclose() # Add the unfinished line if unfinished_line != "" and not self.line_to_be_ignored(unfinished_line, completion_lines_covered == 0) and not self.is_end_line(unfinished_line): unfinished_line = common_whitespace + unfinished_line -- cgit v1.2.3-70-g09d2 From 83fe8f06b1be0cc57351319757bdaa3ad405d7fc Mon Sep 17 00:00:00 2001 From: Ty Dunn Date: Thu, 13 Jul 2023 15:36:09 -0700 Subject: adding /help command --- continuedev/src/continuedev/core/config.py | 5 ++ continuedev/src/continuedev/core/policy.py | 5 +- .../continuedev/libs/util/step_name_to_steps.py | 4 +- continuedev/src/continuedev/steps/help.py | 57 ++++++++++++++++++++++ 4 files changed, 66 insertions(+), 5 deletions(-) create mode 100644 continuedev/src/continuedev/steps/help.py (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py index f6167638..6e430c04 100644 --- a/continuedev/src/continuedev/core/config.py +++ b/continuedev/src/continuedev/core/config.py @@ -44,6 +44,11 @@ DEFAULT_SLASH_COMMANDS = [ description="Open the config file to create new and edit existing slash commands", step_name="OpenConfigStep", ), + SlashCommand( + name="help", + description="Ask a question like '/help what is given to the llm as context?'", + step_name="HelpStep", + ), SlashCommand( name="comment", description="Write comments for the current file or highlighted code", diff --git a/continuedev/src/continuedev/core/policy.py b/continuedev/src/continuedev/core/policy.py index b8363df2..59ea78b1 100644 --- a/continuedev/src/continuedev/core/policy.py +++ b/continuedev/src/continuedev/core/policy.py @@ -60,10 +60,7 @@ class DemoPolicy(Policy): MessageStep(name="Welcome to Continue", message=dedent("""\ - Highlight code and ask a question or give instructions - Use `cmd+k` (Mac) / `ctrl+k` (Windows) to open Continue - - Use `cmd+shift+e` / `ctrl+shift+e` to open file Explorer - - Add your own OpenAI API key to VS Code Settings with `cmd+,` - - Use slash commands when you want fine-grained control - - Past steps are included as part of the context by default""")) >> + - Use `/help` to ask questions about how to use Continue""")) >> WelcomeStep() >> # SetupContinueWorkspaceStep() >> # CreateCodebaseIndexChroma() >> diff --git a/continuedev/src/continuedev/libs/util/step_name_to_steps.py b/continuedev/src/continuedev/libs/util/step_name_to_steps.py index d329e110..49056c81 100644 --- a/continuedev/src/continuedev/libs/util/step_name_to_steps.py +++ b/continuedev/src/continuedev/libs/util/step_name_to_steps.py @@ -13,6 +13,7 @@ from ...recipes.DeployPipelineAirflowRecipe.main import DeployPipelineAirflowRec from ...steps.on_traceback import DefaultOnTracebackStep from ...steps.clear_history import ClearHistoryStep from ...steps.open_config import OpenConfigStep +from ...steps.help import HelpStep # This mapping is used to convert from string in ContinueConfig json to corresponding Step class. 
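The try/finally wrapper added in the first patch above guarantees that `await generator.aclose()` runs even when the stream loop exits early via break or return. A minimal sketch of that pattern, with a stand-in async generator in place of `stream_chat`:

    import asyncio

    async def token_stream():
        try:
            for i in range(10):
                yield i
        finally:
            # Cleanup (e.g. releasing the underlying HTTP stream) runs
            # when the generator is closed, even after an early break.
            print("stream closed")

    async def consume():
        generator = token_stream()
        try:
            async for token in generator:
                if token >= 3:  # stand-in for "started repeating file_suffix"
                    break
        finally:
            # Breaking out of `async for` alone does not finalize the
            # generator; close it explicitly so cleanup is deterministic.
            await generator.aclose()

    asyncio.run(consume())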
# Used for example in slash_commands and steps_on_startup @@ -28,7 +29,8 @@ step_name_to_step_class = { "DeployPipelineAirflowRecipe": DeployPipelineAirflowRecipe, "DefaultOnTracebackStep": DefaultOnTracebackStep, "ClearHistoryStep": ClearHistoryStep, - "OpenConfigStep": OpenConfigStep + "OpenConfigStep": OpenConfigStep, + "HelpStep": HelpStep, } diff --git a/continuedev/src/continuedev/steps/help.py b/continuedev/src/continuedev/steps/help.py new file mode 100644 index 00000000..fdfb986f --- /dev/null +++ b/continuedev/src/continuedev/steps/help.py @@ -0,0 +1,57 @@ +from textwrap import dedent +from ..core.main import ChatMessage, Step +from ..core.sdk import ContinueSDK +from ..libs.util.telemetry import capture_event + +help = dedent("""\ + Continue is an open-source coding autopilot. It is a VS Code extension that brings the power of ChatGPT to your IDE. + + It gathers context for you and stores your interactions automatically, so that you can avoid copy/paste now and benefit from a customized LLM later. + + Continue can be used to... + 1. Edit chunks of code with specific instructions (e.g. "/edit migrate this digital ocean terraform file into one that works for GCP") + 2. Get answers to questions without switching windows (e.g. "how do I find running process on port 8000?") + 3. Generate files from scratch (e.g. "/edit Create a Python CLI tool that uses the posthog api to get events from DAUs") + + You tell Continue to edit a specific section of code by highlighting it. If you highlight multiple code sections, then it will only edit the one with the purple glow around it. You can switch which one has the purple glow by clicking the paint brush. + + If you don't highlight any code, then Continue will insert at the location of your cursor. + + Continue passes all of the sections of code you highlight, the code above and below the to-be edited highlighted code section, and all previous steps above input box as context to the LLM. + + You can use cmd+k (Mac) / ctrl+k (Windows) to open Continue. You can use cmd+shift+e / ctrl+shift+e to open file Explorer. You can add your own OpenAI API key to VS Code Settings with `cmd+,` + + If Continue is stuck loading, try using `cmd+shift+p` to open the command palette, search "Reload Window", and then select it. This will reload VS Code and Continue and often fixes issues. + + If you have feedback, please use /feedback to let us know how you would like to use Continue. 
We are excited to hear from you!""") + +class HelpStep(Step): + + name: str = "Help" + user_input: str + manage_own_chat_context: bool = True + description: str = "" + + async def run(self, sdk: ContinueSDK): + + question = self.user_input + + prompt = dedent(f"""Please us the information below to provide a succinct answer to the following quesiton: {question} + + Information: + + {help}""") + + self.chat_context.append(ChatMessage( + role="user", + content=prompt, + summary="Help" + )) + messages = await sdk.get_chat_context() + generator = sdk.models.gpt4.stream_chat(messages) + async for chunk in generator: + if "content" in chunk: + self.description += chunk["content"] + await sdk.update_ui() + + capture_event(sdk.ide.unique_id, "help", {"question": question, "answer": self.description}) \ No newline at end of file -- cgit v1.2.3-70-g09d2 From c22670ba2122412eca3f2de1f270b5f113db7e25 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Fri, 14 Jul 2023 02:30:45 -0700 Subject: fixed unique_id being asyncio.run property --- continuedev/src/continuedev/server/ide.py | 22 +++++++++++----------- continuedev/src/continuedev/server/ide_protocol.py | 5 +---- continuedev/src/continuedev/steps/help.py | 8 +++++--- extension/package-lock.json | 4 ++-- extension/package.json | 2 +- extension/react-app/src/components/ComboBox.tsx | 17 ++--------------- extension/react-app/src/components/Onboarding.tsx | 1 + 7 files changed, 23 insertions(+), 36 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index 12a21f19..73cce201 100644 --- a/continuedev/src/continuedev/server/ide.py +++ b/continuedev/src/continuedev/server/ide.py @@ -123,10 +123,12 @@ class IdeProtocolServer(AbstractIdeProtocolServer): self.websocket = websocket self.session_manager = session_manager - workspace_directory: str + workspace_directory: str = None + unique_id: str = None async def initialize(self) -> List[str]: await self._send_json("workspaceDirectory", {}) + await self._send_json("uniqueId", {}) other_msgs = [] while True: msg_string = await self.websocket.receive_text() @@ -137,9 +139,13 @@ class IdeProtocolServer(AbstractIdeProtocolServer): data = message["data"] if message_type == "workspaceDirectory": self.workspace_directory = data["workspaceDirectory"] - break + elif message_type == "uniqueId": + self.unique_id = data["uniqueId"] else: other_msgs.append(msg_string) + + if self.workspace_directory is not None and self.unique_id is not None: + break return other_msgs async def _send_json(self, message_type: str, data: Any): @@ -183,10 +189,12 @@ class IdeProtocolServer(AbstractIdeProtocolServer): self.onMainUserInput(data["input"]) elif message_type == "deleteAtIndex": self.onDeleteAtIndex(data["index"]) - elif message_type in ["highlightedCode", "openFiles", "visibleFiles", "readFile", "editFile", "getUserSecret", "runCommand", "uniqueId"]: + elif message_type in ["highlightedCode", "openFiles", "visibleFiles", "readFile", "editFile", "getUserSecret", "runCommand"]: self.sub_queue.post(message_type, data) elif message_type == "workspaceDirectory": self.workspace_directory = data["workspaceDirectory"] + elif message_type == "uniqueId": + self.unique_id = data["uniqueId"] else: raise ValueError("Unknown message type", message_type) @@ -311,14 +319,6 @@ class IdeProtocolServer(AbstractIdeProtocolServer): resp = await self._send_and_receive_json({}, VisibleFilesResponse, "visibleFiles") return resp.visibleFiles - async def 
get_unique_id(self) -> str: - resp = await self._send_and_receive_json({}, UniqueIdResponse, "uniqueId") - return resp.uniqueId - - @cached_property_no_none - def unique_id(self) -> str: - return asyncio.run(self.get_unique_id()) - async def getHighlightedCode(self) -> List[RangeInFile]: resp = await self._send_and_receive_json({}, HighlightedCodeResponse, "highlightedCode") return resp.highlightedCode diff --git a/continuedev/src/continuedev/server/ide_protocol.py b/continuedev/src/continuedev/server/ide_protocol.py index 2f78cf0e..d0fb0bf8 100644 --- a/continuedev/src/continuedev/server/ide_protocol.py +++ b/continuedev/src/continuedev/server/ide_protocol.py @@ -108,7 +108,4 @@ class AbstractIdeProtocolServer(ABC): """Show a diff""" workspace_directory: str - - @abstractproperty - def unique_id(self) -> str: - """Get a unique ID for this IDE""" + unique_id: str diff --git a/continuedev/src/continuedev/steps/help.py b/continuedev/src/continuedev/steps/help.py index fdfb986f..2dc3647c 100644 --- a/continuedev/src/continuedev/steps/help.py +++ b/continuedev/src/continuedev/steps/help.py @@ -6,7 +6,7 @@ from ..libs.util.telemetry import capture_event help = dedent("""\ Continue is an open-source coding autopilot. It is a VS Code extension that brings the power of ChatGPT to your IDE. - It gathers context for you and stores your interactions automatically, so that you can avoid copy/paste now and benefit from a customized LLM later. + It gathers context for you and stores your interactions automatically, so that you can avoid copy/paste now and benefit from a customized Large Language Model (LLM) later. Continue can be used to... 1. Edit chunks of code with specific instructions (e.g. "/edit migrate this digital ocean terraform file into one that works for GCP") @@ -25,6 +25,7 @@ help = dedent("""\ If you have feedback, please use /feedback to let us know how you would like to use Continue. 
We are excited to hear from you!""") + class HelpStep(Step): name: str = "Help" @@ -41,7 +42,7 @@ class HelpStep(Step): Information: {help}""") - + self.chat_context.append(ChatMessage( role="user", content=prompt, @@ -54,4 +55,5 @@ class HelpStep(Step): self.description += chunk["content"] await sdk.update_ui() - capture_event(sdk.ide.unique_id, "help", {"question": question, "answer": self.description}) \ No newline at end of file + capture_event(sdk.ide.unique_id, "help", { + "question": question, "answer": self.description}) diff --git a/extension/package-lock.json b/extension/package-lock.json index 65fdab12..9d5c73e1 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.162", + "version": "0.0.163", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.162", + "version": "0.0.163", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index ef39582b..2b0f6b94 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": "Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.162", + "version": "0.0.163", "publisher": "Continue", "engines": { "vscode": "^1.67.0" diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx index 7d6541c7..73db33ca 100644 --- a/extension/react-app/src/components/ComboBox.tsx +++ b/extension/react-app/src/components/ComboBox.tsx @@ -1,29 +1,16 @@ -import React, { - useCallback, - useEffect, - useImperativeHandle, - useState, -} from "react"; +import React, { useEffect, useImperativeHandle, useState } from "react"; import { useCombobox } from "downshift"; import styled from "styled-components"; import { - buttonColor, defaultBorderRadius, lightGray, secondaryDark, vscBackground, } from "."; import CodeBlock from "./CodeBlock"; -import { RangeInFile } from "../../../src/client"; import PillButton from "./PillButton"; import HeaderButtonWithText from "./HeaderButtonWithText"; -import { - Trash, - LockClosed, - LockOpen, - Plus, - DocumentPlus, -} from "@styled-icons/heroicons-outline"; +import { DocumentPlus } from "@styled-icons/heroicons-outline"; import { HighlightedRangeContext } from "../../../schema/FullState"; // #region styled components diff --git a/extension/react-app/src/components/Onboarding.tsx b/extension/react-app/src/components/Onboarding.tsx index e2dd6f57..6bfb0ccd 100644 --- a/extension/react-app/src/components/Onboarding.tsx +++ b/extension/react-app/src/components/Onboarding.tsx @@ -22,6 +22,7 @@ const StyledSpan = styled.span` &:hover { background-color: #ffffff33; } + white-space: nowrap; `; const Onboarding = () => { -- cgit v1.2.3-70-g09d2 From 612c4115a0c74b52b77956867e0f47a84eca98a9 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Fri, 14 Jul 2023 03:24:46 -0700 Subject: warn of large highlighted ranges, cmd+k->m --- continuedev/src/continuedev/core/policy.py | 2 +- continuedev/src/continuedev/steps/help.py | 2 +- extension/package-lock.json | 4 +- extension/package.json | 6 +- extension/react-app/src/components/ComboBox.tsx | 5 + extension/react-app/src/components/Onboarding.tsx | 1 + extension/react-app/src/components/PillButton.tsx | 167 +++++++++++++--------- extension/react-app/src/pages/gui.tsx | 27 ++-- extension/src/commands.ts | 17 ++- extension/src/lang-server/codeLens.ts | 4 +- 10 files 
changed, 141 insertions(+), 94 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/core/policy.py b/continuedev/src/continuedev/core/policy.py index 59ea78b1..bc897357 100644 --- a/continuedev/src/continuedev/core/policy.py +++ b/continuedev/src/continuedev/core/policy.py @@ -59,7 +59,7 @@ class DemoPolicy(Policy): return ( MessageStep(name="Welcome to Continue", message=dedent("""\ - Highlight code and ask a question or give instructions - - Use `cmd+k` (Mac) / `ctrl+k` (Windows) to open Continue + - Use `cmd+m` (Mac) / `ctrl+m` (Windows) to open Continue - Use `/help` to ask questions about how to use Continue""")) >> WelcomeStep() >> # SetupContinueWorkspaceStep() >> diff --git a/continuedev/src/continuedev/steps/help.py b/continuedev/src/continuedev/steps/help.py index 2dc3647c..ba1e6087 100644 --- a/continuedev/src/continuedev/steps/help.py +++ b/continuedev/src/continuedev/steps/help.py @@ -19,7 +19,7 @@ help = dedent("""\ Continue passes all of the sections of code you highlight, the code above and below the to-be edited highlighted code section, and all previous steps above input box as context to the LLM. - You can use cmd+k (Mac) / ctrl+k (Windows) to open Continue. You can use cmd+shift+e / ctrl+shift+e to open file Explorer. You can add your own OpenAI API key to VS Code Settings with `cmd+,` + You can use cmd+m (Mac) / ctrl+m (Windows) to open Continue. You can use cmd+shift+e / ctrl+shift+e to open file Explorer. You can add your own OpenAI API key to VS Code Settings with `cmd+,` If Continue is stuck loading, try using `cmd+shift+p` to open the command palette, search "Reload Window", and then select it. This will reload VS Code and Continue and often fixes issues. diff --git a/extension/package-lock.json b/extension/package-lock.json index 9d5c73e1..a79dd6b4 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.163", + "version": "0.0.164", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.163", + "version": "0.0.164", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index 2b0f6b94..de1f395d 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": "Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.163", + "version": "0.0.164", "publisher": "Continue", "engines": { "vscode": "^1.67.0" @@ -111,8 +111,8 @@ "keybindings": [ { "command": "continue.focusContinueInput", - "mac": "cmd+k", - "key": "ctrl+k" + "mac": "cmd+m", + "key": "ctrl+m" }, { "command": "continue.suggestionDown", diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx index 73db33ca..bd0d59b5 100644 --- a/extension/react-app/src/components/ComboBox.tsx +++ b/extension/react-app/src/components/ComboBox.tsx @@ -228,6 +228,11 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { )} */} {highlightedCodeSections.map((section, idx) => ( 4000 && section.editing + ? 
"Editing such a large range may be slow" + : undefined + } editing={section.editing} pinned={section.pinned} index={idx} diff --git a/extension/react-app/src/components/Onboarding.tsx b/extension/react-app/src/components/Onboarding.tsx index 6bfb0ccd..231c1e93 100644 --- a/extension/react-app/src/components/Onboarding.tsx +++ b/extension/react-app/src/components/Onboarding.tsx @@ -109,6 +109,7 @@ const Onboarding = () => { paddingBottom: "50px", textAlign: "center", cursor: "pointer", + whiteSpace: "nowrap", }} > ` } `; +const CircleDiv = styled.div` + position: absolute; + top: -10px; + right: -10px; + width: 20px; + height: 20px; + border-radius: 50%; + background-color: red; + color: white; + display: flex; + align-items: center; + justify-content: center; + padding: 2px; +`; + interface PillButtonProps { onHover?: (arg0: boolean) => void; onDelete?: () => void; @@ -55,6 +68,7 @@ interface PillButtonProps { index: number; editing: boolean; pinned: boolean; + warning?: string; } const PillButton = (props: PillButtonProps) => { @@ -63,75 +77,96 @@ const PillButton = (props: PillButtonProps) => { return ( <> - + + {props.editing + ? "Editing this range (with rest of file as context)" + : "Edit this range"} + + Delete + {props.warning && ( + <> + + + + + {props.warning} - { - if (props.onDelete) { - props.onDelete(); - } - }} - > - - - + )} - {props.title} - - - {props.editing - ? "Editing this range (with rest of file as context)" - : "Edit this range"} - - Delete + ); }; diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index 4ff260fa..57cebac3 100644 --- a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -95,11 +95,8 @@ function GUI(props: GUIProps) { name: "Welcome to Continue", hide: false, description: `- Highlight code and ask a question or give instructions -- Use \`cmd+k\` (Mac) / \`ctrl+k\` (Windows) to open Continue -- Use \`cmd+shift+e\` / \`ctrl+shift+e\` to open file Explorer -- Add your own OpenAI API key to VS Code Settings with \`cmd+,\` -- Use slash commands when you want fine-grained control -- Past steps are included as part of the context by default`, + - Use \`cmd+m\` (Mac) / \`ctrl+m\` (Windows) to open Continue + - Use \`/help\` to ask questions about how to use Continue`, system_message: null, chat_context: [], manage_own_chat_context: false, @@ -269,15 +266,17 @@ function GUI(props: GUIProps) { return ( <> - { - client?.sendMainInput(`/feedback ${text}`); - setShowFeedbackDialog(false); - }} - onClose={() => { - setShowFeedbackDialog(false); - }} - message={feedbackDialogMessage} /> + { + client?.sendMainInput(`/feedback ${text}`); + setShowFeedbackDialog(false); + }} + onClose={() => { + setShowFeedbackDialog(false); + }} + message={feedbackDialogMessage} + /> any } = { "continue.suggestionDown": suggestionDownCommand, @@ -30,10 +32,15 @@ const commandsMap: { [command: string]: (...args: any) => any } = { "continue.acceptAllSuggestions": acceptAllSuggestionsCommand, "continue.rejectAllSuggestions": rejectAllSuggestionsCommand, "continue.focusContinueInput": async () => { - vscode.commands.executeCommand("continue.continueGUIView.focus"); - debugPanelWebview?.postMessage({ - type: "focusContinueInput", - }); + if (focusedOnContinueInput) { + vscode.commands.executeCommand("workbench.action.focusActiveEditorGroup"); + } else { + vscode.commands.executeCommand("continue.continueGUIView.focus"); + debugPanelWebview?.postMessage({ + type: "focusContinueInput", + }); + } + 
focusedOnContinueInput = !focusedOnContinueInput; }, "continue.quickTextEntry": async () => { const text = await vscode.window.showInputBox({ @@ -53,4 +60,4 @@ export function registerAllCommands(context: vscode.ExtensionContext) { vscode.commands.registerCommand(command, callback) ); } -} \ No newline at end of file +} diff --git a/extension/src/lang-server/codeLens.ts b/extension/src/lang-server/codeLens.ts index 5800a00e..1cfef5d5 100644 --- a/extension/src/lang-server/codeLens.ts +++ b/extension/src/lang-server/codeLens.ts @@ -60,12 +60,12 @@ class DiffViewerCodeLensProvider implements vscode.CodeLensProvider { } codeLenses.push( new vscode.CodeLens(range, { - title: "Accept ✅ (⌘⇧↩)", + title: "Accept All ✅ (⌘⇧↩)", command: "continue.acceptDiff", arguments: [document.uri.fsPath], }), new vscode.CodeLens(range, { - title: "Reject ❌ (⌘⇧⌫)", + title: "Reject All ❌ (⌘⇧⌫)", command: "continue.rejectDiff", arguments: [document.uri.fsPath], }) -- cgit v1.2.3-70-g09d2 From f0b2597895920b7d714b53f2d70a3a5858f89d42 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Fri, 14 Jul 2023 13:45:10 -0700 Subject: insidious client_state vs application_state err --- continuedev/src/continuedev/core/autopilot.py | 2 ++ continuedev/src/continuedev/server/gui.py | 2 +- continuedev/src/continuedev/server/ide.py | 2 +- continuedev/src/continuedev/steps/core/core.py | 9 ++++++++- extension/react-app/src/components/ComboBox.tsx | 9 ++++++++- extension/react-app/src/components/StepContainer.tsx | 9 ++++++--- extension/src/continueIdeClient.ts | 8 ++++++-- extension/src/diffs.ts | 2 +- extension/src/util/messenger.ts | 2 +- 9 files changed, 34 insertions(+), 11 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py index e1c8a076..82439f49 100644 --- a/continuedev/src/continuedev/core/autopilot.py +++ b/continuedev/src/continuedev/core/autopilot.py @@ -37,6 +37,8 @@ def get_error_title(e: Exception) -> str: return "The request failed. Please check your internet connection and try again. If this issue persists, you can use our API key for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to \"\"" elif isinstance(e, openai_errors.InvalidRequestError): return 'Your API key does not have access to GPT-4. You can use ours for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to ""' + elif e.__str__().startswith("Cannot connect to host"): + return "The request failed. Please check your internet connection and try again." 
return e.__str__() or e.__repr__() diff --git a/continuedev/src/continuedev/server/gui.py b/continuedev/src/continuedev/server/gui.py index 238273b2..9a411fbe 100644 --- a/continuedev/src/continuedev/server/gui.py +++ b/continuedev/src/continuedev/server/gui.py @@ -53,7 +53,7 @@ class GUIProtocolServer(AbstractGUIProtocolServer): self.session = session async def _send_json(self, message_type: str, data: Any): - if self.websocket.client_state == WebSocketState.DISCONNECTED: + if self.websocket.application_state == WebSocketState.DISCONNECTED: return await self.websocket.send_json({ "messageType": message_type, diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index 73cce201..7875c94d 100644 --- a/continuedev/src/continuedev/server/ide.py +++ b/continuedev/src/continuedev/server/ide.py @@ -149,7 +149,7 @@ class IdeProtocolServer(AbstractIdeProtocolServer): return other_msgs async def _send_json(self, message_type: str, data: Any): - if self.websocket.client_state == WebSocketState.DISCONNECTED: + if self.websocket.application_state == WebSocketState.DISCONNECTED: return await self.websocket.send_json({ "messageType": message_type, diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py index 787da316..75f8e460 100644 --- a/continuedev/src/continuedev/steps/core/core.py +++ b/continuedev/src/continuedev/steps/core/core.py @@ -9,7 +9,7 @@ from ...libs.llm.prompt_utils import MarkdownStyleEncoderDecoder from ...models.filesystem_edit import EditDiff, FileEdit, FileEditWithFullContents, FileSystemEdit from ...models.filesystem import FileSystem, RangeInFile, RangeInFileWithContents from ...core.observation import Observation, TextObservation, TracebackObservation, UserInputObservation -from ...core.main import ChatMessage, Step, SequentialStep +from ...core.main import ChatMessage, ContinueCustomException, Step, SequentialStep from ...libs.util.count_tokens import MAX_TOKENS_FOR_MODEL, DEFAULT_MAX_TOKENS from ...libs.util.dedent import dedent_and_get_common_whitespace import difflib @@ -608,6 +608,13 @@ Please output the code to be inserted at the cursor in order to fulfill the user rif_dict[rif.filepath] = rif.contents for rif in rif_with_contents: + # If the file doesn't exist, ask them to save it first + if not os.path.exists(rif.filepath): + message = f"The file {rif.filepath} does not exist. Please save it first." 
+ raise ContinueCustomException( + title=message, message=message + ) + await sdk.ide.setFileOpen(rif.filepath) await sdk.ide.setSuggestionsLocked(rif.filepath, True) await self.stream_rif(rif, sdk) diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx index 5d9b5109..754c9445 100644 --- a/extension/react-app/src/components/ComboBox.tsx +++ b/extension/react-app/src/components/ComboBox.tsx @@ -169,6 +169,7 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { useImperativeHandle(ref, () => downshiftProps, [downshiftProps]); const [metaKeyPressed, setMetaKeyPressed] = useState(false); + const [focused, setFocused] = useState(false); useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { if (e.key === "Meta") { @@ -298,7 +299,11 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { // setShowContextDropdown(target.value.endsWith("@")); }, + onFocus: (e) => { + setFocused(true); + }, onBlur: (e) => { + setFocused(false); postVscMessage("blurContinueInput", {}); }, onKeyDown: (event) => { @@ -374,7 +379,9 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { {highlightedCodeSections.length === 0 && (downshiftProps.inputValue?.startsWith("/edit") || - (metaKeyPressed && downshiftProps.inputValue?.length > 0)) && ( + (focused && + metaKeyPressed && + downshiftProps.inputValue?.length > 0)) && (
Inserting at cursor
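The same commit switches the Python servers' send guards from `client_state` to `application_state`. In Starlette, a WebSocket tracks each end of the connection separately, and checking the wrong one can let a send slip through after the application side has already closed. A minimal sketch of the guard, assuming a Starlette websocket:

    from starlette.websockets import WebSocket, WebSocketState

    async def safe_send_json(websocket: WebSocket, message_type: str, data: dict) -> None:
        # application_state reflects the server/application side of the
        # connection; client_state reflects the remote end.
        if websocket.application_state == WebSocketState.DISCONNECTED:
            return
        await websocket.send_json({"messageType": message_type, "data": data})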
diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx
index 6fa4ba13..14e9b854 100644
--- a/extension/react-app/src/components/StepContainer.tsx
+++ b/extension/react-app/src/components/StepContainer.tsx
@@ -253,9 +253,12 @@ function StepContainer(props: StepContainerProps) {
         )}
         {props.historyNode.observation?.error ? (
-          <pre className="overflow-x-scroll">
-            {props.historyNode.observation.error as string}
-          </pre>
+          <details>
+            <summary>View Traceback</summary>
+            <pre className="overflow-x-scroll">
+              {props.historyNode.observation.error as string}
+            </pre>
+          </details>
) : ( void): void; abstract onClose(callback: () => void): void; - + abstract onError(callback: () => void): void; abstract sendAndReceive(messageType: string, data: any): Promise; -- cgit v1.2.3-70-g09d2 From 6b3d20c943c0c1417b437ad475019bae729103ed Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Fri, 14 Jul 2023 17:40:16 -0700 Subject: fixed config explanation, don't read terminals --- continuedev/src/continuedev/steps/open_config.py | 4 ++-- extension/src/continueIdeClient.ts | 26 ++++++++++++++---------- 2 files changed, 17 insertions(+), 13 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/steps/open_config.py b/continuedev/src/continuedev/steps/open_config.py index 87f03e9f..af55a95a 100644 --- a/continuedev/src/continuedev/steps/open_config.py +++ b/continuedev/src/continuedev/steps/open_config.py @@ -14,10 +14,10 @@ class OpenConfigStep(Step): "custom_commands": [ { "name": "test", - "description": "Write unit tests like I do for the highlighted code" + "description": "Write unit tests like I do for the highlighted code", "prompt": "Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated." } - ], + ] ``` `"name"` is the command you will type. `"description"` is the description displayed in the slash command menu. diff --git a/extension/src/continueIdeClient.ts b/extension/src/continueIdeClient.ts index 6dd117d3..2c96763d 100644 --- a/extension/src/continueIdeClient.ts +++ b/extension/src/continueIdeClient.ts @@ -15,6 +15,7 @@ import { FileEditWithFullContents } from "../schema/FileEditWithFullContents"; import fs = require("fs"); import { WebsocketMessenger } from "./util/messenger"; import { diffManager } from "./diffs"; +import path = require("path"); class IdeProtocolClient { private messenger: WebsocketMessenger | null = null; @@ -350,25 +351,28 @@ class IdeProtocolClient { // ------------------------------------ // // Respond to request + private editorIsTerminal(editor: vscode.TextEditor) { + return ( + !!path.basename(editor.document.uri.fsPath).match(/\d/) || + (editor.document.languageId === "plaintext" && + editor.document.getText() === "accessible-buffer-accessible-buffer-") + ); + } + getOpenFiles(): string[] { return vscode.window.visibleTextEditors - .filter((editor) => { - return !( - editor.document.uri.fsPath.endsWith("/1") || - (editor.document.languageId === "plaintext" && - editor.document.getText() === - "accessible-buffer-accessible-buffer-") - ); - }) + .filter((editor) => !this.editorIsTerminal(editor)) .map((editor) => { return editor.document.uri.fsPath; }); } getVisibleFiles(): string[] { - return vscode.window.visibleTextEditors.map((editor) => { - return editor.document.uri.fsPath; - }); + return vscode.window.visibleTextEditors + .filter((editor) => !this.editorIsTerminal(editor)) + .map((editor) => { + return editor.document.uri.fsPath; + }); } saveFile(filepath: string) { -- cgit v1.2.3-70-g09d2 From 925c3e0ef45d9eb01a8f6c1efd239fa011492bd2 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sat, 15 Jul 2023 14:30:11 -0700 Subject: ctrl shortcuts on windows, load models immediately --- continuedev/src/continuedev/core/autopilot.py | 10 ++-- continuedev/src/continuedev/core/sdk.py | 59 +++++++++++++++------- .../src/continuedev/libs/llm/hf_inference_api.py | 6 ++- continuedev/src/continuedev/server/ide.py | 4 +- .../src/continuedev/server/session_manager.py | 6 
+-- .../react-app/src/components/StepContainer.tsx | 2 +- extension/react-app/src/components/TextDialog.tsx | 6 ++- extension/react-app/src/pages/gui.tsx | 6 +-- extension/react-app/src/util/index.ts | 30 +++++++++++ 9 files changed, 98 insertions(+), 31 deletions(-) create mode 100644 extension/react-app/src/util/index.ts (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py index 82439f49..0696c360 100644 --- a/continuedev/src/continuedev/core/autopilot.py +++ b/continuedev/src/continuedev/core/autopilot.py @@ -50,6 +50,8 @@ class Autopilot(ContinueBaseModel): full_state: Union[FullState, None] = None _on_update_callbacks: List[Callable[[FullState], None]] = [] + continue_sdk: ContinueSDK = None + _active: bool = False _should_halt: bool = False _main_user_input_queue: List[str] = [] @@ -57,9 +59,11 @@ class Autopilot(ContinueBaseModel): _user_input_queue = AsyncSubscriptionQueue() _retry_queue = AsyncSubscriptionQueue() - @cached_property - def continue_sdk(self) -> ContinueSDK: - return ContinueSDK(self) + @classmethod + async def create(cls, policy: Policy, ide: AbstractIdeProtocolServer, full_state: FullState) -> "Autopilot": + autopilot = cls(ide=ide, policy=policy) + autopilot.continue_sdk = await ContinueSDK.create(autopilot) + return autopilot class Config: arbitrary_types_allowed = True diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py index aa2d8892..d73561d2 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/continuedev/src/continuedev/core/sdk.py @@ -1,6 +1,6 @@ import asyncio from functools import cached_property -from typing import Coroutine, Union +from typing import Coroutine, Dict, Union import os from ..steps.core.core import DefaultModelEditCodeStep @@ -13,7 +13,7 @@ from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI from ..libs.llm.openai import OpenAI from .observation import Observation from ..server.ide_protocol import AbstractIdeProtocolServer -from .main import Context, ContinueCustomException, HighlightedRangeContext, History, Step, ChatMessage, ChatMessageRole +from .main import Context, ContinueCustomException, History, Step, ChatMessage from ..steps.core.core import * from ..libs.llm.proxy_server import ProxyServer @@ -22,26 +22,46 @@ class Autopilot: pass +ModelProvider = Literal["openai", "hf_inference_api", "ggml", "anthropic"] +MODEL_PROVIDER_TO_ENV_VAR = { + "openai": "OPENAI_API_KEY", + "hf_inference_api": "HUGGING_FACE_TOKEN", + "anthropic": "ANTHROPIC_API_KEY" +} + + class Models: - def __init__(self, sdk: "ContinueSDK"): + provider_keys: Dict[ModelProvider, str] = {} + model_providers: List[ModelProvider] + + def __init__(self, sdk: "ContinueSDK", model_providers: List[ModelProvider]): self.sdk = sdk + self.model_providers = model_providers + + @classmethod + async def create(cls, sdk: "ContinueSDK", with_providers: List[ModelProvider] = ["openai"]) -> "Models": + models = Models(sdk, with_providers) + for provider in with_providers: + if provider in MODEL_PROVIDER_TO_ENV_VAR: + env_var = MODEL_PROVIDER_TO_ENV_VAR[provider] + models.provider_keys[provider] = await sdk.get_user_secret( + env_var, f'Please add your {env_var} to the .env file') + + return models def __load_openai_model(self, model: str) -> OpenAI: - async def load_openai_model(): - api_key = await self.sdk.get_user_secret( - 'OPENAI_API_KEY', 'Enter your OpenAI API key or press enter to try for free') - if api_key == "": - return 
ProxyServer(self.sdk.ide.unique_id, model) - return OpenAI(api_key=api_key, default_model=model) - return asyncio.get_event_loop().run_until_complete(load_openai_model()) + api_key = self.provider_keys["openai"] + if api_key == "": + return ProxyServer(self.sdk.ide.unique_id, model) + return OpenAI(api_key=api_key, default_model=model) + + def __load_hf_inference_api_model(self, model: str) -> HuggingFaceInferenceAPI: + api_key = self.provider_keys["hf_inference_api"] + return HuggingFaceInferenceAPI(api_key=api_key, model=model) @cached_property def starcoder(self): - async def load_starcoder(): - api_key = await self.sdk.get_user_secret( - 'HUGGING_FACE_TOKEN', 'Please add your Hugging Face token to the .env file') - return HuggingFaceInferenceAPI(api_key=api_key) - return asyncio.get_event_loop().run_until_complete(load_starcoder()) + return self.__load_hf_inference_api_model("bigcode/starcoder") @cached_property def gpt35(self): @@ -74,7 +94,7 @@ class Models: @property def default(self): default_model = self.sdk.config.default_model - return self.__model_from_name(default_model) if default_model is not None else self.gpt35 + return self.__model_from_name(default_model) if default_model is not None else self.gpt4 class ContinueSDK(AbstractContinueSDK): @@ -87,10 +107,15 @@ class ContinueSDK(AbstractContinueSDK): def __init__(self, autopilot: Autopilot): self.ide = autopilot.ide self.__autopilot = autopilot - self.models = Models(self) self.context = autopilot.context self.config = self._load_config() + @classmethod + async def create(cls, autopilot: Autopilot) -> "ContinueSDK": + sdk = ContinueSDK(autopilot) + sdk.models = await Models.create(sdk) + return sdk + config: ContinueConfig def _load_config(self) -> ContinueConfig: diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/continuedev/src/continuedev/libs/llm/hf_inference_api.py index 1586c620..803ba122 100644 --- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py +++ b/continuedev/src/continuedev/libs/llm/hf_inference_api.py @@ -9,7 +9,11 @@ DEFAULT_MAX_TIME = 120. 
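The `Models.create` refactor above replaces `run_until_complete` calls inside the model loaders with one upfront async initialization: `__init__` cannot await, so the async work moves into a classmethod factory that returns a fully initialized object. The shape of that pattern, with a hypothetical `fetch_secret` standing in for `sdk.get_user_secret`:

    import asyncio
    from typing import Dict, List

    async def fetch_secret(env_var: str) -> str:
        await asyncio.sleep(0)  # stand-in for asking the IDE for a secret
        return f"key-from-{env_var}"

    class Models:
        def __init__(self, provider_keys: Dict[str, str]):
            self.provider_keys = provider_keys

        @classmethod
        async def create(cls, providers: List[str]) -> "Models":
            # __init__ cannot await, so async setup happens here and the
            # ready-to-use object is handed back to the caller.
            keys = {p: await fetch_secret(p.upper() + "_API_KEY") for p in providers}
            return cls(keys)

    async def main():
        models = await Models.create(["openai", "anthropic"])
        print(models.provider_keys)

    asyncio.run(main())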
class HuggingFaceInferenceAPI(LLM): api_key: str - model: str = "bigcode/starcoder" + model: str + + def __init__(self, api_key: str, model: str): + self.api_key = api_key + self.model = model def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs): """Return the completion of the text with the given temperature.""" diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index 7875c94d..77b13483 100644 --- a/continuedev/src/continuedev/server/ide.py +++ b/continuedev/src/continuedev/server/ide.py @@ -227,8 +227,8 @@ class IdeProtocolServer(AbstractIdeProtocolServer): }) async def getSessionId(self): - session_id = self.session_manager.new_session( - self, self.session_id).session_id + session_id = (await self.session_manager.new_session( + self, self.session_id)).session_id await self._send_json("getSessionId", { "sessionId": session_id }) diff --git a/continuedev/src/continuedev/server/session_manager.py b/continuedev/src/continuedev/server/session_manager.py index fb8ac386..6d109ca6 100644 --- a/continuedev/src/continuedev/server/session_manager.py +++ b/continuedev/src/continuedev/server/session_manager.py @@ -53,18 +53,18 @@ class SessionManager: session_files = os.listdir(sessions_folder) if f"{session_id}.json" in session_files and session_id in self.registered_ides: if self.registered_ides[session_id].session_id is not None: - return self.new_session(self.registered_ides[session_id], session_id=session_id) + return await self.new_session(self.registered_ides[session_id], session_id=session_id) raise KeyError("Session ID not recognized", session_id) return self.sessions[session_id] - def new_session(self, ide: AbstractIdeProtocolServer, session_id: Union[str, None] = None) -> Session: + async def new_session(self, ide: AbstractIdeProtocolServer, session_id: Union[str, None] = None) -> Session: full_state = None if session_id is not None and os.path.exists(getSessionFilePath(session_id)): with open(getSessionFilePath(session_id), "r") as f: full_state = FullState(**json.load(f)) - autopilot = DemoAutopilot( + autopilot = await DemoAutopilot.create( policy=DemoPolicy(), ide=ide, full_state=full_state) session_id = session_id or str(uuid4()) ide.session_id = session_id diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx index 14e9b854..7f23e333 100644 --- a/extension/react-app/src/components/StepContainer.tsx +++ b/extension/react-app/src/components/StepContainer.tsx @@ -181,7 +181,7 @@ function StepContainer(props: StepContainerProps) { } className="overflow-hidden cursor-pointer" onClick={(e) => { - if (e.metaKey) { + if (isMetaEquivalentKeyPressed(e)) { props.onToggleAll(); } else { props.onToggle(); diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx index ea5727f0..c724697d 100644 --- a/extension/react-app/src/components/TextDialog.tsx +++ b/extension/react-app/src/components/TextDialog.tsx @@ -81,7 +81,11 @@ const TextDialog = (props: { rows={10} ref={textAreaRef} onKeyDown={(e) => { - if (e.key === "Enter" && e.metaKey && textAreaRef.current) { + if ( + e.key === "Enter" && + isMetaEquivalentKeyPressed(e) && + textAreaRef.current + ) { props.onEnter(textAreaRef.current.value); setText(""); } else if (e.key === "Escape") { diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx index 57cebac3..cb0404ab 100644 --- 
a/extension/react-app/src/pages/gui.tsx +++ b/extension/react-app/src/pages/gui.tsx @@ -137,12 +137,12 @@ function GUI(props: GUIProps) { useEffect(() => { const listener = (e: any) => { // Cmd + i to toggle fast model - if (e.key === "i" && e.metaKey && e.shiftKey) { + if (e.key === "i" && isMetaEquivalentKeyPressed(e) && e.shiftKey) { setUsingFastModel((prev) => !prev); // Cmd + backspace to stop currently running step } else if ( e.key === "Backspace" && - e.metaKey && + isMetaEquivalentKeyPressed(e) && typeof history?.current_index !== "undefined" && history.timeline[history.current_index]?.active ) { @@ -220,7 +220,7 @@ function GUI(props: GUIProps) { if (mainTextInputRef.current) { let input = (mainTextInputRef.current as any).inputValue; // cmd+enter to /edit - if (event?.metaKey) { + if (isMetaEquivalentKeyPressed(event)) { input = `/edit ${input}`; } (mainTextInputRef.current as any).setInputValue(""); diff --git a/extension/react-app/src/util/index.ts b/extension/react-app/src/util/index.ts new file mode 100644 index 00000000..ad711321 --- /dev/null +++ b/extension/react-app/src/util/index.ts @@ -0,0 +1,30 @@ +type Platform = "mac" | "linux" | "windows" | "unknown"; + +function getPlatform(): Platform { + const platform = window.navigator.platform.toUpperCase(); + if (platform.indexOf("MAC") >= 0) { + return "mac"; + } else if (platform.indexOf("LINUX") >= 0) { + return "linux"; + } else if (platform.indexOf("WIN") >= 0) { + return "windows"; + } else { + return "unknown"; + } +} + +function isMetaEquivalentKeyPressed(event: { + metaKey: boolean; + ctrlKey: boolean; +}): boolean { + const platform = getPlatform(); + switch (platform) { + case "mac": + return event.metaKey; + case "linux": + case "windows": + return event.ctrlKey; + default: + return event.metaKey; + } +} -- cgit v1.2.3-70-g09d2 From 176087be502ce6663c3b3128352a69f8c7409666 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sat, 15 Jul 2023 18:47:28 -0700 Subject: 5s timeout on websocket connections --- continuedev/src/continuedev/server/gui.py | 9 +++++++-- continuedev/src/continuedev/server/ide.py | 8 ++++++-- extension/package-lock.json | 4 ++-- extension/package.json | 2 +- extension/src/activation/environmentSetup.ts | 5 ++--- 5 files changed, 18 insertions(+), 10 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/server/gui.py b/continuedev/src/continuedev/server/gui.py index 9a411fbe..4201353e 100644 --- a/continuedev/src/continuedev/server/gui.py +++ b/continuedev/src/continuedev/server/gui.py @@ -1,3 +1,4 @@ +import asyncio import json from fastapi import Depends, Header, WebSocket, APIRouter from starlette.websockets import WebSocketState, WebSocketDisconnect @@ -60,8 +61,12 @@ class GUIProtocolServer(AbstractGUIProtocolServer): "data": data }) - async def _receive_json(self, message_type: str) -> Any: - return await self.sub_queue.get(message_type) + async def _receive_json(self, message_type: str, timeout: int = 5) -> Any: + try: + return await asyncio.wait_for(self.sub_queue.get(message_type), timeout=timeout) + except asyncio.TimeoutError: + raise Exception( + "GUI Protocol _receive_json timed out after 5 seconds") async def _send_and_receive_json(self, data: Any, resp_model: Type[T], message_type: str) -> T: await self._send_json(message_type, data) diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index 77b13483..e5e8de02 100644 --- a/continuedev/src/continuedev/server/ide.py +++ 
b/continuedev/src/continuedev/server/ide.py @@ -156,8 +156,12 @@ class IdeProtocolServer(AbstractIdeProtocolServer): "data": data }) - async def _receive_json(self, message_type: str) -> Any: - return await self.sub_queue.get(message_type) + async def _receive_json(self, message_type: str, timeout: int = 5) -> Any: + try: + return await asyncio.wait_for(self.sub_queue.get(message_type), timeout=timeout) + except asyncio.TimeoutError: + raise Exception( + "IDE Protocol _receive_json timed out after 5 seconds") async def _send_and_receive_json(self, data: Any, resp_model: Type[T], message_type: str) -> T: await self._send_json(message_type, data) diff --git a/extension/package-lock.json b/extension/package-lock.json index f793abae..b86cb10e 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.171", + "version": "0.0.172", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.171", + "version": "0.0.172", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index 38dc4542..6b719723 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": "Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.171", + "version": "0.0.172", "publisher": "Continue", "engines": { "vscode": "^1.67.0" diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts index be1c220c..7a0d24d4 100644 --- a/extension/src/activation/environmentSetup.ts +++ b/extension/src/activation/environmentSetup.ts @@ -28,8 +28,6 @@ async function retryThenFail( ); console.log("Execution policy stdout: ", stdout); console.log("Execution policy stderr: ", stderr); - // Then reload the window for this to take effect - await vscode.commands.executeCommand("workbench.action.reloadWindow"); } } @@ -447,7 +445,8 @@ export async function startContinuePythonServer() { console.log(`stdout: ${data}`); if ( data.includes("Uvicorn running on") || // Successfully started the server - data.includes("address already in use") // The server is already running (probably a simultaneously opened VS Code window) + data.includes("only one usage of each socket address") || // [windows] The server is already running (probably a simultaneously opened VS Code window) + data.includes("address already in use") // [mac/linux] The server is already running (probably a simultaneously opened VS Code window) ) { console.log("Successfully started Continue python server"); resolve(null); -- cgit v1.2.3-70-g09d2 From 8e96e0ee4a1c251d20577769bfcb76dbc7b043a2 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sat, 15 Jul 2023 21:55:47 -0700 Subject: fixed reading of terminal and other vscode windows --- continuedev/src/continuedev/server/ide.py | 34 ++++++--------- extension/package-lock.json | 4 +- extension/package.json | 2 +- extension/src/activation/environmentSetup.ts | 54 ++++++++++++------------ extension/src/continueIdeClient.ts | 63 ++++++++++++++++------------ 5 files changed, 79 insertions(+), 78 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index e5e8de02..a8868a9a 100644 --- a/continuedev/src/continuedev/server/ide.py +++ b/continuedev/src/continuedev/server/ide.py @@ -126,7 +126,8 @@ class IdeProtocolServer(AbstractIdeProtocolServer): 
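Both `_receive_json` changes above bound the wait with `asyncio.wait_for`, so a silent IDE or GUI surfaces as an error instead of hanging the protocol forever. The core of that change, assuming the subscription queue behaves like an `asyncio.Queue`:

    import asyncio
    from typing import Any

    async def receive_json(queue: "asyncio.Queue[Any]", message_type: str, timeout: float = 5.0) -> Any:
        # Bound the wait so a missing response becomes a visible error.
        try:
            return await asyncio.wait_for(queue.get(), timeout=timeout)
        except asyncio.TimeoutError:
            raise Exception(f"_receive_json timed out after {timeout}s waiting for {message_type!r}")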
workspace_directory: str = None unique_id: str = None - async def initialize(self) -> List[str]: + async def initialize(self, session_id: str) -> List[str]: + self.session_id = session_id await self._send_json("workspaceDirectory", {}) await self._send_json("uniqueId", {}) other_msgs = [] @@ -287,32 +288,24 @@ class IdeProtocolServer(AbstractIdeProtocolServer): pass def onFileEdits(self, edits: List[FileEditWithFullContents]): - # Send the file edits to ALL autopilots. - # Maybe not ideal behavior - for _, session in self.session_manager.sessions.items(): - session.autopilot.handle_manual_edits(edits) + session_manager.sessions[self.session_id].autopilot.handle_manual_edits( + edits) def onDeleteAtIndex(self, index: int): - for _, session in self.session_manager.sessions.items(): - create_async_task( - session.autopilot.delete_at_index(index), self.unique_id) + create_async_task( + session_manager.sessions[self.session_id].autopilot.delete_at_index(index), self.unique_id) def onCommandOutput(self, output: str): - # Send the output to ALL autopilots. - # Maybe not ideal behavior - for _, session in self.session_manager.sessions.items(): - create_async_task( - session.autopilot.handle_command_output(output), self.unique_id) + create_async_task( + self.session_manager.sessions[self.session_id].autopilot.handle_command_output(output), self.unique_id) def onHighlightedCodeUpdate(self, range_in_files: List[RangeInFileWithContents]): - for _, session in self.session_manager.sessions.items(): - create_async_task( - session.autopilot.handle_highlighted_code(range_in_files), self.unique_id) + create_async_task( + self.session_manager.sessions[self.session_id].autopilot.handle_highlighted_code(range_in_files), self.unique_id) def onMainUserInput(self, input: str): - for _, session in self.session_manager.sessions.items(): - create_async_task( - session.autopilot.accept_user_input(input), self.unique_id) + create_async_task( + self.session_manager.sessions[self.session_id].autopilot.accept_user_input(input), self.unique_id) # Request information. Session doesn't matter. 
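The handlers above stop broadcasting IDE events to every open session and instead route each event to the one session that owns the connection. A reduced sketch of that routing, with a hypothetical registry keyed by session id:

    from typing import Dict, List, Optional

    class Session:
        def __init__(self) -> None:
            self.pending_inputs: List[str] = []

    sessions: Dict[str, Session] = {}

    def on_main_user_input(session_id: Optional[str], text: str) -> None:
        # Only the autopilot for this IDE's own session should react;
        # fanning out to all sessions duplicated work across windows.
        session = sessions.get(session_id) if session_id else None
        if session is not None:
            session.pending_inputs.append(text)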
async def getOpenFiles(self) -> List[str]: @@ -440,10 +433,9 @@ async def websocket_endpoint(websocket: WebSocket, session_id: str = None): ideProtocolServer.handle_json(message_type, data)) ideProtocolServer = IdeProtocolServer(session_manager, websocket) - ideProtocolServer.session_id = session_id if session_id is not None: session_manager.registered_ides[session_id] = ideProtocolServer - other_msgs = await ideProtocolServer.initialize() + other_msgs = await ideProtocolServer.initialize(session_id) for other_msg in other_msgs: handle_msg(other_msg) diff --git a/extension/package-lock.json b/extension/package-lock.json index f1423041..6f777c72 100644 --- a/extension/package-lock.json +++ b/extension/package-lock.json @@ -1,12 +1,12 @@ { "name": "continue", - "version": "0.0.173", + "version": "0.0.174", "lockfileVersion": 2, "requires": true, "packages": { "": { "name": "continue", - "version": "0.0.173", + "version": "0.0.174", "license": "Apache-2.0", "dependencies": { "@electron/rebuild": "^3.2.10", diff --git a/extension/package.json b/extension/package.json index 0638e768..9fe38f7f 100644 --- a/extension/package.json +++ b/extension/package.json @@ -14,7 +14,7 @@ "displayName": "Continue", "pricing": "Free", "description": "The open-source coding autopilot", - "version": "0.0.173", + "version": "0.0.174", "publisher": "Continue", "engines": { "vscode": "^1.67.0" diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts index 928fe04b..df609a34 100644 --- a/extension/src/activation/environmentSetup.ts +++ b/extension/src/activation/environmentSetup.ts @@ -260,34 +260,34 @@ async function createPythonVenv(pythonCmd: string) { await vscode.window.showErrorMessage(msg); } else if (checkEnvExists()) { console.log("Successfully set up python env at ", `${serverPath()}/env`); + } else if ( + stderr?.includes("Permission denied") && + stderr?.includes("python.exe") + ) { + // This might mean that another window is currently using the python.exe file to install requirements + // So we want to wait and try again + let i = 0; + await new Promise((resolve, reject) => + setInterval(() => { + if (i > 5) { + reject("Timed out waiting for other window to create env..."); + } + if (checkEnvExists()) { + resolve(null); + } else { + console.log("Waiting for other window to create env..."); + } + i++; + }, 5000) + ); } else { - try { - // This might mean that another window is currently using the python.exe file to install requirements - // So we want to wait and try again - let i = 0; - await new Promise((resolve, reject) => - setInterval(() => { - if (i > 5) { - reject(); - } - if (checkEnvExists()) { - resolve(null); - } else { - console.log("Waiting for other window to create env..."); - } - i++; - }, 5000) - ); - } catch (e) { - const msg = [ - "Python environment not successfully created. Trying again. Here was the stdout + stderr: ", - `stdout: ${stdout}`, - `stderr: ${stderr}`, - `e: ${e}`, - ].join("\n\n"); - console.log(msg); - throw new Error(msg); - } + const msg = [ + "Python environment not successfully created. Trying again. 
Here was the stdout + stderr: ", + `stdout: ${stdout}`, + `stderr: ${stderr}`, + ].join("\n\n"); + console.log(msg); + throw new Error(msg); } } } diff --git a/extension/src/continueIdeClient.ts b/extension/src/continueIdeClient.ts index 2c96763d..fac0a227 100644 --- a/extension/src/continueIdeClient.ts +++ b/extension/src/continueIdeClient.ts @@ -104,8 +104,11 @@ class IdeProtocolClient { // } // }); - // Setup listeners for any file changes in open editors + // Setup listeners for any selection changes in open editors vscode.window.onDidChangeTextEditorSelection((event) => { + if (this.editorIsTerminal(event.textEditor)) { + return; + } if (this._highlightDebounce) { clearTimeout(this._highlightDebounce); } @@ -376,20 +379,24 @@ class IdeProtocolClient { } saveFile(filepath: string) { - vscode.window.visibleTextEditors.forEach((editor) => { - if (editor.document.uri.fsPath === filepath) { - editor.document.save(); - } - }); + vscode.window.visibleTextEditors + .filter((editor) => !this.editorIsTerminal(editor)) + .forEach((editor) => { + if (editor.document.uri.fsPath === filepath) { + editor.document.save(); + } + }); } readFile(filepath: string): string { let contents: string | undefined; - vscode.window.visibleTextEditors.forEach((editor) => { - if (editor.document.uri.fsPath === filepath) { - contents = editor.document.getText(); - } - }); + vscode.window.visibleTextEditors + .filter((editor) => !this.editorIsTerminal(editor)) + .forEach((editor) => { + if (editor.document.uri.fsPath === filepath) { + contents = editor.document.getText(); + } + }); if (typeof contents === "undefined") { if (fs.existsSync(filepath)) { contents = fs.readFileSync(filepath, "utf-8"); @@ -429,25 +436,27 @@ class IdeProtocolClient { getHighlightedCode(): RangeInFile[] { // TODO let rangeInFiles: RangeInFile[] = []; - vscode.window.visibleTextEditors.forEach((editor) => { - editor.selections.forEach((selection) => { - // if (!selection.isEmpty) { - rangeInFiles.push({ - filepath: editor.document.uri.fsPath, - range: { - start: { - line: selection.start.line, - character: selection.start.character, - }, - end: { - line: selection.end.line, - character: selection.end.character, + vscode.window.visibleTextEditors + .filter((editor) => !this.editorIsTerminal(editor)) + .forEach((editor) => { + editor.selections.forEach((selection) => { + // if (!selection.isEmpty) { + rangeInFiles.push({ + filepath: editor.document.uri.fsPath, + range: { + start: { + line: selection.start.line, + character: selection.start.character, + }, + end: { + line: selection.end.line, + character: selection.end.character, + }, }, - }, + }); + // } }); - // } }); - }); return rangeInFiles; } -- cgit v1.2.3-70-g09d2 From 28718535f4fdbd9414155eb7d701a766cccaf771 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sun, 16 Jul 2023 00:21:22 -0700 Subject: fixes --- continuedev/src/continuedev/server/ide.py | 26 ++++++++++++++++---------- continuedev/src/continuedev/steps/core/core.py | 8 ++++++++ 2 files changed, 24 insertions(+), 10 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py index a8868a9a..a91708ec 100644 --- a/continuedev/src/continuedev/server/ide.py +++ b/continuedev/src/continuedev/server/ide.py @@ -287,25 +287,31 @@ class IdeProtocolServer(AbstractIdeProtocolServer): def onOpenGUIRequest(self): pass + def __get_autopilot(self): + return self.session_manager.sessions[self.session_id].autopilot + def onFileEdits(self, edits: 
List[FileEditWithFullContents]): - session_manager.sessions[self.session_id].autopilot.handle_manual_edits( - edits) + if autopilot := self.__get_autopilot(): + autopilot.handle_manual_edits(edits) def onDeleteAtIndex(self, index: int): - create_async_task( - session_manager.sessions[self.session_id].autopilot.delete_at_index(index), self.unique_id) + if autopilot := self.__get_autopilot(): + create_async_task(autopilot.delete_at_index(index), self.unique_id) def onCommandOutput(self, output: str): - create_async_task( - self.session_manager.sessions[self.session_id].autopilot.handle_command_output(output), self.unique_id) + if autopilot := self.__get_autopilot(): + create_async_task( + autopilot.handle_command_output(output), self.unique_id) def onHighlightedCodeUpdate(self, range_in_files: List[RangeInFileWithContents]): - create_async_task( - self.session_manager.sessions[self.session_id].autopilot.handle_highlighted_code(range_in_files), self.unique_id) + if autopilot := self.__get_autopilot(): + create_async_task(autopilot.handle_highlighted_code( + range_in_files), self.unique_id) def onMainUserInput(self, input: str): - create_async_task( - self.session_manager.sessions[self.session_id].autopilot.accept_user_input(input), self.unique_id) + if autopilot := self.__get_autopilot(): + create_async_task( + autopilot.accept_user_input(input), self.unique_id) # Request information. Session doesn't matter. async def getOpenFiles(self) -> List[str]: diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py index 75f8e460..90d64287 100644 --- a/continuedev/src/continuedev/steps/core/core.py +++ b/continuedev/src/continuedev/steps/core/core.py @@ -474,6 +474,14 @@ Please output the code to be inserted at the cursor in order to fulfill the user current_block_lines.append(line) messages = await sdk.get_chat_context() + # Delete the last user and assistant messages + i = len(messages) - 1 + deleted = 0 + while i >= 0 and deleted < 2: + if messages[i].role == "user" or messages[i].role == "assistant": + messages.pop(i) + deleted += 1 + i -= 1 messages.append(ChatMessage( role="user", content=prompt, -- cgit v1.2.3-70-g09d2 From 062b0872797fb4734ed36ea3a14f653dc685a86a Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sun, 16 Jul 2023 00:21:56 -0700 Subject: Anthropic support --- continuedev/poetry.lock | 186 +++++++++++++++++++++- continuedev/pyproject.toml | 3 +- continuedev/src/continuedev/core/config.py | 2 +- continuedev/src/continuedev/core/sdk.py | 16 +- continuedev/src/continuedev/libs/llm/anthropic.py | 81 ++++++++++ continuedev/src/continuedev/steps/chat.py | 2 +- 6 files changed, 284 insertions(+), 6 deletions(-) create mode 100644 continuedev/src/continuedev/libs/llm/anthropic.py (limited to 'continuedev/src') diff --git a/continuedev/poetry.lock b/continuedev/poetry.lock index a49a570f..e688e076 100644 --- a/continuedev/poetry.lock +++ b/continuedev/poetry.lock @@ -124,6 +124,26 @@ files = [ [package.dependencies] frozenlist = ">=1.1.0" +[[package]] +name = "anthropic" +version = "0.3.4" +description = "Client library for the anthropic API" +category = "main" +optional = false +python-versions = ">=3.7,<4.0" +files = [ + {file = "anthropic-0.3.4-py3-none-any.whl", hash = "sha256:7b0396f663b0e4eaaf485ae59a0be014cddfc0f0b8f4dad79bb35d8f28439097"}, + {file = "anthropic-0.3.4.tar.gz", hash = "sha256:36184840bd33184697666d4f1ec951d78ef5da22e87d936cd3c04b611d84e93c"}, +] + +[package.dependencies] +anyio = ">=3.5.0,<4" +distro = ">=1.7.0,<2" 
+httpx = ">=0.23.0,<1" +pydantic = ">=1.9.0,<2.0.0" +tokenizers = ">=0.13.0" +typing-extensions = ">=4.1.1,<5" + [[package]] name = "anyio" version = "3.6.2" @@ -374,6 +394,18 @@ files = [ [package.extras] dev = ["pytest (>=3.7)"] +[[package]] +name = "distro" +version = "1.8.0" +description = "Distro - an OS platform information API" +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"}, + {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"}, +] + [[package]] name = "fastapi" version = "0.95.1" @@ -588,6 +620,52 @@ files = [ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"}, ] +[[package]] +name = "httpcore" +version = "0.17.3" +description = "A minimal low-level HTTP client." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"}, + {file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"}, +] + +[package.dependencies] +anyio = ">=3.0,<5.0" +certifi = "*" +h11 = ">=0.13,<0.15" +sniffio = ">=1.0.0,<2.0.0" + +[package.extras] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (>=1.0.0,<2.0.0)"] + +[[package]] +name = "httpx" +version = "0.24.1" +description = "The next generation HTTP client." +category = "main" +optional = false +python-versions = ">=3.7" +files = [ + {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"}, + {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"}, +] + +[package.dependencies] +certifi = "*" +httpcore = ">=0.15.0,<0.18.0" +idna = "*" +sniffio = "*" + +[package.extras] +brotli = ["brotli", "brotlicffi"] +cli = ["click (>=8.0.0,<9.0.0)", "pygments (>=2.0.0,<3.0.0)", "rich (>=10,<14)"] +http2 = ["h2 (>=3,<5)"] +socks = ["socksio (>=1.0.0,<2.0.0)"] + [[package]] name = "idna" version = "3.4" @@ -600,6 +678,25 @@ files = [ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] +[[package]] +name = "importlib-resources" +version = "6.0.0" +description = "Read resources from Python packages" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.0.0-py3-none-any.whl", hash = "sha256:d952faee11004c045f785bb5636e8f885bed30dc3c940d5d42798a2a4541c185"}, + {file = "importlib_resources-6.0.0.tar.gz", hash = "sha256:4cf94875a8368bd89531a756df9a9ebe1f150e0f885030b461237bc7f2d905f2"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + [[package]] name = "jsonref" version = "1.1.0" @@ -626,6 +723,8 @@ files = [ [package.dependencies] attrs = ">=17.4.0" +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} pyrsistent = 
">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" [package.extras] @@ -1024,6 +1123,18 @@ sql-other = ["SQLAlchemy (>=1.4.16)"] test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.6.3)"] +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." +category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] + [[package]] name = "posthog" version = "3.0.1" @@ -1532,6 +1643,61 @@ requests = ">=2.26.0" [package.extras] blobfile = ["blobfile (>=2)"] +[[package]] +name = "tokenizers" +version = "0.13.3" +description = "Fast and Customizable Tokenizers" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"}, + {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"}, + {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"}, + {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = "sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"}, + {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"}, + {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"}, + {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"}, + {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"}, + {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"}, + {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = 
"sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"}, + {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"}, + {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"}, + {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"}, + {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"}, + {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"}, + {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"}, + {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"}, + {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"}, + {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"}, + {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"}, + {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"}, + {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"}, + {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"}, 
+ {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"}, + {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"}, +] + +[package.extras] +dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] +docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"] +testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"] + [[package]] name = "tqdm" version = "4.65.0" @@ -1818,7 +1984,23 @@ files = [ idna = ">=2.0" multidict = ">=4.0" +[[package]] +name = "zipp" +version = "3.16.2" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.16.2-py3-none-any.whl", hash = "sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0"}, + {file = "zipp-3.16.2.tar.gz", hash = "sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + [metadata] lock-version = "2.0" -python-versions = "^3.9" -content-hash = "3ba2a7278fda36a059d76e227be94b0cb5e2efc9396b47a9642b916680214d9f" +python-versions = "^3.8.1" +content-hash = "87dbf6d1e56ce6ba81a01a59c0de2d3717925bac9639710bf3ff3ce30f5f5e2c" diff --git a/continuedev/pyproject.toml b/continuedev/pyproject.toml index 6727e29a..08c3fd04 100644 --- a/continuedev/pyproject.toml +++ b/continuedev/pyproject.toml @@ -6,7 +6,7 @@ authors = ["Nate Sesti "] readme = "README.md" [tool.poetry.dependencies] -python = "^3.8" +python = "^3.8.1" diff-match-patch = "^20230430" fastapi = "^0.95.1" typer = "^0.7.0" @@ -24,6 +24,7 @@ tiktoken = "^0.4.0" jsonref = "^1.1.0" jsonschema = "^4.17.3" directory-tree = "^0.0.3.1" +anthropic = "^0.3.4" [tool.poetry.scripts] typegen = "src.continuedev.models.generate_json_schema:main" diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py index 6e430c04..05ba48c6 100644 --- a/continuedev/src/continuedev/core/config.py +++ b/continuedev/src/continuedev/core/config.py @@ -76,7 +76,7 @@ class ContinueConfig(BaseModel): server_url: Optional[str] = None allow_anonymous_telemetry: Optional[bool] = True default_model: Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k", - "gpt-4"] = 'gpt-4' + "gpt-4", "claude-2"] = 'gpt-4' custom_commands: Optional[List[CustomCommand]] = [CustomCommand( name="test", description="This is an example custom command. 
Use /config to edit it and create more", diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py index d73561d2..28487600 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/continuedev/src/continuedev/core/sdk.py @@ -11,6 +11,7 @@ from ..models.filesystem_edit import FileEdit, FileSystemEdit, AddFile, DeleteFi from ..models.filesystem import RangeInFile from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI from ..libs.llm.openai import OpenAI +from ..libs.llm.anthropic import Anthropic from .observation import Observation from ..server.ide_protocol import AbstractIdeProtocolServer from .main import Context, ContinueCustomException, History, Step, ChatMessage @@ -26,7 +27,7 @@ ModelProvider = Literal["openai", "hf_inference_api", "ggml", "anthropic"] MODEL_PROVIDER_TO_ENV_VAR = { "openai": "OPENAI_API_KEY", "hf_inference_api": "HUGGING_FACE_TOKEN", - "anthropic": "ANTHROPIC_API_KEY" + "anthropic": "ANTHROPIC_API_KEY", } @@ -40,6 +41,9 @@ class Models: @classmethod async def create(cls, sdk: "ContinueSDK", with_providers: List[ModelProvider] = ["openai"]) -> "Models": + if sdk.config.default_model == "claude-2": + with_providers.append("anthropic") + models = Models(sdk, with_providers) for provider in with_providers: if provider in MODEL_PROVIDER_TO_ENV_VAR: @@ -59,6 +63,14 @@ class Models: api_key = self.provider_keys["hf_inference_api"] return HuggingFaceInferenceAPI(api_key=api_key, model=model) + def __load_anthropic_model(self, model: str) -> Anthropic: + api_key = self.provider_keys["anthropic"] + return Anthropic(api_key=api_key, model=model) + + @cached_property + def claude2(self): + return self.__load_anthropic_model("claude-2") + @cached_property def starcoder(self): return self.__load_hf_inference_api_model("bigcode/starcoder") @@ -88,6 +100,8 @@ class Models: return self.gpt3516k elif model_name == "gpt-4": return self.gpt4 + elif model_name == "claude-2": + return self.claude2 else: raise Exception(f"Unknown model {model_name}") diff --git a/continuedev/src/continuedev/libs/llm/anthropic.py b/continuedev/src/continuedev/libs/llm/anthropic.py new file mode 100644 index 00000000..2b8831f0 --- /dev/null +++ b/continuedev/src/continuedev/libs/llm/anthropic.py @@ -0,0 +1,81 @@ + +from functools import cached_property +import time +from typing import Any, Coroutine, Dict, Generator, List, Union +from ...core.main import ChatMessage +from anthropic import Anthropic, HUMAN_PROMPT, AI_PROMPT +from ..llm import LLM +from ..util.count_tokens import DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, prune_raw_prompt_from_top + + +class AnthropicLLM(LLM): + api_key: str + default_model: str + anthropic: Anthropic + + def __init__(self, api_key: str, default_model: str, system_message: str = None): + self.api_key = api_key + self.default_model = default_model + self.system_message = system_message + + self.anthropic = Anthropic(api_key) + + @cached_property + def name(self): + return self.default_model + + @property + def default_args(self): + return {**DEFAULT_ARGS, "model": self.default_model} + + def count_tokens(self, text: str): + return count_tokens(self.default_model, text) + + def __messages_to_prompt(self, messages: List[ChatMessage]) -> str: + prompt = "" + for msg in messages: + prompt += f"{HUMAN_PROMPT if msg.role == 'user' else AI_PROMPT} {msg.content} " + + return prompt + + async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, 
List, Dict], None, None]: + args = self.default_args.copy() + args.update(kwargs) + args["stream"] = True + + async for chunk in await self.anthropic.completions.create( + model=args["model"], + max_tokens_to_sample=args["max_tokens"], + prompt=f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}", + **args + ): + yield chunk.completion + + async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]: + args = self.default_args.copy() + args.update(kwargs) + args["stream"] = True + + messages = compile_chat_messages( + args["model"], messages, args["max_tokens"], functions=args.get("functions", None)) + async for chunk in await self.anthropic.completions.create( + model=args["model"], + max_tokens_to_sample=args["max_tokens"], + prompt=self.__messages_to_prompt(messages), + **args + ): + yield chunk.completion + + async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]: + args = {**self.default_args, **kwargs} + + messages = compile_chat_messages( + args["model"], with_history, args["max_tokens"], prompt, functions=None) + resp = (await self.anthropic.completions.create( + model=args["model"], + max_tokens_to_sample=args["max_tokens"], + prompt=self.__messages_to_prompt(messages), + **args + )).completion + + return resp diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py index 14a1cd41..3751dec2 100644 --- a/continuedev/src/continuedev/steps/chat.py +++ b/continuedev/src/continuedev/steps/chat.py @@ -28,7 +28,7 @@ class SimpleChatStep(Step): completion = "" messages = self.messages or await sdk.get_chat_context() - generator = sdk.models.gpt4.stream_chat(messages, temperature=0.5) + generator = sdk.models.default.stream_chat(messages, temperature=0.5) try: async for chunk in generator: if sdk.current_step_was_deleted(): -- cgit v1.2.3-70-g09d2 From 3ded151331933c9a1352cc46c3cc67c5733d1c86 Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sun, 16 Jul 2023 16:16:41 -0700 Subject: ggml --- continuedev/src/continuedev/core/sdk.py | 6 ++ continuedev/src/continuedev/libs/llm/ggml.py | 99 ++++++++++++++++++++++ .../src/continuedev/libs/util/count_tokens.py | 7 +- continuedev/src/continuedev/steps/chat.py | 2 +- continuedev/src/continuedev/steps/core/core.py | 8 +- 5 files changed, 118 insertions(+), 4 deletions(-) create mode 100644 continuedev/src/continuedev/libs/llm/ggml.py (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py index 8649cd58..22393746 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/continuedev/src/continuedev/core/sdk.py @@ -11,6 +11,7 @@ from ..models.filesystem_edit import FileEdit, FileSystemEdit, AddFile, DeleteFi from ..models.filesystem import RangeInFile from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI from ..libs.llm.openai import OpenAI +from ..libs.llm.ggml import GGML from .observation import Observation from ..server.ide_protocol import AbstractIdeProtocolServer from .main import Context, ContinueCustomException, HighlightedRangeContext, History, Step, ChatMessage, ChatMessageRole @@ -59,6 +60,10 @@ class Models: def gpt4(self): return self.__load_openai_model("gpt-4") + @cached_property + def ggml(self): + return GGML("", "ggml") + def __model_from_name(self, model_name: str): if model_name == "starcoder": return self.starcoder @@ -73,6 +78,7 @@ class Models: @property def default(self): + return self.ggml default_model = 
self.sdk.config.default_model
         return self.__model_from_name(default_model) if default_model is not None else self.gpt35
 
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
new file mode 100644
index 00000000..bef0d993
--- /dev/null
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -0,0 +1,99 @@
+from functools import cached_property
+import json
+from typing import Any, Coroutine, Dict, Generator, List, Union
+
+import aiohttp
+from ...core.main import ChatMessage
+import openai
+from ..llm import LLM
+from ..util.count_tokens import DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, prune_raw_prompt_from_top
+import certifi
+import ssl
+
+ca_bundle_path = certifi.where()
+ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
+
+SERVER_URL = "http://localhost:8000"
+
+
+class GGML(LLM):
+    api_key: str
+    default_model: str
+
+    def __init__(self, api_key: str, default_model: str, system_message: str = None):
+        self.api_key = api_key
+        self.default_model = default_model
+        self.system_message = system_message
+
+        openai.api_key = api_key
+
+    @cached_property
+    def name(self):
+        return self.default_model
+
+    @property
+    def default_args(self):
+        return {**DEFAULT_ARGS, "model": self.default_model}
+
+    def count_tokens(self, text: str):
+        return count_tokens(self.default_model, text)
+
+    async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+        args = self.default_args.copy()
+        args.update(kwargs)
+        args["stream"] = True
+
+        args = {**self.default_args, **kwargs}
+        messages = compile_chat_messages(
+            self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None))
+
+        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
+            async with session.post(f"{SERVER_URL}/v1/completions", json={
+                "messages": messages,
+                **args
+            }) as resp:
+                async for line in resp.content.iter_any():
+                    if line:
+                        try:
+                            yield line.decode("utf-8")
+                        except:
+                            raise Exception(str(line))
+
+    async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+        args = {**self.default_args, **kwargs}
+        messages = compile_chat_messages(
+            self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None))
+        args["stream"] = True
+
+        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
+            async with session.post(f"{SERVER_URL}/v1/chat/completions", json={
+                "messages": messages,
+                **args
+            }) as resp:
+                # This is streaming application/json instead of text/event-stream
+                async for line in resp.content.iter_chunks():
+                    if line[1]:
+                        try:
+                            json_chunk = line[0].decode("utf-8")
+                            if json_chunk.startswith(": ping - ") or json_chunk.startswith("data: [DONE]"):
+                                continue
+                            json_chunk = "{}" if json_chunk == "" else json_chunk
+                            chunks = json_chunk.split("\n")
+                            for chunk in chunks:
+                                if chunk.strip() != "":
+                                    yield json.loads(chunk[6:])["choices"][0]["delta"]
+                        except:
+                            raise Exception(str(line[0]))
+
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+        args = {**self.default_args, **kwargs}
+
+        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
+            async with session.post(f"{SERVER_URL}/v1/completions", json={
+                "messages": 
compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None), + **args + }) as resp: + try: + return await resp.text() + except: + raise Exception(await resp.text()) diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py index 73be0717..e1baeca1 100644 --- a/continuedev/src/continuedev/libs/util/count_tokens.py +++ b/continuedev/src/continuedev/libs/util/count_tokens.py @@ -3,13 +3,16 @@ from typing import Dict, List, Union from ...core.main import ChatMessage import tiktoken -aliases = {} +aliases = { + "ggml": "gpt-3.5-turbo", +} DEFAULT_MAX_TOKENS = 2048 MAX_TOKENS_FOR_MODEL = { "gpt-3.5-turbo": 4096, "gpt-3.5-turbo-0613": 4096, "gpt-3.5-turbo-16k": 16384, - "gpt-4": 8192 + "gpt-4": 8192, + "ggml": 2048 } CHAT_MODELS = { "gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-3.5-turbo-0613" diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py index a10319d8..1df1e0bf 100644 --- a/continuedev/src/continuedev/steps/chat.py +++ b/continuedev/src/continuedev/steps/chat.py @@ -27,7 +27,7 @@ class SimpleChatStep(Step): async def run(self, sdk: ContinueSDK): completion = "" messages = self.messages or await sdk.get_chat_context() - async for chunk in sdk.models.gpt4.stream_chat(messages, temperature=0.5): + async for chunk in sdk.models.default.stream_chat(messages, temperature=0.5): if sdk.current_step_was_deleted(): return diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py index 4b35a758..0b067d7d 100644 --- a/continuedev/src/continuedev/steps/core/core.py +++ b/continuedev/src/continuedev/steps/core/core.py @@ -4,6 +4,7 @@ import subprocess from textwrap import dedent from typing import Coroutine, List, Literal, Union +from ...libs.llm.ggml import GGML from ...models.main import Range from ...libs.llm.prompt_utils import MarkdownStyleEncoderDecoder from ...models.filesystem_edit import EditDiff, FileEdit, FileEditWithFullContents, FileSystemEdit @@ -180,7 +181,7 @@ class DefaultModelEditCodeStep(Step): # We don't know here all of the functions being passed in. # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion. # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need. 
- model_to_use = sdk.models.gpt4 + model_to_use = sdk.models.default max_tokens = DEFAULT_MAX_TOKENS TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1200 @@ -442,6 +443,11 @@ class DefaultModelEditCodeStep(Step): completion_lines_covered = 0 repeating_file_suffix = False line_below_highlighted_range = file_suffix.lstrip().split("\n")[0] + + if isinstance(model_to_use, GGML): + messages = [ChatMessage( + role="user", content=f"```\n{rif.contents}\n```\n{self.user_input}\n```\n", summary=self.user_input)] + async for chunk in model_to_use.stream_chat(messages, temperature=0, max_tokens=max_tokens): # Stop early if it is repeating the file_suffix or the step was deleted if repeating_file_suffix: -- cgit v1.2.3-70-g09d2 From a3f4a2a59d6785499f3ce0c4af80b57b02de1b1f Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sun, 16 Jul 2023 16:55:24 -0700 Subject: better prompt for editing --- continuedev/src/continuedev/core/config.py | 2 +- continuedev/src/continuedev/core/sdk.py | 5 ++-- continuedev/src/continuedev/libs/llm/ggml.py | 33 ++++++++------------------ continuedev/src/continuedev/steps/core/core.py | 5 ++-- 4 files changed, 17 insertions(+), 28 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py index 6e430c04..957609c5 100644 --- a/continuedev/src/continuedev/core/config.py +++ b/continuedev/src/continuedev/core/config.py @@ -76,7 +76,7 @@ class ContinueConfig(BaseModel): server_url: Optional[str] = None allow_anonymous_telemetry: Optional[bool] = True default_model: Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k", - "gpt-4"] = 'gpt-4' + "gpt-4", "ggml"] = 'gpt-4' custom_commands: Optional[List[CustomCommand]] = [CustomCommand( name="test", description="This is an example custom command. 
Use /config to edit it and create more", diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py index 9389e1e9..eb60109c 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/continuedev/src/continuedev/core/sdk.py @@ -82,7 +82,7 @@ class Models: @cached_property def ggml(self): - return GGML("", "ggml") + return GGML() def __model_from_name(self, model_name: str): if model_name == "starcoder": @@ -93,12 +93,13 @@ class Models: return self.gpt3516k elif model_name == "gpt-4": return self.gpt4 + elif model_name == "ggml": + return self.ggml else: raise Exception(f"Unknown model {model_name}") @property def default(self): - return self.ggml default_model = self.sdk.config.default_model return self.__model_from_name(default_model) if default_model is not None else self.gpt4 diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py index bef0d993..d3589b70 100644 --- a/continuedev/src/continuedev/libs/llm/ggml.py +++ b/continuedev/src/continuedev/libs/llm/ggml.py @@ -4,39 +4,27 @@ from typing import Any, Coroutine, Dict, Generator, List, Union import aiohttp from ...core.main import ChatMessage -import openai from ..llm import LLM -from ..util.count_tokens import DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, prune_raw_prompt_from_top -import certifi -import ssl - -ca_bundle_path = certifi.where() -ssl_context = ssl.create_default_context(cafile=ca_bundle_path) +from ..util.count_tokens import compile_chat_messages, DEFAULT_ARGS, count_tokens SERVER_URL = "http://localhost:8000" class GGML(LLM): - api_key: str - default_model: str - def __init__(self, api_key: str, default_model: str, system_message: str = None): - self.api_key = api_key - self.default_model = default_model + def __init__(self, system_message: str = None): self.system_message = system_message - openai.api_key = api_key - @cached_property def name(self): - return self.default_model + return "ggml" @property def default_args(self): - return {**DEFAULT_ARGS, "model": self.default_model} + return {**DEFAULT_ARGS, "model": self.name, "max_tokens": 1024} def count_tokens(self, text: str): - return count_tokens(self.default_model, text) + return count_tokens(self.name, text) async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]: args = self.default_args.copy() @@ -45,9 +33,9 @@ class GGML(LLM): args = {**self.default_args, **kwargs} messages = compile_chat_messages( - self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None)) + self.name, with_history, args["max_tokens"], prompt, functions=args.get("functions", None)) - async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: + async with aiohttp.ClientSession() as session: async with session.post(f"{SERVER_URL}/v1/completions", json={ "messages": messages, **args @@ -62,10 +50,10 @@ class GGML(LLM): async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]: args = {**self.default_args, **kwargs} messages = compile_chat_messages( - self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None)) + self.name, messages, args["max_tokens"], None, functions=args.get("functions", None)) args["stream"] = True - async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as 
session: + async with aiohttp.ClientSession() as session: async with session.post(f"{SERVER_URL}/v1/chat/completions", json={ "messages": messages, **args @@ -77,7 +65,6 @@ class GGML(LLM): json_chunk = line[0].decode("utf-8") if json_chunk.startswith(": ping - ") or json_chunk.startswith("data: [DONE]"): continue - json_chunk = "{}" if json_chunk == "" else json_chunk chunks = json_chunk.split("\n") for chunk in chunks: if chunk.strip() != "": @@ -88,7 +75,7 @@ class GGML(LLM): async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]: args = {**self.default_args, **kwargs} - async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: + async with aiohttp.ClientSession() as session: async with session.post(f"{SERVER_URL}/v1/completions", json={ "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None), **args diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py index 2c9d8c01..d5a7cd9a 100644 --- a/continuedev/src/continuedev/steps/core/core.py +++ b/continuedev/src/continuedev/steps/core/core.py @@ -192,7 +192,8 @@ class DefaultModelEditCodeStep(Step): # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion. # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need. model_to_use = sdk.models.default - max_tokens = DEFAULT_MAX_TOKENS + max_tokens = MAX_TOKENS_FOR_MODEL.get( + model_to_use.name, DEFAULT_MAX_TOKENS) / 2 TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1200 if model_to_use.count_tokens(rif.contents) > TOKENS_TO_BE_CONSIDERED_LARGE_RANGE: @@ -498,7 +499,7 @@ Please output the code to be inserted at the cursor in order to fulfill the user if isinstance(model_to_use, GGML): messages = [ChatMessage( - role="user", content=f"```\n{rif.contents}\n```\n{self.user_input}\n```\n", summary=self.user_input)] + role="user", content=f"```\n{rif.contents}\n```\n\nUser request: \"{self.user_input}\"\n\nThis is the code after changing to perfectly comply with the user request. 
It does not include any placeholder code, only real implementations:\n\n```\n", summary=self.user_input)] generator = model_to_use.stream_chat( messages, temperature=0, max_tokens=max_tokens) -- cgit v1.2.3-70-g09d2 From 2c27204336a2ca112c3c84058544d3b8656ac70f Mon Sep 17 00:00:00 2001 From: Nate Sesti Date: Sun, 16 Jul 2023 22:12:44 -0700 Subject: templated system messages --- continuedev/poetry.lock | 65 +++++++++++++++++++++- continuedev/pyproject.toml | 3 +- continuedev/src/continuedev/core/config.py | 1 + continuedev/src/continuedev/core/sdk.py | 10 ++-- continuedev/src/continuedev/libs/llm/ggml.py | 6 +- .../src/continuedev/libs/llm/hf_inference_api.py | 3 +- continuedev/src/continuedev/libs/llm/openai.py | 6 +- .../src/continuedev/libs/llm/proxy_server.py | 6 +- .../src/continuedev/libs/util/count_tokens.py | 7 ++- .../src/continuedev/libs/util/templating.py | 39 +++++++++++++ 10 files changed, 127 insertions(+), 19 deletions(-) create mode 100644 continuedev/src/continuedev/libs/util/templating.py (limited to 'continuedev/src') diff --git a/continuedev/poetry.lock b/continuedev/poetry.lock index a49a570f..625aabc9 100644 --- a/continuedev/poetry.lock +++ b/continuedev/poetry.lock @@ -297,6 +297,18 @@ files = [ {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, ] +[[package]] +name = "chevron" +version = "0.14.0" +description = "Mustache templating language renderer" +category = "main" +optional = false +python-versions = "*" +files = [ + {file = "chevron-0.14.0-py3-none-any.whl", hash = "sha256:fbf996a709f8da2e745ef763f482ce2d311aa817d287593a5b990d6d6e4f0443"}, + {file = "chevron-0.14.0.tar.gz", hash = "sha256:87613aafdf6d77b6a90ff073165a61ae5086e21ad49057aa0e53681601800ebf"}, +] + [[package]] name = "click" version = "8.1.3" @@ -600,6 +612,25 @@ files = [ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, ] +[[package]] +name = "importlib-resources" +version = "6.0.0" +description = "Read resources from Python packages" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "importlib_resources-6.0.0-py3-none-any.whl", hash = "sha256:d952faee11004c045f785bb5636e8f885bed30dc3c940d5d42798a2a4541c185"}, + {file = "importlib_resources-6.0.0.tar.gz", hash = "sha256:4cf94875a8368bd89531a756df9a9ebe1f150e0f885030b461237bc7f2d905f2"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + [[package]] name = "jsonref" version = "1.1.0" @@ -626,6 +657,8 @@ files = [ [package.dependencies] attrs = ">=17.4.0" +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" [package.extras] @@ -1024,6 +1057,18 @@ sql-other = ["SQLAlchemy (>=1.4.16)"] test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] xml = ["lxml (>=4.6.3)"] +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." 
+category = "main" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] + [[package]] name = "posthog" version = "3.0.1" @@ -1818,7 +1863,23 @@ files = [ idna = ">=2.0" multidict = ">=4.0" +[[package]] +name = "zipp" +version = "3.16.2" +description = "Backport of pathlib-compatible object wrapper for zip files" +category = "main" +optional = false +python-versions = ">=3.8" +files = [ + {file = "zipp-3.16.2-py3-none-any.whl", hash = "sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0"}, + {file = "zipp-3.16.2.tar.gz", hash = "sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"] + [metadata] lock-version = "2.0" -python-versions = "^3.9" -content-hash = "3ba2a7278fda36a059d76e227be94b0cb5e2efc9396b47a9642b916680214d9f" +python-versions = "^3.8.1" +content-hash = "82510deb9f4afb5bc38db0dfd88ad88005fa0b6221c24e8c1700c006360f3f88" diff --git a/continuedev/pyproject.toml b/continuedev/pyproject.toml index 6727e29a..3077de1c 100644 --- a/continuedev/pyproject.toml +++ b/continuedev/pyproject.toml @@ -6,7 +6,7 @@ authors = ["Nate Sesti "] readme = "README.md" [tool.poetry.dependencies] -python = "^3.8" +python = "^3.8.1" diff-match-patch = "^20230430" fastapi = "^0.95.1" typer = "^0.7.0" @@ -24,6 +24,7 @@ tiktoken = "^0.4.0" jsonref = "^1.1.0" jsonschema = "^4.17.3" directory-tree = "^0.0.3.1" +chevron = "^0.14.0" [tool.poetry.scripts] typegen = "src.continuedev.models.generate_json_schema:main" diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py index 957609c5..91a47c8e 100644 --- a/continuedev/src/continuedev/core/config.py +++ b/continuedev/src/continuedev/core/config.py @@ -85,6 +85,7 @@ class ContinueConfig(BaseModel): slash_commands: Optional[List[SlashCommand]] = DEFAULT_SLASH_COMMANDS on_traceback: Optional[List[OnTracebackSteps]] = [ OnTracebackSteps(step_name="DefaultOnTracebackStep")] + system_message: Optional[str] = None # Want to force these to be the slash commands for now @validator('slash_commands', pre=True) diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py index eb60109c..ac57c122 100644 --- a/continuedev/src/continuedev/core/sdk.py +++ b/continuedev/src/continuedev/core/sdk.py @@ -34,10 +34,12 @@ MODEL_PROVIDER_TO_ENV_VAR = { class Models: provider_keys: Dict[ModelProvider, str] = {} model_providers: List[ModelProvider] + system_message: str def __init__(self, sdk: "ContinueSDK", model_providers: List[ModelProvider]): self.sdk = sdk self.model_providers = model_providers + self.system_message = sdk.config.system_message @classmethod async def create(cls, sdk: "ContinueSDK", with_providers: List[ModelProvider] = ["openai"]) -> "Models": @@ -53,12 +55,12 @@ class Models: def __load_openai_model(self, model: str) -> OpenAI: api_key = self.provider_keys["openai"] if api_key == 
"": - return ProxyServer(self.sdk.ide.unique_id, model) - return OpenAI(api_key=api_key, default_model=model) + return ProxyServer(self.sdk.ide.unique_id, model, system_message=self.system_message) + return OpenAI(api_key=api_key, default_model=model, system_message=self.system_message) def __load_hf_inference_api_model(self, model: str) -> HuggingFaceInferenceAPI: api_key = self.provider_keys["hf_inference_api"] - return HuggingFaceInferenceAPI(api_key=api_key, model=model) + return HuggingFaceInferenceAPI(api_key=api_key, model=model, system_message=self.system_message) @cached_property def starcoder(self): @@ -82,7 +84,7 @@ class Models: @cached_property def ggml(self): - return GGML() + return GGML(system_message=self.system_message) def __model_from_name(self, model_name: str): if model_name == "starcoder": diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py index d3589b70..6007fdb4 100644 --- a/continuedev/src/continuedev/libs/llm/ggml.py +++ b/continuedev/src/continuedev/libs/llm/ggml.py @@ -33,7 +33,7 @@ class GGML(LLM): args = {**self.default_args, **kwargs} messages = compile_chat_messages( - self.name, with_history, args["max_tokens"], prompt, functions=args.get("functions", None)) + self.name, with_history, args["max_tokens"], prompt, functions=args.get("functions", None), system_message=self.system_message) async with aiohttp.ClientSession() as session: async with session.post(f"{SERVER_URL}/v1/completions", json={ @@ -50,7 +50,7 @@ class GGML(LLM): async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]: args = {**self.default_args, **kwargs} messages = compile_chat_messages( - self.name, messages, args["max_tokens"], None, functions=args.get("functions", None)) + self.name, messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message) args["stream"] = True async with aiohttp.ClientSession() as session: @@ -77,7 +77,7 @@ class GGML(LLM): async with aiohttp.ClientSession() as session: async with session.post(f"{SERVER_URL}/v1/completions", json={ - "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None), + "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message), **args }) as resp: try: diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/continuedev/src/continuedev/libs/llm/hf_inference_api.py index 803ba122..7e11fbbe 100644 --- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py +++ b/continuedev/src/continuedev/libs/llm/hf_inference_api.py @@ -11,9 +11,10 @@ class HuggingFaceInferenceAPI(LLM): api_key: str model: str - def __init__(self, api_key: str, model: str): + def __init__(self, api_key: str, model: str, system_message: str = None): self.api_key = api_key self.model = model + self.system_message = system_message # TODO: Nothing being done with this def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs): """Return the completion of the text with the given temperature.""" diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py index f0877d90..d973f19e 100644 --- a/continuedev/src/continuedev/libs/llm/openai.py +++ b/continuedev/src/continuedev/libs/llm/openai.py @@ -37,7 +37,7 @@ class OpenAI(LLM): if args["model"] in CHAT_MODELS: async for chunk in await 
openai.ChatCompletion.acreate( messages=compile_chat_messages( - args["model"], with_history, args["max_tokens"], prompt, functions=None), + args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message), **args, ): if "content" in chunk.choices[0].delta: @@ -58,7 +58,7 @@ class OpenAI(LLM): async for chunk in await openai.ChatCompletion.acreate( messages=compile_chat_messages( - args["model"], messages, args["max_tokens"], functions=args.get("functions", None)), + args["model"], messages, args["max_tokens"], functions=args.get("functions", None), system_message=self.system_message), **args, ): yield chunk.choices[0].delta @@ -69,7 +69,7 @@ class OpenAI(LLM): if args["model"] in CHAT_MODELS: resp = (await openai.ChatCompletion.acreate( messages=compile_chat_messages( - args["model"], with_history, args["max_tokens"], prompt, functions=None), + args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message), **args, )).choices[0].message.content else: diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py index eab6e441..3ec492f3 100644 --- a/continuedev/src/continuedev/libs/llm/proxy_server.py +++ b/continuedev/src/continuedev/libs/llm/proxy_server.py @@ -38,7 +38,7 @@ class ProxyServer(LLM): async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: async with session.post(f"{SERVER_URL}/complete", json={ - "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None), + "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message), "unique_id": self.unique_id, **args }) as resp: @@ -50,7 +50,7 @@ class ProxyServer(LLM): async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, Generator[Union[Any, List, Dict], None, None]]: args = {**self.default_args, **kwargs} messages = compile_chat_messages( - self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None)) + self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message) async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: async with session.post(f"{SERVER_URL}/stream_chat", json={ @@ -74,7 +74,7 @@ class ProxyServer(LLM): async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]: args = {**self.default_args, **kwargs} messages = compile_chat_messages( - self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None)) + self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None), system_message=self.system_message) async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session: async with session.post(f"{SERVER_URL}/stream_complete", json={ diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py index e1baeca1..1ca98fe6 100644 --- a/continuedev/src/continuedev/libs/util/count_tokens.py +++ b/continuedev/src/continuedev/libs/util/count_tokens.py @@ -1,6 +1,7 @@ import json from typing import Dict, List, Union from ...core.main import ChatMessage +from .templating import 
render_system_message
 import tiktoken
 
 aliases = {
@@ -85,13 +86,15 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], max_tokens: int,
         for function in functions:
             prompt_tokens += count_tokens(model, json.dumps(function))
 
+    rendered_system_message = render_system_message(system_message)
+
     msgs = prune_chat_history(model,
-                              msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + max_tokens + count_tokens(model, system_message))
+                              msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + max_tokens + count_tokens(model, rendered_system_message))
     history = []
     if system_message:
         history.append({
             "role": "system",
-            "content": system_message
+            "content": rendered_system_message
         })
     history += [msg.to_dict(with_functions=functions is not None)
                 for msg in msgs]
diff --git a/continuedev/src/continuedev/libs/util/templating.py b/continuedev/src/continuedev/libs/util/templating.py
new file mode 100644
index 00000000..ebfc2e31
--- /dev/null
+++ b/continuedev/src/continuedev/libs/util/templating.py
@@ -0,0 +1,39 @@
+import os
+import chevron
+
+
+def get_vars_in_template(template):
+    """
+    Get the variables in a template
+    """
+    return [token[1] for token in chevron.tokenizer.tokenize(template) if token[0] == 'variable']
+
+
+def escape_var(var: str) -> str:
+    """
+    Escape a variable so it can be used in a template
+    """
+    return var.replace(os.path.sep, '').replace('.', '')
+
+
+def render_system_message(system_message: str) -> str:
+    """
+    Render system message with mustache syntax.
+    Right now it only supports rendering absolute file paths as their contents.
+    """
+    vars = get_vars_in_template(system_message)
+
+    args = {}
+    for var in vars:
+        if var.startswith(os.path.sep):
+            # Escape vars which are filenames, because mustache doesn't allow / in variable names
+            escaped_var = escape_var(var)
+            system_message = system_message.replace(
+                var, escaped_var)
+
+            if os.path.exists(var):
+                args[escaped_var] = open(var, 'r').read()
+            else:
+                args[escaped_var] = ''
+
+    return chevron.render(system_message, args)
-- 
cgit v1.2.3-70-g09d2


From bfea0307075308eabc1a91283e56ab3b52ea880c Mon Sep 17 00:00:00 2001
From: Ty Dunn
Date: Mon, 17 Jul 2023 10:19:54 -0500
Subject: use difflib to give edit change description

---
 continuedev/src/continuedev/steps/core/core.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

(limited to 'continuedev/src')

diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index d5a7cd9a..41988000 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -1,6 +1,7 @@
 # These steps are depended upon by ContinueSDK
 import os
 import subprocess
+import difflib
 from textwrap import dedent
 from typing import Coroutine, List, Literal, Union
 
@@ -172,13 +173,12 @@ class DefaultModelEditCodeStep(Step):
         if self._previous_contents.strip() == self._new_contents.strip():
             description = "No edits were made"
         else:
+            changes = '\n'.join(difflib.ndiff(self._previous_contents.splitlines(), self._new_contents.splitlines()))
             description = await models.gpt3516k.complete(dedent(f"""\
-            ```original
-            {self._previous_contents}
-            ```
+            Diff summary: "{self.user_input}"
 
-            ```new
-            {self._new_contents}
+            ```diff
+            {changes}
             ```
 
             Please give a brief description of the changes made above using markdown bullet points. 
Be concise:""")) -- cgit v1.2.3-70-g09d2 From f509a2b30d8bee581d1bfd91586acc54e9209599 Mon Sep 17 00:00:00 2001 From: Ty Dunn Date: Mon, 17 Jul 2023 11:36:21 -0500 Subject: align on `code section` --- continuedev/src/continuedev/core/policy.py | 2 +- extension/react-app/src/components/ComboBox.tsx | 6 +++--- extension/react-app/src/components/PillButton.tsx | 4 ++-- 3 files changed, 6 insertions(+), 6 deletions(-) (limited to 'continuedev/src') diff --git a/continuedev/src/continuedev/core/policy.py b/continuedev/src/continuedev/core/policy.py index bc897357..d007c92b 100644 --- a/continuedev/src/continuedev/core/policy.py +++ b/continuedev/src/continuedev/core/policy.py @@ -58,7 +58,7 @@ class DemoPolicy(Policy): if history.get_current() is None: return ( MessageStep(name="Welcome to Continue", message=dedent("""\ - - Highlight code and ask a question or give instructions + - Highlight code section and ask a question or give instructions - Use `cmd+m` (Mac) / `ctrl+m` (Windows) to open Continue - Use `/help` to ask questions about how to use Continue""")) >> WelcomeStep() >> diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx index dbebd534..8136399a 100644 --- a/extension/react-app/src/components/ComboBox.tsx +++ b/extension/react-app/src/components/ComboBox.tsx @@ -245,11 +245,11 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { props.onToggleAddContext(); }} > - Highlight to Add Context + Highlight code section ) : ( { props.onToggleAddContext(); }} @@ -261,7 +261,7 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
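
The difflib change in the edit-description step above swaps two full file snapshots for a single line-level diff in the summarization prompt. Below is a minimal, self-contained sketch of that prompt assembly; the file contents and user request are made-up placeholders, and the model call is stubbed out with a print, so only the diff/prompt shape mirrors the patch:

import difflib

previous_contents = "def add(a, b):\n    return a + b\n"  # placeholder "before"
new_contents = "def add(a, b):\n    # TODO: validate inputs\n    return a + b\n"  # placeholder "after"
user_input = "add a TODO about validation"  # placeholder request

# ndiff yields a line-by-line comparison ("- ", "+ ", "  " prefixes),
# which is much smaller than sending both full files to the model.
changes = "\n".join(
    difflib.ndiff(previous_contents.splitlines(), new_contents.splitlines())
)

prompt = (
    f'Diff summary: "{user_input}"\n\n'
    f"```diff\n{changes}\n```\n\n"
    "Please give a brief description of the changes made above using markdown bullet points. Be concise:"
)

print(prompt)  # in the real step this text is sent to models.gpt3516k.complete(...)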