Diffstat (limited to 'continuedev/src')
21 files changed, 175 insertions, 83 deletions
diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index 703a73af..3ccce89a 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -13,6 +13,7 @@ from ..steps.core.core import ReversibleStep, ManualEditStep, UserInputStep
 from ..libs.util.telemetry import capture_event
 from .sdk import ContinueSDK
 import asyncio
+from ..libs.util.step_name_to_steps import get_step_from_name
 
 
 class Autopilot(ContinueBaseModel):
@@ -88,9 +89,15 @@ class Autopilot(ContinueBaseModel):
         self._manual_edits_buffer.append(edit)
         # TODO: You're storing a lot of unnecessary data here. Can compress into EditDiffs on the spot, and merge.
         # self._manual_edits_buffer = merge_file_edit(self._manual_edits_buffer, edit)
+    # Note that this is being overridden to do nothing in DemoAgent
 
-    def handle_traceback(self, traceback: str):
-        raise NotImplementedError
+    async def handle_command_output(self, output: str):
+        is_traceback = False
+        if is_traceback:
+            for tb_step in self.continue_sdk.config.on_traceback:
+                step = get_step_from_name(tb_step.step_name)(
+                    output=output, **tb_step.params)
+                await self._run_singular_step(step)
 
     _step_depth: int = 0
 
@@ -99,9 +106,19 @@
     async def delete_at_index(self, index: int):
         self.history.timeline[index].step.hide = True
+        self.history.timeline[index].deleted = True
         await self.update_subscribers()
 
     async def _run_singular_step(self, step: "Step", is_future_step: bool = False) -> Coroutine[Observation, None, None]:
+        # If a parent step is deleted/cancelled, don't run this step
+        last_depth = self._step_depth
+        i = self.history.current_index
+        while i >= 0 and self.history.timeline[i].depth > last_depth:
+            if self.history.timeline[i].deleted:
+                return None
+            last_depth = self.history.timeline[i].depth
+            i -= 1
+
         capture_event(self.continue_sdk.ide.unique_id, 'step run', {
                       'step_name': step.name, 'params': step.dict()})
@@ -114,7 +131,7 @@
             await self._run_singular_step(manualEditsStep)
 
         # Update history - do this first so we get top-first tree ordering
-        self.history.add_node(HistoryNode(
+        index_of_history_node = self.history.add_node(HistoryNode(
             step=step, observation=None, depth=self._step_depth))
 
         # Call all subscribed callbacks
@@ -127,6 +144,10 @@
         try:
             observation = await step(self.continue_sdk)
         except Exception as e:
+            if self.history.timeline[index_of_history_node].deleted:
+                # If step was deleted/cancelled, don't show error or allow retry
+                return None
+
             caught_error = True
 
             is_continue_custom_exception = issubclass(
@@ -176,8 +197,7 @@
         # Add observation to history, unless already attached error observation
         if not caught_error:
-            self.history.get_last_at_depth(
-                self._step_depth, include_current=True).observation = observation
+            self.history.timeline[index_of_history_node].observation = observation
             await self.update_subscribers()
 
         # Update its description
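The new handle_command_output hook stubs out detection with a hardcoded `is_traceback = False`, so no traceback steps fire yet. A minimal heuristic that could slot in there — an assumption for illustration, not part of this commit — might look like:

```python
# Hypothetical stand-in for the `is_traceback = False` placeholder above.
def looks_like_traceback(output: str) -> bool:
    markers = ("Traceback (most recent call last):", "SyntaxError:", "IndentationError:")
    return any(marker in output for marker in markers)

assert looks_like_traceback("Traceback (most recent call last):\n  File ...")
assert not looks_like_traceback("All tests passed")
```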
diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py
index 23be8133..d8b29f5b 100644
--- a/continuedev/src/continuedev/core/config.py
+++ b/continuedev/src/continuedev/core/config.py
@@ -12,6 +12,11 @@ class SlashCommand(BaseModel):
     params: Optional[Dict] = {}
 
 
+class OnTracebackSteps(BaseModel):
+    step_name: str
+    params: Optional[Dict] = {}
+
+
 class ContinueConfig(BaseModel):
     """
     A pydantic class for the continue config file.
@@ -48,6 +53,8 @@ class ContinueConfig(BaseModel):
             step_name="FeedbackStep",
         )
     ]
+    on_traceback: Optional[List[OnTracebackSteps]] = [
+        OnTracebackSteps(step_name="DefaultOnTracebackStep")]
 
 
 def load_config(config_file: str) -> ContinueConfig:
diff --git a/continuedev/src/continuedev/core/main.py b/continuedev/src/continuedev/core/main.py
index f6b26d69..0c7ec67f 100644
--- a/continuedev/src/continuedev/core/main.py
+++ b/continuedev/src/continuedev/core/main.py
@@ -11,6 +11,8 @@
 ChatMessageRole = Literal["assistant", "user", "system"]
 
 
 class ChatMessage(ContinueBaseModel):
     role: ChatMessageRole
     content: str
+    # A summary for pruning chat context to fit context window. Often the Step name.
+    summary: str
@@ -18,11 +20,12 @@ class HistoryNode(ContinueBaseModel):
     step: "Step"
     observation: Union[Observation, None]
     depth: int
+    deleted: bool = False
 
     def to_chat_messages(self) -> List[ChatMessage]:
         if self.step.description is None:
             return self.step.chat_context
-        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description)]
+        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description, summary=self.step.name)]
 
 
 class History(ContinueBaseModel):
@@ -37,9 +40,11 @@ class History(ContinueBaseModel):
             msgs += node.to_chat_messages()
         return msgs
 
-    def add_node(self, node: HistoryNode):
+    def add_node(self, node: HistoryNode) -> int:
+        """ Add node and return the index where it was added """
         self.timeline.insert(self.current_index + 1, node)
         self.current_index += 1
+        return self.current_index
 
     def get_current(self) -> Union[HistoryNode, None]:
         if self.current_index < 0:
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 76f72d01..8aea6b7f 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -165,26 +165,29 @@ class ContinueSDK(AbstractContinueSDK):
     def raise_exception(self, message: str, title: str, with_step: Union[Step, None] = None):
         raise ContinueCustomException(message, title, with_step)
 
-    def add_chat_context(self, content: str, role: ChatMessageRole = "assistent"):
+    def add_chat_context(self, content: str, summary: Union[str, None] = None, role: ChatMessageRole = "assistant"):
         self.history.timeline[self.history.current_index].step.chat_context.append(
-            ChatMessage(content=content, role=role))
+            ChatMessage(content=content, role=role, summary=summary))
 
     async def get_chat_context(self) -> List[ChatMessage]:
         history_context = self.history.to_chat_history()
         highlighted_code = await self.ide.getHighlightedCode()
+
+        preface = "The following code is highlighted"
+        if len(highlighted_code) == 0:
+            preface = "The following file is open"
 
         # Get the full contents of all open files
         files = await self.ide.getOpenFiles()
-        contents = {}
-        for file in files:
-            contents[file] = await self.ide.readFile(file)
+        if len(files) > 0:
+            content = await self.ide.readFile(files[0])
+            highlighted_code = [
+                RangeInFile.from_entire_file(files[0], content)]
 
-        highlighted_code = [RangeInFile.from_entire_file(
-            filepath, content) for filepath, content in contents.items()]
         for rif in highlighted_code:
             code = await self.ide.readRangeInFile(rif)
             history_context.append(ChatMessage(
-                content=f"The following code is highlighted:\n```\n{code}\n```", role="user"))
+                content=f"{preface} ({rif.filepath}):\n```\n{code}\n```", role="user", summary=f"{preface}: {rif.filepath}"))
         return history_context
 
     async def update_ui(self):
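`add_node` now returns the insertion index, which autopilot.py stores as `index_of_history_node` so later code can address that exact node even after the timeline grows. One caveat worth flagging: `ChatMessage.summary` is declared without a default, so call sites that omit it — or pass `None`, as `add_chat_context` can — would presumably fail pydantic validation; `Union[str, None]` may have been intended. A self-contained sketch of the new add_node contract (Node/History are simplified stand-ins for the pydantic models):

```python
from dataclasses import dataclass, field
from typing import List, Optional

@dataclass
class Node:
    name: str
    observation: Optional[str] = None
    deleted: bool = False

@dataclass
class History:
    timeline: List[Node] = field(default_factory=list)
    current_index: int = -1

    def add_node(self, node: Node) -> int:
        """Insert after the current node and return the new node's index."""
        self.timeline.insert(self.current_index + 1, node)
        self.current_index += 1
        return self.current_index

h = History()
i = h.add_node(Node("step"))
h.timeline[i].observation = "ok"  # direct lookup replaces the get_last_at_depth() search
assert (i, h.current_index) == (0, 0)
```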
diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 2986b2c4..108eedf1 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -1,5 +1,5 @@
 from abc import ABC
-from typing import Any, Dict, Generator, List, Union
+from typing import Any, Coroutine, Dict, Generator, List, Union
 
 from ...core.main import ChatMessage
 from ...models.main import AbstractModel
@@ -9,17 +9,14 @@ from pydantic import BaseModel
 class LLM(ABC):
     system_message: Union[str, None] = None
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs):
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         """Return the completion of the text with the given temperature."""
-        raise
+        raise NotImplementedError
 
     def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         """Yield a stream of chat messages."""
         raise NotImplementedError
 
-    def __call__(self, prompt: str, **kwargs):
-        return self.complete(prompt, **kwargs)
-
     def with_system_message(self, system_message: Union[str, None]):
         """Return a new model with the given system message."""
         raise NotImplementedError
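With `complete` now a coroutine and `__call__` removed, every call site changes from `llm(prompt)` or a bare `llm.complete(prompt)` to `await llm.complete(prompt)` — which is exactly what the recipe and step diffs below do. A minimal conforming implementation (EchoLLM is hypothetical, for illustration only):

```python
import asyncio
from typing import List

class EchoLLM:
    """Toy LLM honoring the new async interface."""
    async def complete(self, prompt: str, with_history: List = [], **kwargs) -> str:
        return prompt.upper()

async def main():
    llm = EchoLLM()
    print(await llm.complete("hello"))  # llm("hello") no longer works: __call__ is gone

asyncio.run(main())
```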
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 180ea5f0..ec285d55 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -1,7 +1,7 @@
 import asyncio
 from functools import cached_property
 import time
-from typing import Any, Dict, Generator, List, Union
+from typing import Any, Coroutine, Dict, Generator, List, Union
 from ...core.main import ChatMessage
 import openai
 import aiohttp
@@ -42,12 +42,37 @@ class OpenAI(LLM):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))
 
     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
-        tokens = tokens_for_completion
-        for i in range(len(chat_history) - 1, -1, -1):
-            message = chat_history[i]
-            tokens += self.count_tokens(message.content)
-            if tokens > max_tokens:
-                return chat_history[i + 1:]
+        total_tokens = tokens_for_completion + \
+            sum(self.count_tokens(message.content) for message in chat_history)
+
+        # 1. Replace beyond last 5 messages with summary
+        i = 0
+        while total_tokens > max_tokens and i < len(chat_history) - 5:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 2. Remove entire messages until the last 5
+        while len(chat_history) > 5 and total_tokens > max_tokens:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
+        # 3. Truncate message in the last 5
+        i = 0
+        while total_tokens > max_tokens:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 4. Remove entire messages in the last 5
+        while total_tokens > max_tokens and len(chat_history) > 0:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
         return chat_history
 
     def with_system_message(self, system_message: Union[str, None]):
@@ -78,7 +103,7 @@
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -107,7 +132,7 @@
             for chunk in generator:
                 yield chunk.choices[0].text
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         t1 = time.time()
 
         self.completion_count += 1
@@ -115,12 +140,12 @@
                 "frequency_penalty": 0, "presence_penalty": 0, "stream": False} | kwargs
 
         if args["model"] in CHAT_MODELS:
-            resp = openai.ChatCompletion.create(
+            resp = await openai.ChatCompletion.acreate(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
             ).choices[0].message.content
         else:
-            resp = openai.Completion.create(
+            resp = await openai.Completion.acreate(
                 prompt=prompt,
                 **args,
             ).choices[0].text
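The new `__prune_chat_history` degrades gracefully in four stages: summarize old messages, drop old messages, summarize the recent ones, then drop the recent ones. Note that stages 1 and 3 as committed read `chat_history[0]` while incrementing `i`, so they repeatedly rewrite the first message, and stage 3 can spin forever if a summary is no shorter than its content. The self-contained sketch below assumes the intended behavior (indexing by position, bounded loops), with a whitespace tokenizer standing in for tiktoken:

```python
from dataclasses import dataclass
from typing import List

@dataclass
class Msg:
    content: str
    summary: str

def count_tokens(text: str) -> int:
    return len(text.split())  # stand-in for the tiktoken encoder

def prune(history: List[Msg], max_tokens: int, tokens_for_completion: int) -> List[Msg]:
    total = tokens_for_completion + sum(count_tokens(m.content) for m in history)
    # 1. Replace messages beyond the last 5 with their summaries
    i = 0
    while total > max_tokens and i < len(history) - 5:
        m = history[i]
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary
        i += 1
    # 2. Drop whole messages until only the last 5 remain
    while len(history) > 5 and total > max_tokens:
        total -= count_tokens(history.pop(0).content)
    # 3. Replace messages within the last 5 with their summaries
    for m in history:
        if total <= max_tokens:
            break
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary
    # 4. Drop whole messages within the last 5
    while total > max_tokens and history:
        total -= count_tokens(history.pop(0).content)
    return history

msgs = [Msg("one two three four five", "short") for _ in range(8)]
pruned = prune(msgs, max_tokens=10, tokens_for_completion=2)
assert sum(count_tokens(m.content) for m in pruned) + 2 <= 10
```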
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 4ff57101..4227042f 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -1,8 +1,9 @@
 from functools import cached_property
 import json
-from typing import Any, Dict, Generator, List, Literal, Union
+from typing import Any, Coroutine, Dict, Generator, List, Literal, Union
 import requests
 import tiktoken
+import aiohttp
 
 from ...core.main import ChatMessage
 from ..llm import LLM
@@ -39,16 +40,6 @@ class ProxyServer(LLM):
     def count_tokens(self, text: str):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))
 
-    def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
-        resp = requests.post(f"{SERVER_URL}/stream_complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        }, stream=True)
-        for line in resp.iter_lines():
-            if line:
-                yield line.decode("utf-8")
-
     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
         tokens = tokens_for_completion
         for i in range(len(chat_history) - 1, -1, -1):
@@ -67,7 +58,7 @@ class ProxyServer(LLM):
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -75,11 +66,25 @@ class ProxyServer(LLM):
 
         return history
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                try:
+                    return json.loads(await resp.text())
+                except json.JSONDecodeError:
+                    raise Exception(await resp.text())
 
-        resp = requests.post(f"{SERVER_URL}/complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        })
-        return json.loads(resp.text)
+    async def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/stream_complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                async for line in resp.content:
+                    if line:
+                        yield line.decode("utf-8")
diff --git a/continuedev/src/continuedev/libs/util/step_name_to_steps.py b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
index 4dd9c430..2c4474af 100644
--- a/continuedev/src/continuedev/libs/util/step_name_to_steps.py
+++ b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
@@ -6,13 +6,25 @@
 from ...steps.main import EditHighlightedCodeStep
 from ...steps.chat import SimpleChatStep
 from ...steps.comment_code import CommentCodeStep
 from ...steps.feedback import FeedbackStep
+from ...recipes.AddTransformRecipe.main import AddTransformRecipe
+from ...recipes.CreatePipelineRecipe.main import CreatePipelineRecipe
+from ...recipes.DDtoBQRecipe.main import DDtoBQRecipe
+from ...recipes.DeployPipelineAirflowRecipe.main import DeployPipelineAirflowRecipe
+from ...steps.on_traceback import DefaultOnTracebackStep
 
+# This mapping is used to convert from string in ContinueConfig json to corresponding Step class.
+# Used for example in slash_commands and steps_on_startup
 step_name_to_step_class = {
     "UserInputStep": UserInputStep,
     "EditHighlightedCodeStep": EditHighlightedCodeStep,
     "SimpleChatStep": SimpleChatStep,
     "CommentCodeStep": CommentCodeStep,
     "FeedbackStep": FeedbackStep,
+    "AddTransformRecipe": AddTransformRecipe,
+    "CreatePipelineRecipe": CreatePipelineRecipe,
+    "DDtoBQRecipe": DDtoBQRecipe,
+    "DeployPipelineAirflowRecipe": DeployPipelineAirflowRecipe,
+    "DefaultOnTracebackStep": DefaultOnTracebackStep,
 }
 
 
@@ -22,4 +34,4 @@ def get_step_from_name(step_name: str, params: Dict) -> Step:
     except:
         print(
             f"Incorrect parameters for step {step_name}. Parameters provided were: {params}")
-        raise
\ No newline at end of file
+        raise
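One wrinkle worth noting: `get_step_from_name` takes `(step_name, params)` and returns an instantiated step, but the call in autopilot.py at the top of this diff passes only the name and instantiates the returned value itself — presumably one of the two sides needs adjusting. The registry pattern itself is simple; a self-contained sketch (GreetStep and the registry contents are hypothetical):

```python
from dataclasses import dataclass
from typing import Dict

@dataclass
class GreetStep:
    name: str = "hello"

STEP_REGISTRY = {"GreetStep": GreetStep}  # string from config -> Step class

def get_step_from_name(step_name: str, params: Dict):
    try:
        return STEP_REGISTRY[step_name](**params)
    except TypeError:
        print(f"Incorrect parameters for step {step_name}. Parameters provided were: {params}")
        raise

assert get_step_from_name("GreetStep", {"name": "hi"}).name == "hi"
```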
diff --git a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
index 096b41c6..3fba1112 100644
--- a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
+++ b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
@@ -29,8 +29,8 @@
     async def run(self, sdk: ContinueSDK):
         sdk.context.set("api_description", self.api_description)
 
-        source_name = sdk.models.gpt35.complete(
-            f"Write a snake_case name for the data source described by {self.api_description}: ").strip()
+        source_name = (await sdk.models.gpt35.complete(
+            f"Write a snake_case name for the data source described by {self.api_description}: ")).strip()
         filename = f'{source_name}.py'
 
         # running commands to get started when creating a new dlt pipeline
@@ -91,7 +91,7 @@ class ValidatePipelineStep(Step):
         if "Traceback" in output or "SyntaxError" in output:
             output = "Traceback" + output.split("Traceback")[-1]
             file_content = await sdk.ide.readFile(os.path.join(workspace_dir, filename))
-            suggestion = sdk.models.gpt35.complete(dedent(f"""\
+            suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                 ```python
                 {file_content}
                 ```
@@ -103,7 +103,7 @@ class ValidatePipelineStep(Step):
 
                 This is a brief summary of the error followed by a suggestion on how it can be fixed by editing the resource function:"""))
 
-            api_documentation_url = sdk.models.gpt35.complete(dedent(f"""\
+            api_documentation_url = await sdk.models.gpt35.complete(dedent(f"""\
                 The API I am trying to call is the '{sdk.context.get('api_description')}'. I tried calling it in the @resource function like this:
                 ```python
                 {file_content}
@@ -159,7 +159,7 @@ class RunQueryStep(Step):
         output = await sdk.run('.env/bin/python3 query.py', name="Run test query", description="Running `.env/bin/python3 query.py` to test that the data was loaded into DuckDB as expected", handle_error=False)
 
         if "Traceback" in output or "SyntaxError" in output:
-            suggestion = sdk.models.gpt35.complete(dedent(f"""\
+            suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                 ```python
                 {await sdk.ide.readFile(os.path.join(sdk.ide.workspace_directory, "query.py"))}
                 ```
diff --git a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
index 6db9fd4b..df414e2e 100644
--- a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
+++ b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
@@ -82,7 +82,7 @@ class LoadDataStep(Step):
                 docs = f.read()
 
             output = "Traceback" + output.split("Traceback")[-1]
-            suggestion = sdk.models.default.complete(dedent(f"""\
+            suggestion = await sdk.models.default.complete(dedent(f"""\
                 When trying to load data into BigQuery, the following error occurred:
 
                 ```ascii
diff --git a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
index 688f44c3..6e1244b3 100644
--- a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
+++ b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
@@ -41,7 +41,7 @@ class WritePytestsRecipe(Step):
             "{self.user_input}"
 
             Here is a complete set of pytest unit tests:""")
-        tests = sdk.models.gpt35.complete(prompt)
+        tests = await sdk.models.gpt35.complete(prompt)
 
         await sdk.apply_filesystem_edit(AddFile(filepath=path, content=tests))
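All of the recipe changes above follow the same mechanical pattern: `complete()` is now a coroutine, so each call gains an `await`, and chained calls like `.strip()` need parentheses around the `await` expression — `await llm.complete(p).strip()` would look up `.strip` on the coroutine object and raise AttributeError before anything is awaited. A minimal demonstration (FakeLLM is a stand-in):

```python
import asyncio

class FakeLLM:
    async def complete(self, prompt: str) -> str:
        return "  chess_com_api  "

async def main():
    llm = FakeLLM()
    # Parentheses force the await before .strip() runs on the resulting str.
    source_name = (await llm.complete("Write a snake_case name: ")).strip()
    assert source_name == "chess_com_api"

asyncio.run(main())
```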
diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py
index c53149d8..c66cc142 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/continuedev/src/continuedev/server/ide.py
@@ -1,5 +1,4 @@
 # This is a separate server from server/main.py
-import asyncio
 from functools import cached_property
 import json
 import os
@@ -10,11 +9,13 @@
 from uvicorn.main import Server
 
 from ..libs.util.queue import AsyncSubscriptionQueue
 from ..models.filesystem import FileSystem, RangeInFile, EditDiff, RealFileSystem
-from ..models.main import Traceback
 from ..models.filesystem_edit import AddDirectory, AddFile, DeleteDirectory, DeleteFile, FileSystemEdit, FileEdit, FileEditWithFullContents, RenameDirectory, RenameFile, SequentialFileSystemEdit
 from pydantic import BaseModel
 from .gui import SessionManager, session_manager
 from .ide_protocol import AbstractIdeProtocolServer
+import asyncio
+import nest_asyncio
+nest_asyncio.apply()
 
 router = APIRouter(prefix="/ide", tags=["ide"])
@@ -135,6 +136,9 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
             fileEdits = list(
                 map(lambda d: FileEditWithFullContents.parse_obj(d), data["fileEdits"]))
             self.onFileEdits(fileEdits)
+        elif message_type == "commandOutput":
+            output = data["output"]
+            self.onCommandOutput(output)
         elif message_type in ["highlightedCode", "openFiles", "readFile", "editFile", "workspaceDirectory", "getUserSecret", "runCommand", "uniqueId"]:
             self.sub_queue.post(message_type, data)
         else:
@@ -189,11 +193,6 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
     def onAcceptRejectSuggestion(self, suggestionId: str, accepted: bool):
         pass
 
-    def onTraceback(self, traceback: Traceback):
-        # Same as below, maybe not every autopilot?
-        for _, session in self.session_manager.sessions.items():
-            session.autopilot.handle_traceback(traceback)
-
     def onFileSystemUpdate(self, update: FileSystemEdit):
         # Access to Autopilot (so SessionManager)
         pass
@@ -211,6 +210,13 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         for _, session in self.session_manager.sessions.items():
             session.autopilot.handle_manual_edits(edits)
 
+    def onCommandOutput(self, output: str):
+        # Send the output to ALL autopilots.
+        # Maybe not ideal behavior
+        for _, session in self.session_manager.sessions.items():
+            asyncio.create_task(
+                session.autopilot.handle_command_output(output))
+
     # Request information. Session doesn't matter.
     async def getOpenFiles(self) -> List[str]:
         resp = await self._send_and_receive_json({}, OpenFilesResponse, "openFiles")
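`onCommandOutput` is called from the synchronous websocket dispatch path, so it schedules `handle_command_output` with `asyncio.create_task` rather than awaiting it, fanning the output out to every session without blocking the IDE connection (the comment concedes broadcasting to all autopilots may not be ideal). A standalone sketch of that fire-and-forget pattern, with a toy Autopilot:

```python
import asyncio

class Autopilot:
    async def handle_command_output(self, output: str) -> None:
        await asyncio.sleep(0)  # the real handler may run traceback steps here
        print(f"autopilot got: {output!r}")

sessions = {"s1": Autopilot(), "s2": Autopilot()}

def on_command_output(output: str) -> None:
    # Schedule one task per session; the caller returns immediately.
    for autopilot in sessions.values():
        asyncio.create_task(autopilot.handle_command_output(output))

async def main():
    on_command_output("Traceback (most recent call last): ...")
    await asyncio.sleep(0.1)  # give the fire-and-forget tasks time to finish

asyncio.run(main())
```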
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 7cfe7e0c..499d127f 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -11,9 +11,9 @@ class SimpleChatStep(Step):
     async def run(self, sdk: ContinueSDK):
         self.description = f"## {self.user_input}\n\n"
-        for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()):
+        async for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()):
             self.description += chunk
             await sdk.update_ui()
 
-        self.name = sdk.models.gpt35.complete(
-            f"Write a short title for the following chat message: {self.description}").strip()
+        self.name = (await sdk.models.gpt35.complete(
+            f"Write a short title for the following chat message: {self.description}")).strip()
diff --git a/continuedev/src/continuedev/steps/chroma.py b/continuedev/src/continuedev/steps/chroma.py
index 058455b2..9d085981 100644
--- a/continuedev/src/continuedev/steps/chroma.py
+++ b/continuedev/src/continuedev/steps/chroma.py
@@ -56,7 +56,7 @@ class AnswerQuestionChroma(Step):
 
             Here is the answer:""")
 
-        answer = sdk.models.gpt35.complete(prompt)
+        answer = await sdk.models.gpt35.complete(prompt)
         # Make paths relative to the workspace directory
         answer = answer.replace(await sdk.ide.getWorkspaceDirectory(), "")
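`stream_chat` is now an async generator, so `SimpleChatStep` switches from `for` to `async for`, letting it await `sdk.update_ui()` between chunks while the HTTP stream is still open. The shape of that loop, reduced to a runnable sketch:

```python
import asyncio
from typing import AsyncGenerator

async def stream_chat(prompt: str) -> AsyncGenerator[str, None]:
    for chunk in ["Hello", ", ", "world"]:  # stand-in for streamed model output
        yield chunk

async def main():
    description = ""
    async for chunk in stream_chat("hi"):
        description += chunk  # the real step also awaits sdk.update_ui() here
    assert description == "Hello, world"

asyncio.run(main())
```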
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index af3c6cc2..7f3a93ba 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -72,7 +72,7 @@ class ShellCommandsStep(Step):
             return f"Error when running shell commands:\n```\n{self._err_text}\n```"
 
         cmds_str = "\n".join(self.cmds)
-        return models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:")
+        return await models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:")
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
         cwd = await sdk.ide.getWorkspaceDirectory() if self.cwd is None else self.cwd
@@ -80,7 +80,7 @@
         for cmd in self.cmds:
             output = await sdk.ide.runCommand(cmd)
             if self.handle_error and output is not None and output_contains_error(output):
-                suggestion = sdk.models.gpt35.complete(dedent(f"""\
+                suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                     While running the command `{cmd}`, the following error occurred:
 
                     ```ascii
@@ -151,10 +151,8 @@ class DefaultModelEditCodeStep(Step):
     _prompt_and_completion: str = ""
 
     async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        description = models.gpt35.complete(
+        description = await models.gpt35.complete(
             f"{self._prompt_and_completion}\n\nPlease give a brief description of the changes made above using markdown bullet points. Be concise and only mention changes made to the commit before, not prefix or suffix:")
-        # self.name = models.gpt35.complete(
-        #     f"Write a short title for this description: {description}")
         return description
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
@@ -183,7 +181,7 @@
         prompt = self._prompt.format(
             code=rif.contents, user_request=self.user_input, file_prefix=segs[0], file_suffix=segs[1])
 
-        completion = str(sdk.models.default.complete(prompt, with_history=await sdk.get_chat_context()))
+        completion = str(await sdk.models.default.complete(prompt, with_history=await sdk.get_chat_context()))
 
         eot_token = "<|endoftext|>"
         completion = completion.removesuffix(eot_token)
diff --git a/continuedev/src/continuedev/steps/draft/migration.py b/continuedev/src/continuedev/steps/draft/migration.py
index 7c4b7eb5..f3b36b5e 100644
--- a/continuedev/src/continuedev/steps/draft/migration.py
+++ b/continuedev/src/continuedev/steps/draft/migration.py
@@ -13,7 +13,7 @@ class MigrationStep(Step):
         recent_edits = await sdk.ide.get_recent_edits(self.edited_file)
         recent_edits_string = "\n\n".join(
             map(lambda x: x.to_string(), recent_edits))
-        description = sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
+        description = await sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
         await sdk.run([
             "cd libs",
             "poetry run alembic revision --autogenerate -m " + description,
diff --git a/continuedev/src/continuedev/steps/input/nl_multiselect.py b/continuedev/src/continuedev/steps/input/nl_multiselect.py
index 36c489c7..aee22866 100644
--- a/continuedev/src/continuedev/steps/input/nl_multiselect.py
+++ b/continuedev/src/continuedev/steps/input/nl_multiselect.py
@@ -23,6 +23,6 @@ class NLMultiselectStep(Step):
         if first_try is not None:
             return first_try
 
-        gpt_parsed = sdk.models.gpt35.complete(
+        gpt_parsed = await sdk.models.gpt35.complete(
             f"These are the available options: [{', '.join(self.options)}]. The user requested {user_response}. This is the exact string from the options array that they selected:")
         return extract_option(gpt_parsed) or self.options[0]
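A side note on the MigrationStep hunk above: the LLM-generated description is concatenated straight into a shell command, so any space, quote, or shell metacharacter in it would break (or inject into) the `alembic revision` invocation. A defensive variant — an assumption, not part of this commit — would quote it:

```python
import shlex

description = "add users table"  # imagine this came from the model
cmd = "poetry run alembic revision --autogenerate -m " + shlex.quote(description)
assert cmd.endswith("-m 'add users table'")  # quoted as a single argument
```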
diff --git a/continuedev/src/continuedev/steps/main.py b/continuedev/src/continuedev/steps/main.py
index 0e42d8bf..b61aa3fe 100644
--- a/continuedev/src/continuedev/steps/main.py
+++ b/continuedev/src/continuedev/steps/main.py
@@ -145,7 +145,7 @@ class FasterEditHighlightedCodeStep(Step):
         for rif in rif_with_contents:
             rif_dict[rif.filepath] = rif.contents
 
-        completion = sdk.models.gpt35.complete(prompt)
+        completion = await sdk.models.gpt35.complete(prompt)
 
         # Temporarily doing this to generate description.
         self._prompt = prompt
@@ -213,7 +213,7 @@ class StarCoderEditHighlightedCodeStep(Step):
     _prompt_and_completion: str = ""
 
     async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        return models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")
+        return await models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give a brief description of the changes made above using markdown bullet points:")
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
         range_in_files = await sdk.ide.getHighlightedCode()
@@ -247,7 +247,7 @@ class StarCoderEditHighlightedCodeStep(Step):
             segs = full_file_contents.split(rif.contents)
             prompt = f"<file_prefix>{segs[0]}<file_suffix>{segs[1]}" + prompt
 
-            completion = str((await sdk.models.starcoder()).complete(prompt))
+            completion = str(await sdk.models.starcoder.complete(prompt))
 
             eot_token = "<|endoftext|>"
             completion = completion.removesuffix(eot_token)
diff --git a/continuedev/src/continuedev/steps/on_traceback.py b/continuedev/src/continuedev/steps/on_traceback.py
new file mode 100644
index 00000000..de668775
--- /dev/null
+++ b/continuedev/src/continuedev/steps/on_traceback.py
@@ -0,0 +1,14 @@
+from ..core.main import Step
+from ..core.sdk import ContinueSDK
+from .chat import SimpleChatStep
+
+
+class DefaultOnTracebackStep(Step):
+    output: str
+    name: str = "Help With Traceback"
+    hide: bool = True
+
+    async def run(self, sdk: ContinueSDK):
+        sdk.run_step(SimpleChatStep(
+            name="Help With Traceback",
+            user_input=f"""I got the following error, can you please help explain how to fix it?\n\n{self.output}"""))
diff --git a/continuedev/src/continuedev/steps/react.py b/continuedev/src/continuedev/steps/react.py
index d825d424..4d310fc8 100644
--- a/continuedev/src/continuedev/steps/react.py
+++ b/continuedev/src/continuedev/steps/react.py
@@ -27,7 +27,7 @@ class NLDecisionStep(Step):
 
             Select the step which should be taken next to satisfy the user input. Say only the name of the selected step. You must choose one:""")
 
-        resp = sdk.models.gpt35.complete(prompt).lower()
+        resp = (await sdk.models.gpt35.complete(prompt)).lower()
 
         step_to_run = None
         for step in self.steps:
diff --git a/continuedev/src/continuedev/steps/search_directory.py b/continuedev/src/continuedev/steps/search_directory.py
index 9f4594b9..d2966f46 100644
--- a/continuedev/src/continuedev/steps/search_directory.py
+++ b/continuedev/src/continuedev/steps/search_directory.py
@@ -41,7 +41,7 @@ class WriteRegexPatternStep(Step):
     async def run(self, sdk: ContinueSDK):
         # Ask the user for a regex pattern
-        pattern = sdk.models.gpt35.complete(dedent(f"""\
+        pattern = await sdk.models.gpt35.complete(dedent(f"""\
             This is the user request:
 
            {self.user_request}