Diffstat (limited to 'continuedev')
26 files changed, 229 insertions, 140 deletions
diff --git a/continuedev/src/continuedev/core/abstract_sdk.py b/continuedev/src/continuedev/core/abstract_sdk.py
index 0658f1b8..017e75ef 100644
--- a/continuedev/src/continuedev/core/abstract_sdk.py
+++ b/continuedev/src/continuedev/core/abstract_sdk.py
@@ -85,7 +85,7 @@ class AbstractContinueSDK(ABC):
         pass
 
     @abstractmethod
-    def add_chat_context(self, content: str, role: ChatMessageRole = "assistent"):
+    def add_chat_context(self, content: str, role: ChatMessageRole = "assistant"):
         pass
 
     @abstractmethod
diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index 703a73af..73f46a37 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -13,6 +13,8 @@ from ..steps.core.core import ReversibleStep, ManualEditStep, UserInputStep
 from ..libs.util.telemetry import capture_event
 from .sdk import ContinueSDK
 import asyncio
+from ..libs.util.step_name_to_steps import get_step_from_name
+from ..libs.util.traceback_parsers import get_python_traceback, get_javascript_traceback
 
 
 class Autopilot(ContinueBaseModel):
@@ -88,9 +90,17 @@ class Autopilot(ContinueBaseModel):
         self._manual_edits_buffer.append(edit)
         # TODO: You're storing a lot of unecessary data here. Can compress into EditDiffs on the spot, and merge.
         # self._manual_edits_buffer = merge_file_edit(self._manual_edits_buffer, edit)
-
-    def handle_traceback(self, traceback: str):
-        raise NotImplementedError
+        # Note that this is being overriden to do nothing in DemoAgent
+
+    async def handle_command_output(self, output: str):
+        get_traceback_funcs = [get_python_traceback, get_javascript_traceback]
+        for get_tb_func in get_traceback_funcs:
+            traceback = get_tb_func(output)
+            if traceback is not None:
+                for tb_step in self.continue_sdk.config.on_traceback:
+                    step = get_step_from_name(
+                        tb_step.step_name, {"output": output, **tb_step.params})
+                    await self._run_singular_step(step)
 
     _step_depth: int = 0
 
@@ -99,9 +109,23 @@ class Autopilot(ContinueBaseModel):
 
     async def delete_at_index(self, index: int):
         self.history.timeline[index].step.hide = True
+        self.history.timeline[index].deleted = True
         await self.update_subscribers()
 
     async def _run_singular_step(self, step: "Step", is_future_step: bool = False) -> Coroutine[Observation, None, None]:
+        # Allow config to set disallowed steps
+        if step.__class__.__name__ in self.continue_sdk.config.disallowed_steps:
+            return None
+
+        # If a parent step is deleted/cancelled, don't run this step
+        last_depth = self._step_depth
+        i = self.history.current_index
+        while i >= 0 and self.history.timeline[i].depth > last_depth:
+            if self.history.timeline[i].deleted:
+                return None
+            last_depth = self.history.timeline[i].depth
+            i -= 1
+
         capture_event(self.continue_sdk.ide.unique_id, 'step run', {
                       'step_name': step.name, 'params': step.dict()})
 
@@ -114,7 +138,7 @@ class Autopilot(ContinueBaseModel):
             await self._run_singular_step(manualEditsStep)
 
         # Update history - do this first so we get top-first tree ordering
-        self.history.add_node(HistoryNode(
+        index_of_history_node = self.history.add_node(HistoryNode(
             step=step, observation=None, depth=self._step_depth))
 
         # Call all subscribed callbacks
@@ -127,6 +151,10 @@ class Autopilot(ContinueBaseModel):
         try:
             observation = await step(self.continue_sdk)
         except Exception as e:
+            if self.history.timeline[index_of_history_node].deleted:
+                # If step was deleted/cancelled, don't show error or allow retry
+                return None
+
             caught_error = True
 
             is_continue_custom_exception = issubclass(
@@ -176,8 +204,7 @@ class Autopilot(ContinueBaseModel):
 
         # Add observation to history, unless already attached error observation
         if not caught_error:
-            self.history.get_last_at_depth(
-                self._step_depth, include_current=True).observation = observation
+            self.history.timeline[index_of_history_node].observation = observation
             await self.update_subscribers()
 
         # Update its description
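The new `handle_command_output` above runs every configured `on_traceback` step whenever a parser recognizes a traceback in terminal output. A minimal standalone sketch of that dispatch, assuming stand-in types for the parser list and step runner (none of these helper names are from the codebase):

```python
from typing import Awaitable, Callable, Dict, List, Optional

TracebackParser = Callable[[str], Optional[str]]


async def dispatch_command_output(
    output: str,
    parsers: List[TracebackParser],
    on_traceback: List[Dict],
    run_step: Callable[[str, Dict], Awaitable[None]],
) -> None:
    # Mirrors Autopilot.handle_command_output: each parser either returns a
    # traceback string or None; a hit triggers every configured step, with the
    # raw output passed along as a parameter.
    for parse in parsers:
        traceback = parse(output)
        if traceback is not None:
            for tb_step in on_traceback:
                await run_step(tb_step["step_name"],
                               {"output": output, **tb_step.get("params", {})})
```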
diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py
index 6a811412..859c6188 100644
--- a/continuedev/src/continuedev/core/config.py
+++ b/continuedev/src/continuedev/core/config.py
@@ -12,11 +12,17 @@ class SlashCommand(BaseModel):
     params: Optional[Dict] = {}
 
 
+class OnTracebackSteps(BaseModel):
+    step_name: str
+    params: Optional[Dict] = {}
+
+
 class ContinueConfig(BaseModel):
     """
     A pydantic class for the continue config file.
     """
     steps_on_startup: Optional[Dict[str, Dict]] = {}
+    disallowed_steps: Optional[List[str]] = []
     server_url: Optional[str] = None
     allow_anonymous_telemetry: Optional[bool] = True
     default_model: Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k",
@@ -48,6 +54,8 @@ class ContinueConfig(BaseModel):
             step_name="FeedbackStep",
         )
     ]
+    on_traceback: Optional[List[OnTracebackSteps]] = [
+        OnTracebackSteps(step_name="DefaultOnTracebackStep")]
 
 
 def load_config(config_file: str) -> ContinueConfig:
diff --git a/continuedev/src/continuedev/core/main.py b/continuedev/src/continuedev/core/main.py
index f6b26d69..0c7ec67f 100644
--- a/continuedev/src/continuedev/core/main.py
+++ b/continuedev/src/continuedev/core/main.py
@@ -11,6 +11,8 @@ ChatMessageRole = Literal["assistant", "user", "system"]
 class ChatMessage(ContinueBaseModel):
     role: ChatMessageRole
     content: str
+    # A summary for pruning chat context to fit context window. Often the Step name.
+    summary: str
 
 
 class HistoryNode(ContinueBaseModel):
@@ -18,11 +20,12 @@ class HistoryNode(ContinueBaseModel):
     step: "Step"
     observation: Union[Observation, None]
     depth: int
+    deleted: bool = False
 
     def to_chat_messages(self) -> List[ChatMessage]:
         if self.step.description is None:
             return self.step.chat_context
-        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description)]
+        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description, summary=self.step.name)]
 
 
 class History(ContinueBaseModel):
@@ -37,9 +40,11 @@ class History(ContinueBaseModel):
             msgs += node.to_chat_messages()
         return msgs
 
-    def add_node(self, node: HistoryNode):
+    def add_node(self, node: HistoryNode) -> int:
+        """ Add node and return the index where it was added """
        self.timeline.insert(self.current_index + 1, node)
         self.current_index += 1
+        return self.current_index
 
     def get_current(self) -> Union[HistoryNode, None]:
         if self.current_index < 0:
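With `disallowed_steps` and `on_traceback` added to `ContinueConfig`, a config can both blacklist steps and attach traceback handlers. A hypothetical construction using the classes defined above (the field values are illustrative only):

```python
# Hypothetical config: never run CommentCodeStep, and run the default
# traceback handler whenever command output contains a traceback.
config = ContinueConfig(
    disallowed_steps=["CommentCodeStep"],
    on_traceback=[
        OnTracebackSteps(step_name="DefaultOnTracebackStep", params={}),
    ],
)
```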
diff --git a/continuedev/src/continuedev/core/policy.py b/continuedev/src/continuedev/core/policy.py
index 255f598d..1b53834b 100644
--- a/continuedev/src/continuedev/core/policy.py
+++ b/continuedev/src/continuedev/core/policy.py
@@ -8,7 +8,7 @@ from ..recipes.DeployPipelineAirflowRecipe.main import DeployPipelineAirflowRecipe
 from ..recipes.AddTransformRecipe.main import AddTransformRecipe
 from .main import Step, Validator, History, Policy
 from .observation import Observation, TracebackObservation, UserInputObservation
-from ..steps.main import EditHighlightedCodeStep, SolveTracebackStep, RunCodeStep, FasterEditHighlightedCodeStep, StarCoderEditHighlightedCodeStep, EmptyStep, SetupContinueWorkspaceStep
+from ..steps.main import EditHighlightedCodeStep, SolveTracebackStep
 from ..recipes.WritePytestsRecipe.main import WritePytestsRecipe
 from ..recipes.ContinueRecipeRecipe.main import ContinueStepStep
 from ..steps.comment_code import CommentCodeStep
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 9f31c4c2..7159beaa 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -177,26 +177,29 @@ class ContinueSDK(AbstractContinueSDK):
     def raise_exception(self, message: str, title: str, with_step: Union[Step, None] = None):
         raise ContinueCustomException(message, title, with_step)
 
-    def add_chat_context(self, content: str, role: ChatMessageRole = "assistent"):
+    def add_chat_context(self, content: str, summary: Union[str, None] = None, role: ChatMessageRole = "assistant"):
         self.history.timeline[self.history.current_index].step.chat_context.append(
-            ChatMessage(content=content, role=role))
+            ChatMessage(content=content, role=role, summary=summary))
 
     async def get_chat_context(self) -> List[ChatMessage]:
         history_context = self.history.to_chat_history()
         highlighted_code = await self.ide.getHighlightedCode()
+
+        preface = "The following code is highlighted"
+
+        if len(highlighted_code) == 0:
+            preface = "The following file is open"
 
         # Get the full contents of all open files
         files = await self.ide.getOpenFiles()
-        contents = {}
-        for file in files:
-            contents[file] = await self.ide.readFile(file)
+        if len(files) > 0:
+            content = await self.ide.readFile(files[0])
+            highlighted_code = [
+                RangeInFile.from_entire_file(files[0], content)]
 
-        highlighted_code = [RangeInFile.from_entire_file(
-            filepath, content) for filepath, content in contents.items()]
         for rif in highlighted_code:
             code = await self.ide.readRangeInFile(rif)
             history_context.append(ChatMessage(
-                content=f"The following code is highlighted:\n```\n{code}\n```", role="user"))
+                content=f"{preface} ({rif.filepath}):\n```\n{code}\n```", role="user", summary=f"{preface}: {rif.filepath}"))
 
         return history_context
 
     async def update_ui(self):
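The `summary` argument added to `add_chat_context` gives the token pruner (see the openai.py diff below) a short stand-in for each message. A hypothetical call, mirroring how `get_chat_context` builds its own messages (the variable names here are placeholders):

```python
# Hypothetical usage: attach a file's contents to the chat context along with
# a short summary the pruner can substitute when the context window fills up.
await sdk.add_chat_context(
    content=f"The following file is open ({filepath}):\n```\n{code}\n```",
    summary=f"The following file is open: {filepath}",
    role="user",
)
```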
diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 2986b2c4..108eedf1 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -1,5 +1,5 @@
 from abc import ABC
-from typing import Any, Dict, Generator, List, Union
+from typing import Any, Coroutine, Dict, Generator, List, Union
 
 from ...core.main import ChatMessage
 from ...models.main import AbstractModel
@@ -9,17 +9,14 @@ from pydantic import BaseModel
 class LLM(ABC):
     system_message: Union[str, None] = None
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs):
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         """Return the completion of the text with the given temperature."""
-        raise
+        raise NotImplementedError
 
     def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         """Yield a stream of chat messages."""
         raise NotImplementedError
 
-    def __call__(self, prompt: str, **kwargs):
-        return self.complete(prompt, **kwargs)
-
     def with_system_message(self, system_message: Union[str, None]):
         """Return a new model with the given system message."""
         raise NotImplementedError
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 136e86b4..22c28b20 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -1,7 +1,7 @@
 import asyncio
 from functools import cached_property
 import time
-from typing import Any, Dict, Generator, List, Union
+from typing import Any, Coroutine, Dict, Generator, List, Union
 from ...core.main import ChatMessage
 import openai
 import aiohttp
@@ -47,12 +47,37 @@ class OpenAI(LLM):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))
 
     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
-        tokens = tokens_for_completion
-        for i in range(len(chat_history) - 1, -1, -1):
-            message = chat_history[i]
-            tokens += self.count_tokens(message.content)
-            if tokens > max_tokens:
-                return chat_history[i + 1:]
+        total_tokens = tokens_for_completion + \
+            sum(self.count_tokens(message.content) for message in chat_history)
+
+        # 1. Replace beyond last 5 messages with summary
+        i = 0
+        while total_tokens > max_tokens and i < len(chat_history) - 5:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 2. Remove entire messages until the last 5
+        while len(chat_history) > 5 and total_tokens > max_tokens:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
+        # 3. Truncate message in the last 5
+        i = 0
+        while total_tokens > max_tokens:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 4. Remove entire messages in the last 5
+        while total_tokens > max_tokens and len(chat_history) > 0:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
         return chat_history
 
     def with_system_message(self, system_message: Union[str, None]):
@@ -83,7 +108,7 @@ class OpenAI(LLM):
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -112,7 +137,7 @@ class OpenAI(LLM):
             for chunk in generator:
                 yield chunk.choices[0].text
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         t1 = time.time()
 
         self.completion_count += 1
@@ -120,15 +145,15 @@ class OpenAI(LLM):
                 "frequency_penalty": 0, "presence_penalty": 0, "stream": False} | kwargs
 
         if args["model"] in CHAT_MODELS:
-            resp = openai.ChatCompletion.create(
+            resp = (await openai.ChatCompletion.acreate(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
-            ).choices[0].message.content
+            )).choices[0].message.content
         else:
-            resp = openai.Completion.create(
+            resp = (await openai.Completion.acreate(
                 prompt=prompt,
                 **args,
-            ).choices[0].text
+            )).choices[0].text
 
         t2 = time.time()
         print("Completion time:", t2 - t1)
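The rewritten `__prune_chat_history` replaces the old "drop from the front until it fits" behavior with staged degradation: summaries shrink old messages before anything is thrown away. A condensed, runnable sketch of the intended four passes, with a toy tokenizer standing in for the tiktoken-based counter (the `Msg` type and helper names are illustrative, not the real classes):

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Msg:
    content: str
    summary: str


def count_tokens(text: str) -> int:
    return len(text.split())  # toy stand-in for the tiktoken-based counter


def prune(history: List[Msg], max_tokens: int, tokens_for_completion: int) -> List[Msg]:
    total = tokens_for_completion + sum(count_tokens(m.content) for m in history)

    # 1. Replace messages older than the last 5 with their summaries
    i = 0
    while total > max_tokens and i < len(history) - 5:
        m = history[i]
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary
        i += 1

    # 2. Drop whole messages until only the last 5 remain
    while len(history) > 5 and total > max_tokens:
        total -= count_tokens(history.pop(0).content)

    # 3. Summarize within the last 5
    while total > max_tokens and any(m.content != m.summary for m in history):
        m = next(m for m in history if m.content != m.summary)
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary

    # 4. As a last resort, drop remaining messages from the front
    while total > max_tokens and history:
        total -= count_tokens(history.pop(0).content)

    return history
```

The design point is that information degrades gradually, from full text to summary to removal, so the most recent messages keep their full content for as long as the budget allows.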
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 4ff57101..93f2d48a 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -1,8 +1,9 @@
 from functools import cached_property
 import json
-from typing import Any, Dict, Generator, List, Literal, Union
+from typing import Any, Coroutine, Dict, Generator, List, Literal, Union
 import requests
 import tiktoken
+import aiohttp
 
 from ...core.main import ChatMessage
 from ..llm import LLM
@@ -16,7 +17,7 @@ CHAT_MODELS = {
     "gpt-3.5-turbo", "gpt-4"
 }
 
-# SERVER_URL = "http://127.0.0.1:8002"
+# SERVER_URL = "http://127.0.0.1:8080"
 SERVER_URL = "https://proxy-server-l6vsfbzhba-uc.a.run.app"
 
@@ -39,16 +40,6 @@ class ProxyServer(LLM):
     def count_tokens(self, text: str):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))
 
-    def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
-        resp = requests.post(f"{SERVER_URL}/stream_complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        }, stream=True)
-        for line in resp.iter_lines():
-            if line:
-                yield line.decode("utf-8")
-
     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
         tokens = tokens_for_completion
         for i in range(len(chat_history) - 1, -1, -1):
@@ -67,7 +58,7 @@
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -75,11 +66,28 @@
 
         return history
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                try:
+                    return json.loads(await resp.text())
+                except json.JSONDecodeError:
+                    raise Exception(await resp.text())
 
-        resp = requests.post(f"{SERVER_URL}/complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        })
-        return json.loads(resp.text)
+    async def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/stream_complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                async for line in resp.content:
+                    if line:
+                        try:
+                            yield line.decode("utf-8")
+                        except json.JSONDecodeError:
+                            raise Exception(str(line))
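Both ProxyServer entry points are now coroutine-based, so callers must await them. A hypothetical driver (the constructor arguments assume `unique_id` and `default_model` are pydantic fields, as the method bodies above suggest):

```python
import asyncio


async def main() -> None:
    llm = ProxyServer(unique_id="example-id", default_model="gpt-3.5-turbo")
    # complete() must now be awaited rather than called directly
    print(await llm.complete("Write a haiku about tracebacks"))
    # stream_chat() is an async generator; iterate with async for
    async for chunk in llm.stream_chat("Write a haiku about tracebacks"):
        print(chunk, end="")

asyncio.run(main())
```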
diff --git a/continuedev/src/continuedev/libs/util/step_name_to_steps.py b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
index 4dd9c430..2c4474af 100644
--- a/continuedev/src/continuedev/libs/util/step_name_to_steps.py
+++ b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
@@ -6,13 +6,25 @@ from ...steps.main import EditHighlightedCodeStep
 from ...steps.chat import SimpleChatStep
 from ...steps.comment_code import CommentCodeStep
 from ...steps.feedback import FeedbackStep
+from ...recipes.AddTransformRecipe.main import AddTransformRecipe
+from ...recipes.CreatePipelineRecipe.main import CreatePipelineRecipe
+from ...recipes.DDtoBQRecipe.main import DDtoBQRecipe
+from ...recipes.DeployPipelineAirflowRecipe.main import DeployPipelineAirflowRecipe
+from ...steps.on_traceback import DefaultOnTracebackStep
 
+# This mapping is used to convert from string in ContinueConfig json to corresponding Step class.
+# Used for example in slash_commands and steps_on_startup
 step_name_to_step_class = {
     "UserInputStep": UserInputStep,
     "EditHighlightedCodeStep": EditHighlightedCodeStep,
     "SimpleChatStep": SimpleChatStep,
     "CommentCodeStep": CommentCodeStep,
     "FeedbackStep": FeedbackStep,
+    "AddTransformRecipe": AddTransformRecipe,
+    "CreatePipelineRecipe": CreatePipelineRecipe,
+    "DDtoBQRecipe": DDtoBQRecipe,
+    "DeployPipelineAirflowRecipe": DeployPipelineAirflowRecipe,
+    "DefaultOnTracebackStep": DefaultOnTracebackStep,
 }
 
@@ -22,4 +34,4 @@ def get_step_from_name(step_name: str, params: Dict) -> Step:
     except:
         print(
             f"Incorrect parameters for step {step_name}. Parameters provided were: {params}")
-        raise
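The expanded registry means recipes and the new traceback step can all be referenced by name from config files. For example, using `get_step_from_name` as defined above (the parameter value is illustrative):

```python
# Resolve a step class by its config-file name and instantiate it with params,
# exactly as Autopilot.handle_command_output does for on_traceback entries.
step = get_step_from_name("DefaultOnTracebackStep",
                          {"output": "Traceback (most recent call last): ..."})
```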
\ No newline at end of file
+        raise
diff --git a/continuedev/src/continuedev/libs/util/traceback_parsers.py b/continuedev/src/continuedev/libs/util/traceback_parsers.py
index c31929c1..a2e94c26 100644
--- a/continuedev/src/continuedev/libs/util/traceback_parsers.py
+++ b/continuedev/src/continuedev/libs/util/traceback_parsers.py
@@ -1,24 +1,25 @@
-from typing import Union
-from ...models.main import Traceback
-from boltons import tbutils
+PYTHON_TRACEBACK_PREFIX = "Traceback (most recent call last):"
 
 
-def sort_func(items):
-    """Sort a list of items."""
-    return sorted(items)
-
-
-def parse_python_traceback(stdout: str) -> Union[Traceback, None]:
-    """Parse a python traceback from stdout."""
+def get_python_traceback(output: str) -> str:
+    if PYTHON_TRACEBACK_PREFIX in output:
+        return PYTHON_TRACEBACK_PREFIX + output.split(PYTHON_TRACEBACK_PREFIX)[-1]
+    elif "SyntaxError" in output:
+        return "SyntaxError" + output.split("SyntaxError")[-1]
+    else:
+        return None
 
-    # Sometimes paths are not quoted, but they need to be
-    if "File \"" not in stdout:
-        stdout = stdout.replace("File ", "File \"").replace(
-            ", line ", "\", line ")
-    try:
-        tbutil_parsed_exc = tbutils.ParsedException.from_string(stdout)
-        return Traceback.from_tbutil_parsed_exc(tbutil_parsed_exc)
+
+def get_javascript_traceback(output: str) -> str:
+    lines = output.splitlines()
+    first_line = None
+    for i in range(len(lines) - 1):
+        segs = lines[i].split(":")
+        if len(segs) > 1 and segs[0] != "" and segs[1].startswith(" ") and lines[i + 1].strip().startswith("at"):
+            first_line = lines[i]
+            break
 
-    except Exception:
+    if first_line is not None:
+        return "\n".join(lines[lines.index(first_line):])
+    else:
         return None
diff --git a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py
index 92bddc98..55ef107b 100644
--- a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py
+++ b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py
@@ -27,5 +27,7 @@ class CreatePipelineRecipe(Step):
         await sdk.run_step(
             SetupPipelineStep(api_description=text_observation.text) >>
             ValidatePipelineStep() >>
-            RunQueryStep()
+            RunQueryStep() >>
+            MessageStep(
+                name="Congrats!", message="You've successfully created your first dlt pipeline! 🎉")
         )
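The two new parsers return the trailing traceback text, or None when nothing matches. A quick sanity check on representative output (the sample strings are fabricated for illustration):

```python
# Python: everything from the standard prefix onward is returned.
py_out = ("some log lines\n"
          "Traceback (most recent call last):\n"
          "  File \"app.py\", line 1, in <module>\n"
          "ZeroDivisionError: division by zero")
assert get_python_traceback(py_out).startswith("Traceback (most recent call last):")

# JavaScript: an "Error: message" line followed by an "at ..." frame is the anchor.
js_out = ("TypeError: Cannot read properties of undefined\n"
          "    at main (index.js:3:5)")
assert get_javascript_traceback(js_out).startswith("TypeError")
```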
🎉") ) diff --git a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py index 096b41c6..91515dc2 100644 --- a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py +++ b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py @@ -29,8 +29,8 @@ class SetupPipelineStep(Step): async def run(self, sdk: ContinueSDK): sdk.context.set("api_description", self.api_description) - source_name = sdk.models.gpt35.complete( - f"Write a snake_case name for the data source described by {self.api_description}: ").strip() + source_name = (await sdk.models.gpt35.complete( + f"Write a snake_case name for the data source described by {self.api_description}: ")).strip() filename = f'{source_name}.py' # running commands to get started when creating a new dlt pipeline @@ -49,7 +49,7 @@ class SetupPipelineStep(Step): - `pip install -r requirements.txt`: Install the Python dependencies for the pipeline"""), name="Setup Python environment") # editing the resource function to call the requested API - resource_function_range = Range.from_shorthand(15, 0, 29, 0) + resource_function_range = Range.from_shorthand(15, 0, 30, 0) await sdk.ide.highlightCode(RangeInFile(filepath=os.path.join(await sdk.ide.getWorkspaceDirectory(), filename), range=resource_function_range), "#ffa50033") # sdk.set_loading_message("Writing code to call the API...") @@ -64,7 +64,7 @@ class SetupPipelineStep(Step): # wait for user to put API key in secrets.toml await sdk.ide.setFileOpen(await sdk.ide.getWorkspaceDirectory() + "/.dlt/secrets.toml") - await sdk.wait_for_user_confirmation("If this service requires an API key, please add it to the `secrets.toml` file and then press `Continue`") + await sdk.wait_for_user_confirmation("If this service requires an API key, please add it to the `secrets.toml` file and then press `Continue`.") sdk.context.set("source_name", source_name) @@ -91,7 +91,7 @@ class ValidatePipelineStep(Step): if "Traceback" in output or "SyntaxError" in output: output = "Traceback" + output.split("Traceback")[-1] file_content = await sdk.ide.readFile(os.path.join(workspace_dir, filename)) - suggestion = sdk.models.gpt35.complete(dedent(f"""\ + suggestion = await sdk.models.gpt35.complete(dedent(f"""\ ```python {file_content} ``` @@ -103,7 +103,7 @@ class ValidatePipelineStep(Step): This is a brief summary of the error followed by a suggestion on how it can be fixed by editing the resource function:""")) - api_documentation_url = sdk.models.gpt35.complete(dedent(f"""\ + api_documentation_url = await sdk.models.gpt35.complete(dedent(f"""\ The API I am trying to call is the '{sdk.context.get('api_description')}'. 
diff --git a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
index 6db9fd4b..df414e2e 100644
--- a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
+++ b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
@@ -82,7 +82,7 @@ class LoadDataStep(Step):
             docs = f.read()
 
         output = "Traceback" + output.split("Traceback")[-1]
-        suggestion = sdk.models.default.complete(dedent(f"""\
+        suggestion = await sdk.models.default.complete(dedent(f"""\
             When trying to load data into BigQuery, the following error occurred:
 
             ```ascii
diff --git a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
index 688f44c3..6e1244b3 100644
--- a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
+++ b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
@@ -41,7 +41,7 @@ class WritePytestsRecipe(Step):
             "{self.user_input}"
 
             Here is a complete set of pytest unit tests:""")
 
-        tests = sdk.models.gpt35.complete(prompt)
+        tests = await sdk.models.gpt35.complete(prompt)
 
         await sdk.apply_filesystem_edit(AddFile(filepath=path, content=tests))
diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py
index c53149d8..c83fbc8a 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/continuedev/src/continuedev/server/ide.py
@@ -1,5 +1,4 @@
 # This is a separate server from server/main.py
-import asyncio
 from functools import cached_property
 import json
 import os
@@ -10,11 +9,13 @@ from uvicorn.main import Server
 
 from ..libs.util.queue import AsyncSubscriptionQueue
 from ..models.filesystem import FileSystem, RangeInFile, EditDiff, RealFileSystem
-from ..models.main import Traceback
 from ..models.filesystem_edit import AddDirectory, AddFile, DeleteDirectory, DeleteFile, FileSystemEdit, FileEdit, FileEditWithFullContents, RenameDirectory, RenameFile, SequentialFileSystemEdit
 from pydantic import BaseModel
 from .gui import SessionManager, session_manager
 from .ide_protocol import AbstractIdeProtocolServer
+import asyncio
+import nest_asyncio
+nest_asyncio.apply()
 
 router = APIRouter(prefix="/ide", tags=["ide"])
 
@@ -135,6 +136,9 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
             fileEdits = list(
                 map(lambda d: FileEditWithFullContents.parse_obj(d), data["fileEdits"]))
             self.onFileEdits(fileEdits)
+        elif message_type == "commandOutput":
+            output = data["output"]
+            self.onCommandOutput(output)
         elif message_type in ["highlightedCode", "openFiles", "readFile", "editFile", "workspaceDirectory", "getUserSecret", "runCommand", "uniqueId"]:
             self.sub_queue.post(message_type, data)
         else:
@@ -189,11 +193,6 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
     def onAcceptRejectSuggestion(self, suggestionId: str, accepted: bool):
         pass
 
-    def onTraceback(self, traceback: Traceback):
-        # Same as below, maybe not every autopilot?
-        for _, session in self.session_manager.sessions.items():
-            session.autopilot.handle_traceback(traceback)
-
     def onFileSystemUpdate(self, update: FileSystemEdit):
         # Access to Autopilot (so SessionManager)
         pass
@@ -211,6 +210,13 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         for _, session in self.session_manager.sessions.items():
             session.autopilot.handle_manual_edits(edits)
 
+    def onCommandOutput(self, output: str):
+        # Send the output to ALL autopilots.
+        # Maybe not ideal behavior
+        for _, session in self.session_manager.sessions.items():
+            asyncio.create_task(
+                session.autopilot.handle_command_output(output))
+
     # Request information. Session doesn't matter.
     async def getOpenFiles(self) -> List[str]:
         resp = await self._send_and_receive_json({}, OpenFilesResponse, "openFiles")
@@ -224,7 +230,7 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         resp = await self._send_and_receive_json({}, UniqueIdResponse, "uniqueId")
         return resp.uniqueId
 
-    @cached_property
+    @property
     def workspace_directory(self) -> str:
         return asyncio.run(self.getWorkspaceDirectory())
diff --git a/continuedev/src/continuedev/server/ide_protocol.py b/continuedev/src/continuedev/server/ide_protocol.py
index 1d98f4a1..2dcedc30 100644
--- a/continuedev/src/continuedev/server/ide_protocol.py
+++ b/continuedev/src/continuedev/server/ide_protocol.py
@@ -36,10 +36,6 @@ class AbstractIdeProtocolServer(ABC):
         """Called when the user accepts or rejects a suggestion"""
 
     @abstractmethod
-    def onTraceback(self, traceback: Traceback):
-        """Called when a traceback is received"""
-
-    @abstractmethod
     def onFileSystemUpdate(self, update: FileSystemEdit):
         """Called when a file system update is received"""
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 7cfe7e0c..90514ad6 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -10,10 +10,10 @@ class SimpleChatStep(Step):
     name: str = "Chat"
 
     async def run(self, sdk: ContinueSDK):
-        self.description = f"## {self.user_input}\n\n"
-        for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()):
+        self.description = f"```{self.user_input}```\n\n"
+        async for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()):
             self.description += chunk
             await sdk.update_ui()
 
-        self.name = sdk.models.gpt35.complete(
-            f"Write a short title for the following chat message: {self.description}").strip()
+        self.name = (await sdk.models.gpt35.complete(
+            f"Write a short title for the following chat message: {self.description}")).strip()
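In ide.py, `workspace_directory` is now an ordinary `@property` that calls `asyncio.run()` on every access. Since the server itself runs inside an event loop, this only works because of the `nest_asyncio.apply()` call added at import time, which patches asyncio to tolerate nested loops. A minimal reproduction of the pattern (a toy class, not the real server):

```python
import asyncio
import nest_asyncio

# Without this, asyncio.run() inside a running loop raises RuntimeError.
nest_asyncio.apply()


class ToyIde:
    async def get_workspace_directory(self) -> str:
        await asyncio.sleep(0)  # stands in for a websocket round trip
        return "/workspace"

    @property
    def workspace_directory(self) -> str:
        # Synchronous facade over an async call, re-entering the event loop.
        return asyncio.run(self.get_workspace_directory())


async def main() -> None:
    print(ToyIde().workspace_directory)  # sync access from async context

asyncio.run(main())
```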
diff --git a/continuedev/src/continuedev/steps/chroma.py b/continuedev/src/continuedev/steps/chroma.py
index 058455b2..9d085981 100644
--- a/continuedev/src/continuedev/steps/chroma.py
+++ b/continuedev/src/continuedev/steps/chroma.py
@@ -56,7 +56,7 @@ class AnswerQuestionChroma(Step):
 
             Here is the answer:""")
 
-        answer = sdk.models.gpt35.complete(prompt)
+        answer = await sdk.models.gpt35.complete(prompt)
 
         # Make paths relative to the workspace directory
         answer = answer.replace(await sdk.ide.getWorkspaceDirectory(), "")
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index de6fa29a..d580e2d2 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -73,7 +73,7 @@ class ShellCommandsStep(Step):
             return f"Error when running shell commands:\n```\n{self._err_text}\n```"
 
         cmds_str = "\n".join(self.cmds)
-        return models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:")
+        return await models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:")
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
         cwd = await sdk.ide.getWorkspaceDirectory() if self.cwd is None else self.cwd
@@ -81,7 +81,7 @@ class ShellCommandsStep(Step):
         for cmd in self.cmds:
             output = await sdk.ide.runCommand(cmd)
             if self.handle_error and output is not None and output_contains_error(output):
-                suggestion = sdk.models.gpt35.complete(dedent(f"""\
+                suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                     While running the command `{cmd}`, the following error occurred:
 
                     ```ascii
@@ -152,10 +152,8 @@ class DefaultModelEditCodeStep(Step):
     _prompt_and_completion: str = ""
 
     async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        description = models.gpt35.complete(
+        description = await models.gpt35.complete(
             f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points. Be concise and only mention changes made to the commit before, not prefix or suffix:")
-        # self.name = models.gpt35.complete(
-        #     f"Write a short title for this description: {description}")
         return description
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
@@ -229,7 +227,8 @@ class DefaultModelEditCodeStep(Step):
         prompt = self._prompt.format(
             code=rif.contents, user_request=self.user_input, file_prefix=segs[0], file_suffix=segs[1])
 
-        completion = str(model_to_use.complete(prompt, with_history=await sdk.get_chat_context()))
+        completion = str(await model_to_use.complete(prompt, with_history=await sdk.get_chat_context()))
+
         eot_token = "<|endoftext|>"
         completion = completion.removesuffix(eot_token)
diff --git a/continuedev/src/continuedev/steps/draft/migration.py b/continuedev/src/continuedev/steps/draft/migration.py
index 7c4b7eb5..f3b36b5e 100644
--- a/continuedev/src/continuedev/steps/draft/migration.py
+++ b/continuedev/src/continuedev/steps/draft/migration.py
@@ -13,7 +13,7 @@ class MigrationStep(Step):
         recent_edits = await sdk.ide.get_recent_edits(self.edited_file)
         recent_edits_string = "\n\n".join(
             map(lambda x: x.to_string(), recent_edits))
-        description = sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
+        description = await sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
         await sdk.run([
             "cd libs",
             "poetry run alembic revision --autogenerate -m " + description,
diff --git a/continuedev/src/continuedev/steps/input/nl_multiselect.py b/continuedev/src/continuedev/steps/input/nl_multiselect.py
index 36c489c7..aee22866 100644
--- a/continuedev/src/continuedev/steps/input/nl_multiselect.py
+++ b/continuedev/src/continuedev/steps/input/nl_multiselect.py
@@ -23,6 +23,6 @@ class NLMultiselectStep(Step):
         if first_try is not None:
             return first_try
 
-        gpt_parsed = sdk.models.gpt35.complete(
+        gpt_parsed = await sdk.models.gpt35.complete(
             f"These are the available options are: [{', '.join(self.options)}]. The user requested {user_response}. This is the exact string from the options array that they selected:")
         return extract_option(gpt_parsed) or self.options[0]
diff --git a/continuedev/src/continuedev/steps/main.py b/continuedev/src/continuedev/steps/main.py
index 0e42d8bf..5ba86c53 100644
--- a/continuedev/src/continuedev/steps/main.py
+++ b/continuedev/src/continuedev/steps/main.py
@@ -3,7 +3,6 @@
 from typing import Coroutine, List, Union
 
 from pydantic import BaseModel
 
-from ..libs.util.traceback_parsers import parse_python_traceback
 from ..libs.llm import LLM
 from ..models.main import Traceback, Range
 from ..models.filesystem_edit import EditDiff, FileEdit
@@ -33,28 +32,6 @@ class SetupContinueWorkspaceStep(Step):
         }"""))
 
 
-class RunCodeStep(Step):
-    cmd: str
-
-    async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        return f"Ran command: `{self.cmd}`"
-
-    async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
-        result = subprocess.run(
-            self.cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        stdout = result.stdout.decode("utf-8")
-        stderr = result.stderr.decode("utf-8")
-        print(stdout, stderr)
-
-        # If it fails, return the error
-        tb = parse_python_traceback(stdout) or parse_python_traceback(stderr)
-        if tb:
-            return TracebackObservation(traceback=tb)
-        else:
-            self.hide = True
-            return None
-
-
 class Policy(BaseModel):
     pass
 
@@ -145,7 +122,7 @@ class FasterEditHighlightedCodeStep(Step):
         for rif in rif_with_contents:
             rif_dict[rif.filepath] = rif.contents
 
-        completion = sdk.models.gpt35.complete(prompt)
+        completion = await sdk.models.gpt35.complete(prompt)
 
         # Temporarily doing this to generate description.
         self._prompt = prompt
@@ -213,7 +190,7 @@ class StarCoderEditHighlightedCodeStep(Step):
     _prompt_and_completion: str = ""
 
     async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        return models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")
+        return await models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
         range_in_files = await sdk.ide.getHighlightedCode()
@@ -247,7 +224,7 @@ class StarCoderEditHighlightedCodeStep(Step):
         segs = full_file_contents.split(rif.contents)
         prompt = f"<file_prefix>{segs[0]}<file_suffix>{segs[1]}" + prompt
 
-        completion = str((await sdk.models.starcoder()).complete(prompt))
+        completion = str(await sdk.models.starcoder.complete(prompt))
         eot_token = "<|endoftext|>"
         completion = completion.removesuffix(eot_token)
diff --git a/continuedev/src/continuedev/steps/on_traceback.py b/continuedev/src/continuedev/steps/on_traceback.py
new file mode 100644
index 00000000..053b4ef4
--- /dev/null
+++ b/continuedev/src/continuedev/steps/on_traceback.py
@@ -0,0 +1,23 @@
+import os
+from ..core.main import Step
+from ..core.sdk import ContinueSDK
+from .chat import SimpleChatStep
+
+
+class DefaultOnTracebackStep(Step):
+    output: str
+    name: str = "Help With Traceback"
+    hide: bool = True
+
+    async def run(self, sdk: ContinueSDK):
+        # Add context for any files in the traceback that are in the workspace
+        for line in self.output.split("\n"):
+            segs = line.split(" ")
+            for seg in segs:
+                if seg.startswith(os.path.sep) and os.path.exists(seg) and os.path.commonprefix([seg, sdk.ide.workspace_directory]) == sdk.ide.workspace_directory:
+                    file_contents = await sdk.ide.readFile(seg)
+                    await sdk.add_chat_context(f"The contents of {seg}:\n```\n{file_contents}\n```", "", "user")
+
+        await sdk.run_step(SimpleChatStep(
+            name="Help With Traceback",
+            user_input=f"""I got the following error, can you please help explain how to fix it?\n\n{self.output}"""))
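In the new `DefaultOnTracebackStep`, the `os.path.commonprefix` check is what restricts context-gathering to files under the workspace. The check in isolation, with illustrative paths:

```python
import os


def in_workspace(seg: str, workspace: str) -> bool:
    # Mirrors the path checks in DefaultOnTracebackStep.run
    # (minus the os.path.exists test).
    return (seg.startswith(os.path.sep)
            and os.path.commonprefix([seg, workspace]) == workspace)


print(in_workspace("/home/user/project/app.py", "/home/user/project"))  # True
print(in_workspace("relative/path.py", "/home/user/project"))           # False
# Caveat: commonprefix compares strings, not path components, so
# "/home/user/project2/x.py" would also pass for "/home/user/project".
```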
diff --git a/continuedev/src/continuedev/steps/react.py b/continuedev/src/continuedev/steps/react.py
index d825d424..4d310fc8 100644
--- a/continuedev/src/continuedev/steps/react.py
+++ b/continuedev/src/continuedev/steps/react.py
@@ -27,7 +27,7 @@ class NLDecisionStep(Step):
 
             Select the step which should be taken next to satisfy the user input. Say only the name of the selected step. You must choose one:""")
 
-        resp = sdk.models.gpt35.complete(prompt).lower()
+        resp = (await sdk.models.gpt35.complete(prompt)).lower()
 
         step_to_run = None
         for step in self.steps:
diff --git a/continuedev/src/continuedev/steps/search_directory.py b/continuedev/src/continuedev/steps/search_directory.py
index 9f4594b9..d2966f46 100644
--- a/continuedev/src/continuedev/steps/search_directory.py
+++ b/continuedev/src/continuedev/steps/search_directory.py
@@ -41,7 +41,7 @@ class WriteRegexPatternStep(Step):
 
     async def run(self, sdk: ContinueSDK):
         # Ask the user for a regex pattern
-        pattern = sdk.models.gpt35.complete(dedent(f"""\
+        pattern = await sdk.models.gpt35.complete(dedent(f"""\
             This is the user request:
 
             {self.user_request}