33 files changed, 422 insertions, 267 deletions
diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index 703a73af..3ccce89a 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -13,6 +13,7 @@ from ..steps.core.core import ReversibleStep, ManualEditStep, UserInputStep
 from ..libs.util.telemetry import capture_event
 from .sdk import ContinueSDK
 import asyncio
+from ..libs.util.step_name_to_steps import get_step_from_name


 class Autopilot(ContinueBaseModel):
@@ -88,9 +89,15 @@ class Autopilot(ContinueBaseModel):
             self._manual_edits_buffer.append(edit)
             # TODO: You're storing a lot of unecessary data here. Can compress into EditDiffs on the spot, and merge.
             # self._manual_edits_buffer = merge_file_edit(self._manual_edits_buffer, edit)
+            # Note that this is being overriden to do nothing in DemoAgent

-    def handle_traceback(self, traceback: str):
-        raise NotImplementedError
+    async def handle_command_output(self, output: str):
+        is_traceback = False
+        if is_traceback:
+            for tb_step in self.continue_sdk.config.on_traceback:
+                step = get_step_from_name(tb_step.step_name)(
+                    output=output, **tb_step.params)
+                await self._run_singular_step(step)

     _step_depth: int = 0

@@ -99,9 +106,19 @@ class Autopilot(ContinueBaseModel):
     async def delete_at_index(self, index: int):
         self.history.timeline[index].step.hide = True
+        self.history.timeline[index].deleted = True
         await self.update_subscribers()

     async def _run_singular_step(self, step: "Step", is_future_step: bool = False) -> Coroutine[Observation, None, None]:
+        # If a parent step is deleted/cancelled, don't run this step
+        last_depth = self._step_depth
+        i = self.history.current_index
+        while i >= 0 and self.history.timeline[i].depth > last_depth:
+            if self.history.timeline[i].deleted:
+                return None
+            last_depth = self.history.timeline[i].depth
+            i -= 1
+
         capture_event(self.continue_sdk.ide.unique_id, 'step run', {
                       'step_name': step.name, 'params': step.dict()})
@@ -114,7 +131,7 @@ class Autopilot(ContinueBaseModel):
                 await self._run_singular_step(manualEditsStep)

         # Update history - do this first so we get top-first tree ordering
-        self.history.add_node(HistoryNode(
+        index_of_history_node = self.history.add_node(HistoryNode(
             step=step, observation=None, depth=self._step_depth))

         # Call all subscribed callbacks
@@ -127,6 +144,10 @@ class Autopilot(ContinueBaseModel):
         try:
             observation = await step(self.continue_sdk)
         except Exception as e:
+            if self.history.timeline[index_of_history_node].deleted:
+                # If step was deleted/cancelled, don't show error or allow retry
+                return None
+
             caught_error = True

             is_continue_custom_exception = issubclass(
@@ -176,8 +197,7 @@ class Autopilot(ContinueBaseModel):

         # Add observation to history, unless already attached error observation
         if not caught_error:
-            self.history.get_last_at_depth(
-                self._step_depth, include_current=True).observation = observation
+            self.history.timeline[index_of_history_node].observation = observation
             await self.update_subscribers()

         # Update its description
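The guard added at the top of `_run_singular_step` walks the timeline backward, treating shallower `depth` values as ancestors of the step about to run. For reference, one way to phrase that intent as a standalone function (a sketch; `TimelineNode` and the assumption that depth decreases toward the root are illustrative, not taken from the codebase):

```python
from dataclasses import dataclass
from typing import List


@dataclass
class TimelineNode:
    depth: int
    deleted: bool = False


def has_deleted_ancestor(timeline: List[TimelineNode], current_index: int, step_depth: int) -> bool:
    """Scan backward from current_index; the nearest node at each successively
    shallower depth is an ancestor of a step that would run at step_depth."""
    want_depth = step_depth - 1
    for node in reversed(timeline[:current_index + 1]):
        if node.depth == want_depth:
            if node.deleted:
                return True
            want_depth -= 1
        if want_depth < 0:
            break
    return False
```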
diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py
index 23be8133..d8b29f5b 100644
--- a/continuedev/src/continuedev/core/config.py
+++ b/continuedev/src/continuedev/core/config.py
@@ -12,6 +12,11 @@ class SlashCommand(BaseModel):
     params: Optional[Dict] = {}


+class OnTracebackSteps(BaseModel):
+    step_name: str
+    params: Optional[Dict] = {}
+
+
 class ContinueConfig(BaseModel):
     """
     A pydantic class for the continue config file.
@@ -48,6 +53,8 @@ class ContinueConfig(BaseModel):
             step_name="FeedbackStep",
         )
     ]
+    on_traceback: Optional[List[OnTracebackSteps]] = [
+        OnTracebackSteps(step_name="DefaultOnTracebackStep")]


 def load_config(config_file: str) -> ContinueConfig:
diff --git a/continuedev/src/continuedev/core/main.py b/continuedev/src/continuedev/core/main.py
index f6b26d69..0c7ec67f 100644
--- a/continuedev/src/continuedev/core/main.py
+++ b/continuedev/src/continuedev/core/main.py
@@ -11,6 +11,8 @@ ChatMessageRole = Literal["assistant", "user", "system"]
 class ChatMessage(ContinueBaseModel):
     role: ChatMessageRole
     content: str
+    # A summary for pruning chat context to fit context window. Often the Step name.
+    summary: str


 class HistoryNode(ContinueBaseModel):
@@ -18,11 +20,12 @@ class HistoryNode(ContinueBaseModel):
     step: "Step"
     observation: Union[Observation, None]
     depth: int
+    deleted: bool = False

     def to_chat_messages(self) -> List[ChatMessage]:
         if self.step.description is None:
             return self.step.chat_context
-        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description)]
+        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description, summary=self.step.name)]


 class History(ContinueBaseModel):
@@ -37,9 +40,11 @@ class History(ContinueBaseModel):
                 msgs += node.to_chat_messages()
         return msgs

-    def add_node(self, node: HistoryNode):
+    def add_node(self, node: HistoryNode) -> int:
+        """ Add node and return the index where it was added """
         self.timeline.insert(self.current_index + 1, node)
         self.current_index += 1
+        return self.current_index

     def get_current(self) -> Union[HistoryNode, None]:
         if self.current_index < 0:
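Together, `OnTracebackSteps` and the new `on_traceback` default mean a user config can route captured tracebacks to any named step. A hypothetical config illustrating the shape (only `on_traceback` is the new field; everything else keeps its defaults):

```python
from continuedev.core.config import ContinueConfig, OnTracebackSteps

config = ContinueConfig(
    on_traceback=[
        # Each entry names a registered Step class; params are forwarded to
        # its constructor along with the captured command output.
        OnTracebackSteps(step_name="DefaultOnTracebackStep", params={}),
    ],
)
```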
        preface = "The following file is open"              # Get the full contents of all open files              files = await self.ide.getOpenFiles() -            contents = {} -            for file in files: -                contents[file] = await self.ide.readFile(file) +            if len(files) > 0: +                content = await self.ide.readFile(files[0]) +                highlighted_code = [ +                    RangeInFile.from_entire_file(files[0], content)] -            highlighted_code = [RangeInFile.from_entire_file( -                filepath, content) for filepath, content in contents.items()]          for rif in highlighted_code:              code = await self.ide.readRangeInFile(rif)              history_context.append(ChatMessage( -                content=f"The following code is highlighted:\n```\n{code}\n```", role="user")) +                content=f"{preface} ({rif.filepath}):\n```\n{code}\n```", role="user", summary=f"{preface}: {rif.filepath}"))          return history_context      async def update_ui(self): diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py index 2986b2c4..108eedf1 100644 --- a/continuedev/src/continuedev/libs/llm/__init__.py +++ b/continuedev/src/continuedev/libs/llm/__init__.py @@ -1,5 +1,5 @@  from abc import ABC -from typing import Any, Dict, Generator, List, Union +from typing import Any, Coroutine, Dict, Generator, List, Union  from ...core.main import ChatMessage  from ...models.main import AbstractModel @@ -9,17 +9,14 @@ from pydantic import BaseModel  class LLM(ABC):      system_message: Union[str, None] = None -    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs): +    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:          """Return the completion of the text with the given temperature.""" -        raise +        raise NotImplementedError      def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:          """Yield a stream of chat messages."""          raise NotImplementedError -    def __call__(self, prompt: str, **kwargs): -        return self.complete(prompt, **kwargs) -      def with_system_message(self, system_message: Union[str, None]):          """Return a new model with the given system message."""          raise NotImplementedError diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py index 180ea5f0..ec285d55 100644 --- a/continuedev/src/continuedev/libs/llm/openai.py +++ b/continuedev/src/continuedev/libs/llm/openai.py @@ -1,7 +1,7 @@  import asyncio  from functools import cached_property  import time -from typing import Any, Dict, Generator, List, Union +from typing import Any, Coroutine, Dict, Generator, List, Union  from ...core.main import ChatMessage  import openai  import aiohttp @@ -42,12 +42,37 @@ class OpenAI(LLM):          return len(self.__encoding_for_model.encode(text, disallowed_special=()))      def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int): -        tokens = tokens_for_completion -        for i in range(len(chat_history) - 1, -1, -1): -            message = chat_history[i] -            tokens += self.count_tokens(message.content) -            if tokens > max_tokens: -                return chat_history[i + 1:] +        total_tokens = tokens_for_completion + \ +     
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 180ea5f0..ec285d55 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -1,7 +1,7 @@
 import asyncio
 from functools import cached_property
 import time
-from typing import Any, Dict, Generator, List, Union
+from typing import Any, Coroutine, Dict, Generator, List, Union
 from ...core.main import ChatMessage
 import openai
 import aiohttp
@@ -42,12 +42,37 @@ class OpenAI(LLM):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))

     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
-        tokens = tokens_for_completion
-        for i in range(len(chat_history) - 1, -1, -1):
-            message = chat_history[i]
-            tokens += self.count_tokens(message.content)
-            if tokens > max_tokens:
-                return chat_history[i + 1:]
+        total_tokens = tokens_for_completion + \
+            sum(self.count_tokens(message.content) for message in chat_history)
+
+        # 1. Replace beyond last 5 messages with summary
+        i = 0
+        while total_tokens > max_tokens and i < len(chat_history) - 5:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 2. Remove entire messages until the last 5
+        while len(chat_history) > 5 and total_tokens > max_tokens:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
+        # 3. Truncate message in the last 5
+        i = 0
+        while total_tokens > max_tokens:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 4. Remove entire messages in the last 5
+        while total_tokens > max_tokens and len(chat_history) > 0:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
         return chat_history

     def with_system_message(self, system_message: Union[str, None]):
@@ -78,7 +103,7 @@ class OpenAI(LLM):
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -107,7 +132,7 @@ class OpenAI(LLM):
             for chunk in generator:
                 yield chunk.choices[0].text

-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         t1 = time.time()

         self.completion_count += 1
@@ -115,12 +140,12 @@ class OpenAI(LLM):
                 "frequency_penalty": 0, "presence_penalty": 0, "stream": False} | kwargs

         if args["model"] in CHAT_MODELS:
-            resp = openai.ChatCompletion.create(
+            resp = await openai.ChatCompletion.acreate(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
             ).choices[0].message.content
         else:
-            resp = openai.Completion.create(
+            resp = await openai.Completion.acreate(
                 prompt=prompt,
                 **args,
             ).choices[0].text
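The new pruner degrades gracefully in stages: summarize old messages, drop old messages, then apply the same two moves to the protected five-message tail. The version in the diff indexes `chat_history[0]` inside the summarizing loops and never uses `i`, so as a reference point, here is a compact restatement of the staged strategy (a sketch; `count_tokens` is passed in rather than taken from the class):

```python
from typing import Callable, List


def prune_chat_history(chat_history: List["ChatMessage"], max_tokens: int,
                       tokens_for_completion: int,
                       count_tokens: Callable[[str], int]) -> List["ChatMessage"]:
    total = tokens_for_completion + sum(count_tokens(m.content) for m in chat_history)

    # 1. Swap content for the shorter summary, oldest first, sparing the last 5.
    for m in chat_history[:-5]:
        if total <= max_tokens:
            break
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary

    # 2. Drop whole messages, still sparing the last 5.
    while total > max_tokens and len(chat_history) > 5:
        total -= count_tokens(chat_history.pop(0).content)

    # 3. Summarize the remaining (protected) messages if still over budget.
    for m in chat_history:
        if total <= max_tokens:
            break
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary

    # 4. Last resort: drop messages until the budget fits or nothing is left.
    while total > max_tokens and chat_history:
        total -= count_tokens(chat_history.pop(0).content)

    return chat_history
```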
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 4ff57101..4227042f 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -1,8 +1,9 @@
 from functools import cached_property
 import json
-from typing import Any, Dict, Generator, List, Literal, Union
+from typing import Any, Coroutine, Dict, Generator, List, Literal, Union
 import requests
 import tiktoken
+import aiohttp

 from ...core.main import ChatMessage
 from ..llm import LLM
@@ -39,16 +40,6 @@ class ProxyServer(LLM):
     def count_tokens(self, text: str):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))

-    def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
-        resp = requests.post(f"{SERVER_URL}/stream_complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        }, stream=True)
-        for line in resp.iter_lines():
-            if line:
-                yield line.decode("utf-8")
-
     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
         tokens = tokens_for_completion
         for i in range(len(chat_history) - 1, -1, -1):
@@ -67,7 +58,7 @@ class ProxyServer(LLM):
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -75,11 +66,25 @@ class ProxyServer(LLM):

         return history

-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                try:
+                    return json.loads(await resp.text())
+                except json.JSONDecodeError:
+                    raise Exception(await resp.text())

-        resp = requests.post(f"{SERVER_URL}/complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        })
-        return json.loads(resp.text)
+    async def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/stream_complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                async for line in resp.content:
+                    if line:
+                        yield line.decode("utf-8")
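`stream_chat` is now an async generator: aiohttp's response body is a `StreamReader`, and iterating it with `async for` yields chunks as they arrive instead of blocking on the whole response the way the old `requests` version did. The pattern in isolation (URL and payload are placeholders):

```python
import aiohttp


async def stream_lines(url: str, payload: dict):
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload) as resp:
            async for line in resp.content:  # yields bytes as the server flushes them
                if line:
                    yield line.decode("utf-8")

# usage (inside a coroutine):
#     async for chunk in stream_lines(f"{SERVER_URL}/stream_complete", body):
#         print(chunk, end="")
```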
diff --git a/continuedev/src/continuedev/libs/util/step_name_to_steps.py b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
index 4dd9c430..2c4474af 100644
--- a/continuedev/src/continuedev/libs/util/step_name_to_steps.py
+++ b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
@@ -6,13 +6,25 @@ from ...steps.main import EditHighlightedCodeStep
 from ...steps.chat import SimpleChatStep
 from ...steps.comment_code import CommentCodeStep
 from ...steps.feedback import FeedbackStep
+from ...recipes.AddTransformRecipe.main import AddTransformRecipe
+from ...recipes.CreatePipelineRecipe.main import CreatePipelineRecipe
+from ...recipes.DDtoBQRecipe.main import DDtoBQRecipe
+from ...recipes.DeployPipelineAirflowRecipe.main import DeployPipelineAirflowRecipe
+from ...steps.on_traceback import DefaultOnTracebackStep

+# This mapping is used to convert from string in ContinueConfig json to corresponding Step class.
+# Used for example in slash_commands and steps_on_startup
 step_name_to_step_class = {
     "UserInputStep": UserInputStep,
     "EditHighlightedCodeStep": EditHighlightedCodeStep,
     "SimpleChatStep": SimpleChatStep,
     "CommentCodeStep": CommentCodeStep,
     "FeedbackStep": FeedbackStep,
+    "AddTransformRecipe": AddTransformRecipe,
+    "CreatePipelineRecipe": CreatePipelineRecipe,
+    "DDtoBQRecipe": DDtoBQRecipe,
+    "DeployPipelineAirflowRecipe": DeployPipelineAirflowRecipe,
+    "DefaultOnTracebackStep": DefaultOnTracebackStep,
 }


@@ -22,4 +34,4 @@ def get_step_from_name(step_name: str, params: Dict) -> Step:
     except:
         print(
             f"Incorrect parameters for step {step_name}. Parameters provided were: {params}")
-        raise
\ No newline at end of file
+        raise
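This registry is what keeps `ContinueConfig` JSON-serializable: configs carry only step names and params, and the server resolves names to classes at run time. A stripped-down sketch of the lookup-and-instantiate pattern (the registry contents here are illustrative):

```python
from typing import Any, Dict, Type

registry: Dict[str, Type[Any]] = {
    # "DefaultOnTracebackStep": DefaultOnTracebackStep, ...
}


def resolve_step(step_name: str, params: Dict[str, Any]):
    try:
        return registry[step_name](**params)
    except KeyError:
        raise ValueError(f"No step registered under the name {step_name!r}")
```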
diff --git a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
index 096b41c6..3fba1112 100644
--- a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
+++ b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
@@ -29,8 +29,8 @@ class SetupPipelineStep(Step):
     async def run(self, sdk: ContinueSDK):
         sdk.context.set("api_description", self.api_description)

-        source_name = sdk.models.gpt35.complete(
-            f"Write a snake_case name for the data source described by {self.api_description}: ").strip()
+        source_name = (await sdk.models.gpt35.complete(
+            f"Write a snake_case name for the data source described by {self.api_description}: ")).strip()
         filename = f'{source_name}.py'

         # running commands to get started when creating a new dlt pipeline
@@ -91,7 +91,7 @@ class ValidatePipelineStep(Step):
         if "Traceback" in output or "SyntaxError" in output:
             output = "Traceback" + output.split("Traceback")[-1]
             file_content = await sdk.ide.readFile(os.path.join(workspace_dir, filename))
-            suggestion = sdk.models.gpt35.complete(dedent(f"""\
+            suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                 ```python
                 {file_content}
                 ```
@@ -103,7 +103,7 @@

                 This is a brief summary of the error followed by a suggestion on how it can be fixed by editing the resource function:"""))

-            api_documentation_url = sdk.models.gpt35.complete(dedent(f"""\
+            api_documentation_url = await sdk.models.gpt35.complete(dedent(f"""\
                 The API I am trying to call is the '{sdk.context.get('api_description')}'. I tried calling it in the @resource function like this:
                 ```python
                 {file_content}
@@ -159,7 +159,7 @@ class RunQueryStep(Step):
         output = await sdk.run('.env/bin/python3 query.py', name="Run test query", description="Running `.env/bin/python3 query.py` to test that the data was loaded into DuckDB as expected", handle_error=False)

         if "Traceback" in output or "SyntaxError" in output:
-            suggestion = sdk.models.gpt35.complete(dedent(f"""\
+            suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                 ```python
                 {await sdk.ide.readFile(os.path.join(sdk.ide.workspace_directory, "query.py"))}
                 ```
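One recurring pattern in these call-site updates: when the old synchronous result was immediately chained (`.strip()`, `.lower()`), the `await` has to be parenthesized so the method applies to the returned string rather than to the coroutine object. For example (`sdk` is the ContinueSDK from the diff):

```python
async def snake_case_name(sdk, prompt: str) -> str:
    # Parentheses matter: without them, .strip() would be called on the
    # coroutine object instead of the completed string.
    return (await sdk.models.gpt35.complete(prompt)).strip()
```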
diff --git a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
index 6db9fd4b..df414e2e 100644
--- a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
+++ b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
@@ -82,7 +82,7 @@ class LoadDataStep(Step):
                 docs = f.read()

             output = "Traceback" + output.split("Traceback")[-1]
-            suggestion = sdk.models.default.complete(dedent(f"""\
+            suggestion = await sdk.models.default.complete(dedent(f"""\
                 When trying to load data into BigQuery, the following error occurred:

                 ```ascii
diff --git a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
index 688f44c3..6e1244b3 100644
--- a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
+++ b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
@@ -41,7 +41,7 @@ class WritePytestsRecipe(Step):
             "{self.user_input}"

             Here is a complete set of pytest unit tests:""")

-        tests = sdk.models.gpt35.complete(prompt)
+        tests = await sdk.models.gpt35.complete(prompt)

         await sdk.apply_filesystem_edit(AddFile(filepath=path, content=tests))
diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py
index c53149d8..c66cc142 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/continuedev/src/continuedev/server/ide.py
@@ -1,5 +1,4 @@
 # This is a separate server from server/main.py
-import asyncio
 from functools import cached_property
 import json
 import os
@@ -10,11 +9,13 @@ from uvicorn.main import Server

 from ..libs.util.queue import AsyncSubscriptionQueue
 from ..models.filesystem import FileSystem, RangeInFile, EditDiff, RealFileSystem
-from ..models.main import Traceback
 from ..models.filesystem_edit import AddDirectory, AddFile, DeleteDirectory, DeleteFile, FileSystemEdit, FileEdit, FileEditWithFullContents, RenameDirectory, RenameFile, SequentialFileSystemEdit
 from pydantic import BaseModel
 from .gui import SessionManager, session_manager
 from .ide_protocol import AbstractIdeProtocolServer
+import asyncio
+import nest_asyncio
+nest_asyncio.apply()

 router = APIRouter(prefix="/ide", tags=["ide"])
@@ -135,6 +136,9 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
             fileEdits = list(
                 map(lambda d: FileEditWithFullContents.parse_obj(d), data["fileEdits"]))
             self.onFileEdits(fileEdits)
+        elif message_type == "commandOutput":
+            output = data["output"]
+            self.onCommandOutput(output)
         elif message_type in ["highlightedCode", "openFiles", "readFile", "editFile", "workspaceDirectory", "getUserSecret", "runCommand", "uniqueId"]:
             self.sub_queue.post(message_type, data)
         else:
@@ -189,11 +193,6 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
     def onAcceptRejectSuggestion(self, suggestionId: str, accepted: bool):
         pass

-    def onTraceback(self, traceback: Traceback):
-        # Same as below, maybe not every autopilot?
-        for _, session in self.session_manager.sessions.items():
-            session.autopilot.handle_traceback(traceback)
-
     def onFileSystemUpdate(self, update: FileSystemEdit):
         # Access to Autopilot (so SessionManager)
         pass
@@ -211,6 +210,13 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         for _, session in self.session_manager.sessions.items():
             session.autopilot.handle_manual_edits(edits)

+    def onCommandOutput(self, output: str):
+        # Send the output to ALL autopilots.
+        # Maybe not ideal behavior
+        for _, session in self.session_manager.sessions.items():
+            asyncio.create_task(
+                session.autopilot.handle_command_output(output))
+
     # Request information. Session doesn't matter.
     async def getOpenFiles(self) -> List[str]:
         resp = await self._send_and_receive_json({}, OpenFilesResponse, "openFiles")
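Two things worth noting in the ide.py change: `nest_asyncio.apply()` patches the running event loop so it can be re-entered (presumably needed because these handlers fire while the websocket loop is already running), and `onCommandOutput` fans each message out as fire-and-forget tasks instead of awaiting autopilots one by one. The fan-out in isolation (session objects are stand-ins):

```python
import asyncio


def fan_out_command_output(sessions: dict, output: str) -> None:
    for _, session in sessions.items():
        # Schedule each handler concurrently; the websocket reader is not
        # blocked while autopilots decide what to do with the output.
        asyncio.create_task(session.autopilot.handle_command_output(output))
```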
"workspaceDirectory", "getUserSecret", "runCommand", "uniqueId"]:              self.sub_queue.post(message_type, data)          else: @@ -189,11 +193,6 @@ class IdeProtocolServer(AbstractIdeProtocolServer):      def onAcceptRejectSuggestion(self, suggestionId: str, accepted: bool):          pass -    def onTraceback(self, traceback: Traceback): -        # Same as below, maybe not every autopilot? -        for _, session in self.session_manager.sessions.items(): -            session.autopilot.handle_traceback(traceback) -      def onFileSystemUpdate(self, update: FileSystemEdit):          # Access to Autopilot (so SessionManager)          pass @@ -211,6 +210,13 @@ class IdeProtocolServer(AbstractIdeProtocolServer):          for _, session in self.session_manager.sessions.items():              session.autopilot.handle_manual_edits(edits) +    def onCommandOutput(self, output: str): +        # Send the output to ALL autopilots. +        # Maybe not ideal behavior +        for _, session in self.session_manager.sessions.items(): +            asyncio.create_task( +                session.autopilot.handle_command_output(output)) +      # Request information. Session doesn't matter.      async def getOpenFiles(self) -> List[str]:          resp = await self._send_and_receive_json({}, OpenFilesResponse, "openFiles") diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py index 7cfe7e0c..499d127f 100644 --- a/continuedev/src/continuedev/steps/chat.py +++ b/continuedev/src/continuedev/steps/chat.py @@ -11,9 +11,9 @@ class SimpleChatStep(Step):      async def run(self, sdk: ContinueSDK):          self.description = f"## {self.user_input}\n\n" -        for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()): +        async for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()):              self.description += chunk              await sdk.update_ui() -        self.name = sdk.models.gpt35.complete( -            f"Write a short title for the following chat message: {self.description}").strip() +        self.name = (await sdk.models.gpt35.complete( +            f"Write a short title for the following chat message: {self.description}")).strip() diff --git a/continuedev/src/continuedev/steps/chroma.py b/continuedev/src/continuedev/steps/chroma.py index 058455b2..9d085981 100644 --- a/continuedev/src/continuedev/steps/chroma.py +++ b/continuedev/src/continuedev/steps/chroma.py @@ -56,7 +56,7 @@ class AnswerQuestionChroma(Step):              Here is the answer:""") -        answer = sdk.models.gpt35.complete(prompt) +        answer = await sdk.models.gpt35.complete(prompt)          # Make paths relative to the workspace directory          answer = answer.replace(await sdk.ide.getWorkspaceDirectory(), "") diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py index af3c6cc2..7f3a93ba 100644 --- a/continuedev/src/continuedev/steps/core/core.py +++ b/continuedev/src/continuedev/steps/core/core.py @@ -72,7 +72,7 @@ class ShellCommandsStep(Step):              return f"Error when running shell commands:\n```\n{self._err_text}\n```"          cmds_str = "\n".join(self.cmds) -        return models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:") +        return await models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using 
diff --git a/continuedev/src/continuedev/steps/draft/migration.py b/continuedev/src/continuedev/steps/draft/migration.py
index 7c4b7eb5..f3b36b5e 100644
--- a/continuedev/src/continuedev/steps/draft/migration.py
+++ b/continuedev/src/continuedev/steps/draft/migration.py
@@ -13,7 +13,7 @@ class MigrationStep(Step):
         recent_edits = await sdk.ide.get_recent_edits(self.edited_file)
         recent_edits_string = "\n\n".join(
             map(lambda x: x.to_string(), recent_edits))
-        description = sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
+        description = await sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
         await sdk.run([
             "cd libs",
             "poetry run alembic revision --autogenerate -m " + description,
diff --git a/continuedev/src/continuedev/steps/input/nl_multiselect.py b/continuedev/src/continuedev/steps/input/nl_multiselect.py
index 36c489c7..aee22866 100644
--- a/continuedev/src/continuedev/steps/input/nl_multiselect.py
+++ b/continuedev/src/continuedev/steps/input/nl_multiselect.py
@@ -23,6 +23,6 @@ class NLMultiselectStep(Step):
         if first_try is not None:
             return first_try

-        gpt_parsed = sdk.models.gpt35.complete(
+        gpt_parsed = await sdk.models.gpt35.complete(
             f"These are the available options are: [{', '.join(self.options)}]. The user requested {user_response}. This is the exact string from the options array that they selected:")
         return extract_option(gpt_parsed) or self.options[0]
diff --git a/continuedev/src/continuedev/steps/main.py b/continuedev/src/continuedev/steps/main.py
index 0e42d8bf..b61aa3fe 100644
--- a/continuedev/src/continuedev/steps/main.py
+++ b/continuedev/src/continuedev/steps/main.py
@@ -145,7 +145,7 @@ class FasterEditHighlightedCodeStep(Step):
         for rif in rif_with_contents:
             rif_dict[rif.filepath] = rif.contents

-        completion = sdk.models.gpt35.complete(prompt)
+        completion = await sdk.models.gpt35.complete(prompt)

         # Temporarily doing this to generate description.
         self._prompt = prompt
@@ -213,7 +213,7 @@ class StarCoderEditHighlightedCodeStep(Step):
     _prompt_and_completion: str = ""

     async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        return models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")
+        return await models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")

     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
         range_in_files = await sdk.ide.getHighlightedCode()
@@ -247,7 +247,7 @@ class StarCoderEditHighlightedCodeStep(Step):
                 segs = full_file_contents.split(rif.contents)
                 prompt = f"<file_prefix>{segs[0]}<file_suffix>{segs[1]}" + prompt

-            completion = str((await sdk.models.starcoder()).complete(prompt))
+            completion = str(await sdk.models.starcoder.complete(prompt))

             eot_token = "<|endoftext|>"
             completion = completion.removesuffix(eot_token)
diff --git a/continuedev/src/continuedev/steps/on_traceback.py b/continuedev/src/continuedev/steps/on_traceback.py
new file mode 100644
index 00000000..de668775
--- /dev/null
+++ b/continuedev/src/continuedev/steps/on_traceback.py
@@ -0,0 +1,14 @@
+from ..core.main import Step
+from ..core.sdk import ContinueSDK
+from .chat import SimpleChatStep
+
+
+class DefaultOnTracebackStep(Step):
+    output: str
+    name: str = "Help With Traceback"
+    hide: bool = True
+
+    async def run(self, sdk: ContinueSDK):
+        sdk.run_step(SimpleChatStep(
+            name="Help With Traceback",
+            user_input=f"""I got the following error, can you please help explain how to fix it?\n\n{self.output}"""))
diff --git a/continuedev/src/continuedev/steps/react.py b/continuedev/src/continuedev/steps/react.py
index d825d424..4d310fc8 100644
--- a/continuedev/src/continuedev/steps/react.py
+++ b/continuedev/src/continuedev/steps/react.py
@@ -27,7 +27,7 @@ class NLDecisionStep(Step):

             Select the step which should be taken next to satisfy the user input. Say only the name of the selected step. You must choose one:""")

-        resp = sdk.models.gpt35.complete(prompt).lower()
+        resp = (await sdk.models.gpt35.complete(prompt)).lower()

         step_to_run = None
         for step in self.steps:
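`DefaultOnTracebackStep` closes the loop from terminal output to chat, but note that `handle_command_output` above still hard-codes `is_traceback = False`, so detection is left open. One plausible heuristic, echoing the substring checks the recipes already use (a sketch, not the project's actual detector):

```python
import re

# Matches the first line CPython prints before a stack trace.
TRACEBACK_RE = re.compile(r"Traceback \(most recent call last\):")


def looks_like_traceback(output: str) -> bool:
    return bool(TRACEBACK_RE.search(output)) or "SyntaxError" in output
```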
diff --git a/continuedev/src/continuedev/steps/search_directory.py b/continuedev/src/continuedev/steps/search_directory.py
index 9f4594b9..d2966f46 100644
--- a/continuedev/src/continuedev/steps/search_directory.py
+++ b/continuedev/src/continuedev/steps/search_directory.py
@@ -41,7 +41,7 @@ class WriteRegexPatternStep(Step):

     async def run(self, sdk: ContinueSDK):
         # Ask the user for a regex pattern
-        pattern = sdk.models.gpt35.complete(dedent(f"""\
+        pattern = await sdk.models.gpt35.complete(dedent(f"""\
             This is the user request:

             {self.user_request}
diff --git a/extension/package-lock.json b/extension/package-lock.json
index e41cd2c2..7e8da126 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.40",
+  "version": "0.0.44",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.40",
+      "version": "0.0.44",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 4b199420..8cf50d2a 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "Refine code 10x faster",
-  "version": "0.0.40",
+  "version": "0.0.44",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.74.0"
diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx
index ace0605e..2b140567 100644
--- a/extension/react-app/src/components/ComboBox.tsx
+++ b/extension/react-app/src/components/ComboBox.tsx
@@ -113,6 +113,9 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
               (event.nativeEvent as any).preventDownshiftDefault = true;
               if (props.onEnter) props.onEnter(event);
               setInputValue("");
+            } else if (event.key === "Tab" && items.length > 0) {
+              setInputValue(items[0].name);
+              event.preventDefault();
             }
           },
           ref: ref as any,
diff --git a/extension/react-app/src/components/DebugPanel.tsx b/extension/react-app/src/components/DebugPanel.tsx
index 11ec2fe2..30f38779 100644
--- a/extension/react-app/src/components/DebugPanel.tsx
+++ b/extension/react-app/src/components/DebugPanel.tsx
@@ -17,39 +17,15 @@ interface DebugPanelProps {
   }[];
 }

-const GradientContainer = styled.div`
-  // Uncomment to get gradient border
-  /* background: linear-gradient(
-    101.79deg,
-    #12887a 0%,
-    #87245c 37.64%,
-    #e12637 65.98%,
-    #ffb215 110.45%
-  ); */
-  /* padding: 10px; */
-  background-color: ${secondaryDark};
-  margin: 0;
-  height: 100%;
-  /* border: 1px solid white; */
-  border-radius: ${defaultBorderRadius};
-`;
-
-const MainDiv = styled.div`
-  height: 100%;
-  border-radius: ${defaultBorderRadius};
-  scrollbar-base-color: transparent;
-  background-color: ${vscBackground};
-`;
-
 const TabBar = styled.div<{ numTabs: number }>`
   display: grid;
   grid-template-columns: repeat(${(props) => props.numTabs}, 1fr);
 `;

 const TabsAndBodyDiv = styled.div`
-  display: grid;
-  grid-template-rows: auto 1fr;
   height: 100%;
+  border-radius: ${defaultBorderRadius};
+  scrollbar-base-color: transparent;
 `;

 function DebugPanel(props: DebugPanelProps) {
@@ -76,42 +52,43 @@ function DebugPanel(props: DebugPanelProps) {
   const [currentTab, setCurrentTab] = useState(0);

   return (
-    <GradientContainer>
-      <MainDiv>
-        <TabsAndBodyDiv>
-          {props.tabs.length > 1 && (
-            <TabBar numTabs={props.tabs.length}>
-              {props.tabs.map((tab, index) => {
-                return (
-                  <div
-                    key={index}
-                    className={`p-2 cursor-pointer text-center ${
-                      index === currentTab
-                        ? "bg-secondary-dark"
-                        : "bg-vsc-background"
-                    }`}
-                    onClick={() => setCurrentTab(index)}
-                  >
-                    {tab.title}
-                  </div>
-                );
-              })}
-            </TabBar>
-          )}
+    <TabsAndBodyDiv>
+      {props.tabs.length > 1 && (
+        <TabBar numTabs={props.tabs.length}>
           {props.tabs.map((tab, index) => {
             return (
               <div
                 key={index}
-                hidden={index !== currentTab}
-                style={{ scrollbarGutter: "stable both-edges" }}
+                className={`p-2 cursor-pointer text-center ${
+                  index === currentTab
+                    ? "bg-secondary-dark"
+                    : "bg-vsc-background"
+                }`}
+                onClick={() => setCurrentTab(index)}
               >
-                {tab.element}
+                {tab.title}
               </div>
             );
           })}
-        </TabsAndBodyDiv>
-      </MainDiv>
-    </GradientContainer>
+        </TabBar>
+      )}
+      {props.tabs.map((tab, index) => {
+        return (
+          <div
+            key={index}
+            hidden={index !== currentTab}
+            style={{
+              scrollbarGutter: "stable both-edges",
+              minHeight: "100%",
+              display: "grid",
+              gridTemplateRows: "1fr auto",
+            }}
+          >
+            {tab.element}
+          </div>
+        );
+      })}
+    </TabsAndBodyDiv>
   );
 }
diff --git a/extension/react-app/src/components/HeaderButtonWithText.tsx b/extension/react-app/src/components/HeaderButtonWithText.tsx
new file mode 100644
index 00000000..d70a3d70
--- /dev/null
+++ b/extension/react-app/src/components/HeaderButtonWithText.tsx
@@ -0,0 +1,26 @@
+import React, { useState } from "react";
+
+import { HeaderButton } from ".";
+
+interface HeaderButtonWithTextProps {
+  text: string;
+  onClick?: (e: any) => void;
+  children: React.ReactNode;
+}
+
+const HeaderButtonWithText = (props: HeaderButtonWithTextProps) => {
+  const [hover, setHover] = useState(false);
+  return (
+    <HeaderButton
+      style={{ padding: "3px" }}
+      onMouseEnter={() => setHover(true)}
+      onMouseLeave={() => setHover(false)}
+      onClick={props.onClick}
+    >
+      <span hidden={!hover}>{props.text}</span>
+      {props.children}
+    </HeaderButton>
+  );
+};
+
+export default HeaderButtonWithText;
"@styled-icons/heroicons-outline";  import { HistoryNode } from "../../../schema/HistoryNode";  import ReactMarkdown from "react-markdown"; -import ContinueButton from "./ContinueButton"; -import InputAndButton from "./InputAndButton"; -import ToggleErrorDiv from "./ToggleErrorDiv"; +import HeaderButtonWithText from "./HeaderButtonWithText";  interface StepContainerProps {    historyNode: HistoryNode; @@ -152,23 +150,25 @@ function StepContainer(props: StepContainerProps) {              </HeaderButton> */}              <> -              <HeaderButton +              <HeaderButtonWithText                  onClick={(e) => {                    e.stopPropagation();                    props.onDelete();                  }} +                text="Delete"                >                  <XMark size="1.6em" onClick={props.onDelete} /> -              </HeaderButton> +              </HeaderButtonWithText>                {props.historyNode.observation?.error ? ( -                <HeaderButton +                <HeaderButtonWithText +                  text="Retry"                    onClick={(e) => {                      e.stopPropagation();                      props.onRetry();                    }}                  >                    <ArrowPath size="1.6em" onClick={props.onRetry} /> -                </HeaderButton> +                </HeaderButtonWithText>                ) : (                  <></>                )} @@ -193,7 +193,7 @@ function StepContainer(props: StepContainerProps) {            ) : (              <ReactMarkdown                key={1} -              className="overflow-scroll" +              className="overflow-x-scroll"                components={                  {                    // pre: ({ node, ...props }) => { diff --git a/extension/react-app/src/index.css b/extension/react-app/src/index.css index 20599d30..32a92d0e 100644 --- a/extension/react-app/src/index.css +++ b/extension/react-app/src/index.css @@ -21,7 +21,7 @@  html,  body,  #root { -  height: calc(100%); +  height: 100%;  }  body { @@ -31,4 +31,5 @@ body {    font-family: "Mona Sans", "Arial", sans-serif;    padding: 0px;    margin: 0px; +  height: 100%;  } diff --git a/extension/react-app/src/tabs/gui.tsx b/extension/react-app/src/tabs/gui.tsx index 5316f42b..279d052b 100644 --- a/extension/react-app/src/tabs/gui.tsx +++ b/extension/react-app/src/tabs/gui.tsx @@ -14,25 +14,18 @@ import StepContainer from "../components/StepContainer";  import useContinueGUIProtocol from "../hooks/useWebsocket";  import {    BookOpen, -  ChatBubbleOvalLeft,    ChatBubbleOvalLeftEllipsis,    Trash,  } from "@styled-icons/heroicons-outline";  import ComboBox from "../components/ComboBox";  import TextDialog from "../components/TextDialog"; +import HeaderButtonWithText from "../components/HeaderButtonWithText"; -const MainDiv = styled.div` -  display: grid; -  grid-template-rows: 1fr auto; +const TopGUIDiv = styled.div` +  overflow: hidden;  `; -let TopGUIDiv = styled.div` -  display: grid; -  grid-template-columns: 1fr; -  background-color: ${vscBackground}; -`; - -let UserInputQueueItem = styled.div` +const UserInputQueueItem = styled.div`    border-radius: ${defaultBorderRadius};    color: gray;    padding: 8px; @@ -40,7 +33,7 @@ let UserInputQueueItem = styled.div`    text-align: center;  `; -const TopBar = styled.div` +const Footer = styled.footer`    display: flex;    flex-direction: row;    gap: 8px; @@ -303,103 +296,98 @@ function GUI(props: GUIProps) {            setShowFeedbackDialog(false);          }}        
diff --git a/extension/react-app/src/tabs/gui.tsx b/extension/react-app/src/tabs/gui.tsx
index 5316f42b..279d052b 100644
--- a/extension/react-app/src/tabs/gui.tsx
+++ b/extension/react-app/src/tabs/gui.tsx
@@ -14,25 +14,18 @@ import StepContainer from "../components/StepContainer";
 import useContinueGUIProtocol from "../hooks/useWebsocket";
 import {
   BookOpen,
-  ChatBubbleOvalLeft,
   ChatBubbleOvalLeftEllipsis,
   Trash,
 } from "@styled-icons/heroicons-outline";
 import ComboBox from "../components/ComboBox";
 import TextDialog from "../components/TextDialog";
+import HeaderButtonWithText from "../components/HeaderButtonWithText";

-const MainDiv = styled.div`
-  display: grid;
-  grid-template-rows: 1fr auto;
+const TopGUIDiv = styled.div`
+  overflow: hidden;
 `;

-let TopGUIDiv = styled.div`
-  display: grid;
-  grid-template-columns: 1fr;
-  background-color: ${vscBackground};
-`;
-
-let UserInputQueueItem = styled.div`
+const UserInputQueueItem = styled.div`
   border-radius: ${defaultBorderRadius};
   color: gray;
   padding: 8px;
@@ -40,7 +33,7 @@ let UserInputQueueItem = styled.div`
   text-align: center;
 `;

-const TopBar = styled.div`
+const Footer = styled.footer`
   display: flex;
   flex-direction: row;
   gap: 8px;
@@ -303,103 +296,98 @@ function GUI(props: GUIProps) {
           setShowFeedbackDialog(false);
         }}
       ></TextDialog>
-      <MainDiv>
-        <TopGUIDiv
-          ref={topGuiDivRef}
-          onKeyDown={(e) => {
-            if (e.key === "Enter" && e.ctrlKey) {
-              onMainTextInput();
-            }
-          }}
-        >
-          {typeof client === "undefined" && (
-            <>
-              <Loader></Loader>
-              <p style={{ textAlign: "center" }}>
-                Trying to reconnect with server...
-              </p>
-            </>
-          )}
-          {history?.timeline.map((node: HistoryNode, index: number) => {
-            return (
-              <StepContainer
-                key={index}
-                onUserInput={(input: string) => {
-                  onStepUserInput(input, index);
-                }}
-                inFuture={index > history?.current_index}
-                historyNode={node}
-                onRefinement={(input: string) => {
-                  client?.sendRefinementInput(input, index);
-                }}
-                onReverse={() => {
-                  client?.reverseToIndex(index);
-                }}
-                onRetry={() => {
-                  client?.retryAtIndex(index);
-                  setWaitingForSteps(true);
-                }}
-                onDelete={() => {
-                  client?.deleteAtIndex(index);
-                }}
-              />
-            );
-          })}
-          {waitingForSteps && <Loader></Loader>}
-          <div>
-            {userInputQueue.map((input) => {
-              return <UserInputQueueItem>{input}</UserInputQueueItem>;
-            })}
-          </div>
-
-          <ComboBox
-            disabled={
-              history?.timeline.length
-                ? history.timeline[history.current_index].step.name ===
-                  "Waiting for user confirmation"
-                : false
-            }
-            ref={mainTextInputRef}
-            onEnter={(e) => {
-              onMainTextInput();
-              e.stopPropagation();
-              e.preventDefault();
-            }}
-            onInputValueChange={() => {}}
-            items={availableSlashCommands}
-          />
-          <ContinueButton onClick={onMainTextInput} />
-
-          <TopBar>
-            <a href="https://continue.dev/docs" className="no-underline">
-              <HeaderButton style={{ padding: "3px" }}>
-                Continue Docs
-                <BookOpen size="1.6em" />
-              </HeaderButton>
-            </a>
-            <HeaderButton
-              style={{ padding: "3px" }}
-              onClick={() => {
-                // Set dialog open
-                setShowFeedbackDialog(true);
+      <TopGUIDiv
+        ref={topGuiDivRef}
+        onKeyDown={(e) => {
+          if (e.key === "Enter" && e.ctrlKey) {
+            onMainTextInput();
+          }
+        }}
+      >
+        {typeof client === "undefined" && (
+          <>
+            <Loader></Loader>
+            <p style={{ textAlign: "center" }}>
+              Trying to reconnect with server...
+            </p>
+          </>
+        )}
+        {history?.timeline.map((node: HistoryNode, index: number) => {
+          return (
+            <StepContainer
+              key={index}
+              onUserInput={(input: string) => {
+                onStepUserInput(input, index);
+              }}
+              inFuture={index > history?.current_index}
+              historyNode={node}
+              onRefinement={(input: string) => {
+                client?.sendRefinementInput(input, index);
+              }}
+              onReverse={() => {
+                client?.reverseToIndex(index);
               }}
-            >
-              Feedback
-              <ChatBubbleOvalLeftEllipsis size="1.6em" />
-            </HeaderButton>
-            <HeaderButton
-              onClick={() => {
-                client?.sendClear();
+              onRetry={() => {
+                client?.retryAtIndex(index);
+                setWaitingForSteps(true);
               }}
-              style={{ padding: "3px" }}
-            >
-              Clear History
-              <Trash size="1.6em" />
-            </HeaderButton>
-          </TopBar>
-        </TopGUIDiv>
-      </MainDiv>
+              onDelete={() => {
+                client?.deleteAtIndex(index);
+              }}
+            />
+          );
+        })}
+        {waitingForSteps && <Loader></Loader>}
+
+        <div>
+          {userInputQueue.map((input) => {
+            return <UserInputQueueItem>{input}</UserInputQueueItem>;
+          })}
+        </div>
+
+        <ComboBox
+          disabled={
+            history?.timeline.length
+              ? history.timeline[history.current_index].step.name ===
+                "Waiting for user confirmation"
+              : false
+          }
+          ref={mainTextInputRef}
+          onEnter={(e) => {
+            onMainTextInput();
+            e.stopPropagation();
+            e.preventDefault();
+          }}
+          onInputValueChange={() => {}}
+          items={availableSlashCommands}
+        />
+        <ContinueButton onClick={onMainTextInput} />
+      </TopGUIDiv>
+      <Footer>
+        <a href="https://continue.dev/docs" className="no-underline">
+          <HeaderButtonWithText text="Continue Docs">
+            <BookOpen size="1.6em" />
+          </HeaderButtonWithText>
+        </a>
+        <HeaderButtonWithText
+          onClick={() => {
+            // Set dialog open
+            setShowFeedbackDialog(true);
+          }}
+          text="Feedback"
+        >
+          <ChatBubbleOvalLeftEllipsis size="1.6em" />
+        </HeaderButtonWithText>
+        <HeaderButtonWithText
+          onClick={() => {
+            client?.sendClear();
+          }}
+          text="Clear History"
+        >
+          <Trash size="1.6em" />
+        </HeaderButtonWithText>
+      </Footer>
     </>
   );
 }
diff --git a/extension/scripts/continuedev-0.1.1-py3-none-any.whl b/extension/scripts/continuedev-0.1.1-py3-none-any.whl
index 2f8f1550..614190c7 100644
Binary files a/extension/scripts/continuedev-0.1.1-py3-none-any.whl and b/extension/scripts/continuedev-0.1.1-py3-none-any.whl differ
diff --git a/extension/src/activation/activate.ts b/extension/src/activation/activate.ts
index 135a8ec7..77010241 100644
--- a/extension/src/activation/activate.ts
+++ b/extension/src/activation/activate.ts
@@ -8,6 +8,7 @@ import * as path from "path";
 import IdeProtocolClient from "../continueIdeClient";
 import { getContinueServerUrl } from "../bridge";
 import { setupDebugPanel, ContinueGUIWebviewViewProvider } from "../debugPanel";
+import { CapturedTerminal } from "../terminal/terminalEmulator";

 export let extensionContext: vscode.ExtensionContext | undefined = undefined;
@@ -47,5 +48,36 @@ export function activateExtension(
     );
   })();

+  // All opened terminals should be replaced by our own terminal
+  vscode.window.onDidOpenTerminal((terminal) => {
+    if (terminal.name === "Continue") {
+      return;
+    }
+    const options = terminal.creationOptions;
+    const capturedTerminal = new CapturedTerminal({
+      ...options,
+      name: "Continue",
+    });
+    terminal.dispose();
+  });
+
+  // If any terminals are open to start, replace them
+  vscode.window.terminals.forEach((terminal) => {
+    if (terminal.name === "Continue") {
+      return;
+    }
+    const options = terminal.creationOptions;
+    const capturedTerminal = new CapturedTerminal(
+      {
+        ...options,
+        name: "Continue",
+      },
+      (commandOutput: string) => {
+        ideProtocolClient.sendCommandOutput(commandOutput);
+      }
+    );
+    terminal.dispose();
+  });
+
   extensionContext = context;
 }
diff --git a/extension/src/continueIdeClient.ts b/extension/src/continueIdeClient.ts
index ef9a91c8..a889d3dc 100644
--- a/extension/src/continueIdeClient.ts
+++ b/extension/src/continueIdeClient.ts
@@ -326,13 +326,19 @@ class IdeProtocolClient {
   private continueTerminal: CapturedTerminal | undefined;

   async runCommand(command: string) {
-    if (!this.continueTerminal) {
-      this.continueTerminal = new CapturedTerminal("Continue");
+    if (!this.continueTerminal || this.continueTerminal.isClosed()) {
+      this.continueTerminal = new CapturedTerminal({
+        name: "Continue",
+      });
     }
     this.continueTerminal.show();
     return await this.continueTerminal.runCommand(command);
   }
+
+  sendCommandOutput(output: string) {
+    this.messenger?.send("commandOutput", { output });
+  }
 }

 export default IdeProtocolClient;
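`sendCommandOutput` gives the extension a path back to the server: terminal output travels as a `commandOutput` message, which the server's websocket handler routes to `onCommandOutput`. Assuming the messenger's usual `messageType`/`data` framing (not shown in this diff), the payload looks roughly like:

```python
import json

message = json.dumps({
    "messageType": "commandOutput",
    # the server reads data["output"] and passes it to onCommandOutput
    "data": {"output": "bash-5.1$ python main.py\nTraceback (most recent call last): ..."},
})
```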
diff --git a/extension/src/terminal/terminalEmulator.ts b/extension/src/terminal/terminalEmulator.ts
index b3031baf..35f02ac0 100644
--- a/extension/src/terminal/terminalEmulator.ts
+++ b/extension/src/terminal/terminalEmulator.ts
@@ -62,21 +62,29 @@ export class CapturedTerminal {
     this.terminal.show();
   }

+  isClosed(): boolean {
+    return this.terminal.exitStatus !== undefined;
+  }
+
   private commandQueue: [string, (output: string) => void][] = [];
   private hasRunCommand: boolean = false;

+  private dataEndsInPrompt(strippedData: string): boolean {
+    const lines = this.dataBuffer.split("\n");
+    return (
+      lines.length > 0 &&
+      (lines[lines.length - 1].includes("bash-") ||
+        lines[lines.length - 1].includes(") $ ")) &&
+      lines[lines.length - 1].includes("$")
+    );
+  }
+
   private async waitForCommandToFinish() {
     return new Promise<string>((resolve, reject) => {
       this.onDataListeners.push((data: any) => {
         const strippedData = stripAnsi(data);
         this.dataBuffer += strippedData;
-        const lines = this.dataBuffer.split("\n");
-        if (
-          lines.length > 0 &&
-          (lines[lines.length - 1].includes("bash-") ||
-            lines[lines.length - 1].includes(") $ ")) &&
-          lines[lines.length - 1].includes("$")
-        ) {
+        if (this.dataEndsInPrompt(strippedData)) {
           resolve(this.dataBuffer);
           this.dataBuffer = "";
           this.onDataListeners = [];
@@ -112,8 +120,30 @@ export class CapturedTerminal {
   private readonly writeEmitter: vscode.EventEmitter<string>;

-  constructor(terminalName: string) {
-    this.shellCmd = "bash"; // getDefaultShell();
+  private splitByCommandsBuffer: string = "";
+  private readonly onCommandOutput: ((output: string) => void) | undefined;
+
+  splitByCommandsListener(data: string) {
+    // Split the output by commands so it can be sent to Continue Server
+
+    const strippedData = stripAnsi(data);
+    this.splitByCommandsBuffer += strippedData;
+    if (this.dataEndsInPrompt(strippedData)) {
+      if (this.onCommandOutput) {
+        this.onCommandOutput(this.splitByCommandsBuffer);
+      }
+      this.dataBuffer = "";
+    }
+  }
+
+  constructor(
+    options: { name: string } & Partial<vscode.ExtensionTerminalOptions>,
+    onCommandOutput?: (output: string) => void
+  ) {
+    this.onCommandOutput = onCommandOutput;
+
+    // this.shellCmd = "bash"; // getDefaultShell();
+    this.shellCmd = getDefaultShell();

     const env = { ...(process.env as any) };
     if (os.platform() !== "win32") {
@@ -154,7 +184,7 @@ export class CapturedTerminal {
     // Create and clear the terminal
     this.terminal = vscode.window.createTerminal({
-      name: terminalName,
+      ...options,
       pty: newPty,
     });
     this.terminal.show();
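`dataEndsInPrompt` is the heart of command capture: output is buffered until the last line looks like a shell prompt, at which point the buffer is flushed as one command's output. The same heuristic in Python, for reference (the `bash-` and `) $ ` markers mirror the TypeScript above and are shell-specific assumptions; note that the TypeScript version inspects `this.dataBuffer` rather than its `strippedData` argument):

```python
def data_ends_in_prompt(buffer: str) -> bool:
    lines = buffer.split("\n")
    if not lines:
        return False
    last = lines[-1]
    # bash-style prompts ("bash-5.1$") or venv-style prompts ("(env) $ ")
    return ("bash-" in last or ") $ " in last) and "$" in last
```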
