48 files changed, 623 insertions, 364 deletions
diff --git a/continuedev/src/continuedev/core/abstract_sdk.py b/continuedev/src/continuedev/core/abstract_sdk.py
index 0658f1b8..017e75ef 100644
--- a/continuedev/src/continuedev/core/abstract_sdk.py
+++ b/continuedev/src/continuedev/core/abstract_sdk.py
@@ -85,7 +85,7 @@ class AbstractContinueSDK(ABC):
         pass
 
     @abstractmethod
-    def add_chat_context(self, content: str, role: ChatMessageRole = "assistent"):
+    def add_chat_context(self, content: str, role: ChatMessageRole = "assistant"):
         pass
 
     @abstractmethod
diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index 703a73af..73f46a37 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -13,6 +13,8 @@ from ..steps.core.core import ReversibleStep, ManualEditStep, UserInputStep
 from ..libs.util.telemetry import capture_event
 from .sdk import ContinueSDK
 import asyncio
+from ..libs.util.step_name_to_steps import get_step_from_name
+from ..libs.util.traceback_parsers import get_python_traceback, get_javascript_traceback
 
 
 class Autopilot(ContinueBaseModel):
@@ -88,9 +90,17 @@ class Autopilot(ContinueBaseModel):
         self._manual_edits_buffer.append(edit)
         # TODO: You're storing a lot of unecessary data here. Can compress into EditDiffs on the spot, and merge.
         # self._manual_edits_buffer = merge_file_edit(self._manual_edits_buffer, edit)
-
-    def handle_traceback(self, traceback: str):
-        raise NotImplementedError
+        # Note that this is being overriden to do nothing in DemoAgent
+
+    async def handle_command_output(self, output: str):
+        get_traceback_funcs = [get_python_traceback, get_javascript_traceback]
+        for get_tb_func in get_traceback_funcs:
+            traceback = get_tb_func(output)
+            if traceback is not None:
+                for tb_step in self.continue_sdk.config.on_traceback:
+                    step = get_step_from_name(
+                        tb_step.step_name, {"output": output, **tb_step.params})
+                    await self._run_singular_step(step)
 
     _step_depth: int = 0
 
@@ -99,9 +109,23 @@ class Autopilot(ContinueBaseModel):
 
     async def delete_at_index(self, index: int):
         self.history.timeline[index].step.hide = True
+        self.history.timeline[index].deleted = True
         await self.update_subscribers()
 
     async def _run_singular_step(self, step: "Step", is_future_step: bool = False) -> Coroutine[Observation, None, None]:
+        # Allow config to set disallowed steps
+        if step.__class__.__name__ in self.continue_sdk.config.disallowed_steps:
+            return None
+
+        # If a parent step is deleted/cancelled, don't run this step
+        last_depth = self._step_depth
+        i = self.history.current_index
+        while i >= 0 and self.history.timeline[i].depth > last_depth:
+            if self.history.timeline[i].deleted:
+                return None
+            last_depth = self.history.timeline[i].depth
+            i -= 1
+
         capture_event(self.continue_sdk.ide.unique_id, 'step run', {
                       'step_name': step.name, 'params': step.dict()})
 
@@ -114,7 +138,7 @@ class Autopilot(ContinueBaseModel):
             await self._run_singular_step(manualEditsStep)
 
         # Update history - do this first so we get top-first tree ordering
-        self.history.add_node(HistoryNode(
+        index_of_history_node = self.history.add_node(HistoryNode(
             step=step, observation=None, depth=self._step_depth))
 
         # Call all subscribed callbacks
@@ -127,6 +151,10 @@ class Autopilot(ContinueBaseModel):
         try:
             observation = await step(self.continue_sdk)
         except Exception as e:
+            if self.history.timeline[index_of_history_node].deleted:
+                # If step was deleted/cancelled, don't show error or allow retry
+                return None
+
             caught_error = True
 
             is_continue_custom_exception = issubclass(
@@ -176,8 +204,7 @@ class Autopilot(ContinueBaseModel):
 
         # Add observation to history, unless already attached error observation
         if not caught_error:
-            self.history.get_last_at_depth(
-                self._step_depth, include_current=True).observation = observation
+            self.history.timeline[index_of_history_node].observation = observation
             await self.update_subscribers()
 
         # Update its description
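The new `handle_command_output` hook above replaces the never-implemented `handle_traceback`: terminal output is run through a list of traceback detectors, and on a hit, every step configured under `on_traceback` is instantiated and run. To make the detect-then-dispatch flow easier to follow outside the diff, here is a minimal self-contained sketch; the `run_step` stub and the plain-dict config entries are stand-ins, not code from the repository.

```python
import asyncio
from typing import Callable, List, Optional


def get_python_traceback(output: str) -> Optional[str]:
    # Same idea as the real parser: keep everything from the prefix onward
    prefix = "Traceback (most recent call last):"
    if prefix in output:
        return prefix + output.split(prefix)[-1]
    return None


async def run_step(step_name: str, params: dict):
    # Stand-in for Autopilot._run_singular_step
    print(f"running {step_name} with params {list(params)}")


async def handle_command_output(output: str,
                                parsers: List[Callable[[str], Optional[str]]],
                                on_traceback: List[dict]):
    # Mirrors Autopilot.handle_command_output: any parser that finds a
    # traceback triggers every configured on_traceback step, with the raw
    # output merged into the step's params.
    for parse in parsers:
        traceback = parse(output)
        if traceback is not None:
            for tb_step in on_traceback:
                await run_step(tb_step["step_name"],
                               {"output": output, **tb_step.get("params", {})})


asyncio.run(handle_command_output(
    "Traceback (most recent call last):\n  File ...\nZeroDivisionError: division by zero",
    [get_python_traceback],
    [{"step_name": "DefaultOnTracebackStep"}],
))
```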
diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py
index 6a811412..859c6188 100644
--- a/continuedev/src/continuedev/core/config.py
+++ b/continuedev/src/continuedev/core/config.py
@@ -12,11 +12,17 @@ class SlashCommand(BaseModel):
     params: Optional[Dict] = {}
 
 
+class OnTracebackSteps(BaseModel):
+    step_name: str
+    params: Optional[Dict] = {}
+
+
 class ContinueConfig(BaseModel):
     """
     A pydantic class for the continue config file.
     """
     steps_on_startup: Optional[Dict[str, Dict]] = {}
+    disallowed_steps: Optional[List[str]] = []
     server_url: Optional[str] = None
     allow_anonymous_telemetry: Optional[bool] = True
     default_model: Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k",
@@ -48,6 +54,8 @@ class ContinueConfig(BaseModel):
             step_name="FeedbackStep",
         )
     ]
+    on_traceback: Optional[List[OnTracebackSteps]] = [
+        OnTracebackSteps(step_name="DefaultOnTracebackStep")]
 
 
 def load_config(config_file: str) -> ContinueConfig:
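For reference, this is roughly how the two new fields slot into a config object; a hypothetical usage sketch, with the pydantic models re-declared so the snippet runs on its own (the real config file format is not shown in this diff).

```python
from typing import Dict, List, Optional
from pydantic import BaseModel


class OnTracebackSteps(BaseModel):
    step_name: str
    params: Optional[Dict] = {}


class ContinueConfig(BaseModel):
    steps_on_startup: Optional[Dict[str, Dict]] = {}
    disallowed_steps: Optional[List[str]] = []
    on_traceback: Optional[List[OnTracebackSteps]] = [
        OnTracebackSteps(step_name="DefaultOnTracebackStep")]


config = ContinueConfig(
    disallowed_steps=["FeedbackStep"],  # these steps are skipped even if requested
    on_traceback=[                      # these run whenever a traceback is detected
        OnTracebackSteps(step_name="DefaultOnTracebackStep")],
)
print(config.on_traceback[0].step_name)
```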
diff --git a/continuedev/src/continuedev/core/main.py b/continuedev/src/continuedev/core/main.py
index f6b26d69..0c7ec67f 100644
--- a/continuedev/src/continuedev/core/main.py
+++ b/continuedev/src/continuedev/core/main.py
@@ -11,6 +11,8 @@ ChatMessageRole = Literal["assistant", "user", "system"]
 class ChatMessage(ContinueBaseModel):
     role: ChatMessageRole
     content: str
+    # A summary for pruning chat context to fit context window. Often the Step name.
+    summary: str
 
 
 class HistoryNode(ContinueBaseModel):
@@ -18,11 +20,12 @@ class HistoryNode(ContinueBaseModel):
     step: "Step"
     observation: Union[Observation, None]
     depth: int
+    deleted: bool = False
 
     def to_chat_messages(self) -> List[ChatMessage]:
         if self.step.description is None:
             return self.step.chat_context
-        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description)]
+        return self.step.chat_context + [ChatMessage(role="assistant", content=self.step.description, summary=self.step.name)]
 
 
 class History(ContinueBaseModel):
@@ -37,9 +40,11 @@ class History(ContinueBaseModel):
             msgs += node.to_chat_messages()
         return msgs
 
-    def add_node(self, node: HistoryNode):
+    def add_node(self, node: HistoryNode) -> int:
+        """ Add node and return the index where it was added """
        self.timeline.insert(self.current_index + 1, node)
        self.current_index += 1
+        return self.current_index
 
     def get_current(self) -> Union[HistoryNode, None]:
         if self.current_index < 0:
diff --git a/continuedev/src/continuedev/core/policy.py b/continuedev/src/continuedev/core/policy.py
index 255f598d..1b53834b 100644
--- a/continuedev/src/continuedev/core/policy.py
+++ b/continuedev/src/continuedev/core/policy.py
@@ -8,7 +8,7 @@ from ..recipes.DeployPipelineAirflowRecipe.main import DeployPipelineAirflowReci
 from ..recipes.AddTransformRecipe.main import AddTransformRecipe
 from .main import Step, Validator, History, Policy
 from .observation import Observation, TracebackObservation, UserInputObservation
-from ..steps.main import EditHighlightedCodeStep, SolveTracebackStep, RunCodeStep, FasterEditHighlightedCodeStep, StarCoderEditHighlightedCodeStep, EmptyStep, SetupContinueWorkspaceStep
+from ..steps.main import EditHighlightedCodeStep, SolveTracebackStep
 from ..recipes.WritePytestsRecipe.main import WritePytestsRecipe
 from ..recipes.ContinueRecipeRecipe.main import ContinueStepStep
 from ..steps.comment_code import CommentCodeStep
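A note on why `add_node` now returns an index: once several steps run concurrently, "the last node at my depth" is ambiguous, while a returned index always points at the right node (this is what replaces `get_last_at_depth` in the autopilot hunk above). A toy version, with the real `HistoryNode` fields trimmed down:

```python
from typing import List, Optional


class Node:
    def __init__(self, name: str):
        self.name = name
        self.observation: Optional[str] = None
        self.deleted = False


class History:
    def __init__(self):
        self.timeline: List[Node] = []
        self.current_index = -1

    def add_node(self, node: Node) -> int:
        """Add node and return the index where it was added."""
        self.timeline.insert(self.current_index + 1, node)
        self.current_index += 1
        return self.current_index


history = History()
idx = history.add_node(Node("EditHighlightedCodeStep"))
history.add_node(Node("SimpleChatStep"))       # another step runs afterwards
history.timeline[idx].observation = "success"  # still targets the right node
print(history.timeline[idx].name, history.timeline[idx].observation)
```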
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 9f31c4c2..7159beaa 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -177,26 +177,29 @@ class ContinueSDK(AbstractContinueSDK):
     def raise_exception(self, message: str, title: str, with_step: Union[Step, None] = None):
         raise ContinueCustomException(message, title, with_step)
 
-    def add_chat_context(self, content: str, role: ChatMessageRole = "assistent"):
+    def add_chat_context(self, content: str, summary: Union[str, None] = None, role: ChatMessageRole = "assistant"):
         self.history.timeline[self.history.current_index].step.chat_context.append(
-            ChatMessage(content=content, role=role))
+            ChatMessage(content=content, role=role, summary=summary))
 
     async def get_chat_context(self) -> List[ChatMessage]:
         history_context = self.history.to_chat_history()
         highlighted_code = await self.ide.getHighlightedCode()
+
+        preface = "The following code is highlighted"
+        if len(highlighted_code) == 0:
+            preface = "The following file is open"
 
         # Get the full contents of all open files
         files = await self.ide.getOpenFiles()
-        contents = {}
-        for file in files:
-            contents[file] = await self.ide.readFile(file)
+        if len(files) > 0:
+            content = await self.ide.readFile(files[0])
+            highlighted_code = [
+                RangeInFile.from_entire_file(files[0], content)]
 
-        highlighted_code = [RangeInFile.from_entire_file(
-            filepath, content) for filepath, content in contents.items()]
         for rif in highlighted_code:
             code = await self.ide.readRangeInFile(rif)
             history_context.append(ChatMessage(
-                content=f"The following code is highlighted:\n```\n{code}\n```", role="user"))
+                content=f"{preface} ({rif.filepath}):\n```\n{code}\n```", role="user", summary=f"{preface}: {rif.filepath}"))
 
         return history_context
 
     async def update_ui(self):
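The new context-selection rule in `get_chat_context` is: prefer highlighted code, otherwise fall back to the first open file. A condensed sketch of just that rule, with the IDE calls faked as plain data (the function and sample values are assumptions for illustration):

```python
import asyncio
from typing import Dict, List, Tuple


async def build_context(highlighted: List[Tuple[str, str]],
                        open_files: Dict[str, str]) -> List[str]:
    preface = "The following code is highlighted"
    if len(highlighted) == 0:
        preface = "The following file is open"
        if len(open_files) > 0:
            # Only the first open file is used as fallback context
            path, content = next(iter(open_files.items()))
            highlighted = [(path, content)]
    return [f"{preface} ({path}):\n```\n{code}\n```" for path, code in highlighted]


msgs = asyncio.run(build_context([], {"main.py": "print('hi')"}))
print(msgs[0].splitlines()[0])  # -> The following file is open (main.py):
```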
diff --git a/continuedev/src/continuedev/libs/llm/__init__.py b/continuedev/src/continuedev/libs/llm/__init__.py
index 2986b2c4..108eedf1 100644
--- a/continuedev/src/continuedev/libs/llm/__init__.py
+++ b/continuedev/src/continuedev/libs/llm/__init__.py
@@ -1,5 +1,5 @@
 from abc import ABC
-from typing import Any, Dict, Generator, List, Union
+from typing import Any, Coroutine, Dict, Generator, List, Union
 
 from ...core.main import ChatMessage
 from ...models.main import AbstractModel
@@ -9,17 +9,14 @@ from pydantic import BaseModel
 class LLM(ABC):
     system_message: Union[str, None] = None
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs):
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         """Return the completion of the text with the given temperature."""
-        raise
+        raise NotImplementedError
 
     def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
         """Yield a stream of chat messages."""
         raise NotImplementedError
 
-    def __call__(self, prompt: str, **kwargs):
-        return self.complete(prompt, **kwargs)
-
    def with_system_message(self, system_message: Union[str, None]):
        """Return a new model with the given system message."""
        raise NotImplementedError
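This hunk makes the base `LLM.complete` a coroutine and drops the synchronous `__call__` shortcut, which is why every `sdk.models.*.complete(...)` call site later in this changeset gains an `await`. A minimal sketch of the new contract, with a toy subclass standing in for the real models:

```python
import asyncio
from abc import ABC
from typing import List


class LLM(ABC):
    async def complete(self, prompt: str, with_history: List = [], **kwargs) -> str:
        """Return the completion of the text."""
        raise NotImplementedError


class EchoLLM(LLM):
    async def complete(self, prompt: str, with_history: List = [], **kwargs) -> str:
        return f"echo: {prompt}"


async def main():
    model = EchoLLM()
    print(await model.complete("hello"))  # was: model("hello") via __call__

asyncio.run(main())
```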
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index 136e86b4..22c28b20 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -1,7 +1,7 @@
 import asyncio
 from functools import cached_property
 import time
-from typing import Any, Dict, Generator, List, Union
+from typing import Any, Coroutine, Dict, Generator, List, Union
 from ...core.main import ChatMessage
 import openai
 import aiohttp
@@ -47,12 +47,37 @@ class OpenAI(LLM):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))
 
     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
-        tokens = tokens_for_completion
-        for i in range(len(chat_history) - 1, -1, -1):
-            message = chat_history[i]
-            tokens += self.count_tokens(message.content)
-            if tokens > max_tokens:
-                return chat_history[i + 1:]
+        total_tokens = tokens_for_completion + \
+            sum(self.count_tokens(message.content) for message in chat_history)
+
+        # 1. Replace beyond last 5 messages with summary
+        i = 0
+        while total_tokens > max_tokens and i < len(chat_history) - 5:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 2. Remove entire messages until the last 5
+        while len(chat_history) > 5 and total_tokens > max_tokens:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
+        # 3. Truncate message in the last 5
+        i = 0
+        while total_tokens > max_tokens:
+            message = chat_history[0]
+            total_tokens -= self.count_tokens(message.content)
+            total_tokens += self.count_tokens(message.summary)
+            message.content = message.summary
+            i += 1
+
+        # 4. Remove entire messages in the last 5
+        while total_tokens > max_tokens and len(chat_history) > 0:
+            message = chat_history.pop(0)
+            total_tokens -= self.count_tokens(message.content)
+
         return chat_history
 
     def with_system_message(self, system_message: Union[str, None]):
@@ -83,7 +108,7 @@ class OpenAI(LLM):
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -112,7 +137,7 @@ class OpenAI(LLM):
             for chunk in generator:
                 yield chunk.choices[0].text
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         t1 = time.time()
 
         self.completion_count += 1
@@ -120,15 +145,15 @@ class OpenAI(LLM):
                 "frequency_penalty": 0, "presence_penalty": 0, "stream": False} | kwargs
 
         if args["model"] in CHAT_MODELS:
-            resp = openai.ChatCompletion.create(
+            resp = (await openai.ChatCompletion.acreate(
                 messages=self.compile_chat_messages(with_history, prompt),
                 **args,
-            ).choices[0].message.content
+            )).choices[0].message.content
         else:
-            resp = openai.Completion.create(
+            resp = (await openai.Completion.acreate(
                 prompt=prompt,
                 **args,
-            ).choices[0].text
+            )).choices[0].text
 
         t2 = time.time()
         print("Completion time:", t2 - t1)
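The new `__prune_chat_history` works in four escalating stages: summarize old messages, drop old messages, summarize the most recent five, then drop those too. Below is the same strategy extracted into a standalone, testable function; `len(text.split())` stands in for the tiktoken-based `count_tokens`, and note that the hunk as committed reads `chat_history[0]` inside its summarization loops, so this sketch makes the intended iteration explicit.

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Msg:
    content: str
    summary: str


def count_tokens(text: str) -> int:
    return len(text.split())  # crude stand-in for a real tokenizer


def prune(history: List[Msg], max_tokens: int, completion_tokens: int) -> List[Msg]:
    total = completion_tokens + sum(count_tokens(m.content) for m in history)

    # 1. Summarize messages older than the last 5
    i = 0
    while total > max_tokens and i < len(history) - 5:
        m = history[i]
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary
        i += 1

    # 2. Drop whole messages until only 5 remain
    while len(history) > 5 and total > max_tokens:
        total -= count_tokens(history.pop(0).content)

    # 3. Summarize the remaining messages
    for m in history:
        if total <= max_tokens:
            break
        total += count_tokens(m.summary) - count_tokens(m.content)
        m.content = m.summary

    # 4. Drop remaining messages as a last resort
    while total > max_tokens and len(history) > 0:
        total -= count_tokens(history.pop(0).content)
    return history


msgs = [Msg("a long message " * 10, "summary") for _ in range(8)]
print(len(prune(msgs, max_tokens=40, completion_tokens=10)))  # -> 5
```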
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 4ff57101..93f2d48a 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -1,8 +1,9 @@
 from functools import cached_property
 import json
-from typing import Any, Dict, Generator, List, Literal, Union
+from typing import Any, Coroutine, Dict, Generator, List, Literal, Union
 import requests
 import tiktoken
+import aiohttp
 
 from ...core.main import ChatMessage
 from ..llm import LLM
@@ -16,7 +17,7 @@ CHAT_MODELS = {
     "gpt-3.5-turbo", "gpt-4"
 }
 
-# SERVER_URL = "http://127.0.0.1:8002"
+# SERVER_URL = "http://127.0.0.1:8080"
 SERVER_URL = "https://proxy-server-l6vsfbzhba-uc.a.run.app"
 
 
@@ -39,16 +40,6 @@ class ProxyServer(LLM):
     def count_tokens(self, text: str):
         return len(self.__encoding_for_model.encode(text, disallowed_special=()))
 
-    def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
-        resp = requests.post(f"{SERVER_URL}/stream_complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        }, stream=True)
-        for line in resp.iter_lines():
-            if line:
-                yield line.decode("utf-8")
-
     def __prune_chat_history(self, chat_history: List[ChatMessage], max_tokens: int, tokens_for_completion: int):
         tokens = tokens_for_completion
         for i in range(len(chat_history) - 1, -1, -1):
@@ -67,7 +58,7 @@ class ProxyServer(LLM):
                 "role": "system",
                 "content": self.system_message
             })
-        history += [msg.dict() for msg in msgs]
+        history += [{"role": msg.role, "content": msg.content} for msg in msgs]
         history.append({
             "role": "user",
             "content": prompt
@@ -75,11 +66,28 @@ class ProxyServer(LLM):
 
         return history
 
-    def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> str:
+    async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                try:
+                    return json.loads(await resp.text())
+                except json.JSONDecodeError:
+                    raise Exception(await resp.text())
 
-        resp = requests.post(f"{SERVER_URL}/complete", json={
-            "chat_history": self.compile_chat_messages(with_history, prompt),
-            "model": self.default_model,
-            "unique_id": self.unique_id,
-        })
-        return json.loads(resp.text)
+    async def stream_chat(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+        async with aiohttp.ClientSession() as session:
+            async with session.post(f"{SERVER_URL}/stream_complete", json={
+                "chat_history": self.compile_chat_messages(with_history, prompt),
+                "model": self.default_model,
+                "unique_id": self.unique_id,
+            }) as resp:
+                async for line in resp.content:
+                    if line:
+                        try:
+                            yield line.decode("utf-8")
+                        except json.JSONDecodeError:
+                            raise Exception(str(line))
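The `ProxyServer` methods switch from blocking `requests` calls to `aiohttp`. A minimal sketch of the two patterns used above, one coroutine for a whole-response POST and one async generator for a streamed response; the URL is a placeholder, not the real proxy server contract.

```python
import asyncio
import aiohttp


async def complete(url: str, payload: dict) -> str:
    # Whole-response POST: the coroutine returns once the body has arrived
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload) as resp:
            return await resp.text()


async def stream(url: str, payload: dict):
    # Streamed POST: resp.content yields chunks as the server sends them
    async with aiohttp.ClientSession() as session:
        async with session.post(url, json=payload) as resp:
            async for line in resp.content:
                if line:
                    yield line.decode("utf-8")


async def main():
    text = await complete("http://127.0.0.1:8080/complete", {"prompt": "hi"})
    print(text)

# asyncio.run(main())  # requires a server listening on the placeholder URL
```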
diff --git a/continuedev/src/continuedev/libs/util/step_name_to_steps.py b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
index 4dd9c430..2c4474af 100644
--- a/continuedev/src/continuedev/libs/util/step_name_to_steps.py
+++ b/continuedev/src/continuedev/libs/util/step_name_to_steps.py
@@ -6,13 +6,25 @@ from ...steps.main import EditHighlightedCodeStep
 from ...steps.chat import SimpleChatStep
 from ...steps.comment_code import CommentCodeStep
 from ...steps.feedback import FeedbackStep
+from ...recipes.AddTransformRecipe.main import AddTransformRecipe
+from ...recipes.CreatePipelineRecipe.main import CreatePipelineRecipe
+from ...recipes.DDtoBQRecipe.main import DDtoBQRecipe
+from ...recipes.DeployPipelineAirflowRecipe.main import DeployPipelineAirflowRecipe
+from ...steps.on_traceback import DefaultOnTracebackStep
 
+# This mapping is used to convert from string in ContinueConfig json to corresponding Step class.
+# Used for example in slash_commands and steps_on_startup
 step_name_to_step_class = {
     "UserInputStep": UserInputStep,
     "EditHighlightedCodeStep": EditHighlightedCodeStep,
     "SimpleChatStep": SimpleChatStep,
     "CommentCodeStep": CommentCodeStep,
     "FeedbackStep": FeedbackStep,
+    "AddTransformRecipe": AddTransformRecipe,
+    "CreatePipelineRecipe": CreatePipelineRecipe,
+    "DDtoBQRecipe": DDtoBQRecipe,
+    "DeployPipelineAirflowRecipe": DeployPipelineAirflowRecipe,
+    "DefaultOnTracebackStep": DefaultOnTracebackStep,
 }
 
 
@@ -22,4 +34,4 @@ def get_step_from_name(step_name: str, params: Dict) -> Step:
     except:
         print(
             f"Incorrect parameters for step {step_name}. Parameters provided were: {params}")
-        raise
\ No newline at end of file
+        raise
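This registry is what turns a step name from the JSON config into a live `Step` instance. A self-contained sketch of the lookup, with a stub standing in for the real Step classes:

```python
from typing import Dict


class UserInputStep:
    # Stub: the real class is a pydantic Step whose fields are set from params
    def __init__(self, **params):
        self.params = params


step_name_to_step_class = {"UserInputStep": UserInputStep}


def get_step_from_name(step_name: str, params: Dict):
    try:
        return step_name_to_step_class[step_name](**params)
    except Exception:
        print(f"Incorrect parameters for step {step_name}. "
              f"Parameters provided were: {params}")
        raise


step = get_step_from_name("UserInputStep", {"user_input": "/edit make this async"})
print(type(step).__name__, step.params)
```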
diff --git a/continuedev/src/continuedev/libs/util/traceback_parsers.py b/continuedev/src/continuedev/libs/util/traceback_parsers.py
index c31929c1..a2e94c26 100644
--- a/continuedev/src/continuedev/libs/util/traceback_parsers.py
+++ b/continuedev/src/continuedev/libs/util/traceback_parsers.py
@@ -1,24 +1,25 @@
-from typing import Union
-from ...models.main import Traceback
-from boltons import tbutils
+PYTHON_TRACEBACK_PREFIX = "Traceback (most recent call last):"
 
 
-def sort_func(items):
-    """Sort a list of items."""
-    return sorted(items)
-
-
-def parse_python_traceback(stdout: str) -> Union[Traceback, None]:
-    """Parse a python traceback from stdout."""
+def get_python_traceback(output: str) -> str:
+    if PYTHON_TRACEBACK_PREFIX in output:
+        return PYTHON_TRACEBACK_PREFIX + output.split(PYTHON_TRACEBACK_PREFIX)[-1]
+    elif "SyntaxError" in output:
+        return "SyntaxError" + output.split("SyntaxError")[-1]
+    else:
+        return None
 
-    # Sometimes paths are not quoted, but they need to be
-    if "File \"" not in stdout:
-        stdout = stdout.replace("File ", "File \"").replace(
-            ", line ", "\", line ")
 
-    try:
-        tbutil_parsed_exc = tbutils.ParsedException.from_string(stdout)
-        return Traceback.from_tbutil_parsed_exc(tbutil_parsed_exc)
+def get_javascript_traceback(output: str) -> str:
+    lines = output.splitlines()
+    first_line = None
+    for i in range(len(lines) - 1):
+        segs = lines[i].split(":")
+        if len(segs) > 1 and segs[0] != "" and segs[1].startswith(" ") and lines[i + 1].strip().startswith("at"):
+            first_line = lines[i]
+            break
 
-    except Exception:
+    if first_line is not None:
+        return "\n".join(lines[lines.index(first_line):])
+    else:
         return None
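The new parsers no longer build a structured `Traceback` object; they just slice the raw traceback text out of the surrounding output. Here is the Python detector exercised on a made-up program output (the function body is copied from the new module so the snippet is self-contained):

```python
from typing import Optional

PYTHON_TRACEBACK_PREFIX = "Traceback (most recent call last):"


def get_python_traceback(output: str) -> Optional[str]:
    if PYTHON_TRACEBACK_PREFIX in output:
        return PYTHON_TRACEBACK_PREFIX + output.split(PYTHON_TRACEBACK_PREFIX)[-1]
    elif "SyntaxError" in output:
        return "SyntaxError" + output.split("SyntaxError")[-1]
    else:
        return None


output = """$ python main.py
Traceback (most recent call last):
  File "main.py", line 1, in <module>
    print(1 / 0)
ZeroDivisionError: division by zero
"""

# Everything before the prefix (the shell prompt line) is stripped
print(get_python_traceback(output))
```

The JavaScript detector works on the same principle but has no fixed prefix: it looks for an `Error: message` style line immediately followed by an `at ...` stack frame, and returns from that line to the end.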
diff --git a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py
index 92bddc98..55ef107b 100644
--- a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py
+++ b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/main.py
@@ -27,5 +27,7 @@ class CreatePipelineRecipe(Step):
         await sdk.run_step(
             SetupPipelineStep(api_description=text_observation.text) >>
             ValidatePipelineStep() >>
-            RunQueryStep()
+            RunQueryStep() >>
+            MessageStep(
+                name="Congrats!", message="You've successfully created your first dlt pipeline! 🎉")
         )
diff --git a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
index 096b41c6..91515dc2 100644
--- a/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
+++ b/continuedev/src/continuedev/recipes/CreatePipelineRecipe/steps.py
@@ -29,8 +29,8 @@ class SetupPipelineStep(Step):
     async def run(self, sdk: ContinueSDK):
         sdk.context.set("api_description", self.api_description)
 
-        source_name = sdk.models.gpt35.complete(
-            f"Write a snake_case name for the data source described by {self.api_description}: ").strip()
+        source_name = (await sdk.models.gpt35.complete(
+            f"Write a snake_case name for the data source described by {self.api_description}: ")).strip()
         filename = f'{source_name}.py'
 
         # running commands to get started when creating a new dlt pipeline
@@ -49,7 +49,7 @@ class SetupPipelineStep(Step):
             - `pip install -r requirements.txt`: Install the Python dependencies for the pipeline"""), name="Setup Python environment")
 
         # editing the resource function to call the requested API
-        resource_function_range = Range.from_shorthand(15, 0, 29, 0)
+        resource_function_range = Range.from_shorthand(15, 0, 30, 0)
         await sdk.ide.highlightCode(RangeInFile(filepath=os.path.join(await sdk.ide.getWorkspaceDirectory(), filename), range=resource_function_range), "#ffa50033")
 
         # sdk.set_loading_message("Writing code to call the API...")
@@ -64,7 +64,7 @@ class SetupPipelineStep(Step):
 
         # wait for user to put API key in secrets.toml
         await sdk.ide.setFileOpen(await sdk.ide.getWorkspaceDirectory() + "/.dlt/secrets.toml")
-        await sdk.wait_for_user_confirmation("If this service requires an API key, please add it to the `secrets.toml` file and then press `Continue`")
+        await sdk.wait_for_user_confirmation("If this service requires an API key, please add it to the `secrets.toml` file and then press `Continue`.")
 
         sdk.context.set("source_name", source_name)
 
@@ -91,7 +91,7 @@ class ValidatePipelineStep(Step):
         if "Traceback" in output or "SyntaxError" in output:
             output = "Traceback" + output.split("Traceback")[-1]
             file_content = await sdk.ide.readFile(os.path.join(workspace_dir, filename))
-            suggestion = sdk.models.gpt35.complete(dedent(f"""\
+            suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                 ```python
                 {file_content}
                 ```
@@ -103,7 +103,7 @@ class ValidatePipelineStep(Step):
 
                 This is a brief summary of the error followed by a suggestion on how it can be fixed by editing the resource function:"""))
 
-            api_documentation_url = sdk.models.gpt35.complete(dedent(f"""\
+            api_documentation_url = await sdk.models.gpt35.complete(dedent(f"""\
                 The API I am trying to call is the '{sdk.context.get('api_description')}'. I tried calling it in the @resource function like this:
                 ```python
                 {file_content}
                 ```
@@ -159,7 +159,7 @@ class RunQueryStep(Step):
         output = await sdk.run('.env/bin/python3 query.py', name="Run test query", description="Running `.env/bin/python3 query.py` to test that the data was loaded into DuckDB as expected", handle_error=False)
         if "Traceback" in output or "SyntaxError" in output:
-            suggestion = sdk.models.gpt35.complete(dedent(f"""\
+            suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                 ```python
                 {await sdk.ide.readFile(os.path.join(sdk.ide.workspace_directory, "query.py"))}
                 ```
@@ -172,5 +172,5 @@ class RunQueryStep(Step):
 
                 This is a brief summary of the error followed by a suggestion on how it can be fixed:"""))
 
         sdk.raise_exception(
-            title="Error while running query", message=output, with_step=MessageStep(name=f"Suggestion to solve error {AI_ASSISTED_STRING}", message=suggestion)
+            title="Error while running query", message=output, with_step=MessageStep(name=f"Suggestion to solve error {AI_ASSISTED_STRING}", message=suggestion + "\n\nIt is also very likely that no duckdb table was created, which can happen if the resource function did not yield any data. Please make sure that it is yielding data and then rerun this step.")
         )
diff --git a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
index 6db9fd4b..df414e2e 100644
--- a/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
+++ b/continuedev/src/continuedev/recipes/DDtoBQRecipe/steps.py
@@ -82,7 +82,7 @@ class LoadDataStep(Step):
                 docs = f.read()
 
             output = "Traceback" + output.split("Traceback")[-1]
-            suggestion = sdk.models.default.complete(dedent(f"""\
+            suggestion = await sdk.models.default.complete(dedent(f"""\
                 When trying to load data into BigQuery, the following error occurred:
 
                 ```ascii
diff --git a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
index 688f44c3..6e1244b3 100644
--- a/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
+++ b/continuedev/src/continuedev/recipes/WritePytestsRecipe/main.py
@@ -41,7 +41,7 @@ class WritePytestsRecipe(Step):
             "{self.user_input}"
 
             Here is a complete set of pytest unit tests:""")
-        tests = sdk.models.gpt35.complete(prompt)
+        tests = await sdk.models.gpt35.complete(prompt)
 
         await sdk.apply_filesystem_edit(AddFile(filepath=path, content=tests))
diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py
index c53149d8..c83fbc8a 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/continuedev/src/continuedev/server/ide.py
@@ -1,5 +1,4 @@
 # This is a separate server from server/main.py
-import asyncio
 from functools import cached_property
 import json
 import os
@@ -10,11 +9,13 @@ from uvicorn.main import Server
 from ..libs.util.queue import AsyncSubscriptionQueue
 from ..models.filesystem import FileSystem, RangeInFile, EditDiff, RealFileSystem
-from ..models.main import Traceback
 from ..models.filesystem_edit import AddDirectory, AddFile, DeleteDirectory, DeleteFile, FileSystemEdit, FileEdit, FileEditWithFullContents, RenameDirectory, RenameFile, SequentialFileSystemEdit
 from pydantic import BaseModel
 from .gui import SessionManager, session_manager
 from .ide_protocol import AbstractIdeProtocolServer
+import asyncio
+import nest_asyncio
+nest_asyncio.apply()
 
 
 router = APIRouter(prefix="/ide", tags=["ide"])
@@ -135,6 +136,9 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
             fileEdits = list(
                 map(lambda d: FileEditWithFullContents.parse_obj(d), data["fileEdits"]))
             self.onFileEdits(fileEdits)
+        elif message_type == "commandOutput":
+            output = data["output"]
+            self.onCommandOutput(output)
         elif message_type in ["highlightedCode", "openFiles", "readFile", "editFile", "workspaceDirectory", "getUserSecret", "runCommand", "uniqueId"]:
             self.sub_queue.post(message_type, data)
         else:
@@ -189,11 +193,6 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
     def onAcceptRejectSuggestion(self, suggestionId: str, accepted: bool):
         pass
 
-    def onTraceback(self, traceback: Traceback):
-        # Same as below, maybe not every autopilot?
-        for _, session in self.session_manager.sessions.items():
-            session.autopilot.handle_traceback(traceback)
-
     def onFileSystemUpdate(self, update: FileSystemEdit):
         # Access to Autopilot (so SessionManager)
         pass
@@ -211,6 +210,13 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         for _, session in self.session_manager.sessions.items():
             session.autopilot.handle_manual_edits(edits)
 
+    def onCommandOutput(self, output: str):
+        # Send the output to ALL autopilots.
+        # Maybe not ideal behavior
+        for _, session in self.session_manager.sessions.items():
+            asyncio.create_task(
+                session.autopilot.handle_command_output(output))
+
     # Request information. Session doesn't matter.
     async def getOpenFiles(self) -> List[str]:
         resp = await self._send_and_receive_json({}, OpenFilesResponse, "openFiles")
@@ -224,7 +230,7 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         resp = await self._send_and_receive_json({}, UniqueIdResponse, "uniqueId")
         return resp.uniqueId
 
-    @cached_property
+    @property
     def workspace_directory(self) -> str:
         return asyncio.run(self.getWorkspaceDirectory())
 
diff --git a/continuedev/src/continuedev/server/ide_protocol.py b/continuedev/src/continuedev/server/ide_protocol.py
index 1d98f4a1..2dcedc30 100644
--- a/continuedev/src/continuedev/server/ide_protocol.py
+++ b/continuedev/src/continuedev/server/ide_protocol.py
@@ -36,10 +36,6 @@ class AbstractIdeProtocolServer(ABC):
         """Called when the user accepts or rejects a suggestion"""
 
     @abstractmethod
-    def onTraceback(self, traceback: Traceback):
-        """Called when a traceback is received"""
-
-    @abstractmethod
     def onFileSystemUpdate(self, update: FileSystemEdit):
         """Called when a file system update is received"""
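The new `commandOutput` websocket route fans terminal output out to every session's autopilot as a fire-and-forget task, so the websocket handler is never blocked by step execution. A toy version of that dispatch, with a fake autopilot standing in for the real one:

```python
import asyncio


class FakeAutopilot:
    # Stand-in for the real Autopilot with its handle_command_output hook
    async def handle_command_output(self, output: str):
        print("autopilot saw:", output.strip())


sessions = {"session-1": FakeAutopilot(), "session-2": FakeAutopilot()}


def on_command_output(output: str):
    # Mirrors IdeProtocolServer.onCommandOutput: every autopilot gets the
    # output, scheduled rather than awaited
    for _, autopilot in sessions.items():
        asyncio.create_task(autopilot.handle_command_output(output))


async def main():
    on_command_output("Traceback (most recent call last): ...")
    await asyncio.sleep(0)  # yield once so the scheduled tasks run

asyncio.run(main())
```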
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 7cfe7e0c..90514ad6 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -10,10 +10,10 @@ class SimpleChatStep(Step):
     name: str = "Chat"
 
     async def run(self, sdk: ContinueSDK):
-        self.description = f"## {self.user_input}\n\n"
-        for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()):
+        self.description = f"```{self.user_input}```\n\n"
+        async for chunk in sdk.models.default.stream_chat(self.user_input, with_history=await sdk.get_chat_context()):
             self.description += chunk
             await sdk.update_ui()
 
-        self.name = sdk.models.gpt35.complete(
-            f"Write a short title for the following chat message: {self.description}").strip()
+        self.name = (await sdk.models.gpt35.complete(
+            f"Write a short title for the following chat message: {self.description}")).strip()
diff --git a/continuedev/src/continuedev/steps/chroma.py b/continuedev/src/continuedev/steps/chroma.py
index 058455b2..9d085981 100644
--- a/continuedev/src/continuedev/steps/chroma.py
+++ b/continuedev/src/continuedev/steps/chroma.py
@@ -56,7 +56,7 @@ class AnswerQuestionChroma(Step):
 
             Here is the answer:""")
 
-        answer = sdk.models.gpt35.complete(prompt)
+        answer = await sdk.models.gpt35.complete(prompt)
         # Make paths relative to the workspace directory
         answer = answer.replace(await sdk.ide.getWorkspaceDirectory(), "")
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index de6fa29a..d580e2d2 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -73,7 +73,7 @@ class ShellCommandsStep(Step):
             return f"Error when running shell commands:\n```\n{self._err_text}\n```"
 
         cmds_str = "\n".join(self.cmds)
-        return models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:")
+        return await models.gpt35.complete(f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:")
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
         cwd = await sdk.ide.getWorkspaceDirectory() if self.cwd is None else self.cwd
@@ -81,7 +81,7 @@ class ShellCommandsStep(Step):
         for cmd in self.cmds:
             output = await sdk.ide.runCommand(cmd)
             if self.handle_error and output is not None and output_contains_error(output):
-                suggestion = sdk.models.gpt35.complete(dedent(f"""\
+                suggestion = await sdk.models.gpt35.complete(dedent(f"""\
                     While running the command `{cmd}`, the following error occurred:
 
                     ```ascii
@@ -152,10 +152,8 @@ class DefaultModelEditCodeStep(Step):
     _prompt_and_completion: str = ""
 
     async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        description = models.gpt35.complete(
+        description = await models.gpt35.complete(
             f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points. Be concise and only mention changes made to the commit before, not prefix or suffix:")
-        # self.name = models.gpt35.complete(
-        #     f"Write a short title for this description: {description}")
         return description
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
@@ -229,7 +227,8 @@ class DefaultModelEditCodeStep(Step):
         prompt = self._prompt.format(
             code=rif.contents, user_request=self.user_input, file_prefix=segs[0], file_suffix=segs[1])
 
-        completion = str(model_to_use.complete(prompt, with_history=await sdk.get_chat_context()))
+        completion = str(await model_to_use.complete(prompt, with_history=await sdk.get_chat_context()))
+
         eot_token = "<|endoftext|>"
         completion = completion.removesuffix(eot_token)
diff --git a/continuedev/src/continuedev/steps/draft/migration.py b/continuedev/src/continuedev/steps/draft/migration.py
index 7c4b7eb5..f3b36b5e 100644
--- a/continuedev/src/continuedev/steps/draft/migration.py
+++ b/continuedev/src/continuedev/steps/draft/migration.py
@@ -13,7 +13,7 @@ class MigrationStep(Step):
         recent_edits = await sdk.ide.get_recent_edits(self.edited_file)
         recent_edits_string = "\n\n".join(
             map(lambda x: x.to_string(), recent_edits))
-        description = sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
+        description = await sdk.models.gpt35.complete(f"{recent_edits_string}\n\nGenerate a short description of the migration made in the above changes:\n")
         await sdk.run([
             "cd libs",
             "poetry run alembic revision --autogenerate -m " + description,
diff --git a/continuedev/src/continuedev/steps/input/nl_multiselect.py b/continuedev/src/continuedev/steps/input/nl_multiselect.py
index 36c489c7..aee22866 100644
--- a/continuedev/src/continuedev/steps/input/nl_multiselect.py
+++ b/continuedev/src/continuedev/steps/input/nl_multiselect.py
@@ -23,6 +23,6 @@ class NLMultiselectStep(Step):
         if first_try is not None:
             return first_try
 
-        gpt_parsed = sdk.models.gpt35.complete(
+        gpt_parsed = await sdk.models.gpt35.complete(
             f"These are the available options are: [{', '.join(self.options)}]. The user requested {user_response}. This is the exact string from the options array that they selected:")
         return extract_option(gpt_parsed) or self.options[0]
diff --git a/continuedev/src/continuedev/steps/main.py b/continuedev/src/continuedev/steps/main.py
index 0e42d8bf..5ba86c53 100644
--- a/continuedev/src/continuedev/steps/main.py
+++ b/continuedev/src/continuedev/steps/main.py
@@ -3,7 +3,6 @@ from typing import Coroutine, List, Union
 
 from pydantic import BaseModel
 
-from ..libs.util.traceback_parsers import parse_python_traceback
 from ..libs.llm import LLM
 from ..models.main import Traceback, Range
 from ..models.filesystem_edit import EditDiff, FileEdit
@@ -33,28 +32,6 @@ class SetupContinueWorkspaceStep(Step):
         }"""))
 
 
-class RunCodeStep(Step):
-    cmd: str
-
-    async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        return f"Ran command: `{self.cmd}`"
-
-    async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
-        result = subprocess.run(
-            self.cmd.split(), stdout=subprocess.PIPE, stderr=subprocess.PIPE)
-        stdout = result.stdout.decode("utf-8")
-        stderr = result.stderr.decode("utf-8")
-        print(stdout, stderr)
-
-        # If it fails, return the error
-        tb = parse_python_traceback(stdout) or parse_python_traceback(stderr)
-        if tb:
-            return TracebackObservation(traceback=tb)
-        else:
-            self.hide = True
-            return None
-
-
 class Policy(BaseModel):
     pass
 
@@ -145,7 +122,7 @@ class FasterEditHighlightedCodeStep(Step):
         for rif in rif_with_contents:
             rif_dict[rif.filepath] = rif.contents
 
-        completion = sdk.models.gpt35.complete(prompt)
+        completion = await sdk.models.gpt35.complete(prompt)
 
         # Temporarily doing this to generate description.
         self._prompt = prompt
@@ -213,7 +190,7 @@ class StarCoderEditHighlightedCodeStep(Step):
     _prompt_and_completion: str = ""
 
     async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        return models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")
+        return await models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")
 
     async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
         range_in_files = await sdk.ide.getHighlightedCode()
@@ -247,7 +224,7 @@ class StarCoderEditHighlightedCodeStep(Step):
             segs = full_file_contents.split(rif.contents)
             prompt = f"<file_prefix>{segs[0]}<file_suffix>{segs[1]}" + prompt
 
-        completion = str((await sdk.models.starcoder()).complete(prompt))
+        completion = str(await sdk.models.starcoder.complete(prompt))
         eot_token = "<|endoftext|>"
         completion = completion.removesuffix(eot_token)
diff --git a/continuedev/src/continuedev/steps/on_traceback.py b/continuedev/src/continuedev/steps/on_traceback.py
new file mode 100644
index 00000000..053b4ef4
--- /dev/null
+++ b/continuedev/src/continuedev/steps/on_traceback.py
@@ -0,0 +1,23 @@
+import os
+from ..core.main import Step
+from ..core.sdk import ContinueSDK
+from .chat import SimpleChatStep
+
+
+class DefaultOnTracebackStep(Step):
+    output: str
+    name: str = "Help With Traceback"
+    hide: bool = True
+
+    async def run(self, sdk: ContinueSDK):
+        # Add context for any files in the traceback that are in the workspace
+        for line in self.output.split("\n"):
+            segs = line.split(" ")
+            for seg in segs:
+                if seg.startswith(os.path.sep) and os.path.exists(seg) and os.path.commonprefix([seg, sdk.ide.workspace_directory]) == sdk.ide.workspace_directory:
+                    file_contents = await sdk.ide.readFile(seg)
+                    await sdk.add_chat_context(f"The contents of {seg}:\n```\n{file_contents}\n```", "", "user")
+
+        await sdk.run_step(SimpleChatStep(
+            name="Help With Traceback",
+            user_input=f"""I got the following error, can you please help explain how to fix it?\n\n{self.output}"""))
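`DefaultOnTracebackStep` sniffs absolute paths out of the traceback text and keeps only those inside the workspace before asking the chat model for help. Here is that path-filtering loop in isolation; the sample traceback and workspace path are hypothetical, and the `os.path.exists` check from the real step is skipped so the example runs anywhere.

```python
import os
from typing import List


def workspace_files_in_traceback(output: str, workspace_directory: str) -> List[str]:
    found = []
    for line in output.split("\n"):
        for seg in line.split(" "):
            # Keep segments that look like absolute paths under the workspace
            if seg.startswith(os.path.sep) and \
                    os.path.commonprefix([seg, workspace_directory]) == workspace_directory:
                found.append(seg)
    return found


tb = 'File "/home/user/project/main.py", line 3, in <module>'
# Quotes are stripped here because, as written, the check only matches
# segments that begin with the path separator itself
print(workspace_files_in_traceback(tb.replace('"', " "), "/home/user/project"))
```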
diff --git a/continuedev/src/continuedev/steps/react.py b/continuedev/src/continuedev/steps/react.py
index d825d424..4d310fc8 100644
--- a/continuedev/src/continuedev/steps/react.py
+++ b/continuedev/src/continuedev/steps/react.py
@@ -27,7 +27,7 @@ class NLDecisionStep(Step):
             Select the step which should be taken next to satisfy the user input. Say only the name of the selected step. You must choose one:""")
 
-        resp = sdk.models.gpt35.complete(prompt).lower()
+        resp = (await sdk.models.gpt35.complete(prompt)).lower()
 
         step_to_run = None
         for step in self.steps:
diff --git a/continuedev/src/continuedev/steps/search_directory.py b/continuedev/src/continuedev/steps/search_directory.py
index 9f4594b9..d2966f46 100644
--- a/continuedev/src/continuedev/steps/search_directory.py
+++ b/continuedev/src/continuedev/steps/search_directory.py
@@ -41,7 +41,7 @@ class WriteRegexPatternStep(Step):
 
     async def run(self, sdk: ContinueSDK):
         # Ask the user for a regex pattern
-        pattern = sdk.models.gpt35.complete(dedent(f"""\
+        pattern = await sdk.models.gpt35.complete(dedent(f"""\
             This is the user request:
 
             {self.user_request}
 
diff --git a/docs/docs/getting-started.md b/docs/docs/getting-started.md
index 9824aadd..b7401106 100644
--- a/docs/docs/getting-started.md
+++ b/docs/docs/getting-started.md
@@ -12,10 +12,9 @@ We don't want to waste your time with install and env setup before you try Conti
 2. Select the `Create new codespace` button and wait 30-90 seconds while it launches and installs the Continue extension. Once complete, it should look like this:
-**TODO: Nate to add a screenshot of what Codespaces + Continue looks like when it is ready**
+![codespaces-install](/img/codespaces-install.png)
## Next Steps
- Read the `Getting Started` section of the `README.md` file that has been opened in your codespace, which you can also find [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
-
- If you're ready to download for VS Code, you can do so [here](https://marketplace.visualstudio.com/items?itemName=Continue.continue).
+ - Read the `Getting Started` section of the `README.md` file that has been opened in your codespace, which you can also find [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
+ - If you're ready to download for VS Code, you can do so [here](https://marketplace.visualstudio.com/items?itemName=Continue.continue)
\ No newline at end of file
diff --git a/docs/docs/install.md b/docs/docs/install.md
index a042739f..ac0aef30 100644
--- a/docs/docs/install.md
+++ b/docs/docs/install.md
@@ -12,7 +12,7 @@ If you want to try Continue before installing locally, check out the [GitHub Cod
 3. Once you do this, you will see a message in the bottom right hand corner of VS Code that says `Setting up Continue extension...`. After 30-90 seconds, the Continue extension will then open up. It should look like this when it is complete:
-**TODO: Nate to add a screenshot of what Codespaces + Continue looks like when it is ready**
+![vscode-install](/img/vscode-install.png)
You can also open the Continue GUI with `cmd+shift+p` on Mac / `ctrl+shift+p` on Windows and then selecting `Continue: Open GUI`
@@ -22,7 +22,4 @@ If you would like to install Continue from source, please [follow the instructio
 ## Next steps
 
-**TODO: Nate to update VS Code install to have the same getting started as Codespaces**
-
-Read the `Getting Started` section of the `README.md` file that has been opened in VS Code,
-which you can also find [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
\ No newline at end of file
+- Read the `Getting Started` section of the `README.md` [here](https://github.com/continuedev/continue-codespaces-demo/blob/main/README.md)
\ No newline at end of file
diff --git a/docs/docs/intro.md b/docs/docs/intro.md
index e7d922c2..6795797e 100644
--- a/docs/docs/intro.md
+++ b/docs/docs/intro.md
@@ -11,10 +11,10 @@
 
 **Continue is the open-source library for accelerating software development with language models**
 
-You define the scenarios where Large Language Models LLMs like GPT-4 and StarCoder should act as an autopilot, helping you complete software development tasks. You use recipes created by others to automate more steps in your workflows. If a recipe does not exist or work exactly like you want, you can use the Continue SDK to create custom steps and compose them into personalized recipes. Whether you are using a recipe created by yourself or someone else, you can review, reverse, and rerun steps with the Continue GUI, which helps you guide the work done by LLMs and learn when to use and trust them.
+You determine when Large Language Models (LLMs) like GPT-4 should act as an autopilot, helping you complete software development tasks. You open a file or highlight some code and then use slash commands like `/edit`, `/explain`, and `/comment` and naturual language instructions to tell the language model what to do. If an error or exception occurs when you run Python or JavaScript code, Continue will automatically tell you in plain English what to try to address it. You can also review, reverse, and rerun steps with the Continue GUI, which helps you guide the work done by LLMs and learn when to use and trust them.
 
 ## Why do developers use Continue?
 
-Many developers have begun to use [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5) and [GPT-4](https://openai.com/research/gpt-4) through [ChatGPT](https://openai.com/blog/chatgpt) while coding; however, the experience is painful because of how much manual copying, pasting, and editing is required to provide the necessary context and incorporate the generated solutions into your codebase. Continue eliminates this pain by deeply integrating LLMs into your IDE amd workflows.
+Many developers have begun to use [GPT-3.5](https://platform.openai.com/docs/models/gpt-3-5) and [GPT-4](https://openai.com/research/gpt-4) through [ChatGPT](https://openai.com/blog/chatgpt) while coding; however, the experience is painful because of how much manual copying, pasting, and editing is required to provide the necessary context and incorporate the generated solutions and advice into your codebase. Continue eliminates this pain by enabling LLMs to natively act in your IDE as you complete your workflows.
 
 Continue accelerates how developers build, ship, and maintain software, while giving them the control to define when LLMs should take actions and the confidence to trust LLMs. In short, it enables developers to do what they have always done: work together to create better and better abstractions that make it easier and easier to automate the repetitive work that people want computers to do.
\ No newline at end of file
diff --git a/docs/docusaurus.config.js b/docs/docusaurus.config.js
index 7b817bb9..7746a87b 100644
--- a/docs/docusaurus.config.js
+++ b/docs/docusaurus.config.js
@@ -36,21 +36,19 @@ const config = {
   presets: [
     [
       "classic",
-      {
-        gtag: {
-          trackingID: 'G-M3JWW8N2XQ',
-        },
-      },
       /** @type {import('@docusaurus/preset-classic').Options} */
       ({
         docs: {
           routeBasePath: '/',
           sidebarPath: require.resolve("./sidebars.js"),
-          editUrl: "https://github.com/continuedev/continue/",
+          editUrl: "https://github.com/continuedev/continue/tree/main/docs",
         },
         theme: {
           customCss: require.resolve("./src/css/custom.css"),
         },
+        gtag: {
+          trackingID: 'G-M3JWW8N2XQ',
+        },
       }),
     ],
   ],
diff --git a/docs/sidebars.js b/docs/sidebars.js
index 30b8ad3a..f9a5bdef 100644
--- a/docs/sidebars.js
+++ b/docs/sidebars.js
@@ -17,7 +17,6 @@ const sidebars = {
     "intro",
     "getting-started",
     "install",
-    "catalog",
     "how-continue-works",
     "telemetry",
   ],
diff --git a/docs/src/components/HomepageFeatures/index.js b/docs/src/components/HomepageFeatures/index.js
index 31df2b27..6348f80a 100644
--- a/docs/src/components/HomepageFeatures/index.js
+++ b/docs/src/components/HomepageFeatures/index.js
@@ -4,32 +4,29 @@ import styles from "./styles.module.css";
 
 const FeatureList = [
   {
-    title: "Tell LLMs when to step in",
+    title: "Understand and edit code",
     Svg: require("@site/static/img/undraw_docusaurus_mountain.svg").default,
     description: (
       <>
-        Seamlessly put your repetitive software development tasks
-        on autopilot by leveraging recipes created by others
+        Seamlessly ask language models to help you complete steps in your software development tasks
       </>
     ),
   },
   {
-    title: "Write your own recipes",
+    title: "Customizable for your team",
     Svg: require("@site/static/img/undraw_docusaurus_tree.svg").default,
     description: (
       <>
-        Use the Continue SDK to create custom steps and compose
-        them into recipes, guiding LLMs through common tasks
+        Define when and how LLMs should act to accelerate steps in your team-specific workflows
       </>
     ),
   },
   {
-    title: "Wield LLMs with confidence",
+    title: "Break down your work step-by-step",
     Svg: require("@site/static/img/undraw_docusaurus_react.svg").default,
     description: (
      <>
-        Use the Continue GUI to review, reverse, and rerun steps or even
-        entire recipes, allowing you to build trust in language models
+        Use the Continue GUI to review, reverse, and rerun steps that LLMs and you have taken together
      </>
    ),
  },
diff --git a/docs/static/img/codespaces-install.png b/docs/static/img/codespaces-install.png
new file mode 100644
index 00000000..e960eff1
--- /dev/null
+++ b/docs/static/img/codespaces-install.png
Binary files differ
diff --git a/docs/static/img/vscode-install.png b/docs/static/img/vscode-install.png
new file mode 100644
index 00000000..17ac547a
--- /dev/null
+++ b/docs/static/img/vscode-install.png
Binary files differ
diff --git a/extension/package-lock.json b/extension/package-lock.json
index e41cd2c2..86c816e0 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
 {
   "name": "continue",
-  "version": "0.0.40",
+  "version": "0.0.47",
   "lockfileVersion": 2,
   "requires": true,
   "packages": {
     "": {
       "name": "continue",
-      "version": "0.0.40",
+      "version": "0.0.47",
       "license": "Apache-2.0",
       "dependencies": {
         "@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 4b199420..56a522ac 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
   "displayName": "Continue",
   "pricing": "Free",
   "description": "Refine code 10x faster",
-  "version": "0.0.40",
+  "version": "0.0.47",
   "publisher": "Continue",
   "engines": {
     "vscode": "^1.74.0"
@@ -96,7 +96,7 @@
         {
           "type": "webview",
           "id": "continue.continueGUIView",
-          "name": ")",
+          "name": "GUI",
          "visibility": "visible"
        }
      ]
"description": "Refine code 10x faster", - "version": "0.0.40", + "version": "0.0.47", "publisher": "Continue", "engines": { "vscode": "^1.74.0" @@ -96,7 +96,7 @@ { "type": "webview", "id": "continue.continueGUIView", - "name": ")", + "name": "GUI", "visibility": "visible" } ] diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx index ace0605e..2b140567 100644 --- a/extension/react-app/src/components/ComboBox.tsx +++ b/extension/react-app/src/components/ComboBox.tsx @@ -113,6 +113,9 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => { (event.nativeEvent as any).preventDownshiftDefault = true; if (props.onEnter) props.onEnter(event); setInputValue(""); + } else if (event.key === "Tab" && items.length > 0) { + setInputValue(items[0].name); + event.preventDefault(); } }, ref: ref as any, diff --git a/extension/react-app/src/components/DebugPanel.tsx b/extension/react-app/src/components/DebugPanel.tsx index 11ec2fe2..30f38779 100644 --- a/extension/react-app/src/components/DebugPanel.tsx +++ b/extension/react-app/src/components/DebugPanel.tsx @@ -17,39 +17,15 @@ interface DebugPanelProps { }[]; } -const GradientContainer = styled.div` - // Uncomment to get gradient border - /* background: linear-gradient( - 101.79deg, - #12887a 0%, - #87245c 37.64%, - #e12637 65.98%, - #ffb215 110.45% - ); */ - /* padding: 10px; */ - background-color: ${secondaryDark}; - margin: 0; - height: 100%; - /* border: 1px solid white; */ - border-radius: ${defaultBorderRadius}; -`; - -const MainDiv = styled.div` - height: 100%; - border-radius: ${defaultBorderRadius}; - scrollbar-base-color: transparent; - background-color: ${vscBackground}; -`; - const TabBar = styled.div<{ numTabs: number }>` display: grid; grid-template-columns: repeat(${(props) => props.numTabs}, 1fr); `; const TabsAndBodyDiv = styled.div` - display: grid; - grid-template-rows: auto 1fr; height: 100%; + border-radius: ${defaultBorderRadius}; + scrollbar-base-color: transparent; `; function DebugPanel(props: DebugPanelProps) { @@ -76,42 +52,43 @@ function DebugPanel(props: DebugPanelProps) { const [currentTab, setCurrentTab] = useState(0); return ( - <GradientContainer> - <MainDiv> - <TabsAndBodyDiv> - {props.tabs.length > 1 && ( - <TabBar numTabs={props.tabs.length}> - {props.tabs.map((tab, index) => { - return ( - <div - key={index} - className={`p-2 cursor-pointer text-center ${ - index === currentTab - ? "bg-secondary-dark" - : "bg-vsc-background" - }`} - onClick={() => setCurrentTab(index)} - > - {tab.title} - </div> - ); - })} - </TabBar> - )} + <TabsAndBodyDiv> + {props.tabs.length > 1 && ( + <TabBar numTabs={props.tabs.length}> {props.tabs.map((tab, index) => { return ( <div key={index} - hidden={index !== currentTab} - style={{ scrollbarGutter: "stable both-edges" }} + className={`p-2 cursor-pointer text-center ${ + index === currentTab + ? 
"bg-secondary-dark" + : "bg-vsc-background" + }`} + onClick={() => setCurrentTab(index)} > - {tab.element} + {tab.title} </div> ); })} - </TabsAndBodyDiv> - </MainDiv> - </GradientContainer> + </TabBar> + )} + {props.tabs.map((tab, index) => { + return ( + <div + key={index} + hidden={index !== currentTab} + style={{ + scrollbarGutter: "stable both-edges", + minHeight: "100%", + display: "grid", + gridTemplateRows: "1fr auto", + }} + > + {tab.element} + </div> + ); + })} + </TabsAndBodyDiv> ); } diff --git a/extension/react-app/src/components/HeaderButtonWithText.tsx b/extension/react-app/src/components/HeaderButtonWithText.tsx new file mode 100644 index 00000000..c4f22211 --- /dev/null +++ b/extension/react-app/src/components/HeaderButtonWithText.tsx @@ -0,0 +1,30 @@ +import React, { useState } from "react"; + +import { HeaderButton } from "."; + +interface HeaderButtonWithTextProps { + text: string; + onClick?: (e: any) => void; + children: React.ReactNode; +} + +const HeaderButtonWithText = (props: HeaderButtonWithTextProps) => { + const [hover, setHover] = useState(false); + return ( + <HeaderButton + style={{ padding: "3px" }} + onMouseEnter={() => setHover(true)} + onMouseLeave={() => { + setTimeout(() => { + setHover(false); + }, 100); + }} + onClick={props.onClick} + > + <span hidden={!hover}>{props.text}</span> + {props.children} + </HeaderButton> + ); +}; + +export default HeaderButtonWithText; diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx index 48f970d7..480f517f 100644 --- a/extension/react-app/src/components/StepContainer.tsx +++ b/extension/react-app/src/components/StepContainer.tsx @@ -21,9 +21,7 @@ import { } from "@styled-icons/heroicons-outline"; import { HistoryNode } from "../../../schema/HistoryNode"; import ReactMarkdown from "react-markdown"; -import ContinueButton from "./ContinueButton"; -import InputAndButton from "./InputAndButton"; -import ToggleErrorDiv from "./ToggleErrorDiv"; +import HeaderButtonWithText from "./HeaderButtonWithText"; interface StepContainerProps { historyNode: HistoryNode; @@ -152,23 +150,25 @@ function StepContainer(props: StepContainerProps) { </HeaderButton> */} <> - <HeaderButton + <HeaderButtonWithText onClick={(e) => { e.stopPropagation(); props.onDelete(); }} + text="Delete" > <XMark size="1.6em" onClick={props.onDelete} /> - </HeaderButton> + </HeaderButtonWithText> {props.historyNode.observation?.error ? 
diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx
index 48f970d7..480f517f 100644
--- a/extension/react-app/src/components/StepContainer.tsx
+++ b/extension/react-app/src/components/StepContainer.tsx
@@ -21,9 +21,7 @@ import {
 } from "@styled-icons/heroicons-outline";
 import { HistoryNode } from "../../../schema/HistoryNode";
 import ReactMarkdown from "react-markdown";
-import ContinueButton from "./ContinueButton";
-import InputAndButton from "./InputAndButton";
-import ToggleErrorDiv from "./ToggleErrorDiv";
+import HeaderButtonWithText from "./HeaderButtonWithText";
 
 interface StepContainerProps {
   historyNode: HistoryNode;
@@ -152,23 +150,25 @@ function StepContainer(props: StepContainerProps) {
             </HeaderButton> */}
 
             <>
-              <HeaderButton
+              <HeaderButtonWithText
                 onClick={(e) => {
                   e.stopPropagation();
                   props.onDelete();
                 }}
+                text="Delete"
               >
                 <XMark size="1.6em" onClick={props.onDelete} />
-              </HeaderButton>
+              </HeaderButtonWithText>
               {props.historyNode.observation?.error ? (
-                <HeaderButton
+                <HeaderButtonWithText
+                  text="Retry"
                   onClick={(e) => {
                     e.stopPropagation();
                     props.onRetry();
                   }}
                 >
                   <ArrowPath size="1.6em" onClick={props.onRetry} />
-                </HeaderButton>
+                </HeaderButtonWithText>
               ) : (
                 <></>
               )}
@@ -193,7 +193,7 @@ function StepContainer(props: StepContainerProps) {
           ) : (
             <ReactMarkdown
               key={1}
-              className="overflow-scroll"
+              className="overflow-x-scroll"
               components={
                 {
                   // pre: ({ node, ...props }) => {
diff --git a/extension/react-app/src/components/index.ts b/extension/react-app/src/components/index.ts
index 525989af..d99b4d96 100644
--- a/extension/react-app/src/components/index.ts
+++ b/extension/react-app/src/components/index.ts
@@ -48,7 +48,7 @@ export const Pre = styled.pre`
   max-height: 150px;
   overflow-y: scroll;
   margin: 0;
-  background-color: ${secondaryDark};
+  background-color: ${vscBackground};
   border: none;
 
   /* text wrapping */
diff --git a/extension/react-app/src/index.css b/extension/react-app/src/index.css
index 20599d30..db8afab9 100644
--- a/extension/react-app/src/index.css
+++ b/extension/react-app/src/index.css
@@ -21,7 +21,8 @@ html,
 body,
 #root {
-  height: calc(100%);
+  height: 100%;
+  background-color: var(--vsc-background);
 }
 
 body {
@@ -31,4 +32,5 @@ body {
   font-family: "Mona Sans", "Arial", sans-serif;
   padding: 0px;
   margin: 0px;
+  height: 100%;
 }
- const shouldScrollToBottom = window.outerHeight - window.scrollY < 200; + const shouldScrollToBottom = + topGuiDivRef.current && + topGuiDivRef.current?.offsetHeight - window.scrollY < 100; setWaitingForSteps(state.active); setHistory(state.history); setUserInputQueue(state.user_input_queue); @@ -303,103 +298,98 @@ function GUI(props: GUIProps) { setShowFeedbackDialog(false); }} ></TextDialog> - <MainDiv> - <TopGUIDiv - ref={topGuiDivRef} - onKeyDown={(e) => { - if (e.key === "Enter" && e.ctrlKey) { - onMainTextInput(); - } - }} - > - {typeof client === "undefined" && ( - <> - <Loader></Loader> - <p style={{ textAlign: "center" }}> - Trying to reconnect with server... - </p> - </> - )} - {history?.timeline.map((node: HistoryNode, index: number) => { - return ( - <StepContainer - key={index} - onUserInput={(input: string) => { - onStepUserInput(input, index); - }} - inFuture={index > history?.current_index} - historyNode={node} - onRefinement={(input: string) => { - client?.sendRefinementInput(input, index); - }} - onReverse={() => { - client?.reverseToIndex(index); - }} - onRetry={() => { - client?.retryAtIndex(index); - setWaitingForSteps(true); - }} - onDelete={() => { - client?.deleteAtIndex(index); - }} - /> - ); - })} - {waitingForSteps && <Loader></Loader>} - <div> - {userInputQueue.map((input) => { - return <UserInputQueueItem>{input}</UserInputQueueItem>; - })} - </div> - - <ComboBox - disabled={ - history?.timeline.length - ? history.timeline[history.current_index].step.name === - "Waiting for user confirmation" - : false - } - ref={mainTextInputRef} - onEnter={(e) => { - onMainTextInput(); - e.stopPropagation(); - e.preventDefault(); - }} - onInputValueChange={() => {}} - items={availableSlashCommands} - /> - <ContinueButton onClick={onMainTextInput} /> - - <TopBar> - <a href="https://continue.dev/docs" className="no-underline"> - <HeaderButton style={{ padding: "3px" }}> - Continue Docs - <BookOpen size="1.6em" /> - </HeaderButton> - </a> - <HeaderButton - style={{ padding: "3px" }} - onClick={() => { - // Set dialog open - setShowFeedbackDialog(true); + <TopGUIDiv + ref={topGuiDivRef} + onKeyDown={(e) => { + if (e.key === "Enter" && e.ctrlKey) { + onMainTextInput(); + } + }} + > + {typeof client === "undefined" && ( + <> + <Loader></Loader> + <p style={{ textAlign: "center" }}> + Trying to reconnect with server... + </p> + </> + )} + {history?.timeline.map((node: HistoryNode, index: number) => { + return ( + <StepContainer + key={index} + onUserInput={(input: string) => { + onStepUserInput(input, index); + }} + inFuture={index > history?.current_index} + historyNode={node} + onRefinement={(input: string) => { + client?.sendRefinementInput(input, index); + }} + onReverse={() => { + client?.reverseToIndex(index); }} - > - Feedback - <ChatBubbleOvalLeftEllipsis size="1.6em" /> - </HeaderButton> - <HeaderButton - onClick={() => { - client?.sendClear(); + onRetry={() => { + client?.retryAtIndex(index); + setWaitingForSteps(true); }} - style={{ padding: "3px" }} - > - Clear History - <Trash size="1.6em" /> - </HeaderButton> - </TopBar> - </TopGUIDiv> - </MainDiv> + onDelete={() => { + client?.deleteAtIndex(index); + }} + /> + ); + })} + {waitingForSteps && <Loader></Loader>} + + <div> + {userInputQueue.map((input) => { + return <UserInputQueueItem>{input}</UserInputQueueItem>; + })} + </div> + + <ComboBox + // disabled={ + // history?.timeline.length + // ? 
history.timeline[history.current_index].step.name === + // "Waiting for user confirmation" + // : false + // } + ref={mainTextInputRef} + onEnter={(e) => { + onMainTextInput(); + e.stopPropagation(); + e.preventDefault(); + }} + onInputValueChange={() => {}} + items={availableSlashCommands} + /> + <ContinueButton onClick={onMainTextInput} /> + </TopGUIDiv> + <Footer> + <HeaderButtonWithText + onClick={() => { + client?.sendClear(); + }} + text="Clear History" + > + <Trash size="1.6em" /> + </HeaderButtonWithText> + <a href="https://continue.dev/docs" className="no-underline"> + <HeaderButtonWithText text="Continue Docs"> + <BookOpen size="1.6em" /> + </HeaderButtonWithText> + </a> + <HeaderButtonWithText + onClick={() => { + // Set dialog open + setShowFeedbackDialog(true); + }} + text="Feedback" + > + <ChatBubbleOvalLeftEllipsis size="1.6em" /> + </HeaderButtonWithText> + </Footer> </> ); } diff --git a/extension/scripts/continuedev-0.1.1-py3-none-any.whl b/extension/scripts/continuedev-0.1.1-py3-none-any.whl Binary files differindex 2f8f1550..b0b84230 100644 --- a/extension/scripts/continuedev-0.1.1-py3-none-any.whl +++ b/extension/scripts/continuedev-0.1.1-py3-none-any.whl diff --git a/extension/src/activation/activate.ts b/extension/src/activation/activate.ts index 135a8ec7..32726c86 100644 --- a/extension/src/activation/activate.ts +++ b/extension/src/activation/activate.ts @@ -8,6 +8,7 @@ import * as path from "path"; import IdeProtocolClient from "../continueIdeClient"; import { getContinueServerUrl } from "../bridge"; import { setupDebugPanel, ContinueGUIWebviewViewProvider } from "../debugPanel"; +import { CapturedTerminal } from "../terminal/terminalEmulator"; export let extensionContext: vscode.ExtensionContext | undefined = undefined; @@ -47,5 +48,42 @@ export function activateExtension( ); })(); + // All opened terminals should be replaced by our own terminal + vscode.window.onDidOpenTerminal((terminal) => { + if (terminal.name === "Continue") { + return; + } + const options = terminal.creationOptions; + const capturedTerminal = new CapturedTerminal({ + ...options, + name: "Continue", + }); + terminal.dispose(); + if (!ideProtocolClient.continueTerminal) { + ideProtocolClient.continueTerminal = capturedTerminal; + } + }); + + // If any terminals are open to start, replace them + vscode.window.terminals.forEach((terminal) => { + if (terminal.name === "Continue") { + return; + } + const options = terminal.creationOptions; + const capturedTerminal = new CapturedTerminal( + { + ...options, + name: "Continue", + }, + (commandOutput: string) => { + ideProtocolClient.sendCommandOutput(commandOutput); + } + ); + terminal.dispose(); + if (!ideProtocolClient.continueTerminal) { + ideProtocolClient.continueTerminal = capturedTerminal; + } + }); + extensionContext = context; } diff --git a/extension/src/continueIdeClient.ts b/extension/src/continueIdeClient.ts index ef9a91c8..9a93a4ef 100644 --- a/extension/src/continueIdeClient.ts +++ b/extension/src/continueIdeClient.ts @@ -323,16 +323,22 @@ class IdeProtocolClient { return rangeInFiles; } - private continueTerminal: CapturedTerminal | undefined; + public continueTerminal: CapturedTerminal | undefined; async runCommand(command: string) { - if (!this.continueTerminal) { - this.continueTerminal = new CapturedTerminal("Continue"); + if (!this.continueTerminal || this.continueTerminal.isClosed()) { + this.continueTerminal = new CapturedTerminal({ + name: "Continue", + }); } this.continueTerminal.show(); return await 
this.continueTerminal.runCommand(command); } + + sendCommandOutput(output: string) { + this.messenger?.send("commandOutput", { output }); + } } export default IdeProtocolClient; diff --git a/extension/src/terminal/terminalEmulator.ts b/extension/src/terminal/terminalEmulator.ts index b3031baf..8e49737e 100644 --- a/extension/src/terminal/terminalEmulator.ts +++ b/extension/src/terminal/terminalEmulator.ts @@ -3,6 +3,7 @@ import * as vscode from "vscode"; import os = require("os"); import stripAnsi from "strip-ansi"; +import { longestCommonSubsequence } from "../util/lcs"; function loadNativeModule<T>(id: string): T | null { try { @@ -62,21 +63,38 @@ export class CapturedTerminal { this.terminal.show(); } + isClosed(): boolean { + return this.terminal.exitStatus !== undefined; + } + private commandQueue: [string, (output: string) => void][] = []; private hasRunCommand: boolean = false; + private dataEndsInPrompt(strippedData: string): boolean { + const lines = strippedData.split("\n"); + const lastLine = lines[lines.length - 1]; + + return ( + lines.length > 0 && + (((lastLine.includes("bash-") || lastLine.includes(") $ ")) && + lastLine.includes("$")) || + (lastLine.includes("]> ") && lastLine.includes(") [")) || + (lastLine.includes(" (") && lastLine.includes(")>")) || + (typeof this.commandPromptString !== "undefined" && + (lastLine.includes(this.commandPromptString) || + this.commandPromptString.length - + longestCommonSubsequence(lastLine, this.commandPromptString) + .length < + 3))) + ); + } + private async waitForCommandToFinish() { return new Promise<string>((resolve, reject) => { this.onDataListeners.push((data: any) => { const strippedData = stripAnsi(data); this.dataBuffer += strippedData; - const lines = this.dataBuffer.split("\n"); - if ( - lines.length > 0 && - (lines[lines.length - 1].includes("bash-") || - lines[lines.length - 1].includes(") $ ")) && - lines[lines.length - 1].includes("$") - ) { + if (this.dataEndsInPrompt(strippedData)) { resolve(this.dataBuffer); this.dataBuffer = ""; this.onDataListeners = []; @@ -86,12 +104,6 @@ export class CapturedTerminal { } async runCommand(command: string): Promise<string> { - if (!this.hasRunCommand) { - this.hasRunCommand = true; - // Let the first bash- prompt appear and let python env be opened - await this.waitForCommandToFinish(); - } - if (this.commandQueue.length === 0) { return new Promise(async (resolve, reject) => { this.commandQueue.push([command, resolve]); @@ -99,8 +111,12 @@ export class CapturedTerminal { while (this.commandQueue.length > 0) { const [command, resolve] = this.commandQueue.shift()!; + // Refresh the command prompt string every time in case it changes + await this.refreshCommandPromptString(); + this.terminal.sendText(command); - resolve(await this.waitForCommandToFinish()); + const output = await this.waitForCommandToFinish(); + resolve(output); } }); } else { @@ -112,8 +128,48 @@ export class CapturedTerminal { private readonly writeEmitter: vscode.EventEmitter<string>; - constructor(terminalName: string) { - this.shellCmd = "bash"; // getDefaultShell(); + private splitByCommandsBuffer: string = ""; + private readonly onCommandOutput: ((output: string) => void) | undefined; + + splitByCommandsListener(data: string) { + // Split the output by commands so it can be sent to Continue Server + + const strippedData = stripAnsi(data); + this.splitByCommandsBuffer += data; + if (this.dataEndsInPrompt(strippedData)) { + if (this.onCommandOutput) { + this.onCommandOutput(stripAnsi(this.splitByCommandsBuffer)); 
+ } + this.splitByCommandsBuffer = ""; + } + } + + private runningClearToGetPrompt: boolean = false; + private seenClear: boolean = false; + private commandPromptString: string | undefined = undefined; + private resolveMeWhenCommandPromptStringFound: + | ((_: unknown) => void) + | undefined = undefined; + + private async refreshCommandPromptString(): Promise<string | undefined> { + // Sends a message that will be received by the terminal to get the command prompt string, see the onData method below in constructor. + this.runningClearToGetPrompt = true; + this.terminal.sendText("echo"); + const promise = new Promise((resolve, reject) => { + this.resolveMeWhenCommandPromptStringFound = resolve; + }); + await promise; + return this.commandPromptString; + } + + constructor( + options: { name: string } & Partial<vscode.ExtensionTerminalOptions>, + onCommandOutput?: (output: string) => void + ) { + this.onCommandOutput = onCommandOutput; + + // this.shellCmd = "bash"; // getDefaultShell(); + this.shellCmd = getDefaultShell(); const env = { ...(process.env as any) }; if (os.platform() !== "win32") { @@ -123,7 +179,7 @@ export class CapturedTerminal { // Create the pseudo terminal this.ptyProcess = pty.spawn(this.shellCmd, [], { name: "xterm-256color", - cols: 160, // TODO: Get size of vscode terminal, and change with resize + cols: 250, // No way to get the size of VS Code terminal, or listen to resize, so make it just bigger than most conceivable VS Code widths rows: 26, cwd: getRootDir(), env, @@ -133,9 +189,57 @@ export class CapturedTerminal { this.writeEmitter = new vscode.EventEmitter<string>(); this.ptyProcess.onData((data: any) => { + if (this.runningClearToGetPrompt) { + if ( + stripAnsi(data) + .split("\n") + .flatMap((line) => line.split("\r")) + .find((line) => line.trim() === "echo") !== undefined + ) { + this.seenClear = true; + return; + } else if (this.seenClear) { + const strippedLines = stripAnsi(data) + .split("\r") + .filter( + (line) => + line.trim().length > 0 && + line.trim() !== "%" && + line.trim() !== "⏎" + ); + const lastLine = strippedLines[strippedLines.length - 1] || ""; + const lines = lastLine + .split("\n") + .filter( + (line) => + line.trim().length > 0 && + line.trim() !== "%" && + line.trim() !== "⏎" + ); + const commandPromptString = (lines[lines.length - 1] || "").trim(); + if ( + commandPromptString.length > 0 && + !commandPromptString.includes("echo") + ) { + this.runningClearToGetPrompt = false; + this.seenClear = false; + this.commandPromptString = commandPromptString; + console.log( + "Found command prompt string: " + this.commandPromptString + ); + if (this.resolveMeWhenCommandPromptStringFound) { + this.resolveMeWhenCommandPromptStringFound(undefined); + } + } + return; + } + } + // Pass data through to terminal + data = data.replace("⏎", ""); this.writeEmitter.fire(data); + this.splitByCommandsListener(data); for (let listener of this.onDataListeners) { listener(data); } @@ -154,7 +258,7 @@ export class CapturedTerminal { // Create and clear the terminal this.terminal = vscode.window.createTerminal({ - name: terminalName, + ...options, pty: newPty, }); this.terminal.show(); diff --git a/extension/src/util/lcs.ts b/extension/src/util/lcs.ts new file mode 100644 index 00000000..17ea63f9 --- /dev/null +++ b/extension/src/util/lcs.ts @@ -0,0 +1,30 @@ +export function longestCommonSubsequence(a: string, b: string) { + const lengths: number[][] = []; + for (let i = 0; i <= a.length; i++) { + lengths[i] = []; + for (let j = 0; j <= b.length; j++) { + if (i 
=== 0 || j === 0) { + lengths[i][j] = 0; + } else if (a[i - 1] === b[j - 1]) { + lengths[i][j] = lengths[i - 1][j - 1] + 1; + } else { + lengths[i][j] = Math.max(lengths[i - 1][j], lengths[i][j - 1]); + } + } + } + let result = ""; + let x = a.length; + let y = b.length; + while (x !== 0 && y !== 0) { + if (lengths[x][y] === lengths[x - 1][y]) { + x--; + } else if (lengths[x][y] === lengths[x][y - 1]) { + y--; + } else { + result = a[x - 1] + result; + x--; + y--; + } + } + return result; +}
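
A rough usage sketch of the reworked CapturedTerminal API from this patch. The constructor options, the onCommandOutput callback, and the runCommand signature come from the diff; the calling code below is hypothetical and only illustrates the intended flow: queue a command, wait for dataEndsInPrompt to spot the shell prompt, and receive the captured output.

// Hypothetical harness; CapturedTerminal and its signature are real in this
// patch, but this calling code is illustrative only.
import { CapturedTerminal } from "./terminal/terminalEmulator";

async function demo() {
  // The second argument is the new onCommandOutput callback, fired once per
  // command when the prompt reappears in the terminal output.
  const term = new CapturedTerminal({ name: "Continue" }, (output: string) => {
    console.log("captured command output:", output);
  });

  // runCommand queues commands and resolves each with its captured output.
  const versionInfo = await term.runCommand("python --version");
  console.log(versionInfo);
}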
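
A minimal sketch of why the new longestCommonSubsequence helper is used in dataEndsInPrompt: the cached command prompt string is matched fuzzily, so small edits to the prompt (for example a changed working directory) still register as a prompt. The prompt strings below are invented for illustration.

// Illustration only; the prompt strings are made up.
import { longestCommonSubsequence } from "./util/lcs";

const cachedPrompt = "user@host ~/project $";
const lastLine = "user@host ~/project/src $"; // same prompt after `cd src`

// dataEndsInPrompt treats the line as a prompt when nearly every character of
// the cached prompt appears, in order, in the terminal's last line.
const lcs = longestCommonSubsequence(lastLine, cachedPrompt);
const endsInPrompt = cachedPrompt.length - lcs.length < 3; // true here
console.log(lcs, endsInPrompt);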
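
Finally, a small consumer sketch for the new HeaderButtonWithText component, mirroring how gui.tsx and StepContainer.tsx use it after this change: the text label renders only while the button is hovered, which keeps the footer compact. The wrapper component below is an assumption, not part of the patch.

// Hypothetical consumer; mirrors the Clear History button in gui.tsx.
import { Trash } from "@styled-icons/heroicons-outline";
import HeaderButtonWithText from "./components/HeaderButtonWithText";

function ClearHistoryButton(props: { onClear: () => void }) {
  return (
    <HeaderButtonWithText text="Clear History" onClick={props.onClear}>
      <Trash size="1.6em" />
    </HeaderButtonWithText>
  );
}

export default ClearHistoryButton;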