Diffstat (limited to 'server/continuedev/core')
-rw-r--r--  server/continuedev/core/abstract_sdk.py    82
-rw-r--r--  server/continuedev/core/autopilot.py       746
-rw-r--r--  server/continuedev/core/config.py          114
-rw-r--r--  server/continuedev/core/context.py         516
-rw-r--r--  server/continuedev/core/env.py              31
-rw-r--r--  server/continuedev/core/lsp.py             416
-rw-r--r--  server/continuedev/core/main.py            437
-rw-r--r--  server/continuedev/core/models.py          113
-rw-r--r--  server/continuedev/core/observation.py      41
-rw-r--r--  server/continuedev/core/sdk.py             309
-rw-r--r--  server/continuedev/core/steps.py           963
11 files changed, 3768 insertions, 0 deletions
diff --git a/server/continuedev/core/abstract_sdk.py b/server/continuedev/core/abstract_sdk.py
new file mode 100644
index 00000000..fdb99d47
--- /dev/null
+++ b/server/continuedev/core/abstract_sdk.py
@@ -0,0 +1,82 @@
+from abc import ABC, abstractmethod
+from typing import Coroutine, List, Union
+
+from ..models.filesystem_edit import FileSystemEdit
+from .config import ContinueConfig
+from .main import ChatMessage, History, Step
+from .observation import Observation
+
+"""
+[[Generate]]
+[Prompt]
+Write an abstract class AbstractContinueSDK(ABC) that has all of the same methods as the ContinueSDK class, but without any implementation.
+All methods should be documented with the same docstrings as the ContinueSDK class and have the same types.
+[Context]
+./sdk.py:ContinueSDK
+"""
+
+
+class AbstractContinueSDK(ABC):
+ """The SDK provided as parameters to a step"""
+
+ @property
+ @abstractmethod
+ def history(self) -> History:
+ pass
+
+ @abstractmethod
+ async def _ensure_absolute_path(self, path: str) -> str:
+ pass
+
+ @abstractmethod
+ async def run_step(self, step: Step) -> Observation:
+ pass
+
+ @abstractmethod
+ async def apply_filesystem_edit(self, edit: FileSystemEdit):
+ pass
+
+ @abstractmethod
+ async def wait_for_user_input(self) -> str:
+ pass
+
+ @abstractmethod
+ async def wait_for_user_confirmation(self, prompt: str):
+ pass
+
+ @abstractmethod
+ async def run(self, commands: Union[List[str], str], cwd: str = None):
+ pass
+
+ @abstractmethod
+ async def edit_file(self, filename: str, prompt: str):
+ pass
+
+ @abstractmethod
+ async def append_to_file(self, filename: str, content: str):
+ pass
+
+ @abstractmethod
+ async def add_file(self, filename: str, content: Union[str, None]):
+ pass
+
+ @abstractmethod
+ async def delete_file(self, filename: str):
+ pass
+
+ @abstractmethod
+ async def add_directory(self, path: str):
+ pass
+
+ @abstractmethod
+ async def delete_directory(self, path: str):
+ pass
+
+ config: ContinueConfig
+
+ @abstractmethod
+ def set_loading_message(self, message: str):
+ pass
+
+ @abstractmethod
+ async def get_chat_context(self) -> List[ChatMessage]:
+ pass
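
(Illustration, not part of the diff: a minimal sketch of a Step written against the SDK interface above. The AddTestFileStep class and its body are hypothetical; it assumes Step subclasses implement an async run(sdk) method, as elsewhere in this codebase.)

    # Hypothetical Step consuming the AbstractContinueSDK surface (sketch only)
    from continuedev.core.main import Step


    class AddTestFileStep(Step):
        filename: str = "test_example.py"

        async def run(self, sdk):  # sdk satisfies AbstractContinueSDK
            # Create a file, then ask the model to fill it in from a prompt
            await sdk.add_file(self.filename, "# TODO: tests\n")
            await sdk.edit_file(self.filename, "Write a unit test for add()")
            await sdk.run("pytest -q")  # run a shell command in the workspace
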
diff --git a/server/continuedev/core/autopilot.py b/server/continuedev/core/autopilot.py
new file mode 100644
index 00000000..11c05378
--- /dev/null
+++ b/server/continuedev/core/autopilot.py
@@ -0,0 +1,746 @@
+import asyncio
+import json
+import os
+import time
+import traceback
+import uuid
+from functools import cached_property
+from typing import Callable, Coroutine, Dict, List, Optional
+
+import redbaron
+from aiohttp import ClientPayloadError
+from openai import error as openai_errors
+from pydantic import root_validator
+
+from ..libs.llm.prompts.chat import template_alpaca_messages
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.edit_config import edit_config_property
+from ..libs.util.logging import logger
+from ..libs.util.paths import getSavedContextGroupsPath
+from ..libs.util.queue import AsyncSubscriptionQueue
+from ..libs.util.strings import remove_quotes_and_escapes
+from ..libs.util.telemetry import posthog_logger
+from ..libs.util.traceback.traceback_parsers import (
+ get_javascript_traceback,
+ get_python_traceback,
+)
+from ..models.filesystem import RangeInFileWithContents
+from ..models.filesystem_edit import FileEditWithFullContents
+from ..models.main import ContinueBaseModel
+from ..plugins.context_providers.file import FileContextProvider
+from ..plugins.context_providers.highlighted_code import HighlightedCodeContextProvider
+from ..plugins.policies.default import DefaultPolicy
+from ..plugins.steps.on_traceback import DefaultOnTracebackStep
+from ..server.ide_protocol import AbstractIdeProtocolServer
+from ..server.meilisearch_server import get_meilisearch_url, stop_meilisearch
+from .config import ContinueConfig
+from .context import ContextManager
+from .main import (
+ Context,
+ ContextItem,
+ ContinueCustomException,
+ FullState,
+ History,
+ HistoryNode,
+ Policy,
+ SessionInfo,
+ Step,
+)
+from .observation import InternalErrorObservation, Observation
+from .sdk import ContinueSDK
+from .steps import DisplayErrorStep, ManualEditStep, ReversibleStep, UserInputStep
+
+
+def get_error_title(e: Exception) -> str:
+ if isinstance(e, openai_errors.APIError):
+ return "OpenAI is overloaded with requests. Please try again."
+ elif isinstance(e, openai_errors.RateLimitError):
+ return "This OpenAI API key has been rate limited. Please try again."
+ elif isinstance(e, openai_errors.Timeout):
+ return "OpenAI timed out. Please try again."
+ elif (
+ isinstance(e, openai_errors.InvalidRequestError)
+ and e.code == "context_length_exceeded"
+ ):
+ return e._message
+ elif isinstance(e, ClientPayloadError):
+ return "The request failed. Please try again."
+ elif isinstance(e, openai_errors.APIConnectionError):
+ return 'The request failed. Please check your internet connection and try again. If this issue persists, you can use our API key for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to ""'
+ elif isinstance(e, openai_errors.InvalidRequestError):
+ return "Invalid request sent to OpenAI. Please try again."
+ elif "rate_limit_ip_middleware" in str(e):
+ return "You have reached your limit for free usage of our token. You can continue using Continue by entering your own OpenAI API key in VS Code settings."
+ elif str(e).startswith("Cannot connect to host"):
+ return (
+ "The request failed. Please check your internet connection and try again."
+ )
+ return str(e) or repr(e)
+
+
+class Autopilot(ContinueBaseModel):
+ ide: AbstractIdeProtocolServer
+
+ policy: Policy = DefaultPolicy()
+ history: History = History.from_empty()
+ context: Context = Context()
+ full_state: Optional[FullState] = None
+ session_info: Optional[SessionInfo] = None
+ context_manager: ContextManager = ContextManager()
+ continue_sdk: ContinueSDK = None
+
+ _on_update_callbacks: List[Callable[[FullState], None]] = []
+
+ _active: bool = False
+ _should_halt: bool = False
+ _main_user_input_queue: List[str] = []
+
+ _user_input_queue = AsyncSubscriptionQueue()
+ _retry_queue = AsyncSubscriptionQueue()
+
+ started: bool = False
+
+ async def load(
+ self, config: Optional[ContinueConfig] = None, only_reloading: bool = False
+ ):
+ self.continue_sdk = await ContinueSDK.create(self, config=config)
+ if override_policy := self.continue_sdk.config.policy_override:
+ self.policy = override_policy
+
+ # Load documents into the search index
+ logger.debug("Starting context manager")
+ await self.context_manager.start(
+ self.continue_sdk.config.context_providers
+ + [
+ HighlightedCodeContextProvider(ide=self.ide),
+ FileContextProvider(workspace_dir=self.ide.workspace_directory),
+ ],
+ self.continue_sdk,
+ only_reloading=only_reloading,
+ )
+
+ async def start(
+ self,
+ full_state: Optional[FullState] = None,
+ config: Optional[ContinueConfig] = None,
+ ):
+ await self.load(config=config, only_reloading=False)
+
+ if full_state is not None:
+ self.history = full_state.history
+ self.session_info = full_state.session_info
+
+ # Load saved context groups
+ context_groups_file = getSavedContextGroupsPath()
+ try:
+ with open(context_groups_file, "r") as f:
+ json_ob = json.load(f)
+ for title, context_group in json_ob.items():
+ self._saved_context_groups[title] = [
+ ContextItem(**item) for item in context_group
+ ]
+ except Exception as e:
+ logger.warning(
+ f"Failed to load saved_context_groups.json: {e}. Reverting to empty list."
+ )
+ self._saved_context_groups = {}
+
+ self.started = True
+
+ async def reload_config(self):
+ await self.load(config=None, only_reloading=True)
+ await self.update_subscribers()
+
+ async def cleanup(self):
+ stop_meilisearch()
+
+ class Config:
+ arbitrary_types_allowed = True
+ keep_untouched = (cached_property,)
+
+ @root_validator(pre=True)
+ def fill_in_values(cls, values):
+ full_state: FullState = values.get("full_state")
+ if full_state is not None:
+ values["history"] = full_state.history
+ return values
+
+ async def get_full_state(self) -> FullState:
+ full_state = FullState(
+ history=self.history,
+ active=self._active,
+ user_input_queue=self._main_user_input_queue,
+ slash_commands=self.get_available_slash_commands(),
+ adding_highlighted_code=self.context_manager.context_providers[
+ "code"
+ ].adding_highlighted_code
+ if "code" in self.context_manager.context_providers
+ else False,
+ selected_context_items=await self.context_manager.get_selected_items()
+ if self.context_manager is not None
+ else [],
+ session_info=self.session_info,
+ config=self.continue_sdk.config,
+ saved_context_groups=self._saved_context_groups,
+ context_providers=self.context_manager.get_provider_descriptions(),
+ meilisearch_url=get_meilisearch_url(),
+ )
+ self.full_state = full_state
+ return full_state
+
+ def get_available_slash_commands(self) -> List[Dict]:
+ custom_commands = (
+ list(
+ map(
+ lambda x: {"name": x.name, "description": x.description},
+ self.continue_sdk.config.custom_commands,
+ )
+ )
+ or []
+ )
+ slash_commands = (
+ list(
+ map(
+ lambda x: {"name": x.name, "description": x.description},
+ self.continue_sdk.config.slash_commands,
+ )
+ )
+ or []
+ )
+ cmds = custom_commands + slash_commands
+ cmds.sort(key=lambda x: x["name"] == "edit", reverse=True)
+ return cmds
+
+ async def clear_history(self):
+ # Reset history
+ self.history = History.from_empty()
+ self._main_user_input_queue = []
+ self._active = False
+
+ # Clear context
+ # await self.context_manager.clear_context()
+
+ await self.update_subscribers()
+
+ def on_update(self, callback: Callable[[FullState], Coroutine]):
+ """Subscribe to changes to state"""
+ self._on_update_callbacks.append(callback)
+
+ async def update_subscribers(self):
+ full_state = await self.get_full_state()
+ for callback in self._on_update_callbacks:
+ await callback(full_state)
+
+ def give_user_input(self, input: str, index: int):
+ self._user_input_queue.post(str(index), input)
+
+ async def wait_for_user_input(self) -> str:
+ self._active = False
+ await self.update_subscribers()
+ user_input = await self._user_input_queue.get(str(self.history.current_index))
+ self._active = True
+ await self.update_subscribers()
+ return user_input
+
+ _manual_edits_buffer: List[FileEditWithFullContents] = []
+
+ async def reverse_to_index(self, index: int):
+ try:
+ while self.history.get_current_index() >= index:
+ current_step = self.history.get_current().step
+ self.history.step_back()
+ if issubclass(current_step.__class__, ReversibleStep):
+ await current_step.reverse(self.continue_sdk)
+
+ await self.update_subscribers()
+ except Exception as e:
+ logger.debug(e)
+
+ def handle_manual_edits(self, edits: List[FileEditWithFullContents]):
+ for edit in edits:
+ self._manual_edits_buffer.append(edit)
+ # TODO: You're storing a lot of unnecessary data here. Can compress into EditDiffs on the spot, and merge.
+ # self._manual_edits_buffer = merge_file_edit(self._manual_edits_buffer, edit)
+ # Note that this is being overridden to do nothing in DemoAgent
+
+ async def handle_command_output(self, output: str):
+ get_traceback_funcs = [get_python_traceback, get_javascript_traceback]
+ for get_tb_func in get_traceback_funcs:
+ # Named `tb` to avoid shadowing the imported `traceback` module
+ tb = get_tb_func(output)
+ if tb is not None and self.continue_sdk.config.on_traceback is not None:
+ step = self.continue_sdk.config.on_traceback(output=output)
+ await self._run_singular_step(step)
+ return
+
+ async def handle_debug_terminal(self, content: str):
+ """Run the debug terminal step"""
+ # step = self.continue_sdk.config.on_traceback(output=content)
+ step = DefaultOnTracebackStep(output=content)
+ await self._run_singular_step(step)
+
+ async def handle_highlighted_code(
+ self,
+ range_in_files: List[RangeInFileWithContents],
+ edit: Optional[bool] = False,
+ ):
+ if "code" not in self.context_manager.context_providers:
+ return
+
+ # Add to context manager
+ await self.context_manager.context_providers["code"].handle_highlighted_code(
+ range_in_files, edit
+ )
+
+ await self.update_subscribers()
+
+ _step_depth: int = 0
+
+ async def retry_at_index(self, index: int):
+ self.history.timeline[index].step.hide = True
+ self._retry_queue.post(str(index), None)
+
+ async def delete_at_index(self, index: int):
+ if not self.history.timeline[index].active:
+ self.history.timeline[index].step.hide = True
+
+ self.history.timeline[index].deleted = True
+ self.history.timeline[index].active = False
+
+ await self.update_subscribers()
+
+ async def edit_step_at_index(self, user_input: str, index: int):
+ node_to_rerun = self.history.timeline[index].copy()
+ step_to_rerun = node_to_rerun.step
+ step_to_rerun.user_input = user_input
+ step_to_rerun.description = user_input
+
+ # Halt the agent's currently running jobs (delete them)
+ while len(self.history.timeline) > index:
+ # Remove from timeline
+ node_to_delete = self.history.timeline.pop()
+ # Delete so it is stopped if in the middle of running
+ node_to_delete.deleted = True
+
+ self.history.current_index = index - 1
+
+ # Set the context to the context used by that step
+ await self.context_manager.clear_context()
+ for context_item in node_to_rerun.context_used:
+ await self.context_manager.manually_add_context_item(context_item)
+
+ await self.update_subscribers()
+
+ # Rerun from the current step
+ await self.run_from_step(step_to_rerun)
+
+ async def delete_context_with_ids(
+ self, ids: List[str], index: Optional[int] = None
+ ):
+ if index is None:
+ await self.context_manager.delete_context_with_ids(ids)
+ else:
+ self.history.timeline[index].context_used = list(
+ filter(
+ lambda item: item.description.id.to_string() not in ids,
+ self.history.timeline[index].context_used,
+ )
+ )
+ await self.update_subscribers()
+
+ async def toggle_adding_highlighted_code(self):
+ if "code" not in self.context_manager.context_providers:
+ return
+
+ self.context_manager.context_providers[
+ "code"
+ ].adding_highlighted_code = not self.context_manager.context_providers[
+ "code"
+ ].adding_highlighted_code
+ await self.update_subscribers()
+
+ async def set_editing_at_ids(self, ids: List[str]):
+ if "code" not in self.context_manager.context_providers:
+ return
+
+ await self.context_manager.context_providers["code"].set_editing_at_ids(ids)
+ await self.update_subscribers()
+
+ async def _run_singular_step(
+ self, step: "Step", is_future_step: bool = False
+ ) -> Optional[Observation]:
+ # Allow config to set disallowed steps
+ if step.__class__.__name__ in self.continue_sdk.config.disallowed_steps:
+ return None
+
+ # If a parent step is deleted/cancelled, don't run this step
+ # TODO: This was problematic because when running a step after deleting one, it seemed to think that was the parent
+ # last_depth = self._step_depth
+ # i = self.history.current_index
+ # while i >= 0 and self.history.timeline[i].depth == last_depth - 1:
+ # if self.history.timeline[i].deleted:
+ # return None
+ # last_depth = self.history.timeline[i].depth
+ # i -= 1
+
+ # Log the context and step to dev data
+ context_used = await self.context_manager.get_selected_items()
+ posthog_logger.capture_event(
+ "step run", {"step_name": step.name, "params": step.dict()}
+ )
+ step_id = uuid.uuid4().hex
+ dev_data_logger.capture(
+ "step_run",
+ {"step_name": step.name, "params": step.dict(), "step_id": step_id},
+ )
+ dev_data_logger.capture(
+ "context_used",
+ {
+ "context": list(
+ map(
+ lambda item: item.dict(),
+ context_used,
+ )
+ ),
+ "step_id": step_id,
+ },
+ )
+
+ if not is_future_step:
+ # Check manual edits buffer, clear out if needed by creating a ManualEditStep
+ if len(self._manual_edits_buffer) > 0:
+ manualEditsStep = ManualEditStep.from_sequence(
+ self._manual_edits_buffer
+ )
+ self._manual_edits_buffer = []
+ await self._run_singular_step(manualEditsStep)
+
+ # Update history - do this first so we get top-first tree ordering
+ index_of_history_node = self.history.add_node(
+ HistoryNode(
+ step=step,
+ observation=None,
+ depth=self._step_depth,
+ context_used=context_used,
+ )
+ )
+
+ # Call all subscribed callbacks
+ await self.update_subscribers()
+
+ # Try to run step and handle errors
+ self._step_depth += 1
+
+ caught_error = False
+ try:
+ observation = await step(self.continue_sdk)
+ except Exception as e:
+ if (
+ index_of_history_node >= len(self.history.timeline)
+ or self.history.timeline[index_of_history_node].deleted
+ ):
+ # If step was deleted/cancelled, don't show error or allow retry
+ return None
+
+ caught_error = True
+
+ is_continue_custom_exception = (
+ issubclass(e.__class__, ContinueCustomException)
+ or e.__class__.__name__ == ContinueCustomException.__name__
+ )
+
+ error_string = (
+ e.message
+ if is_continue_custom_exception
+ else "\n".join(traceback.format_exception(e))
+ )
+ error_title = (
+ e.title if is_continue_custom_exception else get_error_title(e)
+ )
+
+ # Attach an InternalErrorObservation to the step and unhide it.
+ logger.error(f"Error while running step: \n{error_string}\n{error_title}")
+ posthog_logger.capture_event(
+ "step error",
+ {
+ "error_message": error_string,
+ "error_title": error_title,
+ "step_name": step.name,
+ "params": step.dict(),
+ },
+ )
+
+ observation = InternalErrorObservation(
+ error=error_string, title=error_title
+ )
+
+ # Reveal this step, but hide all of the following steps (its substeps)
+ step_was_hidden = step.hide
+
+ step.hide = False
+ i = self.history.get_current_index()
+ while self.history.timeline[i].step.name != step.name:
+ self.history.timeline[i].step.hide = True
+ i -= 1
+
+ # i is now the index of the step that we want to show/rerun
+ self.history.timeline[i].observation = observation
+ self.history.timeline[i].active = False
+
+ await self.update_subscribers()
+
+ # ContinueCustomException can optionally specify a step to run on the error
+ if is_continue_custom_exception and e.with_step is not None:
+ await self._run_singular_step(e.with_step)
+
+ # Wait for a retry signal and then resume the step
+ self._active = False
+ await self._retry_queue.get(str(i))
+ self._active = True
+ # An "ignore and continue" button might be worth adding here.
+ # The retried step should have the same depth, so decrement first.
+ self._step_depth -= 1
+ copy_step = step.copy()
+ copy_step.hide = step_was_hidden
+ observation = await self._run_singular_step(copy_step)
+ self._step_depth += 1
+
+ self._step_depth -= 1
+
+ # Add observation to history, unless already attached error observation
+ if not caught_error and index_of_history_node < len(self.history.timeline):
+ self.history.timeline[index_of_history_node].observation = observation
+ self.history.timeline[index_of_history_node].active = False
+ await self.update_subscribers()
+
+ # Update its description
+ async def update_description():
+ if self.continue_sdk.config.disable_summaries:
+ return
+
+ description = await step.describe(self.continue_sdk.models)
+ if description is not None:
+ step.description = description
+ # Update subscribers with new description
+ await self.update_subscribers()
+
+ create_async_task(
+ update_description(),
+ on_error=lambda e: self.continue_sdk.run_step(
+ DisplayErrorStep.from_exception(e)
+ ),
+ )
+
+ # Create the session title if not done yet
+ if self.session_info is None or self.session_info.title is None:
+ visible_nodes = list(
+ filter(lambda node: not node.step.hide, self.history.timeline)
+ )
+
+ user_input = None
+ should_create_title = False
+ for visible_node in visible_nodes:
+ if isinstance(visible_node.step, UserInputStep):
+ if user_input is None:
+ user_input = visible_node.step.user_input
+ else:
+ # More than one user input, so don't create title
+ should_create_title = False
+ break
+ elif user_input is None:
+ continue
+ else:
+ # Already have user input, now have the next step
+ should_create_title = True
+ break
+
+ # Only create the title if the step after the first input is done
+ if should_create_title:
+ create_async_task(
+ self.create_title(backup=user_input),
+ on_error=lambda e: self.continue_sdk.run_step(
+ DisplayErrorStep.from_exception(e)
+ ),
+ )
+
+ return observation
+
+ async def run_from_step(self, step: "Step"):
+ # if self._active:
+ # raise RuntimeError("Autopilot is already running")
+ self._active = True
+
+ next_step = step
+ is_future_step = False
+ while not (next_step is None or self._should_halt):
+ if is_future_step:
+ # If future step, then we are replaying and need to delete the step from history so it can be replaced
+ self.history.remove_current_and_substeps()
+
+ await self._run_singular_step(next_step, is_future_step)
+
+ if next_step := self.policy.next(self.continue_sdk.config, self.history):
+ is_future_step = False
+ elif next_step := self.history.take_next_step():
+ is_future_step = True
+ else:
+ next_step = None
+
+ self._active = False
+
+ # Update subscribers once more so the final value of `active` reaches the frontend; better state-syncing tools would make this unnecessary
+ await self.update_subscribers()
+
+ async def run_from_observation(self, observation: Observation):
+ next_step = self.policy.next(self.continue_sdk.config, self.history)
+ await self.run_from_step(next_step)
+
+ async def run_policy(self):
+ first_step = self.policy.next(self.continue_sdk.config, self.history)
+ await self.run_from_step(first_step)
+
+ async def _request_halt(self):
+ if self._active:
+ self._should_halt = True
+ while self._active:
+ # Sleep asynchronously so the event loop (and the active step) can make progress
+ await asyncio.sleep(0.1)
+ self._should_halt = False
+ return None
+
+ def set_current_session_title(self, title: str):
+ self.session_info = SessionInfo(
+ title=title,
+ session_id=self.ide.session_id,
+ date_created=str(time.time()),
+ workspace_directory=self.ide.workspace_directory,
+ )
+
+ async def create_title(self, backup: Optional[str] = None):
+ # Use the first input and first response to create a title for the session info, making the session saveable
+ if self.session_info is not None and self.session_info.title is not None:
+ return
+
+ if self.continue_sdk.config.disable_summaries:
+ if backup is not None:
+ title = backup
+ else:
+ title = "New Session"
+ else:
+ chat_history = list(
+ map(lambda x: x.dict(), await self.continue_sdk.get_chat_context())
+ )
+ chat_history_str = template_alpaca_messages(chat_history)
+ title = await self.continue_sdk.models.summarize.complete(
+ f"{chat_history_str}\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: ",
+ max_tokens=20,
+ log=False,
+ )
+ title = remove_quotes_and_escapes(title)
+
+ self.set_current_session_title(title)
+ await self.update_subscribers()
+ dev_data_logger.capture("new_session", self.session_info.dict())
+
+ async def accept_user_input(self, user_input: str):
+ self._main_user_input_queue.append(user_input)
+ # await self.update_subscribers()
+
+ if len(self._main_user_input_queue) > 1:
+ return
+
+ # await self._request_halt()
+ # Just run the step that takes user input, and
+ # then up to the policy to decide how to deal with it.
+ self._main_user_input_queue.pop(0)
+ # await self.update_subscribers()
+ await self.run_from_step(UserInputStep(user_input=user_input))
+
+ while len(self._main_user_input_queue) > 0:
+ await self.run_from_step(
+ UserInputStep(user_input=self._main_user_input_queue.pop(0))
+ )
+
+ async def accept_refinement_input(self, user_input: str, index: int):
+ await self._request_halt()
+ await self.reverse_to_index(index)
+ await self.run_from_step(UserInputStep(user_input=user_input))
+
+ async def reject_diff(self, step_index: int):
+ # Hide the edit step and the UserInputStep before it
+ self.history.timeline[step_index].step.hide = True
+ for i in range(step_index - 1, -1, -1):
+ if isinstance(self.history.timeline[i].step, UserInputStep):
+ self.history.timeline[i].step.hide = True
+ break
+ await self.update_subscribers()
+
+ async def select_context_item(self, id: str, query: str):
+ await self.context_manager.select_context_item(id, query)
+ await self.update_subscribers()
+
+ async def select_context_item_at_index(self, id: str, query: str, index: int):
+ # TODO: This is different from how it works for the main input
+ # Ideally still tracked through the ContextProviders
+ # so they can watch for duplicates
+ context_item = await self.context_manager.get_context_item(id, query)
+ if context_item is None:
+ return
+ self.history.timeline[index].context_used.append(context_item)
+ await self.update_subscribers()
+
+ async def set_config_attr(self, key_path: List[str], value: redbaron.RedBaron):
+ edit_config_property(key_path, value)
+ await self.update_subscribers()
+
+ _saved_context_groups: Dict[str, List[ContextItem]] = {}
+
+ def _persist_context_groups(self):
+ context_groups_file = getSavedContextGroupsPath()
+ if os.path.exists(context_groups_file):
+ with open(context_groups_file, "w") as f:
+ dict_to_save = {
+ title: [item.dict() for item in context_items]
+ for title, context_items in self._saved_context_groups.items()
+ }
+ json.dump(dict_to_save, f)
+
+ async def save_context_group(self, title: str, context_items: List[ContextItem]):
+ self._saved_context_groups[title] = context_items
+ await self.update_subscribers()
+
+ # Update saved context groups
+ self._persist_context_groups()
+
+ posthog_logger.capture_event(
+ "save_context_group", {"title": title, "length": len(context_items)}
+ )
+
+ async def select_context_group(self, id: str):
+ if id not in self._saved_context_groups:
+ logger.warning(f"Context group {id} not found")
+ return
+ context_group = self._saved_context_groups[id]
+ await self.context_manager.clear_context()
+ for item in context_group:
+ await self.context_manager.manually_add_context_item(item)
+ await self.update_subscribers()
+
+ posthog_logger.capture_event(
+ "select_context_group", {"title": id, "length": len(context_group)}
+ )
+ dev_data_logger.capture(
+ "select_context_group", {"title": id, "items": context_group}
+ )
+
+ async def delete_context_group(self, id: str):
+ if id not in self._saved_context_groups:
+ logger.warning(f"Context group {id} not found")
+ return
+ del self._saved_context_groups[id]
+ await self.update_subscribers()
+
+ # Update saved context groups
+ self._persist_context_groups()
+
+ posthog_logger.capture_event("delete_context_group", {"title": id})
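
(Illustration, not part of the diff: a sketch of driving the Autopilot defined above. It assumes an `ide` object implementing AbstractIdeProtocolServer is available; `demo` and `on_state` are hypothetical names.)

    from continuedev.core.autopilot import Autopilot


    async def demo(ide):
        autopilot = Autopilot(ide=ide)
        await autopilot.start()  # loads config and starts context providers

        async def on_state(full_state):
            print("active:", full_state.active)

        autopilot.on_update(on_state)  # called after every state change
        await autopilot.accept_user_input("Explain the selected code")
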
diff --git a/server/continuedev/core/config.py b/server/continuedev/core/config.py
new file mode 100644
index 00000000..2bbb42cc
--- /dev/null
+++ b/server/continuedev/core/config.py
@@ -0,0 +1,114 @@
+from typing import Dict, List, Optional, Type
+
+from pydantic import BaseModel, Field, validator
+
+from ..libs.llm.openai_free_trial import OpenAIFreeTrial
+from .context import ContextProvider
+from .main import Policy, Step
+from .models import Models
+
+
+class SlashCommand(BaseModel):
+ name: str
+ description: str
+ step: Type[Step]
+ params: Optional[Dict] = {}
+
+ def dict(self, *args, **kwargs):
+ return {
+ "name": self.name,
+ "description": self.description,
+ "params": self.params,
+ "step": self.step.__name__,
+ }
+
+
+class CustomCommand(BaseModel):
+ name: str
+ prompt: str
+ description: str
+
+
+class ContinueConfig(BaseModel):
+ """
+ Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\\.continue\\config.py` on Windows). This class is instantiated from the config file for every new session.
+ """
+
+ steps_on_startup: List[Step] = Field(
+ [],
+ description="Steps that will be automatically run at the beginning of a new session",
+ )
+ disallowed_steps: Optional[List[str]] = Field(
+ [],
+ description="Steps that are not allowed to be run, and will be skipped if attempted",
+ )
+ allow_anonymous_telemetry: Optional[bool] = Field(
+ True,
+ description="If this field is set to True, we will collect anonymous telemetry as described in the documentation page on telemetry. If set to False, we will not collect any data.",
+ )
+ models: Models = Field(
+ Models(
+ default=OpenAIFreeTrial(model="gpt-4"),
+ summarize=OpenAIFreeTrial(model="gpt-3.5-turbo"),
+ ),
+ description="Configuration for the models used by Continue. Read more about how to configure models in the documentation.",
+ )
+ temperature: Optional[float] = Field(
+ 0.5,
+ description="The temperature parameter for sampling from the LLM. Higher temperatures will result in more random output, while lower temperatures will result in more predictable output. This value ranges from 0 to 1.",
+ )
+ custom_commands: Optional[List[CustomCommand]] = Field(
+ [
+ CustomCommand(
+ name="test",
+ description="This is an example custom command. Use /config to edit it and create more",
+ prompt="Write a comprehensive set of unit tests for the selected code. It should set up, run tests that check for correctness including important edge cases, and tear down. Ensure that the tests are complete and sophisticated. Give the tests just as chat output; don't edit any file.",
+ )
+ ],
+ description="An array of custom commands that allow you to reuse prompts. Each has name, description, and prompt properties. When you enter /<name> in the text input, it will act as a shortcut to the prompt.",
+ )
+ slash_commands: Optional[List[SlashCommand]] = Field(
+ [],
+ description="An array of slash commands that let you map custom Steps to a shortcut.",
+ )
+ on_traceback: Optional[Step] = Field(
+ None,
+ description="The step that will be run when a traceback is detected (when you use the shortcut cmd+shift+R)",
+ )
+ system_message: Optional[str] = Field(
+ None, description="A system message that will always be followed by the LLM"
+ )
+ policy_override: Optional[Policy] = Field(
+ None,
+ description="A Policy object that can be used to override the default behavior of Continue, for example in order to build custom agents that take multiple steps at a time.",
+ )
+ context_providers: List[ContextProvider] = Field(
+ [],
+ description="A list of ContextProvider objects that can be used to provide context to the LLM by typing '@'. Read more about ContextProviders in the documentation.",
+ )
+ user_token: Optional[str] = Field(
+ None, description="An optional token to identify the user."
+ )
+ data_server_url: Optional[str] = Field(
+ "https://us-west1-autodebug.cloudfunctions.net",
+ description="The URL of the server where development data is sent. No data is sent unless a valid user token is provided.",
+ )
+ disable_summaries: Optional[bool] = Field(
+ False,
+ description="If set to `True`, Continue will not generate summaries for each Step. This can be useful if you want to save on compute.",
+ )
+
+ @validator("temperature", pre=True)
+ def temperature_validator(cls, v):
+ return max(0.0, min(1.0, v))
+
+ @staticmethod
+ def from_filepath(filepath: str) -> "ContinueConfig":
+ # Use importlib to load the config file config.py at the given path
+ import importlib.util
+
+ spec = importlib.util.spec_from_file_location("config", filepath)
+ config = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(config)
+
+ return config.config
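
(Illustration, not part of the diff: roughly what a ~/.continue/config.py loaded by from_filepath above might contain. The file must define a module-level `config`; the model and command choices are illustrative.)

    from continuedev.core.config import ContinueConfig, CustomCommand
    from continuedev.core.models import Models
    from continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial

    config = ContinueConfig(
        allow_anonymous_telemetry=False,
        temperature=0.3,
        models=Models(
            default=OpenAIFreeTrial(model="gpt-4"),
            summarize=OpenAIFreeTrial(model="gpt-3.5-turbo"),
        ),
        custom_commands=[
            CustomCommand(
                name="docstring",
                description="Write a docstring for the selected code",
                prompt="Write a clear docstring for the selected code.",
            )
        ],
    )
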
diff --git a/server/continuedev/core/context.py b/server/continuedev/core/context.py
new file mode 100644
index 00000000..547a1593
--- /dev/null
+++ b/server/continuedev/core/context.py
@@ -0,0 +1,516 @@
+import asyncio
+import time
+from abc import abstractmethod
+from typing import Awaitable, Callable, Dict, List, Optional
+
+from meilisearch_python_async import Client
+from pydantic import BaseModel, Field
+
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.logging import logger
+from ..libs.util.telemetry import posthog_logger
+from ..server.meilisearch_server import (
+ check_meilisearch_running,
+ get_meilisearch_url,
+ poll_meilisearch_running,
+ restart_meilisearch,
+ start_meilisearch,
+)
+from .main import (
+ ChatMessage,
+ ContextItem,
+ ContextItemDescription,
+ ContextItemId,
+ ContextProviderDescription,
+)
+
+
+class ContinueSDK(BaseModel):
+ """To avoid circular imports"""
+
+ ...
+
+
+SEARCH_INDEX_NAME = "continue_context_items"
+
+
+class ContextProvider(BaseModel):
+ """
+ The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'.
+ When you type '@', the context provider will be asked to populate a list of options.
+ These options will be updated on each keystroke.
+ When you hit enter on an option, the context provider will add that item to the autopilot's list of context items (all stored in the ContextManager object).
+ """
+
+ title: str = Field(
+ ...,
+ description="The title of the ContextProvider. This is what must be typed in the input to trigger the ContextProvider.",
+ )
+ sdk: ContinueSDK = Field(
+ None, description="The ContinueSDK instance accessible by the ContextProvider"
+ )
+ delete_documents: Callable[[List[str]], Awaitable] = Field(
+ None, description="Function to delete documents"
+ )
+ update_documents: Callable[[List[ContextItem], str], Awaitable] = Field(
+ None, description="Function to update documents"
+ )
+
+ display_title: str = Field(
+ ...,
+ description="The display title of the ContextProvider shown in the dropdown menu",
+ )
+ description: str = Field(
+ ...,
+ description="A description of the ContextProvider displayed in the dropdown menu",
+ )
+ dynamic: bool = Field(
+ ..., description="Indicates whether the ContextProvider is dynamic"
+ )
+ requires_query: bool = Field(
+ False,
+ description="Indicates whether the ContextProvider requires a query. For example, the SearchContextProvider requires you to type '@search <STRING_TO_SEARCH>'. This will change the behavior of the UI so that it can indicate the expectation for a query.",
+ )
+
+ selected_items: List[ContextItem] = Field(
+ [], description="List of selected items in the ContextProvider"
+ )
+
+ def dict(self, *args, **kwargs):
+ original_dict = super().dict(*args, **kwargs)
+ original_dict.pop("sdk", None)
+ original_dict.pop("delete_documents", None)
+ original_dict.pop("update_documents", None)
+ return original_dict
+
+ async def start(self, sdk: ContinueSDK, delete_documents, update_documents):
+ """
+ Starts the context provider.
+
+ Default implementation sets the sdk.
+ """
+ self.sdk = sdk
+ self.delete_documents = delete_documents
+ self.update_documents = update_documents
+
+ async def get_selected_items(self) -> List[ContextItem]:
+ """
+ Returns all of the selected ContextItems.
+
+ Default implementation simply returns self.selected_items.
+
+ Other implementations may add an async processing step.
+ """
+ return self.selected_items
+
+ @abstractmethod
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ """
+ Provide documents for the search index. This is run on startup.
+
+ This is the only method that must be implemented.
+ """
+
+ async def get_chat_messages(self) -> List[ChatMessage]:
+ """
+ Returns all of the chat messages for the context provider.
+
+ Default implementation has a string template.
+ """
+ return [
+ ChatMessage(
+ role="user",
+ content=f"{item.description.name}: {item.description.description}\n\n{item.content}",
+ summary=item.description.description,
+ )
+ for item in await self.get_selected_items()
+ ]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ """
+ Returns the ContextItem with the given id.
+
+ Default implementation uses the search index to get the item.
+ """
+ async with Client(get_meilisearch_url()) as search_client:
+ try:
+ result = await search_client.index(SEARCH_INDEX_NAME).get_document(
+ id.to_string()
+ )
+ return ContextItem(
+ description=ContextItemDescription(
+ name=result["name"], description=result["description"], id=id
+ ),
+ content=result["content"],
+ )
+ except Exception as e:
+ logger.warning(f"Error while retrieving document from meilisearch: {e}")
+
+ return None
+
+ async def delete_context_with_ids(self, ids: List[ContextItemId]):
+ """
+ Deletes the ContextItems with the given IDs, lets ContextProviders recalculate.
+
+ Default implementation simply deletes those with the given ids.
+ """
+ id_strings = {id.to_string() for id in ids}
+ self.selected_items = list(
+ filter(
+ lambda item: item.description.id.to_string() not in id_strings,
+ self.selected_items,
+ )
+ )
+
+ async def clear_context(self):
+ """
+ Clears all context.
+
+ Default implementation simply clears the selected items.
+ """
+ self.selected_items = []
+
+ async def add_context_item(self, id: ContextItemId, query: str):
+ """
+ Adds the given ContextItem to the list of ContextItems.
+
+ Default implementation simply appends the item, not allowing duplicates.
+
+ This also means an item's full information does not have to be loaded until it is selected.
+ """
+
+ # Don't add duplicate context
+ for item in self.selected_items:
+ if item.description.id.item_id == id.item_id:
+ return
+
+ if new_item := await self.get_item(id, query):
+ self.selected_items.append(new_item)
+
+ async def manually_add_context_item(self, context_item: ContextItem):
+ for item in self.selected_items:
+ if item.description.id.item_id == context_item.description.id.item_id:
+ return
+
+ self.selected_items.append(context_item)
+
+
+class ContextManager:
+ """
+ The context manager is responsible for storing the context to be passed to the LLM, including
+ - ContextItems (highlighted code, GitHub Issues, etc.)
+ - ChatMessages in the history
+ - System Message
+ - Functions
+
+ It is responsible for compiling all of this information into a single prompt without exceeding the token limit.
+ """
+
+ def get_provider_descriptions(self) -> List[ContextProviderDescription]:
+ """
+ Returns a list of ContextProviderDescriptions for each context provider.
+ """
+ return [
+ ContextProviderDescription(
+ title=provider.title,
+ display_title=provider.display_title,
+ description=provider.description,
+ dynamic=provider.dynamic,
+ requires_query=provider.requires_query,
+ )
+ for provider in self.context_providers.values()
+ if provider.title != "code"
+ ]
+
+ async def get_selected_items(self) -> List[ContextItem]:
+ """
+ Returns all of the selected ContextItems.
+ """
+ return sum(
+ [
+ await provider.get_selected_items()
+ for provider in self.context_providers.values()
+ ],
+ [],
+ )
+
+ async def get_chat_messages(self) -> List[ChatMessage]:
+ """
+ Returns chat messages from each provider.
+ """
+ return sum(
+ [
+ await provider.get_chat_messages()
+ for provider in self.context_providers.values()
+ ],
+ [],
+ )
+
+ def __init__(self):
+ self.context_providers = {}
+ self.provider_titles = set()
+
+ async def start(
+ self,
+ context_providers: List[ContextProvider],
+ sdk: ContinueSDK,
+ only_reloading: bool = False,
+ ):
+ """
+ Starts the context manager.
+ """
+ new_context_providers = {
+ provider.title: provider
+ for provider in context_providers
+ if provider.title not in self.provider_titles
+ }
+
+ self.context_providers = {
+ provider.title: provider for provider in context_providers
+ }
+ self.provider_titles = {provider.title for provider in context_providers}
+
+ for provider in context_providers:
+ await provider.start(
+ sdk,
+ ContextManager.delete_documents,
+ ContextManager.update_documents,
+ )
+
+ async def on_err(e):
+ logger.warning(f"Error loading meilisearch index: {e}")
+
+ # Start MeiliSearch in the background without blocking
+ async def load_index(providers_to_load: List[ContextProvider]):
+ running = await check_meilisearch_running()
+ if not running:
+ await start_meilisearch()
+ try:
+ await asyncio.wait_for(poll_meilisearch_running(), timeout=20)
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Meilisearch did not start in less than 20 seconds. Stopping polling."
+ )
+ return
+
+ logger.debug("Loading Meilisearch index...")
+ await self.load_index(
+ sdk.ide.workspace_directory, providers_to_load=providers_to_load
+ )
+ logger.debug("Loaded Meilisearch index")
+
+ # new_context_providers is a dict; normalize both branches to a list of providers
+ providers_to_load = (
+ list(new_context_providers.values()) if only_reloading else context_providers
+ )
+ create_async_task(load_index(providers_to_load), on_err)
+
+ @staticmethod
+ async def update_documents(context_items: List[ContextItem], workspace_dir: str):
+ """
+ Updates the documents in the search index.
+ """
+ documents = [
+ {
+ "id": item.description.id.to_string(),
+ "name": item.description.name,
+ "description": item.description.description,
+ "content": item.content,
+ "workspace_dir": workspace_dir,
+ "provider_name": item.description.id.provider_title,
+ }
+ for item in context_items
+ ]
+ async with Client(get_meilisearch_url()) as search_client:
+
+ async def add_docs():
+ index = await search_client.get_index(SEARCH_INDEX_NAME)
+ await index.add_documents(documents or [])
+
+ try:
+ await asyncio.wait_for(add_docs(), timeout=20)
+ except asyncio.TimeoutError:
+ logger.warning("Failed to add document to meilisearch in 20 seconds")
+ except Exception as e:
+ logger.warning(f"Error adding document to meilisearch: {e}")
+
+ @staticmethod
+ async def delete_documents(ids):
+ """
+ Deletes the documents in the search index.
+ """
+ async with Client(get_meilisearch_url()) as search_client:
+ try:
+ await asyncio.wait_for(
+ search_client.index(SEARCH_INDEX_NAME).delete_documents(ids),
+ timeout=20,
+ )
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Failed to delete document from meilisearch in 20 seconds"
+ )
+ except Exception as e:
+ logger.warning(f"Error deleting document from meilisearch: {e}")
+
+ async def load_index(
+ self,
+ workspace_dir: str,
+ should_retry: bool = True,
+ providers_to_load: Optional[List[ContextProvider]] = None,
+ ):
+ try:
+ async with Client(get_meilisearch_url()) as search_client:
+ # First, create the index if it doesn't exist
+ # The index is currently shared by all workspaces
+ await search_client.create_index(SEARCH_INDEX_NAME)
+ globalSearchIndex = await search_client.get_index(SEARCH_INDEX_NAME)
+ await globalSearchIndex.update_ranking_rules(
+ ["attribute", "words", "typo", "proximity", "sort", "exactness"]
+ )
+ await globalSearchIndex.update_searchable_attributes(
+ ["name", "description"]
+ )
+ await globalSearchIndex.update_filterable_attributes(
+ ["workspace_dir", "provider_name"]
+ )
+
+ async def load_context_provider(provider: ContextProvider):
+ context_items = await provider.provide_context_items(workspace_dir)
+ documents = [
+ {
+ "id": item.description.id.to_string(),
+ "name": item.description.name,
+ "description": item.description.description,
+ "content": item.content,
+ "workspace_dir": workspace_dir,
+ "provider_name": provider.title,
+ }
+ for item in context_items
+ ]
+ if len(documents) > 0:
+ await globalSearchIndex.add_documents(documents)
+
+ return len(documents)
+
+ async def safe_load(provider: ContextProvider):
+ ti = time.time()
+ try:
+ num_documents = await asyncio.wait_for(
+ load_context_provider(provider), timeout=20
+ )
+ except asyncio.TimeoutError:
+ logger.warning(
+ f"Failed to add documents to meilisearch for context provider {provider.__class__.__name__} in 20 seconds"
+ )
+ return
+ except Exception as e:
+ logger.warning(
+ f"Error adding documents to meilisearch for context provider {provider.__class__.__name__}: {e}"
+ )
+ return
+
+ tf = time.time()
+ logger.debug(
+ f"Loaded {num_documents} documents into meilisearch in {tf - ti} seconds for context provider {provider.title}"
+ )
+
+ providers = providers_to_load or list(self.context_providers.values())
+ tasks = [safe_load(provider) for provider in providers]
+ await asyncio.wait_for(asyncio.gather(*tasks), timeout=20)
+
+ except Exception as e:
+ logger.debug(f"Error loading meilisearch index: {e}")
+ if should_retry:
+ await restart_meilisearch()
+ try:
+ await asyncio.wait_for(poll_meilisearch_running(), timeout=20)
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Meilisearch did not restart in less than 20 seconds. Stopping polling."
+ )
+ await self.load_index(workspace_dir, False)
+
+ async def select_context_item(self, id: str, query: str):
+ """
+ Selects the ContextItem with the given id.
+ """
+ id: ContextItemId = ContextItemId.from_string(id)
+ if id.provider_title not in self.provider_titles:
+ raise ValueError(
+ f"Context provider with title {id.provider_title} not found"
+ )
+
+ posthog_logger.capture_event(
+ "select_context_item",
+ {
+ "provider_title": id.provider_title,
+ "item_id": id.item_id,
+ "query": query,
+ },
+ )
+ dev_data_logger.capture(
+ "select_context_item",
+ {
+ "provider_title": id.provider_title,
+ "item_id": id.item_id,
+ "query": query,
+ },
+ )
+ await self.context_providers[id.provider_title].add_context_item(id, query)
+
+ async def get_context_item(self, id: str, query: str) -> ContextItem:
+ """
+ Returns the ContextItem with the given id.
+ """
+ id: ContextItemId = ContextItemId.from_string(id)
+ if id.provider_title not in self.provider_titles:
+ raise ValueError(
+ f"Context provider with title {id.provider_title} not found"
+ )
+
+ return await self.context_providers[id.provider_title].get_item(id, query)
+
+ async def delete_context_with_ids(self, ids: List[str]):
+ """
+ Deletes the ContextItems with the given IDs, lets ContextProviders recalculate.
+ """
+
+ # Group by provider title
+ provider_title_to_ids: Dict[str, List[ContextItemId]] = {}
+ for id in ids:
+ id: ContextItemId = ContextItemId.from_string(id)
+ if id.provider_title not in provider_title_to_ids:
+ provider_title_to_ids[id.provider_title] = []
+ provider_title_to_ids[id.provider_title].append(id)
+
+ # Recalculate context for each updated provider
+ for provider_title, ids in provider_title_to_ids.items():
+ await self.context_providers[provider_title].delete_context_with_ids(ids)
+
+ async def clear_context(self):
+ """
+ Clears all context.
+ """
+ for provider in self.context_providers.values():
+ await provider.clear_context()
+
+ async def manually_add_context_item(self, item: ContextItem):
+ """
+ Adds the given ContextItem to the list of ContextItems.
+ """
+ if item.description.id.provider_title not in self.provider_titles:
+ return
+
+ await self.context_providers[
+ item.description.id.provider_title
+ ].manually_add_context_item(item)
+
+
+"""
+Should define "ArgsTransformer" and "PromptTransformer" classes for the different LLMs. A standard way for them to ingest the
+same format of prompts so you don't have to redo all of this logic.
+"""
diff --git a/server/continuedev/core/env.py b/server/continuedev/core/env.py
new file mode 100644
index 00000000..60b86538
--- /dev/null
+++ b/server/continuedev/core/env.py
@@ -0,0 +1,31 @@
+import os
+
+from dotenv import load_dotenv
+
+
+def get_env_var(var_name: str):
+ load_dotenv()
+ return os.getenv(var_name)
+
+
+def make_sure_env_exists():
+ if not os.path.exists(".env"):
+ with open(".env", "w") as f:
+ f.write("")
+
+
+def save_env_var(var_name: str, var_value: str):
+ make_sure_env_exists()
+
+ with open(".env", "r") as f:
+ lines = f.readlines()
+ with open(".env", "w") as f:
+ values = {}
+ for line in lines:
+ if "=" not in line:
+ continue
+ # Split on the first "=" only, so values containing "=" survive
+ key, _, value = line.partition("=")
+ values[key] = value.strip().strip('"')
+
+ values[var_name] = var_value
+ for key, value in values.items():
+ f.write(f'{key}="{value}"\n')
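
(Illustration, not part of the diff: a quick sketch of the .env helpers above; the variable name is arbitrary. Note that the file is read and written relative to the current working directory.)

    from continuedev.core.env import get_env_var, save_env_var

    save_env_var("MY_API_KEY", "abc123")  # rewrites .env with the new value
    print(get_env_var("MY_API_KEY"))      # -> "abc123"
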
diff --git a/server/continuedev/core/lsp.py b/server/continuedev/core/lsp.py
new file mode 100644
index 00000000..fc26c85c
--- /dev/null
+++ b/server/continuedev/core/lsp.py
@@ -0,0 +1,416 @@
+import asyncio
+import threading
+from typing import List, Literal, Optional
+
+import aiohttp
+from pydantic import BaseModel
+
+from ..models.filesystem import RangeInFile
+from ..models.main import Position, Range
+
+
+def filepath_to_uri(filename: str) -> str:
+ return f"file://{filename}"
+
+
+def uri_to_filepath(uri: str) -> str:
+ if uri.startswith("file://"):
+ return uri[7:]
+ else:
+ return uri
+
+
+PORT = 8099
+
+
+class LSPClient:
+ ready: bool = False
+ lock: asyncio.Lock = asyncio.Lock()
+
+ def __init__(self, host: str, port: int, workspace_paths: List[str]):
+ self.host = host
+ self.port = port
+ self.session = aiohttp.ClientSession()
+ self.next_id = 0
+ self.workspace_paths = workspace_paths
+
+ async def connect(self):
+ print("Connecting")
+ self.ws = await self.session.ws_connect(f"ws://{self.host}:{self.port}/")
+ print("Connected")
+ self.ready = True
+
+ async def send(self, data):
+ await self.ws.send_json(data)
+
+ async def recv(self):
+ await self.lock.acquire()
+
+ try:
+ return await self.ws.receive_json()
+ finally:
+ self.lock.release()
+
+ async def close(self):
+ await self.ws.close()
+ await self.session.close()
+
+ async def call_method(self, method_name, **kwargs):
+ body = {
+ "jsonrpc": "2.0",
+ "id": self.next_id,
+ "method": method_name,
+ "params": kwargs,
+ }
+ self.next_id += 1
+ await self.send(body)
+ response = await self.recv()
+ return response
+
+ async def initialize(self):
+ initialization_args = {
+ "capabilities": {
+ "textDocument": {
+ "codeAction": {"dynamicRegistration": True},
+ "codeLens": {"dynamicRegistration": True},
+ "colorProvider": {"dynamicRegistration": True},
+ "completion": {
+ "completionItem": {
+ "commitCharactersSupport": True,
+ "documentationFormat": ["markdown", "plaintext"],
+ "snippetSupport": True,
+ },
+ "completionItemKind": {"valueSet": list(range(1, 26))},
+ "contextSupport": True,
+ "dynamicRegistration": True,
+ },
+ "definition": {"dynamicRegistration": True},
+ "documentHighlight": {"dynamicRegistration": True},
+ "documentLink": {"dynamicRegistration": True},
+ "documentSymbol": {
+ "dynamicRegistration": True,
+ "symbolKind": {"valueSet": list(range(1, 27))},
+ },
+ "formatting": {"dynamicRegistration": True},
+ "hover": {
+ "contentFormat": ["markdown", "plaintext"],
+ "dynamicRegistration": True,
+ },
+ "implementation": {"dynamicRegistration": True},
+ "onTypeFormatting": {"dynamicRegistration": True},
+ "publishDiagnostics": {"relatedInformation": True},
+ "rangeFormatting": {"dynamicRegistration": True},
+ "references": {"dynamicRegistration": True},
+ "rename": {"dynamicRegistration": True},
+ "signatureHelp": {
+ "dynamicRegistration": True,
+ "signatureInformation": {
+ "documentationFormat": ["markdown", "plaintext"]
+ },
+ },
+ "synchronization": {
+ "didSave": True,
+ "dynamicRegistration": True,
+ "willSave": True,
+ "willSaveWaitUntil": True,
+ },
+ "typeDefinition": {"dynamicRegistration": True},
+ },
+ "workspace": {
+ "applyEdit": True,
+ "configuration": True,
+ "didChangeConfiguration": {"dynamicRegistration": True},
+ "didChangeWatchedFiles": {"dynamicRegistration": True},
+ "executeCommand": {"dynamicRegistration": True},
+ "symbol": {
+ "dynamicRegistration": True,
+ "symbolKind": {"valueSet": list(range(1, 27))},
+ },
+ "workspaceEdit": {"documentChanges": True},
+ "workspaceFolders": True,
+ },
+ },
+ "processId": 1234,
+ "rootPath": None,
+ "rootUri": filepath_to_uri(self.workspace_paths[0]),
+ "initializationOptions": {},
+ "trace": "off",
+ "workspaceFolders": [
+ {
+ "uri": filepath_to_uri(workspacePath),
+ "name": workspacePath.split("/")[-1],
+ }
+ for workspacePath in self.workspace_paths
+ ],
+ }
+ return await self.call_method("initialize", **initialization_args)
+
+ async def goto_definition(self, filepath: str, position: Position):
+ return await self.call_method(
+ "textDocument/definition",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ position=position.dict(),
+ )
+
+ async def document_symbol(self, filepath: str):
+ return await self.call_method(
+ "textDocument/documentSymbol",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ )
+
+ async def find_references(
+ self, filepath: str, position: Position, include_declaration: bool = False
+ ):
+ return await self.call_method(
+ "textDocument/references",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ position=position.dict(),
+ context={"includeDeclaration": include_declaration},
+ )
+
+ async def folding_range(self, filepath: str):
+ response = await self.call_method(
+ "textDocument/foldingRange",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ )
+ return response["result"]
+
+
+async def start_language_server() -> threading.Thread:
+ """Manually start the python language server. Not used currently."""
+ raise NotImplementedError()
+ # try:
+ # kill_proc(PORT)
+ # thread = threading.Thread(
+ # target=start_ws_lang_server,
+ # args=(PORT, False, PythonLSPServer),
+ # )
+ # thread.daemon = True
+ # thread.start()
+
+ # except Exception as e:
+ # logger.warning("Could not start TCP server: %s", e)
+
+ # await asyncio.sleep(2)
+
+ # return thread
+
+
+class DocumentSymbol(BaseModel):
+ name: str
+ containerName: Optional[str] = None
+ kind: int
+ location: RangeInFile
+
+
+class FoldingRange(BaseModel):
+ range: Range
+ kind: Optional[Literal["comment", "imports", "region"]] = None
+
+
+class ContinueLSPClient(BaseModel):
+ workspace_dir: str
+
+ lsp_client: LSPClient = None
+ lsp_thread: Optional[threading.Thread] = None
+
+ @property
+ def ready(self):
+ if self.lsp_client is None:
+ return False
+ return self.lsp_client.ready
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def dict(self, **kwargs):
+ original_dict = super().dict(**kwargs)
+ original_dict.pop("lsp_client", None)
+ return original_dict
+
+ async def start(self):
+ self.lsp_thread = await start_language_server()
+ self.lsp_client = LSPClient("localhost", PORT, [self.workspace_dir])
+ await self.lsp_client.connect()
+ await self.lsp_client.initialize()
+
+ async def stop(self):
+ await self.lsp_client.close()
+ if self.lsp_thread:
+ self.lsp_thread.join()
+
+ def location_to_range_in_file(self, location):
+ return RangeInFile(
+ filepath=uri_to_filepath(location["uri"]),
+ range=Range.from_shorthand(
+ location["range"]["start"]["line"],
+ location["range"]["start"]["character"],
+ location["range"]["end"]["line"],
+ location["range"]["end"]["character"],
+ ),
+ )
+
+ async def goto_definition(
+ self, position: Position, filename: str
+ ) -> List[RangeInFile]:
+ # The client call is a coroutine and must be awaited; the locations live under "result"
+ response = await self.lsp_client.goto_definition(
+ filename,
+ position,
+ )
+ return [self.location_to_range_in_file(x) for x in response["result"]]
+
+ async def find_references(
+ self, position: Position, filename: str, include_declaration: bool = False
+ ) -> List[RangeInFile]:
+ response = await self.lsp_client.find_references(
+ filename,
+ position,
+ include_declaration=include_declaration,
+ )
+ return [self.location_to_range_in_file(x) for x in response["result"]]
+
+ async def document_symbol(self, filepath: str) -> List:
+ response = await self.lsp_client.document_symbol(filepath)
+ return [
+ DocumentSymbol(
+ name=x["name"],
+ containerName=x.get("containerName"),
+ kind=x["kind"],
+ location=self.location_to_range_in_file(x["location"]),
+ )
+ for x in response["result"]
+ ]
+
+ async def folding_range(self, filepath: str) -> List[FoldingRange]:
+ response = await self.lsp_client.folding_range(filepath)
+
+ return [
+ FoldingRange(
+ range=Range.from_shorthand(
+ x["startLine"],
+ x.get("startCharacter", 0),
+ x["endLine"] if "endCharacter" in x else x["endLine"] + 1,
+ x.get("endCharacter", 0),
+ ),
+ kind=x.get("kind"),
+ )
+ for x in response
+ ]
+
+ async def get_enclosing_folding_range_of_position(
+ self, position: Position, filepath: str
+ ) -> Optional[FoldingRange]:
+ ranges = await self.folding_range(filepath)
+
+ max_start_position = Position(line=0, character=0)
+ max_range = None
+ for r in ranges:
+ if r.range.contains(position):
+ if r.range.start > max_start_position:
+ max_start_position = r.range.start
+ max_range = r
+
+ return max_range
+
+ async def get_enclosing_folding_range(
+ self, range_in_file: RangeInFile
+ ) -> Optional[FoldingRange]:
+ ranges = await self.folding_range(range_in_file.filepath)
+
+        # Same innermost-range selection, but the folding range must contain
+        # both ends of the given range.
+        max_range = None
+        for r in ranges:
+            if r.range.contains(range_in_file.range.start) and r.range.contains(
+                range_in_file.range.end
+            ):
+                if max_range is None or r.range.start > max_range.range.start:
+                    max_range = r
+
+ return max_range
diff --git a/server/continuedev/core/main.py b/server/continuedev/core/main.py
new file mode 100644
index 00000000..617a5aaa
--- /dev/null
+++ b/server/continuedev/core/main.py
@@ -0,0 +1,437 @@
+import json
+from typing import Any, Coroutine, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, validator
+from pydantic.schema import schema
+
+from ..models.main import ContinueBaseModel
+from .observation import Observation
+
+ChatMessageRole = Literal["assistant", "user", "system", "function"]
+
+
+class FunctionCall(ContinueBaseModel):
+ name: str
+ arguments: str
+
+
+class ChatMessage(ContinueBaseModel):
+ role: ChatMessageRole
+ content: Union[str, None] = None
+ name: Union[str, None] = None
+ # A summary for pruning chat context to fit context window. Often the Step name.
+ summary: str
+ function_call: Union[FunctionCall, None] = None
+
+ def to_dict(self, with_functions: bool) -> Dict:
+ d = self.dict()
+ del d["summary"]
+ if d["function_call"] is not None:
+ d["function_call"]["name"] = d["function_call"]["name"].replace(" ", "")
+
+ if d["content"] is None:
+ d["content"] = ""
+ for key, value in list(d.items()):
+ if value is None:
+ del d[key]
+
+ if not with_functions:
+ if d["role"] == "function":
+ d["role"] = "assistant"
+ if "name" in d:
+ del d["name"]
+ if "function_call" in d:
+ del d["function_call"]
+ return d
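+    # Example (illustrative): with with_functions=False, a function-role
+    # message is downgraded so plain chat models can consume it, e.g.
+    #     ChatMessage(role="function", name="EditFile", content="done",
+    #                 summary="s").to_dict(with_functions=False)
+    # returns {"role": "assistant", "content": "done"}; "summary", "name",
+    # and "function_call" are stripped.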
+
+
+def resolve_refs(schema_data):
+ def traverse(obj):
+ if isinstance(obj, dict):
+ if "$ref" in obj:
+ ref = obj["$ref"]
+ parts = ref.split("/")
+ ref_obj = schema_data
+ for part in parts[1:]:
+ ref_obj = ref_obj[part]
+ return traverse(ref_obj)
+ else:
+ for key, value in obj.items():
+ obj[key] = traverse(value)
+ elif isinstance(obj, list):
+ for i in range(len(obj)):
+ obj[i] = traverse(obj[i])
+ return obj
+
+ return traverse(schema_data)
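+# Example (illustrative): resolve_refs inlines JSON-schema pointers, so
+#     {"definitions": {"A": {"type": "string"}},
+#      "properties": {"x": {"$ref": "#/definitions/A"}}}
+# becomes the same schema with {"x": {"type": "string"}} substituted in place.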
+
+
+unincluded_parameters = [
+ "system_message",
+ "chat_context",
+ "manage_own_chat_context",
+ "hide",
+ "name",
+ "description",
+]
+
+
+def step_to_json_schema(step: "Step") -> Dict:
+ pydantic_class = step.__class__
+ schema_data = schema([pydantic_class])
+ resolved_schema = resolve_refs(schema_data)
+ parameters = resolved_schema["definitions"][pydantic_class.__name__]
+ for parameter in unincluded_parameters:
+ if parameter in parameters["properties"]:
+ del parameters["properties"][parameter]
+ return {
+ "name": step.name.replace(" ", ""),
+ "description": step.description or "",
+ "parameters": parameters,
+ }
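+# Example (illustrative): for a hypothetical step
+#     class CreateFileStep(Step):
+#         filepath: str
+# step_to_json_schema(CreateFileStep(filepath="a.py")) returns an OpenAI
+# function-call style schema, roughly
+#     {"name": "CreateFileStep", "description": "", "parameters": {...}}
+# with the internal Step fields (hide, name, system_message, ...) pruned
+# out of parameters["properties"].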
+
+
+def step_to_fn_call_arguments(step: "Step") -> str:
+ args = step.dict()
+ for parameter in unincluded_parameters:
+ if parameter in args:
+ del args[parameter]
+ return json.dumps(args)
+
+
+class HistoryNode(ContinueBaseModel):
+ """A point in history, a list of which make up History"""
+
+ step: "Step"
+ observation: Union[Observation, None]
+ depth: int
+ deleted: bool = False
+ active: bool = True
+ logs: List[str] = []
+ context_used: List["ContextItem"] = []
+
+ def to_chat_messages(self) -> List[ChatMessage]:
+ if self.step.description is None or self.step.manage_own_chat_context:
+ return self.step.chat_context
+ return self.step.chat_context + [
+ ChatMessage(
+ role="assistant",
+ name=self.step.__class__.__name__,
+ content=self.step.description or f"Ran function {self.step.name}",
+ summary=f"Called function {self.step.name}",
+ )
+ ]
+
+
+class History(ContinueBaseModel):
+ """A history of steps taken and their results"""
+
+ timeline: List[HistoryNode]
+ current_index: int
+
+ def to_chat_history(self) -> List[ChatMessage]:
+ msgs = []
+ for node in self.timeline:
+ if not node.step.hide:
+ msgs += node.to_chat_messages()
+ return msgs
+
+ def add_node(self, node: HistoryNode) -> int:
+ """Add node and return the index where it was added"""
+ self.timeline.insert(self.current_index + 1, node)
+ self.current_index += 1
+ return self.current_index
+
+ def get_current(self) -> Union[HistoryNode, None]:
+ if self.current_index < 0:
+ return None
+ return self.timeline[self.current_index]
+
+ def get_last_at_depth(
+ self, depth: int, include_current: bool = False
+ ) -> Union[HistoryNode, None]:
+ i = self.current_index if include_current else self.current_index - 1
+ while i >= 0:
+ if (
+ self.timeline[i].depth == depth
+ and type(self.timeline[i].step).__name__ != "ManualEditStep"
+ ):
+ return self.timeline[i]
+ i -= 1
+ return None
+
+    def get_last_at_same_depth(self) -> Union[HistoryNode, None]:
+        current = self.get_current()
+        if current is None:
+            return None
+        return self.get_last_at_depth(current.depth)
+
+ def remove_current_and_substeps(self):
+ self.timeline.pop(self.current_index)
+ while self.get_current() is not None and self.get_current().depth > 0:
+ self.timeline.pop(self.current_index)
+
+ def take_next_step(self) -> Union["Step", None]:
+ if self.has_future():
+ self.current_index += 1
+ current_state = self.get_current()
+ if current_state is None:
+ return None
+ return current_state.step
+ return None
+
+ def get_current_index(self) -> int:
+ return self.current_index
+
+ def has_future(self) -> bool:
+ return self.current_index < len(self.timeline) - 1
+
+ def step_back(self):
+ self.current_index -= 1
+
+ def last_observation(self) -> Union[Observation, None]:
+ state = self.get_last_at_same_depth()
+ if state is None:
+ return None
+ return state.observation
+
+    def pop_step(self, index: Optional[int] = None) -> Union["Step", None]:
+ index = index if index is not None else self.current_index
+ if index < 0 or self.current_index < 0:
+ return None
+
+ node = self.timeline.pop(index)
+
+ if index <= self.current_index:
+ self.current_index -= 1
+
+ return node.step
+
+ @classmethod
+ def from_empty(cls):
+ return cls(timeline=[], current_index=-1)
+
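+# Example (illustrative) of the timeline/current_index invariants, where
+# `some_step` stands for any Step instance:
+#     h = History.from_empty()      # current_index == -1, get_current() is None
+#     i = h.add_node(HistoryNode(step=some_step, observation=None, depth=0))
+#     assert i == 0 and h.get_current() is h.timeline[0]
+#     assert not h.has_future()     # nothing beyond current_index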
+
+class SlashCommandDescription(ContinueBaseModel):
+ name: str
+ description: str
+
+
+class ContextItemId(BaseModel):
+ """
+ A ContextItemId is a unique identifier for a ContextItem.
+ """
+
+ provider_title: str
+ item_id: str
+
+ @validator("provider_title", "item_id")
+ def must_be_valid_id(cls, v):
+ import re
+
+ if not re.match(r"^[0-9a-zA-Z_-]*$", v):
+ raise ValueError(
+ "Both provider_title and item_id can only include characters 0-9, a-z, A-Z, -, and _"
+ )
+ return v
+
+ def to_string(self) -> str:
+ return f"{self.provider_title}-{self.item_id}"
+
+ @staticmethod
+ def from_string(string: str) -> "ContextItemId":
+ provider_title, *rest = string.split("-")
+ item_id = "-".join(rest)
+ return ContextItemId(provider_title=provider_title, item_id=item_id)
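+    # Example (illustrative): ids round-trip through their string form:
+    #     ContextItemId(provider_title="file", item_id="abc_123").to_string()
+    # gives "file-abc_123", and from_string() splits on the FIRST "-" only,
+    # so provider_title itself must not contain "-".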
+
+
+class ContextItemDescription(BaseModel):
+ """
+ A ContextItemDescription is a description of a ContextItem that is displayed to the user when they type '@'.
+
+ The id can be used to retrieve the ContextItem from the ContextManager.
+ """
+
+ name: str
+ description: str
+ id: ContextItemId
+
+
+class ContextItem(BaseModel):
+ """
+ A ContextItem is a single item that is stored in the ContextManager.
+ """
+
+ description: ContextItemDescription
+ content: str
+
+ @validator("content", pre=True)
+ def content_must_be_string(cls, v):
+ if v is None:
+ return ""
+ return v
+
+ editing: bool = False
+ editable: bool = False
+
+
+class SessionInfo(ContinueBaseModel):
+ session_id: str
+ title: str
+ date_created: str
+ workspace_directory: Optional[str] = None
+
+
+class ContinueConfig(ContinueBaseModel):
+ system_message: Optional[str]
+ temperature: Optional[float]
+
+ class Config:
+ extra = "allow"
+
+ def dict(self, **kwargs):
+ original_dict = super().dict(**kwargs)
+ original_dict.pop("policy", None)
+ return original_dict
+
+
+class ContextProviderDescription(BaseModel):
+ title: str
+ display_title: str
+ description: str
+ dynamic: bool
+ requires_query: bool
+
+
+class FullState(ContinueBaseModel):
+ """A full state of the program, including the history"""
+
+ history: History
+ active: bool
+ user_input_queue: List[str]
+ slash_commands: List[SlashCommandDescription]
+ adding_highlighted_code: bool
+ selected_context_items: List[ContextItem]
+ session_info: Optional[SessionInfo] = None
+ config: ContinueConfig
+ saved_context_groups: Dict[str, List[ContextItem]] = {}
+ context_providers: List[ContextProviderDescription] = []
+ meilisearch_url: Optional[str] = None
+
+
+class ContinueSDK:
+ ...
+
+
+class Models:
+ ...
+
+
+class Policy(ContinueBaseModel):
+ """A rule that determines which step to take next"""
+
+    # NOTE: the History default below is evaluated once at definition time and
+    # is mutable, so it is shared across calls; callers should pass their own.
+ def next(
+ self, config: ContinueConfig, history: History = History.from_empty()
+ ) -> "Step":
+ raise NotImplementedError
+
+
+class Step(ContinueBaseModel):
+ name: str = None
+ hide: bool = False
+ description: Union[str, None] = None
+
+ class_name: str = "Step"
+
+ @validator("class_name", pre=True, always=True)
+ def class_name_is_class_name(cls, class_name):
+ return cls.__name__
+
+ system_message: Union[str, None] = None
+ chat_context: List[ChatMessage] = []
+ manage_own_chat_context: bool = False
+
+ class Config:
+ copy_on_model_validation = False
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self.description is not None:
+ return self.description
+ return "Running step: " + self.name
+
+ def dict(self, *args, **kwargs):
+ d = super().dict(*args, **kwargs)
+ # Make sure description is always a string
+ d["description"] = self.description or ""
+ return d
+
+ @validator("name", pre=True, always=True)
+ def name_is_class_name(cls, name):
+ if name is None:
+ return cls.__name__
+ return name
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ raise NotImplementedError
+
+ async def __call__(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ return await self.run(sdk)
+
+ def __rshift__(self, other: "Step"):
+ steps = []
+        if isinstance(self, SequentialStep):
+            steps = list(self.steps)  # copy so the append below doesn't mutate self
+ else:
+ steps.append(self)
+ if isinstance(other, SequentialStep):
+ steps += other.steps
+ else:
+ steps.append(other)
+ return SequentialStep(steps=steps)
+
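+# Example (illustrative): `>>` composition flattens instead of nesting, so
+#     pipeline = step_a >> step_b >> step_c
+# yields a single SequentialStep whose .steps is [step_a, step_b, step_c],
+# because an existing SequentialStep contributes its steps directly.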
+
+class SequentialStep(Step):
+ steps: List[Step]
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ for step in self.steps:
+ observation = await sdk.run_step(step)
+ return observation
+
+
+class ValidatorObservation(Observation):
+ passed: bool
+ observation: Observation
+
+
+class Validator(Step):
+ def run(self, sdk: ContinueSDK) -> ValidatorObservation:
+ raise NotImplementedError
+
+
+class Context:
+ key_value: Dict[str, Any] = {}
+
+ def set(self, key: str, value: Any):
+ self.key_value[key] = value
+
+ def get(self, key: str) -> Any:
+ return self.key_value.get(key, None)
+
+
+class ContinueCustomException(Exception):
+ title: str
+ message: str
+ with_step: Union[Step, None]
+
+ def __init__(
+ self,
+ message: str,
+ title: str = "Error while running step:",
+ with_step: Union[Step, None] = None,
+ ):
+ self.message = message
+ self.title = title
+ self.with_step = with_step
+
+
+HistoryNode.update_forward_refs()
diff --git a/server/continuedev/core/models.py b/server/continuedev/core/models.py
new file mode 100644
index 00000000..21ebd8f6
--- /dev/null
+++ b/server/continuedev/core/models.py
@@ -0,0 +1,113 @@
+from typing import List, Optional
+
+from pydantic import BaseModel
+
+from ..libs.llm.anthropic import AnthropicLLM
+from ..libs.llm.base import LLM
+from ..libs.llm.ggml import GGML
+from ..libs.llm.google_palm_api import GooglePaLMAPI
+from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI
+from ..libs.llm.hf_tgi import HuggingFaceTGI
+from ..libs.llm.llamacpp import LlamaCpp
+from ..libs.llm.ollama import Ollama
+from ..libs.llm.openai import OpenAI
+from ..libs.llm.openai_free_trial import OpenAIFreeTrial
+from ..libs.llm.replicate import ReplicateLLM
+from ..libs.llm.together import TogetherLLM
+
+
+class ContinueSDK(BaseModel):
+ pass
+
+
+ALL_MODEL_ROLES = [
+ "default",
+ "summarize",
+ "edit",
+ "chat",
+]
+
+MODEL_CLASSES = {
+ cls.__name__: cls
+ for cls in [
+ OpenAI,
+ OpenAIFreeTrial,
+ GGML,
+ TogetherLLM,
+ AnthropicLLM,
+ ReplicateLLM,
+ Ollama,
+ LlamaCpp,
+ HuggingFaceInferenceAPI,
+ HuggingFaceTGI,
+ GooglePaLMAPI,
+ ]
+}
+
+MODEL_MODULE_NAMES = {
+ "OpenAI": "openai",
+ "OpenAIFreeTrial": "openai_free_trial",
+ "GGML": "ggml",
+ "TogetherLLM": "together",
+ "AnthropicLLM": "anthropic",
+ "ReplicateLLM": "replicate",
+ "Ollama": "ollama",
+ "LlamaCpp": "llamacpp",
+ "HuggingFaceInferenceAPI": "hf_inference_api",
+ "HuggingFaceTGI": "hf_tgi",
+ "GooglePaLMAPI": "google_palm_api",
+}
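+# Example (illustrative): the two registries map a serialized class name back
+# to its class and to the module it lives in:
+#     MODEL_CLASSES["Ollama"]       # -> the Ollama class
+#     MODEL_MODULE_NAMES["Ollama"]  # -> "ollama", i.e. ..libs.llm.ollama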
+
+
+class Models(BaseModel):
+ """Main class that holds the current model configuration"""
+
+ default: LLM
+ summarize: Optional[LLM] = None
+ edit: Optional[LLM] = None
+ chat: Optional[LLM] = None
+
+ saved: List[LLM] = []
+
+ # TODO namespace these away to not confuse readers,
+ # or split Models into ModelsConfig, which gets turned into Models
+ sdk: ContinueSDK = None
+
+ def dict(self, **kwargs):
+ original_dict = super().dict(**kwargs)
+ original_dict.pop("sdk", None)
+ return original_dict
+
+ @property
+ def all_models(self):
+ models = [getattr(self, role) for role in ALL_MODEL_ROLES]
+ return [model for model in models if model is not None]
+
+ @property
+ def system_message(self) -> Optional[str]:
+ if self.sdk:
+ return self.sdk.config.system_message
+ return None
+
+ def set_system_message(self, msg: str):
+ for model in self.all_models:
+ if model.system_message is None:
+ model.system_message = msg
+
+ async def start(self, sdk: "ContinueSDK"):
+ """Start each of the LLMs, or fall back to default"""
+ self.sdk = sdk
+
+ for role in ALL_MODEL_ROLES:
+ model = getattr(self, role)
+ if model is None:
+ setattr(self, role, self.default)
+ else:
+ await sdk.start_model(model)
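+        # e.g. with only `default` configured, summarize/edit/chat all end up
+        # pointing at the same (already started) default LLM instance.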
+
+ self.set_system_message(self.system_message)
+
+ async def stop(self, sdk: "ContinueSDK"):
+ """Stop each LLM (if it's not the default, which is shared)"""
+ for model in self.all_models:
+ await model.stop()
diff --git a/server/continuedev/core/observation.py b/server/continuedev/core/observation.py
new file mode 100644
index 00000000..8a5e454e
--- /dev/null
+++ b/server/continuedev/core/observation.py
@@ -0,0 +1,41 @@
+from pydantic import BaseModel, validator
+
+from ..models.main import Traceback
+
+
+class Observation(BaseModel):
+ pass
+
+
+class TracebackObservation(Observation):
+ traceback: Traceback
+
+
+class ValidatorObservation(Observation):
+ passed: bool
+
+
+class UserInputObservation(Observation):
+ user_input: str
+
+
+class DictObservation(Observation):
+ values: dict
+
+ def __getitem__(self, key):
+ return self.values[key]
+
+
+class TextObservation(Observation):
+ text: str
+
+ @validator("text", pre=True, always=True)
+ def text_not_none(cls, v):
+ if v is None:
+ return ""
+ return v
+
+
+class InternalErrorObservation(Observation):
+ title: str
+ error: str
diff --git a/server/continuedev/core/sdk.py b/server/continuedev/core/sdk.py
new file mode 100644
index 00000000..408168f6
--- /dev/null
+++ b/server/continuedev/core/sdk.py
@@ -0,0 +1,309 @@
+import os
+import traceback
+from typing import Coroutine, List, Optional, Union
+
+from ..libs.llm.base import LLM
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.logging import logger
+from ..libs.util.paths import (
+ convertConfigImports,
+ getConfigFilePath,
+ getDiffsFolderPath,
+)
+from ..libs.util.telemetry import posthog_logger
+from ..models.filesystem import RangeInFile
+from ..models.filesystem_edit import (
+ AddDirectory,
+ AddFile,
+ DeleteDirectory,
+ DeleteFile,
+ FileEdit,
+ FileSystemEdit,
+)
+from ..models.main import Range
+from ..server.ide_protocol import AbstractIdeProtocolServer
+from .abstract_sdk import AbstractContinueSDK
+from .config import ContinueConfig
+from .lsp import ContinueLSPClient
+from .main import (
+ ChatMessage,
+ Context,
+ ContinueCustomException,
+ History,
+ HistoryNode,
+ Step,
+)
+from .models import Models
+from .observation import Observation
+from .steps import (
+ DefaultModelEditCodeStep,
+ FileSystemEditStep,
+ MessageStep,
+ RangeInFileWithContents,
+ ShellCommandsStep,
+ WaitForUserConfirmationStep,
+)
+
+
+class Autopilot:
+ pass
+
+
+class ContinueSDK(AbstractContinueSDK):
+ """The SDK provided as parameters to a step"""
+
+ ide: AbstractIdeProtocolServer
+ models: Models
+ lsp: Optional[ContinueLSPClient] = None
+ context: Context
+ config: ContinueConfig
+ __autopilot: Autopilot
+
+ def __init__(self, autopilot: Autopilot):
+ self.ide = autopilot.ide
+ self.__autopilot = autopilot
+ self.context = autopilot.context
+
+ async def load(self, config: Optional[ContinueConfig] = None):
+ # Create necessary directories
+ getDiffsFolderPath()
+
+ try:
+ self.config = config or self._load_config_dot_py()
+ except Exception as e:
+ logger.error(f"Failed to load config.py: {traceback.format_exception(e)}")
+
+ self.config = (
+ ContinueConfig()
+ if self._last_valid_config is None
+ else self._last_valid_config
+ )
+
+ formatted_err = "\n".join(traceback.format_exception(e))
+ msg_step = MessageStep(
+ name="Invalid Continue Config File", message=formatted_err
+ )
+ msg_step.description = f"Falling back to default config settings due to the following error in `~/.continue/config.py`.\n```\n{formatted_err}\n```\n\nIt's possible this was caused by an update to the Continue config format. If you'd like to see the new recommended default `config.py`, check [here](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/constants/default_config.py).\n\nIf the error is related to OpenAIServerInfo, see the updated way of using these parameters [here](https://continue.dev/docs/customization#azure-openai-service)."
+ self.history.add_node(
+ HistoryNode(step=msg_step, observation=None, depth=0, active=False)
+ )
+ await self.ide.setFileOpen(getConfigFilePath())
+
+ # Start models
+ self.models = self.config.models
+ await self.models.start(self)
+
+ # Start LSP
+ # async def start_lsp():
+ # try:
+ # sdk.lsp = ContinueLSPClient(
+ # workspace_dir=sdk.ide.workspace_directory,
+ # )
+ # await sdk.lsp.start()
+ # except Exception as e:
+ # logger.warning(f"Failed to start LSP client: {e}", exc_info=False)
+ # sdk.lsp = None
+
+ # create_async_task(
+ # start_lsp(), on_error=lambda e: logger.error("Failed to setup LSP: %s", e)
+ # )
+
+ # When the config is loaded, setup posthog logger
+ posthog_logger.setup(
+ self.ide.unique_id, self.config.allow_anonymous_telemetry, self.ide.ide_info
+ )
+ dev_data_logger.setup(self.config.user_token, self.config.data_server_url)
+
+ @classmethod
+ async def create(
+ cls, autopilot: Autopilot, config: Optional[ContinueConfig] = None
+ ) -> "ContinueSDK":
+ sdk = ContinueSDK(autopilot)
+ autopilot.continue_sdk = sdk
+
+ await sdk.load(config=config)
+
+ return sdk
+
+ @property
+ def history(self) -> History:
+ return self.__autopilot.history
+
+ def write_log(self, message: str):
+ self.history.timeline[self.history.current_index].logs.append(message)
+
+ async def start_model(self, llm: LLM):
+ await llm.start(unique_id=self.ide.unique_id, write_log=self.write_log)
+
+ async def _ensure_absolute_path(self, path: str) -> str:
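+        """Resolve a possibly-relative path to an absolute one.
+
+        Order: absolute paths pass through; then paths are tried relative to
+        the workspace; then a basename match against the IDE's open files;
+        otherwise an exception is raised.
+        """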
+ if os.path.isabs(path):
+ return path
+
+ # Else if in workspace
+ workspace_path = os.path.join(self.ide.workspace_directory, path)
+ if os.path.exists(workspace_path):
+ return workspace_path
+ else:
+ # Check if it matches any of the open files, then use that absolute path
+ open_files = await self.ide.getOpenFiles()
+ for open_file in open_files:
+ if os.path.basename(open_file) == os.path.basename(path):
+ return open_file
+ raise Exception(f"Path {path} does not exist")
+
+ async def run_step(self, step: Step) -> Coroutine[Observation, None, None]:
+ return await self.__autopilot._run_singular_step(step)
+
+ async def apply_filesystem_edit(
+ self, edit: FileSystemEdit, name: str = None, description: str = None
+ ):
+ return await self.run_step(
+ FileSystemEditStep(
+ edit=edit, description=description, **({"name": name} if name else {})
+ )
+ )
+
+ async def wait_for_user_input(self) -> str:
+ return await self.__autopilot.wait_for_user_input()
+
+ async def wait_for_user_confirmation(self, prompt: str):
+ return await self.run_step(WaitForUserConfirmationStep(prompt=prompt))
+
+ async def run(
+ self,
+ commands: Union[List[str], str],
+ cwd: str = None,
+ name: str = None,
+ description: str = None,
+ handle_error: bool = True,
+ ) -> Coroutine[str, None, None]:
+ commands = commands if isinstance(commands, List) else [commands]
+ return (
+ await self.run_step(
+ ShellCommandsStep(
+ cmds=commands,
+ cwd=cwd,
+ description=description,
+ handle_error=handle_error,
+ **({"name": name} if name else {}),
+ )
+ )
+ ).text
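+    # Example (illustrative) usage from inside a Step, where the command and
+    # name are arbitrary:
+    #     output = await sdk.run(["pip install -r requirements.txt"],
+    #                            name="Install requirements")
+    # This wraps the commands in a ShellCommandsStep and returns its text output.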
+
+ async def edit_file(
+ self,
+ filename: str,
+ prompt: str,
+ name: str = None,
+ description: str = "",
+ range: Range = None,
+ ):
+ filepath = await self._ensure_absolute_path(filename)
+
+ await self.ide.setFileOpen(filepath)
+ contents = await self.ide.readFile(filepath)
+ await self.run_step(
+ DefaultModelEditCodeStep(
+ range_in_files=[
+ RangeInFile(filepath=filepath, range=range)
+ if range is not None
+ else RangeInFile.from_entire_file(filepath, contents)
+ ],
+ user_input=prompt,
+ description=description,
+ **({"name": name} if name else {}),
+ )
+ )
+
+ async def append_to_file(self, filename: str, content: str):
+ filepath = await self._ensure_absolute_path(filename)
+ previous_content = await self.ide.readFile(filepath)
+ file_edit = FileEdit.from_append(filepath, previous_content, content)
+ await self.ide.applyFileSystemEdit(file_edit)
+
+ async def add_file(self, filename: str, content: Union[str, None]):
+ filepath = await self._ensure_absolute_path(filename)
+ dir_name = os.path.dirname(filepath)
+ os.makedirs(dir_name, exist_ok=True)
+ return await self.run_step(
+ FileSystemEditStep(edit=AddFile(filepath=filepath, content=content))
+ )
+
+ async def delete_file(self, filename: str):
+ filename = await self._ensure_absolute_path(filename)
+ return await self.run_step(
+ FileSystemEditStep(edit=DeleteFile(filepath=filename))
+ )
+
+ async def add_directory(self, path: str):
+ path = await self._ensure_absolute_path(path)
+ return await self.run_step(FileSystemEditStep(edit=AddDirectory(path=path)))
+
+ async def delete_directory(self, path: str):
+ path = await self._ensure_absolute_path(path)
+ return await self.run_step(FileSystemEditStep(edit=DeleteDirectory(path=path)))
+
+ _last_valid_config: ContinueConfig = None
+
+ def _load_config_dot_py(self, retry: bool = True) -> ContinueConfig:
+ try:
+ path = getConfigFilePath()
+ config = ContinueConfig.from_filepath(path)
+ self._last_valid_config = config
+
+ logger.debug("Loaded Continue config file from %s", path)
+
+ return config
+ except ModuleNotFoundError as e:
+ if not retry:
+ raise e
+ # Check if the module was "continuedev.src"
+ if e.name == "continuedev.src":
+ convertConfigImports(shorten=True)
+ return self._load_config_dot_py(retry=False)
+ else:
+ raise e
+
+ def get_code_context(
+ self, only_editing: bool = False
+ ) -> List[RangeInFileWithContents]:
+ highlighted_ranges = self.__autopilot.context_manager.context_providers[
+ "code"
+ ].highlighted_ranges
+ context = (
+ list(filter(lambda x: x.item.editing, highlighted_ranges))
+ if only_editing
+ else highlighted_ranges
+ )
+ return [c.rif for c in context]
+
+ def set_loading_message(self, message: str):
+ # self.__autopilot.set_loading_message(message)
+ raise NotImplementedError()
+
+ def raise_exception(
+ self, message: str, title: str, with_step: Union[Step, None] = None
+ ):
+ raise ContinueCustomException(message, title, with_step)
+
+ async def get_chat_context(self) -> List[ChatMessage]:
+ history_context = self.history.to_chat_history()
+
+ context_messages: List[
+ ChatMessage
+ ] = await self.__autopilot.context_manager.get_chat_messages()
+
+ # Insert at the end, but don't insert after latest user message or function call
+ for msg in context_messages:
+ history_context.insert(-1, msg)
+
+ return history_context
+
+ async def update_ui(self):
+ await self.__autopilot.update_subscribers()
+
+ async def clear_history(self):
+ await self.__autopilot.clear_history()
+
+ def current_step_was_deleted(self):
+ return self.history.timeline[self.history.current_index].deleted
diff --git a/server/continuedev/core/steps.py b/server/continuedev/core/steps.py
new file mode 100644
index 00000000..5c20dd15
--- /dev/null
+++ b/server/continuedev/core/steps.py
@@ -0,0 +1,963 @@
+# These steps are depended upon by ContinueSDK
+import difflib
+import subprocess
+from textwrap import dedent
+from typing import Coroutine, List, Optional, Union
+
+from ..libs.llm.base import LLM
+from ..libs.llm.openai_free_trial import OpenAIFreeTrial
+from ..libs.util.count_tokens import DEFAULT_MAX_TOKENS
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.strings import (
+ dedent_and_get_common_whitespace,
+ remove_quotes_and_escapes,
+)
+from ..libs.util.telemetry import posthog_logger
+from ..libs.util.templating import render_prompt_template
+from ..models.filesystem import FileSystem, RangeInFile, RangeInFileWithContents
+from ..models.filesystem_edit import (
+ EditDiff,
+ FileEdit,
+ FileEditWithFullContents,
+ FileSystemEdit,
+)
+
+# from ....libs.llm.replicate import ReplicateLLM
+from ..models.main import Range
+from .main import ChatMessage, ContinueCustomException, Step
+from .observation import Observation, TextObservation, UserInputObservation
+
+
+class ContinueSDK:
+ pass
+
+
+class Models:
+ pass
+
+
+class ReversibleStep(Step):
+ async def reverse(self, sdk: ContinueSDK):
+ raise NotImplementedError
+
+
+class MessageStep(Step):
+ name: str = "Message"
+ message: str
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return self.message
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ return TextObservation(text=self.message)
+
+
+class DisplayErrorStep(Step):
+ name: str = "Error in the Continue server"
+
+ title: str = "Error in the Continue server"
+ message: str = "There was an error in the Continue server."
+
+ @staticmethod
+ def from_exception(e: Exception) -> "DisplayErrorStep":
+ if isinstance(e, ContinueCustomException):
+ return DisplayErrorStep(title=e.title, message=e.message, name=e.title)
+
+ return DisplayErrorStep(message=str(e))
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return self.message
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ raise ContinueCustomException(message=self.message, title=self.title)
+
+
+class FileSystemEditStep(ReversibleStep):
+ edit: FileSystemEdit
+ _diff: Union[EditDiff, None] = None
+
+ hide: bool = True
+
+ async def run(self, sdk: "ContinueSDK") -> Coroutine[Observation, None, None]:
+ self._diff = await sdk.ide.applyFileSystemEdit(self.edit)
+ return None
+
+ async def reverse(self, sdk: "ContinueSDK"):
+ await sdk.ide.applyFileSystemEdit(self._diff.backward)
+ # Where and when should file saves happen?
+
+
+def output_contains_error(output: str) -> bool:
+ return "Traceback" in output or "SyntaxError" in output
+
+
+AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
+
+
+class ShellCommandsStep(Step):
+ cmds: List[str]
+ cwd: Union[str, None] = None
+ name: str = "Run Shell Commands"
+ handle_error: bool = True
+
+ _err_text: Union[str, None] = None
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self._err_text is not None:
+ return f"Error when running shell commands:\n```\n{self._err_text}\n```"
+
+ cmds_str = "\n".join(self.cmds)
+ return await models.summarize.complete(
+ f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:"
+ )
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+        process = subprocess.Popen(
+            "/bin/bash",
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,  # capture stderr so `err` below is populated
+            cwd=self.cwd or sdk.ide.workspace_directory,
+        )
+
+        stdin_input = "\n".join(self.cmds)
+        out, err = process.communicate(stdin_input.encode())
+
+        # If it fails, return the decoded error output
+        if err:
+            self._err_text = err.decode(errors="replace")
+            return TextObservation(text=self._err_text)
+
+ return None
+
+
+class DefaultModelEditCodeStep(Step):
+ user_input: str
+ model: Optional[LLM] = None
+ range_in_files: List[RangeInFile]
+ name: str = "Editing Code"
+ hide = False
+ description: str = ""
+ _prompt: str = dedent(
+ """\
+ Take the file prefix and suffix into account, but only rewrite the code_to_edit as specified in the user_request. The code you write in modified_code_to_edit will replace the code between the code_to_edit tags. Do NOT preface your answer or write anything other than code. The </modified_code_to_edit> tag should be written to indicate the end of the modified code section. Do not ever use nested tags.
+
+ Example:
+
+ <file_prefix>
+ class Database:
+ def __init__(self):
+ self._data = {{}}
+
+ def get(self, key):
+ return self._data[key]
+
+ </file_prefix>
+ <code_to_edit>
+ def set(self, key, value):
+ self._data[key] = value
+ </code_to_edit>
+ <file_suffix>
+
+ def clear_all():
+ self._data = {{}}
+ </file_suffix>
+ <user_request>
+ Raise an error if the key already exists.
+ </user_request>
+ <modified_code_to_edit>
+ def set(self, key, value):
+ if key in self._data:
+ raise KeyError(f"Key {{key}} already exists")
+ self._data[key] = value
+ </modified_code_to_edit>
+
+ Main task:
+ """
+ )
+ _previous_contents: str = ""
+ _new_contents: str = ""
+ _prompt_and_completion: str = ""
+
+ summary_prompt: str = "Please briefly explain the changes made to the code above. Give no more than 2-3 sentences, and use markdown bullet points:"
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ name = await models.summarize.complete(
+ f"Write a very short title to describe this requested change (no quotes): '{self.user_input}'. This is the title:"
+ )
+ self.name = remove_quotes_and_escapes(name)
+
+ if self._previous_contents.strip() == self._new_contents.strip():
+ return "No edits were made"
+ else:
+ return None
+
+ async def get_prompt_parts(
+ self, rif: RangeInFileWithContents, sdk: ContinueSDK, full_file_contents: str
+ ):
+        # We don't know all of the functions being passed in here, but if this
+        # prompt alone exceeds the limit, prune_chat_messages in count_tokens.py
+        # will drop the entire message rather than trimming just the lines we
+        # could spare, so we budget tokens carefully below.
+ if self.model is not None:
+ await sdk.start_model(self.model)
+
+ model_to_use = self.model or sdk.models.edit
+ max_tokens = int(model_to_use.context_length / 2)
+
+ TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1200
+ if (
+ model_to_use.count_tokens(rif.contents)
+ > TOKENS_TO_BE_CONSIDERED_LARGE_RANGE
+ ):
+ self.description += "\n\n**It looks like you've selected a large range to edit, which may take a while to complete. If you'd like to cancel, click the 'X' button above. If you highlight a more specific range, Continue will only edit within it.**"
+
+            # At this point, also increase max_tokens so generation doesn't stop mid-edit:
+            # 2.5x the size of the range, capped at 2.5x the default max tokens
+ max_tokens = int(
+ min(model_to_use.count_tokens(rif.contents), DEFAULT_MAX_TOKENS) * 2.5
+ )
+
+ BUFFER_FOR_FUNCTIONS = 400
+ total_tokens = (
+ model_to_use.count_tokens(
+ full_file_contents + self._prompt + self.user_input
+ )
+ + BUFFER_FOR_FUNCTIONS
+ + max_tokens
+ )
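+        # e.g. a ~6000-token file with an 8k-context model and max_tokens=4000
+        # gives total_tokens ~ 6000 + 400 + 4000 > 8000, triggering the
+        # line-pruning below.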
+
+ # If using 3.5 and overflows, upgrade to 3.5.16k
+ if model_to_use.model == "gpt-3.5-turbo":
+ if total_tokens > model_to_use.context_length:
+                model_to_use = OpenAIFreeTrial(model="gpt-3.5-turbo-16k")
+ await sdk.start_model(model_to_use)
+
+ # Remove tokens from the end first, and then the start to clear space
+ # This part finds the start and end lines
+ full_file_contents_lst = full_file_contents.split("\n")
+ max_start_line = rif.range.start.line
+ min_end_line = rif.range.end.line
+ cur_start_line = 0
+ cur_end_line = len(full_file_contents_lst) - 1
+
+ if total_tokens > model_to_use.context_length:
+ while cur_end_line > min_end_line:
+ total_tokens -= model_to_use.count_tokens(
+ full_file_contents_lst[cur_end_line]
+ )
+ cur_end_line -= 1
+ if total_tokens < model_to_use.context_length:
+ break
+
+ if total_tokens > model_to_use.context_length:
+ while cur_start_line < max_start_line:
+ cur_start_line += 1
+ total_tokens -= model_to_use.count_tokens(
+ full_file_contents_lst[cur_start_line]
+ )
+ if total_tokens < model_to_use.context_length:
+ break
+
+ # Now use the found start/end lines to get the prefix and suffix strings
+ file_prefix = "\n".join(full_file_contents_lst[cur_start_line:max_start_line])
+ file_suffix = "\n".join(full_file_contents_lst[min_end_line : cur_end_line - 1])
+
+ # Move any surrounding blank line in rif.contents to the prefix/suffix
+ # TODO: Keep track of start line of the range, because it's needed below for offset stuff
+ if len(rif.contents) > 0:
+ lines = rif.contents.splitlines(keepends=True)
+ first_line = lines[0] if lines else None
+ while first_line and first_line.strip() == "":
+ file_prefix += first_line
+ rif.contents = rif.contents[len(first_line) :]
+ lines = rif.contents.splitlines(keepends=True)
+ first_line = lines[0] if lines else None
+
+ last_line = lines[-1] if lines else None
+ while last_line and last_line.strip() == "":
+ file_suffix = last_line + file_suffix
+ rif.contents = rif.contents[: len(rif.contents) - len(last_line)]
+ lines = rif.contents.splitlines(keepends=True)
+ last_line = lines[-1] if lines else None
+
+ while rif.contents.startswith("\n"):
+ file_prefix += "\n"
+ rif.contents = rif.contents[1:]
+ while rif.contents.endswith("\n"):
+ file_suffix = "\n" + file_suffix
+ rif.contents = rif.contents[:-1]
+
+ return file_prefix, rif.contents, file_suffix, model_to_use, max_tokens
+
+ def compile_prompt(
+ self, file_prefix: str, contents: str, file_suffix: str, sdk: ContinueSDK
+ ) -> str:
+ if contents.strip() == "":
+ # Separate prompt for insertion at the cursor, the other tends to cause it to repeat whole file
+ prompt = dedent(
+ f"""\
+<file_prefix>
+{file_prefix}
+</file_prefix>
+<insertion_code_here>
+<file_suffix>
+{file_suffix}
+</file_suffix>
+<user_request>
+{self.user_input}
+</user_request>
+
+Please output the code to be inserted at the cursor in order to fulfill the user_request. Do NOT preface your answer or write anything other than code. You should not write any tags, just the code. Make sure to correctly indent the code:"""
+ )
+ return prompt
+
+ prompt = self._prompt
+ if file_prefix.strip() != "":
+ prompt += dedent(
+ f"""
+<file_prefix>
+{file_prefix}
+</file_prefix>"""
+ )
+ prompt += dedent(
+ f"""
+<code_to_edit>
+{contents}
+</code_to_edit>"""
+ )
+ if file_suffix.strip() != "":
+ prompt += dedent(
+ f"""
+<file_suffix>
+{file_suffix}
+</file_suffix>"""
+ )
+ prompt += dedent(
+ f"""
+<user_request>
+{self.user_input}
+</user_request>
+<modified_code_to_edit>
+"""
+ )
+
+ return prompt
+
+ def is_end_line(self, line: str) -> bool:
+ return "</modified_code_to_edit>" in line or "</code_to_edit>" in line
+
+ def line_to_be_ignored(self, line: str, is_first_line: bool = False) -> bool:
+ return (
+ "```" in line
+ or "<modified_code_to_edit>" in line
+ or "<file_prefix>" in line
+ or "</file_prefix>" in line
+ or "<file_suffix>" in line
+ or "</file_suffix>" in line
+ or "<user_request>" in line
+ or "</user_request>" in line
+ or "<code_to_edit>" in line
+ )
+
+ async def stream_rif(self, rif: RangeInFileWithContents, sdk: ContinueSDK):
+ await sdk.ide.saveFile(rif.filepath)
+ full_file_contents = await sdk.ide.readFile(rif.filepath)
+
+ (
+ file_prefix,
+ contents,
+ file_suffix,
+ model_to_use,
+ max_tokens,
+ ) = await self.get_prompt_parts(rif, sdk, full_file_contents)
+ contents, common_whitespace = dedent_and_get_common_whitespace(contents)
+ prompt = self.compile_prompt(file_prefix, contents, file_suffix, sdk)
+ full_file_contents_lines = full_file_contents.split("\n")
+
+ lines_to_display = []
+
+ async def sendDiffUpdate(
+ lines: List[str], sdk: ContinueSDK, final: bool = False
+ ):
+ nonlocal full_file_contents_lines, rif, lines_to_display
+
+ completion = "\n".join(lines)
+
+ full_prefix_lines = full_file_contents_lines[: rif.range.start.line]
+ full_suffix_lines = full_file_contents_lines[rif.range.end.line :]
+
+ # Don't do this at the very end, just show the inserted code
+ if final:
+ lines_to_display = []
+ # Only recalculate at every new-line, because this is sort of expensive
+ elif completion.endswith("\n"):
+ contents_lines = rif.contents.split("\n")
+ rewritten_lines = 0
+ for line in lines:
+ for i in range(rewritten_lines, len(contents_lines)):
+ if (
+ difflib.SequenceMatcher(
+ None, line, contents_lines[i]
+ ).ratio()
+ > 0.7
+ and contents_lines[i].strip() != ""
+ ):
+ rewritten_lines = i + 1
+ break
+ lines_to_display = contents_lines[rewritten_lines:]
+
+ new_file_contents = (
+ "\n".join(full_prefix_lines)
+ + "\n"
+ + completion
+ + "\n"
+ + (
+ "\n".join(lines_to_display) + "\n"
+ if len(lines_to_display) > 0
+ else ""
+ )
+ + "\n".join(full_suffix_lines)
+ )
+
+ step_index = sdk.history.current_index
+
+ await sdk.ide.showDiff(rif.filepath, new_file_contents, step_index)
+
+ # Important state variables
+ # -------------------------
+ original_lines = [] if rif.contents == "" else rif.contents.split("\n")
+ # In the actual file, taking into account block offset
+ current_line_in_file = rif.range.start.line
+ current_block_lines = []
+ original_lines_below_previous_blocks = original_lines
+ # The start of the current block in file, taking into account block offset
+ current_block_start = -1
+ offset_from_blocks = 0
+
+ # Don't end the block until you've matched N simultaneous lines
+ # This helps avoid many tiny blocks
+ LINES_TO_MATCH_BEFORE_ENDING_BLOCK = 2
+ # If a line has been matched at the end of the block, this is its index within original_lines_below_previous_blocks
+ # Except we are keeping track of multiple potentialities, so it's a list
+ # We always check the lines following each of these leads, but if multiple make it out at the end, we use the first one
+ # This is a tuple of (index_of_last_matched_line, number_of_lines_matched)
+ indices_of_last_matched_lines = []
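+        # Worked example (illustrative): if a generated line equals
+        # original_lines_below_previous_blocks[4], we record (4, 1); if the
+        # next generated line equals index 5, that entry becomes (5, 2), and
+        # with LINES_TO_MATCH_BEFORE_ENDING_BLOCK == 2 the block ends there.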
+
+ async def handle_generated_line(line: str):
+ nonlocal current_block_start, current_line_in_file, original_lines, original_lines_below_previous_blocks, current_block_lines, indices_of_last_matched_lines, LINES_TO_MATCH_BEFORE_ENDING_BLOCK, offset_from_blocks
+
+ # Highlight the line to show progress
+ line_to_highlight = current_line_in_file - len(current_block_lines)
+            if False:  # disabled: per-line progress highlighting in the IDE
+ await sdk.ide.highlightCode(
+ RangeInFile(
+ filepath=rif.filepath,
+ range=Range.from_shorthand(
+ line_to_highlight, 0, line_to_highlight, 0
+ ),
+ ),
+ "#FFFFFF22" if len(current_block_lines) == 0 else "#00FF0022",
+ )
+
+ if len(current_block_lines) == 0:
+ # Set this as the start of the next block
+ current_block_start = (
+ rif.range.start.line
+ + len(original_lines)
+ - len(original_lines_below_previous_blocks)
+ + offset_from_blocks
+ )
+ if (
+ len(original_lines_below_previous_blocks) > 0
+ and line == original_lines_below_previous_blocks[0]
+ ):
+ # Line is equal to the next line in file, move past this line
+ original_lines_below_previous_blocks = (
+ original_lines_below_previous_blocks[1:]
+ )
+ return
+
+ # In a block, and have already matched at least one line
+ # Check if the next line matches, for each of the candidates
+ matches_found = []
+ first_valid_match = None
+ for (
+ index_of_last_matched_line,
+ num_lines_matched,
+ ) in indices_of_last_matched_lines:
+ if (
+ index_of_last_matched_line + 1
+ < len(original_lines_below_previous_blocks)
+ and line
+ == original_lines_below_previous_blocks[
+ index_of_last_matched_line + 1
+ ]
+ ):
+ matches_found.append(
+ (index_of_last_matched_line + 1, num_lines_matched + 1)
+ )
+ if (
+ first_valid_match is None
+ and num_lines_matched + 1 >= LINES_TO_MATCH_BEFORE_ENDING_BLOCK
+ ):
+ first_valid_match = (
+ index_of_last_matched_line + 1,
+ num_lines_matched + 1,
+ )
+ indices_of_last_matched_lines = matches_found
+
+ if first_valid_match is not None:
+ # We've matched the required number of lines, insert suggestion!
+
+ # We added some lines to the block that were matched (including maybe some blank lines)
+ # So here we will strip all matching lines from the end of current_block_lines
+ lines_stripped = []
+ index_of_last_line_in_block = first_valid_match[0]
+ while (
+ len(current_block_lines) > 0
+ and current_block_lines[-1]
+ == original_lines_below_previous_blocks[
+ index_of_last_line_in_block - 1
+ ]
+ ):
+ lines_stripped.append(current_block_lines.pop())
+ index_of_last_line_in_block -= 1
+
+ # It's also possible that some lines match at the beginning of the block
+ # lines_stripped_at_beginning = []
+ # j = 0
+ # while len(current_block_lines) > 0 and current_block_lines[0] == original_lines_below_previous_blocks[first_valid_match[0] - first_valid_match[1] + j]:
+ # lines_stripped_at_beginning.append(
+ # current_block_lines.pop(0))
+ # j += 1
+ # # current_block_start += 1
+
+ # Insert the suggestion
+ replacement = "\n".join(current_block_lines)
+ start_line = current_block_start
+ end_line = current_block_start + index_of_last_line_in_block
+
+                if False:  # disabled: inline suggestion UI; the diff view is used instead
+ await sdk.ide.showSuggestion(
+ FileEdit(
+ filepath=rif.filepath,
+ range=Range.from_shorthand(start_line, 0, end_line, 0),
+ replacement=replacement,
+ )
+ )
+
+ # Reset current block / update variables
+ current_line_in_file += 1
+ offset_from_blocks += len(current_block_lines)
+ original_lines_below_previous_blocks = (
+ original_lines_below_previous_blocks[
+ index_of_last_line_in_block + 1 :
+ ]
+ )
+ current_block_lines = []
+ current_block_start = -1
+ indices_of_last_matched_lines = []
+
+ return
+
+ # Always look for new matching candidates
+ new_matches = []
+ for i in range(len(original_lines_below_previous_blocks)):
+ og_line = original_lines_below_previous_blocks[i]
+ # TODO: It's a bit sus to be disqualifying empty lines.
+ # What you ideally do is find ALL matches, and then throw them out as you check the following lines
+ if og_line == line: # and og_line.strip() != "":
+ new_matches.append((i, 1))
+ indices_of_last_matched_lines += new_matches
+
+ # Make sure they are sorted by index
+ indices_of_last_matched_lines = sorted(
+ indices_of_last_matched_lines, key=lambda x: x[0]
+ )
+
+ current_block_lines.append(line)
+
+ messages = await sdk.get_chat_context()
+ # Delete the last user and assistant messages
+ i = len(messages) - 1
+ deleted = 0
+ while i >= 0 and deleted < 2:
+ if messages[i].role == "user" or messages[i].role == "assistant":
+ messages.pop(i)
+ deleted += 1
+ i -= 1
+ messages.append(
+ ChatMessage(role="user", content=prompt, summary=self.user_input)
+ )
+
+ lines_of_prefix_copied = 0
+ lines = []
+ unfinished_line = ""
+ completion_lines_covered = 0
+ repeating_file_suffix = False
+ line_below_highlighted_range = file_suffix.lstrip().split("\n")[0]
+
+ # Use custom templates defined by the model
+ if template := model_to_use.prompt_templates.get("edit"):
+ rendered = render_prompt_template(
+ template,
+ messages[:-1],
+ {
+ "code_to_edit": rif.contents,
+ "user_input": self.user_input,
+ "file_prefix": file_prefix,
+ "file_suffix": file_suffix,
+ },
+ )
+ if isinstance(rendered, str):
+ messages = [
+ ChatMessage(
+ role="user",
+ content=rendered,
+ summary=self.user_input,
+ )
+ ]
+ else:
+ messages = rendered
+
+ generator = model_to_use.stream_complete(
+ rendered,
+ temperature=sdk.config.temperature,
+ max_tokens=min(max_tokens, model_to_use.context_length // 2),
+ )
+
+ else:
+
+ async def gen():
+ async for chunk in model_to_use.stream_chat(
+ messages,
+ temperature=sdk.config.temperature,
+ max_tokens=min(max_tokens, model_to_use.context_length // 2),
+ ):
+ if "content" in chunk:
+ yield chunk["content"]
+
+ generator = gen()
+
+ posthog_logger.capture_event(
+ "model_use",
+ {"model": model_to_use.model, "provider": model_to_use.__class__.__name__},
+ )
+ dev_data_logger.capture(
+ "model_use",
+ {"model": model_to_use.model, "provider": model_to_use.__class__.__name__},
+ )
+
+ try:
+ async for chunk in generator:
+ # Stop early if it is repeating the file_suffix or the step was deleted
+ if repeating_file_suffix:
+ break
+ if sdk.current_step_was_deleted():
+ return
+
+ # Accumulate lines
+ chunk_lines = chunk.split("\n")
+ chunk_lines[0] = unfinished_line + chunk_lines[0]
+ if chunk.endswith("\n"):
+ unfinished_line = ""
+ chunk_lines.pop() # because this will be an empty string
+ else:
+ unfinished_line = chunk_lines.pop()
+
+ # Deal with newly accumulated lines
+ for i in range(len(chunk_lines)):
+ # Trailing whitespace doesn't matter
+ chunk_lines[i] = chunk_lines[i].rstrip()
+ chunk_lines[i] = common_whitespace + chunk_lines[i]
+
+ # Lines that should signify the end of generation
+ if self.is_end_line(chunk_lines[i]):
+ break
+ # Lines that should be ignored, like the <> tags
+ elif self.line_to_be_ignored(
+ chunk_lines[i], completion_lines_covered == 0
+ ):
+                        continue  # skip tag lines entirely
+ # Check if we are currently just copying the prefix
+ elif (
+ (lines_of_prefix_copied > 0 or completion_lines_covered == 0)
+ and lines_of_prefix_copied < len(file_prefix.splitlines())
+ and chunk_lines[i]
+ == full_file_contents_lines[lines_of_prefix_copied]
+ ):
+ # This is a sketchy way of stopping it from repeating the file_prefix. Is a bug if output happens to have a matching line
+ lines_of_prefix_copied += 1
+                        continue  # skip lines that merely repeat the file prefix
+ # Because really short lines might be expected to be repeated, this is only a !heuristic!
+ # Stop when it starts copying the file_suffix
+ elif (
+ chunk_lines[i].strip() == line_below_highlighted_range.strip()
+ and len(chunk_lines[i].strip()) > 4
+ and not (
+ len(original_lines_below_previous_blocks) > 0
+ and chunk_lines[i].strip()
+ == original_lines_below_previous_blocks[0].strip()
+ )
+ ):
+ repeating_file_suffix = True
+ break
+
+ # If none of the above, insert the line!
+                    if False:  # disabled: legacy block-matching suggestion path
+ await handle_generated_line(chunk_lines[i])
+
+ lines.append(chunk_lines[i])
+ completion_lines_covered += 1
+ current_line_in_file += 1
+
+ await sendDiffUpdate(
+ lines
+ + [
+ common_whitespace
+ if unfinished_line.startswith("<")
+ else (common_whitespace + unfinished_line)
+ ],
+ sdk,
+ )
+ finally:
+ await generator.aclose()
+ # Add the unfinished line
+ if (
+ unfinished_line != ""
+ and not self.line_to_be_ignored(
+ unfinished_line, completion_lines_covered == 0
+ )
+ and not self.is_end_line(unfinished_line)
+ ):
+ unfinished_line = common_whitespace + unfinished_line
+ lines.append(unfinished_line)
+ await handle_generated_line(unfinished_line)
+ completion_lines_covered += 1
+ current_line_in_file += 1
+
+ await sendDiffUpdate(lines, sdk, final=True)
+
+        if False:  # disabled: flush of any remaining suggestion block
+ # If the current block isn't empty, add that suggestion
+ if len(current_block_lines) > 0:
+ # We have a chance to back-track here for blank lines that are repeats of the end of the original
+ # Don't want to have the same ending in both the original and the generated, can just leave it there
+ num_to_remove = 0
+ for i in range(-1, -len(current_block_lines) - 1, -1):
+ if len(original_lines_below_previous_blocks) == 0:
+ break
+ if (
+ current_block_lines[i]
+ == original_lines_below_previous_blocks[-1]
+ ):
+ num_to_remove += 1
+ original_lines_below_previous_blocks.pop()
+ else:
+ break
+ current_block_lines = (
+ current_block_lines[:-num_to_remove]
+ if num_to_remove > 0
+ else current_block_lines
+ )
+
+ # It's also possible that some lines match at the beginning of the block
+ # while len(current_block_lines) > 0 and len(original_lines_below_previous_blocks) > 0 and current_block_lines[0] == original_lines_below_previous_blocks[0]:
+ # current_block_lines.pop(0)
+ # original_lines_below_previous_blocks.pop(0)
+ # current_block_start += 1
+
+ await sdk.ide.showSuggestion(
+ FileEdit(
+ filepath=rif.filepath,
+ range=Range.from_shorthand(
+ current_block_start,
+ 0,
+ current_block_start
+ + len(original_lines_below_previous_blocks),
+ 0,
+ ),
+ replacement="\n".join(current_block_lines),
+ )
+ )
+
+ # Record the completion
+ completion = "\n".join(lines)
+ self._previous_contents = "\n".join(original_lines)
+ self._new_contents = completion
+ self._prompt_and_completion += prompt + completion
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ await sdk.update_ui()
+
+ rif_with_contents = []
+ for range_in_file in map(
+ lambda x: RangeInFile(
+ filepath=x.filepath,
+ # Only consider the range line-by-line. Maybe later don't if it's only a single line.
+ range=x.range.to_full_lines(),
+ ),
+ self.range_in_files,
+ ):
+ file_contents = await sdk.ide.readRangeInFile(range_in_file)
+ rif_with_contents.append(
+ RangeInFileWithContents.from_range_in_file(range_in_file, file_contents)
+ )
+
+ rif_dict = {}
+ for rif in rif_with_contents:
+ rif_dict[rif.filepath] = rif.contents
+
+ for rif in rif_with_contents:
+ await sdk.ide.setSuggestionsLocked(rif.filepath, True)
+ await self.stream_rif(rif, sdk)
+ await sdk.ide.setSuggestionsLocked(rif.filepath, False)
+
+ changes = "\n".join(
+ difflib.ndiff(
+ self._previous_contents.splitlines(),
+ self._new_contents.splitlines(),
+ )
+ )
+
+ if sdk.config.disable_summaries:
+ self.name = ""
+ self.description = f"Edited {len(self.range_in_files)} files"
+ await sdk.update_ui()
+ else:
+ self.name = "Generating summary"
+ self.description = ""
+ async for chunk in sdk.models.summarize.stream_complete(
+ dedent(
+ f"""\
+ Diff summary: "{self.user_input}"
+
+ ```diff
+ {changes}
+ ```
+
+ {self.summary_prompt}"""
+ )
+ ):
+ self.description += chunk
+ await sdk.update_ui()
+
+ sdk.context.set("last_edit_user_input", self.user_input)
+ sdk.context.set("last_edit_diff", changes)
+ sdk.context.set("last_edit_range", self.range_in_files[-1].range)
+
+
+class EditFileStep(Step):
+ filepath: str
+ prompt: str
+ hide: bool = True
+ model: Optional[LLM] = None
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return "Editing file: " + self.filepath
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ file_contents = await sdk.ide.readFile(self.filepath)
+ await sdk.run_step(
+ DefaultModelEditCodeStep(
+ range_in_files=[
+ RangeInFile.from_entire_file(self.filepath, file_contents)
+ ],
+ user_input=self.prompt,
+ model=self.model,
+ )
+ )
+
+
+class ManualEditStep(ReversibleStep):
+ edit_diff: EditDiff
+ hide: bool = True
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return "Manual edit step"
+ # TODO - only handling FileEdit here, but need all other types of FileSystemEdits
+ # Also requires the merge_file_edit function
+ # return llm.complete(dedent(f"""This code was replaced:
+
+ # {self.edit_diff.backward.replacement}
+
+ # With this code:
+
+ # {self.edit_diff.forward.replacement}
+
+ # Maximally concise summary of changes in bullet points (can use markdown):
+ # """))
+
+ @classmethod
+ def from_sequence(cls, edits: List[FileEditWithFullContents]) -> "ManualEditStep":
+ diffs = []
+ for edit in edits:
+ _, diff = FileSystem.apply_edit_to_str(edit.fileContents, edit.fileEdit)
+ diffs.append(diff)
+ return cls(edit_diff=EditDiff.from_sequence(diffs))
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ return None
+
+ async def reverse(self, sdk: ContinueSDK):
+ await sdk.ide.applyFileSystemEdit(self.edit_diff.backward)
+
+
+class UserInputStep(Step):
+ user_input: str
+ name: str = "User Input"
+ hide: bool = False
+
+ manage_own_chat_context: bool = True
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self.description is not None:
+ return self.description
+ return self.user_input
+
+ async def run(
+ self, sdk: ContinueSDK
+ ) -> Coroutine[UserInputObservation, None, None]:
+ self.chat_context.append(
+ ChatMessage(role="user", content=self.user_input, summary=self.user_input)
+ )
+ self.description = self.user_input
+ return UserInputObservation(user_input=self.user_input)
+
+
+class WaitForUserInputStep(Step):
+ prompt: str
+ name: str = "Waiting for user input"
+
+ _description: Union[str, None] = None
+ _response: Union[str, None] = None
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self._response is None:
+ return self.prompt
+ else:
+ return f"{self.prompt}\n\n`{self._response}`"
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ self.description = self.prompt
+ resp = await sdk.wait_for_user_input()
+ self.description = f"{self.prompt}\n\n`{resp}`"
+ return TextObservation(text=resp)
+
+
+class WaitForUserConfirmationStep(Step):
+ prompt: str
+ name: str = "Waiting for user confirmation"
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return self.prompt
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ self.description = self.prompt
+ resp = await sdk.wait_for_user_input()
+ return TextObservation(text=resp)