author    Nate Sesti <33237525+sestinj@users.noreply.github.com>    2023-10-09 18:37:27 -0700
committer GitHub <noreply@github.com>    2023-10-09 18:37:27 -0700
commit    f09150617ed2454f3074bcf93f53aae5ae637d40 (patch)
tree      5cfe614a64d921dfe58b049f426d67a8b832c71f /server
parent    985304a213f620cdff3f8f65f74ed7e3b79be29d (diff)
download  sncontinue-f09150617ed2454f3074bcf93f53aae5ae637d40.tar.gz
          sncontinue-f09150617ed2454f3074bcf93f53aae5ae637d40.tar.bz2
          sncontinue-f09150617ed2454f3074bcf93f53aae5ae637d40.zip
Preview (#541)
* Strong typing (#533)
* refactor: :recycle: get rid of continuedev.src.continuedev structure
* refactor: :recycle: switching back to server folder
* feat: :sparkles: make config.py imports shorter
* feat: :bookmark: publish as pre-release vscode extension
* refactor: :recycle: refactor and add more completion params to ui
* build: :building_construction: download from preview S3
* fix: :bug: fix paths
* fix: :green_heart: package:pre-release
* ci: :green_heart: more time for tests
* fix: :green_heart: fix build scripts
* fix: :bug: fix import in run.py
* fix: :bookmark: update version to try again
* ci: 💚 Update package.json version [skip ci]
* refactor: :fire: don't check for old extensions version
* fix: :bug: small bug fixes
* fix: :bug: fix config.py import paths
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: platform-specific builds test #1
* feat: :green_heart: ship with binary
* fix: :green_heart: fix copy statement to include.exe for windows
* fix: :green_heart: cd extension before packaging
* chore: :loud_sound: count tokens generated
* fix: :green_heart: remove npm_config_arch
* fix: :green_heart: publish as pre-release!
* chore: :bookmark: update version
* perf: :green_heart: hardcode distro paths
* fix: :bug: fix yaml syntax error
* chore: :bookmark: update version
* fix: :green_heart: update permissions and version
* feat: :bug: kill old server if needed
* feat: :lipstick: update marketplace icon for pre-release
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: auto-reload for config.py
* feat: :wrench: update default config.py imports
* feat: :sparkles: codelens in config.py
* feat: :sparkles: select model param count from UI
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: more model options, ollama error handling
* perf: :zap: don't show server loading immediately
* fix: :bug: fixing small UI details
* ci: 💚 Update package.json version [skip ci]
* feat: :rocket: headers param on LLM class
* fix: :bug: fix headers for openai.;y
* feat: :sparkles: highlight code on cmd+shift+L
* ci: 💚 Update package.json version [skip ci]
* feat: :lipstick: sticky top bar in gui.tsx
* fix: :loud_sound: websocket logging and horizontal scrollbar
* ci: 💚 Update package.json version [skip ci]
* feat: :sparkles: allow AzureOpenAI Service through GGML
* ci: 💚 Update package.json version [skip ci]
* fix: :bug: fix automigration
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: upload binaries in ci, download apple silicon
* chore: :fire: remove notes
* fix: :green_heart: use curl to download binary
* fix: :green_heart: set permissions on apple silicon binary
* fix: :green_heart: testing
* fix: :green_heart: cleanup file
* fix: :green_heart: fix preview.yaml
* fix: :green_heart: only upload once per binary
* fix: :green_heart: install rosetta
* ci: :green_heart: download binary after tests
* ci: 💚 Update package.json version [skip ci]
* ci: :green_heart: prepare ci for merge to main

---------

Co-authored-by: GitHub Action <action@github.com>
Diffstat (limited to 'server')
-rw-r--r--  server/README.md | 88
-rw-r--r--  server/continuedev/__init__.py | 19
-rw-r--r--  server/continuedev/__main__.py | 30
-rw-r--r--  server/continuedev/core/abstract_sdk.py | 82
-rw-r--r--  server/continuedev/core/autopilot.py | 746
-rw-r--r--  server/continuedev/core/config.py | 114
-rw-r--r--  server/continuedev/core/context.py | 516
-rw-r--r--  server/continuedev/core/env.py | 31
-rw-r--r--  server/continuedev/core/lsp.py | 416
-rw-r--r--  server/continuedev/core/main.py | 437
-rw-r--r--  server/continuedev/core/models.py | 113
-rw-r--r--  server/continuedev/core/observation.py | 41
-rw-r--r--  server/continuedev/core/sdk.py | 309
-rw-r--r--  server/continuedev/core/steps.py | 963
-rw-r--r--  server/continuedev/headless/__init__.py | 20
-rw-r--r--  server/continuedev/headless/headless_ide.py | 181
-rw-r--r--  server/continuedev/libs/__init__.py | 0
-rw-r--r--  server/continuedev/libs/chroma/.gitignore | 1
-rw-r--r--  server/continuedev/libs/chroma/query.py | 218
-rw-r--r--  server/continuedev/libs/chroma/update.py | 66
-rw-r--r--  server/continuedev/libs/constants/default_config.py | 88
-rw-r--r--  server/continuedev/libs/constants/main.py | 6
-rw-r--r--  server/continuedev/libs/llm/__init__.py | 14
-rw-r--r--  server/continuedev/libs/llm/anthropic.py | 74
-rw-r--r--  server/continuedev/libs/llm/base.py | 458
-rw-r--r--  server/continuedev/libs/llm/ggml.py | 226
-rw-r--r--  server/continuedev/libs/llm/google_palm_api.py | 50
-rw-r--r--  server/continuedev/libs/llm/hf_inference_api.py | 78
-rw-r--r--  server/continuedev/libs/llm/hf_tgi.py | 65
-rw-r--r--  server/continuedev/libs/llm/hugging_face.py | 19
-rw-r--r--  server/continuedev/libs/llm/llamacpp.py | 86
-rw-r--r--  server/continuedev/libs/llm/ollama.py | 106
-rw-r--r--  server/continuedev/libs/llm/openai.py | 156
-rw-r--r--  server/continuedev/libs/llm/openai_free_trial.py | 83
-rw-r--r--  server/continuedev/libs/llm/prompt_utils.py | 76
-rw-r--r--  server/continuedev/libs/llm/prompts/chat.py | 174
-rw-r--r--  server/continuedev/libs/llm/prompts/edit.py | 27
-rw-r--r--  server/continuedev/libs/llm/proxy_server.py | 108
-rw-r--r--  server/continuedev/libs/llm/queued.py | 77
-rw-r--r--  server/continuedev/libs/llm/replicate.py | 78
-rw-r--r--  server/continuedev/libs/llm/text_gen_interface.py | 114
-rw-r--r--  server/continuedev/libs/llm/together.py | 125
-rw-r--r--  server/continuedev/libs/util/calculate_diff.py | 154
-rw-r--r--  server/continuedev/libs/util/commonregex.py | 144
-rw-r--r--  server/continuedev/libs/util/copy_codebase.py | 121
-rw-r--r--  server/continuedev/libs/util/count_tokens.py | 206
-rw-r--r--  server/continuedev/libs/util/create_async_task.py | 38
-rw-r--r--  server/continuedev/libs/util/devdata.py | 67
-rw-r--r--  server/continuedev/libs/util/edit_config.py | 149
-rw-r--r--  server/continuedev/libs/util/errors.py | 2
-rw-r--r--  server/continuedev/libs/util/filter_files.py | 33
-rw-r--r--  server/continuedev/libs/util/logging.py | 47
-rw-r--r--  server/continuedev/libs/util/map_path.py | 16
-rw-r--r--  server/continuedev/libs/util/paths.py | 148
-rw-r--r--  server/continuedev/libs/util/queue.py | 17
-rw-r--r--  server/continuedev/libs/util/ripgrep.py | 25
-rw-r--r--  server/continuedev/libs/util/step_name_to_steps.py | 47
-rw-r--r--  server/continuedev/libs/util/strings.py | 64
-rw-r--r--  server/continuedev/libs/util/telemetry.py | 108
-rw-r--r--  server/continuedev/libs/util/templating.py | 76
-rw-r--r--  server/continuedev/libs/util/traceback/traceback_parsers.py | 56
-rw-r--r--  server/continuedev/models/__init__.py | 0
-rw-r--r--  server/continuedev/models/filesystem.py | 398
-rw-r--r--  server/continuedev/models/filesystem_edit.py | 164
-rw-r--r--  server/continuedev/models/generate_json_schema.py | 54
-rw-r--r--  server/continuedev/models/main.py | 229
-rw-r--r--  server/continuedev/models/reference/generate.py | 144
-rw-r--r--  server/continuedev/plugins/context_providers/__init__.py | 7
-rw-r--r--  server/continuedev/plugins/context_providers/diff.py | 73
-rw-r--r--  server/continuedev/plugins/context_providers/dynamic.py | 75
-rw-r--r--  server/continuedev/plugins/context_providers/embeddings.py | 81
-rw-r--r--  server/continuedev/plugins/context_providers/file.py | 136
-rw-r--r--  server/continuedev/plugins/context_providers/filetree.py | 89
-rw-r--r--  server/continuedev/plugins/context_providers/github.py | 49
-rw-r--r--  server/continuedev/plugins/context_providers/google.py | 70
-rw-r--r--  server/continuedev/plugins/context_providers/highlighted_code.py | 293
-rw-r--r--  server/continuedev/plugins/context_providers/search.py | 90
-rw-r--r--  server/continuedev/plugins/context_providers/terminal.py | 49
-rw-r--r--  server/continuedev/plugins/context_providers/url.py | 104
-rw-r--r--  server/continuedev/plugins/context_providers/util.py | 5
-rw-r--r--  server/continuedev/plugins/policies/commit.py | 77
-rw-r--r--  server/continuedev/plugins/policies/default.py | 85
-rw-r--r--  server/continuedev/plugins/policies/headless.py | 18
-rw-r--r--  server/continuedev/plugins/recipes/AddTransformRecipe/README.md | 9
-rw-r--r--  server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md | 142
-rw-r--r--  server/continuedev/plugins/recipes/AddTransformRecipe/main.py | 31
-rw-r--r--  server/continuedev/plugins/recipes/AddTransformRecipe/steps.py | 106
-rw-r--r--  server/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md | 7
-rw-r--r--  server/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py | 43
-rw-r--r--  server/continuedev/plugins/recipes/CreatePipelineRecipe/README.md | 0
-rw-r--r--  server/continuedev/plugins/recipes/CreatePipelineRecipe/main.py | 40
-rw-r--r--  server/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py | 243
-rw-r--r--  server/continuedev/plugins/recipes/DDtoBQRecipe/README.md | 3
-rw-r--r--  server/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md | 85
-rw-r--r--  server/continuedev/plugins/recipes/DDtoBQRecipe/main.py | 31
-rw-r--r--  server/continuedev/plugins/recipes/DDtoBQRecipe/steps.py | 119
-rw-r--r--  server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md | 0
-rw-r--r--  server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py | 86
-rw-r--r--  server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py | 125
-rw-r--r--  server/continuedev/plugins/recipes/README.md | 19
-rw-r--r--  server/continuedev/plugins/recipes/TemplateRecipe/README.md | 7
-rw-r--r--  server/continuedev/plugins/recipes/TemplateRecipe/main.py | 29
-rw-r--r--  server/continuedev/plugins/recipes/WritePytestsRecipe/README.md | 7
-rw-r--r--  server/continuedev/plugins/recipes/WritePytestsRecipe/main.py | 52
-rw-r--r--  server/continuedev/plugins/steps/README.md | 50
-rw-r--r--  server/continuedev/plugins/steps/__init__.py | 13
-rw-r--r--  server/continuedev/plugins/steps/chat.py | 379
-rw-r--r--  server/continuedev/plugins/steps/chroma.py | 86
-rw-r--r--  server/continuedev/plugins/steps/clear_history.py | 10
-rw-r--r--  server/continuedev/plugins/steps/cmd.py | 30
-rw-r--r--  server/continuedev/plugins/steps/comment_code.py | 16
-rw-r--r--  server/continuedev/plugins/steps/custom_command.py | 29
-rw-r--r--  server/continuedev/plugins/steps/draft/abstract_method.py | 21
-rw-r--r--  server/continuedev/plugins/steps/draft/redux.py | 50
-rw-r--r--  server/continuedev/plugins/steps/draft/typeorm.py | 54
-rw-r--r--  server/continuedev/plugins/steps/feedback.py | 14
-rw-r--r--  server/continuedev/plugins/steps/find_and_replace.py | 30
-rw-r--r--  server/continuedev/plugins/steps/help.py | 70
-rw-r--r--  server/continuedev/plugins/steps/input/nl_multiselect.py | 32
-rw-r--r--  server/continuedev/plugins/steps/main.py | 422
-rw-r--r--  server/continuedev/plugins/steps/on_traceback.py | 206
-rw-r--r--  server/continuedev/plugins/steps/open_config.py | 17
-rw-r--r--  server/continuedev/plugins/steps/react.py | 44
-rw-r--r--  server/continuedev/plugins/steps/refactor.py | 136
-rw-r--r--  server/continuedev/plugins/steps/search_directory.py | 84
-rw-r--r--  server/continuedev/plugins/steps/setup_model.py | 38
-rw-r--r--  server/continuedev/plugins/steps/share_session.py | 52
-rw-r--r--  server/continuedev/plugins/steps/steps_on_startup.py | 19
-rw-r--r--  server/continuedev/plugins/steps/welcome.py | 40
-rw-r--r--  server/continuedev/server/gui.py | 459
-rw-r--r--  server/continuedev/server/ide.py | 680
-rw-r--r--  server/continuedev/server/ide_protocol.py | 170
-rw-r--r--  server/continuedev/server/main.py | 109
-rw-r--r--  server/continuedev/server/meilisearch_server.py | 196
-rw-r--r--  server/continuedev/server/session_manager.py | 192
-rw-r--r--  server/dev_requirements.txt | 2
-rwxr-xr-x  server/install-dependencies.sh | 16
-rw-r--r--  server/main.py | 5
-rw-r--r--  server/notes.md | 101
-rw-r--r--  server/poetry.lock | 2414
-rw-r--r--  server/poetry.toml | 2
-rw-r--r--  server/pyproject.toml | 47
-rw-r--r--  server/requirements.txt | 27
-rw-r--r--  server/tests/__init__.py | 0
-rw-r--r--  server/tests/llm_test.py | 179
-rw-r--r--  server/tests/step_test.py | 68
-rw-r--r--  server/tests/util/__init__.py | 0
-rw-r--r--  server/tests/util/config.py | 19
-rw-r--r--  server/tests/util/openai_mock.py | 139
-rw-r--r--  server/tests/util/prompts.py | 2
150 files changed, 18440 insertions, 0 deletions
diff --git a/server/README.md b/server/README.md
new file mode 100644
index 00000000..25fb640e
--- /dev/null
+++ b/server/README.md
@@ -0,0 +1,88 @@
+# Continue PyPI Package
+
+This package contains the [Continue](https://github.com/continuedev/continue) server and core classes needed to build your own recipes.
+
+Continue is a Python library for automating repetitive sequences of software development tasks using language models. Using our VS Code extension, you can build, run, and refine these recipes as they natively interact with your codebase. Read the docs [here](https://continue.dev/docs) or download the VS Code extension [here](https://marketplace.visualstudio.com/items?itemName=Continue.continue).
+
+## Continue Server
+
+The Continue server acts as a bridge between the Continue React app and your IDE, running your recipes and acting on the codebase.
+
+Start it by running the following commands:
+
+1. `cd server`
+2. Make sure packages are installed with `poetry install`
+ - If poetry is not installed, you can install with
+ ```bash
+ curl -sSL https://install.python-poetry.org | python3 -
+ ```
+ (official instructions [here](https://python-poetry.org/docs/#installing-with-the-official-installer))
+3. `poetry shell` to activate the virtual environment
+4. `python3 -m continuedev.server.main` to start the server
+
+Once you've validated that this works, you'll often want to use a debugger, in which case we've provided a launch configuration for VS Code in `.vscode/launch.json`. To start the debugger in VS Code, ensure that the workspace directory is the root of the `continue` repo, then press F5.
+
+> [!NOTE]
+> To start the debugger, you'll have to select the poetry Python interpreter
+> (`/path-to-poetry-venv/bin/python3`) in the bottom right of the VS Code window. If you
+> don't see this, you may have to install the [Python
+> extension](https://marketplace.visualstudio.com/items?itemName=ms-python.python).
+
+## Scripts
+
+`poetry run typegen` to generate JSONSchema .json files from the Pydantic types defined in the `models` directory.
+
+`poetry build` will output wheel and tarball files in `./dist`.
+
+## Writing Steps
+
+See the `continuedev/plugins/steps` folder for examples of writing a Continue step. See our documentation for tutorials.
+
+## How to contribute
+
+Open a [new GitHub Issue](https://github.com/continuedev/continue/issues/new) or comment on [an existing one](https://github.com/continuedev/continue/issues). Let us know what you would like to contribute, and we will help you make it happen!
+
+For a more detailed contributing guide, see [CONTRIBUTING.md](../CONTRIBUTING.md).
+
+## Install from source
+
+#### 1. Clone this repo
+
+Recommended: Run this command to use SSH
+
+```bash
+git clone git@github.com:continuedev/continue.git
+```
+
+Alternative: Run this command to use HTTPS
+
+```bash
+git clone https://github.com/continuedev/continue
+```
+
+#### 2. Install Continue
+
+Run this command to use the install script
+
+```bash
+cd continue/extension/scripts && python3 install_from_source.py
+```
+
+> [!IMPORTANT]
+> Ensure you have a Java Runtime Environment (JRE) installed. Verify this by typing `java -version`
+> in your command prompt or terminal. If a version number appears, you're set.
+> If not, download and install a JRE from Oracle's website or through a package manager,
+> for example Homebrew.
+>
+> ```sh
+> brew install openjdk@11
+> ```
+
+# Understanding the codebase
+
+- [Continue Server README](./README.md): learn about the core of Continue, which can be downloaded as a [PyPI package](https://pypi.org/project/continuedev/)
+- [VS Code Extension README](../extension/README.md): learn about the capabilities of our extension—the first implementation of Continue's IDE Protocol—which makes it possible to use Continue in VS Code and GitHub Codespaces
+- [Continue GUI README](../extension/react-app/): learn about the React app that lets users interact with the server and is placed adjacent to the text editor in any supported IDE
+- [Schema README](../schema/README.md): learn about the JSON Schema types generated from Pydantic models, which we use across the `server/` and `extension/` directories
+- [Continue Docs README](../docs/README.md): learn how our [docs](https://continue.dev/docs) are written and built
+- [How to debug the VS Code Extension README](../extension/src/README.md): learn how to set up the VS Code extension, so you can debug it
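The "Writing Steps" section of the README above points at the steps folder for examples. As a rough, hypothetical sketch (not part of this commit), a custom Step might look like the following, assuming the `Step` base class exposes the async `run(sdk)`/`describe(models)` interface used by the bundled steps and that the SDK provides the filesystem helpers declared in `core/abstract_sdk.py` further down in this diff:

```python
# Hypothetical custom Step; the class name and filename are illustrative only.
from continuedev.core.main import Step
from continuedev.core.sdk import ContinueSDK


class CreateHelloFileStep(Step):
    name: str = "Create hello.txt"
    filename: str = "hello.txt"

    async def describe(self, models):
        # Shown in the GUI timeline once the step finishes.
        return f"Created {self.filename} in the workspace."

    async def run(self, sdk: ContinueSDK):
        # add_file is one of the SDK methods declared in AbstractContinueSDK.
        await sdk.add_file(self.filename, "Hello from a custom Step!\n")
```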
diff --git a/server/continuedev/__init__.py b/server/continuedev/__init__.py
new file mode 100644
index 00000000..1b4776a8
--- /dev/null
+++ b/server/continuedev/__init__.py
@@ -0,0 +1,19 @@
+import asyncio
+from typing import Union
+
+from .core.config import ContinueConfig
+from .core.main import Step
+from .headless import start_headless_session
+
+
+def run(step_or_config: Union[Step, ContinueConfig]):
+ if isinstance(step_or_config, ContinueConfig):
+ config = step_or_config
+ else:
+ config = ContinueConfig()
+ config.steps_on_startup = [step_or_config]
+
+ loop = asyncio.get_event_loop()
+ loop.run_until_complete(start_headless_session(config=config))
+ tasks = asyncio.all_tasks(loop)
+ loop.run_until_complete(asyncio.gather(*tasks))
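A hedged usage sketch for the `run()` helper above: passing a `ContinueConfig` runs its `steps_on_startup` in a headless session, while passing a single `Step` wraps it in a fresh config first. `CreateHelloFileStep` refers to the illustrative step sketched earlier, not to anything in this commit.

```python
# Hypothetical headless invocation of continuedev.run().
from continuedev import run
from continuedev.core.config import ContinueConfig

config = ContinueConfig()
# run() would populate this list itself if we passed a Step directly.
config.steps_on_startup = [CreateHelloFileStep()]
run(config)  # blocks until the headless session's tasks complete
```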
diff --git a/server/continuedev/__main__.py b/server/continuedev/__main__.py
new file mode 100644
index 00000000..caaba117
--- /dev/null
+++ b/server/continuedev/__main__.py
@@ -0,0 +1,30 @@
+from typing import Optional
+
+import typer
+
+from . import run
+from .server.main import run_server
+
+app = typer.Typer()
+
+
+@app.command()
+def main(
+ port: int = typer.Option(65432, help="server port"),
+ host: str = typer.Option("127.0.0.1", help="server host"),
+ meilisearch_url: Optional[str] = typer.Option(
+ None, help="The URL of the MeiliSearch server if running manually"
+ ),
+ config: Optional[str] = typer.Option(
+ None, help="The path to the configuration file"
+ ),
+ headless: bool = typer.Option(False, help="Run in headless mode"),
+):
+ if headless:
+ run(config)
+ else:
+ run_server(port=port, host=host, meilisearch_url=meilisearch_url)
+
+
+if __name__ == "__main__":
+ app()
diff --git a/server/continuedev/core/abstract_sdk.py b/server/continuedev/core/abstract_sdk.py
new file mode 100644
index 00000000..fdb99d47
--- /dev/null
+++ b/server/continuedev/core/abstract_sdk.py
@@ -0,0 +1,82 @@
+from abc import ABC, abstractmethod
+from typing import Coroutine, List, Union
+
+from ..models.filesystem_edit import FileSystemEdit
+from .config import ContinueConfig
+from .main import ChatMessage, History, Step
+from .observation import Observation
+
+"""
+[[Generate]]
+[Prompt]
+Write an abstract class AbstractContinueSDK(ABC) that has all of the same methods as the ContinueSDK class, but without any implementation.
+All methods should be documented with the same docstrings as the ContinueSDK class and have the same types.
+[Context]
+./sdk.py:ContinueSDK
+"""
+
+
+class AbstractContinueSDK(ABC):
+ """The SDK provided as parameters to a step"""
+
+ @property
+ def history(self) -> History:
+ return self.__autopilot.history
+
+ @abstractmethod
+ async def _ensure_absolute_path(self, path: str) -> str:
+ pass
+
+ @abstractmethod
+ async def run_step(self, step: Step) -> Coroutine[Observation, None, None]:
+ pass
+
+ @abstractmethod
+ async def apply_filesystem_edit(self, edit: FileSystemEdit):
+ pass
+
+ @abstractmethod
+ async def wait_for_user_input(self) -> str:
+ pass
+
+ @abstractmethod
+ async def wait_for_user_confirmation(self, prompt: str):
+ pass
+
+ @abstractmethod
+ async def run(self, commands: Union[List[str], str], cwd: str = None):
+ pass
+
+ @abstractmethod
+ async def edit_file(self, filename: str, prompt: str):
+ pass
+
+ @abstractmethod
+ async def append_to_file(self, filename: str, content: str):
+ pass
+
+ @abstractmethod
+ async def add_file(self, filename: str, content: Union[str, None]):
+ pass
+
+ @abstractmethod
+ async def delete_file(self, filename: str):
+ pass
+
+ @abstractmethod
+ async def add_directory(self, path: str):
+ pass
+
+ @abstractmethod
+ async def delete_directory(self, path: str):
+ pass
+
+ config: ContinueConfig
+
+ @abstractmethod
+ def set_loading_message(self, message: str):
+ pass
+
+ @abstractmethod
+ async def get_chat_context(self) -> List[ChatMessage]:
+ pass
diff --git a/server/continuedev/core/autopilot.py b/server/continuedev/core/autopilot.py
new file mode 100644
index 00000000..11c05378
--- /dev/null
+++ b/server/continuedev/core/autopilot.py
@@ -0,0 +1,746 @@
+import json
+import os
+import time
+import traceback
+import uuid
+from functools import cached_property
+from typing import Callable, Coroutine, Dict, List, Optional
+
+import redbaron
+from aiohttp import ClientPayloadError
+from openai import error as openai_errors
+from pydantic import root_validator
+
+from ..libs.llm.prompts.chat import template_alpaca_messages
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.edit_config import edit_config_property
+from ..libs.util.logging import logger
+from ..libs.util.paths import getSavedContextGroupsPath
+from ..libs.util.queue import AsyncSubscriptionQueue
+from ..libs.util.strings import remove_quotes_and_escapes
+from ..libs.util.telemetry import posthog_logger
+from ..libs.util.traceback.traceback_parsers import (
+ get_javascript_traceback,
+ get_python_traceback,
+)
+from ..models.filesystem import RangeInFileWithContents
+from ..models.filesystem_edit import FileEditWithFullContents
+from ..models.main import ContinueBaseModel
+from ..plugins.context_providers.file import FileContextProvider
+from ..plugins.context_providers.highlighted_code import HighlightedCodeContextProvider
+from ..plugins.policies.default import DefaultPolicy
+from ..plugins.steps.on_traceback import DefaultOnTracebackStep
+from ..server.ide_protocol import AbstractIdeProtocolServer
+from ..server.meilisearch_server import get_meilisearch_url, stop_meilisearch
+from .config import ContinueConfig
+from .context import ContextManager
+from .main import (
+ Context,
+ ContextItem,
+ ContinueCustomException,
+ FullState,
+ History,
+ HistoryNode,
+ Policy,
+ SessionInfo,
+ Step,
+)
+from .observation import InternalErrorObservation, Observation
+from .sdk import ContinueSDK
+from .steps import DisplayErrorStep, ManualEditStep, ReversibleStep, UserInputStep
+
+
+def get_error_title(e: Exception) -> str:
+ if isinstance(e, openai_errors.APIError):
+ return "OpenAI is overloaded with requests. Please try again."
+ elif isinstance(e, openai_errors.RateLimitError):
+ return "This OpenAI API key has been rate limited. Please try again."
+ elif isinstance(e, openai_errors.Timeout):
+ return "OpenAI timed out. Please try again."
+ elif (
+ isinstance(e, openai_errors.InvalidRequestError)
+ and e.code == "context_length_exceeded"
+ ):
+ return e._message
+ elif isinstance(e, ClientPayloadError):
+ return "The request failed. Please try again."
+ elif isinstance(e, openai_errors.APIConnectionError):
+ return 'The request failed. Please check your internet connection and try again. If this issue persists, you can use our API key for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to ""'
+ elif isinstance(e, openai_errors.InvalidRequestError):
+ return "Invalid request sent to OpenAI. Please try again."
+ elif "rate_limit_ip_middleware" in e.__str__():
+ return "You have reached your limit for free usage of our token. You can continue using Continue by entering your own OpenAI API key in VS Code settings."
+ elif e.__str__().startswith("Cannot connect to host"):
+ return (
+ "The request failed. Please check your internet connection and try again."
+ )
+ return e.__str__() or e.__repr__()
+
+
+class Autopilot(ContinueBaseModel):
+ ide: AbstractIdeProtocolServer
+
+ policy: Policy = DefaultPolicy()
+ history: History = History.from_empty()
+ context: Context = Context()
+ full_state: Optional[FullState] = None
+ session_info: Optional[SessionInfo] = None
+ context_manager: ContextManager = ContextManager()
+ continue_sdk: ContinueSDK = None
+
+ _on_update_callbacks: List[Callable[[FullState], None]] = []
+
+ _active: bool = False
+ _should_halt: bool = False
+ _main_user_input_queue: List[str] = []
+
+ _user_input_queue = AsyncSubscriptionQueue()
+ _retry_queue = AsyncSubscriptionQueue()
+
+ started: bool = False
+
+ async def load(
+ self, config: Optional[ContinueConfig] = None, only_reloading: bool = False
+ ):
+ self.continue_sdk = await ContinueSDK.create(self, config=config)
+ if override_policy := self.continue_sdk.config.policy_override:
+ self.policy = override_policy
+
+ # Load documents into the search index
+ logger.debug("Starting context manager")
+ await self.context_manager.start(
+ self.continue_sdk.config.context_providers
+ + [
+ HighlightedCodeContextProvider(ide=self.ide),
+ FileContextProvider(workspace_dir=self.ide.workspace_directory),
+ ],
+ self.continue_sdk,
+ only_reloading=only_reloading,
+ )
+
+ async def start(
+ self,
+ full_state: Optional[FullState] = None,
+ config: Optional[ContinueConfig] = None,
+ ):
+ await self.load(config=config, only_reloading=False)
+
+ if full_state is not None:
+ self.history = full_state.history
+ self.session_info = full_state.session_info
+
+ # Load saved context groups
+ context_groups_file = getSavedContextGroupsPath()
+ try:
+ with open(context_groups_file, "r") as f:
+ json_ob = json.load(f)
+ for title, context_group in json_ob.items():
+ self._saved_context_groups[title] = [
+ ContextItem(**item) for item in context_group
+ ]
+ except Exception as e:
+ logger.warning(
+ f"Failed to load saved_context_groups.json: {e}. Reverting to empty list."
+ )
+ self._saved_context_groups = {}
+
+ self.started = True
+
+ async def reload_config(self):
+ await self.load(config=None, only_reloading=True)
+ await self.update_subscribers()
+
+ async def cleanup(self):
+ stop_meilisearch()
+
+ class Config:
+ arbitrary_types_allowed = True
+ keep_untouched = (cached_property,)
+
+ @root_validator(pre=True)
+ def fill_in_values(cls, values):
+ full_state: FullState = values.get("full_state")
+ if full_state is not None:
+ values["history"] = full_state.history
+ return values
+
+ async def get_full_state(self) -> FullState:
+ full_state = FullState(
+ history=self.history,
+ active=self._active,
+ user_input_queue=self._main_user_input_queue,
+ slash_commands=self.get_available_slash_commands(),
+ adding_highlighted_code=self.context_manager.context_providers[
+ "code"
+ ].adding_highlighted_code
+ if "code" in self.context_manager.context_providers
+ else False,
+ selected_context_items=await self.context_manager.get_selected_items()
+ if self.context_manager is not None
+ else [],
+ session_info=self.session_info,
+ config=self.continue_sdk.config,
+ saved_context_groups=self._saved_context_groups,
+ context_providers=self.context_manager.get_provider_descriptions(),
+ meilisearch_url=get_meilisearch_url(),
+ )
+ self.full_state = full_state
+ return full_state
+
+ def get_available_slash_commands(self) -> List[Dict]:
+ custom_commands = (
+ list(
+ map(
+ lambda x: {"name": x.name, "description": x.description},
+ self.continue_sdk.config.custom_commands,
+ )
+ )
+ or []
+ )
+ slash_commands = (
+ list(
+ map(
+ lambda x: {"name": x.name, "description": x.description},
+ self.continue_sdk.config.slash_commands,
+ )
+ )
+ or []
+ )
+ cmds = custom_commands + slash_commands
+ cmds.sort(key=lambda x: x["name"] == "edit", reverse=True)
+ return cmds
+
+ async def clear_history(self):
+ # Reset history
+ self.history = History.from_empty()
+ self._main_user_input_queue = []
+ self._active = False
+
+ # Clear context
+ # await self.context_manager.clear_context()
+
+ await self.update_subscribers()
+
+ def on_update(self, callback: Coroutine["FullState", None, None]):
+ """Subscribe to changes to state"""
+ self._on_update_callbacks.append(callback)
+
+ async def update_subscribers(self):
+ full_state = await self.get_full_state()
+ for callback in self._on_update_callbacks:
+ await callback(full_state)
+
+ def give_user_input(self, input: str, index: int):
+ self._user_input_queue.post(str(index), input)
+
+ async def wait_for_user_input(self) -> str:
+ self._active = False
+ await self.update_subscribers()
+ user_input = await self._user_input_queue.get(str(self.history.current_index))
+ self._active = True
+ await self.update_subscribers()
+ return user_input
+
+ _manual_edits_buffer: List[FileEditWithFullContents] = []
+
+ async def reverse_to_index(self, index: int):
+ try:
+ while self.history.get_current_index() >= index:
+ current_step = self.history.get_current().step
+ self.history.step_back()
+ if issubclass(current_step.__class__, ReversibleStep):
+ await current_step.reverse(self.continue_sdk)
+
+ await self.update_subscribers()
+ except Exception as e:
+ logger.debug(e)
+
+ def handle_manual_edits(self, edits: List[FileEditWithFullContents]):
+ for edit in edits:
+ self._manual_edits_buffer.append(edit)
+ # TODO: You're storing a lot of unnecessary data here. Can compress into EditDiffs on the spot, and merge.
+ # self._manual_edits_buffer = merge_file_edit(self._manual_edits_buffer, edit)
+ # Note that this is being overridden to do nothing in DemoAgent
+
+ async def handle_command_output(self, output: str):
+ get_traceback_funcs = [get_python_traceback, get_javascript_traceback]
+ for get_tb_func in get_traceback_funcs:
+ traceback = get_tb_func(output)
+ if (
+ traceback is not None
+ and self.continue_sdk.config.on_traceback is not None
+ ):
+ step = self.continue_sdk.config.on_traceback(output=output)
+ await self._run_singular_step(step)
+
+ async def handle_debug_terminal(self, content: str):
+ """Run the debug terminal step"""
+ # step = self.continue_sdk.config.on_traceback(output=content)
+ step = DefaultOnTracebackStep(output=content)
+ await self._run_singular_step(step)
+
+ async def handle_highlighted_code(
+ self,
+ range_in_files: List[RangeInFileWithContents],
+ edit: Optional[bool] = False,
+ ):
+ if "code" not in self.context_manager.context_providers:
+ return
+
+ # Add to context manager
+ await self.context_manager.context_providers["code"].handle_highlighted_code(
+ range_in_files, edit
+ )
+
+ await self.update_subscribers()
+
+ _step_depth: int = 0
+
+ async def retry_at_index(self, index: int):
+ self.history.timeline[index].step.hide = True
+ self._retry_queue.post(str(index), None)
+
+ async def delete_at_index(self, index: int):
+ if not self.history.timeline[index].active:
+ self.history.timeline[index].step.hide = True
+
+ self.history.timeline[index].deleted = True
+ self.history.timeline[index].active = False
+
+ await self.update_subscribers()
+
+ async def edit_step_at_index(self, user_input: str, index: int):
+ node_to_rerun = self.history.timeline[index].copy()
+ step_to_rerun = node_to_rerun.step
+ step_to_rerun.user_input = user_input
+ step_to_rerun.description = user_input
+
+ # Halt the agent's currently running jobs (delete them)
+ while len(self.history.timeline) > index:
+ # Remove from timeline
+ node_to_delete = self.history.timeline.pop()
+ # Delete so it is stopped if in the middle of running
+ node_to_delete.deleted = True
+
+ self.history.current_index = index - 1
+
+ # Set the context to the context used by that step
+ await self.context_manager.clear_context()
+ for context_item in node_to_rerun.context_used:
+ await self.context_manager.manually_add_context_item(context_item)
+
+ await self.update_subscribers()
+
+ # Rerun from the current step
+ await self.run_from_step(step_to_rerun)
+
+ async def delete_context_with_ids(
+ self, ids: List[str], index: Optional[int] = None
+ ):
+ if index is None:
+ await self.context_manager.delete_context_with_ids(ids)
+ else:
+ self.history.timeline[index].context_used = list(
+ filter(
+ lambda item: item.description.id.to_string() not in ids,
+ self.history.timeline[index].context_used,
+ )
+ )
+ await self.update_subscribers()
+
+ async def toggle_adding_highlighted_code(self):
+ if "code" not in self.context_manager.context_providers:
+ return
+
+ self.context_manager.context_providers[
+ "code"
+ ].adding_highlighted_code = not self.context_manager.context_providers[
+ "code"
+ ].adding_highlighted_code
+ await self.update_subscribers()
+
+ async def set_editing_at_ids(self, ids: List[str]):
+ if "code" not in self.context_manager.context_providers:
+ return
+
+ await self.context_manager.context_providers["code"].set_editing_at_ids(ids)
+ await self.update_subscribers()
+
+ async def _run_singular_step(
+ self, step: "Step", is_future_step: bool = False
+ ) -> Coroutine[Observation, None, None]:
+ # Allow config to set disallowed steps
+ if step.__class__.__name__ in self.continue_sdk.config.disallowed_steps:
+ return None
+
+ # If a parent step is deleted/cancelled, don't run this step
+ # TODO: This was problematic because when running a step after deleting one, it seemed to think that was the parent
+ # last_depth = self._step_depth
+ # i = self.history.current_index
+ # while i >= 0 and self.history.timeline[i].depth == last_depth - 1:
+ # if self.history.timeline[i].deleted:
+ # return None
+ # last_depth = self.history.timeline[i].depth
+ # i -= 1
+
+ # Log the context and step to dev data
+ context_used = await self.context_manager.get_selected_items()
+ posthog_logger.capture_event(
+ "step run", {"step_name": step.name, "params": step.dict()}
+ )
+ step_id = uuid.uuid4().hex
+ dev_data_logger.capture(
+ "step_run",
+ {"step_name": step.name, "params": step.dict(), "step_id": step_id},
+ )
+ dev_data_logger.capture(
+ "context_used",
+ {
+ "context": list(
+ map(
+ lambda item: item.dict(),
+ context_used,
+ )
+ ),
+ "step_id": step_id,
+ },
+ )
+
+ if not is_future_step:
+ # Check manual edits buffer, clear out if needed by creating a ManualEditStep
+ if len(self._manual_edits_buffer) > 0:
+ manualEditsStep = ManualEditStep.from_sequence(
+ self._manual_edits_buffer
+ )
+ self._manual_edits_buffer = []
+ await self._run_singular_step(manualEditsStep)
+
+ # Update history - do this first so we get top-first tree ordering
+ index_of_history_node = self.history.add_node(
+ HistoryNode(
+ step=step,
+ observation=None,
+ depth=self._step_depth,
+ context_used=context_used,
+ )
+ )
+
+ # Call all subscribed callbacks
+ await self.update_subscribers()
+
+ # Try to run step and handle errors
+ self._step_depth += 1
+
+ caught_error = False
+ try:
+ observation = await step(self.continue_sdk)
+ except Exception as e:
+ if (
+ index_of_history_node >= len(self.history.timeline)
+ or self.history.timeline[index_of_history_node].deleted
+ ):
+ # If step was deleted/cancelled, don't show error or allow retry
+ return None
+
+ caught_error = True
+
+ is_continue_custom_exception = (
+ issubclass(e.__class__, ContinueCustomException)
+ or e.__class__.__name__ == ContinueCustomException.__name__
+ )
+
+ error_string = (
+ e.message
+ if is_continue_custom_exception
+ else "\n".join(traceback.format_exception(e))
+ )
+ error_title = (
+ e.title if is_continue_custom_exception else get_error_title(e)
+ )
+
+ # Attach an InternalErrorObservation to the step and unhide it.
+ logger.error(f"Error while running step: \n{error_string}\n{error_title}")
+ posthog_logger.capture_event(
+ "step error",
+ {
+ "error_message": error_string,
+ "error_title": error_title,
+ "step_name": step.name,
+ "params": step.dict(),
+ },
+ )
+
+ observation = InternalErrorObservation(
+ error=error_string, title=error_title
+ )
+
+ # Reveal this step, but hide all of the following steps (its substeps)
+ step_was_hidden = step.hide
+
+ step.hide = False
+ i = self.history.get_current_index()
+ while self.history.timeline[i].step.name != step.name:
+ self.history.timeline[i].step.hide = True
+ i -= 1
+
+ # i is now the index of the step that we want to show/rerun
+ self.history.timeline[i].observation = observation
+ self.history.timeline[i].active = False
+
+ await self.update_subscribers()
+
+ # ContinueCustomException can optionally specify a step to run on the error
+ if is_continue_custom_exception and e.with_step is not None:
+ await self._run_singular_step(e.with_step)
+
+ # Wait for a retry signal and then resume the step
+ self._active = False
+ await self._retry_queue.get(str(i))
+ self._active = True
+ # You might consider a "ignore and continue" button
+ # want it to have same step depth, so have to decrement
+ self._step_depth -= 1
+ copy_step = step.copy()
+ copy_step.hide = step_was_hidden
+ observation = await self._run_singular_step(copy_step)
+ self._step_depth += 1
+
+ self._step_depth -= 1
+
+ # Add observation to history, unless already attached error observation
+ if not caught_error and index_of_history_node < len(self.history.timeline):
+ self.history.timeline[index_of_history_node].observation = observation
+ self.history.timeline[index_of_history_node].active = False
+ await self.update_subscribers()
+
+ # Update its description
+ async def update_description():
+ if self.continue_sdk.config.disable_summaries:
+ return
+
+ description = await step.describe(self.continue_sdk.models)
+ if description is not None:
+ step.description = description
+ # Update subscribers with new description
+ await self.update_subscribers()
+
+ create_async_task(
+ update_description(),
+ on_error=lambda e: self.continue_sdk.run_step(
+ DisplayErrorStep.from_exception(e)
+ ),
+ )
+
+ # Create the session title if not done yet
+ if self.session_info is None or self.session_info.title is None:
+ visible_nodes = list(
+ filter(lambda node: not node.step.hide, self.history.timeline)
+ )
+
+ user_input = None
+ should_create_title = False
+ for visible_node in visible_nodes:
+ if isinstance(visible_node.step, UserInputStep):
+ if user_input is None:
+ user_input = visible_node.step.user_input
+ else:
+ # More than one user input, so don't create title
+ should_create_title = False
+ break
+ elif user_input is None:
+ continue
+ else:
+ # Already have user input, now have the next step
+ should_create_title = True
+ break
+
+ # Only create the title if the step after the first input is done
+ if should_create_title:
+ create_async_task(
+ self.create_title(backup=user_input),
+ on_error=lambda e: self.continue_sdk.run_step(
+ DisplayErrorStep.from_exception(e)
+ ),
+ )
+
+ return observation
+
+ async def run_from_step(self, step: "Step"):
+ # if self._active:
+ # raise RuntimeError("Autopilot is already running")
+ self._active = True
+
+ next_step = step
+ is_future_step = False
+ while not (next_step is None or self._should_halt):
+ if is_future_step:
+ # If future step, then we are replaying and need to delete the step from history so it can be replaced
+ self.history.remove_current_and_substeps()
+
+ await self._run_singular_step(next_step, is_future_step)
+
+ if next_step := self.policy.next(self.continue_sdk.config, self.history):
+ is_future_step = False
+ elif next_step := self.history.take_next_step():
+ is_future_step = True
+ else:
+ next_step = None
+
+ self._active = False
+
+ # Doing this so active can make it to the frontend after steps are done. But want better state syncing tools
+ await self.update_subscribers()
+
+ async def run_from_observation(self, observation: Observation):
+ next_step = self.policy.next(self.continue_sdk.config, self.history)
+ await self.run_from_step(next_step)
+
+ async def run_policy(self):
+ first_step = self.policy.next(self.continue_sdk.config, self.history)
+ await self.run_from_step(first_step)
+
+ async def _request_halt(self):
+ if self._active:
+ self._should_halt = True
+ while self._active:
+ time.sleep(0.1)
+ self._should_halt = False
+ return None
+
+ def set_current_session_title(self, title: str):
+ self.session_info = SessionInfo(
+ title=title,
+ session_id=self.ide.session_id,
+ date_created=str(time.time()),
+ workspace_directory=self.ide.workspace_directory,
+ )
+
+ async def create_title(self, backup: str = None):
+ # Use the first input and first response to create title for session info, and make the session saveable
+ if self.session_info is not None and self.session_info.title is not None:
+ return
+
+ if self.continue_sdk.config.disable_summaries:
+ if backup is not None:
+ title = backup
+ else:
+ title = "New Session"
+ else:
+ chat_history = list(
+ map(lambda x: x.dict(), await self.continue_sdk.get_chat_context())
+ )
+ chat_history_str = template_alpaca_messages(chat_history)
+ title = await self.continue_sdk.models.summarize.complete(
+ f"{chat_history_str}\n\nGive a short title to describe the above chat session. Do not put quotes around the title. Do not use more than 6 words. The title is: ",
+ max_tokens=20,
+ log=False,
+ )
+ title = remove_quotes_and_escapes(title)
+
+ self.set_current_session_title(title)
+ await self.update_subscribers()
+ dev_data_logger.capture("new_session", self.session_info.dict())
+
+ async def accept_user_input(self, user_input: str):
+ self._main_user_input_queue.append(user_input)
+ # await self.update_subscribers()
+
+ if len(self._main_user_input_queue) > 1:
+ return
+
+ # await self._request_halt()
+ # Just run the step that takes user input, and
+ # then up to the policy to decide how to deal with it.
+ self._main_user_input_queue.pop(0)
+ # await self.update_subscribers()
+ await self.run_from_step(UserInputStep(user_input=user_input))
+
+ while len(self._main_user_input_queue) > 0:
+ await self.run_from_step(
+ UserInputStep(user_input=self._main_user_input_queue.pop(0))
+ )
+
+ async def accept_refinement_input(self, user_input: str, index: int):
+ await self._request_halt()
+ await self.reverse_to_index(index)
+ await self.run_from_step(UserInputStep(user_input=user_input))
+
+ async def reject_diff(self, step_index: int):
+ # Hide the edit step and the UserInputStep before it
+ self.history.timeline[step_index].step.hide = True
+ for i in range(step_index - 1, -1, -1):
+ if isinstance(self.history.timeline[i].step, UserInputStep):
+ self.history.timeline[i].step.hide = True
+ break
+ await self.update_subscribers()
+
+ async def select_context_item(self, id: str, query: str):
+ await self.context_manager.select_context_item(id, query)
+ await self.update_subscribers()
+
+ async def select_context_item_at_index(self, id: str, query: str, index: int):
+ # TODO: This is different from how it works for the main input
+ # Ideally still tracked through the ContextProviders
+ # so they can watch for duplicates
+ context_item = await self.context_manager.get_context_item(id, query)
+ if context_item is None:
+ return
+ self.history.timeline[index].context_used.append(context_item)
+ await self.update_subscribers()
+
+ async def set_config_attr(self, key_path: List[str], value: redbaron.RedBaron):
+ edit_config_property(key_path, value)
+ await self.update_subscribers()
+
+ _saved_context_groups: Dict[str, List[ContextItem]] = {}
+
+ def _persist_context_groups(self):
+ context_groups_file = getSavedContextGroupsPath()
+ if os.path.exists(context_groups_file):
+ with open(context_groups_file, "w") as f:
+ dict_to_save = {
+ title: [item.dict() for item in context_items]
+ for title, context_items in self._saved_context_groups.items()
+ }
+ json.dump(dict_to_save, f)
+
+ async def save_context_group(self, title: str, context_items: List[ContextItem]):
+ self._saved_context_groups[title] = context_items
+ await self.update_subscribers()
+
+ # Update saved context groups
+ self._persist_context_groups()
+
+ posthog_logger.capture_event(
+ "save_context_group", {"title": title, "length": len(context_items)}
+ )
+
+ async def select_context_group(self, id: str):
+ if id not in self._saved_context_groups:
+ logger.warning(f"Context group {id} not found")
+ return
+ context_group = self._saved_context_groups[id]
+ await self.context_manager.clear_context()
+ for item in context_group:
+ await self.context_manager.manually_add_context_item(item)
+ await self.update_subscribers()
+
+ posthog_logger.capture_event(
+ "select_context_group", {"title": id, "length": len(context_group)}
+ )
+ dev_data_logger.capture(
+ "select_context_group", {"title": id, "items": context_group}
+ )
+
+ async def delete_context_group(self, id: str):
+ if id not in self._saved_context_groups:
+ logger.warning(f"Context group {id} not found")
+ return
+ del self._saved_context_groups[id]
+ await self.update_subscribers()
+
+ # Update saved context groups
+ self._persist_context_groups()
+
+ posthog_logger.capture_event("delete_context_group", {"title": id})
diff --git a/server/continuedev/core/config.py b/server/continuedev/core/config.py
new file mode 100644
index 00000000..2bbb42cc
--- /dev/null
+++ b/server/continuedev/core/config.py
@@ -0,0 +1,114 @@
+from typing import Dict, List, Optional, Type
+
+from pydantic import BaseModel, Field, validator
+
+from ..libs.llm.openai_free_trial import OpenAIFreeTrial
+from .context import ContextProvider
+from .main import Policy, Step
+from .models import Models
+
+
+class SlashCommand(BaseModel):
+ name: str
+ description: str
+ step: Type[Step]
+ params: Optional[Dict] = {}
+
+ def dict(self, *args, **kwargs):
+ return {
+ "name": self.name,
+ "description": self.description,
+ "params": self.params,
+ "step": self.step.__name__,
+ }
+
+
+class CustomCommand(BaseModel):
+ name: str
+ prompt: str
+ description: str
+
+
+class ContinueConfig(BaseModel):
+ """
+ Continue can be deeply customized by editing the `ContinueConfig` object in `~/.continue/config.py` (`%userprofile%\.continue\config.py` for Windows) on your machine. This class is instantiated from the config file for every new session.
+ """
+
+ steps_on_startup: List[Step] = Field(
+ [],
+ description="Steps that will be automatically run at the beginning of a new session",
+ )
+ disallowed_steps: Optional[List[str]] = Field(
+ [],
+ description="Steps that are not allowed to be run, and will be skipped if attempted",
+ )
+ allow_anonymous_telemetry: Optional[bool] = Field(
+ True,
+ description="If this field is set to True, we will collect anonymous telemetry as described in the documentation page on telemetry. If set to False, we will not collect any data.",
+ )
+ models: Models = Field(
+ Models(
+ default=OpenAIFreeTrial(model="gpt-4"),
+ summarize=OpenAIFreeTrial(model="gpt-3.5-turbo"),
+ ),
+ description="Configuration for the models used by Continue. Read more about how to configure models in the documentation.",
+ )
+ temperature: Optional[float] = Field(
+ 0.5,
+ description="The temperature parameter for sampling from the LLM. Higher temperatures will result in more random output, while lower temperatures will result in more predictable output. This value ranges from 0 to 1.",
+ )
+ custom_commands: Optional[List[CustomCommand]] = Field(
+ [
+ CustomCommand(
+ name="test",
+ description="This is an example custom command. Use /config to edit it and create more",
+ prompt="Write a comprehensive set of unit tests for the selected code. It should setup, run tests that check for correctness including important edge cases, and teardown. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.",
+ )
+ ],
+ description="An array of custom commands that allow you to reuse prompts. Each has name, description, and prompt properties. When you enter /<name> in the text input, it will act as a shortcut to the prompt.",
+ )
+ slash_commands: Optional[List[SlashCommand]] = Field(
+ [],
+ description="An array of slash commands that let you map custom Steps to a shortcut.",
+ )
+ on_traceback: Optional[Step] = Field(
+ None,
+ description="The step that will be run when a traceback is detected (when you use the shortcut cmd+shift+R)",
+ )
+ system_message: Optional[str] = Field(
+ None, description="A system message that will always be followed by the LLM"
+ )
+ policy_override: Optional[Policy] = Field(
+ None,
+ description="A Policy object that can be used to override the default behavior of Continue, for example in order to build custom agents that take multiple steps at a time.",
+ )
+ context_providers: List[ContextProvider] = Field(
+ [],
+ description="A list of ContextProvider objects that can be used to provide context to the LLM by typing '@'. Read more about ContextProviders in the documentation.",
+ )
+ user_token: Optional[str] = Field(
+ None, description="An optional token to identify the user."
+ )
+ data_server_url: Optional[str] = Field(
+ "https://us-west1-autodebug.cloudfunctions.net",
+ description="The URL of the server where development data is sent. No data is sent unless a valid user token is provided.",
+ )
+ disable_summaries: Optional[bool] = Field(
+ False,
+ description="If set to `True`, Continue will not generate summaries for each Step. This can be useful if you want to save on compute.",
+ )
+
+ @validator("temperature", pre=True)
+ def temperature_validator(cls, v):
+ return max(0.0, min(1.0, v))
+
+ @staticmethod
+ def from_filepath(filepath: str) -> "ContinueConfig":
+ # Use importlib to load the config file config.py at the given path
+ import importlib.util
+
+ spec = importlib.util.spec_from_file_location("config", filepath)
+ config = importlib.util.module_from_spec(spec)
+ spec.loader.exec_module(config)
+
+ return config.config
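The `ContinueConfig` docstring above notes that the class is instantiated from `~/.continue/config.py`, and `from_filepath` reads a module-level `config` attribute from that file. A minimal sketch of such a config file, using only classes introduced in this commit (field values are illustrative):

```python
# Hypothetical ~/.continue/config.py loaded by ContinueConfig.from_filepath().
from continuedev.core.config import ContinueConfig, CustomCommand
from continuedev.core.models import Models
from continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial

config = ContinueConfig(
    allow_anonymous_telemetry=False,
    temperature=0.3,
    models=Models(
        default=OpenAIFreeTrial(model="gpt-4"),
        summarize=OpenAIFreeTrial(model="gpt-3.5-turbo"),
    ),
    custom_commands=[
        CustomCommand(
            name="check",
            description="Review the selected code for likely bugs",
            prompt="Read the selected code and list any likely bugs or edge cases.",
        )
    ],
)
```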
diff --git a/server/continuedev/core/context.py b/server/continuedev/core/context.py
new file mode 100644
index 00000000..547a1593
--- /dev/null
+++ b/server/continuedev/core/context.py
@@ -0,0 +1,516 @@
+import asyncio
+import time
+from abc import abstractmethod
+from typing import Awaitable, Callable, Dict, List, Optional
+
+from meilisearch_python_async import Client
+from pydantic import BaseModel, Field
+
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.logging import logger
+from ..libs.util.telemetry import posthog_logger
+from ..server.meilisearch_server import (
+ check_meilisearch_running,
+ get_meilisearch_url,
+ poll_meilisearch_running,
+ restart_meilisearch,
+ start_meilisearch,
+)
+from .main import (
+ ChatMessage,
+ ContextItem,
+ ContextItemDescription,
+ ContextItemId,
+ ContextProviderDescription,
+)
+
+
+class ContinueSDK(BaseModel):
+ """To avoid circular imports"""
+
+ ...
+
+
+SEARCH_INDEX_NAME = "continue_context_items"
+
+
+class ContextProvider(BaseModel):
+ """
+ The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'.
+ When you type '@', the context provider will be asked to populate a list of options.
+ These options will be updated on each keystroke.
+ When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object).
+ """
+
+ title: str = Field(
+ ...,
+ description="The title of the ContextProvider. This is what must be typed in the input to trigger the ContextProvider.",
+ )
+ sdk: ContinueSDK = Field(
+ None, description="The ContinueSDK instance accessible by the ContextProvider"
+ )
+ delete_documents: Callable[[List[str]], Awaitable] = Field(
+ None, description="Function to delete documents"
+ )
+ update_documents: Callable[[List[ContextItem], str], Awaitable] = Field(
+ None, description="Function to update documents"
+ )
+
+ display_title: str = Field(
+ ...,
+ description="The display title of the ContextProvider shown in the dropdown menu",
+ )
+ description: str = Field(
+ ...,
+ description="A description of the ContextProvider displayed in the dropdown menu",
+ )
+ dynamic: bool = Field(
+ ..., description="Indicates whether the ContextProvider is dynamic"
+ )
+ requires_query: bool = Field(
+ False,
+ description="Indicates whether the ContextProvider requires a query. For example, the SearchContextProvider requires you to type '@search <STRING_TO_SEARCH>'. This will change the behavior of the UI so that it can indicate the expectation for a query.",
+ )
+
+ selected_items: List[ContextItem] = Field(
+ [], description="List of selected items in the ContextProvider"
+ )
+
+ def dict(self, *args, **kwargs):
+ original_dict = super().dict(*args, **kwargs)
+ original_dict.pop("sdk", None)
+ original_dict.pop("delete_documents", None)
+ original_dict.pop("update_documents", None)
+ return original_dict
+
+ async def start(self, sdk: ContinueSDK, delete_documents, update_documents):
+ """
+ Starts the context provider.
+
+ Default implementation sets the sdk.
+ """
+ self.sdk = sdk
+ self.delete_documents = delete_documents
+ self.update_documents = update_documents
+
+ async def get_selected_items(self) -> List[ContextItem]:
+ """
+ Returns all of the selected ContextItems.
+
+ Default implementation simply returns self.selected_items.
+
+ Other implementations may add an async processing step.
+ """
+ return self.selected_items
+
+ @abstractmethod
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ """
+ Provide documents for search index. This is run on startup.
+
+ This is the only method that must be implemented.
+ """
+
+ async def get_chat_messages(self) -> List[ChatMessage]:
+ """
+ Returns all of the chat messages for the context provider.
+
+ Default implementation has a string template.
+ """
+ return [
+ ChatMessage(
+ role="user",
+ content=f"{item.description.name}: {item.description.description}\n\n{item.content}",
+ summary=item.description.description,
+ )
+ for item in await self.get_selected_items()
+ ]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ """
+ Returns the ContextItem with the given id.
+
+ Default implementation uses the search index to get the item.
+ """
+ async with Client(get_meilisearch_url()) as search_client:
+ try:
+ result = await search_client.index(SEARCH_INDEX_NAME).get_document(
+ id.to_string()
+ )
+ return ContextItem(
+ description=ContextItemDescription(
+ name=result["name"], description=result["description"], id=id
+ ),
+ content=result["content"],
+ )
+ except Exception as e:
+ logger.warning(f"Error while retrieving document from meilisearch: {e}")
+
+ return None
+
+ async def delete_context_with_ids(self, ids: List[ContextItemId]):
+ """
+ Deletes the ContextItems with the given IDs, lets ContextProviders recalculate.
+
+ Default implementation simply deletes those with the given ids.
+ """
+ id_strings = {id.to_string() for id in ids}
+ self.selected_items = list(
+ filter(
+ lambda item: item.description.id.to_string() not in id_strings,
+ self.selected_items,
+ )
+ )
+
+ async def clear_context(self):
+ """
+ Clears all context.
+
+ Default implementation simply clears the selected items.
+ """
+ self.selected_items = []
+
+ async def add_context_item(self, id: ContextItemId, query: str):
+ """
+ Adds the given ContextItem to the list of ContextItems.
+
+ Default implementation simply appends the item, not allowing duplicates.
+
+ This method also allows you not to have to load all of the information until an item is selected.
+ """
+
+ # Don't add duplicate context
+ for item in self.selected_items:
+ if item.description.id.item_id == id.item_id:
+ return
+
+ if new_item := await self.get_item(id, query):
+ self.selected_items.append(new_item)
+
+ async def manually_add_context_item(self, context_item: ContextItem):
+ for item in self.selected_items:
+ if item.description.id.item_id == context_item.description.id.item_id:
+ return
+
+ self.selected_items.append(context_item)
+
+
+class ContextManager:
+ """
+ The context manager is responsible for storing the context to be passed to the LLM, including
+ - ContextItems (highlighted code, GitHub Issues, etc.)
+ - ChatMessages in the history
+ - System Message
+ - Functions
+
+ It is responsible for compiling all of this information into a single prompt without exceeding the token limit.
+ """
+
+ def get_provider_descriptions(self) -> List[ContextProviderDescription]:
+ """
+ Returns a list of ContextProviderDescriptions for each context provider.
+ """
+ return [
+ ContextProviderDescription(
+ title=provider.title,
+ display_title=provider.display_title,
+ description=provider.description,
+ dynamic=provider.dynamic,
+ requires_query=provider.requires_query,
+ )
+ for provider in self.context_providers.values()
+ if provider.title != "code"
+ ]
+
+ async def get_selected_items(self) -> List[ContextItem]:
+ """
+ Returns all of the selected ContextItems.
+ """
+ return sum(
+ [
+ await provider.get_selected_items()
+ for provider in self.context_providers.values()
+ ],
+ [],
+ )
+
+ async def get_chat_messages(self) -> List[ChatMessage]:
+ """
+ Returns chat messages from each provider.
+ """
+ return sum(
+ [
+ await provider.get_chat_messages()
+ for provider in self.context_providers.values()
+ ],
+ [],
+ )
+
+ def __init__(self):
+ self.context_providers = {}
+ self.provider_titles = set()
+
+ async def start(
+ self,
+ context_providers: List[ContextProvider],
+ sdk: ContinueSDK,
+ only_reloading: bool = False,
+ ):
+ """
+ Starts the context manager.
+ """
+ new_context_providers = {
+ provider.title: provider
+ for provider in context_providers
+ if provider.title not in self.provider_titles
+ }
+
+ self.context_providers = {
+ provider.title: provider for provider in context_providers
+ }
+ self.provider_titles = {provider.title for provider in context_providers}
+
+ for provider in context_providers:
+ await provider.start(
+ sdk,
+ ContextManager.delete_documents,
+ ContextManager.update_documents,
+ )
+
+ async def on_err(e):
+ logger.warning(f"Error loading meilisearch index: {e}")
+
+ # Start MeiliSearch in the background without blocking
+        async def load_index(providers_to_load: Dict[str, ContextProvider]):
+ running = await check_meilisearch_running()
+ if not running:
+ await start_meilisearch()
+ try:
+ await asyncio.wait_for(poll_meilisearch_running(), timeout=20)
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Meilisearch did not start in less than 20 seconds. Stopping polling."
+ )
+ return
+
+ logger.debug("Loading Meilisearch index...")
+ await self.load_index(
+ sdk.ide.workspace_directory, providers_to_load=providers_to_load
+ )
+ logger.debug("Loaded Meilisearch index")
+
+        # Pass a dict of providers in both cases so load_index can call .items() on it
+        providers_to_load = (
+            new_context_providers if only_reloading else self.context_providers
+        )
+ create_async_task(load_index(providers_to_load), on_err)
+
+ @staticmethod
+ async def update_documents(context_items: List[ContextItem], workspace_dir: str):
+ """
+ Updates the documents in the search index.
+ """
+ documents = [
+ {
+ "id": item.description.id.to_string(),
+ "name": item.description.name,
+ "description": item.description.description,
+ "content": item.content,
+ "workspace_dir": workspace_dir,
+ "provider_name": item.description.id.provider_title,
+ }
+ for item in context_items
+ ]
+ async with Client(get_meilisearch_url()) as search_client:
+
+ async def add_docs():
+ index = await search_client.get_index(SEARCH_INDEX_NAME)
+ await index.add_documents(documents or [])
+
+ try:
+ await asyncio.wait_for(add_docs(), timeout=20)
+ except asyncio.TimeoutError:
+ logger.warning("Failed to add document to meilisearch in 20 seconds")
+ except Exception as e:
+ logger.warning(f"Error adding document to meilisearch: {e}")
+
+ @staticmethod
+ async def delete_documents(ids):
+ """
+ Deletes the documents in the search index.
+ """
+ async with Client(get_meilisearch_url()) as search_client:
+ try:
+ await asyncio.wait_for(
+ search_client.index(SEARCH_INDEX_NAME).delete_documents(ids),
+ timeout=20,
+ )
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Failed to delete document from meilisearch in 20 seconds"
+ )
+ except Exception as e:
+ logger.warning(f"Error deleting document from meilisearch: {e}")
+
+ async def load_index(
+ self,
+ workspace_dir: str,
+ should_retry: bool = True,
+        providers_to_load: Optional[Dict[str, ContextProvider]] = None,
+ ):
+ try:
+ async with Client(get_meilisearch_url()) as search_client:
+ # First, create the index if it doesn't exist
+ # The index is currently shared by all workspaces
+ await search_client.create_index(SEARCH_INDEX_NAME)
+ globalSearchIndex = await search_client.get_index(SEARCH_INDEX_NAME)
+ await globalSearchIndex.update_ranking_rules(
+ ["attribute", "words", "typo", "proximity", "sort", "exactness"]
+ )
+ await globalSearchIndex.update_searchable_attributes(
+ ["name", "description"]
+ )
+ await globalSearchIndex.update_filterable_attributes(
+ ["workspace_dir", "provider_name"]
+ )
+
+ async def load_context_provider(provider: ContextProvider):
+ context_items = await provider.provide_context_items(workspace_dir)
+ documents = [
+ {
+ "id": item.description.id.to_string(),
+ "name": item.description.name,
+ "description": item.description.description,
+ "content": item.content,
+ "workspace_dir": workspace_dir,
+ "provider_name": provider.title,
+ }
+ for item in context_items
+ ]
+ if len(documents) > 0:
+ await globalSearchIndex.add_documents(documents)
+
+ return len(documents)
+
+ async def safe_load(provider: ContextProvider):
+ ti = time.time()
+ try:
+ num_documents = await asyncio.wait_for(
+ load_context_provider(provider), timeout=20
+ )
+ except asyncio.TimeoutError:
+ logger.warning(
+ f"Failed to add documents to meilisearch for context provider {provider.__class__.__name__} in 20 seconds"
+ )
+ return
+ except Exception as e:
+ logger.warning(
+ f"Error adding documents to meilisearch for context provider {provider.__class__.__name__}: {e}"
+ )
+ return
+
+ tf = time.time()
+ logger.debug(
+ f"Loaded {num_documents} documents into meilisearch in {tf - ti} seconds for context provider {provider.title}"
+ )
+
+ tasks = [
+ safe_load(provider)
+ for _, provider in (
+ providers_to_load or self.context_providers
+ ).items()
+ ]
+ await asyncio.wait_for(asyncio.gather(*tasks), timeout=20)
+
+ except Exception as e:
+ logger.debug(f"Error loading meilisearch index: {e}")
+ if should_retry:
+ await restart_meilisearch()
+ try:
+ await asyncio.wait_for(poll_meilisearch_running(), timeout=20)
+ except asyncio.TimeoutError:
+ logger.warning(
+ "Meilisearch did not restart in less than 20 seconds. Stopping polling."
+ )
+ await self.load_index(workspace_dir, False)
+
+ async def select_context_item(self, id: str, query: str):
+ """
+ Selects the ContextItem with the given id.
+ """
+ id: ContextItemId = ContextItemId.from_string(id)
+ if id.provider_title not in self.provider_titles:
+ raise ValueError(
+ f"Context provider with title {id.provider_title} not found"
+ )
+
+ posthog_logger.capture_event(
+ "select_context_item",
+ {
+ "provider_title": id.provider_title,
+ "item_id": id.item_id,
+ "query": query,
+ },
+ )
+ dev_data_logger.capture(
+ "select_context_item",
+ {
+ "provider_title": id.provider_title,
+ "item_id": id.item_id,
+ "query": query,
+ },
+ )
+ await self.context_providers[id.provider_title].add_context_item(id, query)
+
+ async def get_context_item(self, id: str, query: str) -> ContextItem:
+ """
+ Returns the ContextItem with the given id.
+ """
+ id: ContextItemId = ContextItemId.from_string(id)
+ if id.provider_title not in self.provider_titles:
+ raise ValueError(
+ f"Context provider with title {id.provider_title} not found"
+ )
+
+ return await self.context_providers[id.provider_title].get_item(id, query)
+
+ async def delete_context_with_ids(self, ids: List[str]):
+ """
+        Deletes the ContextItems with the given IDs and lets the affected ContextProviders recalculate.
+ """
+
+ # Group by provider title
+ provider_title_to_ids: Dict[str, List[ContextItemId]] = {}
+ for id in ids:
+ id: ContextItemId = ContextItemId.from_string(id)
+ if id.provider_title not in provider_title_to_ids:
+ provider_title_to_ids[id.provider_title] = []
+ provider_title_to_ids[id.provider_title].append(id)
+
+ # Recalculate context for each updated provider
+ for provider_title, ids in provider_title_to_ids.items():
+ await self.context_providers[provider_title].delete_context_with_ids(ids)
+
+ async def clear_context(self):
+ """
+ Clears all context.
+ """
+        for provider in self.context_providers.values():
+            await provider.clear_context()
+
+ async def manually_add_context_item(self, item: ContextItem):
+ """
+ Adds the given ContextItem to the list of ContextItems.
+ """
+ if item.description.id.provider_title not in self.provider_titles:
+ return
+
+ await self.context_providers[
+ item.description.id.provider_title
+ ].manually_add_context_item(item)
+
+
+"""
+Should define "ArgsTransformer" and "PromptTransformer" classes for the different LLMs. A standard way for them to ingest the
+same format of prompts so you don't have to redo all of this logic.
+"""
diff --git a/server/continuedev/core/env.py b/server/continuedev/core/env.py
new file mode 100644
index 00000000..60b86538
--- /dev/null
+++ b/server/continuedev/core/env.py
@@ -0,0 +1,31 @@
+import os
+
+from dotenv import load_dotenv
+
+
+def get_env_var(var_name: str):
+ load_dotenv()
+ return os.getenv(var_name)
+
+
+def make_sure_env_exists():
+ if not os.path.exists(".env"):
+ with open(".env", "w") as f:
+ f.write("")
+
+
+def save_env_var(var_name: str, var_value: str):
+ make_sure_env_exists()
+
+ with open(".env", "r") as f:
+ lines = f.readlines()
+ with open(".env", "w") as f:
+ values = {}
+        for line in lines:
+            if "=" not in line:
+                continue
+            key, value = line.split("=", 1)
+            value = value.strip().replace('"', "")
+            values[key] = value
+
+ values[var_name] = var_value
+ for key, value in values.items():
+ f.write(f'{key}="{value}"\n')
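+
+
+# Example (illustrative): save_env_var("MY_TOKEN", "abc123") rewrites .env so it
+# contains the line MY_TOKEN="abc123" while preserving any other keys already present.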
diff --git a/server/continuedev/core/lsp.py b/server/continuedev/core/lsp.py
new file mode 100644
index 00000000..fc26c85c
--- /dev/null
+++ b/server/continuedev/core/lsp.py
@@ -0,0 +1,416 @@
+import asyncio
+import threading
+from typing import List, Literal, Optional
+
+import aiohttp
+from pydantic import BaseModel
+
+from ..models.filesystem import RangeInFile
+from ..models.main import Position, Range
+
+
+def filepath_to_uri(filename: str) -> str:
+ return f"file://{filename}"
+
+
+def uri_to_filepath(uri: str) -> str:
+ if uri.startswith("file://"):
+ return uri[7:]
+ else:
+ return uri
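+
+
+# For example, filepath_to_uri("/workspace/project/main.py") returns
+# "file:///workspace/project/main.py"; uri_to_filepath reverses the mapping.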
+
+
+PORT = 8099
+
+
+class LSPClient:
+ ready: bool = False
+ lock: asyncio.Lock = asyncio.Lock()
+
+ def __init__(self, host: str, port: int, workspace_paths: List[str]):
+ self.host = host
+ self.port = port
+ self.session = aiohttp.ClientSession()
+ self.next_id = 0
+ self.workspace_paths = workspace_paths
+
+ async def connect(self):
+ print("Connecting")
+ self.ws = await self.session.ws_connect(f"ws://{self.host}:{self.port}/")
+ print("Connected")
+ self.ready = True
+
+ async def send(self, data):
+ await self.ws.send_json(data)
+
+ async def recv(self):
+ await self.lock.acquire()
+
+ try:
+ return await self.ws.receive_json()
+ finally:
+ self.lock.release()
+
+ async def close(self):
+ await self.ws.close()
+ await self.session.close()
+
+ async def call_method(self, method_name, **kwargs):
+ body = {
+ "jsonrpc": "2.0",
+ "id": self.next_id,
+ "method": method_name,
+ "params": kwargs,
+ }
+ self.next_id += 1
+ await self.send(body)
+ response = await self.recv()
+ return response
+
+ async def initialize(self):
+ initialization_args = {
+ "capabilities": {
+ "textDocument": {
+ "codeAction": {"dynamicRegistration": True},
+ "codeLens": {"dynamicRegistration": True},
+ "colorProvider": {"dynamicRegistration": True},
+ "completion": {
+ "completionItem": {
+ "commitCharactersSupport": True,
+ "documentationFormat": ["markdown", "plaintext"],
+ "snippetSupport": True,
+ },
+ "completionItemKind": {
+ "valueSet": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25,
+ ]
+ },
+ "contextSupport": True,
+ "dynamicRegistration": True,
+ },
+ "definition": {"dynamicRegistration": True},
+ "documentHighlight": {"dynamicRegistration": True},
+ "documentLink": {"dynamicRegistration": True},
+ "documentSymbol": {
+ "dynamicRegistration": True,
+ "symbolKind": {
+ "valueSet": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25,
+ 26,
+ ]
+ },
+ },
+ "formatting": {"dynamicRegistration": True},
+ "hover": {
+ "contentFormat": ["markdown", "plaintext"],
+ "dynamicRegistration": True,
+ },
+ "implementation": {"dynamicRegistration": True},
+ "onTypeFormatting": {"dynamicRegistration": True},
+ "publishDiagnostics": {"relatedInformation": True},
+ "rangeFormatting": {"dynamicRegistration": True},
+ "references": {"dynamicRegistration": True},
+ "rename": {"dynamicRegistration": True},
+ "signatureHelp": {
+ "dynamicRegistration": True,
+ "signatureInformation": {
+ "documentationFormat": ["markdown", "plaintext"]
+ },
+ },
+ "synchronization": {
+ "didSave": True,
+ "dynamicRegistration": True,
+ "willSave": True,
+ "willSaveWaitUntil": True,
+ },
+ "typeDefinition": {"dynamicRegistration": True},
+ },
+ "workspace": {
+ "applyEdit": True,
+ "configuration": True,
+ "didChangeConfiguration": {"dynamicRegistration": True},
+ "didChangeWatchedFiles": {"dynamicRegistration": True},
+ "executeCommand": {"dynamicRegistration": True},
+ "symbol": {
+ "dynamicRegistration": True,
+ "symbolKind": {
+ "valueSet": [
+ 1,
+ 2,
+ 3,
+ 4,
+ 5,
+ 6,
+ 7,
+ 8,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 16,
+ 17,
+ 18,
+ 19,
+ 20,
+ 21,
+ 22,
+ 23,
+ 24,
+ 25,
+ 26,
+ ]
+ },
+ },
+ "workspaceEdit": {"documentChanges": True},
+ "workspaceFolders": True,
+ },
+ },
+ "processId": 1234,
+ "rootPath": None,
+ "rootUri": filepath_to_uri(self.workspace_paths[0]),
+ "initializationOptions": {},
+ "trace": "off",
+ "workspaceFolders": [
+ {
+ "uri": filepath_to_uri(workspacePath),
+ "name": workspacePath.split("/")[-1],
+ }
+ for workspacePath in self.workspace_paths
+ ],
+ }
+ return await self.call_method("initialize", **initialization_args)
+
+ async def goto_definition(self, filepath: str, position: Position):
+ return await self.call_method(
+ "textDocument/definition",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ position=position.dict(),
+ )
+
+ async def document_symbol(self, filepath: str):
+ return await self.call_method(
+ "textDocument/documentSymbol",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ )
+
+ async def find_references(
+ self, filepath: str, position: Position, include_declaration: bool = False
+ ):
+ return await self.call_method(
+ "textDocument/references",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ position=position.dict(),
+ context={"includeDeclaration": include_declaration},
+ )
+
+ async def folding_range(self, filepath: str):
+ response = await self.call_method(
+ "textDocument/foldingRange",
+ textDocument={"uri": filepath_to_uri(filepath)},
+ )
+ return response["result"]
+
+
+async def start_language_server() -> threading.Thread:
+ """Manually start the python language server. Not used currently."""
+ raise NotImplementedError()
+ # try:
+ # kill_proc(PORT)
+ # thread = threading.Thread(
+ # target=start_ws_lang_server,
+ # args=(PORT, False, PythonLSPServer),
+ # )
+ # thread.daemon = True
+ # thread.start()
+
+ # except Exception as e:
+ # logger.warning("Could not start TCP server: %s", e)
+
+ # await asyncio.sleep(2)
+
+ # return thread
+
+
+class DocumentSymbol(BaseModel):
+ name: str
+ containerName: Optional[str] = None
+ kind: int
+ location: RangeInFile
+
+
+class FoldingRange(BaseModel):
+ range: Range
+ kind: Optional[Literal["comment", "imports", "region"]] = None
+
+
+class ContinueLSPClient(BaseModel):
+ workspace_dir: str
+
+ lsp_client: LSPClient = None
+ lsp_thread: Optional[threading.Thread] = None
+
+ @property
+ def ready(self):
+ if self.lsp_client is None:
+ return False
+ return self.lsp_client.ready
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def dict(self, **kwargs):
+ original_dict = super().dict(**kwargs)
+ original_dict.pop("lsp_client", None)
+ return original_dict
+
+ async def start(self):
+ self.lsp_thread = await start_language_server()
+ self.lsp_client = LSPClient("localhost", PORT, [self.workspace_dir])
+ await self.lsp_client.connect()
+ await self.lsp_client.initialize()
+
+ async def stop(self):
+ await self.lsp_client.close()
+ if self.lsp_thread:
+ self.lsp_thread.join()
+
+ def location_to_range_in_file(self, location):
+ return RangeInFile(
+ filepath=uri_to_filepath(location["uri"]),
+ range=Range.from_shorthand(
+ location["range"]["start"]["line"],
+ location["range"]["start"]["character"],
+ location["range"]["end"]["line"],
+ location["range"]["end"]["character"],
+ ),
+ )
+
+ async def goto_definition(
+ self, position: Position, filename: str
+ ) -> List[RangeInFile]:
+        response = await self.lsp_client.goto_definition(
+            filename,
+            position,
+        )
+        return [self.location_to_range_in_file(x) for x in response["result"]]
+
+ async def find_references(
+ self, position: Position, filename: str, include_declaration: bool = False
+ ) -> List[RangeInFile]:
+ response = await self.lsp_client.find_references(
+ filename,
+ position,
+ include_declaration=include_declaration,
+ )
+ return [self.location_to_range_in_file(x) for x in response["result"]]
+
+ async def document_symbol(self, filepath: str) -> List:
+ response = await self.lsp_client.document_symbol(filepath)
+ return [
+ DocumentSymbol(
+ name=x["name"],
+                containerName=x.get("containerName"),
+ kind=x["kind"],
+ location=self.location_to_range_in_file(x["location"]),
+ )
+ for x in response["result"]
+ ]
+
+ async def folding_range(self, filepath: str) -> List[FoldingRange]:
+ response = await self.lsp_client.folding_range(filepath)
+
+ return [
+ FoldingRange(
+ range=Range.from_shorthand(
+ x["startLine"],
+ x.get("startCharacter", 0),
+ x["endLine"] if "endCharacter" in x else x["endLine"] + 1,
+ x.get("endCharacter", 0),
+ ),
+ kind=x.get("kind"),
+ )
+ for x in response
+ ]
+
+ async def get_enclosing_folding_range_of_position(
+ self, position: Position, filepath: str
+ ) -> Optional[FoldingRange]:
+ ranges = await self.folding_range(filepath)
+
+ max_start_position = Position(line=0, character=0)
+ max_range = None
+ for r in ranges:
+ if r.range.contains(position):
+ if r.range.start > max_start_position:
+ max_start_position = r.range.start
+ max_range = r
+
+ return max_range
+
+ async def get_enclosing_folding_range(
+ self, range_in_file: RangeInFile
+ ) -> Optional[FoldingRange]:
+ ranges = await self.folding_range(range_in_file.filepath)
+
+ max_start_position = Position(line=0, character=0)
+ max_range = None
+ for r in ranges:
+ if r.range.contains(range_in_file.range.start) and r.range.contains(
+ range_in_file.range.end
+ ):
+ if r.range.start > max_start_position:
+ max_start_position = r.range.start
+ max_range = r
+
+ return max_range
diff --git a/server/continuedev/core/main.py b/server/continuedev/core/main.py
new file mode 100644
index 00000000..617a5aaa
--- /dev/null
+++ b/server/continuedev/core/main.py
@@ -0,0 +1,437 @@
+import json
+from typing import Any, Coroutine, Dict, List, Literal, Optional, Union
+
+from pydantic import BaseModel, validator
+from pydantic.schema import schema
+
+from ..models.main import ContinueBaseModel
+from .observation import Observation
+
+ChatMessageRole = Literal["assistant", "user", "system", "function"]
+
+
+class FunctionCall(ContinueBaseModel):
+ name: str
+ arguments: str
+
+
+class ChatMessage(ContinueBaseModel):
+ role: ChatMessageRole
+ content: Union[str, None] = None
+ name: Union[str, None] = None
+    # A summary for pruning chat context to fit the context window. Often the Step name.
+ summary: str
+ function_call: Union[FunctionCall, None] = None
+
+ def to_dict(self, with_functions: bool) -> Dict:
+ d = self.dict()
+ del d["summary"]
+ if d["function_call"] is not None:
+ d["function_call"]["name"] = d["function_call"]["name"].replace(" ", "")
+
+ if d["content"] is None:
+ d["content"] = ""
+ for key, value in list(d.items()):
+ if value is None:
+ del d[key]
+
+ if not with_functions:
+ if d["role"] == "function":
+ d["role"] = "assistant"
+ if "name" in d:
+ del d["name"]
+ if "function_call" in d:
+ del d["function_call"]
+ return d
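+
+    # For example, with with_functions=False a message such as
+    # ChatMessage(role="function", name="SomeStep", content=None, summary="...")
+    # flattens to {"role": "assistant", "content": ""}, so it can be sent to models
+    # without function-calling support.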
+
+
+def resolve_refs(schema_data):
+ def traverse(obj):
+ if isinstance(obj, dict):
+ if "$ref" in obj:
+ ref = obj["$ref"]
+ parts = ref.split("/")
+ ref_obj = schema_data
+ for part in parts[1:]:
+ ref_obj = ref_obj[part]
+ return traverse(ref_obj)
+ else:
+ for key, value in obj.items():
+ obj[key] = traverse(value)
+ elif isinstance(obj, list):
+ for i in range(len(obj)):
+ obj[i] = traverse(obj[i])
+ return obj
+
+ return traverse(schema_data)
+
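+# For example (illustrative input): given {"definitions": {"Foo": {"type": "object"}},
+# "properties": {"foo": {"$ref": "#/definitions/Foo"}}}, resolve_refs replaces the
+# "$ref" entry with the referenced {"type": "object"} definition in place.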
+
+unincluded_parameters = [
+ "system_message",
+ "chat_context",
+ "manage_own_chat_context",
+ "hide",
+ "name",
+ "description",
+]
+
+
+def step_to_json_schema(step) -> str:
+ pydantic_class = step.__class__
+ schema_data = schema([pydantic_class])
+ resolved_schema = resolve_refs(schema_data)
+ parameters = resolved_schema["definitions"][pydantic_class.__name__]
+ for parameter in unincluded_parameters:
+ if parameter in parameters["properties"]:
+ del parameters["properties"][parameter]
+ return {
+ "name": step.name.replace(" ", ""),
+ "description": step.description or "",
+ "parameters": parameters,
+ }
+
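+# For example (hypothetical step): a CreateFileStep whose name is "Create File Step"
+# becomes {"name": "CreateFileStep", "description": ..., "parameters": ...}, with the
+# parameters schema derived from the step's pydantic fields minus unincluded_parameters.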
+
+def step_to_fn_call_arguments(step: "Step") -> str:
+ args = step.dict()
+ for parameter in unincluded_parameters:
+ if parameter in args:
+ del args[parameter]
+ return json.dumps(args)
+
+
+class HistoryNode(ContinueBaseModel):
+ """A point in history, a list of which make up History"""
+
+ step: "Step"
+ observation: Union[Observation, None]
+ depth: int
+ deleted: bool = False
+ active: bool = True
+ logs: List[str] = []
+ context_used: List["ContextItem"] = []
+
+ def to_chat_messages(self) -> List[ChatMessage]:
+ if self.step.description is None or self.step.manage_own_chat_context:
+ return self.step.chat_context
+ return self.step.chat_context + [
+ ChatMessage(
+ role="assistant",
+ name=self.step.__class__.__name__,
+ content=self.step.description or f"Ran function {self.step.name}",
+ summary=f"Called function {self.step.name}",
+ )
+ ]
+
+
+class History(ContinueBaseModel):
+ """A history of steps taken and their results"""
+
+ timeline: List[HistoryNode]
+ current_index: int
+
+ def to_chat_history(self) -> List[ChatMessage]:
+ msgs = []
+ for node in self.timeline:
+ if not node.step.hide:
+ msgs += node.to_chat_messages()
+ return msgs
+
+ def add_node(self, node: HistoryNode) -> int:
+ """Add node and return the index where it was added"""
+ self.timeline.insert(self.current_index + 1, node)
+ self.current_index += 1
+ return self.current_index
+
+ def get_current(self) -> Union[HistoryNode, None]:
+ if self.current_index < 0:
+ return None
+ return self.timeline[self.current_index]
+
+ def get_last_at_depth(
+ self, depth: int, include_current: bool = False
+ ) -> Union[HistoryNode, None]:
+ i = self.current_index if include_current else self.current_index - 1
+ while i >= 0:
+ if (
+ self.timeline[i].depth == depth
+ and type(self.timeline[i].step).__name__ != "ManualEditStep"
+ ):
+ return self.timeline[i]
+ i -= 1
+ return None
+
+ def get_last_at_same_depth(self) -> Union[HistoryNode, None]:
+ return self.get_last_at_depth(self.get_current().depth)
+
+ def remove_current_and_substeps(self):
+ self.timeline.pop(self.current_index)
+ while self.get_current() is not None and self.get_current().depth > 0:
+ self.timeline.pop(self.current_index)
+
+ def take_next_step(self) -> Union["Step", None]:
+ if self.has_future():
+ self.current_index += 1
+ current_state = self.get_current()
+ if current_state is None:
+ return None
+ return current_state.step
+ return None
+
+ def get_current_index(self) -> int:
+ return self.current_index
+
+ def has_future(self) -> bool:
+ return self.current_index < len(self.timeline) - 1
+
+ def step_back(self):
+ self.current_index -= 1
+
+ def last_observation(self) -> Union[Observation, None]:
+ state = self.get_last_at_same_depth()
+ if state is None:
+ return None
+ return state.observation
+
+ def pop_step(self, index: int = None) -> Union[HistoryNode, None]:
+ index = index if index is not None else self.current_index
+ if index < 0 or self.current_index < 0:
+ return None
+
+ node = self.timeline.pop(index)
+
+ if index <= self.current_index:
+ self.current_index -= 1
+
+ return node.step
+
+ @classmethod
+ def from_empty(cls):
+ return cls(timeline=[], current_index=-1)
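+
+    # For example, starting from History.from_empty() (current_index == -1), the first
+    # add_node() call inserts at index 0 and advances current_index to 0.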
+
+
+class SlashCommandDescription(ContinueBaseModel):
+ name: str
+ description: str
+
+
+class ContextItemId(BaseModel):
+ """
+ A ContextItemId is a unique identifier for a ContextItem.
+ """
+
+ provider_title: str
+ item_id: str
+
+ @validator("provider_title", "item_id")
+ def must_be_valid_id(cls, v):
+ import re
+
+ if not re.match(r"^[0-9a-zA-Z_-]*$", v):
+ raise ValueError(
+ "Both provider_title and item_id can only include characters 0-9, a-z, A-Z, -, and _"
+ )
+ return v
+
+ def to_string(self) -> str:
+ return f"{self.provider_title}-{self.item_id}"
+
+ @staticmethod
+ def from_string(string: str) -> "ContextItemId":
+ provider_title, *rest = string.split("-")
+ item_id = "-".join(rest)
+ return ContextItemId(provider_title=provider_title, item_id=item_id)
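+
+    # Round-trip example: ContextItemId(provider_title="github", item_id="issue-123")
+    # serializes to "github-issue-123"; from_string treats only the first "-" as the
+    # separator, so dashes inside item_id survive the round trip.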
+
+
+class ContextItemDescription(BaseModel):
+ """
+ A ContextItemDescription is a description of a ContextItem that is displayed to the user when they type '@'.
+
+ The id can be used to retrieve the ContextItem from the ContextManager.
+ """
+
+ name: str
+ description: str
+ id: ContextItemId
+
+
+class ContextItem(BaseModel):
+ """
+ A ContextItem is a single item that is stored in the ContextManager.
+ """
+
+ description: ContextItemDescription
+ content: str
+
+ @validator("content", pre=True)
+ def content_must_be_string(cls, v):
+ if v is None:
+ return ""
+ return v
+
+ editing: bool = False
+ editable: bool = False
+
+
+class SessionInfo(ContinueBaseModel):
+ session_id: str
+ title: str
+ date_created: str
+ workspace_directory: Optional[str] = None
+
+
+class ContinueConfig(ContinueBaseModel):
+ system_message: Optional[str]
+ temperature: Optional[float]
+
+ class Config:
+ extra = "allow"
+
+ def dict(self, **kwargs):
+ original_dict = super().dict(**kwargs)
+ original_dict.pop("policy", None)
+ return original_dict
+
+
+class ContextProviderDescription(BaseModel):
+ title: str
+ display_title: str
+ description: str
+ dynamic: bool
+ requires_query: bool
+
+
+class FullState(ContinueBaseModel):
+ """A full state of the program, including the history"""
+
+ history: History
+ active: bool
+ user_input_queue: List[str]
+ slash_commands: List[SlashCommandDescription]
+ adding_highlighted_code: bool
+ selected_context_items: List[ContextItem]
+ session_info: Optional[SessionInfo] = None
+ config: ContinueConfig
+ saved_context_groups: Dict[str, List[ContextItem]] = {}
+ context_providers: List[ContextProviderDescription] = []
+ meilisearch_url: Optional[str] = None
+
+
+class ContinueSDK:
+ ...
+
+
+class Models:
+ ...
+
+
+class Policy(ContinueBaseModel):
+ """A rule that determines which step to take next"""
+
+ # Note that history is mutable, kinda sus
+ def next(
+ self, config: ContinueConfig, history: History = History.from_empty()
+ ) -> "Step":
+ raise NotImplementedError
+
+
+class Step(ContinueBaseModel):
+ name: str = None
+ hide: bool = False
+ description: Union[str, None] = None
+
+ class_name: str = "Step"
+
+ @validator("class_name", pre=True, always=True)
+ def class_name_is_class_name(cls, class_name):
+ return cls.__name__
+
+ system_message: Union[str, None] = None
+ chat_context: List[ChatMessage] = []
+ manage_own_chat_context: bool = False
+
+ class Config:
+ copy_on_model_validation = False
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self.description is not None:
+ return self.description
+ return "Running step: " + self.name
+
+ def dict(self, *args, **kwargs):
+ d = super().dict(*args, **kwargs)
+ # Make sure description is always a string
+ d["description"] = self.description or ""
+ return d
+
+ @validator("name", pre=True, always=True)
+ def name_is_class_name(cls, name):
+ if name is None:
+ return cls.__name__
+ return name
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ raise NotImplementedError
+
+ async def __call__(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ return await self.run(sdk)
+
+ def __rshift__(self, other: "Step"):
+ steps = []
+ if isinstance(self, SequentialStep):
+ steps = self.steps
+ else:
+ steps.append(self)
+ if isinstance(other, SequentialStep):
+ steps += other.steps
+ else:
+ steps.append(other)
+ return SequentialStep(steps=steps)
+
+
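+# For example (illustrative): StepA() >> StepB() >> StepC() flattens into a single
+# SequentialStep whose `steps` list is [StepA(), StepB(), StepC()], run in order below.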
+class SequentialStep(Step):
+ steps: List[Step]
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ for step in self.steps:
+ observation = await sdk.run_step(step)
+ return observation
+
+
+class ValidatorObservation(Observation):
+ passed: bool
+ observation: Observation
+
+
+class Validator(Step):
+ def run(self, sdk: ContinueSDK) -> ValidatorObservation:
+ raise NotImplementedError
+
+
+class Context:
+ key_value: Dict[str, Any] = {}
+
+ def set(self, key: str, value: Any):
+ self.key_value[key] = value
+
+ def get(self, key: str) -> Any:
+ return self.key_value.get(key, None)
+
+
+class ContinueCustomException(Exception):
+ title: str
+ message: str
+ with_step: Union[Step, None]
+
+ def __init__(
+ self,
+ message: str,
+ title: str = "Error while running step:",
+ with_step: Union[Step, None] = None,
+ ):
+ self.message = message
+ self.title = title
+ self.with_step = with_step
+
+
+HistoryNode.update_forward_refs()
diff --git a/server/continuedev/core/models.py b/server/continuedev/core/models.py
new file mode 100644
index 00000000..21ebd8f6
--- /dev/null
+++ b/server/continuedev/core/models.py
@@ -0,0 +1,113 @@
+from typing import List, Optional
+
+from pydantic import BaseModel
+
+from ..libs.llm.anthropic import AnthropicLLM
+from ..libs.llm.base import LLM
+from ..libs.llm.ggml import GGML
+from ..libs.llm.google_palm_api import GooglePaLMAPI
+from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI
+from ..libs.llm.hf_tgi import HuggingFaceTGI
+from ..libs.llm.llamacpp import LlamaCpp
+from ..libs.llm.ollama import Ollama
+from ..libs.llm.openai import OpenAI
+from ..libs.llm.openai_free_trial import OpenAIFreeTrial
+from ..libs.llm.replicate import ReplicateLLM
+from ..libs.llm.together import TogetherLLM
+
+
+class ContinueSDK(BaseModel):
+ pass
+
+
+ALL_MODEL_ROLES = [
+ "default",
+ "summarize",
+ "edit",
+ "chat",
+]
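+
+# A config usually sets only `default`; Models.start() below copies the default model into
+# any role left unset, so e.g. Models(default=OpenAIFreeTrial(model="gpt-3.5-turbo"))
+# (illustrative) also serves as the summarize, edit, and chat model.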
+
+MODEL_CLASSES = {
+ cls.__name__: cls
+ for cls in [
+ OpenAI,
+ OpenAIFreeTrial,
+ GGML,
+ TogetherLLM,
+ AnthropicLLM,
+ ReplicateLLM,
+ Ollama,
+ LlamaCpp,
+ HuggingFaceInferenceAPI,
+ HuggingFaceTGI,
+ GooglePaLMAPI,
+ ]
+}
+
+MODEL_MODULE_NAMES = {
+ "OpenAI": "openai",
+ "OpenAIFreeTrial": "openai_free_trial",
+ "GGML": "ggml",
+ "TogetherLLM": "together",
+ "AnthropicLLM": "anthropic",
+ "ReplicateLLM": "replicate",
+ "Ollama": "ollama",
+ "LlamaCpp": "llamacpp",
+ "HuggingFaceInferenceAPI": "hf_inference_api",
+ "HuggingFaceTGI": "hf_tgi",
+ "GooglePaLMAPI": "google_palm_api",
+}
+
+
+class Models(BaseModel):
+ """Main class that holds the current model configuration"""
+
+ default: LLM
+ summarize: Optional[LLM] = None
+ edit: Optional[LLM] = None
+ chat: Optional[LLM] = None
+
+ saved: List[LLM] = []
+
+ # TODO namespace these away to not confuse readers,
+ # or split Models into ModelsConfig, which gets turned into Models
+ sdk: ContinueSDK = None
+
+ def dict(self, **kwargs):
+ original_dict = super().dict(**kwargs)
+ original_dict.pop("sdk", None)
+ return original_dict
+
+ @property
+ def all_models(self):
+ models = [getattr(self, role) for role in ALL_MODEL_ROLES]
+ return [model for model in models if model is not None]
+
+ @property
+ def system_message(self) -> Optional[str]:
+ if self.sdk:
+ return self.sdk.config.system_message
+ return None
+
+ def set_system_message(self, msg: str):
+ for model in self.all_models:
+ if model.system_message is None:
+ model.system_message = msg
+
+ async def start(self, sdk: "ContinueSDK"):
+ """Start each of the LLMs, or fall back to default"""
+ self.sdk = sdk
+
+ for role in ALL_MODEL_ROLES:
+ model = getattr(self, role)
+ if model is None:
+ setattr(self, role, self.default)
+ else:
+ await sdk.start_model(model)
+
+ self.set_system_message(self.system_message)
+
+ async def stop(self, sdk: "ContinueSDK"):
+ """Stop each LLM (if it's not the default, which is shared)"""
+ for model in self.all_models:
+ await model.stop()
diff --git a/server/continuedev/core/observation.py b/server/continuedev/core/observation.py
new file mode 100644
index 00000000..8a5e454e
--- /dev/null
+++ b/server/continuedev/core/observation.py
@@ -0,0 +1,41 @@
+from pydantic import BaseModel, validator
+
+from ..models.main import Traceback
+
+
+class Observation(BaseModel):
+ pass
+
+
+class TracebackObservation(Observation):
+ traceback: Traceback
+
+
+class ValidatorObservation(Observation):
+ passed: bool
+
+
+class UserInputObservation(Observation):
+ user_input: str
+
+
+class DictObservation(Observation):
+ values: dict
+
+ def __getitem__(self, key):
+ return self.values[key]
+
+
+class TextObservation(Observation):
+ text: str
+
+ @validator("text", pre=True, always=True)
+ def text_not_none(cls, v):
+ if v is None:
+ return ""
+ return v
+
+
+class InternalErrorObservation(Observation):
+ title: str
+ error: str
diff --git a/server/continuedev/core/sdk.py b/server/continuedev/core/sdk.py
new file mode 100644
index 00000000..408168f6
--- /dev/null
+++ b/server/continuedev/core/sdk.py
@@ -0,0 +1,309 @@
+import os
+import traceback
+from typing import Coroutine, List, Optional, Union
+
+from ..libs.llm.base import LLM
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.logging import logger
+from ..libs.util.paths import (
+ convertConfigImports,
+ getConfigFilePath,
+ getDiffsFolderPath,
+)
+from ..libs.util.telemetry import posthog_logger
+from ..models.filesystem import RangeInFile
+from ..models.filesystem_edit import (
+ AddDirectory,
+ AddFile,
+ DeleteDirectory,
+ DeleteFile,
+ FileEdit,
+ FileSystemEdit,
+)
+from ..models.main import Range
+from ..server.ide_protocol import AbstractIdeProtocolServer
+from .abstract_sdk import AbstractContinueSDK
+from .config import ContinueConfig
+from .lsp import ContinueLSPClient
+from .main import (
+ ChatMessage,
+ Context,
+ ContinueCustomException,
+ History,
+ HistoryNode,
+ Step,
+)
+from .models import Models
+from .observation import Observation
+from .steps import (
+ DefaultModelEditCodeStep,
+ FileSystemEditStep,
+ MessageStep,
+ RangeInFileWithContents,
+ ShellCommandsStep,
+ WaitForUserConfirmationStep,
+)
+
+
+class Autopilot:
+ pass
+
+
+class ContinueSDK(AbstractContinueSDK):
+ """The SDK provided as parameters to a step"""
+
+ ide: AbstractIdeProtocolServer
+ models: Models
+ lsp: Optional[ContinueLSPClient] = None
+ context: Context
+ config: ContinueConfig
+ __autopilot: Autopilot
+
+ def __init__(self, autopilot: Autopilot):
+ self.ide = autopilot.ide
+ self.__autopilot = autopilot
+ self.context = autopilot.context
+
+ async def load(self, config: Optional[ContinueConfig] = None):
+ # Create necessary directories
+ getDiffsFolderPath()
+
+ try:
+ self.config = config or self._load_config_dot_py()
+ except Exception as e:
+ logger.error(f"Failed to load config.py: {traceback.format_exception(e)}")
+
+ self.config = (
+ ContinueConfig()
+ if self._last_valid_config is None
+ else self._last_valid_config
+ )
+
+ formatted_err = "\n".join(traceback.format_exception(e))
+ msg_step = MessageStep(
+ name="Invalid Continue Config File", message=formatted_err
+ )
+ msg_step.description = f"Falling back to default config settings due to the following error in `~/.continue/config.py`.\n```\n{formatted_err}\n```\n\nIt's possible this was caused by an update to the Continue config format. If you'd like to see the new recommended default `config.py`, check [here](https://github.com/continuedev/continue/blob/main/continuedev/src/continuedev/libs/constants/default_config.py).\n\nIf the error is related to OpenAIServerInfo, see the updated way of using these parameters [here](https://continue.dev/docs/customization#azure-openai-service)."
+ self.history.add_node(
+ HistoryNode(step=msg_step, observation=None, depth=0, active=False)
+ )
+ await self.ide.setFileOpen(getConfigFilePath())
+
+ # Start models
+ self.models = self.config.models
+ await self.models.start(self)
+
+ # Start LSP
+ # async def start_lsp():
+ # try:
+ # sdk.lsp = ContinueLSPClient(
+ # workspace_dir=sdk.ide.workspace_directory,
+ # )
+ # await sdk.lsp.start()
+ # except Exception as e:
+ # logger.warning(f"Failed to start LSP client: {e}", exc_info=False)
+ # sdk.lsp = None
+
+ # create_async_task(
+ # start_lsp(), on_error=lambda e: logger.error("Failed to setup LSP: %s", e)
+ # )
+
+ # When the config is loaded, setup posthog logger
+ posthog_logger.setup(
+ self.ide.unique_id, self.config.allow_anonymous_telemetry, self.ide.ide_info
+ )
+ dev_data_logger.setup(self.config.user_token, self.config.data_server_url)
+
+ @classmethod
+ async def create(
+ cls, autopilot: Autopilot, config: Optional[ContinueConfig] = None
+ ) -> "ContinueSDK":
+ sdk = ContinueSDK(autopilot)
+ autopilot.continue_sdk = sdk
+
+ await sdk.load(config=config)
+
+ return sdk
+
+ @property
+ def history(self) -> History:
+ return self.__autopilot.history
+
+ def write_log(self, message: str):
+ self.history.timeline[self.history.current_index].logs.append(message)
+
+ async def start_model(self, llm: LLM):
+ await llm.start(unique_id=self.ide.unique_id, write_log=self.write_log)
+
+ async def _ensure_absolute_path(self, path: str) -> str:
+ if os.path.isabs(path):
+ return path
+
+ # Else if in workspace
+ workspace_path = os.path.join(self.ide.workspace_directory, path)
+ if os.path.exists(workspace_path):
+ return workspace_path
+ else:
+ # Check if it matches any of the open files, then use that absolute path
+ open_files = await self.ide.getOpenFiles()
+ for open_file in open_files:
+ if os.path.basename(open_file) == os.path.basename(path):
+ return open_file
+ raise Exception(f"Path {path} does not exist")
+
+ async def run_step(self, step: Step) -> Coroutine[Observation, None, None]:
+ return await self.__autopilot._run_singular_step(step)
+
+ async def apply_filesystem_edit(
+ self, edit: FileSystemEdit, name: str = None, description: str = None
+ ):
+ return await self.run_step(
+ FileSystemEditStep(
+ edit=edit, description=description, **({"name": name} if name else {})
+ )
+ )
+
+ async def wait_for_user_input(self) -> str:
+ return await self.__autopilot.wait_for_user_input()
+
+ async def wait_for_user_confirmation(self, prompt: str):
+ return await self.run_step(WaitForUserConfirmationStep(prompt=prompt))
+
+ async def run(
+ self,
+ commands: Union[List[str], str],
+ cwd: str = None,
+ name: str = None,
+ description: str = None,
+ handle_error: bool = True,
+ ) -> Coroutine[str, None, None]:
+ commands = commands if isinstance(commands, List) else [commands]
+ return (
+ await self.run_step(
+ ShellCommandsStep(
+ cmds=commands,
+ cwd=cwd,
+ description=description,
+ handle_error=handle_error,
+ **({"name": name} if name else {}),
+ )
+ )
+ ).text
+
+ async def edit_file(
+ self,
+ filename: str,
+ prompt: str,
+ name: str = None,
+ description: str = "",
+ range: Range = None,
+ ):
+ filepath = await self._ensure_absolute_path(filename)
+
+ await self.ide.setFileOpen(filepath)
+ contents = await self.ide.readFile(filepath)
+ await self.run_step(
+ DefaultModelEditCodeStep(
+ range_in_files=[
+ RangeInFile(filepath=filepath, range=range)
+ if range is not None
+ else RangeInFile.from_entire_file(filepath, contents)
+ ],
+ user_input=prompt,
+ description=description,
+ **({"name": name} if name else {}),
+ )
+ )
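+
+    # Example usage (illustrative): `await sdk.edit_file("main.py", "Add type hints")`
+    # resolves the path, opens the file in the IDE, and runs DefaultModelEditCodeStep
+    # over the whole file, or only over `range` when one is given.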
+
+ async def append_to_file(self, filename: str, content: str):
+ filepath = await self._ensure_absolute_path(filename)
+ previous_content = await self.ide.readFile(filepath)
+ file_edit = FileEdit.from_append(filepath, previous_content, content)
+ await self.ide.applyFileSystemEdit(file_edit)
+
+ async def add_file(self, filename: str, content: Union[str, None]):
+ filepath = await self._ensure_absolute_path(filename)
+ dir_name = os.path.dirname(filepath)
+ os.makedirs(dir_name, exist_ok=True)
+ return await self.run_step(
+ FileSystemEditStep(edit=AddFile(filepath=filepath, content=content))
+ )
+
+ async def delete_file(self, filename: str):
+ filename = await self._ensure_absolute_path(filename)
+ return await self.run_step(
+ FileSystemEditStep(edit=DeleteFile(filepath=filename))
+ )
+
+ async def add_directory(self, path: str):
+ path = await self._ensure_absolute_path(path)
+ return await self.run_step(FileSystemEditStep(edit=AddDirectory(path=path)))
+
+ async def delete_directory(self, path: str):
+ path = await self._ensure_absolute_path(path)
+ return await self.run_step(FileSystemEditStep(edit=DeleteDirectory(path=path)))
+
+ _last_valid_config: ContinueConfig = None
+
+ def _load_config_dot_py(self, retry: bool = True) -> ContinueConfig:
+ try:
+ path = getConfigFilePath()
+ config = ContinueConfig.from_filepath(path)
+ self._last_valid_config = config
+
+ logger.debug("Loaded Continue config file from %s", path)
+
+ return config
+ except ModuleNotFoundError as e:
+ if not retry:
+ raise e
+ # Check if the module was "continuedev.src"
+ if e.name == "continuedev.src":
+ convertConfigImports(shorten=True)
+ return self._load_config_dot_py(retry=False)
+ else:
+ raise e
+
+ def get_code_context(
+ self, only_editing: bool = False
+ ) -> List[RangeInFileWithContents]:
+ highlighted_ranges = self.__autopilot.context_manager.context_providers[
+ "code"
+ ].highlighted_ranges
+ context = (
+ list(filter(lambda x: x.item.editing, highlighted_ranges))
+ if only_editing
+ else highlighted_ranges
+ )
+ return [c.rif for c in context]
+
+ def set_loading_message(self, message: str):
+ # self.__autopilot.set_loading_message(message)
+ raise NotImplementedError()
+
+ def raise_exception(
+ self, message: str, title: str, with_step: Union[Step, None] = None
+ ):
+ raise ContinueCustomException(message, title, with_step)
+
+ async def get_chat_context(self) -> List[ChatMessage]:
+ history_context = self.history.to_chat_history()
+
+ context_messages: List[
+ ChatMessage
+ ] = await self.__autopilot.context_manager.get_chat_messages()
+
+ # Insert at the end, but don't insert after latest user message or function call
+ for msg in context_messages:
+ history_context.insert(-1, msg)
+
+ return history_context
+
+ async def update_ui(self):
+ await self.__autopilot.update_subscribers()
+
+ async def clear_history(self):
+ await self.__autopilot.clear_history()
+
+ def current_step_was_deleted(self):
+ return self.history.timeline[self.history.current_index].deleted
diff --git a/server/continuedev/core/steps.py b/server/continuedev/core/steps.py
new file mode 100644
index 00000000..5c20dd15
--- /dev/null
+++ b/server/continuedev/core/steps.py
@@ -0,0 +1,963 @@
+# These steps are depended upon by ContinueSDK
+import difflib
+import subprocess
+from textwrap import dedent
+from typing import Coroutine, List, Optional, Union
+
+from ..libs.llm.base import LLM
+from ..libs.llm.openai_free_trial import OpenAIFreeTrial
+from ..libs.util.count_tokens import DEFAULT_MAX_TOKENS
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.strings import (
+ dedent_and_get_common_whitespace,
+ remove_quotes_and_escapes,
+)
+from ..libs.util.telemetry import posthog_logger
+from ..libs.util.templating import render_prompt_template
+from ..models.filesystem import FileSystem, RangeInFile, RangeInFileWithContents
+from ..models.filesystem_edit import (
+ EditDiff,
+ FileEdit,
+ FileEditWithFullContents,
+ FileSystemEdit,
+)
+
+# from ....libs.llm.replicate import ReplicateLLM
+from ..models.main import Range
+from .main import ChatMessage, ContinueCustomException, Step
+from .observation import Observation, TextObservation, UserInputObservation
+
+
+class ContinueSDK:
+ pass
+
+
+class Models:
+ pass
+
+
+class ReversibleStep(Step):
+ async def reverse(self, sdk: ContinueSDK):
+ raise NotImplementedError
+
+
+class MessageStep(Step):
+ name: str = "Message"
+ message: str
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return self.message
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ return TextObservation(text=self.message)
+
+
+class DisplayErrorStep(Step):
+ name: str = "Error in the Continue server"
+
+ title: str = "Error in the Continue server"
+ message: str = "There was an error in the Continue server."
+
+ @staticmethod
+ def from_exception(e: Exception) -> "DisplayErrorStep":
+ if isinstance(e, ContinueCustomException):
+ return DisplayErrorStep(title=e.title, message=e.message, name=e.title)
+
+ return DisplayErrorStep(message=str(e))
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return self.message
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ raise ContinueCustomException(message=self.message, title=self.title)
+
+
+class FileSystemEditStep(ReversibleStep):
+ edit: FileSystemEdit
+ _diff: Union[EditDiff, None] = None
+
+ hide: bool = True
+
+ async def run(self, sdk: "ContinueSDK") -> Coroutine[Observation, None, None]:
+ self._diff = await sdk.ide.applyFileSystemEdit(self.edit)
+ return None
+
+ async def reverse(self, sdk: "ContinueSDK"):
+ await sdk.ide.applyFileSystemEdit(self._diff.backward)
+ # Where and when should file saves happen?
+
+
+def output_contains_error(output: str) -> bool:
+ return "Traceback" in output or "SyntaxError" in output
+
+
+AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
+
+
+class ShellCommandsStep(Step):
+ cmds: List[str]
+ cwd: Union[str, None] = None
+ name: str = "Run Shell Commands"
+ handle_error: bool = True
+
+ _err_text: Union[str, None] = None
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self._err_text is not None:
+ return f"Error when running shell commands:\n```\n{self._err_text}\n```"
+
+ cmds_str = "\n".join(self.cmds)
+ return await models.summarize.complete(
+ f"{cmds_str}\n\nSummarize what was done in these shell commands, using markdown bullet points:"
+ )
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+        process = subprocess.Popen(
+            "/bin/bash",
+            stdin=subprocess.PIPE,
+            stdout=subprocess.PIPE,
+            stderr=subprocess.PIPE,
+            cwd=self.cwd or sdk.ide.workspace_directory,
+        )
+
+        stdin_input = "\n".join(self.cmds)
+        out, err = process.communicate(stdin_input.encode())
+
+        # If the commands wrote anything to stderr, surface it as the error observation
+        if err:
+            self._err_text = err.decode()
+            return TextObservation(text=self._err_text)
+
+ return None
+
+
+class DefaultModelEditCodeStep(Step):
+ user_input: str
+ model: Optional[LLM] = None
+ range_in_files: List[RangeInFile]
+ name: str = "Editing Code"
+    hide: bool = False
+ description: str = ""
+ _prompt: str = dedent(
+ """\
+ Take the file prefix and suffix into account, but only rewrite the code_to_edit as specified in the user_request. The code you write in modified_code_to_edit will replace the code between the code_to_edit tags. Do NOT preface your answer or write anything other than code. The </modified_code_to_edit> tag should be written to indicate the end of the modified code section. Do not ever use nested tags.
+
+ Example:
+
+ <file_prefix>
+ class Database:
+ def __init__(self):
+ self._data = {{}}
+
+ def get(self, key):
+ return self._data[key]
+
+ </file_prefix>
+ <code_to_edit>
+ def set(self, key, value):
+ self._data[key] = value
+ </code_to_edit>
+ <file_suffix>
+
+ def clear_all():
+ self._data = {{}}
+ </file_suffix>
+ <user_request>
+ Raise an error if the key already exists.
+ </user_request>
+ <modified_code_to_edit>
+ def set(self, key, value):
+ if key in self._data:
+ raise KeyError(f"Key {{key}} already exists")
+ self._data[key] = value
+ </modified_code_to_edit>
+
+ Main task:
+ """
+ )
+ _previous_contents: str = ""
+ _new_contents: str = ""
+ _prompt_and_completion: str = ""
+
+ summary_prompt: str = "Please briefly explain the changes made to the code above. Give no more than 2-3 sentences, and use markdown bullet points:"
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ name = await models.summarize.complete(
+ f"Write a very short title to describe this requested change (no quotes): '{self.user_input}'. This is the title:"
+ )
+ self.name = remove_quotes_and_escapes(name)
+
+ if self._previous_contents.strip() == self._new_contents.strip():
+ return "No edits were made"
+ else:
+ return None
+
+ async def get_prompt_parts(
+ self, rif: RangeInFileWithContents, sdk: ContinueSDK, full_file_contents: str
+ ):
+ # We don't know here all of the functions being passed in.
+ # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
+ # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
+ if self.model is not None:
+ await sdk.start_model(self.model)
+
+ model_to_use = self.model or sdk.models.edit
+ max_tokens = int(model_to_use.context_length / 2)
+
+ TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1200
+ if (
+ model_to_use.count_tokens(rif.contents)
+ > TOKENS_TO_BE_CONSIDERED_LARGE_RANGE
+ ):
+ self.description += "\n\n**It looks like you've selected a large range to edit, which may take a while to complete. If you'd like to cancel, click the 'X' button above. If you highlight a more specific range, Continue will only edit within it.**"
+
+ # At this point, we also increase the max_tokens parameter so it doesn't stop in the middle of generation
+ # Increase max_tokens to be double the size of the range
+ # But don't exceed twice default max tokens
+ max_tokens = int(
+ min(model_to_use.count_tokens(rif.contents), DEFAULT_MAX_TOKENS) * 2.5
+ )
+
+ BUFFER_FOR_FUNCTIONS = 400
+ total_tokens = (
+ model_to_use.count_tokens(
+ full_file_contents + self._prompt + self.user_input
+ )
+ + BUFFER_FOR_FUNCTIONS
+ + max_tokens
+ )
+
+ # If using 3.5 and overflows, upgrade to 3.5.16k
+ if model_to_use.model == "gpt-3.5-turbo":
+ if total_tokens > model_to_use.context_length:
+ model_to_use = OpenAIFreeTrial(model="gpt-3.5-turbo-0613")
+ await sdk.start_model(model_to_use)
+
+ # Remove tokens from the end first, and then the start to clear space
+ # This part finds the start and end lines
+ full_file_contents_lst = full_file_contents.split("\n")
+ max_start_line = rif.range.start.line
+ min_end_line = rif.range.end.line
+ cur_start_line = 0
+ cur_end_line = len(full_file_contents_lst) - 1
+
+ if total_tokens > model_to_use.context_length:
+ while cur_end_line > min_end_line:
+ total_tokens -= model_to_use.count_tokens(
+ full_file_contents_lst[cur_end_line]
+ )
+ cur_end_line -= 1
+ if total_tokens < model_to_use.context_length:
+ break
+
+ if total_tokens > model_to_use.context_length:
+ while cur_start_line < max_start_line:
+ cur_start_line += 1
+ total_tokens -= model_to_use.count_tokens(
+ full_file_contents_lst[cur_start_line]
+ )
+ if total_tokens < model_to_use.context_length:
+ break
+
+ # Now use the found start/end lines to get the prefix and suffix strings
+ file_prefix = "\n".join(full_file_contents_lst[cur_start_line:max_start_line])
+ file_suffix = "\n".join(full_file_contents_lst[min_end_line : cur_end_line - 1])
+
+ # Move any surrounding blank line in rif.contents to the prefix/suffix
+ # TODO: Keep track of start line of the range, because it's needed below for offset stuff
+ if len(rif.contents) > 0:
+ lines = rif.contents.splitlines(keepends=True)
+ first_line = lines[0] if lines else None
+ while first_line and first_line.strip() == "":
+ file_prefix += first_line
+ rif.contents = rif.contents[len(first_line) :]
+ lines = rif.contents.splitlines(keepends=True)
+ first_line = lines[0] if lines else None
+
+ last_line = lines[-1] if lines else None
+ while last_line and last_line.strip() == "":
+ file_suffix = last_line + file_suffix
+ rif.contents = rif.contents[: len(rif.contents) - len(last_line)]
+ lines = rif.contents.splitlines(keepends=True)
+ last_line = lines[-1] if lines else None
+
+ while rif.contents.startswith("\n"):
+ file_prefix += "\n"
+ rif.contents = rif.contents[1:]
+ while rif.contents.endswith("\n"):
+ file_suffix = "\n" + file_suffix
+ rif.contents = rif.contents[:-1]
+
+ return file_prefix, rif.contents, file_suffix, model_to_use, max_tokens
+
+ def compile_prompt(
+ self, file_prefix: str, contents: str, file_suffix: str, sdk: ContinueSDK
+ ) -> str:
+ if contents.strip() == "":
+ # Separate prompt for insertion at the cursor, the other tends to cause it to repeat whole file
+ prompt = dedent(
+ f"""\
+<file_prefix>
+{file_prefix}
+</file_prefix>
+<insertion_code_here>
+<file_suffix>
+{file_suffix}
+</file_suffix>
+<user_request>
+{self.user_input}
+</user_request>
+
+Please output the code to be inserted at the cursor in order to fulfill the user_request. Do NOT preface your answer or write anything other than code. You should not write any tags, just the code. Make sure to correctly indent the code:"""
+ )
+ return prompt
+
+ prompt = self._prompt
+ if file_prefix.strip() != "":
+ prompt += dedent(
+ f"""
+<file_prefix>
+{file_prefix}
+</file_prefix>"""
+ )
+ prompt += dedent(
+ f"""
+<code_to_edit>
+{contents}
+</code_to_edit>"""
+ )
+ if file_suffix.strip() != "":
+ prompt += dedent(
+ f"""
+<file_suffix>
+{file_suffix}
+</file_suffix>"""
+ )
+ prompt += dedent(
+ f"""
+<user_request>
+{self.user_input}
+</user_request>
+<modified_code_to_edit>
+"""
+ )
+
+ return prompt
+
+ def is_end_line(self, line: str) -> bool:
+ return "</modified_code_to_edit>" in line or "</code_to_edit>" in line
+
+ def line_to_be_ignored(self, line: str, is_first_line: bool = False) -> bool:
+ return (
+ "```" in line
+ or "<modified_code_to_edit>" in line
+ or "<file_prefix>" in line
+ or "</file_prefix>" in line
+ or "<file_suffix>" in line
+ or "</file_suffix>" in line
+ or "<user_request>" in line
+ or "</user_request>" in line
+ or "<code_to_edit>" in line
+ )
+
+ async def stream_rif(self, rif: RangeInFileWithContents, sdk: ContinueSDK):
+ await sdk.ide.saveFile(rif.filepath)
+ full_file_contents = await sdk.ide.readFile(rif.filepath)
+
+ (
+ file_prefix,
+ contents,
+ file_suffix,
+ model_to_use,
+ max_tokens,
+ ) = await self.get_prompt_parts(rif, sdk, full_file_contents)
+ contents, common_whitespace = dedent_and_get_common_whitespace(contents)
+ prompt = self.compile_prompt(file_prefix, contents, file_suffix, sdk)
+ full_file_contents_lines = full_file_contents.split("\n")
+
+ lines_to_display = []
+
+ async def sendDiffUpdate(
+ lines: List[str], sdk: ContinueSDK, final: bool = False
+ ):
+ nonlocal full_file_contents_lines, rif, lines_to_display
+
+ completion = "\n".join(lines)
+
+ full_prefix_lines = full_file_contents_lines[: rif.range.start.line]
+ full_suffix_lines = full_file_contents_lines[rif.range.end.line :]
+
+ # Don't do this at the very end, just show the inserted code
+ if final:
+ lines_to_display = []
+ # Only recalculate at every new-line, because this is sort of expensive
+ elif completion.endswith("\n"):
+ contents_lines = rif.contents.split("\n")
+ rewritten_lines = 0
+ for line in lines:
+ for i in range(rewritten_lines, len(contents_lines)):
+ if (
+ difflib.SequenceMatcher(
+ None, line, contents_lines[i]
+ ).ratio()
+ > 0.7
+ and contents_lines[i].strip() != ""
+ ):
+ rewritten_lines = i + 1
+ break
+ lines_to_display = contents_lines[rewritten_lines:]
+
+ new_file_contents = (
+ "\n".join(full_prefix_lines)
+ + "\n"
+ + completion
+ + "\n"
+ + (
+ "\n".join(lines_to_display) + "\n"
+ if len(lines_to_display) > 0
+ else ""
+ )
+ + "\n".join(full_suffix_lines)
+ )
+
+ step_index = sdk.history.current_index
+
+ await sdk.ide.showDiff(rif.filepath, new_file_contents, step_index)
+
+ # Important state variables
+ # -------------------------
+ original_lines = [] if rif.contents == "" else rif.contents.split("\n")
+ # In the actual file, taking into account block offset
+ current_line_in_file = rif.range.start.line
+ current_block_lines = []
+ original_lines_below_previous_blocks = original_lines
+ # The start of the current block in file, taking into account block offset
+ current_block_start = -1
+ offset_from_blocks = 0
+
+ # Don't end the block until you've matched N simultaneous lines
+ # This helps avoid many tiny blocks
+ LINES_TO_MATCH_BEFORE_ENDING_BLOCK = 2
+ # If a line has been matched at the end of the block, this is its index within original_lines_below_previous_blocks
+ # Except we are keeping track of multiple potentialities, so it's a list
+ # We always check the lines following each of these leads, but if multiple make it out at the end, we use the first one
+ # This is a tuple of (index_of_last_matched_line, number_of_lines_matched)
+ indices_of_last_matched_lines = []
+
+ async def handle_generated_line(line: str):
+ nonlocal current_block_start, current_line_in_file, original_lines, original_lines_below_previous_blocks, current_block_lines, indices_of_last_matched_lines, LINES_TO_MATCH_BEFORE_ENDING_BLOCK, offset_from_blocks
+
+ # Highlight the line to show progress
+ line_to_highlight = current_line_in_file - len(current_block_lines)
+ if False:
+ await sdk.ide.highlightCode(
+ RangeInFile(
+ filepath=rif.filepath,
+ range=Range.from_shorthand(
+ line_to_highlight, 0, line_to_highlight, 0
+ ),
+ ),
+ "#FFFFFF22" if len(current_block_lines) == 0 else "#00FF0022",
+ )
+
+ if len(current_block_lines) == 0:
+ # Set this as the start of the next block
+ current_block_start = (
+ rif.range.start.line
+ + len(original_lines)
+ - len(original_lines_below_previous_blocks)
+ + offset_from_blocks
+ )
+ if (
+ len(original_lines_below_previous_blocks) > 0
+ and line == original_lines_below_previous_blocks[0]
+ ):
+ # Line is equal to the next line in file, move past this line
+ original_lines_below_previous_blocks = (
+ original_lines_below_previous_blocks[1:]
+ )
+ return
+
+ # In a block, and have already matched at least one line
+ # Check if the next line matches, for each of the candidates
+ matches_found = []
+ first_valid_match = None
+ for (
+ index_of_last_matched_line,
+ num_lines_matched,
+ ) in indices_of_last_matched_lines:
+ if (
+ index_of_last_matched_line + 1
+ < len(original_lines_below_previous_blocks)
+ and line
+ == original_lines_below_previous_blocks[
+ index_of_last_matched_line + 1
+ ]
+ ):
+ matches_found.append(
+ (index_of_last_matched_line + 1, num_lines_matched + 1)
+ )
+ if (
+ first_valid_match is None
+ and num_lines_matched + 1 >= LINES_TO_MATCH_BEFORE_ENDING_BLOCK
+ ):
+ first_valid_match = (
+ index_of_last_matched_line + 1,
+ num_lines_matched + 1,
+ )
+ indices_of_last_matched_lines = matches_found
+
+ if first_valid_match is not None:
+ # We've matched the required number of lines, insert suggestion!
+
+ # We added some lines to the block that were matched (including maybe some blank lines)
+ # So here we will strip all matching lines from the end of current_block_lines
+ lines_stripped = []
+ index_of_last_line_in_block = first_valid_match[0]
+ while (
+ len(current_block_lines) > 0
+ and current_block_lines[-1]
+ == original_lines_below_previous_blocks[
+ index_of_last_line_in_block - 1
+ ]
+ ):
+ lines_stripped.append(current_block_lines.pop())
+ index_of_last_line_in_block -= 1
+
+ # It's also possible that some lines match at the beginning of the block
+ # lines_stripped_at_beginning = []
+ # j = 0
+ # while len(current_block_lines) > 0 and current_block_lines[0] == original_lines_below_previous_blocks[first_valid_match[0] - first_valid_match[1] + j]:
+ # lines_stripped_at_beginning.append(
+ # current_block_lines.pop(0))
+ # j += 1
+ # # current_block_start += 1
+
+ # Insert the suggestion
+ replacement = "\n".join(current_block_lines)
+ start_line = current_block_start
+ end_line = current_block_start + index_of_last_line_in_block
+
+ if False:
+ await sdk.ide.showSuggestion(
+ FileEdit(
+ filepath=rif.filepath,
+ range=Range.from_shorthand(start_line, 0, end_line, 0),
+ replacement=replacement,
+ )
+ )
+
+ # Reset current block / update variables
+ current_line_in_file += 1
+ offset_from_blocks += len(current_block_lines)
+ original_lines_below_previous_blocks = (
+ original_lines_below_previous_blocks[
+ index_of_last_line_in_block + 1 :
+ ]
+ )
+ current_block_lines = []
+ current_block_start = -1
+ indices_of_last_matched_lines = []
+
+ return
+
+ # Always look for new matching candidates
+ new_matches = []
+ for i in range(len(original_lines_below_previous_blocks)):
+ og_line = original_lines_below_previous_blocks[i]
+                # TODO: Disqualifying empty lines here is questionable.
+                # Ideally, find ALL matches and then discard them as the following lines are checked
+ if og_line == line: # and og_line.strip() != "":
+ new_matches.append((i, 1))
+ indices_of_last_matched_lines += new_matches
+
+ # Make sure they are sorted by index
+ indices_of_last_matched_lines = sorted(
+ indices_of_last_matched_lines, key=lambda x: x[0]
+ )
+
+ current_block_lines.append(line)
+
+ messages = await sdk.get_chat_context()
+ # Delete the last user and assistant messages
+ i = len(messages) - 1
+ deleted = 0
+ while i >= 0 and deleted < 2:
+ if messages[i].role == "user" or messages[i].role == "assistant":
+ messages.pop(i)
+ deleted += 1
+ i -= 1
+ messages.append(
+ ChatMessage(role="user", content=prompt, summary=self.user_input)
+ )
+
+ lines_of_prefix_copied = 0
+ lines = []
+ unfinished_line = ""
+ completion_lines_covered = 0
+ repeating_file_suffix = False
+ line_below_highlighted_range = file_suffix.lstrip().split("\n")[0]
+
+ # Use custom templates defined by the model
+ if template := model_to_use.prompt_templates.get("edit"):
+ rendered = render_prompt_template(
+ template,
+ messages[:-1],
+ {
+ "code_to_edit": rif.contents,
+ "user_input": self.user_input,
+ "file_prefix": file_prefix,
+ "file_suffix": file_suffix,
+ },
+ )
+ if isinstance(rendered, str):
+ messages = [
+ ChatMessage(
+ role="user",
+ content=rendered,
+ summary=self.user_input,
+ )
+ ]
+ else:
+ messages = rendered
+
+ generator = model_to_use.stream_complete(
+ rendered,
+ temperature=sdk.config.temperature,
+ max_tokens=min(max_tokens, model_to_use.context_length // 2),
+ )
+
+ else:
+
+ async def gen():
+ async for chunk in model_to_use.stream_chat(
+ messages,
+ temperature=sdk.config.temperature,
+ max_tokens=min(max_tokens, model_to_use.context_length // 2),
+ ):
+ if "content" in chunk:
+ yield chunk["content"]
+
+ generator = gen()
+
+ posthog_logger.capture_event(
+ "model_use",
+ {"model": model_to_use.model, "provider": model_to_use.__class__.__name__},
+ )
+ dev_data_logger.capture(
+ "model_use",
+ {"model": model_to_use.model, "provider": model_to_use.__class__.__name__},
+ )
+
+ try:
+ async for chunk in generator:
+ # Stop early if it is repeating the file_suffix or the step was deleted
+ if repeating_file_suffix:
+ break
+ if sdk.current_step_was_deleted():
+ return
+
+ # Accumulate lines
+ chunk_lines = chunk.split("\n")
+ chunk_lines[0] = unfinished_line + chunk_lines[0]
+ if chunk.endswith("\n"):
+ unfinished_line = ""
+ chunk_lines.pop() # because this will be an empty string
+ else:
+ unfinished_line = chunk_lines.pop()
+
+ # Deal with newly accumulated lines
+ for i in range(len(chunk_lines)):
+ # Trailing whitespace doesn't matter
+ chunk_lines[i] = chunk_lines[i].rstrip()
+ chunk_lines[i] = common_whitespace + chunk_lines[i]
+
+ # Lines that should signify the end of generation
+ if self.is_end_line(chunk_lines[i]):
+ break
+ # Lines that should be ignored, like the <> tags
+ elif self.line_to_be_ignored(
+ chunk_lines[i], completion_lines_covered == 0
+ ):
+                        continue
+ # Check if we are currently just copying the prefix
+ elif (
+ (lines_of_prefix_copied > 0 or completion_lines_covered == 0)
+ and lines_of_prefix_copied < len(file_prefix.splitlines())
+ and chunk_lines[i]
+ == full_file_contents_lines[lines_of_prefix_copied]
+ ):
+                        # Heuristic to stop the model from repeating the file_prefix;
+                        # it can misfire if the output legitimately contains a matching line
+                        lines_of_prefix_copied += 1
+                        continue
+                    # Stop when the model starts copying the file_suffix.
+                    # Because very short lines may legitimately repeat, this is only a heuristic
+ elif (
+ chunk_lines[i].strip() == line_below_highlighted_range.strip()
+ and len(chunk_lines[i].strip()) > 4
+ and not (
+ len(original_lines_below_previous_blocks) > 0
+ and chunk_lines[i].strip()
+ == original_lines_below_previous_blocks[0].strip()
+ )
+ ):
+ repeating_file_suffix = True
+ break
+
+ # If none of the above, insert the line!
+ if False:
+ await handle_generated_line(chunk_lines[i])
+
+ lines.append(chunk_lines[i])
+ completion_lines_covered += 1
+ current_line_in_file += 1
+
+ await sendDiffUpdate(
+ lines
+ + [
+ common_whitespace
+ if unfinished_line.startswith("<")
+ else (common_whitespace + unfinished_line)
+ ],
+ sdk,
+ )
+ finally:
+ await generator.aclose()
+ # Add the unfinished line
+ if (
+ unfinished_line != ""
+ and not self.line_to_be_ignored(
+ unfinished_line, completion_lines_covered == 0
+ )
+ and not self.is_end_line(unfinished_line)
+ ):
+ unfinished_line = common_whitespace + unfinished_line
+ lines.append(unfinished_line)
+ await handle_generated_line(unfinished_line)
+ completion_lines_covered += 1
+ current_line_in_file += 1
+
+ await sendDiffUpdate(lines, sdk, final=True)
+
+ if False:
+ # If the current block isn't empty, add that suggestion
+ if len(current_block_lines) > 0:
+ # We have a chance to back-track here for blank lines that are repeats of the end of the original
+ # Don't want to have the same ending in both the original and the generated, can just leave it there
+ num_to_remove = 0
+ for i in range(-1, -len(current_block_lines) - 1, -1):
+ if len(original_lines_below_previous_blocks) == 0:
+ break
+ if (
+ current_block_lines[i]
+ == original_lines_below_previous_blocks[-1]
+ ):
+ num_to_remove += 1
+ original_lines_below_previous_blocks.pop()
+ else:
+ break
+ current_block_lines = (
+ current_block_lines[:-num_to_remove]
+ if num_to_remove > 0
+ else current_block_lines
+ )
+
+ # It's also possible that some lines match at the beginning of the block
+ # while len(current_block_lines) > 0 and len(original_lines_below_previous_blocks) > 0 and current_block_lines[0] == original_lines_below_previous_blocks[0]:
+ # current_block_lines.pop(0)
+ # original_lines_below_previous_blocks.pop(0)
+ # current_block_start += 1
+
+ await sdk.ide.showSuggestion(
+ FileEdit(
+ filepath=rif.filepath,
+ range=Range.from_shorthand(
+ current_block_start,
+ 0,
+ current_block_start
+ + len(original_lines_below_previous_blocks),
+ 0,
+ ),
+ replacement="\n".join(current_block_lines),
+ )
+ )
+
+ # Record the completion
+ completion = "\n".join(lines)
+ self._previous_contents = "\n".join(original_lines)
+ self._new_contents = completion
+ self._prompt_and_completion += prompt + completion
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ await sdk.update_ui()
+
+ rif_with_contents = []
+ for range_in_file in map(
+ lambda x: RangeInFile(
+ filepath=x.filepath,
+ # Only consider the range line-by-line. Maybe later don't if it's only a single line.
+ range=x.range.to_full_lines(),
+ ),
+ self.range_in_files,
+ ):
+ file_contents = await sdk.ide.readRangeInFile(range_in_file)
+ rif_with_contents.append(
+ RangeInFileWithContents.from_range_in_file(range_in_file, file_contents)
+ )
+
+ rif_dict = {}
+ for rif in rif_with_contents:
+ rif_dict[rif.filepath] = rif.contents
+
+ for rif in rif_with_contents:
+ await sdk.ide.setSuggestionsLocked(rif.filepath, True)
+ await self.stream_rif(rif, sdk)
+ await sdk.ide.setSuggestionsLocked(rif.filepath, False)
+
+ changes = "\n".join(
+ difflib.ndiff(
+ self._previous_contents.splitlines(),
+ self._new_contents.splitlines(),
+ )
+ )
+
+ if sdk.config.disable_summaries:
+ self.name = ""
+ self.description = f"Edited {len(self.range_in_files)} files"
+ await sdk.update_ui()
+ else:
+ self.name = "Generating summary"
+ self.description = ""
+ async for chunk in sdk.models.summarize.stream_complete(
+ dedent(
+ f"""\
+ Diff summary: "{self.user_input}"
+
+ ```diff
+ {changes}
+ ```
+
+ {self.summary_prompt}"""
+ )
+ ):
+ self.description += chunk
+ await sdk.update_ui()
+
+ sdk.context.set("last_edit_user_input", self.user_input)
+ sdk.context.set("last_edit_diff", changes)
+ sdk.context.set("last_edit_range", self.range_in_files[-1].range)
+
+
+class EditFileStep(Step):
+ filepath: str
+ prompt: str
+ hide: bool = True
+ model: Optional[LLM] = None
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return "Editing file: " + self.filepath
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ file_contents = await sdk.ide.readFile(self.filepath)
+ await sdk.run_step(
+ DefaultModelEditCodeStep(
+ range_in_files=[
+ RangeInFile.from_entire_file(self.filepath, file_contents)
+ ],
+ user_input=self.prompt,
+ model=self.model,
+ )
+ )
+
+
+class ManualEditStep(ReversibleStep):
+ edit_diff: EditDiff
+ hide: bool = True
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return "Manual edit step"
+ # TODO - only handling FileEdit here, but need all other types of FileSystemEdits
+ # Also requires the merge_file_edit function
+ # return llm.complete(dedent(f"""This code was replaced:
+
+ # {self.edit_diff.backward.replacement}
+
+ # With this code:
+
+ # {self.edit_diff.forward.replacement}
+
+ # Maximally concise summary of changes in bullet points (can use markdown):
+ # """))
+
+ @classmethod
+ def from_sequence(cls, edits: List[FileEditWithFullContents]) -> "ManualEditStep":
+ diffs = []
+ for edit in edits:
+ _, diff = FileSystem.apply_edit_to_str(edit.fileContents, edit.fileEdit)
+ diffs.append(diff)
+ return cls(edit_diff=EditDiff.from_sequence(diffs))
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ return None
+
+ async def reverse(self, sdk: ContinueSDK):
+ await sdk.ide.applyFileSystemEdit(self.edit_diff.backward)
+
+
+class UserInputStep(Step):
+ user_input: str
+ name: str = "User Input"
+ hide: bool = False
+
+ manage_own_chat_context: bool = True
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self.description is not None:
+ return self.description
+ return self.user_input
+
+ async def run(
+ self, sdk: ContinueSDK
+ ) -> Coroutine[UserInputObservation, None, None]:
+ self.chat_context.append(
+ ChatMessage(role="user", content=self.user_input, summary=self.user_input)
+ )
+ self.description = self.user_input
+ return UserInputObservation(user_input=self.user_input)
+
+
+class WaitForUserInputStep(Step):
+ prompt: str
+ name: str = "Waiting for user input"
+
+ _description: Union[str, None] = None
+ _response: Union[str, None] = None
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ if self._response is None:
+ return self.prompt
+ else:
+ return f"{self.prompt}\n\n`{self._response}`"
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ self.description = self.prompt
+ resp = await sdk.wait_for_user_input()
+ self.description = f"{self.prompt}\n\n`{resp}`"
+ return TextObservation(text=resp)
+
+
+class WaitForUserConfirmationStep(Step):
+ prompt: str
+ name: str = "Waiting for user confirmation"
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return self.prompt
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ self.description = self.prompt
+ resp = await sdk.wait_for_user_input()
+ return TextObservation(text=resp)
diff --git a/server/continuedev/headless/__init__.py b/server/continuedev/headless/__init__.py
new file mode 100644
index 00000000..2ecdcce6
--- /dev/null
+++ b/server/continuedev/headless/__init__.py
@@ -0,0 +1,20 @@
+from typing import Optional, Union
+
+import typer
+
+from ..core.config import ContinueConfig
+from ..server.session_manager import Session, session_manager
+from .headless_ide import LocalIdeProtocol
+
+app = typer.Typer()
+
+
+async def start_headless_session(
+ config: Optional[Union[str, ContinueConfig]] = None
+) -> Session:
+ if config is not None:
+ if isinstance(config, str):
+ config: ContinueConfig = ContinueConfig.from_filepath(config)
+
+ ide = LocalIdeProtocol()
+ return await session_manager.new_session(ide, config=config)
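+
+
+# Example usage (an illustrative sketch): from an async entrypoint,
+#   session = await start_headless_session(config="/path/to/config.py")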
diff --git a/server/continuedev/headless/headless_ide.py b/server/continuedev/headless/headless_ide.py
new file mode 100644
index 00000000..088da2c9
--- /dev/null
+++ b/server/continuedev/headless/headless_ide.py
@@ -0,0 +1,181 @@
+import os
+import subprocess
+import uuid
+from typing import Any, Callable, Coroutine, List, Optional
+
+from dotenv import load_dotenv
+from fastapi import WebSocket
+
+from ..models.filesystem import (
+ FileSystem,
+ RangeInFile,
+ RangeInFileWithContents,
+ RealFileSystem,
+)
+from ..models.filesystem_edit import EditDiff, FileEdit, FileSystemEdit
+from ..server.ide_protocol import AbstractIdeProtocolServer
+
+load_dotenv()
+
+
+def get_mac_address():
+    mac_num = hex(uuid.getnode()).replace("0x", "").upper().zfill(12)  # pad to 12 hex digits so leading zeros aren't dropped
+ mac = "-".join(mac_num[i : i + 2] for i in range(0, 11, 2))
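+    # e.g. "AB-CD-EF-01-23-45"; used as a stable per-machine unique_id below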
+ return mac
+
+
+class LocalIdeProtocol(AbstractIdeProtocolServer):
+ websocket: WebSocket = None
+ session_id: Optional[str]
+ workspace_directory: str = os.getcwd()
+ unique_id: str = get_mac_address()
+
+ filesystem: FileSystem = RealFileSystem()
+
+ async def handle_json(self, data: Any):
+ """Handle a json message"""
+ pass
+
+ def showSuggestion(self, file_edit: FileEdit):
+ """Show a suggestion to the user"""
+ pass
+
+ async def setFileOpen(self, filepath: str, open: bool = True):
+ """Set whether a file is open"""
+ pass
+
+ async def showMessage(self, message: str):
+ """Show a message to the user"""
+ print(message)
+
+ async def showVirtualFile(self, name: str, contents: str):
+ """Show a virtual file"""
+ pass
+
+ async def setSuggestionsLocked(self, filepath: str, locked: bool = True):
+ """Set whether suggestions are locked"""
+ pass
+
+ async def getSessionId(self):
+ """Get a new session ID"""
+ pass
+
+ async def showSuggestionsAndWait(self, suggestions: List[FileEdit]) -> bool:
+ """Show suggestions to the user and wait for a response"""
+ pass
+
+ def onAcceptRejectSuggestion(self, accepted: bool):
+ """Called when the user accepts or rejects a suggestion"""
+ pass
+
+ def onFileSystemUpdate(self, update: FileSystemEdit):
+ """Called when a file system update is received"""
+ pass
+
+ def onCloseGUI(self, session_id: str):
+ """Called when a GUI is closed"""
+ pass
+
+ def onOpenGUIRequest(self):
+ """Called when a GUI is requested to be opened"""
+ pass
+
+ async def getOpenFiles(self) -> List[str]:
+ """Get a list of open files"""
+ pass
+
+ async def getVisibleFiles(self) -> List[str]:
+ """Get a list of visible files"""
+ pass
+
+ async def getHighlightedCode(self) -> List[RangeInFile]:
+ """Get a list of highlighted code"""
+ pass
+
+ async def readFile(self, filepath: str) -> str:
+ """Read a file"""
+ return self.filesystem.read(filepath)
+
+ async def readRangeInFile(self, range_in_file: RangeInFile) -> str:
+ """Read a range in a file"""
+ return self.filesystem.read_range_in_file(range_in_file)
+
+ async def editFile(self, edit: FileEdit):
+ """Edit a file"""
+ self.filesystem.apply_file_edit(edit)
+
+ async def applyFileSystemEdit(self, edit: FileSystemEdit) -> EditDiff:
+ """Apply a file edit"""
+ return self.filesystem.apply_edit(edit)
+
+ async def saveFile(self, filepath: str):
+ """Save a file"""
+ pass
+
+ async def getUserSecret(self, key: str):
+ """Get a user secret"""
+ return os.environ.get(key)
+
+ async def highlightCode(self, range_in_file: RangeInFile, color: str):
+ """Highlight code"""
+ pass
+
+ async def runCommand(self, command: str) -> str:
+ """Run a command using subprocess (don't pass, actually implement)"""
+ return subprocess.check_output(command, shell=True).decode("utf-8")
+
+ def onHighlightedCodeUpdate(self, range_in_files: List[RangeInFileWithContents]):
+ """Called when highlighted code is updated"""
+ pass
+
+ def onDeleteAtIndex(self, index: int):
+ """Called when a step is deleted at a given index"""
+ pass
+
+ async def showDiff(self, filepath: str, replacement: str, step_index: int):
+ """Show a diff"""
+ pass
+
+ def subscribeToFilesCreated(self, callback: Callable[[List[str]], None]):
+ """Subscribe to files created event"""
+ pass
+
+ def subscribeToFilesDeleted(self, callback: Callable[[List[str]], None]):
+ """Subscribe to files deleted event"""
+ pass
+
+ def subscribeToFilesRenamed(self, callback: Callable[[List[str], List[str]], None]):
+ """Subscribe to files renamed event"""
+ pass
+
+ def subscribeToFileSaved(self, callback: Callable[[str, str], None]):
+ """Subscribe to file saved event"""
+ pass
+
+ def onFilesCreated(self, filepaths: List[str]):
+ """Called when files are created"""
+ pass
+
+ def onFilesDeleted(self, filepaths: List[str]):
+ """Called when files are deleted"""
+ pass
+
+ def onFilesRenamed(self, old_filepaths: List[str], new_filepaths: List[str]):
+ """Called when files are renamed"""
+ pass
+
+ def onFileSaved(self, filepath: str, contents: str):
+ """Called when a file is saved"""
+ pass
+
+ async def fileExists(self, filepath: str) -> Coroutine[Any, Any, str]:
+ """Check if a file exists"""
+ return self.filesystem.exists(filepath)
+
+ async def getTerminalContents(self) -> Coroutine[Any, Any, str]:
+ return ""
+
+ async def listDirectoryContents(
+ self, directory: str, recursive: bool = False
+ ) -> List[str]:
+ return self.filesystem.list_directory_contents(directory, recursive=recursive)
diff --git a/server/continuedev/libs/__init__.py b/server/continuedev/libs/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/server/continuedev/libs/__init__.py
diff --git a/server/continuedev/libs/chroma/.gitignore b/server/continuedev/libs/chroma/.gitignore
new file mode 100644
index 00000000..6320cd24
--- /dev/null
+++ b/server/continuedev/libs/chroma/.gitignore
@@ -0,0 +1 @@
+data
\ No newline at end of file
diff --git a/server/continuedev/libs/chroma/query.py b/server/continuedev/libs/chroma/query.py
new file mode 100644
index 00000000..d77cce49
--- /dev/null
+++ b/server/continuedev/libs/chroma/query.py
@@ -0,0 +1,218 @@
+import json
+import os
+import subprocess
+from functools import cached_property
+from typing import List, Tuple
+
+from llama_index import (
+ Document,
+ GPTVectorStoreIndex,
+ StorageContext,
+ load_index_from_storage,
+)
+from llama_index.langchain_helpers.text_splitter import TokenTextSplitter
+
+from ..util.logging import logger
+from .update import filter_ignored_files, load_gpt_index_documents
+
+
+class ChromaIndexManager:
+ workspace_dir: str
+
+ def __init__(self, workspace_dir: str):
+ self.workspace_dir = workspace_dir
+
+ @cached_property
+ def current_commit(self) -> str:
+ """Get the current commit."""
+ return (
+ subprocess.check_output(
+ ["git", "rev-parse", "HEAD"], cwd=self.workspace_dir
+ )
+ .decode("utf-8")
+ .strip()
+ )
+
+ @cached_property
+ def current_branch(self) -> str:
+ """Get the current branch."""
+ return (
+ subprocess.check_output(
+ ["git", "rev-parse", "--abbrev-ref", "HEAD"], cwd=self.workspace_dir
+ )
+ .decode("utf-8")
+ .strip()
+ )
+
+ @cached_property
+ def index_dir(self) -> str:
+ return os.path.join(
+ self.workspace_dir, ".continue", "chroma", self.current_branch
+ )
+
+ @cached_property
+ def git_root_dir(self):
+ """Get the root directory of a Git repository."""
+ try:
+ return (
+ subprocess.check_output(
+ ["git", "rev-parse", "--show-toplevel"], cwd=self.workspace_dir
+ )
+ .strip()
+ .decode()
+ )
+ except subprocess.CalledProcessError:
+ return None
+
+ def check_index_exists(self):
+ return os.path.exists(os.path.join(self.index_dir, "metadata.json"))
+
+ def create_codebase_index(self):
+ """Create a new index for the current branch."""
+ if not self.check_index_exists():
+ os.makedirs(self.index_dir)
+ else:
+ return
+
+ documents = load_gpt_index_documents(self.workspace_dir)
+
+ chunks = {}
+ doc_chunks = []
+ for doc in documents:
+ text_splitter = TokenTextSplitter()
+ try:
+ text_chunks = text_splitter.split_text(doc.text)
+            except Exception:
+                logger.warning(f"Error splitting text (probably found special token): {doc.text}")
+                continue
+ filename = doc.extra_info["filename"]
+ chunks[filename] = len(text_chunks)
+ for i, text in enumerate(text_chunks):
+ doc_chunks.append(Document(text, doc_id=f"{filename}::{i}"))
+
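+        # metadata.json records the indexed commit and a per-file chunk count, roughly:
+        # {"commit": "<git sha>", "chunks": {"path/to/file.py": 3, ...}}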
+ with open(f"{self.index_dir}/metadata.json", "w") as f:
+ json.dump({"commit": self.current_commit, "chunks": chunks}, f, indent=4)
+
+ index = GPTVectorStoreIndex([])
+
+ for chunk in doc_chunks:
+ index.insert(chunk)
+
+ # d = 1536 # Dimension of text-ada-embedding-002
+ # faiss_index = faiss.IndexFlatL2(d)
+ # index = GPTFaissIndex(documents, faiss_index=faiss_index)
+ # index.save_to_disk(f"{index_dir_for(branch)}/index.json", faiss_index_save_path=f"{index_dir_for(branch)}/index_faiss_core.index")
+
+ index.storage_context.persist(persist_dir=self.index_dir)
+
+ logger.debug("Codebase index created")
+
+ def get_modified_deleted_files(self) -> Tuple[List[str], List[str]]:
+ """Get a list of all files that have been modified since the last commit."""
+ metadata = f"{self.index_dir}/metadata.json"
+ with open(metadata, "r") as f:
+ previous_commit = json.load(f)["commit"]
+
+ modified_deleted_files = (
+ subprocess.check_output(
+ ["git", "diff", "--name-only", previous_commit, self.current_commit]
+ )
+ .decode("utf-8")
+ .strip()
+ )
+ modified_deleted_files = modified_deleted_files.split("\n")
+ modified_deleted_files = [f for f in modified_deleted_files if f]
+
+ deleted_files = [
+ f
+ for f in modified_deleted_files
+ if not os.path.exists(os.path.join(self.workspace_dir, f))
+ ]
+ modified_files = [
+ f
+ for f in modified_deleted_files
+ if os.path.exists(os.path.join(self.workspace_dir, f))
+ ]
+
+ return filter_ignored_files(
+ modified_files, self.index_dir
+ ), filter_ignored_files(deleted_files, self.index_dir)
+
+ def update_codebase_index(self):
+ """Update the index with a list of files."""
+
+ if not self.check_index_exists():
+ self.create_codebase_index()
+ else:
+ # index = GPTFaissIndex.load_from_disk(f"{index_dir_for(branch)}/index.json", faiss_index_save_path=f"{index_dir_for(branch)}/index_faiss_core.index")
+ index = GPTVectorStoreIndex.load_from_disk(f"{self.index_dir}/index.json")
+ modified_files, deleted_files = self.get_modified_deleted_files()
+
+ with open(f"{self.index_dir}/metadata.json", "r") as f:
+ metadata = json.load(f)
+
+ for file in deleted_files:
+ num_chunks = metadata["chunks"][file]
+ for i in range(num_chunks):
+ index.delete(f"{file}::{i}")
+
+ del metadata["chunks"][file]
+
+ logger.debug(f"Deleted {file}")
+
+ for file in modified_files:
+ if file in metadata["chunks"]:
+ num_chunks = metadata["chunks"][file]
+
+ for i in range(num_chunks):
+ index.delete(f"{file}::{i}")
+
+ logger.debug(f"Deleted old version of {file}")
+
+ with open(file, "r") as f:
+ text = f.read()
+
+ text_splitter = TokenTextSplitter()
+ text_chunks = text_splitter.split_text(text)
+
+ for i, text in enumerate(text_chunks):
+ index.insert(Document(text, doc_id=f"{file}::{i}"))
+
+ metadata["chunks"][file] = len(text_chunks)
+
+ logger.debug(f"Inserted new version of {file}")
+
+ metadata["commit"] = self.current_commit
+
+ with open(f"{self.index_dir}/metadata.json", "w") as f:
+ json.dump(metadata, f, indent=4)
+
+ logger.debug("Codebase index updated")
+
+ def query_codebase_index(self, query: str) -> str:
+ """Query the codebase index."""
+ if not self.check_index_exists():
+ logger.debug(f"No index found for the codebase at {self.index_dir}")
+ return ""
+
+ storage_context = StorageContext.from_defaults(persist_dir=self.index_dir)
+ index = load_index_from_storage(storage_context)
+ # index = GPTVectorStoreIndex.load_from_disk(path)
+ engine = index.as_query_engine()
+ return engine.query(query)
+
+ def query_additional_index(self, query: str) -> str:
+ """Query the additional index."""
+ index = GPTVectorStoreIndex.load_from_disk(
+ os.path.join(self.index_dir, "additional_index.json")
+ )
+ return index.query(query)
+
+ def replace_additional_index(self, info: str):
+ """Replace the additional index with the given info."""
+ with open(f"{self.index_dir}/additional_context.txt", "w") as f:
+ f.write(info)
+ documents = [Document(info)]
+ index = GPTVectorStoreIndex(documents)
+ index.save_to_disk(f"{self.index_dir}/additional_index.json")
+ logger.debug("Additional index replaced")
diff --git a/server/continuedev/libs/chroma/update.py b/server/continuedev/libs/chroma/update.py
new file mode 100644
index 00000000..7a1217f9
--- /dev/null
+++ b/server/continuedev/libs/chroma/update.py
@@ -0,0 +1,66 @@
+# import faiss
+import os
+import subprocess
+from typing import List
+
+from dotenv import load_dotenv
+from llama_index import Document, SimpleDirectoryReader
+
+load_dotenv()
+
+FILE_TYPES_TO_IGNORE = [".pyc", ".png", ".jpg", ".jpeg", ".gif", ".svg", ".ico"]
+
+
+def filter_ignored_files(files: List[str], root_dir: str):
+ """Further filter files before indexing."""
+ for file in files:
+ if (
+ file.endswith(tuple(FILE_TYPES_TO_IGNORE))
+ or file.startswith(".git")
+ or file.startswith("archive")
+ ):
+            continue
+ yield root_dir + "/" + file
+
+
+def get_git_ignored_files(root_dir: str):
+ """Get the list of ignored files in a Git repository."""
+ try:
+ output = (
+ subprocess.check_output(
+ ["git", "ls-files", "--ignored", "--others", "--exclude-standard"],
+ cwd=root_dir,
+ )
+ .strip()
+ .decode()
+ )
+ return output.split("\n")
+ except subprocess.CalledProcessError:
+ return []
+
+
+def get_all_files(root_dir: str):
+ """Get a list of all files in a directory."""
+ for dir_path, _, file_names in os.walk(root_dir):
+ for file_name in file_names:
+ yield os.path.join(os.path.relpath(dir_path, root_dir), file_name)
+
+
+def get_input_files(root_dir: str):
+ """Get a list of all files in a Git repository that are not ignored."""
+ ignored_files = set(get_git_ignored_files(root_dir))
+ all_files = set(get_all_files(root_dir))
+ nonignored_files = all_files - ignored_files
+ return filter_ignored_files(nonignored_files, root_dir)
+
+
+def load_gpt_index_documents(root: str) -> List[Document]:
+ """Loads a list of GPTIndex Documents, respecting .gitignore files."""
+ # Get input files
+ input_files = get_input_files(root)
+ # Use SimpleDirectoryReader to load the files into Documents
+ return SimpleDirectoryReader(
+ root,
+ input_files=input_files,
+ file_metadata=lambda filename: {"filename": filename},
+ ).load_data()
diff --git a/server/continuedev/libs/constants/default_config.py b/server/continuedev/libs/constants/default_config.py
new file mode 100644
index 00000000..a007eef1
--- /dev/null
+++ b/server/continuedev/libs/constants/default_config.py
@@ -0,0 +1,88 @@
+default_config = """\
+\"\"\"
+This is the Continue configuration file.
+
+See https://continue.dev/docs/customization for documentation of the available options.
+\"\"\"
+
+from continuedev.core.models import Models
+from continuedev.core.config import CustomCommand, SlashCommand, ContinueConfig
+from continuedev.libs.llm import OpenAIFreeTrial
+
+from continuedev.plugins.context_providers import (
+ DiffContextProvider,
+ TerminalContextProvider,
+ URLContextProvider,
+ GitHubIssuesContextProvider
+)
+from continuedev.plugins.steps import (
+ ClearHistoryStep,
+ CommentCodeStep,
+ EditHighlightedCodeStep,
+ GenerateShellCommandStep,
+ OpenConfigStep,
+)
+from continuedev.plugins.steps.share_session import ShareSessionStep
+
+config = ContinueConfig(
+ allow_anonymous_telemetry=True,
+ models=Models(
+ default=OpenAIFreeTrial(api_key="", model="gpt-4"),
+ summarize=OpenAIFreeTrial(api_key="", model="gpt-3.5-turbo")
+ ),
+ system_message=None,
+ temperature=0.5,
+ custom_commands=[
+ CustomCommand(
+ name="test",
+ description="Write unit tests for highlighted code",
+            prompt="Write a comprehensive set of unit tests for the selected code. It should set up, run tests that check for correctness including important edge cases, and tear down. Ensure that the tests are complete and sophisticated. Give the tests just as chat output, don't edit any file.",
+ )
+ ],
+ slash_commands=[
+ SlashCommand(
+ name="edit",
+ description="Edit highlighted code",
+ step=EditHighlightedCodeStep,
+ ),
+ SlashCommand(
+ name="config",
+ description="Customize Continue",
+ step=OpenConfigStep,
+ ),
+ SlashCommand(
+ name="comment",
+ description="Write comments for the highlighted code",
+ step=CommentCodeStep,
+ ),
+ SlashCommand(
+ name="clear",
+ description="Clear step history",
+ step=ClearHistoryStep,
+ ),
+ SlashCommand(
+ name="share",
+ description="Download and share this session",
+ step=ShareSessionStep,
+ ),
+ SlashCommand(
+ name="cmd",
+ description="Generate a shell command",
+ step=GenerateShellCommandStep,
+ ),
+ ],
+ context_providers=[
+ # GitHubIssuesContextProvider(
+ # repo_name="<your github username or organization>/<your repo name>",
+ # auth_token="<your github auth token>"
+ # ),
+ DiffContextProvider(),
+ URLContextProvider(
+ preset_urls = [
+ # Add any common urls you reference here so they appear in autocomplete
+ ]
+ ),
+ TerminalContextProvider(),
+ ],
+)
+"""
diff --git a/server/continuedev/libs/constants/main.py b/server/continuedev/libs/constants/main.py
new file mode 100644
index 00000000..f5964df6
--- /dev/null
+++ b/server/continuedev/libs/constants/main.py
@@ -0,0 +1,6 @@
+## PATHS ##
+
+CONTINUE_GLOBAL_FOLDER = ".continue"
+CONTINUE_SESSIONS_FOLDER = "sessions"
+CONTINUE_SERVER_FOLDER = "server"
+CONTINUE_SERVER_VERSION_FILE = "server_version.txt"
diff --git a/server/continuedev/libs/llm/__init__.py b/server/continuedev/libs/llm/__init__.py
new file mode 100644
index 00000000..829ffede
--- /dev/null
+++ b/server/continuedev/libs/llm/__init__.py
@@ -0,0 +1,14 @@
+from .anthropic import AnthropicLLM # noqa: F401
+from .ggml import GGML # noqa: F401
+from .google_palm_api import GooglePaLMAPI # noqa: F401
+from .hf_inference_api import HuggingFaceInferenceAPI # noqa: F401
+from .hf_tgi import HuggingFaceTGI # noqa: F401
+from .llamacpp import LlamaCpp # noqa: F401
+from .ollama import Ollama # noqa: F401
+from .openai import OpenAI # noqa: F401
+from .openai_free_trial import OpenAIFreeTrial # noqa: F401
+from .proxy_server import ProxyServer # noqa: F401
+from .queued import QueuedLLM # noqa: F401
+from .replicate import ReplicateLLM # noqa: F401
+from .text_gen_interface import TextGenUI # noqa: F401
+from .together import TogetherLLM # noqa: F401
diff --git a/server/continuedev/libs/llm/anthropic.py b/server/continuedev/libs/llm/anthropic.py
new file mode 100644
index 00000000..7d0708f1
--- /dev/null
+++ b/server/continuedev/libs/llm/anthropic.py
@@ -0,0 +1,74 @@
+from typing import Any, Callable, Coroutine
+
+from anthropic import AI_PROMPT, HUMAN_PROMPT, AsyncAnthropic
+
+from .base import LLM, CompletionOptions
+from .prompts.chat import anthropic_template_messages
+
+
+class AnthropicLLM(LLM):
+ """
+ Import the `AnthropicLLM` class and set it as the default model:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.anthropic import AnthropicLLM
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=AnthropicLLM(api_key="<API_KEY>", model="claude-2")
+ )
+ )
+ ```
+
+ Claude 2 is not yet publicly released. You can request early access [here](https://www.anthropic.com/earlyaccess).
+
+ """
+
+ api_key: str
+ "Anthropic API key"
+
+ model: str = "claude-2"
+
+ _async_client: AsyncAnthropic = None
+
+ template_messages: Callable = anthropic_template_messages
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ async def start(self, **kwargs):
+ await super().start(**kwargs)
+ self._async_client = AsyncAnthropic(api_key=self.api_key)
+
+ if self.model == "claude-2":
+ self.context_length = 100_000
+
+ def collect_args(self, options: CompletionOptions):
+ options.stop = None
+ args = super().collect_args(options)
+
+ if "max_tokens" in args:
+ args["max_tokens_to_sample"] = args["max_tokens"]
+ del args["max_tokens"]
+ if "frequency_penalty" in args:
+ del args["frequency_penalty"]
+ if "presence_penalty" in args:
+ del args["presence_penalty"]
+ return args
+
+ async def _stream_complete(self, prompt: str, options):
+ args = self.collect_args(options)
+ prompt = f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}"
+
+ async for chunk in await self._async_client.completions.create(
+ prompt=prompt, stream=True, **args
+ ):
+ yield chunk.completion
+
+ async def _complete(self, prompt: str, options) -> Coroutine[Any, Any, str]:
+ args = self.collect_args(options)
+ prompt = f"{HUMAN_PROMPT} {prompt} {AI_PROMPT}"
+ return (
+ await self._async_client.completions.create(prompt=prompt, **args)
+ ).completion
diff --git a/server/continuedev/libs/llm/base.py b/server/continuedev/libs/llm/base.py
new file mode 100644
index 00000000..d77cb9fc
--- /dev/null
+++ b/server/continuedev/libs/llm/base.py
@@ -0,0 +1,458 @@
+import ssl
+from typing import Any, Callable, Coroutine, Dict, Generator, List, Optional, Union
+
+import aiohttp
+import certifi
+from pydantic import Field, validator
+
+from ...core.main import ChatMessage
+from ...models.main import ContinueBaseModel
+from ..util.count_tokens import (
+ DEFAULT_ARGS,
+ DEFAULT_MAX_TOKENS,
+ compile_chat_messages,
+ count_tokens,
+ format_chat_messages,
+ prune_raw_prompt_from_top,
+)
+from ..util.devdata import dev_data_logger
+from ..util.telemetry import posthog_logger
+
+
+class CompletionOptions(ContinueBaseModel):
+ """Options for the completion."""
+
+ @validator(
+ "*",
+ pre=True,
+ always=True,
+ )
+ def ignore_none_and_set_default(cls, value, field):
+ return value if value is not None else field.default
+
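+    # Any option explicitly passed as None falls back to the field's declared default,
+    # so callers can forward unset options without overriding the defaults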
+ model: Optional[str] = Field(None, description="The model name")
+ temperature: Optional[float] = Field(
+ None, description="The temperature of the completion."
+ )
+ top_p: Optional[float] = Field(None, description="The top_p of the completion.")
+ top_k: Optional[int] = Field(None, description="The top_k of the completion.")
+ presence_penalty: Optional[float] = Field(
+        None, description="The presence penalty of the completion."
+ )
+ frequency_penalty: Optional[float] = Field(
+ None, description="The frequency penalty of the completion."
+ )
+ stop: Optional[List[str]] = Field(
+ None, description="The stop tokens of the completion."
+ )
+ max_tokens: int = Field(
+ DEFAULT_MAX_TOKENS, description="The maximum number of tokens to generate."
+ )
+ functions: Optional[List[Any]] = Field(
+ None, description="The functions/tools to make available to the model."
+ )
+
+
+class LLM(ContinueBaseModel):
+ title: Optional[str] = Field(
+ None,
+ description="A title that will identify this model in the model selection dropdown",
+ )
+
+ unique_id: Optional[str] = Field(None, description="The unique ID of the user.")
+ model: str = Field(
+ ..., description="The name of the model to be used (e.g. gpt-4, codellama)"
+ )
+
+ system_message: Optional[str] = Field(
+ None, description="A system message that will always be followed by the LLM"
+ )
+
+ context_length: int = Field(
+ 2048,
+ description="The maximum context length of the LLM in tokens, as counted by count_tokens.",
+ )
+
+ stop_tokens: Optional[List[str]] = Field(
+ None, description="Tokens that will stop the completion."
+ )
+ temperature: Optional[float] = Field(
+ None, description="The temperature of the completion."
+ )
+ top_p: Optional[float] = Field(None, description="The top_p of the completion.")
+ top_k: Optional[int] = Field(None, description="The top_k of the completion.")
+ presence_penalty: Optional[float] = Field(
+        None, description="The presence penalty of the completion."
+ )
+ frequency_penalty: Optional[float] = Field(
+ None, description="The frequency penalty of the completion."
+ )
+
+ timeout: Optional[int] = Field(
+ 300,
+ description="Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts.",
+ )
+ verify_ssl: Optional[bool] = Field(
+ None, description="Whether to verify SSL certificates for requests."
+ )
+    ca_bundle_path: Optional[str] = Field(
+ None,
+ description="Path to a custom CA bundle to use when making the HTTP request",
+ )
+ proxy: Optional[str] = Field(
+ None,
+ description="Proxy URL to use when making the HTTP request",
+ )
+ headers: Optional[Dict[str, str]] = Field(
+ None,
+ description="Headers to use when making the HTTP request",
+ )
+ prompt_templates: dict = Field(
+ {},
+ description='A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.',
+ )
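+    # Illustrative sketch (not from the original source): a prompt_templates dict that
+    # overrides the /edit prompt, using the variables the edit step passes in
+    # (code_to_edit, user_input, file_prefix, file_suffix):
+    # prompt_templates = {
+    #     "edit": "{{{file_prefix}}}\n{{{code_to_edit}}}\n{{{file_suffix}}}\n\nRewrite the code above to satisfy: {{{user_input}}}"
+    # }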
+
+ template_messages: Optional[Callable[[List[Dict[str, str]]], str]] = Field(
+ None,
+ description="A function that takes a list of messages and returns a prompt. This ensures that models like llama2, which are trained on specific chat formats, will always receive input in that format.",
+ )
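+    # e.g. the GGML class below sets template_messages=llama2_template_messages (from
+    # .prompts.chat) so chat messages are folded into a llama2-style prompt string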
+ write_log: Optional[Callable[[str], None]] = Field(
+ None,
+ description="A function that is called upon every prompt and completion, by default to log to the file which can be viewed by clicking on the magnifying glass.",
+ )
+
+ api_key: Optional[str] = Field(
+ None, description="The API key for the LLM provider."
+ )
+
+ class Config:
+ arbitrary_types_allowed = True
+ extra = "allow"
+ fields = {
+ "title": {
+ "description": "A title that will identify this model in the model selection dropdown"
+ },
+ "system_message": {
+ "description": "A system message that will always be followed by the LLM"
+ },
+ "context_length": {
+ "description": "The maximum context length of the LLM in tokens, as counted by count_tokens."
+ },
+ "unique_id": {"description": "The unique ID of the user."},
+ "model": {
+ "description": "The name of the model to be used (e.g. gpt-4, codellama)"
+ },
+ "timeout": {
+ "description": "Set the timeout for each request to the LLM. If you are running a local LLM that takes a while to respond, you might want to set this to avoid timeouts."
+ },
+ "prompt_templates": {
+ "description": 'A dictionary of prompt templates that can be used to customize the behavior of the LLM in certain situations. For example, set the "edit" key in order to change the prompt that is used for the /edit slash command. Each value in the dictionary is a string templated in mustache syntax, and filled in at runtime with the variables specific to the situation. See the documentation for more information.'
+ },
+ "template_messages": {
+ "description": "A function that takes a list of messages and returns a prompt. This ensures that models like llama2, which are trained on specific chat formats, will always receive input in that format."
+ },
+ "write_log": {
+ "description": "A function that is called upon every prompt and completion, by default to log to the file which can be viewed by clicking on the magnifying glass."
+ },
+ "api_key": {"description": "The API key for the LLM provider."},
+ "verify_ssl": {
+ "description": "Whether to verify SSL certificates for requests."
+ },
+ "ca_bundle_path": {
+ "description": "Path to a custom CA bundle to use when making the HTTP request"
+ },
+ "headers": {
+ "description": "Headers to use when making the HTTP request"
+ },
+ "proxy": {"description": "Proxy URL to use when making the HTTP request"},
+ "stop_tokens": {"description": "Tokens that will stop the completion."},
+ "temperature": {
+ "description": "The sampling temperature used for generation."
+ },
+ "top_p": {
+ "description": "The top_p sampling parameter used for generation."
+ },
+ "top_k": {
+ "description": "The top_k sampling parameter used for generation."
+ },
+ "presence_penalty": {
+ "description": "The presence penalty used for completions."
+ },
+ "frequency_penalty": {
+ "description": "The frequency penalty used for completions."
+ },
+ }
+
+ def dict(self, **kwargs):
+ original_dict = super().dict(**kwargs)
+ original_dict.pop("write_log")
+ if self.template_messages is not None:
+ original_dict["template_messages"] = self.template_messages.__name__
+ original_dict.pop("unique_id")
+ original_dict["class_name"] = self.__class__.__name__
+ return original_dict
+
+ async def start(
+ self, write_log: Callable[[str], None] = None, unique_id: Optional[str] = None
+ ):
+ """Start the connection to the LLM."""
+ self.write_log = write_log
+ self.unique_id = unique_id
+
+ async def stop(self):
+ """Stop the connection to the LLM."""
+ pass
+
+ def create_client_session(self):
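+        # When verify_ssl is explicitly False, skip certificate verification entirely;
+        # otherwise build an SSL context from ca_bundle_path, falling back to certifi's bundle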
+ if self.verify_ssl is False:
+ return aiohttp.ClientSession(
+ connector=aiohttp.TCPConnector(verify_ssl=False),
+ timeout=aiohttp.ClientTimeout(total=self.timeout),
+ headers=self.headers
+ )
+ else:
+ ca_bundle_path = (
+ certifi.where() if self.ca_bundle_path is None else self.ca_bundle_path
+ )
+ ssl_context = ssl.create_default_context(cafile=ca_bundle_path)
+ return aiohttp.ClientSession(
+ connector=aiohttp.TCPConnector(ssl_context=ssl_context),
+ timeout=aiohttp.ClientTimeout(total=self.timeout),
+ headers=self.headers,
+ )
+
+ def collect_args(self, options: CompletionOptions) -> Dict[str, Any]:
+ """Collect the arguments for the LLM."""
+ args = {**DEFAULT_ARGS.copy(), "model": self.model}
+ args.update(options.dict(exclude_unset=True, exclude_none=True))
+ return args
+
+ def compile_chat_messages(
+ self,
+ options: CompletionOptions,
+ msgs: List[ChatMessage],
+ functions: Optional[List[Any]] = None,
+ ) -> List[Dict]:
+ return compile_chat_messages(
+ model_name=options.model,
+ msgs=msgs,
+ context_length=self.context_length,
+ max_tokens=options.max_tokens,
+ functions=functions,
+ system_message=self.system_message,
+ )
+
+ def template_prompt_like_messages(self, prompt: str) -> str:
+ if self.template_messages is None:
+ return prompt
+
+ msgs = [{"role": "user", "content": prompt}]
+ if self.system_message is not None:
+ msgs.insert(0, {"role": "system", "content": self.system_message})
+
+ return self.template_messages(msgs)
+
+ async def stream_complete(
+ self,
+ prompt: str,
+ raw: bool = False,
+ model: str = None,
+ temperature: float = None,
+ top_p: float = None,
+ top_k: int = None,
+ presence_penalty: float = None,
+ frequency_penalty: float = None,
+ stop: Optional[List[str]] = None,
+ max_tokens: Optional[int] = None,
+ functions: Optional[List[Any]] = None,
+ log: bool = True,
+ ) -> Generator[Union[Any, List, Dict], None, None]:
+        """Stream the completion, yielding chunks as they are generated."""
+ options = CompletionOptions(
+ model=model or self.model,
+ temperature=temperature or self.temperature,
+ top_p=top_p or self.top_p,
+ top_k=top_k or self.top_k,
+ presence_penalty=presence_penalty or self.presence_penalty,
+ frequency_penalty=frequency_penalty or self.frequency_penalty,
+ stop=stop or self.stop_tokens,
+ max_tokens=max_tokens,
+ functions=functions,
+ )
+
+ prompt = prune_raw_prompt_from_top(
+ self.model, self.context_length, prompt, options.max_tokens
+ )
+
+ if not raw:
+ prompt = self.template_prompt_like_messages(prompt)
+
+ if log:
+ self.write_log(prompt)
+
+ completion = ""
+ async for chunk in self._stream_complete(prompt=prompt, options=options):
+ yield chunk
+ completion += chunk
+
+ # if log:
+ # self.write_log(f"Completion: \n\n{completion}")
+
+ dev_data_logger.capture(
+ "tokens_generated",
+ {"model": self.model, "tokens": self.count_tokens(completion)},
+ )
+ posthog_logger.capture_event(
+ "tokens_generated",
+ {"model": self.model, "tokens": self.count_tokens(completion)},
+ )
+
+ async def complete(
+ self,
+ prompt: str,
+ raw: bool = False,
+ model: str = None,
+ temperature: float = None,
+ top_p: float = None,
+ top_k: int = None,
+ presence_penalty: float = None,
+ frequency_penalty: float = None,
+ stop: Optional[List[str]] = None,
+ max_tokens: Optional[int] = None,
+ functions: Optional[List[Any]] = None,
+ log: bool = True,
+ ) -> str:
+        """Return the full completion as a single string."""
+ options = CompletionOptions(
+ model=model or self.model,
+ temperature=temperature or self.temperature,
+ top_p=top_p or self.top_p,
+ top_k=top_k or self.top_k,
+ presence_penalty=presence_penalty or self.presence_penalty,
+ frequency_penalty=frequency_penalty or self.frequency_penalty,
+ stop=stop or self.stop_tokens,
+ max_tokens=max_tokens,
+ functions=functions,
+ )
+
+ prompt = prune_raw_prompt_from_top(
+ self.model, self.context_length, prompt, options.max_tokens
+ )
+
+ if not raw:
+ prompt = self.template_prompt_like_messages(prompt)
+
+ if log:
+ self.write_log(prompt)
+
+ completion = await self._complete(prompt=prompt, options=options)
+
+ # if log:
+ # self.write_log(f"Completion: \n\n{completion}")
+
+ dev_data_logger.capture(
+ "tokens_generated",
+ {"model": self.model, "tokens": self.count_tokens(completion)},
+ )
+ posthog_logger.capture_event(
+ "tokens_generated",
+ {"model": self.model, "tokens": self.count_tokens(completion)},
+ )
+
+ return completion
+
+ async def stream_chat(
+ self,
+ messages: List[ChatMessage],
+ model: str = None,
+ temperature: float = None,
+ top_p: float = None,
+ top_k: int = None,
+ presence_penalty: float = None,
+ frequency_penalty: float = None,
+ stop: Optional[List[str]] = None,
+ max_tokens: Optional[int] = None,
+ functions: Optional[List[Any]] = None,
+ log: bool = True,
+ ) -> Generator[Union[Any, List, Dict], None, None]:
+        """Stream the chat completion, yielding message chunks as they are generated."""
+ options = CompletionOptions(
+ model=model or self.model,
+ temperature=temperature or self.temperature,
+ top_p=top_p or self.top_p,
+ top_k=top_k or self.top_k,
+ presence_penalty=presence_penalty or self.presence_penalty,
+ frequency_penalty=frequency_penalty or self.frequency_penalty,
+ stop=stop or self.stop_tokens,
+ max_tokens=max_tokens,
+ functions=functions,
+ )
+
+ messages = self.compile_chat_messages(
+ options=options, msgs=messages, functions=functions
+ )
+ if self.template_messages is not None:
+ prompt = self.template_messages(messages)
+ else:
+ prompt = format_chat_messages(messages)
+
+ if log:
+ self.write_log(prompt)
+
+ completion = ""
+
+        # If there is no template_messages function, use the chat endpoint directly;
+        # otherwise render the messages into a single prompt and do a raw completion
+ if self.template_messages is None:
+ async for chunk in self._stream_chat(messages=messages, options=options):
+ yield chunk
+ if "content" in chunk:
+ completion += chunk["content"]
+ else:
+ async for chunk in self._stream_complete(prompt=prompt, options=options):
+ yield {"role": "assistant", "content": chunk}
+ completion += chunk
+
+ # if log:
+ # self.write_log(f"Completion: \n\n{completion}")
+
+ dev_data_logger.capture(
+ "tokens_generated",
+ {"model": self.model, "tokens": self.count_tokens(completion)},
+ )
+ posthog_logger.capture_event(
+ "tokens_generated",
+ {"model": self.model, "tokens": self.count_tokens(completion)},
+ )
+
+ def _stream_complete(
+ self, prompt, options: CompletionOptions
+ ) -> Generator[str, None, None]:
+ """Stream the completion through generator."""
+ raise NotImplementedError
+
+ async def _complete(
+ self, prompt: str, options: CompletionOptions
+ ) -> Coroutine[Any, Any, str]:
+ """Return the completion of the text with the given temperature."""
+ completion = ""
+ async for chunk in self._stream_complete(prompt=prompt, options=options):
+ completion += chunk
+ return completion
+
+ async def _stream_chat(
+ self, messages: List[ChatMessage], options: CompletionOptions
+ ) -> Generator[Union[Any, List, Dict], None, None]:
+ """Stream the chat through generator."""
+ if self.template_messages is None:
+ raise NotImplementedError(
+ "You must either implement template_messages or _stream_chat"
+ )
+
+ async for chunk in self._stream_complete(
+ prompt=self.template_messages(messages), options=options
+ ):
+ yield {"role": "assistant", "content": chunk}
+
+ def count_tokens(self, text: str):
+ """Return the number of tokens in the given text."""
+ return count_tokens(self.model, text)
diff --git a/server/continuedev/libs/llm/ggml.py b/server/continuedev/libs/llm/ggml.py
new file mode 100644
index 00000000..55d580a8
--- /dev/null
+++ b/server/continuedev/libs/llm/ggml.py
@@ -0,0 +1,226 @@
+import json
+from typing import Any, Callable, Coroutine, Dict, List, Literal, Optional
+
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from ..util.logging import logger
+from .base import LLM, CompletionOptions
+from .openai import CHAT_MODELS
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class GGML(LLM):
+ """
+ See our [5 minute quickstart](https://github.com/continuedev/ggml-server-example) to run any model locally with ggml. While these models don't yet perform as well, they are free, entirely private, and run offline.
+
+ Once the model is running on localhost:8000, change `~/.continue/config.py` to look like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.ggml import GGML
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=GGML(
+ max_context_length=2048,
+ server_url="http://localhost:8000")
+ )
+ )
+ ```
+ """
+
+ server_url: str = Field(
+ "http://localhost:8000",
+ description="URL of the OpenAI-compatible server where the model is being served",
+ )
+ model: str = Field(
+ "ggml", description="The name of the model to use (optional for the GGML class)"
+ )
+
+ api_base: Optional[str] = Field(None, description="OpenAI API base URL.")
+
+ api_type: Optional[Literal["azure", "openai"]] = Field(
+ None, description="OpenAI API type."
+ )
+
+ api_version: Optional[str] = Field(
+ None, description="OpenAI API version. For use with Azure OpenAI Service."
+ )
+
+ engine: Optional[str] = Field(
+ None, description="OpenAI engine. For use with Azure OpenAI Service."
+ )
+
+ template_messages: Optional[
+ Callable[[List[Dict[str, str]]], str]
+ ] = llama2_template_messages
+
+ prompt_templates = {
+ "edit": simplified_edit_prompt,
+ }
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def get_headers(self):
+ headers = {
+ "Content-Type": "application/json",
+ }
+ if self.api_key is not None:
+ if self.api_type == "azure":
+ headers["api-key"] = self.api_key
+ else:
+ headers["Authorization"] = f"Bearer {self.api_key}"
+
+ return headers
+
+ def get_full_server_url(self, endpoint: str):
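+        # e.g. "completions" -> "http://localhost:8000/v1/completions" with the defaults,
+        # or the Azure deployments URL when api_type == "azure"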
+ endpoint = endpoint.lstrip("/").rstrip("/")
+
+ if self.api_type == "azure":
+ if self.engine is None or self.api_version is None or self.api_base is None:
+ raise Exception(
+ "For Azure OpenAI Service, you must specify engine, api_version, and api_base."
+ )
+
+ return f"{self.api_base}/openai/deployments/{self.engine}/{endpoint}?api-version={self.api_version}"
+ else:
+ return f"{self.server_url}/v1/{endpoint}"
+
+ async def _raw_stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+
+ async with self.create_client_session() as client_session:
+ async with client_session.post(
+ self.get_full_server_url(endpoint="completions"),
+ json={
+ "prompt": prompt,
+ "stream": True,
+ **args,
+ },
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status != 200:
+ raise Exception(
+                        f"Error calling /completions endpoint: {resp.status}"
+ )
+
+ async for line in resp.content.iter_any():
+ if line:
+ chunks = line.decode("utf-8")
+ for chunk in chunks.split("\n"):
+ if (
+ chunk.startswith(": ping - ")
+ or chunk.startswith("data: [DONE]")
+ or chunk.strip() == ""
+ ):
+ continue
+ elif chunk.startswith("data: "):
+ chunk = chunk[6:]
+ try:
+ j = json.loads(chunk)
+ except Exception:
+ continue
+ if (
+ "choices" in j
+ and len(j["choices"]) > 0
+ and "text" in j["choices"][0]
+ ):
+ yield j["choices"][0]["text"]
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ args = self.collect_args(options)
+
+ async def generator():
+ async with self.create_client_session() as client_session:
+ async with client_session.post(
+ self.get_full_server_url(endpoint="chat/completions"),
+ json={"messages": messages, "stream": True, **args},
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status != 200:
+ raise Exception(
+ f"Error calling /chat/completions endpoint: {resp.status}"
+ )
+
+ async for line, end in resp.content.iter_chunks():
+ json_chunk = line.decode("utf-8")
+ chunks = json_chunk.split("\n")
+ for chunk in chunks:
+ if (
+ chunk.strip() == ""
+                                or chunk.startswith(": ping - ")
+                                or chunk.startswith("data: [DONE]")
+ ):
+ continue
+ try:
+ yield json.loads(chunk[6:])["choices"][0]["delta"]
+                            except Exception:
+                                pass
+
+        # The first attempt quite often fails but a retry succeeds, so try once more
+ try:
+ async for chunk in generator():
+ yield chunk
+ except Exception as e:
+ logger.warning(f"Error calling /chat/completions endpoint: {e}")
+ async for chunk in generator():
+ yield chunk
+
+ async def _raw_complete(self, prompt: str, options) -> Coroutine[Any, Any, str]:
+ args = self.collect_args(options)
+
+ async with self.create_client_session() as client_session:
+ async with client_session.post(
+ self.get_full_server_url(endpoint="completions"),
+ json={
+ "prompt": prompt,
+ **args,
+ },
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status != 200:
+ raise Exception(
+                        f"Error calling /completions endpoint: {resp.status}"
+ )
+
+ text = await resp.text()
+ try:
+ completion = json.loads(text)["choices"][0]["text"]
+ return completion
+ except Exception as e:
+ raise Exception(
+ f"Error calling /completion endpoint: {e}\n\nResponse text: {text}"
+ )
+
+ async def _complete(self, prompt: str, options: CompletionOptions):
+ completion = ""
+ if self.model in CHAT_MODELS:
+ async for chunk in self._stream_chat(
+ [{"role": "user", "content": prompt}], options
+ ):
+ if "content" in chunk:
+ completion += chunk["content"]
+
+ else:
+ async for chunk in self._raw_stream_complete(prompt, options):
+ completion += chunk
+
+ return completion
+
+ async def _stream_complete(self, prompt, options: CompletionOptions):
+ if self.model in CHAT_MODELS:
+ async for chunk in self._stream_chat(
+ [{"role": "user", "content": prompt}], options
+ ):
+ if "content" in chunk:
+ yield chunk["content"]
+
+ else:
+ async for chunk in self._raw_stream_complete(prompt, options):
+ yield chunk
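To make the Azure-vs-default routing in `get_full_server_url` above easy to sanity-check, here is a minimal standalone sketch of the same logic. The field names mirror the class; the Azure resource URL, deployment name, and API version in the example call are placeholders, not values from this PR.

```python
# Standalone sketch of the URL routing used above (example values are illustrative only).
def full_server_url(
    endpoint: str,
    *,
    api_type=None,
    api_base=None,
    engine=None,
    api_version=None,
    server_url="http://localhost:8000",
) -> str:
    endpoint = endpoint.strip("/")
    if api_type == "azure":
        if not (engine and api_version and api_base):
            raise ValueError("Azure OpenAI Service requires engine, api_version, and api_base.")
        return f"{api_base}/openai/deployments/{engine}/{endpoint}?api-version={api_version}"
    return f"{server_url}/v1/{endpoint}"


# Default: http://localhost:8000/v1/chat/completions
print(full_server_url("chat/completions"))
# Azure: https://example.openai.azure.com/openai/deployments/gpt-35-turbo/chat/completions?api-version=2023-05-15
print(
    full_server_url(
        "chat/completions",
        api_type="azure",
        api_base="https://example.openai.azure.com",
        engine="gpt-35-turbo",
        api_version="2023-05-15",
    )
)
```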
diff --git a/server/continuedev/libs/llm/google_palm_api.py b/server/continuedev/libs/llm/google_palm_api.py
new file mode 100644
index 00000000..3379fefe
--- /dev/null
+++ b/server/continuedev/libs/llm/google_palm_api.py
@@ -0,0 +1,50 @@
+from typing import List
+
+import requests
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM
+
+
+class GooglePaLMAPI(LLM):
+ """
+ The Google PaLM API is currently in public preview, so production applications are not supported yet. However, you can [create an API key in Google MakerSuite](https://makersuite.google.com/u/2/app/apikey) and begin trying out the `chat-bison-001` model. Change `~/.continue/config.py` to look like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.core.models import Models
+ from continuedev.libs.llm.google_palm_api import GooglePaLMAPI
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=GooglePaLMAPI(
+ model="chat-bison-001"
+ api_key="<MAKERSUITE_API_KEY>",
+ )
+ )
+ ```
+ """
+
+ api_key: str = Field(..., description="Google PaLM API key")
+
+ model: str = "chat-bison-001"
+
+ async def _stream_complete(self, prompt, options):
+ api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+ body = {"prompt": {"messages": [{"content": prompt}]}}
+ # NOTE: requests is synchronous, so this call blocks the event loop while waiting
+ response = requests.post(api_url, json=body)
+ yield response.json()["candidates"][0]["content"]
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ msg_lst = []
+ for message in messages:
+ msg_lst.append({"content": message["content"]})
+
+ api_url = f"https://generativelanguage.googleapis.com/v1beta2/models/{self.model}:generateMessage?key={self.api_key}"
+ body = {"prompt": {"messages": msg_lst}}
+ response = requests.post(api_url, json=body)
+ yield {
+ "content": response.json()["candidates"][0]["content"],
+ "role": "assistant",
+ }
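For quick manual testing outside the class, the same PaLM `generateMessage` REST endpoint can be called directly. The sketch below assumes a valid MakerSuite key (the key below is a placeholder) and reads the `candidates` field exactly as `_stream_complete` does above.

```python
# Minimal sketch of a direct PaLM generateMessage call (API key is a placeholder).
import requests

API_KEY = "<MAKERSUITE_API_KEY>"
MODEL = "chat-bison-001"

url = f"https://generativelanguage.googleapis.com/v1beta2/models/{MODEL}:generateMessage?key={API_KEY}"
body = {"prompt": {"messages": [{"content": "Write a one-line docstring for a bubble sort."}]}}

resp = requests.post(url, json=body, timeout=30)
resp.raise_for_status()
# The class above yields this same field as a single chunk.
print(resp.json()["candidates"][0]["content"])
```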
diff --git a/server/continuedev/libs/llm/hf_inference_api.py b/server/continuedev/libs/llm/hf_inference_api.py
new file mode 100644
index 00000000..990ec7c8
--- /dev/null
+++ b/server/continuedev/libs/llm/hf_inference_api.py
@@ -0,0 +1,78 @@
+from typing import Callable, Dict, List, Union
+
+from huggingface_hub import InferenceClient
+from pydantic import Field
+
+from .base import LLM, CompletionOptions
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class HuggingFaceInferenceAPI(LLM):
+ """
+ Hugging Face Inference API is a great option for newly released language models. Sign up for an account and add billing [here](https://huggingface.co/settings/billing), then open the Inference Endpoints page [here](https://ui.endpoints.huggingface.co), click "New endpoint", fill out the form (e.g. select a model like [WizardCoder-Python-34B-V1.0](https://huggingface.co/WizardLM/WizardCoder-Python-34B-V1.0)), and deploy your model by clicking "Create Endpoint". Change `~/.continue/config.py` to look like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.core.models import Models
+ from continuedev.libs.llm.hf_inference_api import HuggingFaceInferenceAPI
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=HuggingFaceInferenceAPI(
+ endpoint_url="<INFERENCE_API_ENDPOINT_URL>",
+ hf_token="<HUGGING_FACE_TOKEN>",
+ )
+ )
+ )
+ ```
+ """
+
+ model: str = Field(
+ "Hugging Face Inference API",
+ description="The name of the model to use (optional for the HuggingFaceInferenceAPI class)",
+ )
+ hf_token: str = Field(..., description="Your Hugging Face API token")
+ endpoint_url: str = Field(
+ None, description="Your Hugging Face Inference API endpoint URL"
+ )
+
+ template_messages: Union[
+ Callable[[List[Dict[str, str]]], str], None
+ ] = llama2_template_messages
+
+ prompt_templates = {
+ "edit": simplified_edit_prompt,
+ }
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def collect_args(self, options: CompletionOptions):
+ options.stop = None
+ args = super().collect_args(options)
+
+ if "max_tokens" in args:
+ args["max_new_tokens"] = args["max_tokens"]
+ del args["max_tokens"]
+ if "stop" in args:
+ args["stop_sequences"] = args["stop"]
+ del args["stop"]
+
+ return args
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+
+ client = InferenceClient(self.endpoint_url, token=self.hf_token)
+
+ stream = client.text_generation(prompt, stream=True, details=True, **args)
+
+ for r in stream:
+ # skip special tokens
+ if r.token.special:
+ continue
+ # stop if we encounter a stop sequence
+ if options.stop is not None:
+ if r.token.text in options.stop:
+ break
+ yield r.token.text
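The interesting part of `collect_args` above is the renaming of OpenAI-style parameters into the names that `huggingface_hub`'s `text_generation` expects. A small standalone sketch of that mapping (the sample values are arbitrary):

```python
# Sketch of the parameter mapping performed by collect_args above.
def to_hf_args(args: dict) -> dict:
    args = dict(args)  # avoid mutating the caller's dict
    if "max_tokens" in args:
        args["max_new_tokens"] = args.pop("max_tokens")
    if "stop" in args:
        args["stop_sequences"] = args.pop("stop")
    return args


print(to_hf_args({"max_tokens": 256, "stop": ["</s>"], "temperature": 0.2}))
# {'temperature': 0.2, 'max_new_tokens': 256, 'stop_sequences': ['</s>']}
```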
diff --git a/server/continuedev/libs/llm/hf_tgi.py b/server/continuedev/libs/llm/hf_tgi.py
new file mode 100644
index 00000000..62458db4
--- /dev/null
+++ b/server/continuedev/libs/llm/hf_tgi.py
@@ -0,0 +1,65 @@
+import json
+from typing import Any, Callable, List
+
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM, CompletionOptions
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class HuggingFaceTGI(LLM):
+ model: str = "huggingface-tgi"
+ server_url: str = Field(
+ "http://localhost:8080", description="URL of your TGI server"
+ )
+
+ template_messages: Callable[[List[ChatMessage]], str] = llama2_template_messages
+
+ prompt_templates = {
+ "edit": simplified_edit_prompt,
+ }
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def collect_args(self, options: CompletionOptions) -> Any:
+ args = super().collect_args(options)
+ args = {**args, "max_new_tokens": args.get("max_tokens", 1024), "best_of": 1}
+ args.pop("max_tokens", None)
+ args.pop("model", None)
+ args.pop("functions", None)
+ return args
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+
+ async with self.create_client_session() as client_session:
+ async with client_session.post(
+ f"{self.server_url}/generate_stream",
+ json={"inputs": prompt, "parameters": args},
+ headers={"Content-Type": "application/json"},
+ proxy=self.proxy,
+ ) as resp:
+ async for line in resp.content.iter_any():
+ if line:
+ text = line.decode("utf-8")
+ chunks = text.split("\n")
+
+ for chunk in chunks:
+ if chunk.startswith("data: "):
+ chunk = chunk[len("data: ") :]
+ elif chunk.startswith("data:"):
+ chunk = chunk[len("data:") :]
+
+ if chunk.strip() == "":
+ continue
+
+ try:
+ json_chunk = json.loads(chunk)
+ except Exception as e:
+ print(f"Error parsing JSON: {e}")
+ continue
+
+ yield json_chunk["token"]["text"]
diff --git a/server/continuedev/libs/llm/hugging_face.py b/server/continuedev/libs/llm/hugging_face.py
new file mode 100644
index 00000000..c2e934c0
--- /dev/null
+++ b/server/continuedev/libs/llm/hugging_face.py
@@ -0,0 +1,19 @@
+# TODO: This class is far out of date
+
+from transformers import AutoModelForCausalLM, AutoTokenizer
+
+from .llm import LLM
+
+
+class HuggingFace(LLM):
+ def __init__(self, model_path: str = "Salesforce/codegen-2B-mono"):
+ self.model_path = model_path
+ self.tokenizer = AutoTokenizer.from_pretrained(model_path)
+ self.model = AutoModelForCausalLM.from_pretrained(model_path)
+
+ def complete(self, prompt: str, **kwargs):
+ args = {"max_tokens": 100}
+ args.update(kwargs)
+ input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids
+ generated_ids = self.model.generate(input_ids, max_length=args["max_tokens"])
+ return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
diff --git a/server/continuedev/libs/llm/llamacpp.py b/server/continuedev/libs/llm/llamacpp.py
new file mode 100644
index 00000000..bc856a52
--- /dev/null
+++ b/server/continuedev/libs/llm/llamacpp.py
@@ -0,0 +1,86 @@
+import json
+from typing import Any, Callable, Dict
+
+from pydantic import Field
+
+from .base import LLM
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class LlamaCpp(LLM):
+ """
+ Run the llama.cpp server binary to start the API server. If running on a remote server, be sure to set host to 0.0.0.0:
+
+ ```shell
+ .\server.exe -c 4096 --host 0.0.0.0 -t 16 --mlock -m models\meta\llama\codellama-7b-instruct.Q8_0.gguf
+ ```
+
+ After it's up and running, change `~/.continue/config.py` to look like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.llamacpp import LlamaCpp
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=LlamaCpp(
+ max_context_length=4096,
+ server_url="http://localhost:8080")
+ )
+ )
+ ```
+ """
+
+ model: str = "llamacpp"
+ server_url: str = Field("http://localhost:8080", description="URL of the server")
+
+ llama_cpp_args: Dict[str, Any] = Field(
+ {"stop": ["[INST]"]},
+ description="A list of additional arguments to pass to llama.cpp. See [here](https://github.com/ggerganov/llama.cpp/tree/master/examples/server#api-endpoints) for the complete catalog of options.",
+ )
+
+ template_messages: Callable = llama2_template_messages
+ prompt_templates = {
+ "edit": simplified_edit_prompt,
+ }
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def collect_args(self, options) -> Any:
+ args = super().collect_args(options)
+ if "max_tokens" in args:
+ args["n_predict"] = args["max_tokens"]
+ del args["max_tokens"]
+ if "frequency_penalty" in args:
+ del args["frequency_penalty"]
+ if "presence_penalty" in args:
+ del args["presence_penalty"]
+
+ for k, v in self.llama_cpp_args.items():
+ if k not in args:
+ args[k] = v
+
+ return args
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+ headers = {"Content-Type": "application/json"}
+
+ async def server_generator():
+ async with self.create_client_session() as client_session:
+ async with client_session.post(
+ f"{self.server_url}/completion",
+ json={"prompt": prompt, "stream": True, **args},
+ headers=headers,
+ proxy=self.proxy,
+ ) as resp:
+ async for line in resp.content:
+ content = line.decode("utf-8")
+ if content.strip() == "":
+ continue
+ yield json.loads(content[6:])["content"]
+
+ async for chunk in server_generator():
+ yield chunk
diff --git a/server/continuedev/libs/llm/ollama.py b/server/continuedev/libs/llm/ollama.py
new file mode 100644
index 00000000..82cbc852
--- /dev/null
+++ b/server/continuedev/libs/llm/ollama.py
@@ -0,0 +1,106 @@
+import json
+from typing import Callable
+
+import aiohttp
+from pydantic import Field
+
+from ...core.main import ContinueCustomException
+from ..util.logging import logger
+from .base import LLM
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class Ollama(LLM):
+ """
+ [Ollama](https://ollama.ai/) is an application for Mac and Linux that makes it easy to locally run open-source models, including Llama-2. Download the app from the website, and it will walk you through setup in a couple of minutes. You can also read more in their [README](https://github.com/jmorganca/ollama). Continue can then be configured to use the `Ollama` LLM class:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.ollama import Ollama
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=Ollama(model="llama2")
+ )
+ )
+ ```
+ """
+
+ model: str = "llama2"
+ server_url: str = Field(
+ "http://localhost:11434", description="URL of the Ollama server"
+ )
+
+ _client_session: aiohttp.ClientSession = None
+
+ template_messages: Callable = llama2_template_messages
+
+ prompt_templates = {
+ "edit": simplified_edit_prompt,
+ }
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ async def start(self, **kwargs):
+ await super().start(**kwargs)
+ self._client_session = self.create_client_session()
+ try:
+ async with self._client_session.post(
+ f"{self.server_url}/api/generate",
+ proxy=self.proxy,
+ json={
+ "prompt": "",
+ "model": self.model,
+ },
+ ) as _:
+ pass
+ except Exception as e:
+ logger.warning(f"Error pre-loading Ollama model: {e}")
+
+ async def stop(self):
+ await self._client_session.close()
+
+ async def get_downloaded_models(self):
+ async with self._client_session.get(
+ f"{self.server_url}/api/tags",
+ proxy=self.proxy,
+ ) as resp:
+ js_data = await resp.json()
+ return list(map(lambda x: x["name"], js_data["models"]))
+
+ async def _stream_complete(self, prompt, options):
+ async with self._client_session.post(
+ f"{self.server_url}/api/generate",
+ json={
+ "template": prompt,
+ "model": self.model,
+ "system": self.system_message,
+ "options": {"temperature": options.temperature},
+ },
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status == 400:
+ txt = await resp.text()
+ extra_msg = ""
+ if "no such file" in txt:
+ extra_msg = f"\n\nThis means that the model '{self.model}' is not downloaded.\n\nYou have the following models downloaded: {', '.join(await self.get_downloaded_models())}.\n\nTo download this model, run `ollama run {self.model}` in your terminal."
+ raise ContinueCustomException(
+ f"Ollama returned an error: {txt}{extra_msg}",
+ "Invalid request to Ollama",
+ )
+ elif resp.status != 200:
+ raise ContinueCustomException(
+ f"Ollama returned an error: {await resp.text()}",
+ "Invalid request to Ollama",
+ )
+ async for line in resp.content.iter_any():
+ if line:
+ json_chunk = line.decode("utf-8")
+ chunks = json_chunk.split("\n")
+ for chunk in chunks:
+ if chunk.strip() != "":
+ j = json.loads(chunk)
+ if "response" in j:
+ yield j["response"]
diff --git a/server/continuedev/libs/llm/openai.py b/server/continuedev/libs/llm/openai.py
new file mode 100644
index 00000000..ba29279b
--- /dev/null
+++ b/server/continuedev/libs/llm/openai.py
@@ -0,0 +1,156 @@
+from typing import Callable, List, Literal, Optional
+
+import certifi
+import openai
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM
+
+CHAT_MODELS = {
+ "gpt-3.5-turbo",
+ "gpt-3.5-turbo-16k",
+ "gpt-4",
+ "gpt-3.5-turbo-0613",
+ "gpt-4-32k",
+}
+MAX_TOKENS_FOR_MODEL = {
+ "gpt-3.5-turbo": 4096,
+ "gpt-3.5-turbo-0613": 4096,
+ "gpt-3.5-turbo-16k": 16_384,
+ "gpt-4": 8192,
+ "gpt-35-turbo-16k": 16_384,
+ "gpt-35-turbo-0613": 4096,
+ "gpt-35-turbo": 4096,
+ "gpt-4-32k": 32_768,
+}
+
+
+class OpenAI(LLM):
+ """
+ The OpenAI class can be used to access OpenAI models like gpt-4 and gpt-3.5-turbo.
+
+ If you are locally serving a model that uses an OpenAI-compatible server, you can simply change the `api_base` in the `OpenAI` class like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.openai import OpenAI
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=OpenAI(
+ api_key="EMPTY",
+ model="<MODEL_NAME>",
+ api_base="http://localhost:8000", # change to your server
+ )
+ )
+ )
+ ```
+
+ Options for serving models locally with an OpenAI-compatible server include:
+
+ - [text-gen-webui](https://github.com/oobabooga/text-generation-webui/tree/main/extensions/openai#setup--installation)
+ - [FastChat](https://github.com/lm-sys/FastChat/blob/main/docs/openai_api.md)
+ - [LocalAI](https://localai.io/basics/getting_started/)
+ - [llama-cpp-python](https://github.com/abetlen/llama-cpp-python#web-server)
+ """
+
+ api_key: str = Field(
+ ...,
+ description="OpenAI API key",
+ )
+
+ proxy: Optional[str] = Field(None, description="Proxy URL to use for requests.")
+
+ api_base: Optional[str] = Field(None, description="OpenAI API base URL.")
+
+ api_type: Optional[Literal["azure", "openai"]] = Field(
+ None, description="OpenAI API type."
+ )
+
+ api_version: Optional[str] = Field(
+ None, description="OpenAI API version. For use with Azure OpenAI Service."
+ )
+
+ engine: Optional[str] = Field(
+ None, description="OpenAI engine. For use with Azure OpenAI Service."
+ )
+
+ async def start(
+ self, unique_id: Optional[str] = None, write_log: Callable[[str], None] = None
+ ):
+ await super().start(write_log=write_log, unique_id=unique_id)
+
+ if self.context_length is None:
+ self.context_length = MAX_TOKENS_FOR_MODEL.get(self.model, 4096)
+
+ openai.api_key = self.api_key
+ if self.api_type is not None:
+ openai.api_type = self.api_type
+ if self.api_base is not None:
+ openai.api_base = self.api_base
+ if self.api_version is not None:
+ openai.api_version = self.api_version
+
+ if self.verify_ssl is not None and self.verify_ssl is False:
+ openai.verify_ssl_certs = False
+
+ if self.proxy is not None:
+ openai.proxy = self.proxy
+
+ openai.ca_bundle_path = self.ca_bundle_path or certifi.where()
+
+ def collect_args(self, options):
+ args = super().collect_args(options)
+ if self.engine is not None:
+ args["engine"] = self.engine
+
+ if not args["model"].endswith("0613") and "functions" in args:
+ del args["functions"]
+
+ return args
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+ args["stream"] = True
+
+ if args["model"] in CHAT_MODELS:
+ async for chunk in await openai.ChatCompletion.acreate(
+ messages=[{"role": "user", "content": prompt}],
+ **args,
+ headers=self.headers,
+ ):
+ if len(chunk.choices) > 0 and "content" in chunk.choices[0].delta:
+ yield chunk.choices[0].delta.content
+ else:
+ async for chunk in await openai.Completion.acreate(
+ prompt=prompt, **args, headers=self.headers
+ ):
+ if len(chunk.choices) > 0:
+ yield chunk.choices[0].text
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ args = self.collect_args(options)
+
+ async for chunk in await openai.ChatCompletion.acreate(
+ messages=messages,
+ stream=True,
+ **args,
+ headers=self.headers,
+ ):
+ if not hasattr(chunk, "choices") or len(chunk.choices) == 0:
+ continue
+ yield chunk.choices[0].delta
+
+ async def _complete(self, prompt: str, options):
+ args = self.collect_args(options)
+
+ if args["model"] in CHAT_MODELS:
+ resp = await openai.ChatCompletion.acreate(
+ messages=[{"role": "user", "content": prompt}],
+ **args,
+ headers=self.headers,
+ )
+ return resp.choices[0].message.content
+ else:
+ resp = await openai.Completion.acreate(
+ prompt=prompt, **args, headers=self.headers
+ )
+ return resp.choices[0].text
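Because the Azure-specific fields (`api_type`, `api_base`, `api_version`, `engine`) are easy to misconfigure, here is a hedged configuration example for the class above; the resource URL, deployment name, and API version are placeholders, not values from this PR.

```python title="~/.continue/config.py"
from continuedev.libs.llm.openai import OpenAI

config = ContinueConfig(
    ...
    models=Models(
        default=OpenAI(
            api_key="<AZURE_OPENAI_KEY>",
            model="gpt-3.5-turbo",
            api_type="azure",
            api_base="https://<RESOURCE_NAME>.openai.azure.com",
            api_version="2023-05-15",
            engine="<DEPLOYMENT_NAME>",
        )
    )
)
```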
diff --git a/server/continuedev/libs/llm/openai_free_trial.py b/server/continuedev/libs/llm/openai_free_trial.py
new file mode 100644
index 00000000..b6e707f9
--- /dev/null
+++ b/server/continuedev/libs/llm/openai_free_trial.py
@@ -0,0 +1,83 @@
+from typing import Callable, List, Optional
+
+from ...core.main import ChatMessage
+from .base import LLM
+from .openai import OpenAI
+from .proxy_server import ProxyServer
+
+
+class OpenAIFreeTrial(LLM):
+ """
+ With the `OpenAIFreeTrial` `LLM`, new users can try out Continue with GPT-4 using a proxy server that securely makes calls to OpenAI using our API key. Continue should just work the first time you install the extension in VS Code.
+
+ Once you are using Continue regularly though, you will need to add an OpenAI API key that has access to GPT-4 by following these steps:
+
+ 1. Copy your API key from https://platform.openai.com/account/api-keys
+ 2. Open `~/.continue/config.py`. You can do this by using the '/config' command in Continue
+ 3. Change the default LLMs to look like this:
+
+ ```python title="~/.continue/config.py"
+ API_KEY = "<API_KEY>"
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=OpenAIFreeTrial(model="gpt-4", api_key=API_KEY),
+ summarize=OpenAIFreeTrial(model="gpt-3.5-turbo", api_key=API_KEY)
+ )
+ )
+ ```
+
+ The `OpenAIFreeTrial` class will automatically switch to using your API key instead of ours. If you'd like to explicitly use one or the other, you can use the `ProxyServer` or `OpenAI` classes instead.
+
+ These classes support any models available through the OpenAI API, assuming your API key has access, including "gpt-4", "gpt-3.5-turbo", "gpt-3.5-turbo-16k", and "gpt-4-32k".
+ """
+
+ api_key: Optional[str] = None
+
+ llm: Optional[LLM] = None
+
+ def update_llm_properties(self):
+ if self.llm is not None:
+ self.llm.system_message = self.system_message
+
+ async def start(
+ self, write_log: Callable[[str], None] = None, unique_id: Optional[str] = None
+ ):
+ await super().start(write_log=write_log, unique_id=unique_id)
+ if self.api_key is None or self.api_key.strip() == "":
+ self.llm = ProxyServer(
+ model=self.model,
+ verify_ssl=self.verify_ssl,
+ ca_bundle_path=self.ca_bundle_path,
+ )
+ else:
+ self.llm = OpenAI(
+ api_key=self.api_key,
+ model=self.model,
+ verify_ssl=self.verify_ssl,
+ ca_bundle_path=self.ca_bundle_path,
+ )
+
+ await self.llm.start(write_log=write_log, unique_id=unique_id)
+
+ async def stop(self):
+ await self.llm.stop()
+
+ async def _complete(self, prompt: str, options):
+ self.update_llm_properties()
+ return await self.llm._complete(prompt, options)
+
+ async def _stream_complete(self, prompt, options):
+ self.update_llm_properties()
+ resp = self.llm._stream_complete(prompt, options)
+ async for item in resp:
+ yield item
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ self.update_llm_properties()
+ resp = self.llm._stream_chat(messages=messages, options=options)
+ async for item in resp:
+ yield item
+
+ def count_tokens(self, text: str):
+ return self.llm.count_tokens(text)
diff --git a/server/continuedev/libs/llm/prompt_utils.py b/server/continuedev/libs/llm/prompt_utils.py
new file mode 100644
index 00000000..930b5220
--- /dev/null
+++ b/server/continuedev/libs/llm/prompt_utils.py
@@ -0,0 +1,76 @@
+from typing import Dict, List, Union
+
+from ...models.filesystem import RangeInFileWithContents
+from ...models.filesystem_edit import FileEdit
+
+
+class MarkdownStyleEncoderDecoder:
+ # Filename -> the part of the file you care about
+ range_in_files: List[RangeInFileWithContents]
+
+ def __init__(self, range_in_files: List[RangeInFileWithContents]):
+ self.range_in_files = range_in_files
+
+ def encode(self) -> str:
+ return "\n\n".join(
+ [
+ f"File ({rif.filepath})\n```\n{rif.contents}\n```"
+ for rif in self.range_in_files
+ ]
+ )
+
+ def _suggestions_to_file_edits(self, suggestions: Dict[str, str]) -> List[FileEdit]:
+ file_edits: List[FileEdit] = []
+ for suggestion_filepath, suggestion in suggestions.items():
+ matching_rifs = list(
+ filter(lambda r: r.filepath == suggestion_filepath, self.range_in_files)
+ )
+ if len(matching_rifs) > 0:
+ range_in_file = matching_rifs[0]
+ file_edits.append(
+ FileEdit(
+ range=range_in_file.range,
+ filepath=range_in_file.filepath,
+ replacement=suggestion,
+ )
+ )
+
+ return file_edits
+
+ def _decode_to_suggestions(self, completion: str) -> Dict[str, str]:
+ if len(self.range_in_files) == 0:
+ return {}
+
+ if "```" not in completion:
+ completion = "```\n" + completion + "\n```"
+ if completion.strip().splitlines()[0].strip() == "```":
+ first_filepath = self.range_in_files[0].filepath
+ completion = f"File ({first_filepath})\n" + completion
+
+ suggestions: Dict[str, str] = {}
+ current_file_lines: List[str] = []
+ current_filepath: Union[str, None] = None
+ last_was_file = False
+ inside_file = False
+ for line in completion.splitlines():
+ if line.strip().startswith("File ("):
+ last_was_file = True
+ current_filepath = line.strip()[6:-1]
+ elif last_was_file and line.startswith("```"):
+ last_was_file = False
+ inside_file = True
+ elif inside_file:
+ if line.startswith("```"):
+ inside_file = False
+ suggestions[current_filepath] = "\n".join(current_file_lines)
+ current_file_lines = []
+ current_filepath = None
+ else:
+ current_file_lines.append(line)
+
+ return suggestions
+
+ def decode(self, completion: str) -> List[FileEdit]:
+ suggestions = self._decode_to_suggestions(completion)
+ file_edits = self._suggestions_to_file_edits(suggestions)
+ return file_edits
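The encoder/decoder above round-trips a simple markdown wire format: each file is rendered as a `File (<path>)` header followed by a fenced block. A small illustration of what `encode()` produces and what `_decode_to_suggestions()` expects back (the path and contents are illustrative):

```python
# Illustration of the markdown wire format handled by MarkdownStyleEncoderDecoder.
encoded = (
    "File (src/app.py)\n"
    "```\n"
    "def main():\n"
    "    print('hello')\n"
    "```"
)
# A completion in this same shape decodes to {"src/app.py": "def main():\n    print('hello')"}.
print(encoded)
```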
diff --git a/server/continuedev/libs/llm/prompts/chat.py b/server/continuedev/libs/llm/prompts/chat.py
new file mode 100644
index 00000000..036f1b1a
--- /dev/null
+++ b/server/continuedev/libs/llm/prompts/chat.py
@@ -0,0 +1,174 @@
+from textwrap import dedent
+from typing import Dict, List
+
+from anthropic import AI_PROMPT, HUMAN_PROMPT
+
+
+def anthropic_template_messages(messages: List[Dict[str, str]]) -> str:
+ prompt = ""
+
+ # Anthropic prompt must start with a Human turn
+ if (
+ len(messages) > 0
+ and messages[0]["role"] != "user"
+ and messages[0]["role"] != "system"
+ ):
+ prompt += f"{HUMAN_PROMPT} Hello."
+ for msg in messages:
+ prompt += f"{HUMAN_PROMPT if (msg['role'] == 'user' or msg['role'] == 'system') else AI_PROMPT} {msg['content']} "
+
+ prompt += AI_PROMPT
+ return prompt
+
+
+def template_alpaca_messages(msgs: List[Dict[str, str]]) -> str:
+ prompt = ""
+
+ if msgs[0]["role"] == "system":
+ prompt += f"{msgs[0]['content']}\n"
+ msgs.pop(0)
+
+ for msg in msgs:
+ prompt += "### Instruction:\n" if msg["role"] == "user" else "### Response:\n"
+ prompt += f"{msg['content']}\n"
+
+ prompt += "### Response:\n"
+
+ return prompt
+
+
+def raw_input_template(msgs: List[Dict[str, str]]) -> str:
+ return msgs[-1]["content"]
+
+
+SQL_CODER_DEFAULT_SCHEMA = """\
+CREATE TABLE products (
+ product_id INTEGER PRIMARY KEY, -- Unique ID for each product
+ name VARCHAR(50), -- Name of the product
+ price DECIMAL(10,2), -- Price of each unit of the product
+ quantity INTEGER -- Current quantity in stock
+);
+
+CREATE TABLE customers (
+ customer_id INTEGER PRIMARY KEY, -- Unique ID for each customer
+ name VARCHAR(50), -- Name of the customer
+ address VARCHAR(100) -- Mailing address of the customer
+);
+
+CREATE TABLE salespeople (
+ salesperson_id INTEGER PRIMARY KEY, -- Unique ID for each salesperson
+ name VARCHAR(50), -- Name of the salesperson
+ region VARCHAR(50) -- Geographic sales region
+);
+
+CREATE TABLE sales (
+ sale_id INTEGER PRIMARY KEY, -- Unique ID for each sale
+ product_id INTEGER, -- ID of product sold
+ customer_id INTEGER, -- ID of customer who made purchase
+ salesperson_id INTEGER, -- ID of salesperson who made the sale
+ sale_date DATE, -- Date the sale occurred
+ quantity INTEGER -- Quantity of product sold
+);
+
+CREATE TABLE product_suppliers (
+ supplier_id INTEGER PRIMARY KEY, -- Unique ID for each supplier
+ product_id INTEGER, -- Product ID supplied
+ supply_price DECIMAL(10,2) -- Unit price charged by supplier
+);
+
+-- sales.product_id can be joined with products.product_id
+-- sales.customer_id can be joined with customers.customer_id
+-- sales.salesperson_id can be joined with salespeople.salesperson_id
+-- product_suppliers.product_id can be joined with products.product_id
+"""
+
+
+def _sqlcoder_template_messages(
+ msgs: List[Dict[str, str]], schema: str = SQL_CODER_DEFAULT_SCHEMA
+) -> str:
+ question = msgs[-1]["content"]
+ return f"""\
+Your task is to convert a question into a SQL query, given a Postgres database schema.
+Adhere to these rules:
+- **Deliberately go through the question and database schema word by word** to appropriately answer the question
+- **Use Table Aliases** to prevent ambiguity. For example, `SELECT table1.col1, table2.col1 FROM table1 JOIN table2 ON table1.id = table2.id`.
+- When creating a ratio, always cast the numerator as float
+
+### Input:
+Generate a SQL query that answers the question `{question}`.
+This query will run on a database whose schema is represented in this string:
+{schema}
+
+### Response:
+Based on your instructions, here is the SQL query I have generated to answer the question `{question}`:
+```sql
+"""
+
+
+def sqlcoder_template_messages(schema: str = SQL_CODER_DEFAULT_SCHEMA):
+ if schema == "<MY_DATABASE_SCHEMA>" or schema == "":
+ schema = SQL_CODER_DEFAULT_SCHEMA
+
+ def fn(msgs):
+ return _sqlcoder_template_messages(msgs, schema=schema)
+
+ fn.__name__ = "sqlcoder_template_messages"
+ return fn
+
+
+def llama2_template_messages(msgs: List[Dict[str, str]]) -> str:
+ if len(msgs) == 0:
+ return ""
+
+ if msgs[0]["role"] == "assistant":
+ # These models aren't trained to handle assistant message coming first,
+ # and typically these are just introduction messages from Continue
+ msgs.pop(0)
+
+ prompt = ""
+ has_system = msgs[0]["role"] == "system"
+
+ if has_system and msgs[0]["content"].strip() == "":
+ has_system = False
+ msgs = msgs[1:]
+
+ if has_system:
+ system_message = dedent(
+ f"""\
+ <<SYS>>
+ {msgs[0]["content"]}
+ <</SYS>>
+
+ """
+ )
+ if len(msgs) > 1:
+ prompt += f"[INST] {system_message}{msgs[1]['content']} [/INST]"
+ else:
+ prompt += f"[INST] {system_message} [/INST]"
+ return prompt
+
+ for i in range(2 if has_system else 0, len(msgs)):
+ if msgs[i]["role"] == "user":
+ prompt += f"[INST] {msgs[i]['content']} [/INST]"
+ else:
+ prompt += msgs[i]["content"] + " "
+
+ return prompt
+
+
+def code_llama_template_messages(msgs: List[Dict[str, str]]) -> str:
+ return f"[INST] {msgs[-1]['content']}\n[/INST]"
+
+
+def extra_space_template_messages(msgs: List[Dict[str, str]]) -> str:
+ return f" {msgs[-1]['content']}"
+
+
+def code_llama_python_template_messages(msgs: List[Dict[str, str]]) -> str:
+ return dedent(
+ f"""\
+ [INST]
+ You are an expert Python programmer and personal assistant, here is your task: {msgs[-1]['content']}
+ Your answer should start with a [PYTHON] tag and end with a [/PYTHON] tag.
+ [/INST]"""
+ )
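A small usage sketch for `llama2_template_messages`, assuming the `continuedev` package is importable; the output shown in the comments follows directly from the system/user handling above.

```python
# Usage sketch for llama2_template_messages (requires continuedev on PYTHONPATH).
from continuedev.libs.llm.prompts.chat import llama2_template_messages

msgs = [
    {"role": "system", "content": "You are a helpful assistant."},
    {"role": "user", "content": "Say hi."},
]
print(llama2_template_messages(msgs))
# [INST] <<SYS>>
# You are a helpful assistant.
# <</SYS>>
#
# Say hi. [/INST]
```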
diff --git a/server/continuedev/libs/llm/prompts/edit.py b/server/continuedev/libs/llm/prompts/edit.py
new file mode 100644
index 00000000..eaa694c5
--- /dev/null
+++ b/server/continuedev/libs/llm/prompts/edit.py
@@ -0,0 +1,27 @@
+from textwrap import dedent
+
+simplified_edit_prompt = dedent(
+ """\
+ Consider the following code:
+ ```
+ {{{code_to_edit}}}
+ ```
+ Edit the code to perfectly satisfy the following user request:
+ {{{user_input}}}
+ Output nothing except for the code. No code block, no English explanation, no start/end tags."""
+)
+
+simplest_edit_prompt = dedent(
+ """\
+ Here is the code before editing:
+ ```
+ {{{code_to_edit}}}
+ ```
+
+ Here is the edit requested:
+ "{{{user_input}}}"
+
+ Here is the code after editing:"""
+)
+
+codellama_infill_edit_prompt = "{{file_prefix}}<FILL>{{file_suffix}}"
diff --git a/server/continuedev/libs/llm/proxy_server.py b/server/continuedev/libs/llm/proxy_server.py
new file mode 100644
index 00000000..7c3462eb
--- /dev/null
+++ b/server/continuedev/libs/llm/proxy_server.py
@@ -0,0 +1,108 @@
+import json
+import traceback
+from typing import List
+
+import aiohttp
+
+from ...core.main import ChatMessage
+from ..util.telemetry import posthog_logger
+from .base import LLM
+
+# SERVER_URL = "http://127.0.0.1:8080"
+SERVER_URL = "https://proxy-server-l6vsfbzhba-uw.a.run.app"
+
+MAX_TOKENS_FOR_MODEL = {
+ "gpt-3.5-turbo": 4096,
+ "gpt-3.5-turbo-0613": 4096,
+ "gpt-3.5-turbo-16k": 16384,
+ "gpt-4": 8192,
+}
+
+
+class ProxyServer(LLM):
+ _client_session: aiohttp.ClientSession
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ async def start(
+ self,
+ **kwargs,
+ ):
+ await super().start(**kwargs)
+ self._client_session = self.create_client_session()
+
+ self.context_length = MAX_TOKENS_FOR_MODEL.get(self.model, 4096)
+
+ async def stop(self):
+ await self._client_session.close()
+
+ def get_headers(self):
+ return {"unique_id": self.unique_id}
+
+ async def _complete(self, prompt: str, options):
+ args = self.collect_args(options)
+
+ async with self._client_session.post(
+ f"{SERVER_URL}/complete",
+ json={"messages": [{"role": "user", "content": prompt}], **args},
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ resp_text = await resp.text()
+ if resp.status != 200:
+ raise Exception(resp_text)
+
+ return resp_text
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ args = self.collect_args(options)
+ async with self._client_session.post(
+ f"{SERVER_URL}/stream_chat",
+ json={"messages": messages, **args},
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status != 200:
+ raise Exception(await resp.text())
+
+ async for line in resp.content.iter_chunks():
+ if line[1]:
+ try:
+ json_chunk = line[0].decode("utf-8")
+ json_chunk = "{}" if json_chunk == "" else json_chunk
+ chunks = json_chunk.split("\n")
+ for chunk in chunks:
+ if chunk.strip() != "":
+ loaded_chunk = json.loads(chunk)
+ yield loaded_chunk
+
+ except Exception as e:
+ posthog_logger.capture_event(
+ "proxy_server_parse_error",
+ {
+ "error_title": "Proxy server stream_chat parsing failed",
+ "error_message": "\n".join(
+ traceback.format_exception(e)
+ ),
+ },
+ )
+ else:
+ break
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+
+ async with self._client_session.post(
+ f"{SERVER_URL}/stream_complete",
+ json={"messages": [{"role": "user", "content": prompt}], **args},
+ headers=self.get_headers(),
+ proxy=self.proxy,
+ ) as resp:
+ if resp.status != 200:
+ raise Exception(await resp.text())
+
+ async for line in resp.content.iter_any():
+ if line:
+ decoded_line = line.decode("utf-8")
+ yield decoded_line
diff --git a/server/continuedev/libs/llm/queued.py b/server/continuedev/libs/llm/queued.py
new file mode 100644
index 00000000..2db749eb
--- /dev/null
+++ b/server/continuedev/libs/llm/queued.py
@@ -0,0 +1,77 @@
+import asyncio
+from typing import Any, List, Union
+
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM, CompletionOptions
+
+
+class QueuedLLM(LLM):
+ """
+ QueuedLLM exists to make up for LLM servers that cannot handle multiple requests at once. It uses a lock to ensure that only one request is being processed at a time.
+
+ If you are already using another LLM class and are experiencing this problem, you can just wrap it with the QueuedLLM class like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.queued import QueuedLLM
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=QueuedLLM(llm=<OTHER_LLM_CLASS>)
+ )
+ )
+ ```
+ """
+
+ llm: LLM = Field(..., description="The LLM to wrap with a lock")
+ _lock: asyncio.Lock
+
+ model: str = "queued"
+
+ def dict(self, **kwargs):
+ return self.llm.dict(**kwargs)
+
+ async def start(self, *args, **kwargs):
+ await super().start(*args, **kwargs)
+ await self.llm.start(*args, **kwargs)
+ self._lock = asyncio.Lock()
+ self.model = self.llm.model
+ self.template_messages = self.llm.template_messages
+ self.prompt_templates = self.llm.prompt_templates
+ self.context_length = self.llm.context_length
+
+ async def stop(self):
+ await self.llm.stop()
+
+ def collect_args(self, options: CompletionOptions):
+ return self.llm.collect_args(options)
+
+ def compile_chat_messages(
+ self,
+ options: CompletionOptions,
+ msgs: List[ChatMessage],
+ functions: Union[List[Any], None] = None,
+ ):
+ return self.llm.compile_chat_messages(options, msgs, functions)
+
+ def template_prompt_like_messages(self, prompt: str) -> str:
+ return self.llm.template_prompt_like_messages(prompt)
+
+ async def _complete(self, prompt: str, options: CompletionOptions):
+ async with self._lock:
+ resp = await self.llm._complete(prompt, options)
+ return resp
+
+ async def _stream_complete(self, prompt: str, options: CompletionOptions):
+ async with self._lock:
+ async for chunk in self.llm._stream_complete(prompt, options):
+ yield chunk
+
+ async def _stream_chat(
+ self, messages: List[ChatMessage], options: CompletionOptions
+ ):
+ async with self._lock:
+ async for chunk in self.llm._stream_chat(messages, options):
+ yield chunk
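The core idea of `QueuedLLM` is simply to serialize concurrent calls behind an `asyncio.Lock`. A self-contained sketch of that pattern, independent of the LLM classes:

```python
# Sketch of the lock-based serialization idea behind QueuedLLM.
import asyncio

lock = asyncio.Lock()


async def serialized_call(i: int) -> str:
    async with lock:
        await asyncio.sleep(0.1)  # stand-in for a backend that can only handle one request
        return f"response {i}"


async def main():
    # All three calls are issued concurrently but run one at a time.
    print(await asyncio.gather(*(serialized_call(i) for i in range(3))))


asyncio.run(main())
```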
diff --git a/server/continuedev/libs/llm/replicate.py b/server/continuedev/libs/llm/replicate.py
new file mode 100644
index 00000000..3423193b
--- /dev/null
+++ b/server/continuedev/libs/llm/replicate.py
@@ -0,0 +1,78 @@
+import concurrent.futures
+from typing import List
+
+import replicate
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM
+from .prompts.edit import simplified_edit_prompt
+
+
+class ReplicateLLM(LLM):
+ """
+ Replicate is a great option for newly released language models or models that you've deployed through their platform. Sign up for an account [here](https://replicate.ai/), copy your API key, and then select any model from the [Replicate Streaming List](https://replicate.com/collections/streaming-language-models). Change `~/.continue/config.py` to look like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.core.models import Models
+ from continuedev.libs.llm.replicate import ReplicateLLM
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=ReplicateLLM(
+ model="replicate/codellama-13b-instruct:da5676342de1a5a335b848383af297f592b816b950a43d251a0a9edd0113604b",
+ api_key="my-replicate-api-key")
+ )
+ )
+ ```
+
+ If you don't specify the `model` parameter, it will default to `replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781`.
+ """
+
+ api_key: str = Field(..., description="Replicate API key")
+
+ model: str = "replicate/llama-2-70b-chat:58d078176e02c219e11eb4da5a02a7830a283b14cf8f94537af893ccff5ee781"
+
+ _client: replicate.Client = None
+
+ prompt_templates = {
+ "edit": simplified_edit_prompt,
+ }
+
+ async def start(self, **kwargs):
+ await super().start(**kwargs)
+ self._client = replicate.Client(api_token=self.api_key)
+
+ async def _complete(self, prompt: str, options):
+ def helper():
+ output = self._client.run(
+ self.model, input={"message": prompt, "prompt": prompt}
+ )
+ completion = ""
+ for item in output:
+ completion += item
+
+ return completion
+
+ with concurrent.futures.ThreadPoolExecutor() as executor:
+ future = executor.submit(helper)
+ completion = future.result()
+
+ return completion
+
+ async def _stream_complete(self, prompt, options):
+ for item in self._client.run(
+ self.model, input={"message": prompt, "prompt": prompt}
+ ):
+ yield item
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ for item in self._client.run(
+ self.model,
+ input={
+ "message": messages[-1]["content"],
+ "prompt": messages[-1]["content"],
+ },
+ ):
+ yield {"content": item, "role": "assistant"}
diff --git a/server/continuedev/libs/llm/text_gen_interface.py b/server/continuedev/libs/llm/text_gen_interface.py
new file mode 100644
index 00000000..225fd3b6
--- /dev/null
+++ b/server/continuedev/libs/llm/text_gen_interface.py
@@ -0,0 +1,114 @@
+import json
+from typing import Any, Callable, Dict, List, Union
+
+import websockets
+from pydantic import Field
+
+from ...core.main import ChatMessage
+from .base import LLM
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplest_edit_prompt
+
+
+class TextGenUI(LLM):
+ """
+ TextGenUI is a comprehensive, open-source language model UI and local server. You can set it up with an OpenAI-compatible server plugin, but if for some reason that doesn't work, you can use this class like so:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.libs.llm.text_gen_interface import TextGenUI
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=TextGenUI(
+ model="<MODEL_NAME>",
+ )
+ )
+ )
+ ```
+ """
+
+ model: str = "text-gen-ui"
+ server_url: str = Field(
+ "http://localhost:5000", description="URL of your TextGenUI server"
+ )
+ streaming_url: str = Field(
+ "http://localhost:5005",
+ description="URL of your TextGenUI streaming server (separate from main server URL)",
+ )
+
+ prompt_templates = {
+ "edit": simplest_edit_prompt,
+ }
+
+ template_messages: Union[
+ Callable[[List[Dict[str, str]]], str], None
+ ] = llama2_template_messages
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ def collect_args(self, options) -> Any:
+ args = super().collect_args(options)
+ args = {**args, "max_new_tokens": options.max_tokens}
+ args.pop("max_tokens", None)
+ return args
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+
+ ws_url = f"{self.streaming_url.replace('http://', 'ws://').replace('https://', 'wss://')}"
+ payload = json.dumps({"prompt": prompt, "stream": True, **args})
+ async with websockets.connect(
+ f"{ws_url}/api/v1/stream", ping_interval=None
+ ) as websocket:
+ await websocket.send(payload)
+
+ while True:
+ incoming_data = await websocket.recv()
+ incoming_data = json.loads(incoming_data)
+
+ match incoming_data["event"]:
+ case "text_stream":
+ yield incoming_data["text"]
+ case "stream_end":
+ break
+
+ async def _stream_chat(self, messages: List[ChatMessage], options):
+ args = self.collect_args(options)
+
+ async def generator():
+ ws_url = f"{self.streaming_url.replace('http://', 'ws://').replace('https://', 'wss://')}"
+ history = list(map(lambda x: x["content"], messages))
+ payload = json.dumps(
+ {
+ "user_input": messages[-1]["content"],
+ "history": {"internal": [history], "visible": [history]},
+ "stream": True,
+ **args,
+ }
+ )
+ async with websockets.connect(
+ f"{ws_url}/api/v1/chat-stream", ping_interval=None
+ ) as websocket:
+ await websocket.send(payload)
+
+ prev = ""
+ while True:
+ incoming_data = await websocket.recv()
+ incoming_data = json.loads(incoming_data)
+
+ match incoming_data["event"]:
+ case "text_stream":
+ visible = incoming_data["history"]["visible"][-1]
+ if len(visible) > 0:
+ yield {
+ "role": "assistant",
+ "content": visible[-1].replace(prev, ""),
+ }
+ prev = visible[-1]
+ case "stream_end":
+ break
+
+ async for chunk in generator():
+ yield chunk
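The websocket protocol consumed above is a sequence of JSON events with an `event` field: `text_stream` carries text and `stream_end` terminates the stream. A tiny offline sketch of that consumption loop, using made-up event payloads:

```python
# Offline sketch of the event handling used in _stream_complete above.
import json

events = [
    json.dumps({"event": "text_stream", "text": "Hello"}),
    json.dumps({"event": "text_stream", "text": " there"}),
    json.dumps({"event": "stream_end"}),
]

out = ""
for raw in events:
    data = json.loads(raw)
    if data["event"] == "text_stream":
        out += data["text"]
    elif data["event"] == "stream_end":
        break
print(out)  # Hello there
```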
diff --git a/server/continuedev/libs/llm/together.py b/server/continuedev/libs/llm/together.py
new file mode 100644
index 00000000..35b3a424
--- /dev/null
+++ b/server/continuedev/libs/llm/together.py
@@ -0,0 +1,125 @@
+import json
+from typing import Callable
+
+import aiohttp
+from pydantic import Field
+
+from ...core.main import ContinueCustomException
+from ..util.logging import logger
+from .base import LLM
+from .prompts.chat import llama2_template_messages
+from .prompts.edit import simplified_edit_prompt
+
+
+class TogetherLLM(LLM):
+ """
+ The Together API is a cloud platform for running large AI models. You can sign up [here](https://api.together.xyz/signup), copy your API key on the initial welcome screen, and then hit the play button on any model from the [Together Models list](https://docs.together.ai/docs/models-inference). Change `~/.continue/config.py` to look like this:
+
+ ```python title="~/.continue/config.py"
+ from continuedev.core.models import Models
+ from continuedev.libs.llm.together import TogetherLLM
+
+ config = ContinueConfig(
+ ...
+ models=Models(
+ default=TogetherLLM(
+ api_key="<API_KEY>",
+ model="togethercomputer/llama-2-13b-chat"
+ )
+ )
+ )
+ ```
+ """
+
+ api_key: str = Field(..., description="Together API key")
+
+ model: str = "togethercomputer/RedPajama-INCITE-7B-Instruct"
+ base_url: str = Field(
+ "https://api.together.xyz",
+ description="The base URL for your Together API instance",
+ )
+
+ _client_session: aiohttp.ClientSession = None
+
+ template_messages: Callable = llama2_template_messages
+
+ prompt_templates = {
+ "edit": simplified_edit_prompt,
+ }
+
+ async def start(self, **kwargs):
+ await super().start(**kwargs)
+ self._client_session = aiohttp.ClientSession(
+ connector=aiohttp.TCPConnector(verify_ssl=self.verify_ssl),
+ timeout=aiohttp.ClientTimeout(total=self.timeout),
+ )
+
+ async def stop(self):
+ await self._client_session.close()
+
+ async def _stream_complete(self, prompt, options):
+ args = self.collect_args(options)
+
+ async with self._client_session.post(
+ f"{self.base_url}/inference",
+ json={
+ "prompt": prompt,
+ "stream_tokens": True,
+ **args,
+ },
+ headers={"Authorization": f"Bearer {self.api_key}"},
+ proxy=self.proxy,
+ ) as resp:
+ async for line in resp.content.iter_chunks():
+ if line[1]:
+ json_chunk = line[0].decode("utf-8")
+ if json_chunk.startswith(": ping - ") or json_chunk.startswith(
+ "data: [DONE]"
+ ):
+ continue
+
+ chunks = json_chunk.split("\n")
+ for chunk in chunks:
+ if chunk.strip() != "":
+ if chunk.startswith("data: "):
+ chunk = chunk[6:]
+ if chunk == "[DONE]":
+ break
+ try:
+ json_chunk = json.loads(chunk)
+ except Exception as e:
+ logger.warning(f"Invalid JSON chunk: {chunk}\n\n{e}")
+ continue
+ if "choices" in json_chunk:
+ yield json_chunk["choices"][0]["text"]
+
+ async def _complete(self, prompt: str, options):
+ args = self.collect_args(options)
+
+ async with self._client_session.post(
+ f"{self.base_url}/inference",
+ json={"prompt": prompt, **args},
+ headers={"Authorization": f"Bearer {self.api_key}"},
+ proxy=self.proxy,
+ ) as resp:
+ text = await resp.text()
+ j = json.loads(text)
+ try:
+ if "choices" not in j["output"]:
+ raise Exception(text)
+ if "output" in j:
+ return j["output"]["choices"][0]["text"]
+ except Exception as e:
+ j = await resp.json()
+ if "error" in j:
+ if j["error"].startswith("invalid hexlify value"):
+ raise ContinueCustomException(
+ message=f"Invalid Together API key:\n\n{j['error']}",
+ title="Together API Error",
+ )
+ else:
+ raise ContinueCustomException(
+ message=j["error"], title="Together API Error"
+ )
+
+ raise e
diff --git a/server/continuedev/libs/util/calculate_diff.py b/server/continuedev/libs/util/calculate_diff.py
new file mode 100644
index 00000000..99301ae7
--- /dev/null
+++ b/server/continuedev/libs/util/calculate_diff.py
@@ -0,0 +1,154 @@
+import difflib
+from typing import List
+
+from ...models.filesystem import FileEdit
+from ...models.main import Position, Range
+
+
+def calculate_diff(filepath: str, original: str, updated: str) -> List[FileEdit]:
+ s = difflib.SequenceMatcher(None, original, updated)
+ offset = 0 # The indices are offset by previous deletions/insertions
+ edits = []
+ for tag, i1, i2, j1, j2 in s.get_opcodes():
+ i1, i2, j1, j2 = i1 + offset, i2 + offset, j1 + offset, j2 + offset
+ replacement = updated[j1:j2]
+ if tag == "equal":
+ pass
+ elif tag == "delete":
+ edits.append(
+ FileEdit.from_deletion(filepath, Range.from_indices(original, i1, i2))
+ )
+ offset -= i2 - i1
+ elif tag == "insert":
+ edits.append(
+ FileEdit.from_insertion(
+ filepath, Position.from_index(original, i1), replacement
+ )
+ )
+ offset += j2 - j1
+ elif tag == "replace":
+ edits.append(
+ FileEdit(
+ filepath=filepath,
+ range=Range.from_indices(original, i1, i2),
+ replacement=replacement,
+ )
+ )
+ offset += (j2 - j1) - (i2 - i1)
+ else:
+ raise Exception("Unexpected difflib.SequenceMatcher tag: " + tag)
+
+ return edits
+
+
+def calculate_diff2(filepath: str, original: str, updated: str) -> List[FileEdit]:
+ # original_lines = original.splitlines()
+ # updated_lines = updated.splitlines()
+ # offset = 0
+ # while len(original_lines) and len(updated_lines) and original_lines[0] == updated_lines[0]:
+ # original_lines = original_lines[1:]
+ # updated_lines = updated_lines[1:]
+
+ # while len(original_lines) and len(updated_lines) and original_lines[-1] == updated_lines[-1]:
+ # original_lines = original_lines[:-1]
+ # updated_lines = updated_lines[:-1]
+
+ # original = "\n".join(original_lines)
+ # updated = "\n".join(updated_lines)
+
+ edits = []
+ max_iterations = 1000
+ i = 0
+ while not original == updated:
+ # TODO - For some reason it can't handle a single newline at the end of the file?
+ s = difflib.SequenceMatcher(None, original, updated)
+ opcodes = s.get_opcodes()
+ for edit_index in range(len(opcodes)):
+ tag, i1, i2, j1, j2 = s.get_opcodes()[edit_index]
+ replacement = updated[j1:j2]
+ if tag == "equal":
+ continue # ;)
+ elif tag == "delete":
+ edits.append(
+ FileEdit.from_deletion(
+ filepath, Range.from_indices(original, i1, i2)
+ )
+ )
+ elif tag == "insert":
+ edits.append(
+ FileEdit.from_insertion(
+ filepath, Position.from_index(original, i1), replacement
+ )
+ )
+ elif tag == "replace":
+ edits.append(
+ FileEdit(
+ filepath=filepath,
+ range=Range.from_indices(original, i1, i2),
+ replacement=replacement,
+ )
+ )
+ else:
+ raise Exception("Unexpected difflib.SequenceMatcher tag: " + tag)
+ break
+
+ original = apply_edit_to_str(original, edits[-1])
+
+ i += 1
+ if i > max_iterations:
+ raise Exception("Max iterations reached")
+
+ return edits
+
+
+def read_range_in_str(s: str, r: Range) -> str:
+ lines = s.splitlines()[r.start.line : r.end.line + 1]
+ if len(lines) == 0:
+ return ""
+
+ lines[0] = lines[0][r.start.character :]
+ lines[-1] = lines[-1][: r.end.character + 1]
+ return "\n".join(lines)
+
+
+def apply_edit_to_str(s: str, edit: FileEdit) -> str:
+ read_range_in_str(s, edit.range)
+
+ # Split lines and deal with some edge cases (could obviously be nicer)
+ lines = s.splitlines()
+ if s.startswith("\n"):
+ lines.insert(0, "")
+ if s.endswith("\n"):
+ lines.append("")
+
+ if len(lines) == 0:
+ lines = [""]
+
+ end = Position(line=edit.range.end.line, character=edit.range.end.character)
+ if edit.range.end.line == len(lines) and edit.range.end.character == 0:
+ end = Position(
+ line=edit.range.end.line - 1,
+ character=len(lines[min(len(lines) - 1, edit.range.end.line - 1)]),
+ )
+
+ before_lines = lines[: edit.range.start.line]
+ after_lines = lines[end.line + 1 :]
+ between_str = (
+ lines[min(len(lines) - 1, edit.range.start.line)][: edit.range.start.character]
+ + edit.replacement
+ + lines[min(len(lines) - 1, end.line)][end.character + 1 :]
+ )
+
+ Range(
+ start=edit.range.start,
+ end=Position(
+ line=edit.range.start.line + len(edit.replacement.splitlines()) - 1,
+ character=edit.range.start.character
+ + len(edit.replacement.splitlines()[-1])
+ if edit.replacement != ""
+ else 0,
+ ),
+ )
+
+ lines = before_lines + between_str.splitlines() + after_lines
+ return "\n".join(lines)
diff --git a/server/continuedev/libs/util/commonregex.py b/server/continuedev/libs/util/commonregex.py
new file mode 100644
index 00000000..c2f6bb82
--- /dev/null
+++ b/server/continuedev/libs/util/commonregex.py
@@ -0,0 +1,144 @@
+# coding: utf-8
+import re
+from typing import Any
+
+date = re.compile(
+ "(?:(?<!\:)(?<!\:\d)[0-3]?\d(?:st|nd|rd|th)?\s+(?:of\s+)?(?:jan\.?|january|feb\.?|february|mar\.?|march|apr\.?|april|may|jun\.?|june|jul\.?|july|aug\.?|august|sep\.?|september|oct\.?|october|nov\.?|november|dec\.?|december)|(?:jan\.?|january|feb\.?|february|mar\.?|march|apr\.?|april|may|jun\.?|june|jul\.?|july|aug\.?|august|sep\.?|september|oct\.?|october|nov\.?|november|dec\.?|december)\s+(?<!\:)(?<!\:\d)[0-3]?\d(?:st|nd|rd|th)?)(?:\,)?\s*(?:\d{4})?|[0-3]?\d[-\./][0-3]?\d[-\./]\d{2,4}",
+ re.IGNORECASE,
+)
+time = re.compile("\d{1,2}:\d{2} ?(?:[ap]\.?m\.?)?|\d[ap]\.?m\.?", re.IGNORECASE)
+phone = re.compile(
+ """((?:(?<![\d-])(?:\+?\d{1,3}[-.\s*]?)?(?:\(?\d{3}\)?[-.\s*]?)?\d{3}[-.\s*]?\d{4}(?![\d-]))|(?:(?<![\d-])(?:(?:\(\+?\d{2}\))|(?:\+?\d{2}))\s*\d{2}\s*\d{3}\s*\d{4}(?![\d-])))"""
+)
+phones_with_exts = re.compile(
+ "((?:(?:\+?1\s*(?:[.-]\s*)?)?(?:\(\s*(?:[2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9])\s*\)|(?:[2-9]1[02-9]|[2-9][02-8]1|[2-9][02-8][02-9]))\s*(?:[.-]\s*)?)?(?:[2-9]1[02-9]|[2-9][02-9]1|[2-9][02-9]{2})\s*(?:[.-]\s*)?(?:[0-9]{4})(?:\s*(?:#|x\.?|ext\.?|extension)\s*(?:\d+)?))",
+ re.IGNORECASE,
+)
+link = re.compile(
+ "(?i)((?:https?://|www\d{0,3}[.])?[a-z0-9.\-]+[.](?:(?:international)|(?:construction)|(?:contractors)|(?:enterprises)|(?:photography)|(?:immobilien)|(?:management)|(?:technology)|(?:directory)|(?:education)|(?:equipment)|(?:institute)|(?:marketing)|(?:solutions)|(?:builders)|(?:clothing)|(?:computer)|(?:democrat)|(?:diamonds)|(?:graphics)|(?:holdings)|(?:lighting)|(?:plumbing)|(?:training)|(?:ventures)|(?:academy)|(?:careers)|(?:company)|(?:domains)|(?:florist)|(?:gallery)|(?:guitars)|(?:holiday)|(?:kitchen)|(?:recipes)|(?:shiksha)|(?:singles)|(?:support)|(?:systems)|(?:agency)|(?:berlin)|(?:camera)|(?:center)|(?:coffee)|(?:estate)|(?:kaufen)|(?:luxury)|(?:monash)|(?:museum)|(?:photos)|(?:repair)|(?:social)|(?:tattoo)|(?:travel)|(?:viajes)|(?:voyage)|(?:build)|(?:cheap)|(?:codes)|(?:dance)|(?:email)|(?:glass)|(?:house)|(?:ninja)|(?:photo)|(?:shoes)|(?:solar)|(?:today)|(?:aero)|(?:arpa)|(?:asia)|(?:bike)|(?:buzz)|(?:camp)|(?:club)|(?:coop)|(?:farm)|(?:gift)|(?:guru)|(?:info)|(?:jobs)|(?:kiwi)|(?:land)|(?:limo)|(?:link)|(?:menu)|(?:mobi)|(?:moda)|(?:name)|(?:pics)|(?:pink)|(?:post)|(?:rich)|(?:ruhr)|(?:sexy)|(?:tips)|(?:wang)|(?:wien)|(?:zone)|(?:biz)|(?:cab)|(?:cat)|(?:ceo)|(?:com)|(?:edu)|(?:gov)|(?:int)|(?:mil)|(?:net)|(?:onl)|(?:org)|(?:pro)|(?:red)|(?:tel)|(?:uno)|(?:xxx)|(?:ac)|(?:ad)|(?:ae)|(?:af)|(?:ag)|(?:ai)|(?:al)|(?:am)|(?:an)|(?:ao)|(?:aq)|(?:ar)|(?:as)|(?:at)|(?:au)|(?:aw)|(?:ax)|(?:az)|(?:ba)|(?:bb)|(?:bd)|(?:be)|(?:bf)|(?:bg)|(?:bh)|(?:bi)|(?:bj)|(?:bm)|(?:bn)|(?:bo)|(?:br)|(?:bs)|(?:bt)|(?:bv)|(?:bw)|(?:by)|(?:bz)|(?:ca)|(?:cc)|(?:cd)|(?:cf)|(?:cg)|(?:ch)|(?:ci)|(?:ck)|(?:cl)|(?:cm)|(?:cn)|(?:co)|(?:cr)|(?:cu)|(?:cv)|(?:cw)|(?:cx)|(?:cy)|(?:cz)|(?:de)|(?:dj)|(?:dk)|(?:dm)|(?:do)|(?:dz)|(?:ec)|(?:ee)|(?:eg)|(?:er)|(?:es)|(?:et)|(?:eu)|(?:fi)|(?:fj)|(?:fk)|(?:fm)|(?:fo)|(?:fr)|(?:ga)|(?:gb)|(?:gd)|(?:ge)|(?:gf)|(?:gg)|(?:gh)|(?:gi)|(?:gl)|(?:gm)|(?:gn)|(?:gp)|(?:gq)|(?:gr)|(?:gs)|(?:gt)|(?:gu)|(?:gw)|(?:gy)|(?:hk)|(?:hm)|(?:hn)|(?:hr)|(?:ht)|(?:hu)|(?:id)|(?:ie)|(?:il)|(?:im)|(?:in)|(?:io)|(?:iq)|(?:ir)|(?:is)|(?:it)|(?:je)|(?:jm)|(?:jo)|(?:jp)|(?:ke)|(?:kg)|(?:kh)|(?:ki)|(?:km)|(?:kn)|(?:kp)|(?:kr)|(?:kw)|(?:ky)|(?:kz)|(?:la)|(?:lb)|(?:lc)|(?:li)|(?:lk)|(?:lr)|(?:ls)|(?:lt)|(?:lu)|(?:lv)|(?:ly)|(?:ma)|(?:mc)|(?:md)|(?:me)|(?:mg)|(?:mh)|(?:mk)|(?:ml)|(?:mm)|(?:mn)|(?:mo)|(?:mp)|(?:mq)|(?:mr)|(?:ms)|(?:mt)|(?:mu)|(?:mv)|(?:mw)|(?:mx)|(?:my)|(?:mz)|(?:na)|(?:nc)|(?:ne)|(?:nf)|(?:ng)|(?:ni)|(?:nl)|(?:no)|(?:np)|(?:nr)|(?:nu)|(?:nz)|(?:om)|(?:pa)|(?:pe)|(?:pf)|(?:pg)|(?:ph)|(?:pk)|(?:pl)|(?:pm)|(?:pn)|(?:pr)|(?:ps)|(?:pt)|(?:pw)|(?:py)|(?:qa)|(?:re)|(?:ro)|(?:rs)|(?:ru)|(?:rw)|(?:sa)|(?:sb)|(?:sc)|(?:sd)|(?:se)|(?:sg)|(?:sh)|(?:si)|(?:sj)|(?:sk)|(?:sl)|(?:sm)|(?:sn)|(?:so)|(?:sr)|(?:st)|(?:su)|(?:sv)|(?:sx)|(?:sy)|(?:sz)|(?:tc)|(?:td)|(?:tf)|(?:tg)|(?:th)|(?:tj)|(?:tk)|(?:tl)|(?:tm)|(?:tn)|(?:to)|(?:tp)|(?:tr)|(?:tt)|(?:tv)|(?:tw)|(?:tz)|(?:ua)|(?:ug)|(?:uk)|(?:us)|(?:uy)|(?:uz)|(?:va)|(?:vc)|(?:ve)|(?:vg)|(?:vi)|(?:vn)|(?:vu)|(?:wf)|(?:ws)|(?:ye)|(?:yt)|(?:za)|(?:zm)|(?:zw))(?:/[^\s()<>]+[^\s`!()\[\]{};:'\".,<>?\xab\xbb\u201c\u201d\u2018\u2019])?)",
+ re.IGNORECASE,
+)
+email = re.compile(
+ "([a-z0-9!#$%&'*+\/=?^_`{|.}~-]+@(?:[a-z0-9](?:[a-z0-9-]*[a-z0-9])?\.)+[a-z0-9](?:[a-z0-9-]*[a-z0-9])?)",
+ re.IGNORECASE,
+)
+ip = re.compile(
+ "(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)",
+ re.IGNORECASE,
+)
+ipv6 = re.compile(
+ "\s*(?!.*::.*::)(?:(?!:)|:(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)){3})\s*",
+ re.VERBOSE | re.IGNORECASE | re.DOTALL,
+)
+price = re.compile("[$]\s?[+-]?[0-9]{1,3}(?:(?:,?[0-9]{3}))*(?:\.[0-9]{1,2})?")
+hex_color = re.compile("(#(?:[0-9a-fA-F]{8})|#(?:[0-9a-fA-F]{3}){1,2})\\b")
+credit_card = re.compile("((?:(?:\\d{4}[- ]?){3}\\d{4}|\\d{15,16}))(?![\\d])")
+btc_address = re.compile(
+ "(?<![a-km-zA-HJ-NP-Z0-9])[13][a-km-zA-HJ-NP-Z0-9]{26,33}(?![a-km-zA-HJ-NP-Z0-9])"
+)
+street_address = re.compile(
+ "\d{1,4} [\w\s]{1,20}(?:street|st|avenue|ave|road|rd|highway|hwy|square|sq|trail|trl|drive|dr|court|ct|park|parkway|pkwy|circle|cir|boulevard|blvd)\W?(?=\s|$)",
+ re.IGNORECASE,
+)
+zip_code = re.compile(r"\b\d{5}(?:[-\s]\d{4})?\b")
+po_box = re.compile(r"P\.? ?O\.? Box \d+", re.IGNORECASE)
+ssn = re.compile(
+ "(?!000|666|333)0*(?:[0-6][0-9][0-9]|[0-7][0-6][0-9]|[0-7][0-7][0-2])[- ](?!00)[0-9]{2}[- ](?!0000)[0-9]{4}"
+)
+
+regexes = {
+ "dates": date,
+ "times": time,
+ "phones": phone,
+ "phones_with_exts": phones_with_exts,
+ "emails": email,
+ "ips": ip,
+ "ipv6s": ipv6,
+ "prices": price,
+ "hex_colors": hex_color,
+ "credit_cards": credit_card,
+ "btc_addresses": btc_address,
+ "street_addresses": street_address,
+ "zip_codes": zip_code,
+ "po_boxes": po_box,
+ "ssn_number": ssn,
+}
+
+placeholders = {
+ "dates": "<DATE>",
+ "times": "<TIME>",
+ "phones": "<PHONE>",
+ "phones_with_exts": "<PHONE_WITH_EXT>",
+ "emails": "<EMAIL>",
+ "ips": "<IP>",
+ "ipv6s": "<IPV6>",
+ "prices": "<PRICE>",
+ "hex_colors": "<HEX_COLOR>",
+ "credit_cards": "<CREDIT_CARD>",
+ "btc_addresses": "<BTC_ADDRESS>",
+ "street_addresses": "<STREET_ADDRESS>",
+ "zip_codes": "<ZIP_CODE>",
+ "po_boxes": "<PO_BOX>",
+ "ssn_number": "<SSN>",
+}
+
+
+class regex:
+ def __init__(self, obj, regex):
+ self.obj = obj
+ self.regex = regex
+
+ def __call__(self, *args):
+ def regex_method(text=None):
+ return [x.strip() for x in self.regex.findall(text or self.obj.text)]
+
+ return regex_method
+
+
+class CommonRegex(object):
+ def __init__(self, text=""):
+ self.text = text
+
+ for k, v in list(regexes.items()):
+ setattr(self, k, regex(self, v)(self))
+
+ if text:
+ for key in list(regexes.keys()):
+ method = getattr(self, key)
+ setattr(self, key, method())
+
+
+pii_parser = CommonRegex()
+
+
+def clean_pii_from_str(text: str):
+ """Replace personally identifiable information (PII) with placeholders."""
+ for regex_name, regex in list(regexes.items()):
+ placeholder = placeholders[regex_name]
+ text = regex.sub(placeholder, text)
+
+ return text
+
+
+def clean_pii_from_any(v: Any) -> Any:
+ """Replace personally identifiable information (PII) with placeholders. Not guaranteed to return same type as input."""
+ if isinstance(v, str):
+ return clean_pii_from_str(v)
+ elif isinstance(v, dict):
+ cleaned_dict = {}
+ for key, value in v.items():
+ cleaned_dict[key] = clean_pii_from_any(value)
+ return cleaned_dict
+ elif isinstance(v, list):
+ return [clean_pii_from_any(x) for x in v]
+ else:
+ # Try to convert to string
+ try:
+ orig_text = str(v)
+ cleaned_text = clean_pii_from_str(orig_text)
+ if orig_text != cleaned_text:
+ return cleaned_text
+ else:
+ return v
+ except:
+ return v
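+
+
+# Illustrative usage sketch (values are made up; exact substitutions depend on the
+# regexes above, applied in dict order):
+#
+#   clean_pii_from_str("Contact jane@example.com from host 10.0.0.1")
+#   # -> "Contact <EMAIL> from host <IP>"
+#
+#   clean_pii_from_any({"user": "jane@example.com", "ips": ["10.0.0.1"]})
+#   # -> {"user": "<EMAIL>", "ips": ["<IP>"]}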
diff --git a/server/continuedev/libs/util/copy_codebase.py b/server/continuedev/libs/util/copy_codebase.py
new file mode 100644
index 00000000..78f38148
--- /dev/null
+++ b/server/continuedev/libs/util/copy_codebase.py
@@ -0,0 +1,121 @@
+import os
+import shutil
+from pathlib import Path
+from typing import Iterable, List, Union
+
+from watchdog.events import PatternMatchingEventHandler
+from watchdog.observers import Observer
+
+from ...core.autopilot import Autopilot
+from ...models.filesystem import FileSystem
+from ...models.filesystem_edit import (
+ AddDirectory,
+ AddFile,
+ DeleteDirectory,
+ DeleteFile,
+ FileSystemEdit,
+ RenameDirectory,
+ RenameFile,
+ SequentialFileSystemEdit,
+)
+from .map_path import map_path
+
+
+def create_copy(orig_root: str, copy_root: str = None, ignore: Iterable[str] = []):
+    # TODO: Make ignore a spec, like .gitignore
+    orig_root = Path(orig_root)
+    if copy_root is None:
+        copy_root = orig_root / ".continue-copy"
+    copy_root = Path(copy_root)
+    ignore = set(ignore) | {str(copy_root)}
+
+    os.mkdir(copy_root)
+    # Walk the original root using absolute paths; ignored entries are symlinked
+    # into the copy instead of being duplicated
+    for child in os.listdir(orig_root):
+        orig_child = orig_root / child
+        copy_child = copy_root / child
+        if str(orig_child) in ignore:
+            # Never copy the copy itself (or anything else ignored by full path)
+            continue
+        if os.path.isdir(orig_child):
+            if child not in ignore:
+                create_copy(orig_child, copy_child, ignore)
+            else:
+                os.symlink(orig_child, copy_child)
+        else:
+            if child not in ignore:
+                shutil.copyfile(orig_child, copy_child)
+            else:
+                os.symlink(orig_child, copy_child)
+
+
+# The whole usage of watchdog here should only be specific to RealFileSystem, you want to have a different "Observer" class for VirtualFileSystem, which would depend on being sent notifications
+class CopyCodebaseEventHandler(PatternMatchingEventHandler):
+ def __init__(
+ self,
+ ignore_directories: List[str],
+ ignore_patterns: List[str],
+ autopilot: Autopilot,
+ orig_root: str,
+ copy_root: str,
+ filesystem: FileSystem,
+ ):
+ super().__init__(
+ ignore_directories=ignore_directories, ignore_patterns=ignore_patterns
+ )
+ self.autopilot = autopilot
+ self.orig_root = orig_root
+ self.copy_root = copy_root
+ self.filesystem = filesystem
+
+ # For now, we'll just make the update immediately, but eventually need to sync with autopilot.
+ # It should be the autopilot that makes the update right? It's just another action, everything comes from a single stream.
+
+ def _event_to_edit(self, event) -> Union[FileSystemEdit, None]:
+ # NOTE: You'll need to map paths to create both an action within the copy filesystem (the one you take) and one in the original filesystem (the one you'll record and allow the user to accept). Basically just need a converter built in to the FileSystemEdit class
+        src = event.src_path
+        if event.is_directory:
+            if event.event_type == "moved":
+                return RenameDirectory(path=src, new_path=event.dest_path)
+            elif event.event_type == "deleted":
+                return DeleteDirectory(path=src)
+            elif event.event_type == "created":
+                return AddDirectory(path=src)
+        else:
+            if event.event_type == "moved":
+                return RenameFile(filepath=src, new_filepath=event.dest_path)
+            elif event.event_type == "deleted":
+                return DeleteFile(filepath=src)
+            elif event.event_type == "created":
+                contents = self.filesystem.read(src)
+                # Unclear whether it will always pass a "modified" event right after if something like echo "abc" > newfile.txt happens
+                return AddFile(filepath=src, content=contents)
+            elif event.event_type == "modified":
+                # Watchdog doesn't pass the contents or edit, so have to get it myself and diff
+                updated = self.filesystem.read(src)
+                copy_filepath = map_path(src, self.orig_root, self.copy_root)
+                old = self.filesystem.read(copy_filepath)
+
+                edits = calculate_diff(src, updated, old)
+                return SequentialFileSystemEdit(edits=edits)
+ return None
+
+ def on_any_event(self, event):
+ edit = self._event_to_edit(event)
+ if edit is None:
+ return
+ edit = edit.with_mapped_paths(self.orig_root, self.copy_root)
+ action = ManualEditAction(edit)
+ self.autopilot.act(action)
+
+
+def maintain_copy_workspace(
+ autopilot: Autopilot, filesystem: FileSystem, orig_root: str, copy_root: str
+):
+ observer = Observer()
+ event_handler = CopyCodebaseEventHandler(
+ [".git"], [], autopilot, orig_root, copy_root, filesystem
+ )
+ observer.schedule(event_handler, orig_root, recursive=True)
+ observer.start()
+ try:
+        while observer.is_alive():
+ observer.join(1)
+ finally:
+ observer.stop()
+ observer.join()

diff --git a/server/continuedev/libs/util/count_tokens.py b/server/continuedev/libs/util/count_tokens.py
new file mode 100644
index 00000000..d895a2cf
--- /dev/null
+++ b/server/continuedev/libs/util/count_tokens.py
@@ -0,0 +1,206 @@
+import json
+from typing import Dict, List, Union
+
+from ...core.main import ChatMessage
+from .templating import render_templated_string
+
+# TODO: Move many of these into a specific LLM.properties() function that
+# contains max tokens, whether it's a chat model or not, and default args (not all
+# models want to be run at 0.5 temp). That would also let custom models made for
+# long contexts (like LLongMA) live here.
+aliases = {
+ "ggml": "gpt-3.5-turbo",
+ "claude-2": "gpt-3.5-turbo",
+}
+DEFAULT_MAX_TOKENS = 1024
+DEFAULT_ARGS = {
+ "max_tokens": DEFAULT_MAX_TOKENS,
+ "temperature": 0.5,
+}
+
+already_saw_import_err = False
+
+
+def encoding_for_model(model_name: str):
+ global already_saw_import_err
+ if already_saw_import_err:
+ return None
+
+ try:
+ import tiktoken
+ from tiktoken_ext import openai_public # noqa: F401
+
+ try:
+ return tiktoken.encoding_for_model(aliases.get(model_name, model_name))
+ except Exception as _:
+ return tiktoken.encoding_for_model("gpt-3.5-turbo")
+ except Exception as e:
+ print("Error importing tiktoken", e)
+ already_saw_import_err = True
+ return None
+
+
+def count_tokens(model_name: str, text: Union[str, None]):
+ if text is None:
+ return 0
+ encoding = encoding_for_model(model_name)
+ if encoding is None:
+        # Tokens average roughly 4 characters, so dividing by 2 deliberately overestimates to stay safe
+ return len(text) // 2
+ return len(encoding.encode(text, disallowed_special=()))
+
+
+def count_chat_message_tokens(model_name: str, chat_message: ChatMessage) -> int:
+ # Doing simpler, safer version of what is here:
+ # https://github.com/openai/openai-cookbook/blob/main/examples/How_to_count_tokens_with_tiktoken.ipynb
+ # every message follows <|start|>{role/name}\n{content}<|end|>\n
+ TOKENS_PER_MESSAGE = 4
+ return count_tokens(model_name, chat_message.content) + TOKENS_PER_MESSAGE
+
+
+def prune_raw_prompt_from_top(
+ model_name: str, context_length: int, prompt: str, tokens_for_completion: int
+):
+ max_tokens = context_length - tokens_for_completion
+ encoding = encoding_for_model(model_name)
+
+ if encoding is None:
+ desired_length_in_chars = max_tokens * 2
+ return prompt[-desired_length_in_chars:]
+
+ tokens = encoding.encode(prompt, disallowed_special=())
+ if len(tokens) <= max_tokens:
+ return prompt
+ else:
+ return encoding.decode(tokens[-max_tokens:])
+
+
+def prune_chat_history(
+ model_name: str,
+ chat_history: List[ChatMessage],
+ context_length: int,
+ tokens_for_completion: int,
+):
+ total_tokens = tokens_for_completion + sum(
+ count_chat_message_tokens(model_name, message) for message in chat_history
+ )
+
+ # 1. Replace beyond last 5 messages with summary
+ i = 0
+ while total_tokens > context_length and i < len(chat_history) - 5:
+        message = chat_history[i]
+ total_tokens -= count_tokens(model_name, message.content)
+ total_tokens += count_tokens(model_name, message.summary)
+ message.content = message.summary
+ i += 1
+
+ # 2. Remove entire messages until the last 5
+ while (
+ len(chat_history) > 5
+ and total_tokens > context_length
+ and len(chat_history) > 0
+ ):
+ message = chat_history.pop(0)
+ total_tokens -= count_tokens(model_name, message.content)
+
+ # 3. Truncate message in the last 5, except last 1
+ i = 0
+ while (
+ total_tokens > context_length
+ and len(chat_history) > 0
+ and i < len(chat_history) - 1
+ ):
+ message = chat_history[i]
+ total_tokens -= count_tokens(model_name, message.content)
+ total_tokens += count_tokens(model_name, message.summary)
+ message.content = message.summary
+ i += 1
+
+ # 4. Remove entire messages in the last 5, except last 1
+ while total_tokens > context_length and len(chat_history) > 1:
+ message = chat_history.pop(0)
+ total_tokens -= count_tokens(model_name, message.content)
+
+ # 5. Truncate last message
+ if total_tokens > context_length and len(chat_history) > 0:
+ message = chat_history[0]
+ message.content = prune_raw_prompt_from_top(
+ model_name, context_length, message.content, tokens_for_completion
+ )
+ total_tokens = context_length
+
+ return chat_history
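+
+# Illustrative sketch of the pruning order above (messages are example values):
+# messages older than the last five are first collapsed to their summaries, then
+# dropped entirely, and only if the history still doesn't fit are the most recent
+# messages summarized, dropped, and finally truncated.
+#
+#   pruned = prune_chat_history(
+#       "gpt-3.5-turbo", messages, context_length=4096, tokens_for_completion=1024
+#   )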
+
+
+# In case we've missed weird edge cases
+TOKEN_BUFFER_FOR_SAFETY = 100
+
+
+def compile_chat_messages(
+ model_name: str,
+ msgs: Union[List[ChatMessage], None],
+ context_length: int,
+ max_tokens: int,
+ prompt: Union[str, None] = None,
+ functions: Union[List, None] = None,
+ system_message: Union[str, None] = None,
+) -> List[Dict]:
+ """
+    The token budget must cover the system message, the chat history, any functions, and the prompt (converted to a user message), while leaving room for the completion.
+ """
+
+ msgs_copy = [msg.copy(deep=True) for msg in msgs] if msgs is not None else []
+
+ if prompt is not None:
+ prompt_msg = ChatMessage(role="user", content=prompt, summary=prompt)
+ msgs_copy += [prompt_msg]
+
+ if system_message is not None and system_message.strip() != "":
+        # NOTE: The system message takes second precedence to the user prompt, so it is
+        # inserted just before the prompt for pruning and moved back to the start afterward
+ rendered_system_message = render_templated_string(system_message)
+ system_chat_msg = ChatMessage(
+ role="system",
+ content=rendered_system_message,
+ summary=rendered_system_message,
+ )
+ # insert at second-to-last position
+ msgs_copy.insert(-1, system_chat_msg)
+
+ # Add tokens from functions
+ function_tokens = 0
+ if functions is not None:
+ for function in functions:
+ function_tokens += count_tokens(model_name, json.dumps(function))
+
+ if max_tokens + function_tokens + TOKEN_BUFFER_FOR_SAFETY >= context_length:
+ raise ValueError(
+ f"max_tokens ({max_tokens}) is too close to context_length ({context_length}), which doesn't leave room for chat history. This would cause incoherent responses. Try increasing the context_length parameter of the model in your config file."
+ )
+
+ msgs_copy = prune_chat_history(
+ model_name,
+ msgs_copy,
+ context_length,
+ function_tokens + max_tokens + TOKEN_BUFFER_FOR_SAFETY,
+ )
+
+ history = [msg.to_dict(with_functions=functions is not None) for msg in msgs_copy]
+
+ # Move system message back to start
+ if (
+ system_message is not None
+ and len(history) >= 2
+ and history[-2]["role"] == "system"
+ ):
+ system_message_dict = history.pop(-2)
+ history.insert(0, system_message_dict)
+
+ return history
+
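+# Illustrative call (model name and limits are example values). The result is a
+# list of role/content dicts with the system message moved back to the front and
+# the history pruned so max_tokens of completion still fit in the context window.
+#
+#   history = compile_chat_messages(
+#       model_name="gpt-3.5-turbo",
+#       msgs=chat_messages,
+#       context_length=4096,
+#       max_tokens=DEFAULT_MAX_TOKENS,
+#       prompt="Summarize the selected code",
+#       system_message="You are a helpful assistant.",
+#   )
+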
+
+def format_chat_messages(messages: List[ChatMessage]) -> str:
+ formatted = ""
+ for msg in messages:
+ formatted += f"<{msg['role'].capitalize()}>\n{msg['content']}\n\n"
+ return formatted
diff --git a/server/continuedev/libs/util/create_async_task.py b/server/continuedev/libs/util/create_async_task.py
new file mode 100644
index 00000000..232d3fa1
--- /dev/null
+++ b/server/continuedev/libs/util/create_async_task.py
@@ -0,0 +1,38 @@
+import asyncio
+import traceback
+from typing import Callable, Coroutine, Optional
+
+import nest_asyncio
+
+from .logging import logger
+from .telemetry import posthog_logger
+
+nest_asyncio.apply()
+
+
+def create_async_task(
+ coro: Coroutine, on_error: Optional[Callable[[Exception], Coroutine]] = None
+):
+ """asyncio.create_task and log errors by adding a callback"""
+ task = asyncio.create_task(coro)
+
+ def callback(future: asyncio.Future):
+ try:
+ future.result()
+ except Exception as e:
+ formatted_tb = "\n".join(traceback.format_exception(e))
+ logger.critical(f"Exception caught from async task: {formatted_tb}")
+ posthog_logger.capture_event(
+ "async_task_error",
+ {
+ "error_title": e.__str__() or e.__repr__(),
+ "error_message": "\n".join(traceback.format_exception(e)),
+ },
+ )
+
+ # Log the error to the GUI
+ if on_error is not None:
+ asyncio.create_task(on_error(e))
+
+ task.add_done_callback(callback)
+ return task
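+
+
+# Illustrative usage (the coroutine names are placeholders):
+#
+#   async def refresh_index():
+#       ...
+#
+#   async def on_index_error(e: Exception):
+#       logger.warning(f"Index refresh failed: {e}")
+#
+#   create_async_task(refresh_index(), on_index_error)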
diff --git a/server/continuedev/libs/util/devdata.py b/server/continuedev/libs/util/devdata.py
new file mode 100644
index 00000000..61b4351d
--- /dev/null
+++ b/server/continuedev/libs/util/devdata.py
@@ -0,0 +1,67 @@
+"""
+This file contains mechanisms for logging development data to files, SQL databases, and other formats.
+"""
+
+
+import json
+from datetime import datetime
+from typing import Any, Dict
+
+import aiohttp
+
+from .create_async_task import create_async_task
+from .logging import logger
+from .paths import getDevDataFilePath
+
+
+class DevDataLogger:
+ user_token: str = None
+ data_server_url: str = None
+
+ def setup(self, user_token: str = None, data_server_url: str = None):
+ self.user_token = user_token
+ self.data_server_url = data_server_url
+
+ def _to_data_server(self, table_name: str, data: Dict[str, Any]):
+ async def _async_helper(self, table_name: str, data: Dict[str, Any]):
+ if self.user_token is None or self.data_server_url is None:
+ return
+
+ async with aiohttp.ClientSession() as session:
+ await session.post(
+ f"{self.data_server_url}/event",
+ headers={"Authorization": f"Bearer {self.user_token}"},
+ json={
+ "table_name": table_name,
+ "data": data,
+ "user_token": self.user_token,
+ },
+ )
+
+ create_async_task(
+ _async_helper(self, table_name, data),
+ lambda e: logger.warning(f"Failed to send dev data: {e}"),
+ )
+
+ def _static_columns(self):
+ return {
+ "user_token": self.user_token or "NO_USER_TOKEN",
+ "timestamp": datetime.now().isoformat(),
+ }
+
+ def _to_local(self, table_name: str, data: Dict[str, Any]):
+ filepath = getDevDataFilePath(table_name)
+ with open(filepath, "a") as f:
+ json_line = json.dumps(data)
+ f.write(f"{json_line}\n")
+
+ def capture(self, table_name: str, data: Dict[str, Any]):
+ try:
+ data = {**self._static_columns(), **data}
+ self._to_data_server(table_name, data)
+ self._to_local(table_name, data)
+ except Exception as e:
+ logger.warning(f"Failed to capture dev data: {e}")
+
+
+dev_data_logger = DevDataLogger()
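+
+# Illustrative usage (table name, URL, and fields are example values):
+#
+#   dev_data_logger.setup(user_token="TOKEN", data_server_url="https://example.com")
+#   dev_data_logger.capture("tokens_generated", {"model": "gpt-4", "tokens": 128})
+#   # Appends a JSON line to dev_data/tokens_generated.jsonl and, when a user token
+#   # and server URL are configured, POSTs the same record to <data_server_url>/event.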
diff --git a/server/continuedev/libs/util/edit_config.py b/server/continuedev/libs/util/edit_config.py
new file mode 100644
index 00000000..4dc427d2
--- /dev/null
+++ b/server/continuedev/libs/util/edit_config.py
@@ -0,0 +1,149 @@
+import threading
+from typing import Any, Dict, List
+
+import redbaron
+
+from .paths import getConfigFilePath
+
+
+def get_config_source():
+ config_file_path = getConfigFilePath()
+ with open(config_file_path, "r") as file:
+ source_code = file.read()
+ return source_code
+
+
+def load_red():
+ source_code = get_config_source()
+
+ red = redbaron.RedBaron(source_code)
+ return red
+
+
+def get_config_node(red):
+ for node in red:
+ if node.type == "assignment" and node.target.value == "config":
+ return node
+ else:
+ raise Exception("Config file appears to be improperly formatted")
+
+
+def edit_property(
+ args: redbaron.RedBaron, key_path: List[str], value: redbaron.RedBaron
+):
+ for i in range(len(args)):
+ node = args[i]
+ if node.type != "call_argument":
+ continue
+
+ if node.target.value == key_path[0]:
+ if len(key_path) > 1:
+ edit_property(node.value.value[1].value, key_path[1:], value)
+ else:
+ args[i].value = value
+ return
+
+
+edit_lock = threading.Lock()
+
+
+def edit_config_property(key_path: List[str], value: redbaron.RedBaron):
+ with edit_lock:
+ red = load_red()
+ config = get_config_node(red)
+ config_args = config.value.value[1].value
+ edit_property(config_args, key_path, value)
+
+ with open(getConfigFilePath(), "w") as file:
+ file.write(red.dumps())
+
+
+def add_config_import(line: str):
+ # check if the import already exists
+ source = get_config_source()
+ if line in source:
+ return
+
+ with edit_lock:
+ red = load_red()
+ # if it doesn't exist, add it
+ red.insert(1, line)
+
+ with open(getConfigFilePath(), "w") as file:
+ file.write(red.dumps())
+
+
+filtered_attrs = {
+ "class_name",
+ "name",
+ "llm",
+}
+
+filtered_attrs_when_new = {"timeout", "prompt_templates"}
+
+
+def escape_string(string: str) -> str:
+ return string.replace('"', '\\"').replace("'", "\\'")
+
+
+def display_val(v: Any, k: str = None):
+ if k == "template_messages":
+ return v
+
+ if isinstance(v, str):
+ return f'"{escape_string(v)}"'
+ return str(v)
+
+
+def is_default(llm, k, v):
+ if k == "template_messages" and llm.__fields__[k].default is not None:
+ return llm.__fields__[k].default.__name__ == v
+ return v == llm.__fields__[k].default
+
+
+def display_llm_class(llm, new: bool = False):
+ sep = ",\n\t\t\t"
+ args = sep.join(
+ [
+ f"{k}={display_val(v, k)}"
+ for k, v in llm.dict().items()
+ if k not in filtered_attrs and v is not None and not is_default(llm, k, v)
+ ]
+ )
+ return f"{llm.__class__.__name__}(\n\t\t\t{args}\n\t\t)"
+
+
+def create_obj_node(
+ class_name: str, args: Dict[str, str], tabs: int = 1
+) -> redbaron.RedBaron:
+ args = [f"{key}={value}" for key, value in args.items()]
+ t = "\t" * tabs
+ new_line = "\n\t" + t
+ sep = "," + new_line
+
+ return redbaron.RedBaron(f"{class_name}({new_line}{sep.join(args)}\n{t})")[0]
+
+
+def create_string_node(string: str) -> redbaron.RedBaron:
+ string = escape_string(string)
+ if "\n" in string:
+ return redbaron.RedBaron(f'"""{string}"""')[0]
+ return redbaron.RedBaron(f'"{string}"')[0]
+
+
+def create_literal_node(literal: str) -> redbaron.RedBaron:
+ return redbaron.RedBaron(literal)[0]
+
+
+def create_float_node(float: float) -> redbaron.RedBaron:
+ return redbaron.RedBaron(f"{float}")[0]
+
+
+# Example:
+# edit_config_property(
+# [
+# "models",
+# "default",
+# ],
+# create_obj_node("OpenAI", {"api_key": '""', "model": '"gpt-4"'}),
+# )
diff --git a/server/continuedev/libs/util/errors.py b/server/continuedev/libs/util/errors.py
new file mode 100644
index 00000000..46074cfc
--- /dev/null
+++ b/server/continuedev/libs/util/errors.py
@@ -0,0 +1,2 @@
+class SessionNotFound(Exception):
+ pass
diff --git a/server/continuedev/libs/util/filter_files.py b/server/continuedev/libs/util/filter_files.py
new file mode 100644
index 00000000..6ebaa274
--- /dev/null
+++ b/server/continuedev/libs/util/filter_files.py
@@ -0,0 +1,33 @@
+import fnmatch
+from typing import List
+
+DEFAULT_IGNORE_DIRS = [
+ ".git",
+ ".vscode",
+ ".idea",
+ ".vs",
+ ".venv",
+ "env",
+ ".env",
+ "node_modules",
+ "dist",
+ "build",
+ "target",
+ "out",
+ "bin",
+ ".pytest_cache",
+ ".vscode-test",
+ ".continue",
+ "__pycache__",
+]
+
+DEFAULT_IGNORE_PATTERNS = DEFAULT_IGNORE_DIRS + list(
+    map(lambda d: f"**/{d}", DEFAULT_IGNORE_DIRS)
+)
+
+
+def should_filter_path(
+ path: str, ignore_patterns: List[str] = DEFAULT_IGNORE_PATTERNS
+) -> bool:
+ """Returns whether a file should be filtered"""
+ return any(fnmatch.fnmatch(path, pattern) for pattern in ignore_patterns)
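+
+
+# Illustrative checks (paths are example values):
+#
+#   should_filter_path("node_modules")      # True: matches the bare directory name
+#   should_filter_path("src/app/main.py")   # False: no ignore pattern matches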
diff --git a/server/continuedev/libs/util/logging.py b/server/continuedev/libs/util/logging.py
new file mode 100644
index 00000000..a4dc3562
--- /dev/null
+++ b/server/continuedev/libs/util/logging.py
@@ -0,0 +1,47 @@
+import logging
+import os
+
+from .paths import getLogFilePath
+
+logfile_path = getLogFilePath()
+
+try:
+    # Truncate the log file, keeping only the last 32KB so it doesn't grow unbounded
+ if os.path.exists(logfile_path) and os.path.getsize(logfile_path) > 32 * 1024:
+ tail = None
+ with open(logfile_path, "rb") as f:
+ f.seek(-32 * 1024, os.SEEK_END)
+ tail = f.read().decode("utf-8")
+
+ if tail is not None:
+ with open(logfile_path, "w") as f:
+ f.write(tail)
+
+except Exception as e:
+ print("Error truncating log file: {}".format(e))
+
+# Create a logger
+logger = logging.getLogger(__name__)
+logger.setLevel(logging.DEBUG)
+
+# Create a file handler
+file_handler = logging.FileHandler(logfile_path)
+file_handler.setLevel(logging.DEBUG)
+
+# Create a console handler
+console_handler = logging.StreamHandler()
+console_handler.setLevel(logging.DEBUG)
+
+# Create a formatter
+formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] %(message)s")
+
+# Add the formatter to the handlers
+file_handler.setFormatter(formatter)
+console_handler.setFormatter(formatter)
+
+# Add the handlers to the logger
+logger.addHandler(file_handler)
+logger.addHandler(console_handler)
+
+# Log a test message
+logger.debug("------ Begin Logs ------")
diff --git a/server/continuedev/libs/util/map_path.py b/server/continuedev/libs/util/map_path.py
new file mode 100644
index 00000000..1dddc2e9
--- /dev/null
+++ b/server/continuedev/libs/util/map_path.py
@@ -0,0 +1,16 @@
+from pathlib import Path
+
+
+def map_path(path: str, orig_root: str, copy_root: str) -> Path:
+ path = Path(path)
+ if path.is_relative_to(orig_root):
+ if path.is_absolute():
+ return Path(copy_root) / path.relative_to(orig_root)
+ else:
+ return path
+ else:
+ if path.is_absolute():
+ return path
+ else:
+ # For this one, you need to know the directory from which the relative path is being used.
+ return Path(orig_root) / path
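+
+
+# Illustrative mappings (roots are example paths):
+#
+#   map_path("/repo/src/app.py", "/repo", "/repo/.continue-copy")
+#   # -> Path("/repo/.continue-copy/src/app.py")
+#
+#   map_path("/elsewhere/file.txt", "/repo", "/repo/.continue-copy")
+#   # -> Path("/elsewhere/file.txt")  (absolute paths outside orig_root are left alone)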
diff --git a/server/continuedev/libs/util/paths.py b/server/continuedev/libs/util/paths.py
new file mode 100644
index 00000000..22e4b5b9
--- /dev/null
+++ b/server/continuedev/libs/util/paths.py
@@ -0,0 +1,148 @@
+import os
+import re
+from typing import Optional
+
+from ..constants.default_config import default_config
+from ..constants.main import (
+ CONTINUE_GLOBAL_FOLDER,
+ CONTINUE_SERVER_FOLDER,
+ CONTINUE_SESSIONS_FOLDER,
+)
+
+
+def find_data_file(filename):
+ datadir = os.path.dirname(__file__)
+ return os.path.abspath(os.path.join(datadir, filename))
+
+
+def getGlobalFolderPath():
+ path = os.path.join(os.path.expanduser("~"), CONTINUE_GLOBAL_FOLDER)
+ os.makedirs(path, exist_ok=True)
+ return path
+
+
+def getSessionsFolderPath():
+ path = os.path.join(getGlobalFolderPath(), CONTINUE_SESSIONS_FOLDER)
+ os.makedirs(path, exist_ok=True)
+ return path
+
+
+def getServerFolderPath():
+ path = os.path.join(getGlobalFolderPath(), CONTINUE_SERVER_FOLDER)
+ os.makedirs(path, exist_ok=True)
+ return path
+
+
+def getDevDataFolderPath():
+ path = os.path.join(getGlobalFolderPath(), "dev_data")
+ os.makedirs(path, exist_ok=True)
+ return path
+
+
+def getDiffsFolderPath():
+ path = os.path.join(getGlobalFolderPath(), "diffs")
+ os.makedirs(path, exist_ok=True)
+ return path
+
+
+def getDevDataFilePath(table_name: str):
+ filepath = os.path.join(getDevDataFolderPath(), f"{table_name}.jsonl")
+ if not os.path.exists(filepath):
+ with open(filepath, "w") as f:
+ f.write("")
+
+ return filepath
+
+
+def getMeilisearchExePath():
+ binary_name = "meilisearch.exe" if os.name == "nt" else "meilisearch"
+ path = os.path.join(getServerFolderPath(), binary_name)
+ return path
+
+
+def getSessionFilePath(session_id: str):
+ path = os.path.join(getSessionsFolderPath(), f"{session_id}.json")
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ return path
+
+
+def getSessionsListFilePath():
+ path = os.path.join(getSessionsFolderPath(), "sessions.json")
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ if not os.path.exists(path):
+ with open(path, "w") as f:
+ f.write("[]")
+ return path
+
+
+def migrateConfigFile(existing: str) -> Optional[str]:
+ if existing.strip() == "":
+ return default_config
+
+ migrated = (
+ existing.replace("MaybeProxyOpenAI", "OpenAIFreeTrial")
+ .replace("maybe_proxy_openai", "openai_free_trial")
+ .replace("unused=", "saved=")
+ .replace("medium=", "summarize=")
+ )
+ if migrated != existing:
+ return migrated
+
+ return None
+
+
+def getConfigFilePath() -> str:
+ path = os.path.join(getGlobalFolderPath(), "config.py")
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+
+ if not os.path.exists(path):
+ with open(path, "w") as f:
+ f.write(default_config)
+ else:
+ # Make any necessary migrations
+ with open(path, "r") as f:
+ existing_content = f.read()
+
+ migrated = migrateConfigFile(existing_content)
+
+ if migrated is not None:
+ with open(path, "w") as f:
+ f.write(migrated)
+
+ return path
+
+
+def convertConfigImports(shorten: bool) -> str:
+ path = getConfigFilePath()
+ # Make any necessary migrations
+ with open(path, "r") as f:
+ existing_content = f.read()
+
+ if shorten:
+ migrated = existing_content.replace(
+ "from continuedev.src.continuedev.", "from continuedev."
+ )
+ else:
+ migrated = re.sub(
+ r"(?<!src\.)continuedev\.(?!src)",
+ "continuedev.",
+ existing_content,
+ )
+
+ with open(path, "w") as f:
+ f.write(migrated)
+
+
+def getLogFilePath():
+ path = os.path.join(getGlobalFolderPath(), "continue.log")
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ return path
+
+
+def getSavedContextGroupsPath():
+ path = os.path.join(getGlobalFolderPath(), "saved_context_groups.json")
+ os.makedirs(os.path.dirname(path), exist_ok=True)
+ if not os.path.exists(path):
+ with open(path, "w") as f:
+            f.write("{}")
+ return path
diff --git a/server/continuedev/libs/util/queue.py b/server/continuedev/libs/util/queue.py
new file mode 100644
index 00000000..e1f98cc6
--- /dev/null
+++ b/server/continuedev/libs/util/queue.py
@@ -0,0 +1,17 @@
+import asyncio
+from typing import Any, Dict
+
+
+class AsyncSubscriptionQueue:
+ # The correct way to do this is probably to keep request IDs
+ queues: Dict[str, asyncio.Queue] = {}
+
+    def post(self, messageType: str, data: Any):
+ if messageType not in self.queues:
+ self.queues.update({messageType: asyncio.Queue()})
+ self.queues[messageType].put_nowait(data)
+
+    async def get(self, message_type: str) -> Any:
+ if message_type not in self.queues:
+ self.queues.update({message_type: asyncio.Queue()})
+ return await self.queues[message_type].get()
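+
+
+# Illustrative flow (the message type is an example value):
+#
+#   queue = AsyncSubscriptionQueue()
+#   queue.post("highlightedCode", {"filepath": "main.py"})
+#   data = await queue.get("highlightedCode")   # inside a coroutine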
diff --git a/server/continuedev/libs/util/ripgrep.py b/server/continuedev/libs/util/ripgrep.py
new file mode 100644
index 00000000..f7e0af9a
--- /dev/null
+++ b/server/continuedev/libs/util/ripgrep.py
@@ -0,0 +1,25 @@
+import os
+
+
+def get_rg_path():
+ if os.name == "nt":
+        # Fall back to plain "rg" on PATH if the VS Code bundled binary isn't found below
+        rg_path = "rg"
+        paths_to_try = [
+ f"C:\\Users\\{os.getlogin()}\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\node_modules.asar.unpacked\\@vscode\\ripgrep\\bin\\rg.exe",
+ f"C:\\Users\\{os.getlogin()}\\AppData\\Local\\Programs\\Microsoft VS Code\\resources\\app\\node_modules.asar.unpacked\\vscode-ripgrep\\bin\\rg.exe",
+ ]
+ for path in paths_to_try:
+ if os.path.exists(path):
+ rg_path = path
+ break
+ elif os.name == "posix":
+ if "darwin" in os.sys.platform:
+ rg_path = "/Applications/Visual Studio Code.app/Contents/Resources/app/node_modules.asar.unpacked/@vscode/ripgrep/bin/rg"
+ else:
+ rg_path = "/usr/share/code/resources/app/node_modules.asar.unpacked/vscode-ripgrep/bin/rg"
+ else:
+ rg_path = "rg"
+
+ if not os.path.exists(rg_path):
+ rg_path = "rg"
+
+ return rg_path
diff --git a/server/continuedev/libs/util/step_name_to_steps.py b/server/continuedev/libs/util/step_name_to_steps.py
new file mode 100644
index 00000000..25fd8ba3
--- /dev/null
+++ b/server/continuedev/libs/util/step_name_to_steps.py
@@ -0,0 +1,47 @@
+from typing import Dict
+
+from ...core.main import Step
+from ...core.steps import UserInputStep
+from ...libs.util.logging import logger
+from ...plugins.recipes.AddTransformRecipe.main import AddTransformRecipe
+from ...plugins.recipes.CreatePipelineRecipe.main import CreatePipelineRecipe
+from ...plugins.recipes.DDtoBQRecipe.main import DDtoBQRecipe
+from ...plugins.recipes.DeployPipelineAirflowRecipe.main import (
+ DeployPipelineAirflowRecipe,
+)
+from ...plugins.steps.chat import SimpleChatStep
+from ...plugins.steps.clear_history import ClearHistoryStep
+from ...plugins.steps.comment_code import CommentCodeStep
+from ...plugins.steps.feedback import FeedbackStep
+from ...plugins.steps.help import HelpStep
+from ...plugins.steps.main import EditHighlightedCodeStep
+from ...plugins.steps.on_traceback import DefaultOnTracebackStep
+from ...plugins.steps.open_config import OpenConfigStep
+
+# This mapping is used to convert from string in ContinueConfig json to corresponding Step class.
+# Used for example in slash_commands and steps_on_startup
+step_name_to_step_class = {
+ "UserInputStep": UserInputStep,
+ "EditHighlightedCodeStep": EditHighlightedCodeStep,
+ "SimpleChatStep": SimpleChatStep,
+ "CommentCodeStep": CommentCodeStep,
+ "FeedbackStep": FeedbackStep,
+ "AddTransformRecipe": AddTransformRecipe,
+ "CreatePipelineRecipe": CreatePipelineRecipe,
+ "DDtoBQRecipe": DDtoBQRecipe,
+ "DeployPipelineAirflowRecipe": DeployPipelineAirflowRecipe,
+ "DefaultOnTracebackStep": DefaultOnTracebackStep,
+ "ClearHistoryStep": ClearHistoryStep,
+ "OpenConfigStep": OpenConfigStep,
+ "HelpStep": HelpStep,
+}
+
+
+def get_step_from_name(step_name: str, params: Dict) -> Step:
+ try:
+ return step_name_to_step_class[step_name](**params)
+ except:
+ logger.error(
+ f"Incorrect parameters for step {step_name}. Parameters provided were: {params}"
+ )
+ raise
diff --git a/server/continuedev/libs/util/strings.py b/server/continuedev/libs/util/strings.py
new file mode 100644
index 00000000..f2b6035f
--- /dev/null
+++ b/server/continuedev/libs/util/strings.py
@@ -0,0 +1,64 @@
+from typing import Tuple
+
+
+def dedent_and_get_common_whitespace(s: str) -> Tuple[str, str]:
+ lines = s.splitlines()
+ if len(lines) == 0:
+ return "", ""
+
+ # Longest common whitespace prefix
+ lcp = lines[0].split(lines[0].strip())[0]
+ # Iterate through the lines
+ for i in range(1, len(lines)):
+ # Empty lines are wildcards
+ if lines[i].strip() == "":
+ continue # hey that's us!
+ # Iterate through the leading whitespace characters of the current line
+ for j in range(0, len(lcp)):
+ # If it doesn't have the same whitespace as lcp, then update lcp
+ if j >= len(lines[i]) or lcp[j] != lines[i][j]:
+ lcp = lcp[:j]
+ if lcp == "":
+ return s, ""
+ break
+
+ return "\n".join(map(lambda x: x.lstrip(lcp), lines)), lcp
+
+
+def strip_code_block(s: str) -> str:
+ """
+ Strips the code block from a string, if it has one.
+ """
+ if s.startswith("```\n") and s.endswith("\n```"):
+ return s[4:-4]
+ elif s.startswith("```") and s.endswith("```"):
+ return s[3:-3]
+ elif s.startswith("`") and s.endswith("`"):
+ return s[1:-1]
+ return s
+
+
+def remove_quotes_and_escapes(output: str) -> str:
+ """
+ Clean up the output of the completion API, removing unnecessary escapes and quotes
+ """
+ output = output.strip()
+
+ # Replace smart quotes
+ output = output.replace("“", '"')
+ output = output.replace("”", '"')
+ output = output.replace("‘", "'")
+ output = output.replace("’", "'")
+
+ # Remove escapes
+ output = output.replace('\\"', '"')
+ output = output.replace("\\'", "'")
+ output = output.replace("\\n", "\n")
+ output = output.replace("\\t", "\t")
+ output = output.replace("\\\\", "\\")
+ if (output.startswith('"') and output.endswith('"')) or (
+ output.startswith("'") and output.endswith("'")
+ ):
+ output = output[1:-1]
+
+ return output
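+
+
+# Illustrative behavior (inputs are example values):
+#
+#   strip_code_block("```\nprint(1)\n```")          # -> "print(1)"
+#   remove_quotes_and_escapes('"Hello, world!"')    # -> "Hello, world!"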
diff --git a/server/continuedev/libs/util/telemetry.py b/server/continuedev/libs/util/telemetry.py
new file mode 100644
index 00000000..1772fe20
--- /dev/null
+++ b/server/continuedev/libs/util/telemetry.py
@@ -0,0 +1,108 @@
+import os
+import socket
+from typing import Any, Dict, Optional
+
+from dotenv import load_dotenv
+
+from ..constants.main import CONTINUE_SERVER_VERSION_FILE
+from .commonregex import clean_pii_from_any
+from .paths import getServerFolderPath
+
+load_dotenv()
+in_codespaces = os.getenv("CODESPACES") == "true"
+POSTHOG_API_KEY = "phc_JS6XFROuNbhJtVCEdTSYk6gl5ArRrTNMpCcguAXlSPs"
+
+
+def is_connected():
+ try:
+ # connect to the host -- tells us if the host is actually reachable
+ socket.create_connection(("www.google.com", 80))
+ return True
+ except OSError:
+ pass
+ return False
+
+
+class PostHogLogger:
+ unique_id: str = "NO_UNIQUE_ID"
+ allow_anonymous_telemetry: bool = False
+ ide_info: Optional[Dict] = None
+ posthog = None
+
+ def __init__(self, api_key: str):
+ self.api_key = api_key
+
+ def setup(
+ self, unique_id: str, allow_anonymous_telemetry: bool, ide_info: Optional[Dict]
+ ):
+ self.unique_id = unique_id or "NO_UNIQUE_ID"
+ self.allow_anonymous_telemetry = allow_anonymous_telemetry or False
+ self.ide_info = ide_info
+
+ # Capture initial event
+ self.capture_event("session_start", {"os": os.name})
+
+ def capture_event(self, event_name: str, event_properties: Any):
+ """Safely capture event. Telemetry should never be the reason Continue doesn't work"""
+ try:
+ self._capture_event(event_name, event_properties)
+ except Exception as e:
+ print(f"Failed to capture event: {e}")
+ pass
+
+ _found_disconnected: bool = False
+
+ def _capture_event(self, event_name: str, event_properties: Any):
+ # logger.debug(
+ # f"Logging to PostHog: {event_name} ({self.unique_id}, {self.allow_anonymous_telemetry}): {event_properties}")
+ telemetry_path = os.path.expanduser("~/.continue/telemetry.log")
+
+ # Make sure the telemetry file exists
+ if not os.path.exists(telemetry_path):
+ os.makedirs(os.path.dirname(telemetry_path), exist_ok=True)
+ open(telemetry_path, "w").close()
+
+ with open(telemetry_path, "a") as f:
+ str_to_write = f"{event_name}: {event_properties}\n{self.unique_id}\n{self.allow_anonymous_telemetry}\n\n"
+ f.write(str_to_write)
+
+ if not self.allow_anonymous_telemetry:
+ return
+
+ # Clean PII from event properties
+ event_properties = clean_pii_from_any(event_properties)
+
+ # Add additional properties that are on every event
+ if in_codespaces:
+ event_properties["codespaces"] = True
+
+ server_version_file = os.path.join(
+ getServerFolderPath(), CONTINUE_SERVER_VERSION_FILE
+ )
+ if os.path.exists(server_version_file):
+ with open(server_version_file, "r") as f:
+ event_properties["server_version"] = f.read()
+
+ # Add operating system
+ event_properties["os"] = os.name
+ if self.ide_info:
+ event_properties["ide_name"] = self.ide_info.get("name", None)
+ event_properties["ide_version"] = self.ide_info.get("version", None)
+ event_properties["ide_remote_name"] = self.ide_info.get("remoteName", None)
+
+ # Send event to PostHog
+ if self.posthog is None:
+ from posthog import Posthog
+
+ # The personal API key is necessary only if you want to use local evaluation of feature flags.
+ self.posthog = Posthog(self.api_key, host="https://app.posthog.com")
+
+ if is_connected():
+ self.posthog.capture(self.unique_id, event_name, event_properties)
+ else:
+ if not self._found_disconnected:
+ self._found_disconnected = True
+ raise ConnectionError("No internet connection")
+
+
+posthog_logger = PostHogLogger(api_key=POSTHOG_API_KEY)
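+
+# Illustrative usage (IDs and IDE info are example values):
+#
+#   posthog_logger.setup(
+#       unique_id="abc123",
+#       allow_anonymous_telemetry=True,
+#       ide_info={"name": "vscode", "version": "1.83.0"},
+#   )
+#   posthog_logger.capture_event("step run", {"step_name": "SimpleChatStep"})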
diff --git a/server/continuedev/libs/util/templating.py b/server/continuedev/libs/util/templating.py
new file mode 100644
index 00000000..8d6a32fc
--- /dev/null
+++ b/server/continuedev/libs/util/templating.py
@@ -0,0 +1,76 @@
+import os
+from typing import Callable, Dict, List, Union
+
+import chevron
+
+from ...core.main import ChatMessage
+
+
+def get_vars_in_template(template):
+ """
+ Get the variables in a template
+ """
+ return [
+ token[1]
+ for token in chevron.tokenizer.tokenize(template)
+ if token[0] == "variable"
+ ]
+
+
+def escape_var(var: str) -> str:
+ """
+ Escape a variable so it can be used in a template
+ """
+ return var.replace(os.path.sep, "").replace(".", "")
+
+
+def render_templated_string(template: str) -> str:
+ """
+ Render system message or other templated string with mustache syntax.
+ Right now it only supports rendering absolute file paths as their contents.
+ """
+ vars = get_vars_in_template(template)
+
+ args = {}
+ for var in vars:
+ if var.startswith(os.path.sep):
+ # Escape vars which are filenames, because mustache doesn't allow / in variable names
+ escaped_var = escape_var(var)
+ template = template.replace(var, escaped_var)
+
+ if os.path.exists(var):
+ args[escaped_var] = open(var, "r").read()
+ else:
+ args[escaped_var] = ""
+
+ return chevron.render(template, args)
+
+
+"""
+A PromptTemplate can either be a template string (mustache syntax, e.g. {{user_input}}) or
+a function which takes the history and a dictionary of additional key-value pairs and returns
+either a string or a list of ChatMessages.
+If a string is returned, it will be assumed that the chat history should be ignored
+"""
+PromptTemplate = Union[
+ str, Callable[[ChatMessage, Dict[str, str]], Union[str, List[ChatMessage]]]
+]
+
+
+def render_prompt_template(
+ template: PromptTemplate, history: List[ChatMessage], other_data: Dict[str, str]
+) -> str:
+ """
+ Render a prompt template.
+ """
+ if isinstance(template, str):
+ data = {
+ "history": history,
+ **other_data,
+ }
+ if len(history) > 0 and history[0].role == "system":
+ data["system_message"] = history.pop(0).content
+
+ return chevron.render(template, data)
+ else:
+ return template(history, other_data)
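+
+
+# Illustrative call with a string template (values are examples):
+#
+#   render_prompt_template(
+#       "{{system_message}}\n\n{{user_input}}",
+#       history=[ChatMessage(role="system", content="Be brief.", summary="Be brief.")],
+#       other_data={"user_input": "Explain this error"},
+#   )
+#   # -> "Be brief.\n\nExplain this error"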
diff --git a/server/continuedev/libs/util/traceback/traceback_parsers.py b/server/continuedev/libs/util/traceback/traceback_parsers.py
new file mode 100644
index 00000000..58a4f728
--- /dev/null
+++ b/server/continuedev/libs/util/traceback/traceback_parsers.py
@@ -0,0 +1,56 @@
+from boltons import tbutils
+
+from ....models.main import Traceback
+
+PYTHON_TRACEBACK_PREFIX = "Traceback (most recent call last):"
+
+
+def get_python_traceback(output: str) -> str:
+ if PYTHON_TRACEBACK_PREFIX in output:
+ tb_string = output.split(PYTHON_TRACEBACK_PREFIX)[-1]
+
+ # Then need to remove any lines below the traceback. Do this by noticing that
+        # the last line of the traceback is the first (other than the prefix) that doesn't begin with whitespace
+ lines = list(filter(lambda x: x.strip() != "", tb_string.splitlines()))
+ for i in range(len(lines) - 1):
+ if not lines[i].startswith(" "):
+ tb_string = "\n".join(lines[: i + 1])
+ break
+
+ return PYTHON_TRACEBACK_PREFIX + "\n" + tb_string
+ elif "SyntaxError" in output:
+ return "SyntaxError" + output.split("SyntaxError")[-1]
+ else:
+ return None
+
+
+def get_javascript_traceback(output: str) -> str:
+ lines = output.splitlines()
+ first_line = None
+ for i in range(len(lines) - 1):
+ segs = lines[i].split(":")
+ if (
+ len(segs) > 1
+ and segs[0] != ""
+ and segs[1].startswith(" ")
+ and lines[i + 1].strip().startswith("at")
+ ):
+ first_line = lines[i]
+ break
+
+ if first_line is not None:
+ return "\n".join(lines[lines.index(first_line) :])
+ else:
+ return None
+
+
+def parse_python_traceback(tb_string: str) -> Traceback:
+ # Remove anchor lines - tbutils doesn't always get them right
+ tb_string = "\n".join(
+ filter(
+ lambda x: x.strip().replace("~", "").replace("^", "") != "",
+ tb_string.splitlines(),
+ )
+ )
+ exc = tbutils.ParsedException.from_string(tb_string)
+ return Traceback.from_tbutil_parsed_exc(exc)
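+
+
+# Illustrative call (the captured terminal output is an example value):
+#
+#   output = "...stdout...\nTraceback (most recent call last):\n" \
+#            '  File "main.py", line 1, in <module>\nZeroDivisionError: division by zero'
+#   get_python_traceback(output)
+#   # -> the text from "Traceback (most recent call last):" onward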
diff --git a/server/continuedev/models/__init__.py b/server/continuedev/models/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/server/continuedev/models/__init__.py
diff --git a/server/continuedev/models/filesystem.py b/server/continuedev/models/filesystem.py
new file mode 100644
index 00000000..27244c4b
--- /dev/null
+++ b/server/continuedev/models/filesystem.py
@@ -0,0 +1,398 @@
+import os
+from abc import abstractmethod
+from typing import Dict, List, Tuple
+
+from pydantic import BaseModel
+
+from ..models.main import AbstractModel, Position, Range
+from .filesystem_edit import (
+ AddDirectory,
+ AddFile,
+ DeleteDirectory,
+ DeleteFile,
+ EditDiff,
+ FileEdit,
+ FileSystemEdit,
+ RenameDirectory,
+ RenameFile,
+ SequentialFileSystemEdit,
+)
+
+
+class RangeInFile(BaseModel):
+ filepath: str
+ range: Range
+
+ def __hash__(self):
+ return hash((self.filepath, self.range))
+
+ @staticmethod
+ def from_entire_file(filepath: str, content: str) -> "RangeInFile":
+ range = Range.from_entire_file(content)
+ return RangeInFile(filepath=filepath, range=range)
+
+ def translated(self, lines: int):
+ return RangeInFile(filepath=self.filepath, range=self.range.translated(lines))
+
+
+class RangeInFileWithContents(RangeInFile):
+ """A range in a file with the contents of the range."""
+
+ contents: str
+
+ def __hash__(self):
+ return hash((self.filepath, self.range, self.contents))
+
+ def union(self, other: "RangeInFileWithContents") -> "RangeInFileWithContents":
+ assert self.filepath == other.filepath
+ # Use a placeholder variable for self and swap it with other if other comes before self
+ first = self
+ second = other
+ if other.range.start < self.range.start:
+ first = other
+ second = self
+
+ assert first.filepath == second.filepath
+
+ # Calculate union of contents
+ num_overlapping_lines = first.range.end.line - second.range.start.line + 1
+ union_lines = (
+ first.contents.splitlines()[:-num_overlapping_lines]
+ + second.contents.splitlines()
+ )
+
+ return RangeInFileWithContents(
+ filepath=first.filepath,
+ range=first.range.union(second.range),
+ contents="\n".join(union_lines),
+ )
+
+ @staticmethod
+ def from_entire_file(filepath: str, content: str) -> "RangeInFileWithContents":
+ lines = content.splitlines()
+ if not lines:
+ return RangeInFileWithContents(
+ filepath=filepath, range=Range.from_shorthand(0, 0, 0, 0), contents=""
+ )
+ return RangeInFileWithContents(
+ filepath=filepath,
+ range=Range.from_shorthand(0, 0, len(lines) - 1, len(lines[-1]) - 1),
+ contents=content,
+ )
+
+ @staticmethod
+ def from_range_in_file(rif: RangeInFile, content: str) -> "RangeInFileWithContents":
+ return RangeInFileWithContents(
+ filepath=rif.filepath, range=rif.range, contents=content
+ )
+
+
+class FileSystem(AbstractModel):
+ """An abstract filesystem that can read/write from a set of files."""
+
+ @abstractmethod
+ def read(self, path) -> str:
+ raise NotImplementedError
+
+ @abstractmethod
+ def readlines(self, path) -> List[str]:
+ raise NotImplementedError
+
+ @abstractmethod
+ def write(self, path, content):
+ raise NotImplementedError
+
+ @abstractmethod
+ def exists(self, path) -> bool:
+ raise NotImplementedError
+
+ @abstractmethod
+ def read_range_in_file(self, r: RangeInFile) -> str:
+ raise NotImplementedError
+
+ @abstractmethod
+ def rename_file(self, filepath: str, new_filepath: str):
+ raise NotImplementedError
+
+ @abstractmethod
+ def rename_directory(self, path: str, new_path: str):
+ raise NotImplementedError
+
+ @abstractmethod
+ def delete_file(self, filepath: str):
+ raise NotImplementedError
+
+ @abstractmethod
+ def delete_directory(self, path: str):
+ raise NotImplementedError
+
+ @abstractmethod
+ def add_directory(self, path: str):
+ raise NotImplementedError
+
+ @abstractmethod
+ def apply_file_edit(self, edit: FileEdit) -> EditDiff:
+ raise NotImplementedError
+
+ @abstractmethod
+ def apply_edit(self, edit: FileSystemEdit) -> EditDiff:
+ """Apply edit to filesystem, calculate the reverse edit, and return and EditDiff"""
+ raise NotImplementedError
+
+ @abstractmethod
+ def list_directory_contents(self, path: str, recursive: bool = False) -> List[str]:
+ """List the contents of a directory"""
+ raise NotImplementedError
+
+ @classmethod
+ def read_range_in_str(self, s: str, r: Range) -> str:
+ lines = s.split("\n")[r.start.line : r.end.line + 1]
+ if len(lines) == 0:
+ return ""
+
+ lines[0] = lines[0][r.start.character :]
+ lines[-1] = lines[-1][: r.end.character + 1]
+ return "\n".join(lines)
+
+ @classmethod
+ def apply_edit_to_str(cls, s: str, edit: FileEdit) -> Tuple[str, EditDiff]:
+ original = cls.read_range_in_str(s, edit.range)
+
+ # Split lines and deal with some edge cases (could obviously be nicer)
+ lines = s.splitlines()
+ if s.startswith("\n"):
+ lines.insert(0, "")
+ if s.endswith("\n"):
+ lines.append("")
+
+ if len(lines) == 0:
+ lines = [""]
+
+ end = Position(line=edit.range.end.line, character=edit.range.end.character)
+ if edit.range.end.line == len(lines) and edit.range.end.character == 0:
+ end = Position(
+ line=edit.range.end.line - 1,
+ character=len(lines[min(len(lines) - 1, edit.range.end.line - 1)]),
+ )
+
+ before_lines = lines[: edit.range.start.line]
+ after_lines = lines[end.line + 1 :]
+ between_str = (
+ lines[min(len(lines) - 1, edit.range.start.line)][
+ : edit.range.start.character
+ ]
+ + edit.replacement
+ + lines[min(len(lines) - 1, end.line)][end.character + 1 :]
+ )
+
+ new_range = Range(
+ start=edit.range.start,
+ end=Position(
+ line=edit.range.start.line + len(edit.replacement.splitlines()) - 1,
+ character=edit.range.start.character
+ + len(edit.replacement.splitlines()[-1])
+ if edit.replacement != ""
+ else 0,
+ ),
+ )
+
+ lines = before_lines + between_str.splitlines() + after_lines
+ return "\n".join(lines), EditDiff(
+ forward=edit,
+ backward=FileEdit(
+ filepath=edit.filepath, range=new_range, replacement=original
+ ),
+ )
+
+    def reverse_edit_on_str(self, s: str, diff: EditDiff) -> str:
+        # The backward edit of an EditDiff is itself a FileEdit over the edited
+        # string, so reversing amounts to applying it
+        reversed_str, _ = FileSystem.apply_edit_to_str(s, diff.backward)
+        return reversed_str
+
+ def apply_edit(self, edit: FileSystemEdit) -> EditDiff:
+ backward = None
+ if isinstance(edit, FileEdit):
+ diff = self.apply_file_edit(edit)
+ backward = diff.backward
+ elif isinstance(edit, AddFile):
+ self.write(edit.filepath, edit.content)
+ backward = DeleteFile(edit.filepath)
+ elif isinstance(edit, DeleteFile):
+ contents = self.read(edit.filepath)
+ backward = AddFile(edit.filepath, contents)
+ self.delete_file(edit.filepath)
+ elif isinstance(edit, RenameFile):
+ self.rename_file(edit.filepath, edit.new_filepath)
+ backward = RenameFile(
+ filepath=edit.new_filepath, new_filepath=edit.filepath
+ )
+ elif isinstance(edit, AddDirectory):
+ self.add_directory(edit.path)
+ backward = DeleteDirectory(edit.path)
+ elif isinstance(edit, DeleteDirectory):
+ # This isn't atomic!
+ backward_edits = []
+ for root, dirs, files in os.walk(edit.path, topdown=False):
+ for f in files:
+ path = os.path.join(root, f)
+ backward_edits.append(self.apply_edit(DeleteFile(path)))
+ for d in dirs:
+ path = os.path.join(root, d)
+ backward_edits.append(self.apply_edit(DeleteDirectory(path)))
+
+ backward_edits.append(self.apply_edit(DeleteDirectory(edit.path)))
+ backward_edits.reverse()
+ backward = SequentialFileSystemEdit(edits=backward_edits)
+ elif isinstance(edit, RenameDirectory):
+ self.rename_directory(edit.path, edit.new_path)
+ backward = RenameDirectory(path=edit.new_path, new_path=edit.path)
+ elif isinstance(edit, FileSystemEdit):
+ backward_edits = []
+ for edit in edit.next_edit():
+ backward_edits.append(self.apply_edit(edit))
+ backward_edits.reverse()
+ backward = SequentialFileSystemEdit(edits=backward_edits)
+ else:
+ raise TypeError("Unknown FileSystemEdit type: " + str(type(edit)))
+
+ return EditDiff(forward=edit, backward=backward)
+
+
+class RealFileSystem(FileSystem):
+ """A filesystem that reads/writes from the actual filesystem."""
+
+ def read(self, path) -> str:
+ with open(path, "r") as f:
+ return f.read()
+
+ def readlines(self, path) -> List[str]:
+ with open(path, "r") as f:
+ return f.readlines()
+
+ def write(self, path, content):
+ with open(path, "w") as f:
+ f.write(content)
+
+ def exists(self, path) -> bool:
+ return os.path.exists(path)
+
+ def read_range_in_file(self, r: RangeInFile) -> str:
+ return FileSystem.read_range_in_str(self.read(r.filepath), r.range)
+
+ def rename_file(self, filepath: str, new_filepath: str):
+ os.rename(filepath, new_filepath)
+
+ def rename_directory(self, path: str, new_path: str):
+ os.rename(path, new_path)
+
+ def delete_file(self, filepath: str):
+ os.remove(filepath)
+
+ def delete_directory(self, path: str):
+ raise NotImplementedError
+
+ def add_directory(self, path: str):
+ os.makedirs(path)
+
+ def apply_file_edit(self, edit: FileEdit) -> EditDiff:
+ old_content = self.read(edit.filepath)
+ new_content, diff = FileSystem.apply_edit_to_str(old_content, edit)
+ self.write(edit.filepath, new_content)
+ return diff
+
+ def list_directory_contents(self, path: str, recursive: bool = False) -> List[str]:
+ """List the contents of a directory"""
+ if recursive:
+ # Walk
+ paths = []
+ for root, dirs, files in os.walk(path):
+ for f in files:
+ paths.append(os.path.join(root, f))
+
+ return paths
+ return list(map(lambda x: os.path.join(path, x), os.listdir(path)))
+
+
+class VirtualFileSystem(FileSystem):
+ """A simulated filesystem from a mapping of filepath to file contents."""
+
+ files: Dict[str, str]
+
+ def __init__(self, files: Dict[str, str]):
+ self.files = files
+
+ def read(self, path) -> str:
+ return self.files[path]
+
+ def readlines(self, path) -> List[str]:
+ return self.files[path].splitlines()
+
+ def write(self, path, content):
+ self.files[path] = content
+
+ def exists(self, path) -> bool:
+ return path in self.files
+
+ def read_range_in_file(self, r: RangeInFile) -> str:
+ return FileSystem.read_range_in_str(self.read(r.filepath), r.range)
+
+ def rename_file(self, filepath: str, new_filepath: str):
+ self.files[new_filepath] = self.files[filepath]
+ del self.files[filepath]
+
+ def rename_directory(self, path: str, new_path: str):
+        for filepath in list(self.files):
+ if filepath.startswith(path):
+ new_filepath = new_path + filepath[len(path) :]
+ self.files[new_filepath] = self.files[filepath]
+ del self.files[filepath]
+
+ def delete_file(self, filepath: str):
+ del self.files[filepath]
+
+ def delete_directory(self, path: str):
+ raise NotImplementedError
+
+ def add_directory(self, path: str):
+ # For reasons as seen here and in delete_directory, a Dict[str, str] might not be the best representation. Could just preprocess to something better upon __init__
+ pass
+
+ def apply_file_edit(self, edit: FileEdit) -> EditDiff:
+ old_content = self.read(edit.filepath)
+        new_content, diff = FileSystem.apply_edit_to_str(old_content, edit)
+        self.write(edit.filepath, new_content)
+        return diff
+
+ def list_directory_contents(self, path: str, recursive: bool = False) -> List[str]:
+ """List the contents of a directory"""
+        if recursive:
+            return [f for f in self.files if f.startswith(path)]
+
+        return [
+            f
+            for f in self.files
+            if f.startswith(path) and "/" not in f[len(path) :]
+        ]
+
+
+# TODO: Uniform errors thrown by any FileSystem subclass.
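+
+# Illustrative round trip (file contents are example values): apply_edit returns an
+# EditDiff whose backward edit restores the original text.
+#
+#   fs = VirtualFileSystem({"main.py": "print('hello')\n"})
+#   diff = fs.apply_edit(
+#       FileEdit(
+#           filepath="main.py",
+#           range=Range.from_shorthand(0, 6, 0, 12),
+#           replacement="'world'",
+#       )
+#   )
+#   fs.read("main.py")             # -> "print('world')\n"
+#   fs.apply_edit(diff.backward)   # -> restores "print('hello')\n"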
diff --git a/server/continuedev/models/filesystem_edit.py b/server/continuedev/models/filesystem_edit.py
new file mode 100644
index 00000000..9316ff46
--- /dev/null
+++ b/server/continuedev/models/filesystem_edit.py
@@ -0,0 +1,164 @@
+import os
+from abc import abstractmethod
+from typing import Generator, List
+
+from pydantic import BaseModel
+
+from ..libs.util.map_path import map_path
+from .main import Position, Range
+
+
+class FileSystemEdit(BaseModel):
+ @abstractmethod
+ def next_edit(self) -> Generator["FileSystemEdit", None, None]:
+ raise NotImplementedError
+
+ @abstractmethod
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+ raise NotImplementedError
+
+
+class AtomicFileSystemEdit(FileSystemEdit):
+ def next_edit(self) -> Generator["FileSystemEdit", None, None]:
+ yield self
+
+
+class FileEdit(AtomicFileSystemEdit):
+ filepath: str
+ range: Range
+ replacement: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return FileEdit(
+            filepath=str(map_path(self.filepath, orig_root, copy_root)),
+            range=self.range,
+            replacement=self.replacement,
+        )
+
+ @staticmethod
+ def from_deletion(filepath: str, range: Range) -> "FileEdit":
+ return FileEdit(filepath=filepath, range=range, replacement="")
+
+ @staticmethod
+ def from_insertion(filepath: str, position: Position, content: str) -> "FileEdit":
+ return FileEdit(
+ filepath=filepath,
+ range=Range.from_shorthand(
+ position.line, position.character, position.line, position.character
+ ),
+ replacement=content,
+ )
+
+ @staticmethod
+ def from_append(
+ filepath: str, previous_content: str, appended_content: str
+ ) -> "FileEdit":
+ return FileEdit(
+ filepath=filepath,
+ range=Range.from_position(Position.from_end_of_file(previous_content)),
+ replacement=appended_content,
+ )
+
+
+class FileEditWithFullContents(BaseModel):
+ fileEdit: FileEdit
+ fileContents: str
+
+
+class AddFile(AtomicFileSystemEdit):
+ filepath: str
+ content: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return AddFile(
+            filepath=str(map_path(self.filepath, orig_root, copy_root)),
+            content=self.content,
+        )
+
+
+class DeleteFile(AtomicFileSystemEdit):
+ filepath: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return DeleteFile(filepath=str(map_path(self.filepath, orig_root, copy_root)))
+
+
+class RenameFile(AtomicFileSystemEdit):
+ filepath: str
+ new_filepath: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return RenameFile(
+            filepath=str(map_path(self.filepath, orig_root, copy_root)),
+            new_filepath=str(map_path(self.new_filepath, orig_root, copy_root)),
+        )
+
+
+class AddDirectory(AtomicFileSystemEdit):
+ path: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return AddDirectory(path=str(map_path(self.path, orig_root, copy_root)))
+
+
+class DeleteDirectory(AtomicFileSystemEdit):
+ path: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return DeleteDirectory(path=str(map_path(self.path, orig_root, copy_root)))
+
+
+class RenameDirectory(AtomicFileSystemEdit):
+ path: str
+ new_path: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return RenameDirectory(
+            path=str(map_path(self.path, orig_root, copy_root)),
+            new_path=str(map_path(self.new_path, orig_root, copy_root)),
+        )
+
+
+class DeleteDirectoryRecursive(FileSystemEdit):
+ path: str
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return DeleteDirectoryRecursive(path=str(map_path(self.path, orig_root, copy_root)))
+
+ def next_edit(self) -> Generator[FileSystemEdit, None, None]:
+ yield DeleteDirectory(path=self.path)
+ for child in os.listdir(self.path):
+ child_path = os.path.join(self.path, child)
+ if os.path.isdir(child_path):
+ yield DeleteDirectoryRecursive(path=child_path)
+ else:
+ yield DeleteFile(filepath=child_path)
+
+
+class SequentialFileSystemEdit(FileSystemEdit):
+ edits: List[FileSystemEdit]
+
+ def with_mapped_paths(self, orig_root: str, copy_root: str) -> "FileSystemEdit":
+        return SequentialFileSystemEdit(
+            edits=[edit.with_mapped_paths(orig_root, copy_root) for edit in self.edits]
+        )
+
+ def next_edit(self) -> Generator["FileSystemEdit", None, None]:
+ for edit in self.edits:
+ yield from edit.next_edit()
+
+
+class EditDiff(BaseModel):
+ """A reversible edit that can be applied to a file."""
+
+ forward: FileSystemEdit
+ backward: FileSystemEdit
+
+ @classmethod
+ def from_sequence(cls, diffs: List["EditDiff"]) -> "EditDiff":
+ forwards = []
+ backwards = []
+ for diff in diffs:
+ forwards.append(diff.forward)
+ backwards.insert(0, diff.backward)
+ return cls(
+ forward=SequentialFileSystemEdit(edits=forwards),
+ backward=SequentialFileSystemEdit(edits=backwards),
+ )
diff --git a/server/continuedev/models/generate_json_schema.py b/server/continuedev/models/generate_json_schema.py
new file mode 100644
index 00000000..88a1db68
--- /dev/null
+++ b/server/continuedev/models/generate_json_schema.py
@@ -0,0 +1,54 @@
+import os
+
+from pydantic import schema_json_of
+
+from ..core.config import ContinueConfig
+from ..core.context import ContextItem
+from ..core.main import FullState, History, HistoryNode, SessionInfo
+from ..core.models import Models
+from ..libs.llm.base import LLM
+from .filesystem import FileEdit, RangeInFile
+from .filesystem_edit import FileEditWithFullContents
+from .main import Position, Range, Traceback, TracebackFrame
+
+MODELS_TO_GENERATE = (
+ [Position, Range, Traceback, TracebackFrame]
+ + [RangeInFile, FileEdit]
+ + [FileEditWithFullContents]
+ + [History, HistoryNode, FullState, SessionInfo]
+ + [ContinueConfig]
+ + [ContextItem]
+ + [Models]
+ + [LLM]
+)
+
+RENAMES = {"ExampleClass": "RenamedName"}
+
+SCHEMA_DIR = "../schema/json"
+
+
+def clear_schemas():
+ for filename in os.listdir(SCHEMA_DIR):
+ if filename.endswith(".json"):
+ os.remove(os.path.join(SCHEMA_DIR, filename))
+
+
+def main():
+ clear_schemas()
+ for model in MODELS_TO_GENERATE:
+ title = RENAMES.get(model.__name__, model.__name__)
+ try:
+ json = schema_json_of(model, indent=2, title=title)
+ except Exception as e:
+ import traceback
+
+ print(f"Failed to generate json schema for {title}: {e}")
+ traceback.print_exc()
+ continue # pun intended
+
+ with open(f"{SCHEMA_DIR}/{title}.json", "w") as f:
+ f.write(json)
+
+
+if __name__ == "__main__":
+ main()
diff --git a/server/continuedev/models/main.py b/server/continuedev/models/main.py
new file mode 100644
index 00000000..5519d718
--- /dev/null
+++ b/server/continuedev/models/main.py
@@ -0,0 +1,229 @@
+from abc import ABC
+from functools import total_ordering
+from typing import List, Tuple, Union
+
+from pydantic import BaseModel, root_validator
+
+
+class ContinueBaseModel(BaseModel):
+ class Config:
+ underscore_attrs_are_private = True
+
+
+@total_ordering
+class Position(BaseModel):
+ line: int
+ character: int
+
+ def __hash__(self):
+ return hash((self.line, self.character))
+
+ def __eq__(self, other: "Position") -> bool:
+ return self.line == other.line and self.character == other.character
+
+ def __lt__(self, other: "Position") -> bool:
+ if self.line < other.line:
+ return True
+ elif self.line == other.line:
+ return self.character < other.character
+ else:
+ return False
+
+ @staticmethod
+ def from_index(string: str, index: int) -> "Position":
+ """Convert index in string to line and character"""
+ line = string.count("\n", 0, index)
+ if line == 0:
+ character = index
+ else:
+ character = index - string.rindex("\n", 0, index) - 1
+
+ return Position(line=line, character=character)
+
+ @staticmethod
+ def from_end_of_file(contents: str) -> "Position":
+ return Position.from_index(contents, len(contents))
+
+ def to_index(self, string: str) -> int:
+ """Convert line and character to index in string"""
+ lines = string.splitlines()
+        # +1 per preceding line accounts for the newline characters stripped by splitlines()
+        return sum(len(line) + 1 for line in lines[: self.line]) + self.character
+
+
+class PositionInFile(BaseModel):
+ position: Position
+ filepath: str
+
+
+class Range(BaseModel):
+ """A range in a file. 0-indexed."""
+
+ start: Position
+ end: Position
+
+ def __lt__(self, other: "Range") -> bool:
+ return self.start < other.start or (
+ self.start == other.start and self.end < other.end
+ )
+
+ def __eq__(self, other: "Range") -> bool:
+ return self.start == other.start and self.end == other.end
+
+ def __hash__(self):
+ return hash((self.start, self.end))
+
+ def union(self, other: "Range") -> "Range":
+ return Range(
+ start=min(self.start, other.start),
+ end=max(self.end, other.end),
+ )
+
+ def is_empty(self) -> bool:
+ return self.start == self.end
+
+ def indices_in_string(self, string: str) -> Tuple[int, int]:
+ """Get the start and end indices of this range in the string"""
+ lines = string.splitlines()
+ if len(lines) == 0:
+ return (0, 0)
+
+ start_index = (
+ sum([len(line) + 1 for line in lines[: self.start.line]])
+ + self.start.character
+ )
+ end_index = (
+ sum([len(line) + 1 for line in lines[: self.end.line]]) + self.end.character
+ )
+ return (start_index, end_index)
+
+ def overlaps_with(self, other: "Range") -> bool:
+ return not (self.end < other.start or self.start > other.end)
+
+ def to_full_lines(self) -> "Range":
+ return Range(
+ start=Position(line=self.start.line, character=0),
+ end=Position(line=self.end.line + 1, character=0),
+ )
+
+ def translated(self, lines: int):
+ return Range(
+ start=Position(
+ line=self.start.line + lines, character=self.start.character
+ ),
+ end=Position(line=self.end.line + lines, character=self.end.character),
+ )
+
+ def contains(self, position: Position) -> bool:
+ return self.start <= position and position <= self.end
+
+ def merge_with(self, other: "Range") -> "Range":
+ return Range(
+ start=min(self.start, other.start).copy(),
+ end=max(self.end, other.end).copy(),
+ )
+
+ @staticmethod
+ def from_indices(string: str, start_index: int, end_index: int) -> "Range":
+ return Range(
+ start=Position.from_index(string, start_index),
+ end=Position.from_index(string, end_index),
+ )
+
+ @staticmethod
+ def from_shorthand(
+ start_line: int, start_char: int, end_line: int, end_char: int
+ ) -> "Range":
+ return Range(
+ start=Position(line=start_line, character=start_char),
+ end=Position(line=end_line, character=end_char),
+ )
+
+ @staticmethod
+ def from_entire_file(content: str) -> "Range":
+ lines = content.splitlines()
+ if len(lines) == 0:
+ return Range.from_shorthand(0, 0, 0, 0)
+ return Range.from_shorthand(0, 0, len(lines), 0)
+
+ @staticmethod
+ def from_snippet_in_file(content: str, snippet: str) -> "Range":
+ start_index = content.index(snippet)
+ end_index = start_index + len(snippet)
+ return Range.from_indices(content, start_index, end_index)
+
+ @staticmethod
+ def from_lines_snippet_in_file(content: str, snippet: str) -> "Range":
+ # lines is a substring of the content modulo whitespace on each line
+ content_lines = content.splitlines()
+ snippet_lines = snippet.splitlines()
+
+ start_line = -1
+ end_line = -1
+ looking_for_line = 0
+ for i in range(len(content_lines)):
+ if content_lines[i].strip() == snippet_lines[looking_for_line].strip():
+ if looking_for_line == len(snippet_lines) - 1:
+ start_line = i - len(snippet_lines) + 1
+ end_line = i
+ break
+ looking_for_line += 1
+ else:
+ looking_for_line = 0
+
+ if start_line == -1 or end_line == -1:
+ raise ValueError("Snippet not found in content")
+
+ return Range.from_shorthand(
+ start_line, 0, end_line, len(content_lines[end_line]) - 1
+ )
+
+ @staticmethod
+ def from_position(position: Position) -> "Range":
+ return Range(start=position, end=position)
+
+
+class AbstractModel(ABC, BaseModel):
+ @root_validator(pre=True)
+ def check_is_subclass(cls, values):
+        if not issubclass(cls, AbstractModel):
+            raise TypeError(
+                "AbstractModel subclasses must be subclasses of AbstractModel"
+            )
+        return values
+
+
+class TracebackFrame(BaseModel):
+ filepath: str
+ lineno: int
+ function: str
+ code: Union[str, None]
+
+ def __eq__(self, other):
+ return (
+ self.filepath == other.filepath
+ and self.lineno == other.lineno
+ and self.function == other.function
+ )
+
+
+class Traceback(BaseModel):
+ frames: List[TracebackFrame]
+ message: str
+ error_type: str
+ full_traceback: Union[str, None]
+
+ @classmethod
+ def from_tbutil_parsed_exc(cls, tbutil_parsed_exc):
+ return cls(
+ frames=[
+ TracebackFrame(
+ filepath=frame["filepath"],
+ lineno=frame["lineno"],
+ function=frame["funcname"],
+ code=frame["source_line"],
+ )
+ for frame in tbutil_parsed_exc.frames
+ ],
+ message=tbutil_parsed_exc.exc_msg,
+ error_type=tbutil_parsed_exc.exc_type,
+ full_traceback=tbutil_parsed_exc.to_string(),
+ )
diff --git a/server/continuedev/models/reference/generate.py b/server/continuedev/models/reference/generate.py
new file mode 100644
index 00000000..74912f75
--- /dev/null
+++ b/server/continuedev/models/reference/generate.py
@@ -0,0 +1,144 @@
+import html
+import importlib
+import json
+from textwrap import dedent
+
+LLM_MODULES = [
+ ("openai", "OpenAI"),
+ ("anthropic", "AnthropicLLM"),
+ ("ggml", "GGML"),
+ ("llamacpp", "LlamaCpp"),
+ ("text_gen_interface", "TextGenUI"),
+ ("ollama", "Ollama"),
+ ("replicate", "ReplicateLLM"),
+ ("together", "TogetherLLM"),
+ ("hf_inference_api", "HuggingFaceInferenceAPI"),
+ ("hf_tgi", "HuggingFaceTGI"),
+ ("openai_free_trial", "OpenAIFreeTrial"),
+ ("google_palm_api", "GooglePaLMAPI"),
+ ("queued", "QueuedLLM"),
+]
+
+CONTEXT_PROVIDER_MODULES = [
+ ("diff", "DiffContextProvider"),
+ ("file", "FileContextProvider"),
+ ("filetree", "FileTreeContextProvider"),
+ ("github", "GitHubIssuesContextProvider"),
+ ("google", "GoogleContextProvider"),
+ ("search", "SearchContextProvider"),
+ ("terminal", "TerminalContextProvider"),
+ ("url", "URLContextProvider"),
+]
+
+
+def import_llm_module(module_name, module_title):
+ module_name = f"continuedev.libs.llm.{module_name}"
+ module = importlib.import_module(module_name)
+ obj = getattr(module, module_title)
+ return obj
+
+
+def import_context_provider_module(module_name, module_title):
+ module_name = f"continuedev.plugins.context_providers.{module_name}"
+ module = importlib.import_module(module_name)
+ obj = getattr(module, module_title)
+ return obj
+
+
+def docs_from_schema(schema, filepath, ignore_properties=[], inherited=[]):
+ # Generate markdown docs
+ properties = ""
+ inherited_properties = ""
+
+ def add_property(prop, details, only_required):
+ required = prop in schema.get("required", [])
+ if only_required != required or prop in ignore_properties:
+ return ""
+ required = "true" if required else "false"
+ return f"""<ClassPropertyRef name='{prop}' details='{html.escape(json.dumps(details))}' required={{{required}}} default="{html.escape(str(details.get("default", "")))}"/>\n"""
+
+ for prop, details in schema["properties"].items():
+ property = add_property(prop, details, True)
+ if prop in inherited:
+ inherited_properties += property
+ else:
+ properties += property
+
+ for prop, details in schema["properties"].items():
+ property = add_property(prop, details, False)
+ if prop in inherited:
+ inherited_properties += property
+ else:
+ properties += property
+
+ return dedent(
+ f"""\
+import ClassPropertyRef from '@site/src/components/ClassPropertyRef.tsx';
+
+# {schema['title']}
+
+{dedent(schema.get("description", ""))}
+
+[View the source](https://github.com/continuedev/continue/tree/main/continuedev/src/continuedev/{filepath})
+
+## Properties
+
+{properties}
+
+### Inherited Properties
+
+{inherited_properties}"""
+ )
+
+
+llm_module = importlib.import_module("continuedev.libs.llm.base")
+ctx_obj = getattr(llm_module, "LLM")
+schema = ctx_obj.schema()
+ctx_properties = schema["properties"].keys()
+
+for module_name, module_title in LLM_MODULES:
+ obj = import_llm_module(module_name, module_title)
+ schema = obj.schema()
+ markdown_docs = docs_from_schema(
+ schema, f"libs/llm/{module_name}.py", inherited=ctx_properties
+ )
+ with open(f"docs/docs/reference/Models/{module_title.lower()}.md", "w") as f:
+ f.write(markdown_docs)
+
+config_module = importlib.import_module("continuedev.core.config")
+config_obj = getattr(config_module, "ContinueConfig")
+schema = config_obj.schema()
+markdown_docs = docs_from_schema(schema, "core/config.py")
+with open("docs/docs/reference/config.md", "w") as f:
+ f.write(markdown_docs)
+
+ctx_module = importlib.import_module("continuedev.core.context")
+ctx_obj = getattr(ctx_module, "ContextProvider")
+schema = ctx_obj.schema()
+ctx_properties = schema["properties"].keys()
+for module_name, module_title in CONTEXT_PROVIDER_MODULES:
+ obj = import_context_provider_module(module_name, module_title)
+ schema = obj.schema()
+ markdown_docs = docs_from_schema(
+ schema,
+ f"plugins/context_providers/{module_name}.py",
+ ignore_properties=[
+ "sdk",
+ "updated_documents",
+ "delete_documents",
+ "selected_items",
+ "ignore_patterns",
+ ],
+ inherited=ctx_properties,
+ )
+ with open(
+ f"docs/docs/reference/Context Providers/{module_title.lower()}.md", "w"
+ ) as f:
+ f.write(markdown_docs)
+
+# sdk_module = importlib.import_module("continuedev.core.sdk")
+# sdk_obj = getattr(sdk_module, "ContinueSDK")
+# schema = sdk_obj.schema()
+# markdown_docs = docs_from_schema(schema, "sdk", ignore_properties=[])
+# with open("docs/docs/reference/ContinueSDK.md", "w") as f:
+# f.write(markdown_docs)
diff --git a/server/continuedev/plugins/context_providers/__init__.py b/server/continuedev/plugins/context_providers/__init__.py
new file mode 100644
index 00000000..0123bb7b
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/__init__.py
@@ -0,0 +1,7 @@
+from .diff import DiffContextProvider # noqa: F401
+from .filetree import FileTreeContextProvider # noqa: F401
+from .github import GitHubIssuesContextProvider # noqa: F401
+from .google import GoogleContextProvider # noqa: F401
+from .search import SearchContextProvider # noqa: F401
+from .terminal import TerminalContextProvider # noqa: F401
+from .url import URLContextProvider # noqa: F401
diff --git a/server/continuedev/plugins/context_providers/diff.py b/server/continuedev/plugins/context_providers/diff.py
new file mode 100644
index 00000000..05da3547
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/diff.py
@@ -0,0 +1,73 @@
+import subprocess
+from typing import List
+
+from pydantic import Field
+
+from ...core.context import ContextProvider
+from ...core.main import (
+ ContextItem,
+ ContextItemDescription,
+ ContextItemId,
+ ContinueCustomException,
+)
+
+
+class DiffContextProvider(ContextProvider):
+ """
+ Type '@diff' to reference all of the changes you've made to your current branch. This is useful if you want to summarize what you've done or ask for a general review of your work before committing.
+ """
+
+ title = "diff"
+ display_title = "Diff"
+ description = "Output of 'git diff' in current repo"
+ dynamic = True
+
+ _DIFF_CONTEXT_ITEM_ID = "diff"
+
+ workspace_dir: str = Field(
+ None, description="The workspace directory in which to run `git diff`"
+ )
+
+ @property
+ def BASE_CONTEXT_ITEM(self):
+ return ContextItem(
+ content="",
+ description=ContextItemDescription(
+ name="Diff",
+ description="Reference the output of 'git diff' for the current workspace",
+ id=ContextItemId(
+ provider_title=self.title, item_id=self._DIFF_CONTEXT_ITEM_ID
+ ),
+ ),
+ )
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ self.workspace_dir = workspace_dir
+ return [self.BASE_CONTEXT_ITEM]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+ result = subprocess.run(
+ ["git", "diff"], cwd=self.workspace_dir, capture_output=True, text=True
+ )
+ diff = result.stdout
+ error = result.stderr
+ if error.strip() != "":
+ if error.startswith("warning: Not a git repository"):
+ raise ContinueCustomException(
+ title="Not a git repository",
+ message="The @diff context provider only works in git repositories.",
+ )
+ raise ContinueCustomException(
+ title="Error running git diff",
+ message=f"Error running git diff:\n\n{error}",
+ )
+
+ if diff.strip() == "":
+ diff = "No changes"
+
+ ctx_item = self.BASE_CONTEXT_ITEM.copy()
+ ctx_item.content = diff
+ return ctx_item
diff --git a/server/continuedev/plugins/context_providers/dynamic.py b/server/continuedev/plugins/context_providers/dynamic.py
new file mode 100644
index 00000000..50567621
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/dynamic.py
@@ -0,0 +1,75 @@
+from abc import ABC, abstractmethod
+from typing import List
+
+from ...core.context import ContextProvider
+from ...core.main import ContextItem, ContextItemDescription, ContextItemId
+from ...libs.util.create_async_task import create_async_task
+from .util import remove_meilisearch_disallowed_chars
+
+
+class DynamicProvider(ContextProvider, ABC):
+    title: str
+    """A title representing the provider"""
+
+    name: str
+    """A name representing the provider. Probably use capitalized version of title"""
+
+ workspace_dir: str = None
+ dynamic: bool = True
+
+ @property
+ def BASE_CONTEXT_ITEM(self):
+ return ContextItem(
+ content="",
+ description=ContextItemDescription(
+ name=self.name,
+ description=self.description,
+ id=ContextItemId(provider_title=self.title, item_id=self.title),
+ ),
+ )
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ self.workspace_dir = workspace_dir
+ create_async_task(self.setup())
+ return [self.BASE_CONTEXT_ITEM]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+        if query.startswith(self.title):
+            query = query[len(self.title) :].lstrip()  # strip the title prefix (lstrip with a string strips characters, not a prefix)
+ results = await self.get_content(query)
+
+ ctx_item = self.BASE_CONTEXT_ITEM.copy()
+ ctx_item.content = results
+ ctx_item.description.name = f"{self.name}: '{query}'"
+ ctx_item.description.id.item_id = remove_meilisearch_disallowed_chars(query)
+ return ctx_item
+
+ @abstractmethod
+ async def get_content(self, query: str) -> str:
+ """Retrieve the content given the query
+ (e.g. search the codebase, return search results)"""
+ raise NotImplementedError
+
+ @abstractmethod
+ async def setup(self):
+ """Run any setup needed (e.g. indexing the codebase)"""
+ raise NotImplementedError
+
+
+"""
+class ExampleDynamicProvider(DynamicProvider):
+ title = "example"
+ name = "Example"
+ description = "Example description"
+
+ async def get_content(self, query: str) -> str:
+ return f"Example content for '{query}'"
+
+ async def setup(self):
+ print("Example setup")
+"""
diff --git a/server/continuedev/plugins/context_providers/embeddings.py b/server/continuedev/plugins/context_providers/embeddings.py
new file mode 100644
index 00000000..86cba311
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/embeddings.py
@@ -0,0 +1,81 @@
+import os
+import uuid
+from typing import List, Optional
+
+from pydantic import BaseModel
+
+from ...core.context import ContextProvider
+from ...core.main import ContextItem, ContextItemDescription, ContextItemId
+from ...libs.chroma.query import ChromaIndexManager
+
+
+class EmbeddingResult(BaseModel):
+ filename: str
+ content: str
+
+
+class EmbeddingsProvider(ContextProvider):
+ title = "embed"
+
+ display_title = "Embeddings Search"
+ description = "Search the codebase using embeddings"
+ dynamic = True
+ requires_query = True
+
+ workspace_directory: str
+
+ EMBEDDINGS_CONTEXT_ITEM_ID = "embeddings"
+
+ index_manager: Optional[ChromaIndexManager] = None
+
+ class Config:
+ arbitrary_types_allowed = True
+
+ @property
+ def index(self):
+ if self.index_manager is None:
+ self.index_manager = ChromaIndexManager(self.workspace_directory)
+ return self.index_manager
+
+ @property
+ def BASE_CONTEXT_ITEM(self):
+ return ContextItem(
+ content="",
+ description=ContextItemDescription(
+ name="Embedding Search",
+ description="Enter a query to embedding search codebase",
+ id=ContextItemId(
+ provider_title=self.title, item_id=self.EMBEDDINGS_CONTEXT_ITEM_ID
+ ),
+ ),
+ )
+
+    async def _get_query_results(self, query: str) -> List[EmbeddingResult]:
+ results = self.index.query_codebase_index(query)
+
+ ret = []
+ for node in results.source_nodes:
+ resource_name = list(node.node.relationships.values())[0]
+ filepath = resource_name[: resource_name.index("::")]
+ ret.append(EmbeddingResult(filename=filepath, content=node.node.text))
+
+ return ret
+
+ async def provide_context_items(self) -> List[ContextItem]:
+ self.index.create_codebase_index() # TODO Synchronous here is not ideal
+
+ return [self.BASE_CONTEXT_ITEM]
+
+ async def add_context_item(self, id: ContextItemId, query: str):
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+ results = await self._get_query_results(query)
+
+ for i in range(len(results)):
+ result = results[i]
+ ctx_item = self.BASE_CONTEXT_ITEM.copy()
+ ctx_item.description.name = os.path.basename(result.filename)
+ ctx_item.content = f"{result.filename}\n```\n{result.content}\n```"
+ ctx_item.description.id.item_id = uuid.uuid4().hex
+ self.selected_items.append(ctx_item)
diff --git a/server/continuedev/plugins/context_providers/file.py b/server/continuedev/plugins/context_providers/file.py
new file mode 100644
index 00000000..4cfbcfdb
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/file.py
@@ -0,0 +1,136 @@
+import asyncio
+import os
+from typing import List, Optional
+
+from ...core.context import ContextProvider
+from ...core.main import ContextItem, ContextItemDescription, ContextItemId
+from ...core.sdk import ContinueSDK
+from ...libs.util.filter_files import DEFAULT_IGNORE_PATTERNS
+from ...libs.util.logging import logger
+from .util import remove_meilisearch_disallowed_chars
+
+MAX_SIZE_IN_CHARS = 50_000
+
+
+async def get_file_contents(filepath: str, sdk: ContinueSDK) -> Optional[str]:
+ try:
+ return (await sdk.ide.readFile(filepath))[:MAX_SIZE_IN_CHARS]
+ except Exception as _:
+ return None
+
+
+class FileContextProvider(ContextProvider):
+ """
+ The FileContextProvider is a ContextProvider that allows you to search files in the open workspace.
+ """
+
+ title = "file"
+ ignore_patterns: List[str] = DEFAULT_IGNORE_PATTERNS
+
+ display_title = "Files"
+ description = "Reference files in the current workspace"
+ dynamic = False
+
+ async def start(self, *args):
+ await super().start(*args)
+
+ async def on_file_saved(filepath: str, contents: str):
+ item = await self.get_context_item_for_filepath(filepath)
+ if item is None:
+ return
+ await self.update_documents([item], self.sdk.ide.workspace_directory)
+
+ async def on_files_created(filepaths: List[str]):
+ items = await asyncio.gather(
+ *[
+ self.get_context_item_for_filepath(filepath)
+ for filepath in filepaths
+ ]
+ )
+ items = [item for item in items if item is not None]
+ await self.update_documents(items, self.sdk.ide.workspace_directory)
+
+ async def on_files_deleted(filepaths: List[str]):
+ ids = [self.get_id_for_filepath(filepath) for filepath in filepaths]
+
+ await self.delete_documents(ids)
+
+ async def on_files_renamed(old_filepaths: List[str], new_filepaths: List[str]):
+ if self.sdk.ide.workspace_directory is None:
+ return
+
+ old_ids = [self.get_id_for_filepath(filepath) for filepath in old_filepaths]
+ new_docs = await asyncio.gather(
+ *[
+ self.get_context_item_for_filepath(filepath)
+ for filepath in new_filepaths
+ ]
+ )
+ new_docs = [doc for doc in new_docs if doc is not None]
+
+ await self.delete_documents(old_ids)
+ await self.update_documents(new_docs, self.sdk.ide.workspace_directory)
+
+ self.sdk.ide.subscribeToFileSaved(on_file_saved)
+ self.sdk.ide.subscribeToFilesCreated(on_files_created)
+ self.sdk.ide.subscribeToFilesDeleted(on_files_deleted)
+ self.sdk.ide.subscribeToFilesRenamed(on_files_renamed)
+
+ def get_id_for_filepath(self, absolute_filepath: str) -> str:
+ return remove_meilisearch_disallowed_chars(absolute_filepath)
+
+ async def get_context_item_for_filepath(
+ self, absolute_filepath: str
+ ) -> Optional[ContextItem]:
+ content = await get_file_contents(absolute_filepath, self.sdk)
+ if content is None:
+ return None
+
+ workspace_dir = self.sdk.ide.workspace_directory
+ if (
+ os.path.splitdrive(workspace_dir)[0]
+ != os.path.splitdrive(absolute_filepath)[0]
+ ):
+ workspace_dir = (
+ os.path.splitdrive(absolute_filepath)[0]
+ + os.path.splitdrive(workspace_dir)[1]
+ )
+
+ try:
+ relative_to_workspace = os.path.relpath(absolute_filepath, workspace_dir)
+ except Exception as e:
+ logger.warning(f"Error getting relative path: {e}")
+ return None
+
+ return ContextItem(
+ content=content[: min(2000, len(content))],
+ description=ContextItemDescription(
+ name=os.path.basename(absolute_filepath),
+ # We should add the full path to the ContextItem
+ # It warrants a data modeling discussion and has no immediate use case
+ description=relative_to_workspace,
+ id=ContextItemId(
+ provider_title=self.title,
+ item_id=self.get_id_for_filepath(absolute_filepath),
+ ),
+ ),
+ )
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ contents = await self.sdk.ide.listDirectoryContents(workspace_dir, True)
+ if contents is None:
+ return []
+
+ absolute_filepaths: List[str] = []
+ for filepath in contents[:1000]:
+ absolute_filepaths.append(filepath)
+
+ items = await asyncio.gather(
+ *[
+ self.get_context_item_for_filepath(filepath)
+ for filepath in absolute_filepaths
+ ]
+ )
+ items = list(filter(lambda item: item is not None, items))
+
+ return items
diff --git a/server/continuedev/plugins/context_providers/filetree.py b/server/continuedev/plugins/context_providers/filetree.py
new file mode 100644
index 00000000..5b3d3a50
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/filetree.py
@@ -0,0 +1,89 @@
+from typing import List
+
+from pydantic import BaseModel, Field
+
+from ...core.context import ContextProvider
+from ...core.main import ContextItem, ContextItemDescription, ContextItemId
+
+
+class Directory(BaseModel):
+ name: str
+ files: List[str]
+ directories: List["Directory"]
+
+
+def format_file_tree(tree: Directory, indentation: str = "") -> str:
+ result = ""
+ for file in tree.files:
+ result += f"{indentation}{file}\n"
+
+ for directory in tree.directories:
+ result += f"{indentation}{directory.name}/\n"
+ result += format_file_tree(directory, indentation + " ")
+
+ return result
+
+
+def split_path(path: str, with_root=None) -> List[str]:
+ parts = path.split("/") if "/" in path else path.split("\\")
+ if with_root is not None:
+ root_parts = split_path(with_root)
+ parts = parts[len(root_parts) - 1 :]
+
+ return parts
+
+
+class FileTreeContextProvider(ContextProvider):
+ """Type '@tree' to reference the contents of your current workspace. The LLM will be able to see the nested directory structure of your project."""
+
+ title = "tree"
+ display_title = "File Tree"
+ description = "Add a formatted file tree of this directory to the context"
+ dynamic = True
+
+ workspace_dir: str = Field(None, description="The workspace directory to display")
+
+ async def _get_file_tree(self, directory: str) -> str:
+ contents = await self.sdk.ide.listDirectoryContents(directory, recursive=True)
+
+ tree = Directory(
+ name=split_path(self.workspace_dir)[-1], files=[], directories=[]
+ )
+
+ for file in contents:
+ parts = split_path(file, with_root=self.workspace_dir)
+
+ current_tree = tree
+ for part in parts[:-1]:
+ if part not in [d.name for d in current_tree.directories]:
+ current_tree.directories.append(
+ Directory(name=part, files=[], directories=[])
+ )
+
+ current_tree = [d for d in current_tree.directories if d.name == part][
+ 0
+ ]
+
+ current_tree.files.append(parts[-1])
+
+ return format_file_tree(tree)
+
+ async def _filetree_context_item(self):
+ return ContextItem(
+ content=await self._get_file_tree(self.workspace_dir),
+ description=ContextItemDescription(
+ name="File Tree",
+ description="Add a formatted file tree of this directory to the context",
+ id=ContextItemId(provider_title=self.title, item_id=self.title),
+ ),
+ )
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ self.workspace_dir = workspace_dir
+ return [await self._filetree_context_item()]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+ return await self._filetree_context_item()
diff --git a/server/continuedev/plugins/context_providers/github.py b/server/continuedev/plugins/context_providers/github.py
new file mode 100644
index 00000000..c031f310
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/github.py
@@ -0,0 +1,49 @@
+from typing import List
+
+from github import Auth, Github
+from pydantic import Field
+
+from ...core.context import (
+ ContextItem,
+ ContextItemDescription,
+ ContextItemId,
+ ContextProvider,
+)
+
+
+class GitHubIssuesContextProvider(ContextProvider):
+ """
+ The GitHubIssuesContextProvider is a ContextProvider that allows you to search GitHub issues in a repo. Type '@issue' to reference the title and contents of an issue.
+ """
+
+ title = "issues"
+ repo_name: str = Field(
+ ..., description="The name of the GitHub repo from which to pull issues"
+ )
+ auth_token: str = Field(
+ ...,
+ description="The GitHub auth token to use to authenticate with the GitHub API",
+ )
+
+ display_title = "GitHub Issues"
+ description = "Reference GitHub issues"
+ dynamic = False
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ auth = Auth.Token(self.auth_token)
+ gh = Github(auth=auth)
+
+ repo = gh.get_repo(self.repo_name)
+ issues = repo.get_issues().get_page(0)
+
+ return [
+ ContextItem(
+ content=issue.body,
+ description=ContextItemDescription(
+ name=f"Issue #{issue.number}",
+ description=issue.title,
+ id=ContextItemId(provider_title=self.title, item_id=issue.id),
+ ),
+ )
+ for issue in issues
+ ]
diff --git a/server/continuedev/plugins/context_providers/google.py b/server/continuedev/plugins/context_providers/google.py
new file mode 100644
index 00000000..852f4e9a
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/google.py
@@ -0,0 +1,70 @@
+import json
+from typing import List
+
+import aiohttp
+from pydantic import Field
+
+from ...core.context import ContextProvider
+from ...core.main import ContextItem, ContextItemDescription, ContextItemId
+from .util import remove_meilisearch_disallowed_chars
+
+
+class GoogleContextProvider(ContextProvider):
+ """Type '@google' to reference the results of a Google search. For example, type "@google python tutorial" if you want to search and discuss ways of learning Python."""
+
+ title = "google"
+ display_title = "Google"
+ description = "Search Google"
+ dynamic = True
+ requires_query = True
+
+ serper_api_key: str = Field(
+ ...,
+ description="Your SerpAPI key, used to programmatically make Google searches. You can get a key at https://serper.dev.",
+ )
+
+ _GOOGLE_CONTEXT_ITEM_ID = "google_search"
+
+ @property
+ def BASE_CONTEXT_ITEM(self):
+ return ContextItem(
+ content="",
+ description=ContextItemDescription(
+ name="Google Search",
+ description="Enter a query to search google",
+ id=ContextItemId(
+ provider_title=self.title, item_id=self._GOOGLE_CONTEXT_ITEM_ID
+ ),
+ ),
+ )
+
+ async def _google_search(self, query: str) -> str:
+ url = "https://google.serper.dev/search"
+
+ payload = json.dumps({"q": query})
+ headers = {"X-API-KEY": self.serper_api_key, "Content-Type": "application/json"}
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(url, headers=headers, data=payload) as response:
+ return await response.text()
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ return [self.BASE_CONTEXT_ITEM]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+ results = await self._google_search(query)
+ json_results = json.loads(results)
+ content = f"Google Search: {query}\n\n"
+ if answerBox := json_results.get("answerBox"):
+ content += f"Answer Box ({answerBox['title']}): {answerBox['answer']}\n\n"
+
+ for result in json_results["organic"]:
+ content += f"{result['title']}\n{result['link']}\n{result['snippet']}\n\n"
+
+ ctx_item = self.BASE_CONTEXT_ITEM.copy()
+ ctx_item.content = content
+ ctx_item.description.id.item_id = remove_meilisearch_disallowed_chars(query)
+ return ctx_item
diff --git a/server/continuedev/plugins/context_providers/highlighted_code.py b/server/continuedev/plugins/context_providers/highlighted_code.py
new file mode 100644
index 00000000..3304a71d
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/highlighted_code.py
@@ -0,0 +1,293 @@
+import os
+from typing import Any, Dict, List, Optional
+
+from pydantic import BaseModel
+
+from ...core.context import (
+ ContextItem,
+ ContextItemDescription,
+ ContextItemId,
+ ContextProvider,
+)
+from ...core.main import ChatMessage
+from ...models.filesystem import RangeInFileWithContents
+from ...models.main import Range
+
+
+class HighlightedRangeContextItem(BaseModel):
+ rif: RangeInFileWithContents
+ item: ContextItem
+
+
+class HighlightedCodeContextProvider(ContextProvider):
+ """
+ The ContextProvider class is a plugin that lets you provide new information to the LLM by typing '@'.
+ When you type '@', the context provider will be asked to populate a list of options.
+ These options will be updated on each keystroke.
+ When you hit enter on an option, the context provider will add that item to the autopilot's list of context (which is all stored in the ContextManager object).
+ """
+
+ title = "code"
+ display_title = "Highlighted Code"
+ description = "Highlight code"
+ dynamic = True
+
+ ide: Any # IdeProtocolServer
+
+ highlighted_ranges: List[HighlightedRangeContextItem] = []
+ adding_highlighted_code: bool = True
+ # Controls whether you can have more than one highlighted range. Now always True.
+
+ should_get_fallback_context_item: bool = True
+ last_added_fallback: bool = False
+
+ async def _get_fallback_context_item(self) -> HighlightedRangeContextItem:
+ # Used to automatically include the currently open file. Disabled for now.
+ return None
+
+ if not self.should_get_fallback_context_item:
+ return None
+
+ visible_files = await self.ide.getVisibleFiles()
+ if len(visible_files) > 0:
+ content = await self.ide.readFile(visible_files[0])
+ rif = RangeInFileWithContents.from_entire_file(visible_files[0], content)
+
+ item = self._rif_to_context_item(rif, 0, True)
+ item.description.name = self._rif_to_name(rif, show_line_nums=False)
+
+ self.last_added_fallback = True
+ return HighlightedRangeContextItem(rif=rif, item=item)
+
+ return None
+
+ async def get_selected_items(self) -> List[ContextItem]:
+ items = [hr.item for hr in self.highlighted_ranges]
+
+ if len(items) == 0 and (
+ fallback_item := await self._get_fallback_context_item()
+ ):
+ items = [fallback_item.item]
+
+ return items
+
+    async def get_chat_messages(self) -> List[ChatMessage]:
+ ranges = self.highlighted_ranges
+ if len(ranges) == 0 and (
+ fallback_item := await self._get_fallback_context_item()
+ ):
+ ranges = [fallback_item]
+
+ return [
+ ChatMessage(
+ role="user",
+ content=f"Code in this file is highlighted ({r.rif.filepath}):\n```\n{r.rif.contents}\n```",
+ summary=f"Code in this file is highlighted: {r.rif.filepath}",
+ )
+ for r in ranges
+ ]
+
+ def _make_sure_is_editing_range(self):
+ """If none of the highlighted ranges are currently being edited, the first should be selected"""
+ if len(self.highlighted_ranges) == 0:
+ return
+ if not any(map(lambda x: x.item.editing, self.highlighted_ranges)):
+ self.highlighted_ranges[0].item.editing = True
+
+ def _disambiguate_highlighted_ranges(self):
+ """If any files have the same name, also display their folder name"""
+ name_status: Dict[
+ str, set
+ ] = {} # basename -> set of full paths with that basename
+ for hr in self.highlighted_ranges:
+ basename = os.path.basename(hr.rif.filepath)
+ if basename in name_status:
+ name_status[basename].add(hr.rif.filepath)
+ else:
+ name_status[basename] = {hr.rif.filepath}
+
+ for hr in self.highlighted_ranges:
+ basename = os.path.basename(hr.rif.filepath)
+ if len(name_status[basename]) > 1:
+ hr.item.description.name = self._rif_to_name(
+ hr.rif,
+ display_filename=os.path.join(
+ os.path.basename(os.path.dirname(hr.rif.filepath)), basename
+ ),
+ )
+ else:
+ hr.item.description.name = self._rif_to_name(
+ hr.rif, display_filename=basename
+ )
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ return []
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ raise NotImplementedError()
+
+ async def clear_context(self):
+ self.highlighted_ranges = []
+ self.adding_highlighted_code = False
+ self.should_get_fallback_context_item = True
+ self.last_added_fallback = False
+
+ async def delete_context_with_ids(
+ self, ids: List[ContextItemId]
+ ) -> List[ContextItem]:
+ ids_to_delete = [id.item_id for id in ids]
+
+ kept_ranges = []
+ for hr in self.highlighted_ranges:
+ if hr.item.description.id.item_id not in ids_to_delete:
+ kept_ranges.append(hr)
+ self.highlighted_ranges = kept_ranges
+
+ self._make_sure_is_editing_range()
+
+ if len(self.highlighted_ranges) == 0 and self.last_added_fallback:
+ self.should_get_fallback_context_item = False
+
+ return [hr.item for hr in self.highlighted_ranges]
+
+ def _rif_to_name(
+ self,
+ rif: RangeInFileWithContents,
+ display_filename: str = None,
+ show_line_nums: bool = True,
+ ) -> str:
+ line_nums = (
+ f" ({rif.range.start.line + 1}-{rif.range.end.line + 1})"
+ if show_line_nums
+ else ""
+ )
+ return f"{display_filename or os.path.basename(rif.filepath)}{line_nums}"
+
+ def _rif_to_context_item(
+ self, rif: RangeInFileWithContents, idx: int, editing: bool
+ ) -> ContextItem:
+ return ContextItem(
+ description=ContextItemDescription(
+ name=self._rif_to_name(rif),
+ description=rif.filepath,
+ id=ContextItemId(provider_title=self.title, item_id=str(idx)),
+ ),
+ content=rif.contents,
+ editing=editing if editing is not None else False,
+ editable=True,
+ )
+
+ async def handle_highlighted_code(
+ self,
+ range_in_files: List[RangeInFileWithContents],
+ edit: Optional[bool] = False,
+ ):
+ self.should_get_fallback_context_item = True
+ self.last_added_fallback = False
+
+ # Filter out rifs from ~/.continue/diffs folder
+ range_in_files = [
+ rif
+ for rif in range_in_files
+ if not os.path.dirname(rif.filepath)
+ == os.path.expanduser("~/.continue/diffs")
+ ]
+
+ # If not adding highlighted code
+ if not self.adding_highlighted_code:
+ if (
+ len(self.highlighted_ranges) == 1
+ and len(range_in_files) <= 1
+ and (
+ len(range_in_files) == 0
+ or range_in_files[0].range.start == range_in_files[0].range.end
+ )
+ ):
+ # If un-highlighting the range to edit, then remove the range
+ self.highlighted_ranges = []
+ elif len(range_in_files) > 0:
+ # Otherwise, replace the current range with the new one
+ # This is the first range to be highlighted
+ self.highlighted_ranges = [
+ HighlightedRangeContextItem(
+ rif=range_in_files[0],
+ item=self._rif_to_context_item(range_in_files[0], 0, edit),
+ )
+ ]
+
+ return
+
+ # If editing, make sure none of the other ranges are editing
+ if edit:
+ for hr in self.highlighted_ranges:
+ hr.item.editing = False
+
+ # If new range overlaps with any existing, keep the existing but merged
+ new_ranges = []
+ for i, new_hr in enumerate(range_in_files):
+ found_overlap_with = None
+ for existing_rif in self.highlighted_ranges:
+ if (
+ new_hr.filepath == existing_rif.rif.filepath
+ and new_hr.range.overlaps_with(existing_rif.rif.range)
+ ):
+ existing_rif.rif.range = existing_rif.rif.range.merge_with(
+ new_hr.range
+ )
+ found_overlap_with = existing_rif
+ break
+
+ if found_overlap_with is None:
+ new_ranges.append(
+ HighlightedRangeContextItem(
+ rif=new_hr,
+ item=self._rif_to_context_item(
+ new_hr, len(self.highlighted_ranges) + i, edit
+ ),
+ )
+ )
+ elif edit:
+ # Want to update the range so it's only the newly selected portion
+ found_overlap_with.rif.range = new_hr.range
+ found_overlap_with.item.editing = True
+
+ self.highlighted_ranges = self.highlighted_ranges + new_ranges
+
+ self._make_sure_is_editing_range()
+ self._disambiguate_highlighted_ranges()
+
+ async def set_editing_at_ids(self, ids: List[str]):
+ # Don't do anything if there are no valid ids here
+ count = 0
+ for hr in self.highlighted_ranges:
+ if hr.item.description.id.item_id in ids:
+ count += 1
+
+ if count == 0:
+ return
+
+ for hr in self.highlighted_ranges:
+ hr.item.editing = hr.item.description.id.item_id in ids
+
+ async def add_context_item(
+ self, id: ContextItemId, query: str, prev: List[ContextItem] = None
+ ) -> List[ContextItem]:
+ raise NotImplementedError()
+
+ async def manually_add_context_item(self, context_item: ContextItem):
+ full_file_content = await self.ide.readFile(
+ context_item.description.description
+ )
+ self.highlighted_ranges.append(
+ HighlightedRangeContextItem(
+ rif=RangeInFileWithContents(
+ filepath=context_item.description.description,
+ range=Range.from_lines_snippet_in_file(
+ content=full_file_content,
+ snippet=context_item.content,
+ ),
+ contents=context_item.content,
+ ),
+ item=context_item,
+ )
+ )
diff --git a/server/continuedev/plugins/context_providers/search.py b/server/continuedev/plugins/context_providers/search.py
new file mode 100644
index 00000000..a36b2a0a
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/search.py
@@ -0,0 +1,90 @@
+from typing import List
+
+from pydantic import Field
+from ripgrepy import Ripgrepy
+
+from ...core.context import ContextProvider
+from ...core.main import ContextItem, ContextItemDescription, ContextItemId
+from ...libs.util.logging import logger
+from ...libs.util.ripgrep import get_rg_path
+from .util import remove_meilisearch_disallowed_chars
+
+
+class SearchContextProvider(ContextProvider):
+ """Type '@search' to reference the results of codebase search, just like the results you would get from VS Code search."""
+
+ title = "search"
+ display_title = "Search"
+ description = "Search the workspace for all matches of an exact string (e.g. '@search console.log')"
+ dynamic = True
+ requires_query = True
+
+ _SEARCH_CONTEXT_ITEM_ID = "search"
+
+ workspace_dir: str = Field(None, description="The workspace directory to search")
+
+ @property
+ def BASE_CONTEXT_ITEM(self):
+ return ContextItem(
+ content="",
+ description=ContextItemDescription(
+ name="Search",
+ description="Search the workspace for all matches of an exact string (e.g. '@search console.log')",
+ id=ContextItemId(
+ provider_title=self.title, item_id=self._SEARCH_CONTEXT_ITEM_ID
+ ),
+ ),
+ )
+
+ async def _search(self, query: str) -> str:
+ rg = Ripgrepy(query, self.workspace_dir, rg_path=get_rg_path())
+ results = rg.I().context(2).run()
+ return f"Search results in workspace for '{query}':\n\n{results}"
+
+ # Custom display below - TODO
+
+ # Gather results per file
+ file_to_matches = {}
+ for result in results:
+ if result["type"] == "match":
+ data = result["data"]
+ filepath = data["path"]["text"]
+ if filepath not in file_to_matches:
+ file_to_matches[filepath] = []
+
+ line_num_and_line = f"{data['line_number']}: {data['lines']['text']}"
+ file_to_matches[filepath].append(line_num_and_line)
+
+ # Format results
+ content = f"Search results in workspace for '{query}':\n\n"
+ for filepath, matches in file_to_matches.items():
+ content += f"{filepath}\n"
+ for match in matches:
+ content += f"{match}\n"
+ content += "\n"
+
+ return content
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ self.workspace_dir = workspace_dir
+
+ try:
+ Ripgrepy("", workspace_dir, rg_path=get_rg_path())
+ except Exception as e:
+ logger.warning(f"Failed to initialize ripgrepy: {e}")
+ return []
+
+ return [self.BASE_CONTEXT_ITEM]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+        if query.startswith("search "):
+            query = query[len("search ") :]  # strip the "search " prefix (lstrip would strip matching characters, not the prefix)
+ results = await self._search(query)
+
+ ctx_item = self.BASE_CONTEXT_ITEM.copy()
+ ctx_item.content = results
+ ctx_item.description.name = f"Search: '{query}'"
+ ctx_item.description.id.item_id = remove_meilisearch_disallowed_chars(query)
+ return ctx_item
diff --git a/server/continuedev/plugins/context_providers/terminal.py b/server/continuedev/plugins/context_providers/terminal.py
new file mode 100644
index 00000000..c63239e4
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/terminal.py
@@ -0,0 +1,49 @@
+from typing import Any, Coroutine, List
+
+from pydantic import Field
+
+from ...core.context import ContextProvider
+from ...core.main import ChatMessage, ContextItem, ContextItemDescription, ContextItemId
+
+
+class TerminalContextProvider(ContextProvider):
+ """Type '@terminal' to reference the contents of your IDE's terminal."""
+
+ title = "terminal"
+ display_title = "Terminal"
+ description = "Reference the contents of the terminal"
+ dynamic = True
+
+ get_last_n_commands: int = Field(
+ 3, description="The number of previous commands to reference"
+ )
+
+ def _terminal_context_item(self, content: str = ""):
+ return ContextItem(
+ content=content,
+ description=ContextItemDescription(
+ name="Terminal",
+ description="Reference the contents of the VS Code terminal",
+ id=ContextItemId(provider_title=self.title, item_id=self.title),
+ ),
+ )
+
+ async def get_chat_messages(self) -> Coroutine[Any, Any, List[ChatMessage]]:
+ msgs = await super().get_chat_messages()
+ for msg in msgs:
+ msg.summary = msg.content[-1000:]
+ return msgs
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ return [self._terminal_context_item()]
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+ terminal_contents = await self.sdk.ide.getTerminalContents(
+ self.get_last_n_commands
+ )
+ terminal_contents = terminal_contents[-5000:]
+
+ return self._terminal_context_item(terminal_contents)
diff --git a/server/continuedev/plugins/context_providers/url.py b/server/continuedev/plugins/context_providers/url.py
new file mode 100644
index 00000000..1ed7c18e
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/url.py
@@ -0,0 +1,104 @@
+from typing import List, Tuple
+
+import requests
+from bs4 import BeautifulSoup
+from pydantic import Field
+
+from ...core.context import ContextProvider
+from ...core.main import ContextItem, ContextItemDescription, ContextItemId
+from .util import remove_meilisearch_disallowed_chars
+
+
+class URLContextProvider(ContextProvider):
+ """Type '@url' to reference the contents of a URL. You can either reference preset URLs, or reference one dynamically by typing '@url https://example.com'. The text contents of the page will be fetched and used as context."""
+
+ title = "url"
+ display_title = "URL"
+ description = "Reference the contents of a webpage"
+ dynamic = True
+ requires_query = True
+
+ # Allows users to provide a list of preset urls
+ preset_urls: List[str] = Field(
+ [],
+ description="A list of preset URLs that you will be able to quickly reference by typing '@url'",
+ )
+
+ # Static items loaded from preset_urls
+ static_url_context_items: List[ContextItem] = []
+
+ # There is only a single dynamic url context item, so it has a static id
+ _DYNAMIC_URL_CONTEXT_ITEM_ID = "url"
+
+ # This is a template dynamic item that will generate context item on demand
+ # when get item is called
+ @property
+ def DYNAMIC_CONTEXT_ITEM(self):
+ return ContextItem(
+ content="",
+ description=ContextItemDescription(
+ name="Dynamic URL",
+ description="Reference the contents of a webpage (e.g. '@url https://www.w3schools.com/python/python_ref_functions.asp')",
+ id=ContextItemId(
+ provider_title=self.title, item_id=self._DYNAMIC_URL_CONTEXT_ITEM_ID
+ ),
+ ),
+ )
+
+ def static_url_context_item_from_url(self, url: str) -> ContextItem:
+ content, title = self._get_url_text_contents_and_title(url)
+ return ContextItem(
+ content=content,
+ description=ContextItemDescription(
+ name=title,
+ description=f"Contents of {url}",
+ id=ContextItemId(
+ provider_title=self.title,
+ item_id=remove_meilisearch_disallowed_chars(url),
+ ),
+ ),
+ )
+
+    def _get_url_text_contents_and_title(self, url: str) -> Tuple[str, str]:
+ response = requests.get(url)
+ soup = BeautifulSoup(response.text, "html.parser")
+ title = url.replace("https://", "").replace("http://", "").replace("www.", "")
+ if soup.title is not None:
+ title = soup.title.string
+ return soup.get_text(), title
+
+ async def provide_context_items(self, workspace_dir: str) -> List[ContextItem]:
+ self.static_url_context_items = [
+ self.static_url_context_item_from_url(url) for url in self.preset_urls
+ ]
+
+ return [self.DYNAMIC_CONTEXT_ITEM] + self.static_url_context_items
+
+ async def get_item(self, id: ContextItemId, query: str) -> ContextItem:
+ # Check if the item is a static item
+ matching_static_item = next(
+ (
+ item
+ for item in self.static_url_context_items
+ if item.description.id.item_id == id.item_id
+ ),
+ None,
+ )
+ if matching_static_item:
+ return matching_static_item
+
+ # Check if the item is the dynamic item
+ if not id.provider_title == self.title:
+ raise Exception("Invalid provider title for item")
+
+ # Generate the dynamic item
+        # Strip a leading "url " prefix if present (lstrip would strip characters, not the prefix)
+        url = (query[len("url ") :] if query.startswith("url ") else query).strip()
+        if url == "":
+ return None
+ content, title = self._get_url_text_contents_and_title(url)
+
+ ctx_item = self.DYNAMIC_CONTEXT_ITEM.copy()
+ ctx_item.content = content
+ ctx_item.description.name = title
+ ctx_item.description.id.item_id = remove_meilisearch_disallowed_chars(url)
+ return ctx_item
diff --git a/server/continuedev/plugins/context_providers/util.py b/server/continuedev/plugins/context_providers/util.py
new file mode 100644
index 00000000..61bea8aa
--- /dev/null
+++ b/server/continuedev/plugins/context_providers/util.py
@@ -0,0 +1,5 @@
+import re
+
+
+def remove_meilisearch_disallowed_chars(id: str) -> str:
+ return re.sub(r"[^0-9a-zA-Z_-]", "", id)
diff --git a/server/continuedev/plugins/policies/commit.py b/server/continuedev/plugins/policies/commit.py
new file mode 100644
index 00000000..2fa43676
--- /dev/null
+++ b/server/continuedev/plugins/policies/commit.py
@@ -0,0 +1,77 @@
+# An agent that makes a full commit in the background
+# Plans
+# Write code
+# Reviews code
+# Cleans up
+
+# It's important that agents are configurable, because people need to be able to specify
+# which hooks they want to run. Specific linter, run tests, etc.
+# And all of this can be easily specified in the Policy.
+
+
+from textwrap import dedent
+from typing import Literal
+
+from ...core.config import ContinueConfig
+from ...core.main import History, Policy, Step
+from ...core.observation import TextObservation
+from ...core.sdk import ContinueSDK
+
+
+class PlanStep(Step):
+ user_input: str
+
+ _prompt = dedent(
+ """\
+ You were given the following instructions: "{user_input}".
+
+ Create a plan for how you will complete the task.
+
+ Here are relevant files:
+
+ {relevant_files}
+
+ Your plan will include:
+ 1. A high-level description of how you are going to accomplish the task
+ 2. A list of which files you will edit
+ 3. A description of what you will change in each file
+ """
+ )
+
+ async def run(self, sdk: ContinueSDK):
+ plan = await sdk.models.default.complete(
+            self._prompt.format(
+                user_input=self.user_input, relevant_files="TODO"
+            )
+ )
+ return TextObservation(text=plan)
+
+
+class WriteCommitStep(Step):
+ async def run(self, sdk: ContinueSDK):
+ pass
+
+
+class ReviewCodeStep(Step):
+ async def run(self, sdk: ContinueSDK):
+ pass
+
+
+class CleanupStep(Step):
+ async def run(self, sdk: ContinueSDK):
+ pass
+
+
+class CommitPolicy(Policy):
+ user_input: str
+
+ current_step: Literal["plan", "write", "review", "cleanup"] = "plan"
+
+ def next(self, config: ContinueConfig, history: History) -> Step:
+ if history.get_current() is None:
+ return (
+ PlanStep(user_input=self.user_input)
+ >> WriteCommitStep()
+ >> ReviewCodeStep()
+ >> CleanupStep()
+ )
diff --git a/server/continuedev/plugins/policies/default.py b/server/continuedev/plugins/policies/default.py
new file mode 100644
index 00000000..574d2a1c
--- /dev/null
+++ b/server/continuedev/plugins/policies/default.py
@@ -0,0 +1,85 @@
+from typing import Type, Union
+
+from ...core.config import ContinueConfig
+from ...core.main import History, Policy, Step
+from ...core.observation import UserInputObservation
+from ..steps.chat import SimpleChatStep
+from ..steps.custom_command import CustomCommandStep
+from ..steps.main import EditHighlightedCodeStep
+from ..steps.steps_on_startup import StepsOnStartupStep
+
+
+def parse_slash_command(inp: str, config: ContinueConfig) -> Union[None, Step]:
+ """
+    Parses a slash command, returning the matching Step to run, or None if the input is not a recognized slash command.
+ """
+ if inp.startswith("/"):
+ command_name = inp.split(" ")[0].strip()
+ after_command = " ".join(inp.split(" ")[1:])
+
+ for slash_command in config.slash_commands:
+ if slash_command.name == command_name[1:]:
+ params = slash_command.params
+ params["user_input"] = after_command
+ try:
+ return slash_command.step(**params)
+ except TypeError as e:
+ raise Exception(
+ f"Incorrect params used for slash command '{command_name}': {e}"
+ )
+ return None
+
+
+def parse_custom_command(inp: str, config: ContinueConfig) -> Union[None, Step]:
+ command_name = inp.split(" ")[0].strip()
+ after_command = " ".join(inp.split(" ")[1:])
+ for custom_cmd in config.custom_commands:
+ if custom_cmd.name == command_name[1:]:
+ slash_command = parse_slash_command(custom_cmd.prompt, config)
+ if slash_command is not None:
+ return slash_command
+ return CustomCommandStep(
+ name=custom_cmd.name,
+ description=custom_cmd.description,
+ prompt=custom_cmd.prompt,
+ user_input=after_command,
+ slash_command=command_name,
+ )
+ return None
+
+
+class DefaultPolicy(Policy):
+ default_step: Type[Step] = SimpleChatStep
+ default_params: dict = {}
+
+ def next(self, config: ContinueConfig, history: History) -> Step:
+ # At the very start, run initial Steps specified in the config
+ if history.get_current() is None:
+ return StepsOnStartupStep()
+
+ observation = history.get_current().observation
+ if observation is not None and isinstance(observation, UserInputObservation):
+ # This could be defined with ObservationTypePolicy. Ergonomics not right though.
+ user_input = observation.user_input
+
+ slash_command = parse_slash_command(user_input, config)
+ if slash_command is not None:
+ if (
+ getattr(slash_command, "user_input", None) is None
+ and history.get_current().step.user_input is not None
+ ):
+ history.get_current().step.user_input = (
+ history.get_current().step.user_input.split()[0]
+ )
+ return slash_command
+
+ custom_command = parse_custom_command(user_input, config)
+ if custom_command is not None:
+ return custom_command
+
+ if user_input.startswith("/edit"):
+ return EditHighlightedCodeStep(user_input=user_input[5:])
+
+ return self.default_step(**self.default_params)
+
+ return None
diff --git a/server/continuedev/plugins/policies/headless.py b/server/continuedev/plugins/policies/headless.py
new file mode 100644
index 00000000..9fa0f3f2
--- /dev/null
+++ b/server/continuedev/plugins/policies/headless.py
@@ -0,0 +1,18 @@
+from ...core.config import ContinueConfig
+from ...core.main import History, Policy, Step
+from ...core.observation import TextObservation
+from ...core.steps import ShellCommandsStep
+from ...plugins.steps.on_traceback import DefaultOnTracebackStep
+
+
+class HeadlessPolicy(Policy):
+ command: str
+
+ def next(self, config: ContinueConfig, history: History) -> Step:
+ if history.get_current() is None:
+ return ShellCommandsStep(cmds=[self.command])
+ observation = history.get_current().observation
+ if isinstance(observation, TextObservation):
+ return DefaultOnTracebackStep(output=observation.text)
+
+ return None
diff --git a/server/continuedev/plugins/recipes/AddTransformRecipe/README.md b/server/continuedev/plugins/recipes/AddTransformRecipe/README.md
new file mode 100644
index 00000000..78d603a2
--- /dev/null
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/README.md
@@ -0,0 +1,9 @@
+# AddTransformRecipe
+
+Uses the Chess.com API example to show how to add map and filter Python transforms to a dlt pipeline.
+
+## Background
+
+- https://dlthub.com/docs/general-usage/resource#filter-transform-and-pivot-data
+- https://dlthub.com/docs/customizations/customizing-pipelines/renaming_columns
+- https://dlthub.com/docs/customizations/customizing-pipelines/pseudonymizing_columns
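+
+As a rough sketch of the pattern this recipe walks through (the resource shape and field names below are assumptions based on the public Chess.com API, not the recipe's exact code):
+
+```python
+import dlt
+import requests
+
+
+@dlt.resource(write_disposition="replace")
+def player_games(username: str = "magnuscarlsen"):
+    # Illustrative only: yield the player's games from their most recent monthly archive
+    archives = requests.get(
+        f"https://api.chess.com/pub/player/{username}/games/archives"
+    ).json()["archives"]
+    yield from requests.get(archives[-1]).json()["games"]
+
+
+def drop_pgn(game):
+    # map transform: strip the bulky PGN text before loading
+    game.pop("pgn", None)
+    return game
+
+
+# keep only rated games, then slim each row down before loading
+games = player_games().add_filter(lambda g: g.get("rated")).add_map(drop_pgn)
+```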
diff --git a/server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md b/server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md
new file mode 100644
index 00000000..864aea87
--- /dev/null
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/dlt_transform_docs.md
@@ -0,0 +1,142 @@
+# Customize resources
+
+## Filter, transform and pivot data
+
+You can attach any number of transformations that are evaluated on item per item basis to your resource. The available transformation types:
+
+- map - transform the data item (resource.add_map)
+- filter - filter the data item (resource.add_filter)
+- yield map - a map that returns iterator (so single row may generate many rows - resource.add_yield_map)
+
+Example: We have a resource that loads a list of users from an API endpoint. We want to customize it so that:
+
+- we remove users with user_id == 'me'
+- we anonymize the user data
+
+Here's our resource:
+
+```python
+import dlt
+
+@dlt.resource(write_disposition='replace')
+def users():
+ ...
+ users = requests.get(...)
+ ...
+ yield users
+```
+
+Here's our script that defines transformations and loads the data.
+
+```python
+from pipedrive import users
+
+def anonymize_user(user_data):
+ user_data['user_id'] = hash_str(user_data['user_id'])
+ user_data['user_email'] = hash_str(user_data['user_email'])
+ return user_data
+
+# add the filter and anonymize function to the users resource and enumerate the rows
+for user in users().add_filter(lambda user: user['user_id'] != 'me').add_map(anonymize_user):
+    print(user)
+```
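+
+The docs above don't include an `add_yield_map` example. Here is a brief, hypothetical sketch (the `emails` field is an assumption, not part of the example resource above) of a yield map that expands each user into one row per email address:
+
+```python
+def expand_emails(user_data):
+    # A yield map may turn one input row into several output rows
+    for email in user_data.get('emails', []):
+        yield {'user_id': user_data['user_id'], 'email': email}
+
+for row in users().add_yield_map(expand_emails):
+    print(row)
+```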
+
+Here is a more complex example of a map transformation:
+
+# Renaming columns
+
+## Renaming columns by replacing the special characters
+
+In the example below, we create a dummy source with special characters in the key names. We then write a function that we intend to apply to the resource to modify its output (i.e. replacing the German umlauts): `replace_umlauts_in_dict_keys`.
+
+```python
+import dlt
+
+# create a dummy source with umlauts (special characters) in key names
+@dlt.source
+def dummy_source(prefix: str = None):
+    @dlt.resource
+    def dummy_data():
+        for _ in range(100):
+            yield {f'Objekt_{_}': {'Größe': _, 'Äquivalenzprüfung': True}}
+    return dummy_data(),
+
+def replace_umlauts_in_dict_keys(d):
+    # Replaces umlauts in dictionary keys with standard characters.
+    umlaut_map = {'ä': 'ae', 'ö': 'oe', 'ü': 'ue', 'ß': 'ss', 'Ä': 'Ae', 'Ö': 'Oe', 'Ü': 'Ue'}
+    result = {}
+    for k, v in d.items():
+        new_key = ''.join(umlaut_map.get(c, c) for c in k)
+        if isinstance(v, dict):
+            result[new_key] = replace_umlauts_in_dict_keys(v)
+        else:
+            result[new_key] = v
+    return result
+
+# We can add the map function to the resource
+
+# 1. Create an instance of the source so you can edit it.
+data_source = dummy_source()
+
+# 2. Modify this source instance's resource
+data_source = data_source.dummy_data().add_map(replace_umlauts_in_dict_keys)
+
+# 3. Inspect your result
+for row in data_source:
+    print(row)
+
+# {'Objekt_0': {'Groesse': 0, 'Aequivalenzpruefung': True}}
+# ...
+```
+
+Here is another example of a map transformation, this time for pseudonymizing columns:
+
+# Pseudonymizing columns
+
+## Pseudonymizing (or anonymizing) columns by replacing personally identifiable data
+
+Pseudonymization is a deterministic way to hide personally identifiable info (PII), enabling us to consistently achieve the same mapping. If instead you wish to anonymize, you can delete the data or replace it with a constant. In the example below, we create a dummy source with a PII column called 'name', which we replace with deterministic hashes.
+
+```python
+import dlt
+import hashlib
+
+@dlt.source
+def dummy_source(prefix: str = None):
+    @dlt.resource
+    def dummy_data():
+        for _ in range(3):
+            yield {'id': _, 'name': f'Jane Washington {_}'}
+    return dummy_data(),
+
+def pseudonymize_name(doc):
+    """
+    Pseudonymization is a deterministic type of PII-obscuring.
+    Its role is to allow identifying users by their hash, without revealing the underlying info.
+    """
+    # add a constant salt to generate deterministic hashes
+    salt = 'WI@N57%zZrmk#88c'
+    salted_string = doc['name'] + salt
+    sh = hashlib.sha256()
+    sh.update(salted_string.encode())
+    hashed_string = sh.digest().hex()
+    doc['name'] = hashed_string
+    return doc
+
+# run it as is
+for row in dummy_source().dummy_data().add_map(pseudonymize_name):
+    print(row)
+
+# {'id': 0, 'name': '96259edb2b28b48bebce8278c550e99fbdc4a3fac8189e6b90f183ecff01c442'}
+# {'id': 1, 'name': '92d3972b625cbd21f28782fb5c89552ce1aa09281892a2ab32aee8feeb3544a1'}
+# {'id': 2, 'name': '443679926a7cff506a3b5d5d094dc7734861352b9e0791af5d39db5a7356d11a'}
+
+# Or create an instance of the data source, modify the resource and run the source.
+
+# 1. Create an instance of the source so you can edit it.
+data_source = dummy_source()
+# 2. Modify this source instance's resource
+data_source = data_source.dummy_data().add_map(pseudonymize_name)
+# 3. Inspect your result
+for row in data_source:
+    print(row)
+
+pipeline = dlt.pipeline(pipeline_name='example', destination='bigquery', dataset_name='normalized_data')
+load_info = pipeline.run(data_source)
+```
diff --git a/server/continuedev/plugins/recipes/AddTransformRecipe/main.py b/server/continuedev/plugins/recipes/AddTransformRecipe/main.py
new file mode 100644
index 00000000..583cef1a
--- /dev/null
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/main.py
@@ -0,0 +1,31 @@
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....core.steps import MessageStep, WaitForUserInputStep
+from .steps import AddTransformStep, SetUpChessPipelineStep
+
+
+class AddTransformRecipe(Step):
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ text_observation = await sdk.run_step(
+ MessageStep(
+ message=dedent(
+ """\
+ This recipe will walk you through the process of adding a transform to a dlt pipeline that uses the chess.com API source. With the help of Continue, you will:
+ - Set up a dlt pipeline for the chess.com API
+ - Add a filter or map transform to the pipeline
+ - Run the pipeline and view the transformed data in a Streamlit app"""
+ ),
+ name="Add transformation to a dlt pipeline",
+ )
+ >> SetUpChessPipelineStep()
+ >> WaitForUserInputStep(
+ prompt="How do you want to transform the Chess.com API data before loading it? For example, you could filter out games that ended in a draw."
+ )
+ )
+ await sdk.run_step(
+ AddTransformStep(transform_description=text_observation.text)
+ )
diff --git a/server/continuedev/plugins/recipes/AddTransformRecipe/steps.py b/server/continuedev/plugins/recipes/AddTransformRecipe/steps.py
new file mode 100644
index 00000000..61638374
--- /dev/null
+++ b/server/continuedev/plugins/recipes/AddTransformRecipe/steps.py
@@ -0,0 +1,106 @@
+import os
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK, Models
+from ....core.steps import MessageStep
+from ....libs.util.paths import find_data_file
+
+AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
+
+
+class SetUpChessPipelineStep(Step):
+ hide: bool = True
+ name: str = "Setup Chess.com API dlt Pipeline"
+
+ async def describe(self, models: Models):
+ return "This step will create a new dlt pipeline that loads data from the chess.com API."
+
+ async def run(self, sdk: ContinueSDK):
+ # running commands to get started when creating a new dlt pipeline
+ await sdk.run(
+ [
+ "python3 -m venv .env",
+ "source .env/bin/activate",
+ "pip install dlt",
+ "dlt --non-interactive init chess duckdb",
+ "pip install -r requirements.txt",
+ "pip install pandas streamlit", # Needed for the pipeline show step later
+ ],
+ name="Set up Python environment",
+ description=dedent(
+ """\
+ - Create a Python virtual environment: `python3 -m venv .env`
+ - Activate the virtual environment: `source .env/bin/activate`
+ - Install dlt: `pip install dlt`
+ - Create a new dlt pipeline called "chess" that loads data into a local DuckDB instance: `dlt init chess duckdb`
+ - Install the Python dependencies for the pipeline: `pip install -r requirements.txt`"""
+ ),
+ )
+
+
+class AddTransformStep(Step):
+ hide: bool = True
+
+ # e.g. "Use the `python-chess` library to decode the moves in the game data"
+ transform_description: str
+
+ async def run(self, sdk: ContinueSDK):
+ source_name = "chess"
+ filename = f"{source_name}_pipeline.py"
+ abs_filepath = os.path.join(sdk.ide.workspace_directory, filename)
+
+ # Open the file and highlight the function to be edited
+ await sdk.ide.setFileOpen(abs_filepath)
+
+ await sdk.run_step(
+ MessageStep(
+ message=dedent(
+ """\
+ This step will customize your resource function with a transform of your choice:
+ - Add a filter or map transformation depending on your request
+ - Load the data into a local DuckDB instance
+ - Open up a Streamlit app for you to view the data"""
+ ),
+ name="Write transformation function",
+ )
+ )
+
+ with open(find_data_file("dlt_transform_docs.md")) as f:
+ dlt_transform_docs = f.read()
+
+ prompt = dedent(
+ f"""\
+ Task: Write a transform function using the description below and then use `add_map` or `add_filter` from the `dlt` library to attach it to a resource.
+
+ Description: {self.transform_description}
+
+ Here are some docs pages that will help you better understand how to use `dlt`.
+
+ {dlt_transform_docs}"""
+ )
+
+ # edit the pipeline to add a transform function and attach it to a resource
+ await sdk.edit_file(
+ filename=filename,
+ prompt=prompt,
+ name=f"Writing transform function {AI_ASSISTED_STRING}",
+ )
+
+ await sdk.wait_for_user_confirmation(
+ "Press Continue to confirm that the changes are okay before we run the pipeline."
+ )
+
+ # run the pipeline and load the data
+ await sdk.run(
+ f"python3 {filename}",
+ name="Run the pipeline",
+ description=f"Running `python3 {filename}` to load the data into a local DuckDB instance",
+ )
+
+ # run a streamlit app to show the data
+ await sdk.run(
+ f"dlt pipeline {source_name}_pipeline show",
+ name="Show data in a Streamlit app",
+ description=f"Running `dlt pipeline {source_name} show` to show the data in a Streamlit app, where you can view and play with the data.",
+ )
diff --git a/server/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md
new file mode 100644
index 00000000..df66104f
--- /dev/null
+++ b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/README.md
@@ -0,0 +1,7 @@
+# ContinueRecipeRecipe
+
+A recipe for building recipes!
+
+## How to use this recipe
+
+This recipe takes a single input, a description of the recipe to be built.
diff --git a/server/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py
new file mode 100644
index 00000000..3dff2e15
--- /dev/null
+++ b/server/continuedev/plugins/recipes/ContinueRecipeRecipe/main.py
@@ -0,0 +1,43 @@
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....plugins.steps.main import EditHighlightedCodeStep
+
+
+class ContinueStepStep(Step):
+ name: str = "Write your own Continue Step."
+ prompt: str
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.run_step(
+ EditHighlightedCodeStep(
+ user_input=dedent(
+ f"""\
+ Here is an example of a Step that runs a command and then edits a file.
+
+ ```python
+ from ...core.main import Step
+ from ...core.sdk import ContinueSDK
+
+ class RunCommandAndEditFileStep(Step):
+ name: str = "Run a command and then edit a file."
+ command: str
+ file_path: str
+ prompt: str
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.run([self.command])
+ await sdk.edit_file(filename=self.file_path, prompt=self.prompt)
+ ```
+
+ Please edit the code to write your own Step that does the following:
+
+ {self.prompt}
+
+ It should be a subclass of Step as above, implementing the `run` method, and using pydantic attributes to define the parameters.
+
+ """
+ )
+ )
+ )
diff --git a/server/continuedev/plugins/recipes/CreatePipelineRecipe/README.md b/server/continuedev/plugins/recipes/CreatePipelineRecipe/README.md
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/server/continuedev/plugins/recipes/CreatePipelineRecipe/README.md
diff --git a/server/continuedev/plugins/recipes/CreatePipelineRecipe/main.py b/server/continuedev/plugins/recipes/CreatePipelineRecipe/main.py
new file mode 100644
index 00000000..56e6f055
--- /dev/null
+++ b/server/continuedev/plugins/recipes/CreatePipelineRecipe/main.py
@@ -0,0 +1,40 @@
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....core.steps import MessageStep, WaitForUserInputStep
+from .steps import RunQueryStep, SetupPipelineStep, ValidatePipelineStep
+
+
+class CreatePipelineRecipe(Step):
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ text_observation = await sdk.run_step(
+ MessageStep(
+ name="Building your first dlt pipeline",
+ message=dedent(
+ """\
+ This recipe will walk you through the process of creating a dlt pipeline for your chosen data source. With the help of Continue, you will:
+ - Create a Python virtual environment with dlt installed
+ - Run `dlt init` to generate a pipeline template
+ - Write the code to call the API
+ - Add any required API keys to the `secrets.toml` file
+ - Test that the API call works
+ - Load the data into a local DuckDB instance
+ - Write a query to view the data"""
+ ),
+ )
+ >> WaitForUserInputStep(
+ prompt="What API do you want to load data from? (e.g. weatherapi.com, chess.com)"
+ )
+ )
+ await sdk.run_step(
+ SetupPipelineStep(api_description=text_observation.text)
+ >> ValidatePipelineStep()
+ >> RunQueryStep()
+ >> MessageStep(
+ name="Congrats!",
+ message="You've successfully created your first dlt pipeline! 🎉",
+ )
+ )
diff --git a/server/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py b/server/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py
new file mode 100644
index 00000000..65e7182d
--- /dev/null
+++ b/server/continuedev/plugins/recipes/CreatePipelineRecipe/steps.py
@@ -0,0 +1,243 @@
+import os
+import time
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK, Models
+from ....core.steps import MessageStep
+from ....models.filesystem import RangeInFile
+from ....models.filesystem_edit import AddFile, FileEdit
+from ....models.main import Range
+
+AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
+
+
+class SetupPipelineStep(Step):
+ hide: bool = True
+ name: str = "Setup dlt Pipeline"
+
+ api_description: str # e.g. "I want to load data from the weatherapi.com API"
+
+ async def describe(self, models: Models):
+ return dedent(
+ f"""\
+ This step will create a new dlt pipeline that loads data from an API, as per your request:
+ {self.api_description}
+ """
+ )
+
+ async def run(self, sdk: ContinueSDK):
+ sdk.context.set("api_description", self.api_description)
+
+ source_name = (
+ await sdk.models.summarize.complete(
+ f"Write a snake_case name for the data source described by {self.api_description}: "
+ )
+ ).strip()
+ filename = f"{source_name}.py"
+
+ # running commands to get started when creating a new dlt pipeline
+ await sdk.run(
+ [
+ "python3 -m venv .env",
+ "source .env/bin/activate",
+ "pip install dlt",
+ f"dlt --non-interactive init {source_name} duckdb",
+ "pip install -r requirements.txt",
+ ],
+ description=dedent(
+ f"""\
+ Running the following commands:
+ - `python3 -m venv .env`: Create a Python virtual environment
+ - `source .env/bin/activate`: Activate the virtual environment
+ - `pip install dlt`: Install dlt
+ - `dlt init {source_name} duckdb`: Create a new dlt pipeline called {source_name} that loads data into a local DuckDB instance
+ - `pip install -r requirements.txt`: Install the Python dependencies for the pipeline"""
+ ),
+ name="Setup Python environment",
+ )
+
+ # editing the resource function to call the requested API
+ resource_function_range = Range.from_shorthand(15, 0, 30, 0)
+ await sdk.ide.highlightCode(
+ RangeInFile(
+ filepath=os.path.join(await sdk.ide.getWorkspaceDirectory(), filename),
+ range=resource_function_range,
+ ),
+ "#ffa50033",
+ )
+
+ # sdk.set_loading_message("Writing code to call the API...")
+ await sdk.edit_file(
+ range=resource_function_range,
+ filename=filename,
+ prompt=f"Edit the resource function to call the API described by this: {self.api_description}. Do not move or remove the exit() call in __main__.",
+ name=f"Edit the resource function to call the API {AI_ASSISTED_STRING}",
+ )
+
+ time.sleep(1)
+
+ # wait for user to put API key in secrets.toml
+ await sdk.ide.setFileOpen(
+ await sdk.ide.getWorkspaceDirectory() + "/.dlt/secrets.toml"
+ )
+ await sdk.wait_for_user_confirmation(
+ "If this service requires an API key, please add it to the `secrets.toml` file and then press `Continue`."
+ )
+
+ sdk.context.set("source_name", source_name)
+
+
+class ValidatePipelineStep(Step):
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ workspace_dir = await sdk.ide.getWorkspaceDirectory()
+ source_name = sdk.context.get("source_name")
+ filename = f"{source_name}.py"
+
+ # await sdk.run_step(MessageStep(name="Validate the pipeline", message=dedent("""\
+ # Next, we will validate that your dlt pipeline is working as expected:
+ # - Test that the API call works
+ # - Load the data into a local DuckDB instance
+ # - Write a query to view the data
+ # """)))
+
+ # test that the API call works
+ output = await sdk.run(
+ f"python3 {filename}",
+ name="Test the pipeline",
+ description=f"Running `python3 {filename}` to test loading data from the API",
+ handle_error=False,
+ )
+
+ # If it fails, return the error
+ if "Traceback" in output or "SyntaxError" in output:
+ output = "Traceback" + output.split("Traceback")[-1]
+ file_content = await sdk.ide.readFile(os.path.join(workspace_dir, filename))
+ suggestion = await sdk.models.summarize.complete(
+ dedent(
+ f"""\
+ ```python
+ {file_content}
+ ```
+ The above code is a dlt pipeline that loads data from an API. The function with the @resource decorator is responsible for calling the API and returning the data. While attempting to run the pipeline, the following error occurred:
+
+ ```ascii
+ {output}
+ ```
+
+ This is a brief summary of the error followed by a suggestion on how it can be fixed by editing the resource function:"""
+ )
+ )
+
+ api_documentation_url = await sdk.models.summarize.complete(
+ dedent(
+ f"""\
+ The API I am trying to call is the '{sdk.context.get('api_description')}'. I tried calling it in the @resource function like this:
+ ```python
+ {file_content}
+ ```
+ What is the URL for the API documentation that will help me learn how to make this call? Please format in markdown so I can click the link."""
+ )
+ )
+
+ sdk.raise_exception(
+ title=f"Error while running pipeline.\nFix the resource function in {filename} and rerun this step",
+ message=output,
+ with_step=MessageStep(
+ name=f"Suggestion to solve error {AI_ASSISTED_STRING}",
+ message=dedent(
+ f"""\
+ {suggestion}
+
+ {api_documentation_url}
+
+ After you've fixed the code, click the retry button at the top of the Validate Pipeline step above."""
+ ),
+ ),
+ )
+
+ # remove the early exit() from the main function
+ await sdk.run_step(
+ MessageStep(
+ name="Remove early exit() from main function",
+ message="Remove the early exit() from the main function now that we are done testing and want the pipeline to load the data into DuckDB.",
+ )
+ )
+
+ contents = await sdk.ide.readFile(os.path.join(workspace_dir, filename))
+ replacement = "\n".join(
+ list(filter(lambda line: line.strip() != "exit()", contents.split("\n")))
+ )
+ await sdk.ide.applyFileSystemEdit(
+ FileEdit(
+ filepath=os.path.join(workspace_dir, filename),
+ replacement=replacement,
+ range=Range.from_entire_file(contents),
+ )
+ )
+
+ # load the data into the DuckDB instance
+ await sdk.run(
+ f"python3 {filename}",
+ name="Load data into DuckDB",
+ description=f"Running python3 {filename} to load data into DuckDB",
+ )
+
+ tables_query_code = dedent(
+ f"""\
+ import duckdb
+
+ # connect to DuckDB instance
+ conn = duckdb.connect(database="{source_name}.duckdb")
+
+ # list all tables
+ print(conn.sql("DESCRIBE"))"""
+ )
+
+ query_filename = os.path.join(workspace_dir, "query.py")
+ await sdk.apply_filesystem_edit(
+ AddFile(filepath=query_filename, content=tables_query_code),
+ name="Add query.py file",
+ description="Adding a file called `query.py` to the workspace that will run a test query on the DuckDB instance",
+ )
+
+
+class RunQueryStep(Step):
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ output = await sdk.run(
+ ".env/bin/python3 query.py",
+ name="Run test query",
+ description="Running `.env/bin/python3 query.py` to test that the data was loaded into DuckDB as expected",
+ handle_error=False,
+ )
+
+ if "Traceback" in output or "SyntaxError" in output:
+ suggestion = await sdk.models.summarize.complete(
+ dedent(
+ f"""\
+ ```python
+ {await sdk.ide.readFile(os.path.join(sdk.ide.workspace_directory, "query.py"))}
+ ```
+ The above code is a query that runs on the DuckDB instance. While attempting to run the query, the following error occurred:
+
+ ```ascii
+ {output}
+ ```
+
+ This is a brief summary of the error followed by a suggestion on how it can be fixed:"""
+ )
+ )
+
+ sdk.raise_exception(
+ title="Error while running query",
+ message=output,
+ with_step=MessageStep(
+ name=f"Suggestion to solve error {AI_ASSISTED_STRING}",
+ message=suggestion
+ + "\n\nIt is also very likely that no duckdb table was created, which can happen if the resource function did not yield any data. Please make sure that it is yielding data and then rerun this step.",
+ ),
+ )
diff --git a/server/continuedev/plugins/recipes/DDtoBQRecipe/README.md b/server/continuedev/plugins/recipes/DDtoBQRecipe/README.md
new file mode 100644
index 00000000..d50324f7
--- /dev/null
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/README.md
@@ -0,0 +1,3 @@
+# DDtoBQRecipe
+
+Move from using DuckDB to Google BigQuery as the destination for your `dlt` pipeline
diff --git a/server/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md b/server/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md
new file mode 100644
index 00000000..eb68e117
--- /dev/null
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/dlt_duckdb_to_bigquery_docs.md
@@ -0,0 +1,85 @@
+### Credentials Missing: ConfigFieldMissingException
+
+You'll see this exception if `dlt` cannot find your BigQuery credentials. In the exception below, all of them ('project_id', 'private_key', 'client_email') are missing. The exception also lists all of the configuration lookups that were performed - [here we explain how to read such a list](run-a-pipeline.md#missing-secret-or-configuration-values).
+
+```
+dlt.common.configuration.exceptions.ConfigFieldMissingException: Following fields are missing: ['project_id', 'private_key', 'client_email'] in configuration with spec GcpServiceAccountCredentials
+ for field "project_id" config providers and keys were tried in following order:
+ In Environment Variables key WEATHERAPI__DESTINATION__BIGQUERY__CREDENTIALS__PROJECT_ID was not found.
+ In Environment Variables key WEATHERAPI__DESTINATION__CREDENTIALS__PROJECT_ID was not found.
+```
+
+The most common causes of the exception are:
+
+1. The secrets are not in `secrets.toml` at all
+2. They are placed in the wrong section. For example, the fragment below will not work (a correctly placed section is shown after this list):
+
+```toml
+[destination.bigquery]
+project_id = "project_id" # please set me up!
+```
+
+3. You run the pipeline script from a **different** folder than the one where it is saved. For example, `python weatherapi_demo/weatherapi.py` runs the script from the `weatherapi_demo` folder, but the current working directory is the folder above. This prevents `dlt` from finding `weatherapi_demo/.dlt/secrets.toml` and filling in the credentials.
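+
+For reference, the BigQuery credentials belong under the destination's `credentials` section, with the same placeholders replaced by real values:
+
+```toml
+[destination.bigquery.credentials]
+project_id = "project_id" # please set me up!
+private_key = "private_key" # please set me up!
+client_email = "client_email" # please set me up!
+```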
+
+### Placeholders still in secrets.toml
+
+Here BigQuery complains that the format of the `private_key` is incorrect. In practice, this most often happens when you forget to replace the placeholders in `secrets.toml` with real values:
+
+```
+<class 'dlt.destinations.exceptions.DestinationConnectionError'>
+Connection with BigQuerySqlClient to dataset name weatherapi_data failed. Please check if you configured the credentials at all and provided the right credentials values. You can be also denied access or your internet connection may be down. The actual reason given is: No key could be detected.
+```
+
+### BigQuery not enabled
+
+[You must enable the BigQuery API.](https://console.cloud.google.com/apis/dashboard)
+
+```
+<class 'google.api_core.exceptions.Forbidden'>
+403 POST https://bigquery.googleapis.com/bigquery/v2/projects/bq-walkthrough/jobs?prettyPrint=false: BigQuery API has not been used in project 364286133232 before or it is disabled. Enable it by visiting https://console.developers.google.com/apis/api/bigquery.googleapis.com/overview?project=364286133232 then retry. If you enabled this API recently, wait a few minutes for the action to propagate to our systems and retry.
+
+Location: EU
+Job ID: a5f84253-3c10-428b-b2c8-1a09b22af9b2
+ [{'@type': 'type.googleapis.com/google.rpc.Help', 'links': [{'description': 'Google developers console API activation', 'url': 'https://console.developers.google.com/apis/api/bigquery.googleapis.com/overview?project=364286133232'}]}, {'@type': 'type.googleapis.com/google.rpc.ErrorInfo', 'reason': 'SERVICE_DISABLED', 'domain': 'googleapis.com', 'metadata': {'service': 'bigquery.googleapis.com', 'consumer': 'projects/364286133232'}}]
+```
+
+### Lack of permissions to create jobs
+
+Add `BigQuery Job User` as described in the [destination page](../destinations/bigquery.md).
+
+```
+<class 'google.api_core.exceptions.Forbidden'>
+403 POST https://bigquery.googleapis.com/bigquery/v2/projects/bq-walkthrough/jobs?prettyPrint=false: Access Denied: Project bq-walkthrough: User does not have bigquery.jobs.create permission in project bq-walkthrough.
+
+Location: EU
+Job ID: c1476d2c-883c-43f7-a5fe-73db195e7bcd
+```
+
+### Lack of permissions to query/write data
+
+Add `BigQuery Data Editor` as described in the [destination page](../destinations/bigquery.md).
+
+```
+<class 'dlt.destinations.exceptions.DatabaseTransientException'>
+403 Access Denied: Table bq-walkthrough:weatherapi_data._dlt_loads: User does not have permission to query table bq-walkthrough:weatherapi_data._dlt_loads, or perhaps it does not exist in location EU.
+
+Location: EU
+Job ID: 299a92a3-7761-45dd-a433-79fdeb0c1a46
+```
+
+### Lack of billing / BigQuery in sandbox mode
+
+`dlt` does not support BigQuery when the project has no billing enabled. If you see a stack trace where the following warning appears:
+
+```
+<class 'dlt.destinations.exceptions.DatabaseTransientException'>
+403 Billing has not been enabled for this project. Enable billing at https://console.cloud.google.com/billing. DML queries are not allowed in the free tier. Set up a billing account to remove this restriction.
+```
+
+or
+
+```
+2023-06-08 16:16:26,769|[WARNING ]|8096|dlt|load.py|complete_jobs:198|Job for weatherapi_resource_83b8ac9e98_4_jsonl retried in load 1686233775.932288 with message {"error_result":{"reason":"billingNotEnabled","message":"Billing has not been enabled for this project. Enable billing at https://console.cloud.google.com/billing. Table expiration time must be less than 60 days while in sandbox mode."},"errors":[{"reason":"billingNotEnabled","message":"Billing has not been enabled for this project. Enable billing at https://console.cloud.google.com/billing. Table expiration time must be less than 60 days while in sandbox mode."}],"job_start":"2023-06-08T14:16:26.850000Z","job_end":"2023-06-08T14:16:26.850000Z","job_id":"weatherapi_resource_83b8ac9e98_4_jsonl"}
+```
+
+you must enable billing for the project.
diff --git a/server/continuedev/plugins/recipes/DDtoBQRecipe/main.py b/server/continuedev/plugins/recipes/DDtoBQRecipe/main.py
new file mode 100644
index 00000000..65149500
--- /dev/null
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/main.py
@@ -0,0 +1,31 @@
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....core.steps import MessageStep
+from .steps import LoadDataStep, SetUpChessPipelineStep, SwitchDestinationStep
+
+# Based on the following guide:
+# https://github.com/dlt-hub/dlt/pull/392
+
+
+class DDtoBQRecipe(Step):
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.run_step(
+ MessageStep(
+ name="Move from using DuckDB to Google BigQuery as the destination",
+ message=dedent(
+ """\
+ This recipe will walk you through the process of moving from using DuckDB to Google BigQuery as the destination for your dlt pipeline. With the help of Continue, you will:
+ - Set up a dlt pipeline for the chess.com API
+ - Switch destination from DuckDB to Google BigQuery
+ - Add BigQuery credentials to your secrets.toml file
+ - Run the pipeline again to load data to BigQuery"""
+ ),
+ )
+ >> SetUpChessPipelineStep()
+ >> SwitchDestinationStep()
+ >> LoadDataStep()
+ )
diff --git a/server/continuedev/plugins/recipes/DDtoBQRecipe/steps.py b/server/continuedev/plugins/recipes/DDtoBQRecipe/steps.py
new file mode 100644
index 00000000..dfe25d9e
--- /dev/null
+++ b/server/continuedev/plugins/recipes/DDtoBQRecipe/steps.py
@@ -0,0 +1,119 @@
+import os
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK, Models
+from ....core.steps import MessageStep
+from ....libs.util.paths import find_data_file
+from ....plugins.steps.find_and_replace import FindAndReplaceStep
+
+AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
+
+
+class SetUpChessPipelineStep(Step):
+ hide: bool = True
+ name: str = "Setup Chess.com API dlt Pipeline"
+
+ async def describe(self, models: Models):
+ return "This step will create a new dlt pipeline that loads data from the chess.com API."
+
+ async def run(self, sdk: ContinueSDK):
+ # running commands to get started when creating a new dlt pipeline
+ await sdk.run(
+ [
+ "python3 -m venv .env",
+ "source .env/bin/activate",
+ "pip install dlt",
+ "dlt --non-interactive init chess duckdb",
+ "pip install -r requirements.txt",
+ ],
+ name="Set up Python environment",
+ description=dedent(
+ """\
+ Running the following commands:
+ - `python3 -m venv .env`: Create a Python virtual environment
+ - `source .env/bin/activate`: Activate the virtual environment
+ - `pip install dlt`: Install dlt
+ - `dlt init chess duckdb`: Create a new dlt pipeline called "chess" that loads data into a local DuckDB instance
+ - `pip install -r requirements.txt`: Install the Python dependencies for the pipeline"""
+ ),
+ )
+
+
+class SwitchDestinationStep(Step):
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ # Switch destination from DuckDB to Google BigQuery
+ filepath = os.path.join(sdk.ide.workspace_directory, "chess_pipeline.py")
+ await sdk.run_step(
+ FindAndReplaceStep(
+ filepath=filepath,
+ pattern="destination='duckdb'",
+ replacement="destination='bigquery'",
+ )
+ )
+
+ # Add BigQuery credentials to your secrets.toml file
+ template = dedent(
+ """\
+ [destination.bigquery.credentials]
+ location = "US" # change the location of the data
+ project_id = "project_id" # please set me up!
+ private_key = "private_key" # please set me up!
+ client_email = "client_email" # please set me up!"""
+ )
+
+ # append the credentials template to the bottom of secrets.toml and open the file
+ secrets_path = os.path.join(sdk.ide.workspace_directory, ".dlt/secrets.toml")
+ await sdk.ide.setFileOpen(secrets_path)
+ await sdk.append_to_file(secrets_path, template)
+
+ # wait for the user to fill in their BigQuery credentials
+ await sdk.wait_for_user_confirmation(
+ "Please add your GCP credentials to `secrets.toml` file and then press `Continue`"
+ )
+
+
+class LoadDataStep(Step):
+ name: str = "Load data to BigQuery"
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ # Run the pipeline again to load data to BigQuery
+ output = await sdk.run(
+ ".env/bin/python3 chess_pipeline.py",
+ name="Load data to BigQuery",
+ description="Running `.env/bin/python3 chess_pipeline.py` to load data to Google BigQuery",
+ )
+
+ if "Traceback" in output or "SyntaxError" in output:
+ with open(find_data_file("dlt_duckdb_to_bigquery_docs.md"), "r") as f:
+ docs = f.read()
+
+ output = "Traceback" + output.split("Traceback")[-1]
+ suggestion = await sdk.models.default.complete(
+ dedent(
+ f"""\
+ When trying to load data into BigQuery, the following error occurred:
+
+ ```ascii
+ {output}
+ ```
+
+ Here is documentation describing common errors and their causes/solutions:
+
+ {docs}
+
+ This is a brief summary of the error followed by a suggestion on how it can be fixed:"""
+ )
+ )
+
+ sdk.raise_exception(
+ title="Error while running query",
+ message=output,
+ with_step=MessageStep(
+ name=f"Suggestion to solve error {AI_ASSISTED_STRING}",
+ message=suggestion,
+ ),
+ )
diff --git a/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/README.md
diff --git a/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py
new file mode 100644
index 00000000..5b0bd320
--- /dev/null
+++ b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/main.py
@@ -0,0 +1,86 @@
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....core.steps import MessageStep
+from ....plugins.steps.input.nl_multiselect import NLMultiselectStep
+from .steps import DeployAirflowStep, RunPipelineStep, SetupPipelineStep
+
+# https://github.com/dlt-hub/dlt-deploy-template/blob/master/airflow-composer/dag_template.py
+# https://www.notion.so/dlthub/Deploy-a-pipeline-with-Airflow-245fd1058652479494307ead0b5565f3
+# 1. What verified pipeline do you want to deploy with Airflow?
+# 2. Set up selected verified pipeline
+# 3. Deploy selected verified pipeline with Airflow
+# 4. Set up Airflow locally?
+
+
+class DeployPipelineAirflowRecipe(Step):
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ source_name = await sdk.run_step(
+ MessageStep(
+ name="Deploying a pipeline to Airflow",
+ message=dedent(
+ """\
+ This recipe will show you how to deploy a pipeline to Airflow. With the help of Continue, you will:
+ - Select a dlt-verified pipeline
+ - Set up the pipeline
+ - Deploy it to Airflow
+ - Optionally, set up Airflow locally"""
+ ),
+ )
+ >> NLMultiselectStep(
+ prompt=dedent(
+ """\
+ Which verified pipeline do you want to deploy with Airflow? The options are:
+ - Asana
+ - Chess.com
+ - Facebook Ads
+ - GitHub
+ - Google Analytics
+ - Google Sheets
+ - HubSpot
+ - Jira
+ - Matomo
+ - Mux
+ - Notion
+ - Pipedrive
+ - Pokemon
+ - Salesforce
+ - Shopify
+ - Strapi
+ - Stripe
+ - SQL Database
+ - Workable
+ - Zendesk"""
+ ),
+ options=[
+ "asana_dlt",
+ "chess",
+ "github",
+ "google_analytics",
+ "google_sheets",
+ "hubspot",
+ "matomo",
+ "pipedrive",
+ "shopify_dlt",
+ "strapi",
+ "zendesk",
+ "facebook_ads",
+ "jira",
+ "mux",
+ "notion",
+ "pokemon",
+ "salesforce",
+ "stripe_analytics",
+ "sql_database",
+ "workable",
+ ],
+ )
+ )
+ await sdk.run_step(
+ SetupPipelineStep(source_name=source_name)
+ >> RunPipelineStep(source_name=source_name)
+ >> DeployAirflowStep(source_name=source_name)
+ )
diff --git a/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py
new file mode 100644
index 00000000..e4a932af
--- /dev/null
+++ b/server/continuedev/plugins/recipes/DeployPipelineAirflowRecipe/steps.py
@@ -0,0 +1,125 @@
+import os
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK, Models
+from ....core.steps import MessageStep
+from ....plugins.steps.find_and_replace import FindAndReplaceStep
+
+AI_ASSISTED_STRING = "(✨ AI-Assisted ✨)"
+
+
+class SetupPipelineStep(Step):
+ hide: bool = True
+ name: str = "Setup dlt Pipeline"
+
+ source_name: str
+
+ async def describe(self, models: Models):
+ pass
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.run(
+ [
+ "python3 -m venv .env",
+ "source .env/bin/activate",
+ "pip install dlt",
+ f"dlt --non-interactive init {self.source_name} duckdb",
+ "pip install -r requirements.txt",
+ ],
+ description=dedent(
+ f"""\
+ Running the following commands:
+ - `python3 -m venv .env`: Create a Python virtual environment
+ - `source .env/bin/activate`: Activate the virtual environment
+ - `pip install dlt`: Install dlt
+ - `dlt init {self.source_name} duckdb`: Create a new dlt pipeline called {self.source_name} that loads data into a local DuckDB instance
+ - `pip install -r requirements.txt`: Install the Python dependencies for the pipeline"""
+ ),
+ name="Setup Python environment",
+ )
+
+
+class RunPipelineStep(Step):
+ hide: bool = True
+ name: str = "Run dlt Pipeline"
+
+ source_name: str
+
+ async def describe(self, models: Models):
+ pass
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.run(
+ [
+ f"python3 {self.source_name}_pipeline.py",
+ ],
+ description=dedent(
+ f"""\
+ Running the command `python3 {self.source_name}_pipeline.py` to run the pipeline:"""
+ ),
+ name="Run dlt pipeline",
+ )
+
+
+class DeployAirflowStep(Step):
+ hide: bool = True
+ source_name: str
+
+ async def run(self, sdk: ContinueSDK):
+ # Run dlt command to deploy pipeline to Airflow
+ await sdk.run(
+ [
+ "git init",
+ f"dlt --non-interactive deploy {self.source_name}_pipeline.py airflow-composer",
+ ],
+ description="Running `dlt deploy airflow` to deploy the dlt pipeline to Airflow",
+ name="Deploy dlt pipeline to Airflow",
+ )
+
+ # Get filepaths, open the DAG file
+ directory = await sdk.ide.getWorkspaceDirectory()
+ pipeline_filepath = os.path.join(directory, f"{self.source_name}_pipeline.py")
+ dag_filepath = os.path.join(
+ directory, f"dags/dag_{self.source_name}_pipeline.py"
+ )
+
+ await sdk.ide.setFileOpen(dag_filepath)
+
+ # Replace the pipeline name and dataset name
+ await sdk.run_step(
+ FindAndReplaceStep(
+ filepath=pipeline_filepath,
+ pattern="'pipeline_name'",
+ replacement=f"'{self.source_name}_pipeline'",
+ )
+ )
+ await sdk.run_step(
+ FindAndReplaceStep(
+ filepath=pipeline_filepath,
+ pattern="'dataset_name'",
+ replacement=f"'{self.source_name}_data'",
+ )
+ )
+ await sdk.run_step(
+ FindAndReplaceStep(
+ filepath=pipeline_filepath,
+ pattern="pipeline_or_source_script",
+ replacement=f"{self.source_name}_pipeline",
+ )
+ )
+
+ # Prompt the user for the DAG schedule
+ # edit_dag_range = Range.from_shorthand(18, 0, 23, 0)
+ # await sdk.ide.highlightCode(range_in_file=RangeInFile(filepath=dag_filepath, range=edit_dag_range), color="#33993333")
+ # response = await sdk.run_step(WaitForUserInputStep(prompt="When would you like this Airflow DAG to run? (e.g. every day, every Monday, every 1st of the month, etc.)"))
+ # await sdk.edit_file(dag_filepath, prompt=f"Edit the DAG so that it runs at the following schedule: '{response.text}'",
+ # range=edit_dag_range)
+
+ # Tell the user to check the schedule and fill in owner, email, other default_args
+ await sdk.run_step(
+ MessageStep(
+ message="Fill in the owner, email, and other default_args in the DAG file with your own personal information. Then the DAG will be ready to run!",
+ name="Fill in default_args",
+ )
+ )
diff --git a/server/continuedev/plugins/recipes/README.md b/server/continuedev/plugins/recipes/README.md
new file mode 100644
index 00000000..9860b0e2
--- /dev/null
+++ b/server/continuedev/plugins/recipes/README.md
@@ -0,0 +1,19 @@
+# This is a collaborative collection of Continue recipes
+
+A recipe is technically just a [Step](../steps/README.md), but is intended to be more complex, composed of multiple sub-steps.
+
+Recipes here will automatically be made available in the [Continue VS Code extension](https://marketplace.visualstudio.com/items?itemName=Continue.continue).
+
+The `recipes` folder contains all recipes, each with the same structure. **If you wish to create your own recipe, please do the following:**
+
+1. Create a new subfolder in `recipes`, with the name of your recipe (for example `MyNewRecipe`).
+2. Make 2 files in this folder: 1) a `README.md` describing your recipe and how to use it, and 2) a `main.py` containing a single class with the name of your recipe (e.g. `MyNewRecipe`).
+3. Write any utility code other than the main recipe class in a separate file, which you can import in `main.py`. In particular, if you decide to break the recipe into multiple sub-steps, try to keep these separate. A minimal sketch of a recipe's `main.py` is shown below.
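+
+A minimal, hypothetical `main.py` for step 2 might look like the sketch below (the class name and message are placeholders; real recipes usually chain several sub-steps with `>>`):
+
+```python
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....core.steps import MessageStep
+
+
+class MyNewRecipe(Step):
+    hide: bool = True
+
+    async def run(self, sdk: ContinueSDK):
+        # Run a single sub-step; chain more with >> as the other recipes do
+        await sdk.run_step(
+            MessageStep(
+                name="My new recipe",
+                message="Explain here what the recipe is about to do.",
+            )
+        )
+```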
+
+# Existing Recipes
+
+`AddTransformRecipe` - Add a map or filter transform to an existing dlt pipeline.
+
+`ContinueRecipeRecipe` - Write a Continue recipe with Continue.
+
+`CreatePipelineRecipe` - Build a dlt pipeline from scratch for an API of your choice.
+
+`DDtoBQRecipe` - Move from using DuckDB to Google BigQuery as the destination for your dlt pipeline.
+
+`DeployPipelineAirflowRecipe` - Deploy a dlt pipeline to Airflow.
+
+`WritePytestsRecipe` - Write Pytest unit tests in a folder adjacent to your Python file.
diff --git a/server/continuedev/plugins/recipes/TemplateRecipe/README.md b/server/continuedev/plugins/recipes/TemplateRecipe/README.md
new file mode 100644
index 00000000..91d1123b
--- /dev/null
+++ b/server/continuedev/plugins/recipes/TemplateRecipe/README.md
@@ -0,0 +1,7 @@
+# TemplateRecipe
+
+This folder is a template that you can copy to create your own recipe.
+
+## How to use this recipe
+
+Explain here what users should know when using your recipe. What inputs does it have and what actions will it perform?
diff --git a/server/continuedev/plugins/recipes/TemplateRecipe/main.py b/server/continuedev/plugins/recipes/TemplateRecipe/main.py
new file mode 100644
index 00000000..01ae364d
--- /dev/null
+++ b/server/continuedev/plugins/recipes/TemplateRecipe/main.py
@@ -0,0 +1,29 @@
+from typing import Coroutine
+
+from ....core.main import Observation, Step
+from ....core.sdk import ContinueSDK, Models
+
+
+class TemplateRecipe(Step):
+ """
+ A simple recipe that appends a print statement to the currently open file.
+ Use this as a template to create your own!
+ """
+
+ # Parameters for the recipe
+ name: str
+
+ # A title for the recipe, to be displayed in the GUI
+ title = "Template Recipe"
+
+ # A description of what the recipe accomplished, to be displayed in the GUI
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return f"Appended a statement to print `Hello, {self.name}!` at the end of the file."
+
+ # The code executed when the recipe is run
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ visible_files = await sdk.ide.getVisibleFiles()
+ await sdk.edit_file(
+ filename=visible_files[0],
+ prompt=f"Append a statement to print `Hello, {self.name}!` at the end of the file.",
+ )
diff --git a/server/continuedev/plugins/recipes/WritePytestsRecipe/README.md b/server/continuedev/plugins/recipes/WritePytestsRecipe/README.md
new file mode 100644
index 00000000..5ce33ecb
--- /dev/null
+++ b/server/continuedev/plugins/recipes/WritePytestsRecipe/README.md
@@ -0,0 +1,7 @@
+# WritePytestsRecipe
+
+A recipe for writing unit tests in Pytest.
+
+## How to use this recipe
+
+Call this recipe with a Python file open that you would like to test. It will create tests in a `tests/` folder adjacent to the file, and the test file will be given the same name prefixed with `test_`.
diff --git a/server/continuedev/plugins/recipes/WritePytestsRecipe/main.py b/server/continuedev/plugins/recipes/WritePytestsRecipe/main.py
new file mode 100644
index 00000000..63edabc6
--- /dev/null
+++ b/server/continuedev/plugins/recipes/WritePytestsRecipe/main.py
@@ -0,0 +1,52 @@
+import os
+from textwrap import dedent
+from typing import Union
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....models.filesystem_edit import AddDirectory, AddFile
+
+
+class WritePytestsRecipe(Step):
+ for_filepath: Union[str, None] = None
+ user_input: str = "Write unit tests for this file."
+
+ async def describe(self, models):
+ return f"Writing unit tests for {self.for_filepath}"
+
+ async def run(self, sdk: ContinueSDK):
+ if self.for_filepath is None:
+ self.for_filepath = (await sdk.ide.getVisibleFiles())[0]
+
+ filename = os.path.basename(self.for_filepath)
+ dirname = os.path.dirname(self.for_filepath)
+
+ path_dir = os.path.join(dirname, "tests")
+ if not os.path.exists(path_dir):
+ await sdk.apply_filesystem_edit(AddDirectory(path=path_dir))
+
+ path = os.path.join(path_dir, f"test_{filename}")
+ if os.path.exists(path):
+ return None
+
+ for_file_contents = await sdk.ide.readFile(self.for_filepath)
+
+ prompt = dedent(
+ f"""\
+ This is the file you will write unit tests for:
+
+ ```python
+ {for_file_contents}
+ ```
+
+ Here are additional instructions:
+
+ "{self.user_input}"
+
+ Here is a complete set of pytest unit tests:"""
+ )
+ tests = await sdk.models.summarize.complete(prompt)
+
+ await sdk.apply_filesystem_edit(AddFile(filepath=path, content=tests))
+
+ return None
diff --git a/server/continuedev/plugins/steps/README.md b/server/continuedev/plugins/steps/README.md
new file mode 100644
index 00000000..a8cae90b
--- /dev/null
+++ b/server/continuedev/plugins/steps/README.md
@@ -0,0 +1,50 @@
+# Steps
+
+Steps are the composable unit of action in Continue. They define a `run` method which has access to the entire `ContinueSDK`, allowing you to take actions inside the IDE, call language models, and more. In this folder you can find a number of good examples.
+
+## How to write a step
+
+a. Start by creating a subclass of `Step`
+
+You should first consider what the parameters of your step will be. These are defined as attributes in the Pydantic class. For example, if you wanted a "filepath" attribute, it would look like this:
+
+```python
+class HelloWorldStep(Step):
+ filepath: str
+ ...
+```
+
+b. Next, write the `run` method
+
+This method takes the ContinueSDK as a parameter, giving you all the tools you need to write your step (if it's missing something, let us know and we'll add it!). You can write any code inside the `run` method; this is what will happen when your step is run, line for line. As an example, here's a step that will open a file and append "Hello World!":
+
+```python
+class HelloWorldStep(Step):
+ filepath: str
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.ide.setFileOpen(self.filepath)
+ await sdk.append_to_file(self.filepath, "Hello World!")
+```
+
+c. Finally, every Step is displayed with a description of what it has done
+
+If you'd like to override the default description of your step, which is just the class name, then implement the `describe` method. You can:
+
+- Return a static string
+- Store state in a class attribute during the `run` method (prefix the name with a double underscore, which signifies through Pydantic that this is not a parameter for the Step, just internal state), and then read that state in the `describe` method.
+- Use that state together with the `models` parameter of the `describe` method to autogenerate a description with a language model. For example, if you'd used an attribute called `__code_written` to store a string representing some code that was written, you could implement describe as `return await models.summarize.complete(f"{self.__code_written}\n\nSummarize the changes made in the above code.")`. A sketch of this option is included at the end of this file.
+
+Here's an example:
+
+```python
+class HelloWorldStep(Step):
+ filepath: str
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.ide.setFileOpen(self.filepath)
+ await sdk.append_to_file(self.filepath, "Hello World!")
+
+    async def describe(self, models: Models):
+ return f"Appended 'Hello World!' to {self.filepath}"
+```
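+
+For the third option above, a rough sketch (following the double-underscore convention described earlier; not tested against the Step base class, so treat it as an illustration) might look like:
+
+```python
+class AppendCodeStep(Step):
+    filepath: str
+    __code_written: str = ""
+
+    async def run(self, sdk: ContinueSDK):
+        code = "print('Hello World!')"
+        await sdk.append_to_file(self.filepath, code)
+        # Stash what was written so describe() can summarize it later
+        self.__code_written = code
+
+    async def describe(self, models: Models):
+        return await models.summarize.complete(
+            f"{self.__code_written}\n\nSummarize the changes made in the above code."
+        )
+```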
diff --git a/server/continuedev/plugins/steps/__init__.py b/server/continuedev/plugins/steps/__init__.py
new file mode 100644
index 00000000..a181a956
--- /dev/null
+++ b/server/continuedev/plugins/steps/__init__.py
@@ -0,0 +1,13 @@
+# from .chroma import (
+# AnswerQuestionChroma, # noqa: F401
+# CreateCodebaseIndexChroma, # noqa: F401
+# EditFileChroma, # noqa: F401
+# )
+from .clear_history import ClearHistoryStep # noqa: F401
+from .cmd import GenerateShellCommandStep # noqa: F401
+from .comment_code import CommentCodeStep # noqa: F401
+from .help import HelpStep # noqa: F401
+from .main import EditHighlightedCodeStep # noqa: F401
+from .open_config import OpenConfigStep # noqa: F401
+
+# from .share_session import ShareSessionStep # noqa: F401
diff --git a/server/continuedev/plugins/steps/chat.py b/server/continuedev/plugins/steps/chat.py
new file mode 100644
index 00000000..1b0f76f9
--- /dev/null
+++ b/server/continuedev/plugins/steps/chat.py
@@ -0,0 +1,379 @@
+import html
+import json
+import os
+from textwrap import dedent
+from typing import Any, Coroutine, List
+
+import openai
+from directory_tree import display_tree
+from dotenv import load_dotenv
+from pydantic import Field
+
+from ...core.main import ChatMessage, FunctionCall, Models, Step, step_to_json_schema
+from ...core.sdk import ContinueSDK
+from ...core.steps import MessageStep
+from ...libs.llm.openai import OpenAI
+from ...libs.llm.openai_free_trial import OpenAIFreeTrial
+from ...libs.util.devdata import dev_data_logger
+from ...libs.util.strings import remove_quotes_and_escapes
+from ...libs.util.telemetry import posthog_logger
+from .main import EditHighlightedCodeStep
+
+load_dotenv()
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+openai.api_key = OPENAI_API_KEY
+
+FREE_USAGE_STEP_NAME = "Please enter OpenAI API key"
+
+
+def add_ellipsis(text: str, max_length: int = 200) -> str:
+ if len(text) > max_length:
+ return text[: max_length - 3] + "..."
+ return text
+
+
+class SimpleChatStep(Step):
+ name: str = "Generating Response..."
+ manage_own_chat_context: bool = True
+ description: str = ""
+ messages: List[ChatMessage] = None
+
+ async def run(self, sdk: ContinueSDK):
+ # Check if proxy server API key
+ if (
+ isinstance(sdk.models.default, OpenAIFreeTrial)
+ and (
+ sdk.models.default.api_key is None
+ or sdk.models.default.api_key.strip() == ""
+ )
+ and len(list(filter(lambda x: not x.step.hide, sdk.history.timeline))) >= 10
+ and len(
+ list(
+ filter(
+ lambda x: x.step.name == FREE_USAGE_STEP_NAME,
+ sdk.history.timeline,
+ )
+ )
+ )
+ == 0
+ ):
+ await sdk.run_step(
+ MessageStep(
+ name=FREE_USAGE_STEP_NAME,
+ message=dedent(
+ """\
+ To make it easier to use Continue, you're getting limited free usage. When you have the chance, please enter your own OpenAI key in `~/.continue/config.py`. You can open the file by using the '/config' slash command in the text box below.
+
+ Here's an example of how to edit the file:
+ ```python
+ ...
+ config=ContinueConfig(
+ ...
+ models=Models(
+ default=OpenAIFreeTrial(api_key="<API_KEY>", model="gpt-4"),
+ summarize=OpenAIFreeTrial(api_key="<API_KEY>", model="gpt-3.5-turbo")
+ )
+ )
+ ```
+
+ You can also learn more about customizations [here](https://continue.dev/docs/customization).
+ """
+ ),
+ )
+ )
+
+ messages = self.messages or await sdk.get_chat_context()
+
+ generator = sdk.models.chat.stream_chat(
+ messages, temperature=sdk.config.temperature
+ )
+
+ posthog_logger.capture_event(
+ "model_use",
+ {
+ "model": sdk.models.default.model,
+ "provider": sdk.models.default.__class__.__name__,
+ },
+ )
+ dev_data_logger.capture(
+ "model_use",
+ {
+ "model": sdk.models.default.model,
+ "provider": sdk.models.default.__class__.__name__,
+ },
+ )
+
+ async for chunk in generator:
+ if sdk.current_step_was_deleted():
+ # So that the message doesn't disappear
+ self.hide = False
+ await sdk.update_ui()
+ break
+
+ if "content" in chunk:
+ self.description += chunk["content"]
+
+ # HTML unencode
+ end_size = len(chunk["content"]) - 6
+ if "&" in self.description[-end_size:]:
+ self.description = self.description[:-end_size] + html.unescape(
+ self.description[-end_size:]
+ )
+
+ await sdk.update_ui()
+
+ if sdk.config.disable_summaries:
+ self.name = ""
+ else:
+ self.name = "Generating title..."
+ await sdk.update_ui()
+ self.name = add_ellipsis(
+ remove_quotes_and_escapes(
+ await sdk.models.summarize.complete(
+ f'"{self.description}"\n\nPlease write a short title summarizing the message quoted above. Use no more than 10 words:',
+ max_tokens=20,
+ log=False,
+ )
+ ),
+ 200,
+ )
+
+ await sdk.update_ui()
+
+ self.chat_context.append(
+ ChatMessage(role="assistant", content=self.description, summary=self.name)
+ )
+
+ # TODO: Never actually closing.
+ await generator.aclose()
+
+
+class AddFileStep(Step):
+ name: str = "Add File"
+ description = "Add a file to the workspace. Should always view the directory tree before this."
+ filename: str
+ file_contents: str
+
+ async def describe(
+ self, models: Models
+ ) -> Coroutine[Any, Any, Coroutine[str, None, None]]:
+ return f"Added a file named `{self.filename}` to the workspace."
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.add_file(self.filename, self.file_contents)
+
+ await sdk.ide.setFileOpen(
+ os.path.join(sdk.ide.workspace_directory, self.filename)
+ )
+
+
+class DeleteFileStep(Step):
+ name: str = "Delete File"
+ description = "Delete a file from the workspace."
+ filename: str
+
+ async def describe(
+ self, models: Models
+ ) -> Coroutine[Any, Any, Coroutine[str, None, None]]:
+ return f"Deleted a file named `{self.filename}` from the workspace."
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.delete_file(self.filename)
+
+
+class AddDirectoryStep(Step):
+ name: str = "Add Directory"
+ description = "Add a directory to the workspace."
+ directory_name: str
+
+ async def describe(
+ self, models: Models
+ ) -> Coroutine[Any, Any, Coroutine[str, None, None]]:
+ return f"Added a directory named `{self.directory_name}` to the workspace."
+
+ async def run(self, sdk: ContinueSDK):
+ try:
+ await sdk.add_directory(self.directory_name)
+ except FileExistsError:
+ self.description = f"Directory {self.directory_name} already exists."
+
+
+class RunTerminalCommandStep(Step):
+ name: str = "Run Terminal Command"
+ description: str = "Run a terminal command."
+ command: str
+
+ async def run(self, sdk: ContinueSDK):
+ self.description = f"Copy this command and run in your terminal:\n\n```bash\n{self.command}\n```"
+
+
+class ViewDirectoryTreeStep(Step):
+ name: str = "View Directory Tree"
+ description: str = "View the directory tree to learn which folder and files exist. You should always do this before adding new files."
+
+ async def describe(
+ self, models: Models
+ ) -> Coroutine[Any, Any, Coroutine[str, None, None]]:
+ return "Viewed the directory tree."
+
+ async def run(self, sdk: ContinueSDK):
+ self.description = (
+ f"```\n{display_tree(sdk.ide.workspace_directory, True, max_depth=2)}\n```"
+ )
+
+
+class EditFileStep(Step):
+ name: str = "Edit File"
+ description: str = "Edit a file in the workspace that is not currently open."
+ filename: str = Field(..., description="The name of the file to edit.")
+ instructions: str = Field(..., description="The instructions to edit the file.")
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.edit_file(self.filename, self.instructions)
+
+
+class ChatWithFunctions(Step):
+ user_input: str
+ functions: List[Step] = [
+ AddFileStep(filename="", file_contents=""),
+ EditFileStep(filename="", instructions=""),
+ EditHighlightedCodeStep(user_input=""),
+ ViewDirectoryTreeStep(),
+ AddDirectoryStep(directory_name=""),
+ DeleteFileStep(filename=""),
+ RunTerminalCommandStep(command=""),
+ ]
+ name: str = "Input"
+ manage_own_chat_context: bool = True
+ description: str = ""
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.update_ui()
+
+ step_name_step_class_map = {
+ step.name.replace(" ", ""): step.__class__ for step in self.functions
+ }
+
+ functions = [step_to_json_schema(function) for function in self.functions]
+
+ self.chat_context.append(
+ ChatMessage(role="user", content=self.user_input, summary=self.user_input)
+ )
+
+ last_function_called_name = None
+ last_function_called_params = None
+ while True:
+ was_function_called = False
+ func_args = ""
+ func_name = ""
+ msg_content = ""
+ msg_step = None
+
+ gpt350613 = OpenAI(model="gpt-3.5-turbo-0613")
+ await sdk.start_model(gpt350613)
+
+ async for msg_chunk in gpt350613.stream_chat(
+ await sdk.get_chat_context(), functions=functions
+ ):
+ if sdk.current_step_was_deleted():
+ return
+
+ if "content" in msg_chunk and msg_chunk["content"] is not None:
+ msg_content += msg_chunk["content"]
+ # if last_function_called_index_in_history is not None:
+ # while sdk.history.timeline[last_function_called_index].step.hide:
+ # last_function_called_index += 1
+ # sdk.history.timeline[last_function_called_index_in_history].step.description = msg_content
+ if msg_step is None:
+ msg_step = MessageStep(
+ name="Chat", message=msg_chunk["content"]
+ )
+ await sdk.run_step(msg_step)
+ else:
+ msg_step.description = msg_content
+ await sdk.update_ui()
+ elif "function_call" in msg_chunk or func_name != "":
+ was_function_called = True
+ if "function_call" in msg_chunk:
+ if "arguments" in msg_chunk["function_call"]:
+ func_args += msg_chunk["function_call"]["arguments"]
+ if "name" in msg_chunk["function_call"]:
+ func_name += msg_chunk["function_call"]["name"]
+
+ if not was_function_called:
+ self.chat_context.append(
+ ChatMessage(
+ role="assistant", content=msg_content, summary=msg_content
+ )
+ )
+ break
+ else:
+ if func_name == "python" and "python" not in step_name_step_class_map:
+ # GPT sometimes assumes a built-in "python" function exists (likely from fine-tuning), but we don't provide one, so fall back to editing the highlighted code
+ func_name = "EditHighlightedCodeStep"
+ func_args = json.dumps({"user_input": self.user_input})
+ # self.chat_context.append(ChatMessage(
+ # role="assistant",
+ # content=None,
+ # function_call=FunctionCall(
+ # name=func_name,
+ # arguments=func_args
+ # ),
+ # summary=f"Called function {func_name}"
+ # ))
+ # self.chat_context.append(ChatMessage(
+ # role="user",
+ # content="The 'python' function does not exist. Don't call it. Try again to call another function.",
+ # summary="'python' function does not exist."
+ # ))
+ # msg_step.hide = True
+ # continue
+ # Call the function, then continue to chat
+ func_args = "{}" if func_args == "" else func_args
+ try:
+ fn_call_params = json.loads(func_args)
+ except json.JSONDecodeError:
+ raise Exception("The model returned invalid JSON. Please try again")
+ self.chat_context.append(
+ ChatMessage(
+ role="assistant",
+ content=None,
+ function_call=FunctionCall(name=func_name, arguments=func_args),
+ summary=f"Called function {func_name}",
+ )
+ )
+ if func_name not in step_name_step_class_map:
+ raise Exception(
+ f"The model tried to call a function ({func_name}) that does not exist. Please try again."
+ )
+
+ # if func_name == "AddFileStep":
+ # step_to_run.hide = True
+ # self.description += f"\nAdded file `{func_args['filename']}`"
+ # elif func_name == "AddDirectoryStep":
+ # step_to_run.hide = True
+ # self.description += f"\nAdded directory `{func_args['directory_name']}`"
+ # else:
+ # self.description += f"\n`Running function {func_name}`\n\n"
+ if func_name == "EditHighlightedCodeStep":
+ fn_call_params["user_input"] = self.user_input
+ elif func_name == "EditFile":
+ fn_call_params["instructions"] = self.user_input
+
+ step_to_run = step_name_step_class_map[func_name](**fn_call_params)
+ if (
+ last_function_called_name is not None
+ and last_function_called_name == func_name
+ and last_function_called_params is not None
+ and last_function_called_params == fn_call_params
+ ):
+ # If it's calling the same function more than once in a row, it's probably looping and confused
+ return
+ last_function_called_name = func_name
+ last_function_called_params = fn_call_params
+
+ await sdk.run_step(step_to_run)
+ await sdk.update_ui()
diff --git a/server/continuedev/plugins/steps/chroma.py b/server/continuedev/plugins/steps/chroma.py
new file mode 100644
index 00000000..f357a872
--- /dev/null
+++ b/server/continuedev/plugins/steps/chroma.py
@@ -0,0 +1,86 @@
+from textwrap import dedent
+from typing import Coroutine, Union
+
+from ...core.main import Step
+from ...core.observation import Observation
+from ...core.sdk import ContinueSDK
+from ...core.steps import EditFileStep
+from ...libs.chroma.query import ChromaIndexManager
+
+
+class CreateCodebaseIndexChroma(Step):
+ name: str = "Create Codebase Index"
+ hide: bool = True
+
+ async def describe(self, llm) -> Coroutine[str, None, None]:
+ return "Indexing the codebase..."
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ index = ChromaIndexManager(await sdk.ide.getWorkspaceDirectory())
+ if not index.check_index_exists():
+ self.hide = False
+ index.create_codebase_index()
+
+
+class AnswerQuestionChroma(Step):
+ question: str
+ _answer: Union[str, None] = None
+ name: str = "Answer Question"
+
+ async def describe(self, llm) -> Coroutine[str, None, None]:
+ if self._answer is None:
+ return f"Answering the question: {self.question}"
+ else:
+ return self._answer
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ index = ChromaIndexManager(await sdk.ide.getWorkspaceDirectory())
+ results = index.query_codebase_index(self.question)
+
+ code_snippets = ""
+
+ files = []
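+ # Each source node's first relationship value encodes its origin as "<filepath>::<node id>"; the part before "::" is the filepath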
+ for node in results.source_nodes:
+ resource_name = list(node.node.relationships.values())[0]
+ filepath = resource_name[: resource_name.index("::")]
+ files.append(filepath)
+ code_snippets += f"""{filepath}\n```\n{node.node.text}\n```\n\n"""
+
+ prompt = dedent(
+ f"""Here are a few snippets of code that might be useful in answering the question:
+
+ {code_snippets}
+
+ Here is the question to answer:
+
+ {self.question}
+
+ Here is the answer:"""
+ )
+
+ answer = await sdk.models.summarize.complete(prompt)
+ # Make paths relative to the workspace directory
+ answer = answer.replace(await sdk.ide.getWorkspaceDirectory(), "")
+
+ self._answer = answer
+
+ await sdk.ide.setFileOpen(files[0])
+
+
+class EditFileChroma(Step):
+ request: str
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ index = ChromaIndexManager(await sdk.ide.getWorkspaceDirectory())
+ results = index.query_codebase_index(self.request)
+
+ resource_name = list(results.source_nodes[0].node.relationships.values())[0]
+ filepath = resource_name[: resource_name.index("::")]
+
+ await sdk.run_step(
+ EditFileStep(
+ filepath=filepath,
+ prompt=f"Here is the code:\n\n{{code}}\n\nHere is the user request:\n\n{self.request}\n\nHere is the code after making the requested changes:\n",
+ )
+ )
diff --git a/server/continuedev/plugins/steps/clear_history.py b/server/continuedev/plugins/steps/clear_history.py
new file mode 100644
index 00000000..8f21518b
--- /dev/null
+++ b/server/continuedev/plugins/steps/clear_history.py
@@ -0,0 +1,10 @@
+from ...core.main import Step
+from ...core.sdk import ContinueSDK
+
+
+class ClearHistoryStep(Step):
+ name: str = "Clear History"
+ hide: bool = True
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.clear_history()
diff --git a/server/continuedev/plugins/steps/cmd.py b/server/continuedev/plugins/steps/cmd.py
new file mode 100644
index 00000000..a38f6323
--- /dev/null
+++ b/server/continuedev/plugins/steps/cmd.py
@@ -0,0 +1,30 @@
+from textwrap import dedent
+from typing import Coroutine
+
+from ...core.main import Step
+from ...core.observation import Observation
+from ...core.sdk import ContinueSDK
+from ...libs.util.strings import remove_quotes_and_escapes
+
+
+class GenerateShellCommandStep(Step):
+ user_input: str
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ cmd = await sdk.models.default.complete(
+ dedent(
+ f"""\
+ The user has made a request to run a shell command. Their description of what it should do is:
+
+ "{self.user_input}"
+
+ Please write a shell command that will do what the user requested. Your output should consist of only the command itself, without any explanation or example output. Do not use any newlines. Only output the command that when inserted into the terminal will do precisely what was requested.
+ """
+ )
+ )
+
+ cmd = remove_quotes_and_escapes(cmd.strip()).replace("\n", "").replace("\r", "")
+
+ await sdk.ide.runCommand(cmd)
+
+ self.description = f"Generated shell command: {cmd}"
diff --git a/server/continuedev/plugins/steps/comment_code.py b/server/continuedev/plugins/steps/comment_code.py
new file mode 100644
index 00000000..1eee791d
--- /dev/null
+++ b/server/continuedev/plugins/steps/comment_code.py
@@ -0,0 +1,16 @@
+from ...core.main import ContinueSDK, Models, Step
+from .main import EditHighlightedCodeStep
+
+
+class CommentCodeStep(Step):
+ hide: bool = True
+
+ async def describe(self, models: Models):
+ return "Writing comments"
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.run_step(
+ EditHighlightedCodeStep(
+ user_input="Write comprehensive comments in the canonical format for every class and function"
+ )
+ )
diff --git a/server/continuedev/plugins/steps/custom_command.py b/server/continuedev/plugins/steps/custom_command.py
new file mode 100644
index 00000000..4128415b
--- /dev/null
+++ b/server/continuedev/plugins/steps/custom_command.py
@@ -0,0 +1,29 @@
+from ...core.main import Step
+from ...core.sdk import ContinueSDK, Models
+from ...libs.util.templating import render_templated_string
+from ..steps.chat import SimpleChatStep
+
+
+class CustomCommandStep(Step):
+ name: str
+ prompt: str
+ user_input: str
+ slash_command: str
+ hide: bool = True
+
+ async def describe(self, models: Models):
+ return self.prompt
+
+ async def run(self, sdk: ContinueSDK):
+ task = render_templated_string(self.prompt)
+
+ prompt_user_input = f"Task: {task}. Additional info: {self.user_input}"
+ messages = await sdk.get_chat_context()
+ # Find the last chat message with this slash command and replace it with the user input
+ for i in range(len(messages) - 1, -1, -1):
+ if messages[i].role == "user" and messages[i].content.startswith(
+ self.slash_command
+ ):
+ messages[i] = messages[i].copy(update={"content": prompt_user_input})
+ break
+ await sdk.run_step(SimpleChatStep(messages=messages))
diff --git a/server/continuedev/plugins/steps/draft/abstract_method.py b/server/continuedev/plugins/steps/draft/abstract_method.py
new file mode 100644
index 00000000..7ceefe9b
--- /dev/null
+++ b/server/continuedev/plugins/steps/draft/abstract_method.py
@@ -0,0 +1,21 @@
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+
+
+class ImplementAbstractMethodStep(Step):
+ name: str = "Implement abstract method for all subclasses"
+ method_name: str
+ class_name: str
+
+ async def run(self, sdk: ContinueSDK):
+ if sdk.lsp is None:
+ self.description = "Language Server Protocol is not enabled"
+ return
+
+ implementations = await sdk.lsp.go_to_implementations(self.class_name)
+
+ for implementation in implementations:
+ await sdk.edit_file(
+ range_in_files=[implementation.range_in_file],
+ prompt=f"Implement method `{self.method_name}` for this subclass of `{self.class_name}`",
+ )
diff --git a/server/continuedev/plugins/steps/draft/redux.py b/server/continuedev/plugins/steps/draft/redux.py
new file mode 100644
index 00000000..83b5e592
--- /dev/null
+++ b/server/continuedev/plugins/steps/draft/redux.py
@@ -0,0 +1,50 @@
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....core.steps import EditFileStep
+
+
+class EditReduxStateStep(Step):
+ description: str # e.g. "I want to load data from the weatherapi.com API"
+
+ async def run(self, sdk: ContinueSDK):
+ # Find the right file to edit
+
+ # RootStore
+ store_filename = ""
+ await sdk.run_step(
+ EditFileStep(
+ filepath=store_filename,
+ prompt=f"Edit the root store to add a new slice for {self.description}",
+ )
+ )
+ store_file_contents = await sdk.ide.readFile(store_filename)
+
+ # Selector
+ selector_filename = ""
+ await sdk.run_step(
+ EditFileStep(
+ filepath=selector_filename,
+ prompt=f"Edit the selector to add a new property for {self.description}. The store looks like this: {store_file_contents}",
+ )
+ )
+
+ # Reducer
+ reducer_filename = ""
+ await sdk.run_step(
+ EditFileStep(
+ filepath=reducer_filename,
+ prompt=f"Edit the reducer to add a new property for {self.description}. The store looks like this: {store_file_contents}",
+ )
+ )
+ """
+ Starts with implementing selector
+ 1. RootStore
+ 2. Selector
+ 3. Reducer or entire slice
+
+ Need to first determine whether this is an:
+ 1. edit
+ 2. add new reducer and property in existing slice
+ 3. add whole new slice
+ 4. build redux from scratch
+ """
diff --git a/server/continuedev/plugins/steps/draft/typeorm.py b/server/continuedev/plugins/steps/draft/typeorm.py
new file mode 100644
index 00000000..c79fa041
--- /dev/null
+++ b/server/continuedev/plugins/steps/draft/typeorm.py
@@ -0,0 +1,54 @@
+from textwrap import dedent
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+
+
+class CreateTableStep(Step):
+ sql_str: str
+ name: str = "Create a table in TypeORM"
+
+ async def run(self, sdk: ContinueSDK):
+ # Write TypeORM entity
+ entity_name = self.sql_str.split(" ")[2].capitalize()
+ await sdk.edit_file(
+ f"src/entity/{entity_name}.ts",
+ dedent(
+ f"""\
+ {self.sql_str}
+
+ Write a TypeORM entity called {entity_name} for this table, importing as necessary:"""
+ ),
+ )
+
+ # Add entity to data-source.ts
+ await sdk.edit_file(
+ filepath="src/data-source.ts", prompt=f"Add the {entity_name} entity:"
+ )
+
+ # Generate blank migration for the entity
+ out = await sdk.run(
+ f"npx typeorm migration:create ./src/migration/Create{entity_name}Table"
+ )
+ migration_filepath = out.text.split(" ")[1]
+
+ # Wait for user input
+ await sdk.wait_for_user_confirmation("Fill in the migration?")
+
+ # Fill in the migration
+ await sdk.edit_file(
+ migration_filepath,
+ dedent(
+ f"""\
+ This is the table that was created:
+
+ {self.sql_str}
+
+ Fill in the migration for the table:"""
+ ),
+ )
+
+ # Run the migration
+ await sdk.run(
+ "npx typeorm-ts-node-commonjs migration:run -d ./src/data-source.ts"
+ )
diff --git a/server/continuedev/plugins/steps/feedback.py b/server/continuedev/plugins/steps/feedback.py
new file mode 100644
index 00000000..df1142a1
--- /dev/null
+++ b/server/continuedev/plugins/steps/feedback.py
@@ -0,0 +1,14 @@
+from ...core.main import Models, Step
+from ...core.sdk import ContinueSDK
+from ...libs.util.telemetry import posthog_logger
+
+
+class FeedbackStep(Step):
+ user_input: str
+ name = "Thanks for your feedback!"
+
+ async def describe(self, models: Models):
+ return f"`{self.user_input}`\n\nWe'll see your feedback and make improvements as soon as possible. If you'd like to directly email us, you can contact [nate@continue.dev](mailto:nate@continue.dev?subject=Feedback%20On%20Continue)."
+
+ async def run(self, sdk: ContinueSDK):
+ posthog_logger.capture_event("feedback", {"feedback": self.user_input})
diff --git a/server/continuedev/plugins/steps/find_and_replace.py b/server/continuedev/plugins/steps/find_and_replace.py
new file mode 100644
index 00000000..287e286d
--- /dev/null
+++ b/server/continuedev/plugins/steps/find_and_replace.py
@@ -0,0 +1,30 @@
+from ...core.main import Models, Step
+from ...core.sdk import ContinueSDK
+from ...models.filesystem_edit import FileEdit, Range
+
+
+class FindAndReplaceStep(Step):
+ name: str = "Find and replace"
+ filepath: str
+ pattern: str
+ replacement: str
+
+ async def describe(self, models: Models):
+ return f"Replaced all instances of `{self.pattern}` with `{self.replacement}` in `{self.filepath}`"
+
+ async def run(self, sdk: ContinueSDK):
+ file_content = await sdk.ide.readFile(self.filepath)
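+ # Replace one occurrence at a time: apply the edit through the IDE, then mirror it in the local copy so later indices stay correct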
+ while self.pattern in file_content:
+ start_index = file_content.index(self.pattern)
+ end_index = start_index + len(self.pattern)
+ await sdk.ide.applyFileSystemEdit(
+ FileEdit(
+ filepath=self.filepath,
+ range=Range.from_indices(file_content, start_index, end_index - 1),
+ replacement=self.replacement,
+ )
+ )
+ file_content = (
+ file_content[:start_index] + self.replacement + file_content[end_index:]
+ )
+ await sdk.ide.saveFile(self.filepath)
diff --git a/server/continuedev/plugins/steps/help.py b/server/continuedev/plugins/steps/help.py
new file mode 100644
index 00000000..148dddb8
--- /dev/null
+++ b/server/continuedev/plugins/steps/help.py
@@ -0,0 +1,70 @@
+from textwrap import dedent
+
+from ...core.main import ChatMessage, Step
+from ...core.sdk import ContinueSDK
+from ...libs.util.telemetry import posthog_logger
+
+help = dedent(
+ """\
+ Continue is an open-source coding autopilot. It is a VS Code extension that brings the power of ChatGPT to your IDE.
+
+ It gathers context for you and stores your interactions automatically, so that you can avoid copy/paste now and benefit from a customized Large Language Model (LLM) later.
+
+ Continue can be used to...
+ 1. Edit chunks of code with specific instructions (e.g. "/edit migrate this digital ocean terraform file into one that works for GCP")
+ 2. Get answers to questions without switching windows (e.g. "how do I find running process on port 8000?")
+ 3. Generate files from scratch (e.g. "/edit Create a Python CLI tool that uses the posthog api to get events from DAUs")
+
+ You tell Continue to edit a specific section of code by highlighting it. If you highlight multiple code sections, then it will only edit the one with the purple glow around it. You can switch which one has the purple glow by clicking the paint brush.
+
+ If you don't highlight any code, then Continue will insert at the location of your cursor.
+
+ Continue passes all of the code sections you highlight, the code above and below the highlighted section being edited, and all previous steps above the input box as context to the LLM.
+
+ You can use cmd+m (Mac) / ctrl+m (Windows) to open Continue. You can use cmd+shift+e / ctrl+shift+e to open the file Explorer. You can add your own OpenAI API key to VS Code Settings with `cmd+,`.
+
+ If Continue is stuck loading, try using `cmd+shift+p` to open the command palette, search "Reload Window", and then select it. This will reload VS Code and Continue and often fixes issues.
+
+ If you have feedback, please use /feedback to let us know how you would like to use Continue. We are excited to hear from you!"""
+)
+
+
+class HelpStep(Step):
+ name: str = "Help"
+ user_input: str
+ manage_own_chat_context: bool = True
+ description: str = ""
+
+ async def run(self, sdk: ContinueSDK):
+ question = self.user_input
+
+ if question.strip() == "":
+ self.description = help
+ else:
+ self.description = "The following output is generated by a language model, which may hallucinate. Type just '/help'to see a fixed answer. You can also learn more by reading [the docs](https://continue.dev/docs).\n\n"
+ prompt = dedent(
+ f"""
+ Information:
+
+ {help}
+
+ Instructions:
+
+ Please use the information above to provide a succinct answer to the following question: {question}
+
+ Do not cite any slash commands other than those you've been told about, which are: /edit and /feedback. Never refer or link to any URL."""
+ )
+
+ self.chat_context.append(
+ ChatMessage(role="user", content=prompt, summary="Help")
+ )
+ messages = await sdk.get_chat_context()
+ generator = sdk.models.default.stream_chat(messages)
+ async for chunk in generator:
+ if "content" in chunk:
+ self.description += chunk["content"]
+ await sdk.update_ui()
+
+ posthog_logger.capture_event(
+ "help", {"question": question, "answer": self.description}
+ )
diff --git a/server/continuedev/plugins/steps/input/nl_multiselect.py b/server/continuedev/plugins/steps/input/nl_multiselect.py
new file mode 100644
index 00000000..f4b5e7a6
--- /dev/null
+++ b/server/continuedev/plugins/steps/input/nl_multiselect.py
@@ -0,0 +1,32 @@
+from typing import List, Union
+
+from ....core.main import Step
+from ....core.sdk import ContinueSDK
+from ....core.steps import WaitForUserInputStep
+
+
+class NLMultiselectStep(Step):
+ hide: bool = True
+
+ prompt: str
+ options: List[str]
+
+ async def run(self, sdk: ContinueSDK):
+ user_response = (
+ await sdk.run_step(WaitForUserInputStep(prompt=self.prompt))
+ ).text
+
+ def extract_option(text: str) -> Union[str, None]:
+ for option in self.options:
+ if option in text:
+ return option
+ return None
+
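+ # Try a literal substring match first; only fall back to the LLM if none of the options appear in the response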
+ first_try = extract_option(user_response.lower())
+ if first_try is not None:
+ return first_try
+
+ gpt_parsed = await sdk.models.default.complete(
+ f"These are the available options are: [{', '.join(self.options)}]. The user requested {user_response}. This is the exact string from the options array that they selected:"
+ )
+ return extract_option(gpt_parsed) or self.options[0]
diff --git a/server/continuedev/plugins/steps/main.py b/server/continuedev/plugins/steps/main.py
new file mode 100644
index 00000000..936fd7e0
--- /dev/null
+++ b/server/continuedev/plugins/steps/main.py
@@ -0,0 +1,422 @@
+import os
+from textwrap import dedent
+from typing import Coroutine, List, Optional, Union
+
+from pydantic import BaseModel, Field
+
+from ...core.main import ContinueCustomException, Step
+from ...core.observation import Observation
+from ...core.sdk import ContinueSDK, Models
+from ...core.steps import DefaultModelEditCodeStep
+from ...libs.llm.base import LLM
+from ...libs.llm.prompt_utils import MarkdownStyleEncoderDecoder
+from ...libs.util.calculate_diff import calculate_diff2
+from ...libs.util.logging import logger
+from ...models.filesystem import RangeInFile, RangeInFileWithContents
+from ...models.filesystem_edit import EditDiff, FileEdit
+from ...models.main import Range, Traceback
+
+
+class Policy(BaseModel):
+ pass
+
+
+class RunPolicyUntilDoneStep(Step):
+ policy: "Policy"
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ next_step = self.policy.next(sdk.config, sdk.history)
+ while next_step is not None:
+ observation = await sdk.run_step(next_step)
+ next_step = self.policy.next(sdk.config, sdk.history)
+ return observation
+
+
+class FasterEditHighlightedCodeStep(Step):
+ user_input: str
+ hide = True
+ _completion: str = "Edit Code"
+ _edit_diffs: Union[List[EditDiff], None] = None
+ _prompt: str = dedent(
+ """\
+ You will be given code to edit in order to perfectly satisfy the user request. All the changes you make must be described as replacements, which you should format in the following way:
+ FILEPATH
+ <FILE_TO_EDIT>
+ REPLACE_ME
+ <CODE_TO_REPLACE>
+ REPLACE_WITH
+ <CODE_TO_REPLACE_WITH>
+
+ where <CODE_TO_REPLACE> and <CODE_TO_REPLACE_WITH> can be multiple lines, but should be the minimum needed to make the edit. Be sure to maintain existing whitespace at the start of lines.
+
+ For example, if you want to replace the code `x = 1` with `x = 2` in main.py, you would write:
+ FILEPATH
+ main.py
+ REPLACE_ME
+ x = 1
+ REPLACE_WITH
+ x = 2
+ If you wanted to delete the code
+ ```
+ def sum(a, b):
+ return a + b
+ ```
+ in main.py, you would write:
+ FILEPATH
+ main.py
+ REPLACE_ME
+ def sum(a, b):
+ return a + b
+ REPLACE_WITH
+
+ You may need to make multiple edits; respond with exactly as many as needed.
+
+ Below is the code before changes:
+
+ {code}
+
+ This is the user request: "{user_input}"
+ Here is the description of changes to make:
+"""
+ )
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return "Editing highlighted code"
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ range_in_files = await sdk.get_code_context(only_editing=True)
+ if len(range_in_files) == 0:
+ # Get the full contents of all visible files
+ files = await sdk.ide.getVisibleFiles()
+ contents = {}
+ for file in files:
+ contents[file] = await sdk.ide.readFile(file)
+
+ range_in_files = [
+ RangeInFileWithContents.from_entire_file(filepath, content)
+ for filepath, content in contents.items()
+ ]
+
+ enc_dec = MarkdownStyleEncoderDecoder(range_in_files)
+ code_string = enc_dec.encode()
+ prompt = self._prompt.format(code=code_string, user_input=self.user_input)
+
+ rif_dict = {}
+ for rif in range_in_files:
+ rif_dict[rif.filepath] = rif.contents
+
+ completion = await sdk.models.summarize.complete(prompt)
+
+ # Temporarily doing this to generate description.
+ self._prompt = prompt
+ self._completion = completion
+ logger.debug(completion)
+
+ # ALTERNATIVE DECODING STEP HERE
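+ # Parse the completion as a small state machine over the FILEPATH / REPLACE_ME / REPLACE_WITH markers, accumulating one edit dict per block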
+ raw_file_edits = []
+ lines = completion.split("\n")
+ current_edit = {}
+ status = "FILEPATH"
+ for i in range(0, len(lines)):
+ line = lines[i]
+ if line == "FILEPATH":
+ if "FILEPATH" in current_edit:
+ raw_file_edits.append(current_edit)
+ current_edit = {}
+ status = "FILEPATH"
+ elif line == "REPLACE_ME":
+ status = "REPLACE_ME"
+ elif line == "REPLACE_WITH":
+ status = "REPLACE_WITH"
+ elif status == "FILEPATH":
+ current_edit["filepath"] = line
+ elif status == "REPLACE_ME":
+ if "replace_me" in current_edit:
+ current_edit["replace_me"] += "\n" + line
+ else:
+ current_edit["replace_me"] = line
+ elif status == "REPLACE_WITH":
+ if "replace_with" in current_edit:
+ current_edit["replace_with"] += "\n" + line
+ else:
+ current_edit["replace_with"] = line
+ if "filepath" in current_edit:
+ raw_file_edits.append(current_edit)
+
+ file_edits = []
+ for edit in raw_file_edits:
+ filepath = edit["filepath"]
+ replace_me = edit["replace_me"]
+ replace_with = edit["replace_with"]
+ file_edits.append(
+ FileEdit(
+ filepath=filepath,
+ range=Range.from_lines_snippet_in_file(
+ content=rif_dict[filepath], snippet=replace_me
+ ),
+ replacement=replace_with,
+ )
+ )
+ # ------------------------------
+
+ self._edit_diffs = []
+ for file_edit in file_edits:
+ diff = await sdk.apply_filesystem_edit(file_edit)
+ self._edit_diffs.append(diff)
+
+ for filepath in set([file_edit.filepath for file_edit in file_edits]):
+ await sdk.ide.saveFile(filepath)
+ await sdk.ide.setFileOpen(filepath)
+
+ return None
+
+
+class StarCoderEditHighlightedCodeStep(Step):
+ user_input: str
+ name: str = "Editing Code"
+ hide = False
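+ # StarCoder's git-commit prompt format: the existing code goes before <commit_msg>, the user request is the commit message, and the model generates the edited code after <commit_after>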
+ _prompt: str = "<commit_before>{code}<commit_msg>{user_request}<commit_after>"
+
+ _prompt_and_completion: str = ""
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return await models.summarize.complete(
+ f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:"
+ )
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ range_in_files = await sdk.get_code_context(only_editing=True)
+ found_highlighted_code = len(range_in_files) > 0
+ if not found_highlighted_code:
+ # Get the full contents of all visible files
+ files = await sdk.ide.getVisibleFiles()
+ contents = {}
+ for file in files:
+ contents[file] = await sdk.ide.readFile(file)
+
+ range_in_files = [
+ RangeInFileWithContents.from_entire_file(filepath, content)
+ for filepath, content in contents.items()
+ ]
+
+ rif_dict = {}
+ for rif in range_in_files:
+ rif_dict[rif.filepath] = rif.contents
+
+ for rif in range_in_files:
+ prompt = self._prompt.format(
+ code=rif.contents, user_request=self.user_input
+ )
+
+ if found_highlighted_code:
+ full_file_contents = await sdk.ide.readFile(rif.filepath)
+ segs = full_file_contents.split(rif.contents)
+ prompt = f"<file_prefix>{segs[0]}<file_suffix>{segs[1]}" + prompt
+
+ completion = str(await sdk.models.starcoder.complete(prompt))
+ eot_token = "<|endoftext|>"
+ completion = completion.removesuffix(eot_token)
+
+ if found_highlighted_code:
+ rif.contents = segs[0] + rif.contents + segs[1]
+ completion = segs[0] + completion + segs[1]
+
+ self._prompt_and_completion += prompt + completion
+
+ edits = calculate_diff2(
+ rif.filepath, rif.contents, completion.removesuffix("\n")
+ )
+ for edit in edits:
+ await sdk.ide.applyFileSystemEdit(edit)
+
+ # await sdk.ide.applyFileSystemEdit(
+ # FileEdit(filepath=rif.filepath, range=rif.range, replacement=completion))
+ await sdk.ide.saveFile(rif.filepath)
+ await sdk.ide.setFileOpen(rif.filepath)
+
+
+def decode_escaped_path(path: str) -> str:
+ """We use a custom escaping scheme to record the full path of a file as a
+ corresponding basename, but without URL encoding, because then the URI just gets
+ interpreted as a full path again."""
+ return path.replace("$f$", "/").replace("$b$", "\\")
+
+
+def encode_escaped_path(path: str) -> str:
+ """We use a custom escaping scheme to record the full path of a file as a
+ corresponding basename, but without URL encoding, because then the URI just gets
+ interpreted as a full path again."""
+ return path.replace("/", "$f$").replace("\\", "$b$")
+
+
+class EditAlreadyEditedRangeStep(Step):
+ hide = True
+ model: Optional[LLM] = None
+ range_in_file: RangeInFile
+
+ user_input: str
+
+ _prompt = dedent(
+ """\
+ You were previously asked to edit this code. The request was:
+
+ "{prev_user_input}"
+
+ And you generated this diff:
+
+ {diff}
+
+ Could you please re-edit this code to follow these secondary instructions?
+
+ "{user_input}"
+ """
+ )
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
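+ # Files in ~/.continue/diffs are named with the original path escaped into the basename; decode it back to the real filepath and reuse the last edited range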
+ if os.path.basename(self.range_in_file.filepath) in os.listdir(
+ os.path.expanduser(os.path.join("~", ".continue", "diffs"))
+ ):
+ decoded_basename = decode_escaped_path(
+ os.path.basename(self.range_in_file.filepath)
+ )
+ self.range_in_file.filepath = decoded_basename
+
+ self.range_in_file.range = sdk.context.get("last_edit_range")
+
+ if self.range_in_file.range.start == self.range_in_file.range.end:
+ self.range_in_file.range = Range.from_entire_file(
+ await sdk.ide.readFile(self.range_in_file.filepath)
+ )
+
+ await sdk.run_step(
+ DefaultModelEditCodeStep(
+ model=self.model,
+ user_input=self._prompt.format(
+ prev_user_input=sdk.context.get("last_edit_user_input"),
+ diff=sdk.context.get("last_edit_diff"),
+ user_input=self.user_input,
+ ),
+ range_in_files=[self.range_in_file],
+ )
+ )
+
+
+class EditHighlightedCodeStep(Step):
+ user_input: str = Field(
+ ...,
+ title="User Input",
+ description="The natural language request describing how to edit the code",
+ )
+ model: Optional[LLM] = None
+ hide = True
+ description: str = "Change the contents of the currently highlighted code or open file. You should call this function if the user asks seems to be asking for a code change."
+
+ summary_prompt: Optional[str] = None
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return "Editing code"
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ range_in_files = await sdk.get_code_context(only_editing=True)
+
+ # If nothing highlighted, insert at the cursor if possible
+ if len(range_in_files) == 0:
+ highlighted_code = await sdk.ide.getHighlightedCode()
+ if highlighted_code is not None:
+ for rif in highlighted_code:
+ if rif.range.start == rif.range.end:
+ range_in_files.append(
+ RangeInFileWithContents.from_range_in_file(rif, "")
+ )
+
+ # If still no highlighted code, raise error
+ if len(range_in_files) == 0:
+ raise ContinueCustomException(
+ message="Please highlight some code and try again.",
+ title="No Code Selected (highlight and select with cmd+shift+M)",
+ )
+
+ # If all of the ranges are point ranges, only edit the last one
+ if all([rif.range.start == rif.range.end for rif in range_in_files]):
+ range_in_files = [range_in_files[-1]]
+
+ range_in_files = list(
+ map(
+ lambda x: RangeInFile(filepath=x.filepath, range=x.range),
+ range_in_files,
+ )
+ )
+
+ for range_in_file in range_in_files:
+ # Check whether re-editing
+ if (
+ os.path.dirname(range_in_file.filepath)
+ == os.path.expanduser(os.path.join("~", ".continue", "diffs"))
+ or encode_escaped_path(range_in_file.filepath)
+ in os.listdir(
+ os.path.expanduser(os.path.join("~", ".continue", "diffs"))
+ )
+ ) and sdk.context.get("last_edit_user_input") is not None:
+ await sdk.run_step(
+ EditAlreadyEditedRangeStep(
+ range_in_file=range_in_file,
+ user_input=self.user_input,
+ model=self.model,
+ )
+ )
+ return
+
+ args = {
+ "user_input": self.user_input,
+ "range_in_files": range_in_files,
+ "model": self.model,
+ }
+ if self.summary_prompt:
+ args["summary_prompt"] = self.summary_prompt
+
+ await sdk.run_step(DefaultModelEditCodeStep(**args))
+
+
+class UserInputStep(Step):
+ user_input: str
+
+
+class SolveTracebackStep(Step):
+ traceback: Traceback
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return f"```\n{self.traceback.full_traceback}\n```"
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ prompt = dedent(
+ """I ran into this problem with my Python code:
+
+ {traceback}
+
+ Below are the files that might need to be fixed:
+
+ {code}
+
+ This is what the code should be in order to avoid the problem:
+ """
+ ).format(traceback=self.traceback.full_traceback, code="{code}")
+
+ range_in_files = []
+ for frame in self.traceback.frames:
+ content = await sdk.ide.readFile(frame.filepath)
+ range_in_files.append(RangeInFile.from_entire_file(frame.filepath, content))
+
+ await sdk.run_step(
+ DefaultModelEditCodeStep(range_in_files=range_in_files, user_input=prompt)
+ )
+ return None
+
+
+class EmptyStep(Step):
+ hide: bool = True
+
+ async def describe(self, models: Models) -> Coroutine[str, None, None]:
+ return ""
+
+ async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
+ pass
diff --git a/server/continuedev/plugins/steps/on_traceback.py b/server/continuedev/plugins/steps/on_traceback.py
new file mode 100644
index 00000000..b72ce809
--- /dev/null
+++ b/server/continuedev/plugins/steps/on_traceback.py
@@ -0,0 +1,206 @@
+import os
+from textwrap import dedent
+from typing import Dict, List, Optional, Tuple
+
+from ...core.main import ChatMessage, ContinueCustomException, Step
+from ...core.sdk import ContinueSDK
+from ...core.steps import UserInputStep
+from ...libs.util.filter_files import should_filter_path
+from ...libs.util.traceback.traceback_parsers import (
+ get_javascript_traceback,
+ get_python_traceback,
+ parse_python_traceback,
+)
+from ...models.filesystem import RangeInFile
+from ...models.main import Range, Traceback, TracebackFrame
+from .chat import SimpleChatStep
+
+
+def extract_traceback_str(output: str) -> Optional[str]:
+ tb = output.strip()
+ for tb_parser in [get_python_traceback, get_javascript_traceback]:
+ if parsed_tb := tb_parser(tb):
+ return parsed_tb
+
+
+class DefaultOnTracebackStep(Step):
+ output: str
+ name: str = "Help With Traceback"
+ hide: bool = True
+
+ async def find_relevant_files(self, sdk: ContinueSDK):
+ # Add context for any files in the traceback that are in the workspace
+ for line in self.output.split("\n"):
+ segs = line.split(" ")
+ for seg in segs:
+ if (
+ seg.startswith(os.path.sep)
+ and os.path.exists(seg) # TODO: Use sdk.ide.fileExists
+ and os.path.commonprefix([seg, sdk.ide.workspace_directory])
+ == sdk.ide.workspace_directory
+ ):
+ file_contents = await sdk.ide.readFile(seg)
+ self.chat_context.append(
+ ChatMessage(
+ role="user",
+ content=f"The contents of {seg}:\n```\n{file_contents}\n```",
+ summary="",
+ )
+ )
+ # TODO: The ideal is that these are added as context items, so then the user can see them
+ # And this function is where you can get arbitrarily fancy about adding context
+
+ async def run(self, sdk: ContinueSDK):
+ if self.output.strip() == "":
+ raise ContinueCustomException(
+ title="No terminal open",
+ message="You must have a terminal open in order to automatically debug with Continue.",
+ )
+
+ if get_python_traceback(self.output) is not None and sdk.lsp is not None:
+ await sdk.run_step(SolvePythonTracebackStep(output=self.output))
+ return
+
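+ # Fall back to the raw output (last 8000 characters) if no traceback could be parsed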
+ tb = extract_traceback_str(self.output) or self.output[-8000:]
+
+ await sdk.run_step(
+ UserInputStep(
+ user_input=f"""I got the following error, can you please help explain how to fix it?\n\n{tb}""",
+ )
+ )
+ await sdk.run_step(SimpleChatStep(name="Help With Traceback"))
+
+
+def filter_frames(frames: List[TracebackFrame]) -> List[TracebackFrame]:
+ """Filter out frames that are not relevant to the user's code."""
+ return list(filter(lambda x: should_filter_path(x.filepath), frames))
+
+
+def find_external_call(
+ frames: List[TracebackFrame],
+) -> Optional[Tuple[TracebackFrame, TracebackFrame]]:
+ """Moving up from the bottom of the stack, if the frames are not user code, then find the last frame before it becomes user code."""
+ if not should_filter_path(frames[-1].filepath):
+ # No external call, error comes directly from user code
+ return None
+
+ for i in range(len(frames) - 2, -1, -1):
+ if not should_filter_path(frames[i].filepath):
+ return frames[i], frames[i + 1]
+
+
+def get_func_source_for_frame(frame: Dict) -> str:
+ """Get the source for the function called in the frame."""
+ pass
+
+
+async def fetch_docs_for_external_call(external_call: Dict, next_frame: Dict) -> str:
+ """Fetch docs for the external call."""
+ pass
+
+
+class SolvePythonTracebackStep(Step):
+ output: str
+ name: str = "Solve Traceback"
+ hide: bool = True
+
+ async def external_call_prompt(
+ self, sdk: ContinueSDK, external_call: Tuple[Dict, Dict], tb_string: str
+ ) -> str:
+ external_call, next_frame = external_call
+ source_line = external_call["source_line"]
+ external_func_source = get_func_source_for_frame(next_frame)
+ docs = await fetch_docs_for_external_call(external_call, next_frame)
+
+ prompt = dedent(
+ f"""\
+ I got the following error:
+
+ {tb_string}
+
+ I tried to call an external library like this:
+
+ ```python
+ {source_line}
+ ```
+
+ This is the definition of the function I tried to call:
+
+ ```python
+ {external_func_source}
+ ```
+
+ Here's the documentation for the external library I tried to call:
+
+ {docs}
+
+ Explain how to fix the error.
+ """
+ )
+
+ return prompt
+
+ async def normal_traceback_prompt(
+ self, sdk: ContinueSDK, tb: Traceback, tb_string: str
+ ) -> str:
+ function_bodies = await get_functions_from_traceback(tb, sdk)
+
+ prompt = (
+ "Here are the functions from the traceback (most recent call last):\n\n"
+ )
+ for i, function_body in enumerate(function_bodies):
+ prompt += f'File "{tb.frames[i].filepath}", line {tb.frames[i].lineno}, in {tb.frames[i].function}\n\n```python\n{function_body or tb.frames[i].code}\n```\n\n'
+
+ prompt += (
+ "Here is the traceback:\n\n```\n"
+ + tb_string
+ + "\n```\n\nExplain how to fix the error."
+ )
+
+ return prompt
+
+ async def run(self, sdk: ContinueSDK):
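+ # Parse the Python traceback, then decide whether the error originates in an external library call or directly in user code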
+ tb_string = get_python_traceback(self.output)
+ tb = parse_python_traceback(tb_string)
+
+ if external_call := find_external_call(tb.frames):
+ prompt = await self.external_call_prompt(sdk, external_call, tb_string)
+ else:
+ prompt = await self.normal_traceback_prompt(sdk, tb, tb_string)
+
+ await sdk.run_step(
+ UserInputStep(
+ user_input=prompt,
+ )
+ )
+ await sdk.run_step(SimpleChatStep(name="Help With Traceback"))
+
+
+async def get_function_body(frame: TracebackFrame, sdk: ContinueSDK) -> Optional[str]:
+ """Get the function body from the traceback frame."""
+ if sdk.lsp is None:
+ return None
+
+ document_symbols = await sdk.lsp.document_symbol(frame.filepath)
+ for symbol in document_symbols:
+ if symbol.name == frame.function:
+ r = symbol.location.range
+ return await sdk.ide.readRangeInFile(
+ RangeInFile(
+ filepath=frame.filepath,
+ range=Range.from_shorthand(
+ r.start.line, r.start.character, r.end.line, r.end.character
+ ),
+ )
+ )
+ return None
+
+
+async def get_functions_from_traceback(tb: Traceback, sdk: ContinueSDK) -> List[str]:
+ """Get the function bodies from the traceback."""
+ function_bodies = []
+ for frame in tb.frames:
+ if frame.function:
+ function_bodies.append(await get_function_body(frame, sdk))
+
+ return function_bodies
diff --git a/server/continuedev/plugins/steps/open_config.py b/server/continuedev/plugins/steps/open_config.py
new file mode 100644
index 00000000..c57939f8
--- /dev/null
+++ b/server/continuedev/plugins/steps/open_config.py
@@ -0,0 +1,17 @@
+from textwrap import dedent
+
+from ...core.main import Step
+from ...core.sdk import ContinueSDK
+from ...libs.util.paths import getConfigFilePath
+
+
+class OpenConfigStep(Step):
+ name: str = "Open config"
+
+ async def describe(self, models):
+ return dedent(
+ 'Read [the docs](https://continue.dev/docs/customization/overview) to learn more about how you can customize Continue using `"config.py"`.'
+ )
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.ide.setFileOpen(getConfigFilePath())
diff --git a/server/continuedev/plugins/steps/react.py b/server/continuedev/plugins/steps/react.py
new file mode 100644
index 00000000..1b9bc265
--- /dev/null
+++ b/server/continuedev/plugins/steps/react.py
@@ -0,0 +1,44 @@
+from textwrap import dedent
+from typing import List, Tuple, Union
+
+from ...core.main import Step
+from ...core.sdk import ContinueSDK
+
+
+class NLDecisionStep(Step):
+ user_input: str
+ default_step: Union[Step, None] = None
+ steps: List[Tuple[Step, str]]
+
+ hide: bool = False
+ name: str = "Deciding what to do next"
+
+ async def run(self, sdk: ContinueSDK):
+ step_descriptions = "\n".join(
+ [f"- {step[0].name}: {step[1]}" for step in self.steps]
+ )
+ prompt = dedent(
+ f"""\
+ The following steps are available, in the format "- [step name]: [step description]":
+ {step_descriptions}
+
+ The user gave the following input:
+
+ {self.user_input}
+
+ Select the step which should be taken next to satisfy the user input. Say only the name of the selected step. You must choose one:"""
+ )
+
+ resp = (await sdk.models.summarize.complete(prompt)).lower()
+
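+ # Choose the step whose name appears in the model's response, falling back to the default step or the first option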
+ step_to_run = None
+ for step in self.steps:
+ if step[0].name.lower() in resp:
+ step_to_run = step[0]
+
+ step_to_run = step_to_run or self.default_step or self.steps[0]
+
+ self.hide = True
+ await sdk.update_ui()
+
+ await sdk.run_step(step_to_run)
diff --git a/server/continuedev/plugins/steps/refactor.py b/server/continuedev/plugins/steps/refactor.py
new file mode 100644
index 00000000..56e9e09e
--- /dev/null
+++ b/server/continuedev/plugins/steps/refactor.py
@@ -0,0 +1,136 @@
+import asyncio
+from typing import List, Optional
+
+from ripgrepy import Ripgrepy
+
+from ...core.main import Step
+from ...core.models import Models
+from ...core.sdk import ContinueSDK
+from ...libs.llm.prompts.edit import simplified_edit_prompt
+from ...libs.util.ripgrep import get_rg_path
+from ...libs.util.strings import remove_quotes_and_escapes, strip_code_block
+from ...libs.util.templating import render_prompt_template
+from ...models.filesystem import RangeInFile
+from ...models.filesystem_edit import FileEdit
+from ...models.main import PositionInFile, Range
+
+
+class RefactorReferencesStep(Step):
+ name: str = "Refactor references of a symbol"
+ user_input: str
+ symbol_location: PositionInFile
+
+ async def describe(self, models: Models):
+ return f"Renamed all instances of `{self.function_name}` to `{self.new_function_name}` in `{self.filepath}`"
+
+ async def run(self, sdk: ContinueSDK):
+ while sdk.lsp is None or not sdk.lsp.ready:
+ await asyncio.sleep(0.1)
+
+ references = await sdk.lsp.find_references(
+ self.symbol_location.position, self.symbol_location.filepath, False
+ )
+ await sdk.run_step(
+ ParallelEditStep(user_input=self.user_input, range_in_files=references)
+ )
+
+
+class RefactorBySearchStep(Step):
+ name: str = "Refactor by search"
+
+ pattern: str
+ user_input: str
+
+ rg_path: Optional[str] = None
+ "Optional path to ripgrep executable"
+
+ def get_range_for_result(self, result) -> RangeInFile:
+ pass
+
+ async def run(self, sdk: ContinueSDK):
+ rg = Ripgrepy(
+ self.pattern,
+ sdk.ide.workspace_directory,
+ rg_path=self.rg_path or get_rg_path(),
+ )
+
+ results = rg.I().context(2).run()
+ range_in_files = [self.get_range_for_result(result) for result in results]
+
+ await sdk.run_step(
+ ParallelEditStep(user_input=self.user_input, range_in_files=range_in_files)
+ )
+
+
+class ParallelEditStep(Step):
+ name: str = "Edit multiple ranges in parallel"
+ user_input: str
+ range_in_files: List[RangeInFile]
+
+ hide: bool = True
+
+ async def single_edit(self, sdk: ContinueSDK, range_in_file: RangeInFile):
+ # TODO: Can use folding info to get a more intuitively shaped range
+ expanded_range = await sdk.lsp.get_enclosing_folding_range(range_in_file)
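+ # If the LSP gives no enclosing folding range starting on the same line, fall back to editing the full lines of the original range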
+ if (
+ expanded_range is None
+ or expanded_range.range.start.line != range_in_file.range.start.line
+ ):
+ expanded_range = Range.from_shorthand(
+ range_in_file.range.start.line, 0, range_in_file.range.end.line + 1, 0
+ )
+ else:
+ expanded_range = expanded_range.range
+
+ new_rif = RangeInFile(
+ filepath=range_in_file.filepath,
+ range=expanded_range,
+ )
+ code_to_edit = await sdk.ide.readRangeInFile(range_in_file=new_rif)
+
+ # code_to_edit, common_whitespace = dedent_and_get_common_whitespace(code_to_edit)
+
+ prompt = render_prompt_template(
+ simplified_edit_prompt,
+ history=[],
+ other_data={
+ "code_to_edit": code_to_edit,
+ "user_input": self.user_input,
+ },
+ )
+ print(prompt + "\n\n-------------------\n\n")
+
+ new_code = await sdk.models.edit.complete(prompt=prompt)
+ new_code = strip_code_block(remove_quotes_and_escapes(new_code)) + "\n"
+ # new_code = (
+ # "\n".join([common_whitespace + line for line in new_code.split("\n")])
+ # + "\n"
+ # )
+
+ print(new_code + "\n\n-------------------\n\n")
+
+ await sdk.ide.applyFileSystemEdit(
+ FileEdit(
+ filepath=range_in_file.filepath,
+ range=expanded_range,
+ replacement=new_code,
+ )
+ )
+
+ async def edit_file(self, sdk: ContinueSDK, filepath: str):
+ ranges_in_file = [
+ range_in_file
+ for range_in_file in self.range_in_files
+ if range_in_file.filepath == filepath
+ ]
+ # Sort in reverse order so that we don't mess up the ranges
+ ranges_in_file.sort(key=lambda x: x.range.start.line, reverse=True)
+ for i in range(len(ranges_in_file)):
+ await self.single_edit(sdk=sdk, range_in_file=ranges_in_file[i])
+
+ async def run(self, sdk: ContinueSDK):
+ tasks = []
+ for filepath in set([rif.filepath for rif in self.range_in_files]):
+ tasks.append(self.edit_file(sdk=sdk, filepath=filepath))
+
+ await asyncio.gather(*tasks)
diff --git a/server/continuedev/plugins/steps/search_directory.py b/server/continuedev/plugins/steps/search_directory.py
new file mode 100644
index 00000000..83516719
--- /dev/null
+++ b/server/continuedev/plugins/steps/search_directory.py
@@ -0,0 +1,84 @@
+import asyncio
+import os
+import re
+from textwrap import dedent
+from typing import List, Union
+
+from ...core.main import Step
+from ...core.sdk import ContinueSDK
+from ...libs.util.create_async_task import create_async_task
+from ...models.filesystem import RangeInFile
+from ...models.main import Range
+
+# Already have some code for this somewhere
+IGNORE_DIRS = ["env", "venv", ".venv"]
+IGNORE_FILES = [".env"]
+
+
+def find_all_matches_in_dir(pattern: str, dirpath: str) -> List[RangeInFile]:
+ range_in_files = []
+ for root, dirs, files in os.walk(dirpath):
+ dirname = os.path.basename(root)
+ if dirname.startswith(".") or dirname in IGNORE_DIRS:
+ continue # continue!
+ for file in files:
+ if file in IGNORE_FILES:
+ continue # pun intended
+ with open(os.path.join(root, file), "r") as f:
+ # Find the index of all occurrences of the pattern in the file. Use re.
+ file_content = f.read()
+ results = re.finditer(pattern, file_content)
+ range_in_files += [
+ RangeInFile(
+ filepath=os.path.join(root, file),
+ range=Range.from_indices(
+ file_content, result.start(), result.end()
+ ),
+ )
+ for result in results
+ ]
+
+ return range_in_files
+
+
+class WriteRegexPatternStep(Step):
+ user_request: str
+
+ async def run(self, sdk: ContinueSDK):
+ # Ask the user for a regex pattern
+ pattern = await sdk.models.summarize.complete(
+ dedent(
+ f"""\
+ This is the user request:
+
+ {self.user_request}
+
+ Please write either a regex pattern or just a string that can be used with Python's re module to find all matches requested by the user. It will be used as `re.findall(<PATTERN_YOU_WILL_WRITE>, file_content)`. Your output should be only the regex or string, nothing else:"""
+ )
+ )
+
+ return pattern
+
+
+class EditAllMatchesStep(Step):
+ pattern: str
+ user_request: str
+ directory: Union[str, None] = None
+
+ async def run(self, sdk: ContinueSDK):
+ # Search all files for a given string
+ range_in_files = find_all_matches_in_dir(
+ self.pattern, self.directory or await sdk.ide.getWorkspaceDirectory()
+ )
+
+ tasks = [
+ create_async_task(
+ sdk.edit_file(
+ range=range_in_file.range,
+ filename=range_in_file.filepath,
+ prompt=self.user_request,
+ )
+ )
+ for range_in_file in range_in_files
+ ]
+ await asyncio.gather(*tasks)
diff --git a/server/continuedev/plugins/steps/setup_model.py b/server/continuedev/plugins/steps/setup_model.py
new file mode 100644
index 00000000..87e52f1b
--- /dev/null
+++ b/server/continuedev/plugins/steps/setup_model.py
@@ -0,0 +1,38 @@
+from ...core.main import Step
+from ...core.sdk import ContinueSDK
+from ...libs.util.paths import getConfigFilePath
+from ...models.filesystem import RangeInFile
+from ...models.main import Range
+
+MODEL_CLASS_TO_MESSAGE = {
+ "OpenAI": "Obtain your OpenAI API key from [here](https://platform.openai.com/account/api-keys) and paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then reload the VS Code window for changes to take effect.",
+ "OpenAIFreeTrial": "To get started with OpenAI models, obtain your OpenAI API key from [here](https://platform.openai.com/account/api-keys) and paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then reload the VS Code window for changes to take effect.",
+ "AnthropicLLM": "To get started with Anthropic, you first need to sign up for the beta [here](https://claude.ai/login) to obtain an API key. Once you have the key, paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then reload the VS Code window for changes to take effect.",
+ "ReplicateLLM": "To get started with Replicate, sign up to obtain an API key [here](https://replicate.ai/), then paste it into the `api_key` field at config.models.default.api_key in `config.py`.",
+ "Ollama": "To get started with Ollama, download the app from [ollama.ai](https://ollama.ai/). Once it is downloaded, be sure to pull at least one model and use its name in the model field in config.py (e.g. `model='codellama'`).",
+ "GGML": "GGML models can be run locally using the `llama-cpp-python` library. To learn how to set up a local llama-cpp-python server, read [here](https://github.com/continuedev/ggml-server-example). Once it is started on port 8000, you're all set!",
+ "TogetherLLM": "To get started using models from Together, first obtain your Together API key from [here](https://together.ai). Paste it into the `api_key` field at config.models.default.api_key in `config.py`. Then, on their models page, press 'start' on the model of your choice and make sure the `model=` parameter in the config file for the `TogetherLLM` class reflects the name of this model. Finally, reload the VS Code window for changes to take effect.",
+ "LlamaCpp": "To get started with this model, clone the [`llama.cpp` repo](https://github.com/ggerganov/llama.cpp) and follow the instructions to set up the server [here](https://github.com/ggerganov/llama.cpp/blob/master/examples/server/README.md#build). Any of the parameters described in the README can be passed to the `llama_cpp_args` field in the `LlamaCpp` class in `config.py`.",
+ "HuggingFaceInferenceAPI": "To get started with the HuggingFace Inference API, first deploy a model and obtain your API key from [here](https://huggingface.co/inference-api). Paste it into the `hf_token` field at config.models.default.hf_token in `config.py`. Finally, reload the VS Code window for changes to take effect.",
+ "GooglePaLMAPI": "To get started with the Google PaLM API, create an API key in Makersuite [here](https://makersuite.google.com/u/2/app/apikey), then paste it into the `api_key` field at config.models.default.api_key in `config.py`.",
+}
+
+
+class SetupModelStep(Step):
+ model_class: str
+ name: str = "Setup model in config.py"
+
+ async def run(self, sdk: ContinueSDK):
+ await sdk.ide.setFileOpen(getConfigFilePath())
+ self.description = MODEL_CLASS_TO_MESSAGE.get(
+ self.model_class, "Please finish setting up this model in `config.py`"
+ )
+
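+ # Highlight the region of config.py between "default=" and "saved=", which should contain the default model definition the user needs to edit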
+ config_contents = await sdk.ide.readFile(getConfigFilePath())
+ start = config_contents.find("default=") + len("default=")
+ end = config_contents.find("saved=") - 1
+ range = Range.from_indices(config_contents, start, end)
+ range.end.line -= 1
+ await sdk.ide.highlightCode(
+ RangeInFile(filepath=getConfigFilePath(), range=range)
+ )
diff --git a/server/continuedev/plugins/steps/share_session.py b/server/continuedev/plugins/steps/share_session.py
new file mode 100644
index 00000000..1d68dc90
--- /dev/null
+++ b/server/continuedev/plugins/steps/share_session.py
@@ -0,0 +1,52 @@
+import asyncio
+import json
+import os
+from typing import Optional
+
+from ...core.main import FullState, Step
+from ...core.sdk import ContinueSDK
+from ...libs.util.paths import getGlobalFolderPath, getSessionFilePath
+from ...server.session_manager import session_manager
+
+
+class ShareSessionStep(Step):
+ session_id: Optional[str] = None
+
+ async def run(self, sdk: ContinueSDK):
+ if self.session_id is None:
+ self.session_id = sdk.ide.session_id
+
+ await session_manager.persist_session(self.session_id)
+ await asyncio.sleep(0.5)
+
+ # Load the session data and format as a markdown file
+ session_filepath = getSessionFilePath(self.session_id)
+ with open(session_filepath, "r") as f:
+ session_state = FullState(**json.load(f))
+
+ import datetime
+
+ date_created = datetime.datetime.fromtimestamp(
+ float(session_state.session_info.date_created)
+ ).strftime("%Y-%m-%d %H:%M:%S")
+ content = f"This is a session transcript from [Continue](https://continue.dev) on {date_created}.\n\n"
+
+ for node in session_state.history.timeline[:-2]:
+ if node.step.hide:
+ continue # ay
+
+ content += f"## {node.step.name}\n"
+ content += f"{node.step.description}\n\n"
+
+ # Save to a markdown file
+ save_filepath = os.path.join(
+ getGlobalFolderPath(), f"{session_state.session_info.title}.md"
+ )
+
+ with open(save_filepath, "w") as f:
+ f.write(content)
+
+ # Open the file
+ await sdk.ide.setFileOpen(save_filepath)
+
+ self.description = f"The session transcript has been saved to a markdown file at {save_filepath}."
diff --git a/server/continuedev/plugins/steps/steps_on_startup.py b/server/continuedev/plugins/steps/steps_on_startup.py
new file mode 100644
index 00000000..58d56703
--- /dev/null
+++ b/server/continuedev/plugins/steps/steps_on_startup.py
@@ -0,0 +1,19 @@
+from ...core.main import Step
+from ...core.sdk import ContinueSDK, Models
+
+
+class StepsOnStartupStep(Step):
+ hide: bool = True
+
+ async def describe(self, models: Models):
+ return "Running steps on startup"
+
+ async def run(self, sdk: ContinueSDK):
+ steps_on_startup = sdk.config.steps_on_startup
+
+ for step_type in steps_on_startup:
+ if isinstance(step_type, Step):
+ step = step_type
+ else:
+ step = step_type()
+ await sdk.run_step(step)
diff --git a/server/continuedev/plugins/steps/welcome.py b/server/continuedev/plugins/steps/welcome.py
new file mode 100644
index 00000000..ef1acfc1
--- /dev/null
+++ b/server/continuedev/plugins/steps/welcome.py
@@ -0,0 +1,40 @@
+import os
+from textwrap import dedent
+
+from ...core.main import Step
+from ...core.sdk import ContinueSDK, Models
+from ...models.filesystem_edit import AddFile
+
+
+class WelcomeStep(Step):
+ name: str = "Welcome to Continue!"
+ hide: bool = True
+
+ async def describe(self, models: Models):
+ return "Welcome to Continue!"
+
+ async def run(self, sdk: ContinueSDK):
+ continue_dir = os.path.expanduser("~/.continue")
+ filepath = os.path.join(continue_dir, "calculator.py")
+ if os.path.exists(filepath):
+ return
+ if not os.path.exists(continue_dir):
+ os.mkdir(continue_dir)
+
+ await sdk.ide.applyFileSystemEdit(
+ AddFile(
+ filepath=filepath,
+ content=dedent(
+ """\
+ \"\"\"
+ Welcome to Continue! To learn how to use it, delete this comment and try to use Continue for the following:
+ - "Write me a calculator class"
+ - Ask for a new method (e.g. "exp", "mod", "sqrt")
+ - Type /comment to write comments for the entire class
+ - Ask about how the class works, how to write it in another language, etc.
+ \"\"\""""
+ ),
+ )
+ )
+
+ # await sdk.ide.setFileOpen(filepath=filepath)
diff --git a/server/continuedev/server/gui.py b/server/continuedev/server/gui.py
new file mode 100644
index 00000000..82767f5e
--- /dev/null
+++ b/server/continuedev/server/gui.py
@@ -0,0 +1,459 @@
+import asyncio
+import json
+import traceback
+from typing import Any, List, Optional, Type, TypeVar
+
+from fastapi import APIRouter, Depends, WebSocket
+from pydantic import BaseModel
+from starlette.websockets import WebSocketDisconnect, WebSocketState
+from uvicorn.main import Server
+
+from ..core.main import ContextItem
+from ..core.models import ALL_MODEL_ROLES, MODEL_CLASSES, MODEL_MODULE_NAMES
+from ..core.steps import DisplayErrorStep
+from ..libs.llm.prompts.chat import (
+ llama2_template_messages,
+ sqlcoder_template_messages,
+ template_alpaca_messages,
+)
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.edit_config import (
+ add_config_import,
+ create_float_node,
+ create_obj_node,
+ create_string_node,
+ display_llm_class,
+)
+from ..libs.util.logging import logger
+from ..libs.util.queue import AsyncSubscriptionQueue
+from ..libs.util.telemetry import posthog_logger
+from .session_manager import Session, session_manager
+
+router = APIRouter(prefix="/gui", tags=["gui"])
+
+# Graceful shutdown by closing websockets
+original_handler = Server.handle_exit
+
+
+class AppStatus:
+ should_exit = False
+
+ @staticmethod
+ def handle_exit(*args, **kwargs):
+ AppStatus.should_exit = True
+ logger.debug("Shutting down")
+ original_handler(*args, **kwargs)
+
+
+Server.handle_exit = AppStatus.handle_exit
+
+
+async def websocket_session(session_id: str) -> Session:
+ return await session_manager.get_session(session_id)
+
+
+T = TypeVar("T", bound=BaseModel)
+
+# You should probably abstract away the websocket stuff into a separate class
+
+
+class GUIProtocolServer:
+ websocket: WebSocket
+ session: Session
+ sub_queue: AsyncSubscriptionQueue = AsyncSubscriptionQueue()
+
+ def __init__(self, session: Session):
+ self.session = session
+
+ async def _send_json(self, message_type: str, data: Any):
+ if self.websocket.application_state == WebSocketState.DISCONNECTED:
+ return
+ await self.websocket.send_json({"messageType": message_type, "data": data})
+
+ async def _receive_json(self, message_type: str, timeout: int = 20) -> Any:
+ try:
+ return await asyncio.wait_for(
+ self.sub_queue.get(message_type), timeout=timeout
+ )
+ except asyncio.TimeoutError:
+            raise Exception(
+                f"GUI Protocol _receive_json timed out after {timeout} seconds: {message_type}"
+            )
+
+ async def _send_and_receive_json(
+ self, data: Any, resp_model: Type[T], message_type: str
+ ) -> T:
+ await self._send_json(message_type, data)
+ resp = await self._receive_json(message_type)
+ return resp_model.parse_obj(resp)
+
+ def on_error(self, e: Exception):
+ return self.session.autopilot.continue_sdk.run_step(
+ DisplayErrorStep.from_exception(e)
+ )
+
+ def handle_json(self, message_type: str, data: Any):
+ if message_type == "main_input":
+ self.on_main_input(data["input"])
+ elif message_type == "step_user_input":
+ self.on_step_user_input(data["input"], data["index"])
+ elif message_type == "refinement_input":
+ self.on_refinement_input(data["input"], data["index"])
+ elif message_type == "reverse_to_index":
+ self.on_reverse_to_index(data["index"])
+ elif message_type == "retry_at_index":
+ self.on_retry_at_index(data["index"])
+ elif message_type == "clear_history":
+ self.on_clear_history()
+ elif message_type == "set_current_session_title":
+ self.set_current_session_title(data["title"])
+ elif message_type == "delete_at_index":
+ self.on_delete_at_index(data["index"])
+ elif message_type == "delete_context_with_ids":
+ self.on_delete_context_with_ids(data["ids"], data.get("index", None))
+ elif message_type == "toggle_adding_highlighted_code":
+ self.on_toggle_adding_highlighted_code()
+ elif message_type == "set_editing_at_ids":
+ self.on_set_editing_at_ids(data["ids"])
+ elif message_type == "show_logs_at_index":
+ self.on_show_logs_at_index(data["index"])
+ elif message_type == "show_context_virtual_file":
+ self.show_context_virtual_file(data.get("index", None))
+ elif message_type == "select_context_item":
+ self.select_context_item(data["id"], data["query"])
+ elif message_type == "select_context_item_at_index":
+ self.select_context_item_at_index(data["id"], data["query"], data["index"])
+ elif message_type == "load_session":
+ self.load_session(data.get("session_id", None))
+ elif message_type == "edit_step_at_index":
+ self.edit_step_at_index(data.get("user_input", ""), data["index"])
+ elif message_type == "set_system_message":
+ self.set_system_message(data["message"])
+ elif message_type == "set_temperature":
+ self.set_temperature(float(data["temperature"]))
+ elif message_type == "add_model_for_role":
+ self.add_model_for_role(data["role"], data["model_class"], data["model"])
+ elif message_type == "set_model_for_role_from_index":
+ self.set_model_for_role_from_index(data["role"], data["index"])
+ elif message_type == "save_context_group":
+ self.save_context_group(
+ data["title"], [ContextItem(**item) for item in data["context_items"]]
+ )
+ elif message_type == "select_context_group":
+ self.select_context_group(data["id"])
+ elif message_type == "delete_context_group":
+ self.delete_context_group(data["id"])
+
+ def on_main_input(self, input: str):
+ # Do something with user input
+ create_async_task(
+ self.session.autopilot.accept_user_input(input), self.on_error
+ )
+
+ def on_reverse_to_index(self, index: int):
+ # Reverse the history to the given index
+ create_async_task(self.session.autopilot.reverse_to_index(index), self.on_error)
+
+ def on_step_user_input(self, input: str, index: int):
+ create_async_task(
+ self.session.autopilot.give_user_input(input, index), self.on_error
+ )
+
+ def on_refinement_input(self, input: str, index: int):
+ create_async_task(
+ self.session.autopilot.accept_refinement_input(input, index), self.on_error
+ )
+
+ def on_retry_at_index(self, index: int):
+ create_async_task(self.session.autopilot.retry_at_index(index), self.on_error)
+
+ def on_clear_history(self):
+ create_async_task(self.session.autopilot.clear_history(), self.on_error)
+
+ def on_delete_at_index(self, index: int):
+ create_async_task(self.session.autopilot.delete_at_index(index), self.on_error)
+
+ def edit_step_at_index(self, user_input: str, index: int):
+ create_async_task(
+ self.session.autopilot.edit_step_at_index(user_input, index),
+ self.on_error,
+ )
+
+ def on_delete_context_with_ids(self, ids: List[str], index: Optional[int] = None):
+ create_async_task(
+ self.session.autopilot.delete_context_with_ids(ids, index), self.on_error
+ )
+
+ def on_toggle_adding_highlighted_code(self):
+ create_async_task(
+ self.session.autopilot.toggle_adding_highlighted_code(), self.on_error
+ )
+ posthog_logger.capture_event("toggle_adding_highlighted_code", {})
+
+ def on_set_editing_at_ids(self, ids: List[str]):
+ create_async_task(self.session.autopilot.set_editing_at_ids(ids), self.on_error)
+
+ def on_show_logs_at_index(self, index: int):
+ name = "Continue Prompt"
+ logs = "\n\n############################################\n\n".join(
+ ["This is the prompt that was sent to the LLM during this step"]
+ + self.session.autopilot.continue_sdk.history.timeline[index].logs
+ )
+ create_async_task(
+ self.session.autopilot.ide.showVirtualFile(name, logs), self.on_error
+ )
+ posthog_logger.capture_event("show_logs_at_index", {})
+
+ def show_context_virtual_file(self, index: Optional[int] = None):
+ async def async_stuff():
+ if index is None:
+ context_items = (
+ await self.session.autopilot.context_manager.get_selected_items()
+ )
+            elif index < len(self.session.autopilot.continue_sdk.history.timeline):
+                context_items = self.session.autopilot.continue_sdk.history.timeline[
+                    index
+                ].context_used
+            else:
+                return  # Nothing to show for an out-of-range index
+
+ ctx = "\n\n-----------------------------------\n\n".join(
+ ["These are the context items that will be passed to the LLM"]
+ + list(map(lambda x: x.content, context_items))
+ )
+ await self.session.autopilot.ide.showVirtualFile(
+ "Continue - Selected Context", ctx
+ )
+
+ create_async_task(
+ async_stuff(),
+ self.on_error,
+ )
+
+ def select_context_item(self, id: str, query: str):
+ """Called when user selects an item from the dropdown"""
+ create_async_task(
+ self.session.autopilot.select_context_item(id, query), self.on_error
+ )
+
+ def select_context_item_at_index(self, id: str, query: str, index: int):
+ """Called when user selects an item from the dropdown for prev UserInputStep"""
+ create_async_task(
+ self.session.autopilot.select_context_item_at_index(id, query, index),
+ self.on_error,
+ )
+
+ def load_session(self, session_id: Optional[str] = None):
+ async def load_and_tell_to_reconnect():
+ new_session_id = await session_manager.load_session(
+ self.session.session_id, session_id
+ )
+ await self._send_json(
+ "reconnect_at_session", {"session_id": new_session_id}
+ )
+
+ create_async_task(load_and_tell_to_reconnect(), self.on_error)
+
+ posthog_logger.capture_event("load_session", {"session_id": session_id})
+
+ def set_current_session_title(self, title: str):
+ self.session.autopilot.set_current_session_title(title)
+
+ def set_system_message(self, message: str):
+ self.session.autopilot.continue_sdk.config.system_message = message
+ self.session.autopilot.continue_sdk.models.set_system_message(message)
+
+ create_async_task(
+ self.session.autopilot.set_config_attr(
+ ["system_message"], create_string_node(message)
+ ),
+ self.on_error,
+ )
+ posthog_logger.capture_event("set_system_message", {"system_message": message})
+
+ def set_temperature(self, temperature: float):
+ self.session.autopilot.continue_sdk.config.temperature = temperature
+ create_async_task(
+ self.session.autopilot.set_config_attr(
+ ["temperature"], create_float_node(temperature)
+ ),
+ self.on_error,
+ )
+ posthog_logger.capture_event("set_temperature", {"temperature": temperature})
+
+ def set_model_for_role_from_index(self, role: str, index: int):
+ async def async_stuff():
+ models = self.session.autopilot.continue_sdk.config.models
+
+ # Set models in SDK
+ temp = models.default
+ models.default = models.saved[index]
+ models.saved[index] = temp
+ await self.session.autopilot.continue_sdk.start_model(models.default)
+
+ # Set models in config.py
+ JOINER = ",\n\t\t"
+ models_args = {
+ "saved": f"[{JOINER.join([display_llm_class(llm) for llm in models.saved])}]",
+ ("default" if role == "*" else role): display_llm_class(models.default),
+ }
+
+ await self.session.autopilot.set_config_attr(
+ ["models"],
+ create_obj_node("Models", models_args),
+ )
+
+ for other_role in ALL_MODEL_ROLES:
+ if other_role != "default":
+ models.__setattr__(other_role, models.default)
+
+ await self.session.autopilot.continue_sdk.update_ui()
+
+ create_async_task(async_stuff(), self.on_error)
+
+ def add_model_for_role(self, role: str, model_class: str, model: Any):
+ models = self.session.autopilot.continue_sdk.config.models
+
+ model_copy = model.copy()
+ if "api_key" in model_copy:
+ del model_copy["api_key"]
+ if "hf_token" in model_copy:
+ del model_copy["hf_token"]
+ posthog_logger.capture_event(
+ "select_model_for_role",
+ {"role": role, "model_class": model_class, "model": model_copy},
+ )
+
+ if role == "*":
+
+ async def async_stuff():
+ # Remove all previous models in roles and place in saved
+ saved_models = models.saved
+ existing_saved_models = set(
+ [display_llm_class(llm) for llm in saved_models]
+ )
+ for role in ALL_MODEL_ROLES:
+ val = models.__getattribute__(role)
+ if (
+ val is not None
+ and display_llm_class(val) not in existing_saved_models
+ ):
+ saved_models.append(val)
+ existing_saved_models.add(display_llm_class(val))
+ models.__setattr__(role, None)
+
+ # Add the requisite import to config.py
+ add_config_import(
+ f"from continuedev.libs.llm.{MODEL_MODULE_NAMES[model_class]} import {model_class}"
+ )
+ if "template_messages" in model:
+ add_config_import(
+ f"from continuedev.libs.llm.prompts.chat import {model['template_messages']}"
+ )
+
+ # Set and start the new default model
+
+ if "template_messages" in model:
+ sqtm = sqlcoder_template_messages("<MY_DATABASE_SCHEMA>")
+ sqtm.__name__ = 'sqlcoder_template_messages("<MY_DATABASE_SCHEMA>")'
+ model["template_messages"] = {
+ "llama2_template_messages": llama2_template_messages,
+ "template_alpaca_messages": template_alpaca_messages,
+ "sqlcoder_template_messages": sqtm,
+ }[model["template_messages"]]
+ new_model = MODEL_CLASSES[model_class](**model)
+ models.default = new_model
+ await self.session.autopilot.continue_sdk.start_model(models.default)
+
+ # Construct and set the new models object
+ JOINER = ",\n\t\t"
+ saved_model_strings = set(
+ [display_llm_class(llm) for llm in saved_models]
+ )
+ models_args = {
+ "default": display_llm_class(models.default, True),
+ "saved": f"[{JOINER.join(saved_model_strings)}]",
+ }
+
+ await self.session.autopilot.set_config_attr(
+ ["models"],
+ create_obj_node("Models", models_args),
+ )
+
+ # Set all roles (in-memory) to the new default model
+ for role in ALL_MODEL_ROLES:
+ if role != "default":
+ models.__setattr__(role, models.default)
+
+ # Display setup help
+ # await self.session.autopilot.continue_sdk.run_step(
+ # SetupModelStep(model_class=model_class)
+ # )
+
+ create_async_task(async_stuff(), self.on_error)
+ else:
+ # TODO
+ pass
+
+ def save_context_group(self, title: str, context_items: List[ContextItem]):
+ create_async_task(
+ self.session.autopilot.save_context_group(title, context_items),
+ self.on_error,
+ )
+
+ def select_context_group(self, id: str):
+ create_async_task(
+ self.session.autopilot.select_context_group(id), self.on_error
+ )
+
+ def delete_context_group(self, id: str):
+ create_async_task(
+ self.session.autopilot.delete_context_group(id), self.on_error
+ )
+
+
+@router.websocket("/ws")
+async def websocket_endpoint(
+ websocket: WebSocket, session: Session = Depends(websocket_session)
+):
+ try:
+ logger.debug(f"Received websocket connection at url: {websocket.url}")
+ await websocket.accept()
+
+ logger.debug("Session started")
+ session_manager.register_websocket(session.session_id, websocket)
+ protocol = GUIProtocolServer(session)
+ protocol.websocket = websocket
+
+ # Update any history that may have happened before connection
+ await protocol.session.autopilot.update_subscribers()
+
+ while AppStatus.should_exit is False:
+ message = await websocket.receive_text()
+ logger.debug(f"Received GUI message {message}")
+ if isinstance(message, str):
+ message = json.loads(message)
+
+ if "messageType" not in message or "data" not in message:
+ continue # :o
+ message_type = message["messageType"]
+ data = message["data"]
+
+ protocol.handle_json(message_type, data)
+ except WebSocketDisconnect:
+ logger.debug("GUI websocket disconnected")
+ except Exception as e:
+ # Log, send to PostHog, and send to GUI
+ logger.debug(f"ERROR in gui websocket: {e}")
+ err_msg = "\n".join(traceback.format_exception(e))
+ posthog_logger.capture_event(
+ "gui_error",
+ {"error_title": e.__str__() or e.__repr__(), "error_message": err_msg},
+ )
+
+ await session.autopilot.ide.showMessage(err_msg)
+
+ raise e
+ finally:
+ logger.debug("Closing gui websocket")
+ if websocket.client_state != WebSocketState.DISCONNECTED:
+ await websocket.close()
+
+ await session_manager.persist_session(session.session_id)
+ await session_manager.remove_session(session.session_id)
diff --git a/server/continuedev/server/ide.py b/server/continuedev/server/ide.py
new file mode 100644
index 00000000..7f9af77a
--- /dev/null
+++ b/server/continuedev/server/ide.py
@@ -0,0 +1,680 @@
+# This is a separate server from server/main.py
+import asyncio
+import json
+import os
+import traceback
+import uuid
+from typing import Any, Callable, Coroutine, Dict, List, Optional, Type, TypeVar, Union
+
+import nest_asyncio
+from fastapi import APIRouter, WebSocket
+from pydantic import BaseModel
+from starlette.websockets import WebSocketDisconnect, WebSocketState
+from uvicorn.main import Server
+
+from ..core.main import ContinueCustomException
+from ..core.steps import DisplayErrorStep
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.devdata import dev_data_logger
+from ..libs.util.logging import logger
+from ..libs.util.queue import AsyncSubscriptionQueue
+from ..libs.util.telemetry import posthog_logger
+from ..models.filesystem import (
+ EditDiff,
+ FileSystem,
+ RangeInFile,
+ RangeInFileWithContents,
+ RealFileSystem,
+)
+from ..models.filesystem_edit import (
+ AddDirectory,
+ AddFile,
+ DeleteDirectory,
+ DeleteFile,
+ FileEdit,
+ FileEditWithFullContents,
+ FileSystemEdit,
+ RenameDirectory,
+ RenameFile,
+ SequentialFileSystemEdit,
+)
+from .gui import session_manager
+from .ide_protocol import AbstractIdeProtocolServer
+from .session_manager import SessionManager
+
+nest_asyncio.apply()
+
+
+router = APIRouter(prefix="/ide", tags=["ide"])
+
+
+# Graceful shutdown by closing websockets
+original_handler = Server.handle_exit
+
+
+class AppStatus:
+ should_exit = False
+
+ @staticmethod
+ def handle_exit(*args, **kwargs):
+ AppStatus.should_exit = True
+ logger.debug("Shutting down")
+ original_handler(*args, **kwargs)
+
+
+Server.handle_exit = AppStatus.handle_exit
+
+
+# TYPES #
+
+
+class FileEditsUpdate(BaseModel):
+ fileEdits: List[FileEditWithFullContents]
+
+
+class OpenFilesResponse(BaseModel):
+ openFiles: List[str]
+
+
+class VisibleFilesResponse(BaseModel):
+ visibleFiles: List[str]
+
+
+class HighlightedCodeResponse(BaseModel):
+ highlightedCode: List[RangeInFile]
+
+
+class ShowSuggestionRequest(BaseModel):
+ suggestion: FileEdit
+
+
+class ShowSuggestionResponse(BaseModel):
+ suggestion: FileEdit
+ accepted: bool
+
+
+class ReadFileResponse(BaseModel):
+ contents: str
+
+
+class EditFileResponse(BaseModel):
+ fileEdit: FileEditWithFullContents
+
+
+class WorkspaceDirectoryResponse(BaseModel):
+ workspaceDirectory: str
+
+
+class GetUserSecretResponse(BaseModel):
+ value: str
+
+
+class RunCommandResponse(BaseModel):
+ output: str = ""
+
+
+class UniqueIdResponse(BaseModel):
+ uniqueId: str
+
+
+class TerminalContentsResponse(BaseModel):
+ contents: str
+
+
+class ListDirectoryContentsResponse(BaseModel):
+ contents: List[str]
+
+
+class FileExistsResponse(BaseModel):
+ exists: bool
+
+
+T = TypeVar("T", bound=BaseModel)
+
+
+class cached_property_no_none:
+ def __init__(self, func):
+ self.func = func
+
+ def __get__(self, instance, owner):
+ if instance is None:
+ return self
+ value = self.func(instance)
+ if value is not None:
+ setattr(instance, self.func.__name__, value)
+ return value
+
+ def __repr__(self):
+ return f"<cached_property_no_none '{self.func.__name__}'>"
+
+
+class IdeProtocolServer(AbstractIdeProtocolServer):
+ websocket: WebSocket
+ session_manager: SessionManager
+ sub_queue: AsyncSubscriptionQueue = AsyncSubscriptionQueue()
+ session_id: Union[str, None] = None
+
+ ide_info: Optional[Dict] = None
+
+ def __init__(self, session_manager: SessionManager, websocket: WebSocket):
+ self.websocket = websocket
+ self.session_manager = session_manager
+
+    workspace_directory: Optional[str] = None
+    unique_id: Optional[str] = None
+
+ async def initialize(self, session_id: str) -> List[str]:
+ self.session_id = session_id
+ await self._send_json("workspaceDirectory", {})
+ await self._send_json("uniqueId", {})
+ await self._send_json("ide", {})
+ other_msgs = []
+ while True:
+ msg_string = await self.websocket.receive_text()
+ message = json.loads(msg_string)
+ if "messageType" not in message or "data" not in message:
+ continue # <-- hey that's the name of this repo!
+ message_type = message["messageType"]
+ data = message["data"]
+ logger.debug(f"Received message while initializing {message_type}")
+ if message_type == "workspaceDirectory":
+ self.workspace_directory = data["workspaceDirectory"]
+ elif message_type == "uniqueId":
+ self.unique_id = data["uniqueId"]
+ elif message_type == "ide":
+ self.ide_info = data
+ else:
+ other_msgs.append(msg_string)
+
+ if self.workspace_directory is not None and self.unique_id is not None:
+ break
+ return other_msgs
+
+ async def _send_json(self, message_type: str, data: Any):
+ # TODO: You breakpointed here, set it to disconnected, and then saw
+ # that even after reloading, it couldn't connect the server.
+ # Is this because there is an IDE registered without a websocket?
+ # This shouldn't count as registered in that case.
+ try:
+ if self.websocket.application_state == WebSocketState.DISCONNECTED:
+ logger.debug(
+ f"Tried to send message, but websocket is disconnected: {message_type}"
+ )
+ return
+ # logger.debug(f"Sending IDE message: {message_type}")
+ await self.websocket.send_json({"messageType": message_type, "data": data})
+ except RuntimeError as e:
+ logger.warning(f"Error sending IDE message, websocket probably closed: {e}")
+
+ async def _receive_json(
+ self, message_type: str, timeout: int = 20, message=None
+ ) -> Any:
+ try:
+ return await asyncio.wait_for(
+ self.sub_queue.get(message_type), timeout=timeout
+ )
+ except asyncio.TimeoutError:
+ raise ContinueCustomException(
+                title=f"IDE Protocol _receive_json timed out after {timeout} seconds: {message_type}",
+                message=f"IDE Protocol _receive_json timed out after {timeout} seconds. The message sent was: {message or ''}",
+ )
+
+ async def _send_and_receive_json(
+ self, data: Any, resp_model: Type[T], message_type: str
+ ) -> T:
+ await self._send_json(message_type, data)
+ resp = await self._receive_json(message_type, message=data)
+ return resp_model.parse_obj(resp)
+
+ async def handle_json(self, message_type: str, data: Any):
+ if message_type == "getSessionId":
+ await self.getSessionId()
+ elif message_type == "setFileOpen":
+ await self.setFileOpen(data["filepath"], data["open"])
+ elif message_type == "setSuggestionsLocked":
+ await self.setSuggestionsLocked(data["filepath"], data["locked"])
+ elif message_type == "fileEdits":
+ fileEdits = list(
+ map(lambda d: FileEditWithFullContents.parse_obj(d), data["fileEdits"])
+ )
+ self.onFileEdits(fileEdits)
+ elif message_type == "highlightedCodePush":
+ self.onHighlightedCodeUpdate(
+ [RangeInFileWithContents(**rif) for rif in data["highlightedCode"]],
+ edit=data.get("edit", False),
+ )
+ elif message_type == "commandOutput":
+ output = data["output"]
+ self.onCommandOutput(output)
+ elif message_type == "debugTerminal":
+ content = data["contents"]
+ self.onDebugTerminal(content)
+ elif message_type == "acceptRejectSuggestion":
+ self.onAcceptRejectSuggestion(data["accepted"])
+ elif message_type == "acceptRejectDiff":
+ self.onAcceptRejectDiff(data["accepted"], data["stepIndex"])
+ elif message_type == "mainUserInput":
+ self.onMainUserInput(data["input"])
+ elif message_type == "deleteAtIndex":
+ self.onDeleteAtIndex(data["index"])
+ elif message_type in [
+ "highlightedCode",
+ "openFiles",
+ "visibleFiles",
+ "readFile",
+ "editFile",
+ "getUserSecret",
+ "runCommand",
+ "getTerminalContents",
+ "listDirectoryContents",
+ "fileExists",
+ ]:
+ self.sub_queue.post(message_type, data)
+ elif message_type == "workspaceDirectory":
+ self.workspace_directory = data["workspaceDirectory"]
+ elif message_type == "uniqueId":
+ self.unique_id = data["uniqueId"]
+ elif message_type == "ide":
+ self.ide_info = data
+ elif message_type == "filesCreated":
+ self.onFilesCreated(data["filepaths"])
+ elif message_type == "filesDeleted":
+ self.onFilesDeleted(data["filepaths"])
+ elif message_type == "filesRenamed":
+ self.onFilesRenamed(data["old_filepaths"], data["new_filepaths"])
+ elif message_type == "fileSaved":
+ self.onFileSaved(data["filepath"], data["contents"])
+ else:
+ raise ValueError("Unknown message type", message_type)
+
+ async def showSuggestion(self, file_edit: FileEdit):
+ await self._send_json("showSuggestion", {"edit": file_edit.dict()})
+
+ async def showDiff(self, filepath: str, replacement: str, step_index: int):
+ await self._send_json(
+ "showDiff",
+ {
+ "filepath": filepath,
+ "replacement": replacement,
+ "step_index": step_index,
+ },
+ )
+
+ async def setFileOpen(self, filepath: str, open: bool = True):
+ # Autopilot needs access to this.
+ await self._send_json("setFileOpen", {"filepath": filepath, "open": open})
+
+ async def showMessage(self, message: str):
+ await self._send_json("showMessage", {"message": message})
+
+ async def showVirtualFile(self, name: str, contents: str):
+ await self._send_json("showVirtualFile", {"name": name, "contents": contents})
+
+ async def setSuggestionsLocked(self, filepath: str, locked: bool = True):
+ # Lock suggestions in the file so they don't ruin the offset before others are inserted
+ await self._send_json(
+ "setSuggestionsLocked", {"filepath": filepath, "locked": locked}
+ )
+
+ async def getSessionId(self):
+ new_session = await asyncio.wait_for(
+ self.session_manager.new_session(self, self.session_id), timeout=5
+ )
+ session_id = new_session.session_id
+ logger.debug(f"Sending session id: {session_id}")
+ await self._send_json("getSessionId", {"sessionId": session_id})
+
+ async def highlightCode(self, range_in_file: RangeInFile, color: str = "#00ff0022"):
+ await self._send_json(
+ "highlightCode", {"rangeInFile": range_in_file.dict(), "color": color}
+ )
+
+ async def runCommand(self, command: str) -> str:
+ return (
+ await self._send_and_receive_json(
+ {"command": command}, RunCommandResponse, "runCommand"
+ )
+ ).output
+
+ async def showSuggestionsAndWait(self, suggestions: List[FileEdit]) -> bool:
+ ids = [str(uuid.uuid4()) for _ in suggestions]
+ for i in range(len(suggestions)):
+            await self._send_json(
+                "showSuggestion",
+                {"suggestion": suggestions[i].dict(), "suggestionId": ids[i]},
+ )
+ responses = await asyncio.gather(
+ *[
+ self._receive_json(ShowSuggestionResponse)
+ for i in range(len(suggestions))
+ ]
+ ) # WORKING ON THIS FLOW HERE. Fine now to just await for response, instead of doing something fancy with a "waiting" state on the autopilot.
+ # Just need connect the suggestionId to the IDE (and the gui)
+ return any([r.accepted for r in responses])
+
+ def on_error(self, e: Exception) -> Coroutine:
+ err_msg = "\n".join(traceback.format_exception(e))
+ e_title = e.__str__() or e.__repr__()
+ return self.showMessage(f"Error in Continue server: {e_title}\n {err_msg}")
+
+ def onAcceptRejectSuggestion(self, accepted: bool):
+ posthog_logger.capture_event("accept_reject_suggestion", {"accepted": accepted})
+ dev_data_logger.capture("accept_reject_suggestion", {"accepted": accepted})
+
+ def onAcceptRejectDiff(self, accepted: bool, step_index: int):
+ posthog_logger.capture_event("accept_reject_diff", {"accepted": accepted})
+ dev_data_logger.capture("accept_reject_diff", {"accepted": accepted})
+
+ if not accepted:
+ if autopilot := self.__get_autopilot():
+ create_async_task(
+ autopilot.reject_diff(step_index),
+ self.on_error,
+ )
+
+ def onFileSystemUpdate(self, update: FileSystemEdit):
+ # Access to Autopilot (so SessionManager)
+ pass
+
+ def onCloseGUI(self, session_id: str):
+ # Access to SessionManager
+ pass
+
+ def onOpenGUIRequest(self):
+ pass
+
+ def __get_autopilot(self):
+ if self.session_id not in self.session_manager.sessions:
+ return None
+
+ autopilot = self.session_manager.sessions[self.session_id].autopilot
+ return autopilot if autopilot.started else None
+
+ def onFileEdits(self, edits: List[FileEditWithFullContents]):
+ if autopilot := self.__get_autopilot():
+ pass
+
+ def onDeleteAtIndex(self, index: int):
+ if autopilot := self.__get_autopilot():
+ create_async_task(autopilot.delete_at_index(index), self.on_error)
+
+ def onCommandOutput(self, output: str):
+ if autopilot := self.__get_autopilot():
+ create_async_task(autopilot.handle_command_output(output), self.on_error)
+
+ def onDebugTerminal(self, content: str):
+ if autopilot := self.__get_autopilot():
+ create_async_task(autopilot.handle_debug_terminal(content), self.on_error)
+
+ def onHighlightedCodeUpdate(
+ self,
+ range_in_files: List[RangeInFileWithContents],
+ edit: Optional[bool] = False,
+ ):
+ if autopilot := self.__get_autopilot():
+ create_async_task(
+ autopilot.handle_highlighted_code(range_in_files, edit), self.on_error
+ )
+
+ ## Subscriptions ##
+
+ _files_created_callbacks = []
+ _files_deleted_callbacks = []
+ _files_renamed_callbacks = []
+ _file_saved_callbacks = []
+
+ def call_callback(self, callback, *args, **kwargs):
+ if asyncio.iscoroutinefunction(callback):
+ create_async_task(callback(*args, **kwargs), self.on_error)
+ else:
+ callback(*args, **kwargs)
+
+ def subscribeToFilesCreated(self, callback: Callable[[List[str]], None]):
+ self._files_created_callbacks.append(callback)
+
+ def subscribeToFilesDeleted(self, callback: Callable[[List[str]], None]):
+ self._files_deleted_callbacks.append(callback)
+
+ def subscribeToFilesRenamed(self, callback: Callable[[List[str], List[str]], None]):
+ self._files_renamed_callbacks.append(callback)
+
+ def subscribeToFileSaved(self, callback: Callable[[str, str], None]):
+ self._file_saved_callbacks.append(callback)
+
+ def onFilesCreated(self, filepaths: List[str]):
+ for callback in self._files_created_callbacks:
+ self.call_callback(callback, filepaths)
+
+ def onFilesDeleted(self, filepaths: List[str]):
+ for callback in self._files_deleted_callbacks:
+ self.call_callback(callback, filepaths)
+
+ def onFilesRenamed(self, old_filepaths: List[str], new_filepaths: List[str]):
+ for callback in self._files_renamed_callbacks:
+ self.call_callback(callback, old_filepaths, new_filepaths)
+
+ def onFileSaved(self, filepath: str, contents: str):
+ for callback in self._file_saved_callbacks:
+ self.call_callback(callback, filepath, contents)
+
+ # If ~/.continue/config.py was saved, auto-update the SDK
+ if filepath.endswith(".continue/config.py") or filepath.endswith(
+ ".continue\\config.py"
+ ):
+ if autopilot := self.__get_autopilot():
+ create_async_task(autopilot.reload_config(), self.on_error)
+
+ ## END Subscriptions ##
+
+ def onMainUserInput(self, input: str):
+ if autopilot := self.__get_autopilot():
+ create_async_task(autopilot.accept_user_input(input), self.on_error)
+
+ # Request information. Session doesn't matter.
+ async def getOpenFiles(self) -> List[str]:
+ resp = await self._send_and_receive_json({}, OpenFilesResponse, "openFiles")
+ return resp.openFiles
+
+ async def getVisibleFiles(self) -> List[str]:
+ resp = await self._send_and_receive_json(
+ {}, VisibleFilesResponse, "visibleFiles"
+ )
+ return resp.visibleFiles
+
+ async def getTerminalContents(self, commands: int = -1) -> str:
+ """Get the contents of the terminal, up to the last 'commands' commands, or all if commands is -1"""
+ resp = await self._send_and_receive_json(
+ {"commands": commands}, TerminalContentsResponse, "getTerminalContents"
+ )
+ return resp.contents.strip()
+
+ async def getHighlightedCode(self) -> List[RangeInFile]:
+ resp = await self._send_and_receive_json(
+ {}, HighlightedCodeResponse, "highlightedCode"
+ )
+ return resp.highlightedCode
+
+ async def readFile(self, filepath: str) -> str:
+ """Read a file"""
+ resp = await self._send_and_receive_json(
+ {"filepath": filepath}, ReadFileResponse, "readFile"
+ )
+ return resp.contents
+
+    async def fileExists(self, filepath: str) -> bool:
+ """Check whether file exists"""
+ resp = await self._send_and_receive_json(
+ {"filepath": filepath}, FileExistsResponse, "fileExists"
+ )
+ return resp.exists
+
+ async def getUserSecret(self, key: str) -> str:
+ """Get a user secret"""
+ try:
+ resp = await self._send_and_receive_json(
+ {"key": key}, GetUserSecretResponse, "getUserSecret"
+ )
+ return resp.value
+ except Exception as e:
+ logger.debug(f"Error getting user secret: {e}")
+ return ""
+
+ async def saveFile(self, filepath: str):
+ """Save a file"""
+ await self._send_json("saveFile", {"filepath": filepath})
+
+ async def readRangeInFile(self, range_in_file: RangeInFile) -> str:
+ """Read a range in a file"""
+ full_contents = await self.readFile(range_in_file.filepath)
+ return FileSystem.read_range_in_str(full_contents, range_in_file.range)
+
+ async def editFile(self, edit: FileEdit) -> FileEditWithFullContents:
+ """Edit a file"""
+ resp = await self._send_and_receive_json(
+ {"edit": edit.dict()}, EditFileResponse, "editFile"
+ )
+ return resp.fileEdit
+
+ async def listDirectoryContents(
+ self, directory: str, recursive: bool = False
+ ) -> List[str]:
+ """List the contents of a directory"""
+ resp = await self._send_and_receive_json(
+ {"directory": directory, "recursive": recursive},
+ ListDirectoryContentsResponse,
+ "listDirectoryContents",
+ )
+ return resp.contents
+
+ async def applyFileSystemEdit(self, edit: FileSystemEdit) -> EditDiff:
+ """Apply a file edit"""
+ backward = None
+ fs = RealFileSystem()
+ if isinstance(edit, FileEdit):
+ file_edit = await self.editFile(edit)
+ _, diff = FileSystem.apply_edit_to_str(
+ file_edit.fileContents, file_edit.fileEdit
+ )
+ backward = diff.backward
+ elif isinstance(edit, AddFile):
+ fs.write(edit.filepath, edit.content)
+ backward = DeleteFile(filepath=edit.filepath)
+ elif isinstance(edit, DeleteFile):
+ contents = await self.readFile(edit.filepath)
+ backward = AddFile(filepath=edit.filepath, content=contents)
+ fs.delete_file(edit.filepath)
+ elif isinstance(edit, RenameFile):
+ fs.rename_file(edit.filepath, edit.new_filepath)
+ backward = RenameFile(
+ filepath=edit.new_filepath, new_filepath=edit.filepath
+ )
+ elif isinstance(edit, AddDirectory):
+ fs.add_directory(edit.path)
+ backward = DeleteDirectory(path=edit.path)
+ elif isinstance(edit, DeleteDirectory):
+ # This isn't atomic!
+ backward_edits = []
+ for root, dirs, files in os.walk(edit.path, topdown=False):
+ for f in files:
+ path = os.path.join(root, f)
+ edit_diff = await self.applyFileSystemEdit(
+ DeleteFile(filepath=path)
+ )
+ backward_edits.append(edit_diff)
+ for d in dirs:
+ path = os.path.join(root, d)
+ edit_diff = await self.applyFileSystemEdit(
+ DeleteDirectory(path=path)
+ )
+ backward_edits.append(edit_diff)
+
+            # Remove the now-empty directory itself (recursing on the same path would loop forever)
+            os.rmdir(edit.path)
+            backward_edits.append(
+                EditDiff(forward=DeleteDirectory(path=edit.path), backward=AddDirectory(path=edit.path))
+            )
+ backward_edits.reverse()
+ backward = SequentialFileSystemEdit(edits=backward_edits)
+ elif isinstance(edit, RenameDirectory):
+ fs.rename_directory(edit.path, edit.new_path)
+ backward = RenameDirectory(path=edit.new_path, new_path=edit.path)
+ elif isinstance(edit, FileSystemEdit):
+ diffs = []
+ for edit in edit.next_edit():
+ edit_diff = await self.applyFileSystemEdit(edit)
+ diffs.append(edit_diff)
+ backward = EditDiff.from_sequence(diffs=diffs).backward
+ else:
+ raise TypeError("Unknown FileSystemEdit type: " + str(type(edit)))
+
+ return EditDiff(forward=edit, backward=backward)
+
+
+@router.websocket("/ws")
+async def websocket_endpoint(websocket: WebSocket, session_id: Optional[str] = None):
+    ideProtocolServer = None
+    try:
+ # Accept the websocket connection
+ await websocket.accept()
+ logger.debug(f"Accepted websocket connection from {websocket.client}")
+ await websocket.send_json({"messageType": "connected", "data": {}})
+
+ # Message handler
+ def handle_msg(msg):
+ try:
+ message = json.loads(msg)
+ except json.JSONDecodeError:
+ logger.critical(f"Error decoding json: {msg}")
+ return
+
+ if "messageType" not in message or "data" not in message:
+ return
+ message_type = message["messageType"]
+ data = message["data"]
+
+ # logger.debug(f"Received IDE message: {message_type}")
+ create_async_task(
+ ideProtocolServer.handle_json(message_type, data),
+ ideProtocolServer.on_error,
+ )
+
+ # Initialize the IDE Protocol Server
+ ideProtocolServer = IdeProtocolServer(session_manager, websocket)
+ if session_id is not None:
+ session_manager.registered_ides[session_id] = ideProtocolServer
+ other_msgs = await ideProtocolServer.initialize(session_id)
+ posthog_logger.capture_event(
+ "session_started", {"session_id": ideProtocolServer.session_id}
+ )
+
+ for other_msg in other_msgs:
+ handle_msg(other_msg)
+
+ # Handle messages
+ while AppStatus.should_exit is False:
+ message = await websocket.receive_text()
+ handle_msg(message)
+
+ except WebSocketDisconnect:
+ logger.debug("IDE websocket disconnected")
+ except Exception as e:
+ logger.debug(f"Error in ide websocket: {e}")
+ err_msg = "\n".join(traceback.format_exception(e))
+ posthog_logger.capture_event(
+ "gui_error",
+ {"error_title": e.__str__() or e.__repr__(), "error_message": err_msg},
+ )
+
+ if session_id is not None and session_id in session_manager.sessions:
+ await session_manager.sessions[session_id].autopilot.continue_sdk.run_step(
+ DisplayErrorStep.from_exception(e)
+ )
+ elif ideProtocolServer is not None:
+ await ideProtocolServer.showMessage(f"Error in Continue server: {err_msg}")
+
+ raise e
+ finally:
+ logger.debug("Closing ide websocket")
+ if websocket.client_state != WebSocketState.DISCONNECTED:
+ await websocket.close()
+
+        if ideProtocolServer is not None:
+            posthog_logger.capture_event(
+                "session_ended", {"session_id": ideProtocolServer.session_id}
+            )
+            if ideProtocolServer.session_id in session_manager.registered_ides:
+                session_manager.registered_ides.pop(ideProtocolServer.session_id)
diff --git a/server/continuedev/server/ide_protocol.py b/server/continuedev/server/ide_protocol.py
new file mode 100644
index 00000000..832dd338
--- /dev/null
+++ b/server/continuedev/server/ide_protocol.py
@@ -0,0 +1,170 @@
+from abc import ABC, abstractmethod
+from typing import Any, Callable, Dict, List, Optional, Union
+
+from fastapi import WebSocket
+
+from ..models.filesystem import RangeInFile, RangeInFileWithContents
+from ..models.filesystem_edit import EditDiff, FileEdit, FileSystemEdit
+
+
+class AbstractIdeProtocolServer(ABC):
+ websocket: WebSocket
+ session_id: Union[str, None]
+ ide_info: Optional[Dict] = None
+
+ @abstractmethod
+    async def handle_json(self, message_type: str, data: Any):
+ """Handle a json message"""
+
+ @abstractmethod
+    async def showSuggestion(self, file_edit: FileEdit):
+ """Show a suggestion to the user"""
+
+ @abstractmethod
+ async def setFileOpen(self, filepath: str, open: bool = True):
+ """Set whether a file is open"""
+
+ @abstractmethod
+ async def showMessage(self, message: str):
+ """Show a message to the user"""
+
+ @abstractmethod
+ async def showVirtualFile(self, name: str, contents: str):
+ """Show a virtual file"""
+
+ @abstractmethod
+ async def setSuggestionsLocked(self, filepath: str, locked: bool = True):
+ """Set whether suggestions are locked"""
+
+ @abstractmethod
+ async def getSessionId(self):
+ """Get a new session ID"""
+
+ @abstractmethod
+ async def showSuggestionsAndWait(self, suggestions: List[FileEdit]) -> bool:
+ """Show suggestions to the user and wait for a response"""
+
+ @abstractmethod
+ def onAcceptRejectSuggestion(self, accepted: bool):
+ """Called when the user accepts or rejects a suggestion"""
+
+ @abstractmethod
+ def onFileSystemUpdate(self, update: FileSystemEdit):
+ """Called when a file system update is received"""
+
+ @abstractmethod
+ def onCloseGUI(self, session_id: str):
+ """Called when a GUI is closed"""
+
+ @abstractmethod
+ def onOpenGUIRequest(self):
+ """Called when a GUI is requested to be opened"""
+
+ @abstractmethod
+ async def getOpenFiles(self) -> List[str]:
+ """Get a list of open files"""
+
+ @abstractmethod
+ async def getVisibleFiles(self) -> List[str]:
+ """Get a list of visible files"""
+
+ @abstractmethod
+ async def getHighlightedCode(self) -> List[RangeInFile]:
+ """Get a list of highlighted code"""
+
+ @abstractmethod
+ async def readFile(self, filepath: str) -> str:
+ """Read a file"""
+
+ @abstractmethod
+ async def readRangeInFile(self, range_in_file: RangeInFile) -> str:
+ """Read a range in a file"""
+
+ @abstractmethod
+ async def editFile(self, edit: FileEdit):
+ """Edit a file"""
+
+ @abstractmethod
+ async def applyFileSystemEdit(self, edit: FileSystemEdit) -> EditDiff:
+ """Apply a file edit"""
+
+ @abstractmethod
+ async def saveFile(self, filepath: str):
+ """Save a file"""
+
+ @abstractmethod
+ async def getUserSecret(self, key: str):
+ """Get a user secret"""
+
+ @abstractmethod
+ async def highlightCode(self, range_in_file: RangeInFile, color: str):
+ """Highlight code"""
+
+ @abstractmethod
+ async def runCommand(self, command: str) -> str:
+ """Run a command"""
+
+ @abstractmethod
+ def onHighlightedCodeUpdate(
+ self,
+ range_in_files: List[RangeInFileWithContents],
+ edit: Optional[bool] = False,
+ ):
+ """Called when highlighted code is updated"""
+
+ @abstractmethod
+ def onDeleteAtIndex(self, index: int):
+ """Called when a step is deleted at a given index"""
+
+ @abstractmethod
+ async def showDiff(self, filepath: str, replacement: str, step_index: int):
+ """Show a diff"""
+
+ @abstractmethod
+ def subscribeToFilesCreated(self, callback: Callable[[List[str]], None]):
+ """Subscribe to files created event"""
+
+ @abstractmethod
+ def subscribeToFilesDeleted(self, callback: Callable[[List[str]], None]):
+ """Subscribe to files deleted event"""
+
+ @abstractmethod
+ def subscribeToFilesRenamed(self, callback: Callable[[List[str], List[str]], None]):
+ """Subscribe to files renamed event"""
+
+ @abstractmethod
+ def subscribeToFileSaved(self, callback: Callable[[str, str], None]):
+ """Subscribe to file saved event"""
+
+ @abstractmethod
+ def onFilesCreated(self, filepaths: List[str]):
+ """Called when files are created"""
+
+ @abstractmethod
+ def onFilesDeleted(self, filepaths: List[str]):
+ """Called when files are deleted"""
+
+ @abstractmethod
+ def onFilesRenamed(self, old_filepaths: List[str], new_filepaths: List[str]):
+ """Called when files are renamed"""
+
+ @abstractmethod
+ def onFileSaved(self, filepath: str, contents: str):
+ """Called when a file is saved"""
+
+ @abstractmethod
+ async def listDirectoryContents(
+ self, directory: str, recursive: bool = False
+ ) -> List[str]:
+ """List directory contents"""
+
+ @abstractmethod
+    async def fileExists(self, filepath: str) -> bool:
+ """Check if a file exists"""
+
+ @abstractmethod
+ async def getTerminalContents(self, commands: int = -1) -> str:
+ """Get the terminal contents"""
+
+ workspace_directory: str
+ unique_id: str
diff --git a/server/continuedev/server/main.py b/server/continuedev/server/main.py
new file mode 100644
index 00000000..c5540d7d
--- /dev/null
+++ b/server/continuedev/server/main.py
@@ -0,0 +1,109 @@
+import argparse
+import asyncio
+import atexit
+from contextlib import asynccontextmanager
+from typing import Optional
+
+import uvicorn
+from fastapi import FastAPI
+from fastapi.middleware.cors import CORSMiddleware
+
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.logging import logger
+from .gui import router as gui_router
+from .ide import router as ide_router
+from .meilisearch_server import start_meilisearch, stop_meilisearch
+from .session_manager import router as sessions_router
+from .session_manager import session_manager
+
+meilisearch_url_global = None
+
+
+@asynccontextmanager
+async def lifespan(app: FastAPI):
+ async def on_err(e):
+ logger.warning(f"Error starting MeiliSearch: {e}")
+
+ try:
+ # start meilisearch without blocking server startup
+ create_async_task(start_meilisearch(url=meilisearch_url_global), on_err)
+ except Exception as e:
+ logger.warning(f"Error starting MeiliSearch: {e}")
+
+ yield
+ stop_meilisearch()
+
+
+app = FastAPI(lifespan=lifespan)
+
+app.include_router(ide_router)
+app.include_router(gui_router)
+app.include_router(sessions_router)
+
+# Add CORS support
+app.add_middleware(
+ CORSMiddleware,
+ allow_origins=["*"],
+ allow_credentials=True,
+ allow_methods=["*"],
+ allow_headers=["*"],
+)
+
+
+@app.get("/health")
+def health():
+ logger.debug("Health check")
+ return {"status": "ok"}
+
+
+def run_server(
+ port: int = 65432, host: str = "127.0.0.1", meilisearch_url: Optional[str] = None
+):
+ try:
+ global meilisearch_url_global
+
+ meilisearch_url_global = meilisearch_url
+
+ config = uvicorn.Config(app, host=host, port=port)
+ server = uvicorn.Server(config)
+ server.run()
+ except PermissionError as e:
+ logger.critical(
+ f"Error starting Continue server: {e}. "
+ f"This means that port {port} is already in use, and is usually caused by another instance of the Continue server already running."
+ )
+ cleanup()
+ raise e
+
+ except Exception as e:
+ logger.critical(f"Error starting Continue server: {e}")
+ cleanup()
+ raise e
+
+
+async def cleanup_coroutine():
+ logger.debug("------ Cleaning Up ------")
+ for session_id in session_manager.sessions:
+ await session_manager.persist_session(session_id)
+
+
+def cleanup():
+ loop = asyncio.new_event_loop()
+ loop.run_until_complete(cleanup_coroutine())
+ loop.close()
+
+
+atexit.register(cleanup)
+
+if __name__ == "__main__":
+ try:
+ # add cli arg for server port
+ parser = argparse.ArgumentParser()
+ parser.add_argument("-p", "--port", help="server port", type=int, default=65432)
+ parser.add_argument("--host", help="server host", type=str, default="127.0.0.1")
+ args = parser.parse_args()
+ except Exception as e:
+ logger.critical(f"Error parsing command line arguments: {e}")
+ raise e
+
+ run_server(args.port, args.host)
diff --git a/server/continuedev/server/meilisearch_server.py b/server/continuedev/server/meilisearch_server.py
new file mode 100644
index 00000000..93761ce1
--- /dev/null
+++ b/server/continuedev/server/meilisearch_server.py
@@ -0,0 +1,196 @@
+import asyncio
+import os
+import shutil
+import subprocess
+from typing import Optional
+
+import aiofiles
+import aiohttp
+import psutil
+from meilisearch_python_async import Client
+
+from ..libs.util.logging import logger
+from ..libs.util.paths import getMeilisearchExePath, getServerFolderPath
+
+
+async def download_file(url: str, filename: str):
+ async with aiohttp.ClientSession() as session:
+ async with session.get(url) as resp:
+ if resp.status == 200:
+ f = await aiofiles.open(filename, mode="wb")
+ await f.write(await resp.read())
+ await f.close()
+
+
+async def download_meilisearch():
+ """
+ Downloads MeiliSearch.
+ """
+
+ serverPath = getServerFolderPath()
+
+ if os.name == "nt":
+ logger.debug("Downloading MeiliSearch for Windows...")
+ download_url = "https://github.com/meilisearch/meilisearch/releases/download/v1.3.2/meilisearch-windows-amd64.exe"
+ download_path = getMeilisearchExePath()
+ if not os.path.exists(download_path):
+ await download_file(download_url, download_path)
+ # subprocess.run(
+ # f"curl -L {download_url} -o {download_path}",
+ # shell=True,
+ # check=True,
+ # cwd=serverPath,
+ # )
+ else:
+ logger.debug("Downloading MeiliSearch with curl...")
+ subprocess.run(
+ "curl -L https://install.meilisearch.com | sh",
+ shell=True,
+ check=True,
+ cwd=serverPath,
+ )
+
+
+async def ensure_meilisearch_installed() -> bool:
+ """
+ Checks if MeiliSearch is installed.
+
+ Returns a bool indicating whether it was installed to begin with.
+ """
+ serverPath = getServerFolderPath()
+ meilisearchPath = getMeilisearchExePath()
+ dumpsPath = os.path.join(serverPath, "dumps")
+ dataMsPath = os.path.join(serverPath, "data.ms")
+
+ paths = [meilisearchPath, dumpsPath, dataMsPath]
+
+ existing_paths = set()
+ non_existing_paths = set()
+ for path in paths:
+ if os.path.exists(path):
+ existing_paths.add(path)
+ else:
+ non_existing_paths.add(path)
+
+ if len(non_existing_paths) > 0:
+ # Clear the meilisearch binary
+ if meilisearchPath in existing_paths:
+ try:
+ os.remove(meilisearchPath)
+ except:
+ pass
+ existing_paths.remove(meilisearchPath)
+
+ await download_meilisearch()
+
+ # Clear the existing directories
+ for p in existing_paths:
+ shutil.rmtree(p, ignore_errors=True)
+
+ return False
+
+ return True
+
+
+meilisearch_process = None
+DEFAULT_MEILISEARCH_URL = "http://localhost:7700"
+meilisearch_url = DEFAULT_MEILISEARCH_URL
+
+
+def get_meilisearch_url():
+ return meilisearch_url
+
+
+async def check_meilisearch_running() -> bool:
+ """
+ Checks if MeiliSearch is running.
+ """
+
+ try:
+ async with Client(meilisearch_url) as client:
+ try:
+ resp = await client.health()
+ if resp.status != "available":
+ return False
+ return True
+ except Exception:
+ return False
+ except Exception:
+ return False
+
+
+async def poll_meilisearch_running(frequency: float = 0.1) -> bool:
+ """
+ Polls MeiliSearch to see if it is running.
+ """
+ while True:
+ if await check_meilisearch_running():
+ return True
+ await asyncio.sleep(frequency)
+
+
+async def start_meilisearch(url: Optional[str] = None):
+ """
+    Start the local MeiliSearch server if it is not already running.
+ """
+ global meilisearch_process, meilisearch_url
+
+ if url is not None:
+ logger.debug("Using MeiliSearch at URL: " + url)
+ meilisearch_url = url
+ return
+
+ serverPath = getServerFolderPath()
+
+ # Check if MeiliSearch is installed, if not download
+ was_already_installed = await ensure_meilisearch_installed()
+
+ # Check if MeiliSearch is running
+ if not await check_meilisearch_running() or not was_already_installed:
+ logger.debug("Starting MeiliSearch...")
+ binary_name = "meilisearch" if os.name == "nt" else "./meilisearch"
+ meilisearch_process = subprocess.Popen(
+ [binary_name, "--no-analytics"],
+ cwd=serverPath,
+ stdout=subprocess.DEVNULL,
+ stderr=subprocess.STDOUT,
+ close_fds=True,
+ start_new_session=True,
+ shell=True,
+ )
+
+ logger.debug("Meilisearch started")
+
+
+def stop_meilisearch():
+ """
+ Stops the MeiliSearch server.
+ """
+ global meilisearch_process
+ if meilisearch_process is not None:
+ meilisearch_process.terminate()
+ meilisearch_process.wait()
+ meilisearch_process = None
+
+
+def kill_proc(port):
+ for proc in psutil.process_iter():
+ try:
+ for conns in proc.connections(kind="inet"):
+ if conns.laddr.port == port:
+                    proc.terminate()  # SIGTERM; use proc.kill() for SIGKILL
+ except psutil.AccessDenied:
+ logger.warning(f"Failed to kill process on port {port} (access denied)")
+ return
+ except psutil.ZombieProcess:
+ logger.warning(f"Failed to kill process on port {port} (zombie process)")
+ return
+ except psutil.NoSuchProcess:
+ logger.warning(f"Failed to kill process on port {port} (no such process)")
+ return
+
+
+async def restart_meilisearch():
+ stop_meilisearch()
+ kill_proc(7700)
+ await start_meilisearch(url=meilisearch_url)
diff --git a/server/continuedev/server/session_manager.py b/server/continuedev/server/session_manager.py
new file mode 100644
index 00000000..f0080104
--- /dev/null
+++ b/server/continuedev/server/session_manager.py
@@ -0,0 +1,192 @@
+import json
+import os
+import traceback
+from typing import Any, Coroutine, Dict, Optional, Union
+from uuid import uuid4
+
+from fastapi import APIRouter, WebSocket
+from fastapi.websockets import WebSocketState
+
+from ..core.autopilot import Autopilot
+from ..core.config import ContinueConfig
+from ..core.main import FullState
+from ..libs.util.create_async_task import create_async_task
+from ..libs.util.logging import logger
+from ..libs.util.paths import (
+ getSessionFilePath,
+ getSessionsFolderPath,
+ getSessionsListFilePath,
+)
+from .ide_protocol import AbstractIdeProtocolServer
+
+router = APIRouter(prefix="/sessions", tags=["sessions"])
+
+
+class Session:
+ session_id: str
+ autopilot: Autopilot
+ # The GUI websocket for the session
+ ws: Union[WebSocket, None]
+
+ def __init__(self, session_id: str, autopilot: Autopilot):
+ self.session_id = session_id
+ self.autopilot = autopilot
+ self.ws = None
+
+
+class SessionManager:
+ sessions: Dict[str, Session] = {}
+ # Mapping of session_id to IDE, where the IDE is still alive
+ registered_ides: Dict[str, AbstractIdeProtocolServer] = {}
+
+ async def get_session(self, session_id: str) -> Session:
+ if session_id not in self.sessions:
+ # Check then whether it is persisted by listing all files in the sessions folder
+ # And only if the IDE is still alive
+ sessions_folder = getSessionsFolderPath()
+ session_files = os.listdir(sessions_folder)
+ if (
+ f"{session_id}.json" in session_files
+ and session_id in self.registered_ides
+ ):
+ if self.registered_ides[session_id].session_id is not None:
+ return await self.new_session(
+ self.registered_ides[session_id], session_id=session_id
+ )
+
+ raise KeyError("Session ID not recognized", session_id)
+ return self.sessions[session_id]
+
+ async def new_session(
+ self,
+ ide: AbstractIdeProtocolServer,
+ session_id: Optional[str] = None,
+ config: Optional[ContinueConfig] = None,
+ ) -> Session:
+ logger.debug(f"New session: {session_id}")
+
+ # Load the persisted state (not being used right now)
+ full_state = None
+ if session_id is not None and os.path.exists(getSessionFilePath(session_id)):
+ with open(getSessionFilePath(session_id), "r") as f:
+ full_state = FullState(**json.load(f))
+
+ # Register the session and ide (do this first so that the autopilot can access the session)
+ autopilot = Autopilot(ide=ide)
+ session_id = session_id or str(uuid4())
+ ide.session_id = session_id
+ session = Session(session_id=session_id, autopilot=autopilot)
+ self.sessions[session_id] = session
+ self.registered_ides[session_id] = ide
+
+ # Set up the autopilot to update the GUI
+ async def on_update(state: FullState):
+ await session_manager.send_ws_data(
+ session_id, "state_update", {"state": state.dict()}
+ )
+
+ autopilot.on_update(on_update)
+
+ # Start the autopilot (must be after session is added to sessions) and the policy
+ try:
+ await autopilot.start(full_state=full_state, config=config)
+ except Exception as e:
+ await ide.on_error(e)
+
+ def on_error(e: Exception) -> Coroutine:
+ err_msg = "\n".join(traceback.format_exception(e))
+ return ide.showMessage(f"Error in Continue server: {err_msg}")
+
+ create_async_task(autopilot.run_policy(), on_error)
+ return session
+
+ async def remove_session(self, session_id: str):
+ logger.debug(f"Removing session: {session_id}")
+ if session_id in self.sessions:
+ if (
+ session_id in self.registered_ides
+ and self.registered_ides[session_id] is not None
+ ):
+ ws_to_close = self.registered_ides[session_id].websocket
+ if (
+ ws_to_close is not None
+ and ws_to_close.client_state != WebSocketState.DISCONNECTED
+ ):
+                    await ws_to_close.close()
+
+ del self.sessions[session_id]
+
+ async def persist_session(self, session_id: str):
+ """Save the session's FullState as a json file"""
+ full_state = await self.sessions[session_id].autopilot.get_full_state()
+ if full_state.session_info is None:
+ return
+
+ with open(getSessionFilePath(session_id), "w") as f:
+ json.dump(full_state.dict(), f)
+
+ # Read and update the sessions list
+ with open(getSessionsListFilePath(), "r") as f:
+ try:
+ sessions_list = json.load(f)
+ except json.JSONDecodeError:
+ raise Exception(
+ f"It looks like there is a JSON formatting error in your sessions.json file ({getSessionsListFilePath()}). Please fix this before creating a new session."
+ )
+
+ session_ids = [s["session_id"] for s in sessions_list]
+ if session_id not in session_ids:
+ sessions_list.append(full_state.session_info.dict())
+
+ for session_info in sessions_list:
+ if "workspace_directory" not in session_info:
+ session_info["workspace_directory"] = ""
+
+ with open(getSessionsListFilePath(), "w") as f:
+ json.dump(sessions_list, f)
+
+ async def load_session(
+ self, old_session_id: str, new_session_id: Optional[str] = None
+ ) -> str:
+ """Load the session's FullState from a json file"""
+
+ # First persist the current state
+ await self.persist_session(old_session_id)
+
+ # Delete the old session, but keep the IDE
+ ide = self.registered_ides[old_session_id]
+ del self.registered_ides[old_session_id]
+
+ # Start the new session
+ new_session = await self.new_session(ide, session_id=new_session_id)
+ return new_session.session_id
+
+ def register_websocket(self, session_id: str, ws: WebSocket):
+ self.sessions[session_id].ws = ws
+ logger.debug(f"Registered websocket for session {session_id}")
+
+ async def send_ws_data(self, session_id: str, message_type: str, data: Any):
+ if session_id not in self.sessions:
+ logger.warning(f"Session {session_id} not found")
+ return
+ if self.sessions[session_id].ws is None:
+ return
+
+ await self.sessions[session_id].ws.send_json(
+ {"messageType": message_type, "data": data}
+ )
+
+
+session_manager = SessionManager()
+
+
+@router.get("/list")
+async def list_sessions():
+ """List all sessions"""
+ sessions_list_file = getSessionsListFilePath()
+ if not os.path.exists(sessions_list_file):
+        logger.debug("Returning empty sessions list")
+ return []
+ sessions = json.load(open(sessions_list_file, "r"))
+    logger.debug(f"Returning sessions list: {sessions}")
+ return sessions
diff --git a/server/dev_requirements.txt b/server/dev_requirements.txt
new file mode 100644
index 00000000..2fa7631b
--- /dev/null
+++ b/server/dev_requirements.txt
@@ -0,0 +1,2 @@
+pytest==7.4.1
+pytest-asyncio==0.21.1 \ No newline at end of file
diff --git a/server/install-dependencies.sh b/server/install-dependencies.sh
new file mode 100755
index 00000000..8f1b5d27
--- /dev/null
+++ b/server/install-dependencies.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+
+# Check if Poetry is installed
+if ! command -v poetry &> /dev/null
+then
+ echo "Poetry not found, installing..."
+ curl -sSL https://install.python-poetry.org | python3 -
+fi
+
+# Install or update dependencies & create .venv if it doesn't exist
+echo "Installing dependencies..."
+poetry install
+
+echo "Running type generation..."
+poetry run typegen
diff --git a/server/main.py b/server/main.py
new file mode 100644
index 00000000..c40f9b96
--- /dev/null
+++ b/server/main.py
@@ -0,0 +1,5 @@
+from .continuedev.server.main import run_server
+
+
+def main():
+ run_server()
diff --git a/server/notes.md b/server/notes.md
new file mode 100644
index 00000000..469d4950
--- /dev/null
+++ b/server/notes.md
@@ -0,0 +1,101 @@
+### List of concrete things that will be built
+
+- Interface with language servers
+- Central place to initiate language model suggestions
+- Abstracted set of tools around language servers and other complicated sources of information
+- Way to keep track of reversible/replayable series of human/LLM changes to code, at better granularity than git
+- A library of prompts and tools to combine them to yield good examples
+- A basic LLM agnostic prompting interface
+- The server or something that can be integrated easily into an extension for any IDE
+- A CLI tool that can be called to make a one-off change on some codebase
+- A default interface that can run at localhost, but which we will also create a desktop application version of
+- Tools to parse LLM output to get file outputs
+- Parse and unparse tracebacks in any language
+- FileEdit/FileDiff creation from LLM output where you don't necessarily know the position of the lines
+- Test generation and tools to use
+- Tools/prompts for summarizing groups of file edits
+- Need to be able to remove/add files. Is there any other reversible action you should be considering? Does git track anything else? Renaming files, or adding/removing folders.
+
+There should be different levels of abstraction at which you can work with these concepts. One of them should be as simple as
+
+- You write a formatted string with FormattedStringPrompter
+- Specify a source for each of the strings with a simple strongly typed enum, like traceback or something else (maybe not realistic or useful; rough sketch below)
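+
+A rough sketch of what that simplest level could look like (all names here are hypothetical; neither `FormattedStringPrompter` nor `PromptSource` exists yet):
+
+```python
+from enum import Enum
+from typing import Dict
+
+
+class PromptSource(Enum):
+    """Strongly typed sources a prompt slot can be filled from."""
+    TRACEBACK = "traceback"
+    HIGHLIGHTED_CODE = "highlighted_code"
+
+
+class FormattedStringPrompter:
+    """A prompt template whose slots are bound to typed sources."""
+
+    def __init__(self, template: str, sources: Dict[str, PromptSource]):
+        self.template = template
+        self.sources = sources
+
+    def render(self, gathered: Dict[PromptSource, str]) -> str:
+        # Fill each slot from whatever was gathered for its source.
+        return self.template.format(
+            **{slot: gathered[src] for slot, src in self.sources.items()}
+        )
+
+
+prompter = FormattedStringPrompter(
+    "Fix this error:\n{tb}\nRelevant code:\n{code}",
+    sources={"tb": PromptSource.TRACEBACK, "code": PromptSource.HIGHLIGHTED_CODE},
+)
+```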
+
+---
+
+- One big thing that happens as you're fixing errors is that you encounter a fork in the road. The language model should be able to present you with both options, and you just click to decide.
+- What I'm doing right now: I write a bunch of code without running it, then have to solve a bunch of errors at once, but small obvious ones. We can do this all automatically.
+
+---
+
+### Current limitations:
+
+- We are always specifying how to use the tools directly instead of letting the AI choose how to use them on its own. You should expand to allow this.
+- We want the history of both user and AI changes to be reflected as a single agent. So you need to watch for user updates to the filesystem. See https://pythonhosted.org/watchdog/quickstart.html#quickstart
+- Language servers are a big deal, you've not done anything about that quite yet.
+ - class to manage all of them, and some way to configure which to run.
+ - call them inside actions? Probably not. Does language server ever make changes? Maybe you just create a python client
+- You want this library to play well with IDEs, which means it should see file changes even before they are saved. What you're building might look more like a language server than anything else then. Just an extended language server. Something else that points at this is your need for watching the filesystem for changes. This is precisely what the LSP does.
+- Prompts don't always transfer well between models. So a prompt should actually have different versions for each model, instead of being just a single string (see the sketch after this list).
+- Kind of weird syntax for creating your own actions, validators, etc... USE ANNOTATIONS
+- Stuff should be serializable
+- We also want to be able to answer questions, not just generate file edits.
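+
+A minimal sketch of the per-model prompt idea from above (the class is made up for illustration):
+
+```python
+from typing import Dict, Optional
+
+
+class ModelSpecificPrompt:
+    """One prompt: a default template plus overrides for specific models."""
+
+    def __init__(self, default: str, overrides: Optional[Dict[str, str]] = None):
+        self.default = default
+        self.overrides = overrides or {}
+
+    def for_model(self, model_name: str) -> str:
+        # Fall back to the default template when a model has no tuned version.
+        return self.overrides.get(model_name, self.default)
+
+
+edit_prompt = ModelSpecificPrompt(
+    default="Rewrite the code below to fix the error.\n{code}",
+    overrides={"gpt-3.5-turbo": "You are an expert programmer. Fix the error in:\n{code}"},
+)
+```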
+
+### Plugins
+
+Plugin is a more general word, which subsumes validator plugins, tool plugins, what else?
+
+### Continue as Extended Language Server
+
+- Language server capable of directly editing the filesystem and running commands.
+- Really just needs to write to files, or suggest file edits. But actually in an ideal future it can do more, like press every button in the IDE
+
+The question now isn't "do we want to use it," but "is it the actual thing we are building?" I've realized that we need 1) to watch files for changes and make suggestions based on them, 2) to be language agnostic, and 3) ideally to plug in to any IDE. All of these things are the bread and butter of LSP. It seems like what we might actually be building is a headless LSP client, or an LSP server with a backdoor, or an LSP server with more endpoints. Trying to figure out where it best fits in.
+
+- We're not totally focusing on responding to small updates, so it might be okay to later build our own endpoint to watch for non-save updates to files.
+- There aren't so many things that need to be done in their own language that aren't already done in LSP, are there?
+
+Overall, I think you should just think of this framework as a way of giving tools to language models and then putting them in a loop to edit, validate, and run code. Tools are the plugins, so you shouldn't have to build all of them, and it should be possible to write them in any language.
+
+The LSP Tool is just another tool. It will be common, so you want it built-in, but it's just another tool.
+The thing about LSP is that there's a lot of state going on, and it needs to be running the whole time.
+An edit in VS Code before saving can just be a hook to watch for, can replace the WatchDog thing.
+
+A cool feature of what we're doing is that we might summarize the changes made by a human, such that they can separate their work into describable and reversible parts.
+
+In essence, our framework makes it easy for people to match up problems to prompts. So people can share their solutions of the form "whenever you see this error, you can run this prompt with these tools, and it will fix it automatically".
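+
+One possible shape for those shareable solutions (a sketch only; none of these names exist in the codebase):
+
+```python
+import re
+from dataclasses import dataclass, field
+from typing import List
+
+
+@dataclass
+class Recipe:
+    error_pattern: str                               # regex matched against tracebacks
+    prompt: str                                      # prompt to run when it matches
+    tools: List[str] = field(default_factory=list)   # tools the prompt is allowed to use
+
+    def matches(self, error_text: str) -> bool:
+        return re.search(self.error_pattern, error_text) is not None
+
+
+missing_module = Recipe(
+    error_pattern=r"ModuleNotFoundError: No module named '(\w+)'",
+    prompt="Add the missing dependency and fix the import.",
+    tools=["run_shell_command", "edit_file"],
+)
+```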
+
+I'm finding that the validators are pretty tightly tied to the actions. Should this be reflected in a single class for both?
+
+---
+
+The final simplification you could make: policies are actions. So the very first action that is always called is actually a policy, but it might be instantiated with a specific action first.
+
+Don't do this right now. But you might want to, and make it DAGs all the way down.
+
+Other consideration: a good amount of work could go into defining the spaces of observations.
+
+"""
+What does LangChain do that's interesting?
+
+- The agent has a get_allowed_tools() method
+- They have an analog of Prompter with PromptTemplate
+- They pass an LLM object to instantiate the Chain object
+
+What doesn't LangChain do right?
+
+- They don't have stopping as an action
+- Not reversible
+"""
+
+Runners could also be pluggable. They are like the middleware for actions (a sketch follows the list below).
+
+- Ask for permission
+- Keep track of substeps in DAG
+- Keep locks on resources, have steps declare the resources they will use / want to lock up
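+
+A sketch of a runner acting as middleware around an action, assuming hypothetical names (`PermissionRunner`, the declared-resources convention) that don't exist yet:
+
+```python
+from typing import Callable, Optional, Set
+
+Action = Callable[[], None]
+
+
+class PermissionRunner:
+    """Middleware that asks before running a step and locks declared resources."""
+
+    def __init__(self, locked: Optional[Set[str]] = None):
+        self.locked = locked or set()
+
+    def run(self, action: Action, resources: Set[str], description: str) -> None:
+        if not resources.isdisjoint(self.locked):
+            raise RuntimeError(f"Resources busy: {resources & self.locked}")
+        if input(f"Run '{description}'? [y/N] ").lower() != "y":
+            return
+        self.locked |= resources        # take the locks the step declared
+        try:
+            action()
+        finally:
+            self.locked -= resources    # always release them, even on failure
+```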
+
+Policies should be generators! This makes it much more natural to group steps. Also means you can probably just define a decorator to a generator function that will turn it into a full policy class.
+This is just syntactic sugar though.
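+
+Roughly what that decorator could look like (the `Policy` base here is a stand-in, not the real class):
+
+```python
+from typing import Callable, Generator
+
+
+class Policy:
+    """Anything with a next_step(observation) -> step interface."""
+
+    def next_step(self, observation):
+        raise NotImplementedError
+
+
+def policy(gen_fn: Callable[[], Generator]):
+    """Decorator: a generator function becomes a Policy subclass."""
+
+    class GeneratorPolicy(Policy):
+        def __init__(self):
+            self._gen = gen_fn()
+            next(self._gen)  # run to the first yield
+
+        def next_step(self, observation):
+            try:
+                return self._gen.send(observation)
+            except StopIteration:
+                return None
+
+    GeneratorPolicy.__name__ = gen_fn.__name__
+    return GeneratorPolicy
+
+
+@policy
+def edit_then_validate():
+    observation = yield            # wait for the first observation
+    observation = yield "EditStep"
+    yield "ValidateStep"
+```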
+
+Can you also make an annotation for actions, so you just have to write the run function? And then automatically add it to the pluggy library somehow (rough sketch below).
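+
+A sketch of that annotation, with a plain dict standing in for whatever the pluggy registration would actually look like:
+
+```python
+# Hypothetical: turn a bare run function into an Action class and register it.
+ACTION_REGISTRY = {}
+
+
+def action(run_fn):
+    cls = type(run_fn.__name__, (), {"run": staticmethod(run_fn)})
+    ACTION_REGISTRY[run_fn.__name__] = cls   # with pluggy this would be a plugin registration
+    return cls
+
+
+@action
+def create_file(path: str, contents: str):
+    with open(path, "w") as f:
+        f.write(contents)
+```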
diff --git a/server/poetry.lock b/server/poetry.lock
new file mode 100644
index 00000000..23ff9094
--- /dev/null
+++ b/server/poetry.lock
@@ -0,0 +1,2414 @@
+# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+
+[[package]]
+name = "aiofiles"
+version = "23.2.1"
+description = "File support for asyncio."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "aiofiles-23.2.1-py3-none-any.whl", hash = "sha256:19297512c647d4b27a2cf7c34caa7e405c0d60b5560618a29a9fe027b18b0107"},
+ {file = "aiofiles-23.2.1.tar.gz", hash = "sha256:84ec2218d8419404abcb9f0c02df3f34c6e0a68ed41072acfb1cef5cbc29051a"},
+]
+
+[[package]]
+name = "aiohttp"
+version = "3.8.5"
+description = "Async http client/server framework (asyncio)"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:a94159871304770da4dd371f4291b20cac04e8c94f11bdea1c3478e557fbe0d8"},
+ {file = "aiohttp-3.8.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:13bf85afc99ce6f9ee3567b04501f18f9f8dbbb2ea11ed1a2e079670403a7c84"},
+ {file = "aiohttp-3.8.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:2ce2ac5708501afc4847221a521f7e4b245abf5178cf5ddae9d5b3856ddb2f3a"},
+ {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:96943e5dcc37a6529d18766597c491798b7eb7a61d48878611298afc1fca946c"},
+ {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2ad5c3c4590bb3cc28b4382f031f3783f25ec223557124c68754a2231d989e2b"},
+ {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0c413c633d0512df4dc7fd2373ec06cc6a815b7b6d6c2f208ada7e9e93a5061d"},
+ {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:df72ac063b97837a80d80dec8d54c241af059cc9bb42c4de68bd5b61ceb37caa"},
+ {file = "aiohttp-3.8.5-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c48c5c0271149cfe467c0ff8eb941279fd6e3f65c9a388c984e0e6cf57538e14"},
+ {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:368a42363c4d70ab52c2c6420a57f190ed3dfaca6a1b19afda8165ee16416a82"},
+ {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7607ec3ce4993464368505888af5beb446845a014bc676d349efec0e05085905"},
+ {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:0d21c684808288a98914e5aaf2a7c6a3179d4df11d249799c32d1808e79503b5"},
+ {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:312fcfbacc7880a8da0ae8b6abc6cc7d752e9caa0051a53d217a650b25e9a691"},
+ {file = "aiohttp-3.8.5-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:ad093e823df03bb3fd37e7dec9d4670c34f9e24aeace76808fc20a507cace825"},
+ {file = "aiohttp-3.8.5-cp310-cp310-win32.whl", hash = "sha256:33279701c04351a2914e1100b62b2a7fdb9a25995c4a104259f9a5ead7ed4802"},
+ {file = "aiohttp-3.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:6e4a280e4b975a2e7745573e3fc9c9ba0d1194a3738ce1cbaa80626cc9b4f4df"},
+ {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ae871a964e1987a943d83d6709d20ec6103ca1eaf52f7e0d36ee1b5bebb8b9b9"},
+ {file = "aiohttp-3.8.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:461908b2578955045efde733719d62f2b649c404189a09a632d245b445c9c975"},
+ {file = "aiohttp-3.8.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:72a860c215e26192379f57cae5ab12b168b75db8271f111019509a1196dfc780"},
+ {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc14be025665dba6202b6a71cfcdb53210cc498e50068bc088076624471f8bb9"},
+ {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8af740fc2711ad85f1a5c034a435782fbd5b5f8314c9a3ef071424a8158d7f6b"},
+ {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:841cd8233cbd2111a0ef0a522ce016357c5e3aff8a8ce92bcfa14cef890d698f"},
+ {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5ed1c46fb119f1b59304b5ec89f834f07124cd23ae5b74288e364477641060ff"},
+ {file = "aiohttp-3.8.5-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:84f8ae3e09a34f35c18fa57f015cc394bd1389bce02503fb30c394d04ee6b938"},
+ {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:62360cb771707cb70a6fd114b9871d20d7dd2163a0feafe43fd115cfe4fe845e"},
+ {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:23fb25a9f0a1ca1f24c0a371523546366bb642397c94ab45ad3aedf2941cec6a"},
+ {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:b0ba0d15164eae3d878260d4c4df859bbdc6466e9e6689c344a13334f988bb53"},
+ {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5d20003b635fc6ae3f96d7260281dfaf1894fc3aa24d1888a9b2628e97c241e5"},
+ {file = "aiohttp-3.8.5-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:0175d745d9e85c40dcc51c8f88c74bfbaef9e7afeeeb9d03c37977270303064c"},
+ {file = "aiohttp-3.8.5-cp311-cp311-win32.whl", hash = "sha256:2e1b1e51b0774408f091d268648e3d57f7260c1682e7d3a63cb00d22d71bb945"},
+ {file = "aiohttp-3.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:043d2299f6dfdc92f0ac5e995dfc56668e1587cea7f9aa9d8a78a1b6554e5755"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:cae533195e8122584ec87531d6df000ad07737eaa3c81209e85c928854d2195c"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4f21e83f355643c345177a5d1d8079f9f28b5133bcd154193b799d380331d5d3"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a7a75ef35f2df54ad55dbf4b73fe1da96f370e51b10c91f08b19603c64004acc"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e2e9839e14dd5308ee773c97115f1e0a1cb1d75cbeeee9f33824fa5144c7634"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c44e65da1de4403d0576473e2344828ef9c4c6244d65cf4b75549bb46d40b8dd"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:78d847e4cde6ecc19125ccbc9bfac4a7ab37c234dd88fbb3c5c524e8e14da543"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:c7a815258e5895d8900aec4454f38dca9aed71085f227537208057853f9d13f2"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:8b929b9bd7cd7c3939f8bcfffa92fae7480bd1aa425279d51a89327d600c704d"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:5db3a5b833764280ed7618393832e0853e40f3d3e9aa128ac0ba0f8278d08649"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:a0215ce6041d501f3155dc219712bc41252d0ab76474615b9700d63d4d9292af"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:fd1ed388ea7fbed22c4968dd64bab0198de60750a25fe8c0c9d4bef5abe13824"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-win32.whl", hash = "sha256:6e6783bcc45f397fdebc118d772103d751b54cddf5b60fbcc958382d7dd64f3e"},
+ {file = "aiohttp-3.8.5-cp36-cp36m-win_amd64.whl", hash = "sha256:b5411d82cddd212644cf9360879eb5080f0d5f7d809d03262c50dad02f01421a"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:01d4c0c874aa4ddfb8098e85d10b5e875a70adc63db91f1ae65a4b04d3344cda"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e5980a746d547a6ba173fd5ee85ce9077e72d118758db05d229044b469d9029a"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2a482e6da906d5e6e653be079b29bc173a48e381600161c9932d89dfae5942ef"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:80bd372b8d0715c66c974cf57fe363621a02f359f1ec81cba97366948c7fc873"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c1161b345c0a444ebcf46bf0a740ba5dcf50612fd3d0528883fdc0eff578006a"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd56db019015b6acfaaf92e1ac40eb8434847d9bf88b4be4efe5bfd260aee692"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:153c2549f6c004d2754cc60603d4668899c9895b8a89397444a9c4efa282aaf4"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:4a01951fabc4ce26ab791da5f3f24dca6d9a6f24121746eb19756416ff2d881b"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:bfb9162dcf01f615462b995a516ba03e769de0789de1cadc0f916265c257e5d8"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:7dde0009408969a43b04c16cbbe252c4f5ef4574ac226bc8815cd7342d2028b6"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:4149d34c32f9638f38f544b3977a4c24052042affa895352d3636fa8bffd030a"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-win32.whl", hash = "sha256:68c5a82c8779bdfc6367c967a4a1b2aa52cd3595388bf5961a62158ee8a59e22"},
+ {file = "aiohttp-3.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:2cf57fb50be5f52bda004b8893e63b48530ed9f0d6c96c84620dc92fe3cd9b9d"},
+ {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eca4bf3734c541dc4f374ad6010a68ff6c6748f00451707f39857f429ca36ced"},
+ {file = "aiohttp-3.8.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1274477e4c71ce8cfe6c1ec2f806d57c015ebf84d83373676036e256bc55d690"},
+ {file = "aiohttp-3.8.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:28c543e54710d6158fc6f439296c7865b29e0b616629767e685a7185fab4a6b9"},
+ {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:910bec0c49637d213f5d9877105d26e0c4a4de2f8b1b29405ff37e9fc0ad52b8"},
+ {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5443910d662db951b2e58eb70b0fbe6b6e2ae613477129a5805d0b66c54b6cb7"},
+ {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2e460be6978fc24e3df83193dc0cc4de46c9909ed92dd47d349a452ef49325b7"},
+ {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:fb1558def481d84f03b45888473fc5a1f35747b5f334ef4e7a571bc0dfcb11f8"},
+ {file = "aiohttp-3.8.5-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34dd0c107799dcbbf7d48b53be761a013c0adf5571bf50c4ecad5643fe9cfcd0"},
+ {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:aa1990247f02a54185dc0dff92a6904521172a22664c863a03ff64c42f9b5410"},
+ {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:0e584a10f204a617d71d359fe383406305a4b595b333721fa50b867b4a0a1548"},
+ {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:a3cf433f127efa43fee6b90ea4c6edf6c4a17109d1d037d1a52abec84d8f2e42"},
+ {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:c11f5b099adafb18e65c2c997d57108b5bbeaa9eeee64a84302c0978b1ec948b"},
+ {file = "aiohttp-3.8.5-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:84de26ddf621d7ac4c975dbea4c945860e08cccde492269db4e1538a6a6f3c35"},
+ {file = "aiohttp-3.8.5-cp38-cp38-win32.whl", hash = "sha256:ab88bafedc57dd0aab55fa728ea10c1911f7e4d8b43e1d838a1739f33712921c"},
+ {file = "aiohttp-3.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:5798a9aad1879f626589f3df0f8b79b3608a92e9beab10e5fda02c8a2c60db2e"},
+ {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:a6ce61195c6a19c785df04e71a4537e29eaa2c50fe745b732aa937c0c77169f3"},
+ {file = "aiohttp-3.8.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:773dd01706d4db536335fcfae6ea2440a70ceb03dd3e7378f3e815b03c97ab51"},
+ {file = "aiohttp-3.8.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f83a552443a526ea38d064588613aca983d0ee0038801bc93c0c916428310c28"},
+ {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1f7372f7341fcc16f57b2caded43e81ddd18df53320b6f9f042acad41f8e049a"},
+ {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea353162f249c8097ea63c2169dd1aa55de1e8fecbe63412a9bc50816e87b761"},
+ {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e5d47ae48db0b2dcf70bc8a3bc72b3de86e2a590fc299fdbbb15af320d2659de"},
+ {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d827176898a2b0b09694fbd1088c7a31836d1a505c243811c87ae53a3f6273c1"},
+ {file = "aiohttp-3.8.5-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3562b06567c06439d8b447037bb655ef69786c590b1de86c7ab81efe1c9c15d8"},
+ {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:4e874cbf8caf8959d2adf572a78bba17cb0e9d7e51bb83d86a3697b686a0ab4d"},
+ {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:6809a00deaf3810e38c628e9a33271892f815b853605a936e2e9e5129762356c"},
+ {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:33776e945d89b29251b33a7e7d006ce86447b2cfd66db5e5ded4e5cd0340585c"},
+ {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:eaeed7abfb5d64c539e2db173f63631455f1196c37d9d8d873fc316470dfbacd"},
+ {file = "aiohttp-3.8.5-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:e91d635961bec2d8f19dfeb41a539eb94bd073f075ca6dae6c8dc0ee89ad6f91"},
+ {file = "aiohttp-3.8.5-cp39-cp39-win32.whl", hash = "sha256:00ad4b6f185ec67f3e6562e8a1d2b69660be43070bd0ef6fcec5211154c7df67"},
+ {file = "aiohttp-3.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:c0a9034379a37ae42dea7ac1e048352d96286626251862e448933c0f59cbd79c"},
+ {file = "aiohttp-3.8.5.tar.gz", hash = "sha256:b9552ec52cc147dbf1944ac7ac98af7602e51ea2dcd076ed194ca3c0d1c7d0bc"},
+]
+
+[package.dependencies]
+aiosignal = ">=1.1.2"
+async-timeout = ">=4.0.0a3,<5.0"
+attrs = ">=17.3.0"
+charset-normalizer = ">=2.0,<4.0"
+frozenlist = ">=1.1.1"
+multidict = ">=4.5,<7.0"
+yarl = ">=1.0,<2.0"
+
+[package.extras]
+speedups = ["Brotli", "aiodns", "cchardet"]
+
+[[package]]
+name = "aiosignal"
+version = "1.3.1"
+description = "aiosignal: a list of registered asynchronous callbacks"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "aiosignal-1.3.1-py3-none-any.whl", hash = "sha256:f8376fb07dd1e86a584e4fcdec80b36b7f81aac666ebc724e2c090300dd83b17"},
+ {file = "aiosignal-1.3.1.tar.gz", hash = "sha256:54cd96e15e1649b75d6c87526a6ff0b6c1b0dd3459f43d9ca11d48c339b68cfc"},
+]
+
+[package.dependencies]
+frozenlist = ">=1.1.0"
+
+[[package]]
+name = "anthropic"
+version = "0.3.11"
+description = "Client library for the anthropic API"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "anthropic-0.3.11-py3-none-any.whl", hash = "sha256:5c81105cd9ee7388bff3fdb739aaddedc83bbae9b95d51c2d50c13b1ad106138"},
+ {file = "anthropic-0.3.11.tar.gz", hash = "sha256:2e0fa5351c9b368cbed0bbd7217deaa9409b82b56afaf244e2196e99eb4fe20e"},
+]
+
+[package.dependencies]
+anyio = ">=3.5.0,<4"
+distro = ">=1.7.0,<2"
+httpx = ">=0.23.0,<1"
+pydantic = ">=1.9.0,<3"
+tokenizers = ">=0.13.0"
+typing-extensions = ">=4.5,<5"
+
+[[package]]
+name = "anyio"
+version = "3.7.1"
+description = "High level compatibility layer for multiple asynchronous event loop implementations"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "anyio-3.7.1-py3-none-any.whl", hash = "sha256:91dee416e570e92c64041bd18b900d1d6fa78dff7048769ce5ac5ddad004fbb5"},
+ {file = "anyio-3.7.1.tar.gz", hash = "sha256:44a3c9aba0f5defa43261a8b3efb97891f2bd7d804e0e1f56419befa1adfc780"},
+]
+
+[package.dependencies]
+exceptiongroup = {version = "*", markers = "python_version < \"3.11\""}
+idna = ">=2.8"
+sniffio = ">=1.1"
+
+[package.extras]
+doc = ["Sphinx", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme (>=1.2.2)", "sphinxcontrib-jquery"]
+test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"]
+trio = ["trio (<0.22)"]
+
+[[package]]
+name = "appdirs"
+version = "1.4.4"
+description = "A small Python module for determining appropriate platform-specific dirs, e.g. a \"user data dir\"."
+optional = false
+python-versions = "*"
+files = [
+ {file = "appdirs-1.4.4-py2.py3-none-any.whl", hash = "sha256:a841dacd6b99318a741b166adb07e19ee71a274450e68237b4650ca1055ab128"},
+ {file = "appdirs-1.4.4.tar.gz", hash = "sha256:7d5d0167b2b1ba821647616af46a749d1c653740dd0d2415100fe26e27afdf41"},
+]
+
+[[package]]
+name = "async-timeout"
+version = "4.0.3"
+description = "Timeout context manager for asyncio programs"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "async-timeout-4.0.3.tar.gz", hash = "sha256:4640d96be84d82d02ed59ea2b7105a0f7b33abe8703703cd0ab0bf87c427522f"},
+ {file = "async_timeout-4.0.3-py3-none-any.whl", hash = "sha256:7405140ff1230c310e51dc27b3145b9092d659ce68ff733fb0cefe3ee42be028"},
+]
+
+[[package]]
+name = "attrs"
+version = "23.1.0"
+description = "Classes Without Boilerplate"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"},
+ {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"},
+]
+
+[package.extras]
+cov = ["attrs[tests]", "coverage[toml] (>=5.3)"]
+dev = ["attrs[docs,tests]", "pre-commit"]
+docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"]
+tests = ["attrs[tests-no-zope]", "zope-interface"]
+tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"]
+
+[[package]]
+name = "backoff"
+version = "2.2.1"
+description = "Function decoration for backoff and retry"
+optional = false
+python-versions = ">=3.7,<4.0"
+files = [
+ {file = "backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8"},
+ {file = "backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba"},
+]
+
+[[package]]
+name = "baron"
+version = "0.10.1"
+description = "Full Syntax Tree for python to make writing refactoring code a realist task"
+optional = false
+python-versions = "*"
+files = [
+ {file = "baron-0.10.1-py2.py3-none-any.whl", hash = "sha256:befb33f4b9e832c7cd1e3cf0eafa6dd3cb6ed4cb2544245147c019936f4e0a8a"},
+ {file = "baron-0.10.1.tar.gz", hash = "sha256:af822ad44d4eb425c8516df4239ac4fdba9fdb398ef77e4924cd7c9b4045bc2f"},
+]
+
+[package.dependencies]
+rply = "*"
+
+[[package]]
+name = "beautifulsoup4"
+version = "4.12.2"
+description = "Screen-scraping library"
+optional = false
+python-versions = ">=3.6.0"
+files = [
+ {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = "sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"},
+ {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"},
+]
+
+[package.dependencies]
+soupsieve = ">1.2"
+
+[package.extras]
+html5lib = ["html5lib"]
+lxml = ["lxml"]
+
+[[package]]
+name = "boltons"
+version = "23.0.0"
+description = "When they're not builtins, they're boltons."
+optional = false
+python-versions = "*"
+files = [
+ {file = "boltons-23.0.0-py2.py3-none-any.whl", hash = "sha256:f716a1b57698a5b19062f3146cb5ce3549904028a2f267c2c0cf584eea3ad75b"},
+ {file = "boltons-23.0.0.tar.gz", hash = "sha256:8c50a71829525835ca3c849c7ed2511610c972b4dddfcd41a4a5447222beb4b0"},
+]
+
+[[package]]
+name = "bs4"
+version = "0.0.1"
+description = "Dummy package for Beautiful Soup"
+optional = false
+python-versions = "*"
+files = [
+ {file = "bs4-0.0.1.tar.gz", hash = "sha256:36ecea1fd7cc5c0c6e4a1ff075df26d50da647b75376626cc186e2212886dd3a"},
+]
+
+[package.dependencies]
+beautifulsoup4 = "*"
+
+[[package]]
+name = "camel-converter"
+version = "3.0.2"
+description = "Converts a string from snake case to camel case or camel case to snake case"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "camel_converter-3.0.2-py3-none-any.whl", hash = "sha256:88e5d91be5b2dff9c0748ba515774c3421088922d9e77c39f8742eb41cb7db88"},
+ {file = "camel_converter-3.0.2.tar.gz", hash = "sha256:3b3d076e824ae979b271b4d497c90514c2b218811f76b0c368fb69da2556fe07"},
+]
+
+[package.extras]
+pydantic = ["pydantic (>=1.8.2)"]
+
+[[package]]
+name = "certifi"
+version = "2023.7.22"
+description = "Python package for providing Mozilla's CA Bundle."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "certifi-2023.7.22-py3-none-any.whl", hash = "sha256:92d6037539857d8206b8f6ae472e8b77db8058fec5937a1ef3f54304089edbb9"},
+ {file = "certifi-2023.7.22.tar.gz", hash = "sha256:539cc1d13202e33ca466e88b2807e29f4c13049d6d87031a3c110744495cb082"},
+]
+
+[[package]]
+name = "cffi"
+version = "1.15.1"
+description = "Foreign Function Interface for Python calling C code."
+optional = false
+python-versions = "*"
+files = [
+ {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"},
+ {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"},
+ {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"},
+ {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"},
+ {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"},
+ {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"},
+ {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"},
+ {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"},
+ {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"},
+ {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"},
+ {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"},
+ {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"},
+ {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"},
+ {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"},
+ {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"},
+ {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"},
+ {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"},
+ {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"},
+ {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"},
+ {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"},
+ {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"},
+ {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"},
+ {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"},
+ {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"},
+ {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"},
+ {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"},
+ {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"},
+ {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"},
+ {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"},
+ {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"},
+ {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"},
+ {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"},
+ {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"},
+]
+
+[package.dependencies]
+pycparser = "*"
+
+[[package]]
+name = "charset-normalizer"
+version = "3.2.0"
+description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet."
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "charset-normalizer-3.2.0.tar.gz", hash = "sha256:3bb3d25a8e6c0aedd251753a79ae98a093c7e7b471faa3aa9a93a81431987ace"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b87549028f680ca955556e3bd57013ab47474c3124dc069faa0b6545b6c9710"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7c70087bfee18a42b4040bb9ec1ca15a08242cf5867c58726530bdf3945672ed"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a103b3a7069b62f5d4890ae1b8f0597618f628b286b03d4bc9195230b154bfa9"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:94aea8eff76ee6d1cdacb07dd2123a68283cb5569e0250feab1240058f53b623"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:db901e2ac34c931d73054d9797383d0f8009991e723dab15109740a63e7f902a"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b0dac0ff919ba34d4df1b6131f59ce95b08b9065233446be7e459f95554c0dc8"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:193cbc708ea3aca45e7221ae58f0fd63f933753a9bfb498a3b474878f12caaad"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:09393e1b2a9461950b1c9a45d5fd251dc7c6f228acab64da1c9c0165d9c7765c"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:baacc6aee0b2ef6f3d308e197b5d7a81c0e70b06beae1f1fcacffdbd124fe0e3"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:bf420121d4c8dce6b889f0e8e4ec0ca34b7f40186203f06a946fa0276ba54029"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:c04a46716adde8d927adb9457bbe39cf473e1e2c2f5d0a16ceb837e5d841ad4f"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:aaf63899c94de41fe3cf934601b0f7ccb6b428c6e4eeb80da72c58eab077b19a"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:d62e51710986674142526ab9f78663ca2b0726066ae26b78b22e0f5e571238dd"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-win32.whl", hash = "sha256:04e57ab9fbf9607b77f7d057974694b4f6b142da9ed4a199859d9d4d5c63fe96"},
+ {file = "charset_normalizer-3.2.0-cp310-cp310-win_amd64.whl", hash = "sha256:48021783bdf96e3d6de03a6e39a1171ed5bd7e8bb93fc84cc649d11490f87cea"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:4957669ef390f0e6719db3613ab3a7631e68424604a7b448f079bee145da6e09"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:46fb8c61d794b78ec7134a715a3e564aafc8f6b5e338417cb19fe9f57a5a9bf2"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f779d3ad205f108d14e99bb3859aa7dd8e9c68874617c72354d7ecaec2a054ac"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f25c229a6ba38a35ae6e25ca1264621cc25d4d38dca2942a7fce0b67a4efe918"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:2efb1bd13885392adfda4614c33d3b68dee4921fd0ac1d3988f8cbb7d589e72a"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:1f30b48dd7fa1474554b0b0f3fdfdd4c13b5c737a3c6284d3cdc424ec0ffff3a"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:246de67b99b6851627d945db38147d1b209a899311b1305dd84916f2b88526c6"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:9bd9b3b31adcb054116447ea22caa61a285d92e94d710aa5ec97992ff5eb7cf3"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:8c2f5e83493748286002f9369f3e6607c565a6a90425a3a1fef5ae32a36d749d"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:3170c9399da12c9dc66366e9d14da8bf7147e1e9d9ea566067bbce7bb74bd9c2"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7a4826ad2bd6b07ca615c74ab91f32f6c96d08f6fcc3902ceeedaec8cdc3bcd6"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:3b1613dd5aee995ec6d4c69f00378bbd07614702a315a2cf6c1d21461fe17c23"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:9e608aafdb55eb9f255034709e20d5a83b6d60c054df0802fa9c9883d0a937aa"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-win32.whl", hash = "sha256:f2a1d0fd4242bd8643ce6f98927cf9c04540af6efa92323e9d3124f57727bfc1"},
+ {file = "charset_normalizer-3.2.0-cp311-cp311-win_amd64.whl", hash = "sha256:681eb3d7e02e3c3655d1b16059fbfb605ac464c834a0c629048a30fad2b27489"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c57921cda3a80d0f2b8aec7e25c8aa14479ea92b5b51b6876d975d925a2ea346"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41b25eaa7d15909cf3ac4c96088c1f266a9a93ec44f87f1d13d4a0e86c81b982"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f058f6963fd82eb143c692cecdc89e075fa0828db2e5b291070485390b2f1c9c"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a7647ebdfb9682b7bb97e2a5e7cb6ae735b1c25008a70b906aecca294ee96cf4"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eef9df1eefada2c09a5e7a40991b9fc6ac6ef20b1372abd48d2794a316dc0449"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e03b8895a6990c9ab2cdcd0f2fe44088ca1c65ae592b8f795c3294af00a461c3"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:ee4006268ed33370957f55bf2e6f4d263eaf4dc3cfc473d1d90baff6ed36ce4a"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c4983bf937209c57240cff65906b18bb35e64ae872da6a0db937d7b4af845dd7"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:3bb7fda7260735efe66d5107fb7e6af6a7c04c7fce9b2514e04b7a74b06bf5dd"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:72814c01533f51d68702802d74f77ea026b5ec52793c791e2da806a3844a46c3"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:70c610f6cbe4b9fce272c407dd9d07e33e6bf7b4aa1b7ffb6f6ded8e634e3592"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-win32.whl", hash = "sha256:a401b4598e5d3f4a9a811f3daf42ee2291790c7f9d74b18d75d6e21dda98a1a1"},
+ {file = "charset_normalizer-3.2.0-cp37-cp37m-win_amd64.whl", hash = "sha256:c0b21078a4b56965e2b12f247467b234734491897e99c1d51cee628da9786959"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:95eb302ff792e12aba9a8b8f8474ab229a83c103d74a750ec0bd1c1eea32e669"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1a100c6d595a7f316f1b6f01d20815d916e75ff98c27a01ae817439ea7726329"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:6339d047dab2780cc6220f46306628e04d9750f02f983ddb37439ca47ced7149"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e4b749b9cc6ee664a3300bb3a273c1ca8068c46be705b6c31cf5d276f8628a94"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a38856a971c602f98472050165cea2cdc97709240373041b69030be15047691f"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:f87f746ee241d30d6ed93969de31e5ffd09a2961a051e60ae6bddde9ec3583aa"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89f1b185a01fe560bc8ae5f619e924407efca2191b56ce749ec84982fc59a32a"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e1c8a2f4c69e08e89632defbfabec2feb8a8d99edc9f89ce33c4b9e36ab63037"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:2f4ac36d8e2b4cc1aa71df3dd84ff8efbe3bfb97ac41242fbcfc053c67434f46"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:a386ebe437176aab38c041de1260cd3ea459c6ce5263594399880bbc398225b2"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:ccd16eb18a849fd8dcb23e23380e2f0a354e8daa0c984b8a732d9cfaba3a776d"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:e6a5bf2cba5ae1bb80b154ed68a3cfa2fa00fde979a7f50d6598d3e17d9ac20c"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:45de3f87179c1823e6d9e32156fb14c1927fcc9aba21433f088fdfb555b77c10"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-win32.whl", hash = "sha256:1000fba1057b92a65daec275aec30586c3de2401ccdcd41f8a5c1e2c87078706"},
+ {file = "charset_normalizer-3.2.0-cp38-cp38-win_amd64.whl", hash = "sha256:8b2c760cfc7042b27ebdb4a43a4453bd829a5742503599144d54a032c5dc7e9e"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:855eafa5d5a2034b4621c74925d89c5efef61418570e5ef9b37717d9c796419c"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:203f0c8871d5a7987be20c72442488a0b8cfd0f43b7973771640fc593f56321f"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e857a2232ba53ae940d3456f7533ce6ca98b81917d47adc3c7fd55dad8fab858"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5e86d77b090dbddbe78867a0275cb4df08ea195e660f1f7f13435a4649e954e5"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c4fb39a81950ec280984b3a44f5bd12819953dc5fa3a7e6fa7a80db5ee853952"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2dee8e57f052ef5353cf608e0b4c871aee320dd1b87d351c28764fc0ca55f9f4"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8700f06d0ce6f128de3ccdbc1acaea1ee264d2caa9ca05daaf492fde7c2a7200"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1920d4ff15ce893210c1f0c0e9d19bfbecb7983c76b33f046c13a8ffbd570252"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c1c76a1743432b4b60ab3358c937a3fe1341c828ae6194108a94c69028247f22"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:f7560358a6811e52e9c4d142d497f1a6e10103d3a6881f18d04dbce3729c0e2c"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:c8063cf17b19661471ecbdb3df1c84f24ad2e389e326ccaf89e3fb2484d8dd7e"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:cd6dbe0238f7743d0efe563ab46294f54f9bc8f4b9bcf57c3c666cc5bc9d1299"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:1249cbbf3d3b04902ff081ffbb33ce3377fa6e4c7356f759f3cd076cc138d020"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-win32.whl", hash = "sha256:6c409c0deba34f147f77efaa67b8e4bb83d2f11c8806405f76397ae5b8c0d1c9"},
+ {file = "charset_normalizer-3.2.0-cp39-cp39-win_amd64.whl", hash = "sha256:7095f6fbfaa55defb6b733cfeb14efaae7a29f0b59d8cf213be4e7ca0b857b80"},
+ {file = "charset_normalizer-3.2.0-py3-none-any.whl", hash = "sha256:8e098148dd37b4ce3baca71fb394c81dc5d9c7728c95df695d2dca218edf40e6"},
+]
+
+[[package]]
+name = "chevron"
+version = "0.14.0"
+description = "Mustache templating language renderer"
+optional = false
+python-versions = "*"
+files = [
+ {file = "chevron-0.14.0-py3-none-any.whl", hash = "sha256:fbf996a709f8da2e745ef763f482ce2d311aa817d287593a5b990d6d6e4f0443"},
+ {file = "chevron-0.14.0.tar.gz", hash = "sha256:87613aafdf6d77b6a90ff073165a61ae5086e21ad49057aa0e53681601800ebf"},
+]
+
+[[package]]
+name = "click"
+version = "8.1.7"
+description = "Composable command line interface toolkit"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "click-8.1.7-py3-none-any.whl", hash = "sha256:ae74fb96c20a0277a1d615f1e4d73c8414f5a98db8b799a7931d1582f3390c28"},
+ {file = "click-8.1.7.tar.gz", hash = "sha256:ca9853ad459e787e2192211578cc907e7594e294c7ccc834310722b41b9ca6de"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[[package]]
+name = "colorama"
+version = "0.4.6"
+description = "Cross-platform colored terminal text."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7"
+files = [
+ {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"},
+ {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"},
+]
+
+[[package]]
+name = "cryptography"
+version = "41.0.3"
+description = "cryptography is a package which provides cryptographic recipes and primitives to Python developers."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_universal2.whl", hash = "sha256:652627a055cb52a84f8c448185922241dd5217443ca194d5739b44612c5e6507"},
+ {file = "cryptography-41.0.3-cp37-abi3-macosx_10_12_x86_64.whl", hash = "sha256:8f09daa483aedea50d249ef98ed500569841d6498aa9c9f4b0531b9964658922"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4fd871184321100fb400d759ad0cddddf284c4b696568204d281c902fc7b0d81"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84537453d57f55a50a5b6835622ee405816999a7113267739a1b4581f83535bd"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_aarch64.whl", hash = "sha256:3fb248989b6363906827284cd20cca63bb1a757e0a2864d4c1682a985e3dca47"},
+ {file = "cryptography-41.0.3-cp37-abi3-manylinux_2_28_x86_64.whl", hash = "sha256:42cb413e01a5d36da9929baa9d70ca90d90b969269e5a12d39c1e0d475010116"},
+ {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:aeb57c421b34af8f9fe830e1955bf493a86a7996cc1338fe41b30047d16e962c"},
+ {file = "cryptography-41.0.3-cp37-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6af1c6387c531cd364b72c28daa29232162010d952ceb7e5ca8e2827526aceae"},
+ {file = "cryptography-41.0.3-cp37-abi3-win32.whl", hash = "sha256:0d09fb5356f975974dbcb595ad2d178305e5050656affb7890a1583f5e02a306"},
+ {file = "cryptography-41.0.3-cp37-abi3-win_amd64.whl", hash = "sha256:a983e441a00a9d57a4d7c91b3116a37ae602907a7618b882c8013b5762e80574"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-macosx_10_12_x86_64.whl", hash = "sha256:5259cb659aa43005eb55a0e4ff2c825ca111a0da1814202c64d28a985d33b087"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:67e120e9a577c64fe1f611e53b30b3e69744e5910ff3b6e97e935aeb96005858"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:7efe8041897fe7a50863e51b77789b657a133c75c3b094e51b5e4b5cec7bf906"},
+ {file = "cryptography-41.0.3-pp310-pypy310_pp73-win_amd64.whl", hash = "sha256:ce785cf81a7bdade534297ef9e490ddff800d956625020ab2ec2780a556c313e"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-macosx_10_12_x86_64.whl", hash = "sha256:57a51b89f954f216a81c9d057bf1a24e2f36e764a1ca9a501a6964eb4a6800dd"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:4c2f0d35703d61002a2bbdcf15548ebb701cfdd83cdc12471d2bae80878a4207"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:23c2d778cf829f7d0ae180600b17e9fceea3c2ef8b31a99e3c694cbbf3a24b84"},
+ {file = "cryptography-41.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:95dd7f261bb76948b52a5330ba5202b91a26fbac13ad0e9fc8a3ac04752058c7"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-macosx_10_12_x86_64.whl", hash = "sha256:41d7aa7cdfded09b3d73a47f429c298e80796c8e825ddfadc84c8a7f12df212d"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_aarch64.whl", hash = "sha256:d0d651aa754ef58d75cec6edfbd21259d93810b73f6ec246436a21b7841908de"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:ab8de0d091acbf778f74286f4989cf3d1528336af1b59f3e5d2ebca8b5fe49e1"},
+ {file = "cryptography-41.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a74fbcdb2a0d46fe00504f571a2a540532f4c188e6ccf26f1f178480117b33c4"},
+ {file = "cryptography-41.0.3.tar.gz", hash = "sha256:6d192741113ef5e30d89dcb5b956ef4e1578f304708701b8b73d38e3e1461f34"},
+]
+
+[package.dependencies]
+cffi = ">=1.12"
+
+[package.extras]
+docs = ["sphinx (>=5.3.0)", "sphinx-rtd-theme (>=1.1.1)"]
+docstest = ["pyenchant (>=1.6.11)", "sphinxcontrib-spelling (>=4.0.1)", "twine (>=1.12.0)"]
+nox = ["nox"]
+pep8test = ["black", "check-sdist", "mypy", "ruff"]
+sdist = ["build"]
+ssh = ["bcrypt (>=3.1.5)"]
+test = ["pretend", "pytest (>=6.2.0)", "pytest-benchmark", "pytest-cov", "pytest-xdist"]
+test-randomorder = ["pytest-randomly"]
+
+[[package]]
+name = "deprecated"
+version = "1.2.14"
+description = "Python @deprecated decorator to deprecate old python classes, functions or methods."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "Deprecated-1.2.14-py2.py3-none-any.whl", hash = "sha256:6fac8b097794a90302bdbb17b9b815e732d3c4720583ff1b198499d78470466c"},
+ {file = "Deprecated-1.2.14.tar.gz", hash = "sha256:e5323eb936458dccc2582dc6f9c322c852a775a27065ff2b0c4970b9d53d01b3"},
+]
+
+[package.dependencies]
+wrapt = ">=1.10,<2"
+
+[package.extras]
+dev = ["PyTest", "PyTest-Cov", "bump2version (<1)", "sphinx (<2)", "tox"]
+
+[[package]]
+name = "directory-tree"
+version = "0.0.3.1"
+description = "Utility Package that Displays out the Tree Structure of a Particular Directory."
+optional = false
+python-versions = "*"
+files = [
+ {file = "directory_tree-0.0.3.1-py3-none-any.whl", hash = "sha256:72411e4f1534afaaccadb21fc082c727a680b6a74e8d21a1406ffbe51389cd85"},
+ {file = "directory_tree-0.0.3.1.tar.gz", hash = "sha256:e4f40d60a45c4cdc0bc8e9ee29311f554dee6c969241c0eef8bcd92b4d4bcd4a"},
+]
+
+[package.extras]
+dev = ["pytest (>=3.7)"]
+
+[[package]]
+name = "distro"
+version = "1.8.0"
+description = "Distro - an OS platform information API"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "distro-1.8.0-py3-none-any.whl", hash = "sha256:99522ca3e365cac527b44bde033f64c6945d90eb9f769703caaec52b09bbd3ff"},
+ {file = "distro-1.8.0.tar.gz", hash = "sha256:02e111d1dc6a50abb8eed6bf31c3e48ed8b0830d1ea2a1b78c61765c2513fdd8"},
+]
+
+[[package]]
+name = "docstring-to-markdown"
+version = "0.12"
+description = "On the fly conversion of Python docstrings to markdown"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "docstring-to-markdown-0.12.tar.gz", hash = "sha256:40004224b412bd6f64c0f3b85bb357a41341afd66c4b4896709efa56827fb2bb"},
+ {file = "docstring_to_markdown-0.12-py3-none-any.whl", hash = "sha256:7df6311a887dccf9e770f51242ec002b19f0591994c4783be49d24cdc1df3737"},
+]
+
+[[package]]
+name = "exceptiongroup"
+version = "1.1.3"
+description = "Backport of PEP 654 (exception groups)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "exceptiongroup-1.1.3-py3-none-any.whl", hash = "sha256:343280667a4585d195ca1cf9cef84a4e178c4b6cf2274caef9859782b567d5e3"},
+ {file = "exceptiongroup-1.1.3.tar.gz", hash = "sha256:097acd85d473d75af5bb98e41b61ff7fe35efe6675e4f9370ec6ec5126d160e9"},
+]
+
+[package.extras]
+test = ["pytest (>=6)"]
+
+[[package]]
+name = "fastapi"
+version = "0.95.2"
+description = "FastAPI framework, high performance, easy to learn, fast to code, ready for production"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "fastapi-0.95.2-py3-none-any.whl", hash = "sha256:d374dbc4ef2ad9b803899bd3360d34c534adc574546e25314ab72c0c4411749f"},
+ {file = "fastapi-0.95.2.tar.gz", hash = "sha256:4d9d3e8c71c73f11874bcf5e33626258d143252e329a01002f767306c64fb982"},
+]
+
+[package.dependencies]
+pydantic = ">=1.6.2,<1.7 || >1.7,<1.7.1 || >1.7.1,<1.7.2 || >1.7.2,<1.7.3 || >1.7.3,<1.8 || >1.8,<1.8.1 || >1.8.1,<2.0.0"
+starlette = ">=0.27.0,<0.28.0"
+
+[package.extras]
+all = ["email-validator (>=1.1.1)", "httpx (>=0.23.0)", "itsdangerous (>=1.1.0)", "jinja2 (>=2.11.2)", "orjson (>=3.2.1)", "python-multipart (>=0.0.5)", "pyyaml (>=5.3.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0)", "uvicorn[standard] (>=0.12.0)"]
+dev = ["pre-commit (>=2.17.0,<3.0.0)", "ruff (==0.0.138)", "uvicorn[standard] (>=0.12.0,<0.21.0)"]
+doc = ["mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-markdownextradata-plugin (>=0.1.7,<0.3.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pyyaml (>=5.3.1,<7.0.0)", "typer-cli (>=0.0.13,<0.0.14)", "typer[all] (>=0.6.1,<0.8.0)"]
+test = ["anyio[trio] (>=3.2.1,<4.0.0)", "black (==23.1.0)", "coverage[toml] (>=6.5.0,<8.0)", "databases[sqlite] (>=0.3.2,<0.7.0)", "email-validator (>=1.1.1,<2.0.0)", "flask (>=1.1.2,<3.0.0)", "httpx (>=0.23.0,<0.24.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.982)", "orjson (>=3.2.1,<4.0.0)", "passlib[bcrypt] (>=1.7.2,<2.0.0)", "peewee (>=3.13.3,<4.0.0)", "pytest (>=7.1.3,<8.0.0)", "python-jose[cryptography] (>=3.3.0,<4.0.0)", "python-multipart (>=0.0.5,<0.0.7)", "pyyaml (>=5.3.1,<7.0.0)", "ruff (==0.0.138)", "sqlalchemy (>=1.3.18,<1.4.43)", "types-orjson (==3.6.2)", "types-ujson (==5.7.0.1)", "ujson (>=4.0.1,!=4.0.2,!=4.1.0,!=4.2.0,!=4.3.0,!=5.0.0,!=5.1.0,<6.0.0)"]
+
+[[package]]
+name = "filelock"
+version = "3.12.3"
+description = "A platform independent file lock."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "filelock-3.12.3-py3-none-any.whl", hash = "sha256:f067e40ccc40f2b48395a80fcbd4728262fab54e232e090a4063ab804179efeb"},
+ {file = "filelock-3.12.3.tar.gz", hash = "sha256:0ecc1dd2ec4672a10c8550a8182f1bd0c0a5088470ecd5a125e45f49472fac3d"},
+]
+
+[package.dependencies]
+typing-extensions = {version = ">=4.7.1", markers = "python_version < \"3.11\""}
+
+[package.extras]
+docs = ["furo (>=2023.7.26)", "sphinx (>=7.1.2)", "sphinx-autodoc-typehints (>=1.24)"]
+testing = ["covdefaults (>=2.3)", "coverage (>=7.3)", "diff-cover (>=7.7)", "pytest (>=7.4)", "pytest-cov (>=4.1)", "pytest-mock (>=3.11.1)", "pytest-timeout (>=2.1)"]
+
+[[package]]
+name = "frozenlist"
+version = "1.4.0"
+description = "A list-like structure which implements collections.abc.MutableSequence"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:764226ceef3125e53ea2cb275000e309c0aa5464d43bd72abd661e27fffc26ab"},
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d6484756b12f40003c6128bfcc3fa9f0d49a687e171186c2d85ec82e3758c559"},
+ {file = "frozenlist-1.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9ac08e601308e41eb533f232dbf6b7e4cea762f9f84f6357136eed926c15d12c"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d081f13b095d74b67d550de04df1c756831f3b83dc9881c38985834387487f1b"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:71932b597f9895f011f47f17d6428252fc728ba2ae6024e13c3398a087c2cdea"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:981b9ab5a0a3178ff413bca62526bb784249421c24ad7381e39d67981be2c326"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e41f3de4df3e80de75845d3e743b3f1c4c8613c3997a912dbf0229fc61a8b963"},
+ {file = "frozenlist-1.4.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6918d49b1f90821e93069682c06ffde41829c346c66b721e65a5c62b4bab0300"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:0e5c8764c7829343d919cc2dfc587a8db01c4f70a4ebbc49abde5d4b158b007b"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:8d0edd6b1c7fb94922bf569c9b092ee187a83f03fb1a63076e7774b60f9481a8"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:e29cda763f752553fa14c68fb2195150bfab22b352572cb36c43c47bedba70eb"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:0c7c1b47859ee2cac3846fde1c1dc0f15da6cec5a0e5c72d101e0f83dcb67ff9"},
+ {file = "frozenlist-1.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:901289d524fdd571be1c7be054f48b1f88ce8dddcbdf1ec698b27d4b8b9e5d62"},
+ {file = "frozenlist-1.4.0-cp310-cp310-win32.whl", hash = "sha256:1a0848b52815006ea6596c395f87449f693dc419061cc21e970f139d466dc0a0"},
+ {file = "frozenlist-1.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:b206646d176a007466358aa21d85cd8600a415c67c9bd15403336c331a10d956"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:de343e75f40e972bae1ef6090267f8260c1446a1695e77096db6cfa25e759a95"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ad2a9eb6d9839ae241701d0918f54c51365a51407fd80f6b8289e2dfca977cc3"},
+ {file = "frozenlist-1.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:bd7bd3b3830247580de99c99ea2a01416dfc3c34471ca1298bccabf86d0ff4dc"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bdf1847068c362f16b353163391210269e4f0569a3c166bc6a9f74ccbfc7e839"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:38461d02d66de17455072c9ba981d35f1d2a73024bee7790ac2f9e361ef1cd0c"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5a32087d720c608f42caed0ef36d2b3ea61a9d09ee59a5142d6070da9041b8f"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:dd65632acaf0d47608190a71bfe46b209719bf2beb59507db08ccdbe712f969b"},
+ {file = "frozenlist-1.4.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:261b9f5d17cac914531331ff1b1d452125bf5daa05faf73b71d935485b0c510b"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b89ac9768b82205936771f8d2eb3ce88503b1556324c9f903e7156669f521472"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:008eb8b31b3ea6896da16c38c1b136cb9fec9e249e77f6211d479db79a4eaf01"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:e74b0506fa5aa5598ac6a975a12aa8928cbb58e1f5ac8360792ef15de1aa848f"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:490132667476f6781b4c9458298b0c1cddf237488abd228b0b3650e5ecba7467"},
+ {file = "frozenlist-1.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:76d4711f6f6d08551a7e9ef28c722f4a50dd0fc204c56b4bcd95c6cc05ce6fbb"},
+ {file = "frozenlist-1.4.0-cp311-cp311-win32.whl", hash = "sha256:a02eb8ab2b8f200179b5f62b59757685ae9987996ae549ccf30f983f40602431"},
+ {file = "frozenlist-1.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:515e1abc578dd3b275d6a5114030b1330ba044ffba03f94091842852f806f1c1"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:f0ed05f5079c708fe74bf9027e95125334b6978bf07fd5ab923e9e55e5fbb9d3"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:ca265542ca427bf97aed183c1676e2a9c66942e822b14dc6e5f42e038f92a503"},
+ {file = "frozenlist-1.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:491e014f5c43656da08958808588cc6c016847b4360e327a62cb308c791bd2d9"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:17ae5cd0f333f94f2e03aaf140bb762c64783935cc764ff9c82dff626089bebf"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1e78fb68cf9c1a6aa4a9a12e960a5c9dfbdb89b3695197aa7064705662515de2"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d5655a942f5f5d2c9ed93d72148226d75369b4f6952680211972a33e59b1dfdc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c11b0746f5d946fecf750428a95f3e9ebe792c1ee3b1e96eeba145dc631a9672"},
+ {file = "frozenlist-1.4.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e66d2a64d44d50d2543405fb183a21f76b3b5fd16f130f5c99187c3fb4e64919"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:88f7bc0fcca81f985f78dd0fa68d2c75abf8272b1f5c323ea4a01a4d7a614efc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:5833593c25ac59ede40ed4de6d67eb42928cca97f26feea219f21d0ed0959b79"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:fec520865f42e5c7f050c2a79038897b1c7d1595e907a9e08e3353293ffc948e"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:b826d97e4276750beca7c8f0f1a4938892697a6bcd8ec8217b3312dad6982781"},
+ {file = "frozenlist-1.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:ceb6ec0a10c65540421e20ebd29083c50e6d1143278746a4ef6bcf6153171eb8"},
+ {file = "frozenlist-1.4.0-cp38-cp38-win32.whl", hash = "sha256:2b8bcf994563466db019fab287ff390fffbfdb4f905fc77bc1c1d604b1c689cc"},
+ {file = "frozenlist-1.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:a6c8097e01886188e5be3e6b14e94ab365f384736aa1fca6a0b9e35bd4a30bc7"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:6c38721585f285203e4b4132a352eb3daa19121a035f3182e08e437cface44bf"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:a0c6da9aee33ff0b1a451e867da0c1f47408112b3391dd43133838339e410963"},
+ {file = "frozenlist-1.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:93ea75c050c5bb3d98016b4ba2497851eadf0ac154d88a67d7a6816206f6fa7f"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f61e2dc5ad442c52b4887f1fdc112f97caeff4d9e6ebe78879364ac59f1663e1"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa384489fefeb62321b238e64c07ef48398fe80f9e1e6afeff22e140e0850eef"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:10ff5faaa22786315ef57097a279b833ecab1a0bfb07d604c9cbb1c4cdc2ed87"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:007df07a6e3eb3e33e9a1fe6a9db7af152bbd8a185f9aaa6ece10a3529e3e1c6"},
+ {file = "frozenlist-1.4.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7f4f399d28478d1f604c2ff9119907af9726aed73680e5ed1ca634d377abb087"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:c5374b80521d3d3f2ec5572e05adc94601985cc526fb276d0c8574a6d749f1b3"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:ce31ae3e19f3c902de379cf1323d90c649425b86de7bbdf82871b8a2a0615f3d"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:7211ef110a9194b6042449431e08c4d80c0481e5891e58d429df5899690511c2"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:556de4430ce324c836789fa4560ca62d1591d2538b8ceb0b4f68fb7b2384a27a"},
+ {file = "frozenlist-1.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7645a8e814a3ee34a89c4a372011dcd817964ce8cb273c8ed6119d706e9613e3"},
+ {file = "frozenlist-1.4.0-cp39-cp39-win32.whl", hash = "sha256:19488c57c12d4e8095a922f328df3f179c820c212940a498623ed39160bc3c2f"},
+ {file = "frozenlist-1.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:6221d84d463fb110bdd7619b69cb43878a11d51cbb9394ae3105d082d5199167"},
+ {file = "frozenlist-1.4.0.tar.gz", hash = "sha256:09163bdf0b2907454042edb19f887c6d33806adc71fbd54afc14908bfdc22251"},
+]
+
+[[package]]
+name = "fsspec"
+version = "2023.9.0"
+description = "File-system specification"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "fsspec-2023.9.0-py3-none-any.whl", hash = "sha256:d55b9ab2a4c1f2b759888ae9f93e40c2aa72c0808132e87e282b549f9e6c4254"},
+ {file = "fsspec-2023.9.0.tar.gz", hash = "sha256:4dbf0fefee035b7c6d3bbbe6bc99b2f201f40d4dca95b67c2b719be77bcd917f"},
+]
+
+[package.extras]
+abfs = ["adlfs"]
+adl = ["adlfs"]
+arrow = ["pyarrow (>=1)"]
+dask = ["dask", "distributed"]
+devel = ["pytest", "pytest-cov"]
+dropbox = ["dropbox", "dropboxdrivefs", "requests"]
+full = ["adlfs", "aiohttp (!=4.0.0a0,!=4.0.0a1)", "dask", "distributed", "dropbox", "dropboxdrivefs", "fusepy", "gcsfs", "libarchive-c", "ocifs", "panel", "paramiko", "pyarrow (>=1)", "pygit2", "requests", "s3fs", "smbprotocol", "tqdm"]
+fuse = ["fusepy"]
+gcs = ["gcsfs"]
+git = ["pygit2"]
+github = ["requests"]
+gs = ["gcsfs"]
+gui = ["panel"]
+hdfs = ["pyarrow (>=1)"]
+http = ["aiohttp (!=4.0.0a0,!=4.0.0a1)", "requests"]
+libarchive = ["libarchive-c"]
+oci = ["ocifs"]
+s3 = ["s3fs"]
+sftp = ["paramiko"]
+smb = ["smbprotocol"]
+ssh = ["paramiko"]
+tqdm = ["tqdm"]
+
+[[package]]
+name = "h11"
+version = "0.14.0"
+description = "A pure-Python, bring-your-own-I/O implementation of HTTP/1.1"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "h11-0.14.0-py3-none-any.whl", hash = "sha256:e3fe4ac4b851c468cc8363d500db52c2ead036020723024a109d37346efaa761"},
+ {file = "h11-0.14.0.tar.gz", hash = "sha256:8f19fbbe99e72420ff35c00b27a34cb9937e902a8b810e2c88300c6f0a3b699d"},
+]
+
+[[package]]
+name = "httpcore"
+version = "0.17.3"
+description = "A minimal low-level HTTP client."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "httpcore-0.17.3-py3-none-any.whl", hash = "sha256:c2789b767ddddfa2a5782e3199b2b7f6894540b17b16ec26b2c4d8e103510b87"},
+ {file = "httpcore-0.17.3.tar.gz", hash = "sha256:a6f30213335e34c1ade7be6ec7c47f19f50c56db36abef1a9dfa3815b1cb3888"},
+]
+
+[package.dependencies]
+anyio = ">=3.0,<5.0"
+certifi = "*"
+h11 = ">=0.13,<0.15"
+sniffio = "==1.*"
+
+[package.extras]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "httpx"
+version = "0.24.1"
+description = "The next generation HTTP client."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "httpx-0.24.1-py3-none-any.whl", hash = "sha256:06781eb9ac53cde990577af654bd990a4949de37a28bdb4a230d434f3a30b9bd"},
+ {file = "httpx-0.24.1.tar.gz", hash = "sha256:5853a43053df830c20f8110c5e69fe44d035d850b2dfe795e196f00fdb774bdd"},
+]
+
+[package.dependencies]
+certifi = "*"
+httpcore = ">=0.15.0,<0.18.0"
+idna = "*"
+sniffio = "*"
+
+[package.extras]
+brotli = ["brotli", "brotlicffi"]
+cli = ["click (==8.*)", "pygments (==2.*)", "rich (>=10,<14)"]
+http2 = ["h2 (>=3,<5)"]
+socks = ["socksio (==1.*)"]
+
+[[package]]
+name = "huggingface-hub"
+version = "0.16.4"
+description = "Client library to download and publish models, datasets and other repos on the huggingface.co hub"
+optional = false
+python-versions = ">=3.7.0"
+files = [
+ {file = "huggingface_hub-0.16.4-py3-none-any.whl", hash = "sha256:0d3df29932f334fead024afc7cb4cc5149d955238b8b5e42dcf9740d6995a349"},
+ {file = "huggingface_hub-0.16.4.tar.gz", hash = "sha256:608c7d4f3d368b326d1747f91523dbd1f692871e8e2e7a4750314a2dd8b63e14"},
+]
+
+[package.dependencies]
+filelock = "*"
+fsspec = "*"
+packaging = ">=20.9"
+pyyaml = ">=5.1"
+requests = "*"
+tqdm = ">=4.42.1"
+typing-extensions = ">=3.7.4.3"
+
+[package.extras]
+all = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
+cli = ["InquirerPy (==0.3.4)"]
+dev = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "black (>=23.1,<24.0)", "gradio", "jedi", "mypy (==0.982)", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "ruff (>=0.0.241)", "soundfile", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3", "urllib3 (<2.0)"]
+fastai = ["fastai (>=2.4)", "fastcore (>=1.3.27)", "toml"]
+inference = ["aiohttp", "pydantic"]
+quality = ["black (>=23.1,<24.0)", "mypy (==0.982)", "ruff (>=0.0.241)"]
+tensorflow = ["graphviz", "pydot", "tensorflow"]
+testing = ["InquirerPy (==0.3.4)", "Jinja2", "Pillow", "aiohttp", "gradio", "jedi", "numpy", "pydantic", "pytest", "pytest-asyncio", "pytest-cov", "pytest-env", "pytest-vcr", "pytest-xdist", "soundfile", "urllib3 (<2.0)"]
+torch = ["torch"]
+typing = ["pydantic", "types-PyYAML", "types-requests", "types-simplejson", "types-toml", "types-tqdm", "types-urllib3"]
+
+[[package]]
+name = "idna"
+version = "3.4"
+description = "Internationalized Domain Names in Applications (IDNA)"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"},
+ {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"},
+]
+
+[[package]]
+name = "importlib-resources"
+version = "6.0.1"
+description = "Read resources from Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "importlib_resources-6.0.1-py3-none-any.whl", hash = "sha256:134832a506243891221b88b4ae1213327eea96ceb4e407a00d790bb0626f45cf"},
+ {file = "importlib_resources-6.0.1.tar.gz", hash = "sha256:4359457e42708462b9626a04657c6208ad799ceb41e5c58c57ffa0e6a098a5d4"},
+]
+
+[package.dependencies]
+zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
+
+[[package]]
+name = "iniconfig"
+version = "2.0.0"
+description = "brain-dead simple config-ini parsing"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"},
+ {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"},
+]
+
+[[package]]
+name = "jedi"
+version = "0.18.2"
+description = "An autocompletion tool for Python that can be used for text editors."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"},
+ {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"},
+]
+
+[package.dependencies]
+parso = ">=0.8.0,<0.9.0"
+
+[package.extras]
+docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"]
+qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
+testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"]
+
+[[package]]
+name = "jsonref"
+version = "1.1.0"
+description = "jsonref is a library for automatic dereferencing of JSON Reference objects for Python."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "jsonref-1.1.0-py3-none-any.whl", hash = "sha256:590dc7773df6c21cbf948b5dac07a72a251db28b0238ceecce0a2abfa8ec30a9"},
+ {file = "jsonref-1.1.0.tar.gz", hash = "sha256:32fe8e1d85af0fdefbebce950af85590b22b60f9e95443176adbde4e1ecea552"},
+]
+
+[[package]]
+name = "jsonschema"
+version = "4.19.0"
+description = "An implementation of JSON Schema validation for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema-4.19.0-py3-none-any.whl", hash = "sha256:043dc26a3845ff09d20e4420d6012a9c91c9aa8999fa184e7efcfeccb41e32cb"},
+ {file = "jsonschema-4.19.0.tar.gz", hash = "sha256:6e1e7569ac13be8139b2dd2c21a55d350066ee3f80df06c608b398cdc6f30e8f"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+jsonschema-specifications = ">=2023.03.6"
+pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""}
+referencing = ">=0.28.4"
+rpds-py = ">=0.7.1"
+
+[package.extras]
+format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"]
+format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"]
+
+[[package]]
+name = "jsonschema-specifications"
+version = "2023.7.1"
+description = "The JSON Schema meta-schemas and vocabularies, exposed as a Registry"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "jsonschema_specifications-2023.7.1-py3-none-any.whl", hash = "sha256:05adf340b659828a004220a9613be00fa3f223f2b82002e273dee62fd50524b1"},
+ {file = "jsonschema_specifications-2023.7.1.tar.gz", hash = "sha256:c91a50404e88a1f6ba40636778e2ee08f6e24c5613fe4c53ac24578a5a7f72bb"},
+]
+
+[package.dependencies]
+importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""}
+referencing = ">=0.28.0"
+
+[[package]]
+name = "meilisearch-python-async"
+version = "1.7.0"
+description = "A Python async client for the Meilisearch API"
+optional = false
+python-versions = ">=3.8,<4.0"
+files = [
+ {file = "meilisearch_python_async-1.7.0-py3-none-any.whl", hash = "sha256:ab70484481b8b331b01fd5561bb727e78e65a8ea40705fb21f1db7bd524bcb52"},
+ {file = "meilisearch_python_async-1.7.0.tar.gz", hash = "sha256:f3ba9a13f600df46ead1ec1302ee896261723a7368744a05c29eb6a8110cd88f"},
+]
+
+[package.dependencies]
+aiofiles = ">=0.7"
+camel-converter = ">=1.0.0"
+httpx = ">=0.17"
+pydantic = ">=1.8"
+PyJWT = ">=2.3.0"
+
+[[package]]
+name = "monotonic"
+version = "1.6"
+description = "An implementation of time.monotonic() for Python 2 & < 3.3"
+optional = false
+python-versions = "*"
+files = [
+ {file = "monotonic-1.6-py2.py3-none-any.whl", hash = "sha256:68687e19a14f11f26d140dd5c86f3dba4bf5df58003000ed467e0e2a69bca96c"},
+ {file = "monotonic-1.6.tar.gz", hash = "sha256:3a55207bcfed53ddd5c5bae174524062935efed17792e9de2ad0205ce9ad63f7"},
+]
+
+[[package]]
+name = "multidict"
+version = "6.0.4"
+description = "multidict implementation"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:0b1a97283e0c85772d613878028fec909f003993e1007eafa715b24b377cb9b8"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:eeb6dcc05e911516ae3d1f207d4b0520d07f54484c49dfc294d6e7d63b734171"},
+ {file = "multidict-6.0.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:d6d635d5209b82a3492508cf5b365f3446afb65ae7ebd755e70e18f287b0adf7"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c048099e4c9e9d615545e2001d3d8a4380bd403e1a0578734e0d31703d1b0c0b"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ea20853c6dbbb53ed34cb4d080382169b6f4554d394015f1bef35e881bf83547"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:16d232d4e5396c2efbbf4f6d4df89bfa905eb0d4dc5b3549d872ab898451f569"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:36c63aaa167f6c6b04ef2c85704e93af16c11d20de1d133e39de6a0e84582a93"},
+ {file = "multidict-6.0.4-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:64bdf1086b6043bf519869678f5f2757f473dee970d7abf6da91ec00acb9cb98"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:43644e38f42e3af682690876cff722d301ac585c5b9e1eacc013b7a3f7b696a0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:7582a1d1030e15422262de9f58711774e02fa80df0d1578995c76214f6954988"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:ddff9c4e225a63a5afab9dd15590432c22e8057e1a9a13d28ed128ecf047bbdc"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:ee2a1ece51b9b9e7752e742cfb661d2a29e7bcdba2d27e66e28a99f1890e4fa0"},
+ {file = "multidict-6.0.4-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a2e4369eb3d47d2034032a26c7a80fcb21a2cb22e1173d761a162f11e562caa5"},
+ {file = "multidict-6.0.4-cp310-cp310-win32.whl", hash = "sha256:574b7eae1ab267e5f8285f0fe881f17efe4b98c39a40858247720935b893bba8"},
+ {file = "multidict-6.0.4-cp310-cp310-win_amd64.whl", hash = "sha256:4dcbb0906e38440fa3e325df2359ac6cb043df8e58c965bb45f4e406ecb162cc"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:0dfad7a5a1e39c53ed00d2dd0c2e36aed4650936dc18fd9a1826a5ae1cad6f03"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:64da238a09d6039e3bd39bb3aee9c21a5e34f28bfa5aa22518581f910ff94af3"},
+ {file = "multidict-6.0.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ff959bee35038c4624250473988b24f846cbeb2c6639de3602c073f10410ceba"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:01a3a55bd90018c9c080fbb0b9f4891db37d148a0a18722b42f94694f8b6d4c9"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c5cb09abb18c1ea940fb99360ea0396f34d46566f157122c92dfa069d3e0e982"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:666daae833559deb2d609afa4490b85830ab0dfca811a98b70a205621a6109fe"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:11bdf3f5e1518b24530b8241529d2050014c884cf18b6fc69c0c2b30ca248710"},
+ {file = "multidict-6.0.4-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7d18748f2d30f94f498e852c67d61261c643b349b9d2a581131725595c45ec6c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:458f37be2d9e4c95e2d8866a851663cbc76e865b78395090786f6cd9b3bbf4f4"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:b1a2eeedcead3a41694130495593a559a668f382eee0727352b9a41e1c45759a"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7d6ae9d593ef8641544d6263c7fa6408cc90370c8cb2bbb65f8d43e5b0351d9c"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:5979b5632c3e3534e42ca6ff856bb24b2e3071b37861c2c727ce220d80eee9ed"},
+ {file = "multidict-6.0.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:dcfe792765fab89c365123c81046ad4103fcabbc4f56d1c1997e6715e8015461"},
+ {file = "multidict-6.0.4-cp311-cp311-win32.whl", hash = "sha256:3601a3cece3819534b11d4efc1eb76047488fddd0c85a3948099d5da4d504636"},
+ {file = "multidict-6.0.4-cp311-cp311-win_amd64.whl", hash = "sha256:81a4f0b34bd92df3da93315c6a59034df95866014ac08535fc819f043bfd51f0"},
+ {file = "multidict-6.0.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:67040058f37a2a51ed8ea8f6b0e6ee5bd78ca67f169ce6122f3e2ec80dfe9b78"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:853888594621e6604c978ce2a0444a1e6e70c8d253ab65ba11657659dcc9100f"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:39ff62e7d0f26c248b15e364517a72932a611a9b75f35b45be078d81bdb86603"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:af048912e045a2dc732847d33821a9d84ba553f5c5f028adbd364dd4765092ac"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b1e8b901e607795ec06c9e42530788c45ac21ef3aaa11dbd0c69de543bfb79a9"},
+ {file = "multidict-6.0.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:62501642008a8b9871ddfccbf83e4222cf8ac0d5aeedf73da36153ef2ec222d2"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:99b76c052e9f1bc0721f7541e5e8c05db3941eb9ebe7b8553c625ef88d6eefde"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:509eac6cf09c794aa27bcacfd4d62c885cce62bef7b2c3e8b2e49d365b5003fe"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:21a12c4eb6ddc9952c415f24eef97e3e55ba3af61f67c7bc388dcdec1404a067"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:5cad9430ab3e2e4fa4a2ef4450f548768400a2ac635841bc2a56a2052cdbeb87"},
+ {file = "multidict-6.0.4-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab55edc2e84460694295f401215f4a58597f8f7c9466faec545093045476327d"},
+ {file = "multidict-6.0.4-cp37-cp37m-win32.whl", hash = "sha256:5a4dcf02b908c3b8b17a45fb0f15b695bf117a67b76b7ad18b73cf8e92608775"},
+ {file = "multidict-6.0.4-cp37-cp37m-win_amd64.whl", hash = "sha256:6ed5f161328b7df384d71b07317f4d8656434e34591f20552c7bcef27b0ab88e"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5fc1b16f586f049820c5c5b17bb4ee7583092fa0d1c4e28b5239181ff9532e0c"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1502e24330eb681bdaa3eb70d6358e818e8e8f908a22a1851dfd4e15bc2f8161"},
+ {file = "multidict-6.0.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:b692f419760c0e65d060959df05f2a531945af31fda0c8a3b3195d4efd06de11"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:45e1ecb0379bfaab5eef059f50115b54571acfbe422a14f668fc8c27ba410e7e"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ddd3915998d93fbcd2566ddf9cf62cdb35c9e093075f862935573d265cf8f65d"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d43b61c59d82f2effb39a93c48b845efe23a3852d201ed2d24ba830d0b4cf2"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cc8e1d0c705233c5dd0c5e6460fbad7827d5d36f310a0fadfd45cc3029762258"},
+ {file = "multidict-6.0.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6aa0418fcc838522256761b3415822626f866758ee0bc6632c9486b179d0b52"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:6748717bb10339c4760c1e63da040f5f29f5ed6e59d76daee30305894069a660"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4d1a3d7ef5e96b1c9e92f973e43aa5e5b96c659c9bc3124acbbd81b0b9c8a951"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:4372381634485bec7e46718edc71528024fcdc6f835baefe517b34a33c731d60"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:fc35cb4676846ef752816d5be2193a1e8367b4c1397b74a565a9d0389c433a1d"},
+ {file = "multidict-6.0.4-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:4b9d9e4e2b37daddb5c23ea33a3417901fa7c7b3dee2d855f63ee67a0b21e5b1"},
+ {file = "multidict-6.0.4-cp38-cp38-win32.whl", hash = "sha256:e41b7e2b59679edfa309e8db64fdf22399eec4b0b24694e1b2104fb789207779"},
+ {file = "multidict-6.0.4-cp38-cp38-win_amd64.whl", hash = "sha256:d6c254ba6e45d8e72739281ebc46ea5eb5f101234f3ce171f0e9f5cc86991480"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:16ab77bbeb596e14212e7bab8429f24c1579234a3a462105cda4a66904998664"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bc779e9e6f7fda81b3f9aa58e3a6091d49ad528b11ed19f6621408806204ad35"},
+ {file = "multidict-6.0.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4ceef517eca3e03c1cceb22030a3e39cb399ac86bff4e426d4fc6ae49052cc60"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:281af09f488903fde97923c7744bb001a9b23b039a909460d0f14edc7bf59706"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:52f2dffc8acaba9a2f27174c41c9e57f60b907bb9f096b36b1a1f3be71c6284d"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b41156839806aecb3641f3208c0dafd3ac7775b9c4c422d82ee2a45c34ba81ca"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d5e3fc56f88cc98ef8139255cf8cd63eb2c586531e43310ff859d6bb3a6b51f1"},
+ {file = "multidict-6.0.4-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8316a77808c501004802f9beebde51c9f857054a0c871bd6da8280e718444449"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f70b98cd94886b49d91170ef23ec5c0e8ebb6f242d734ed7ed677b24d50c82cf"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:bf6774e60d67a9efe02b3616fee22441d86fab4c6d335f9d2051d19d90a40063"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:e69924bfcdda39b722ef4d9aa762b2dd38e4632b3641b1d9a57ca9cd18f2f83a"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:6b181d8c23da913d4ff585afd1155a0e1194c0b50c54fcfe286f70cdaf2b7176"},
+ {file = "multidict-6.0.4-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:52509b5be062d9eafc8170e53026fbc54cf3b32759a23d07fd935fb04fc22d95"},
+ {file = "multidict-6.0.4-cp39-cp39-win32.whl", hash = "sha256:27c523fbfbdfd19c6867af7346332b62b586eed663887392cff78d614f9ec313"},
+ {file = "multidict-6.0.4-cp39-cp39-win_amd64.whl", hash = "sha256:33029f5734336aa0d4c0384525da0387ef89148dc7191aae00ca5fb23d7aafc2"},
+ {file = "multidict-6.0.4.tar.gz", hash = "sha256:3666906492efb76453c0e7b97f2cf459b0682e7402c0489a95484965dbc1da49"},
+]
+
+[[package]]
+name = "nest-asyncio"
+version = "1.5.7"
+description = "Patch asyncio to allow nested event loops"
+optional = false
+python-versions = ">=3.5"
+files = [
+ {file = "nest_asyncio-1.5.7-py3-none-any.whl", hash = "sha256:5301c82941b550b3123a1ea772ba9a1c80bad3a182be8c1a5ae6ad3be57a9657"},
+ {file = "nest_asyncio-1.5.7.tar.gz", hash = "sha256:6a80f7b98f24d9083ed24608977c09dd608d83f91cccc24c9d2cba6d10e01c10"},
+]
+
+[[package]]
+name = "openai"
+version = "0.27.10"
+description = "Python client library for the OpenAI API"
+optional = false
+python-versions = ">=3.7.1"
+files = [
+ {file = "openai-0.27.10-py3-none-any.whl", hash = "sha256:beabd1757e3286fa166dde3b70ebb5ad8081af046876b47c14c41e203ed22a14"},
+ {file = "openai-0.27.10.tar.gz", hash = "sha256:60e09edf7100080283688748c6803b7b3b52d5a55d21890f3815292a0552d83b"},
+]
+
+[package.dependencies]
+aiohttp = "*"
+requests = ">=2.20"
+tqdm = "*"
+
+[package.extras]
+datalib = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)"]
+dev = ["black (>=21.6b0,<22.0)", "pytest (==6.*)", "pytest-asyncio", "pytest-mock"]
+embeddings = ["matplotlib", "numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "plotly", "scikit-learn (>=1.0.2)", "scipy", "tenacity (>=8.0.1)"]
+wandb = ["numpy", "openpyxl (>=3.0.7)", "pandas (>=1.2.3)", "pandas-stubs (>=1.1.0.11)", "wandb"]
+
+[[package]]
+name = "packaging"
+version = "23.1"
+description = "Core utilities for Python packages"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"},
+ {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"},
+]
+
+[[package]]
+name = "parso"
+version = "0.8.3"
+description = "A Python Parser"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"},
+ {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"},
+]
+
+[package.extras]
+qa = ["flake8 (==3.8.3)", "mypy (==0.782)"]
+testing = ["docopt", "pytest (<6.0.0)"]
+
+[[package]]
+name = "pkgutil-resolve-name"
+version = "1.3.10"
+description = "Resolve a name to an object."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"},
+ {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"},
+]
+
+[[package]]
+name = "pluggy"
+version = "1.3.0"
+description = "plugin and hook calling mechanisms for python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "pluggy-1.3.0-py3-none-any.whl", hash = "sha256:d89c696a773f8bd377d18e5ecda92b7a3793cbe66c87060a6fb58c7b6e1061f7"},
+ {file = "pluggy-1.3.0.tar.gz", hash = "sha256:cf61ae8f126ac6f7c451172cf30e3e43d3ca77615509771b3a984a0730651e12"},
+]
+
+[package.extras]
+dev = ["pre-commit", "tox"]
+testing = ["pytest", "pytest-benchmark"]
+
+[[package]]
+name = "posthog"
+version = "3.0.2"
+description = "Integrate PostHog into any python application."
+optional = false
+python-versions = "*"
+files = [
+ {file = "posthog-3.0.2-py2.py3-none-any.whl", hash = "sha256:a8c0af6f2401fbe50f90e68c4143d0824b54e872de036b1c2f23b5abb39d88ce"},
+ {file = "posthog-3.0.2.tar.gz", hash = "sha256:701fba6e446a4de687c6e861b587e7b7741955ad624bf34fe013c06a0fec6fb3"},
+]
+
+[package.dependencies]
+backoff = ">=1.10.0"
+monotonic = ">=1.5"
+python-dateutil = ">2.1"
+requests = ">=2.7,<3.0"
+six = ">=1.5"
+
+[package.extras]
+dev = ["black", "flake8", "flake8-print", "isort", "pre-commit"]
+sentry = ["django", "sentry-sdk"]
+test = ["coverage", "flake8", "freezegun (==0.3.15)", "mock (>=2.0.0)", "pylint", "pytest"]
+
+[[package]]
+name = "psutil"
+version = "5.9.5"
+description = "Cross-platform lib for process and system monitoring in Python."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"},
+ {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"},
+ {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"},
+ {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"},
+ {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"},
+ {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"},
+ {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"},
+ {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"},
+ {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"},
+ {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"},
+ {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"},
+ {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"},
+ {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"},
+ {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"},
+]
+
+[package.extras]
+test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"]
+
+[[package]]
+name = "pycparser"
+version = "2.21"
+description = "C parser in Python"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*"
+files = [
+ {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"},
+ {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"},
+]
+
+[[package]]
+name = "pydantic"
+version = "1.10.12"
+description = "Data validation and settings management using python type hints"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pydantic-1.10.12-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:a1fcb59f2f355ec350073af41d927bf83a63b50e640f4dbaa01053a28b7a7718"},
+ {file = "pydantic-1.10.12-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:b7ccf02d7eb340b216ec33e53a3a629856afe1c6e0ef91d84a4e6f2fb2ca70fe"},
+ {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8fb2aa3ab3728d950bcc885a2e9eff6c8fc40bc0b7bb434e555c215491bcf48b"},
+ {file = "pydantic-1.10.12-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:771735dc43cf8383959dc9b90aa281f0b6092321ca98677c5fb6125a6f56d58d"},
+ {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca48477862372ac3770969b9d75f1bf66131d386dba79506c46d75e6b48c1e09"},
+ {file = "pydantic-1.10.12-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a5e7add47a5b5a40c49b3036d464e3c7802f8ae0d1e66035ea16aa5b7a3923ed"},
+ {file = "pydantic-1.10.12-cp310-cp310-win_amd64.whl", hash = "sha256:e4129b528c6baa99a429f97ce733fff478ec955513630e61b49804b6cf9b224a"},
+ {file = "pydantic-1.10.12-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b0d191db0f92dfcb1dec210ca244fdae5cbe918c6050b342d619c09d31eea0cc"},
+ {file = "pydantic-1.10.12-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:795e34e6cc065f8f498c89b894a3c6da294a936ee71e644e4bd44de048af1405"},
+ {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:69328e15cfda2c392da4e713443c7dbffa1505bc9d566e71e55abe14c97ddc62"},
+ {file = "pydantic-1.10.12-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2031de0967c279df0d8a1c72b4ffc411ecd06bac607a212892757db7462fc494"},
+ {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:ba5b2e6fe6ca2b7e013398bc7d7b170e21cce322d266ffcd57cca313e54fb246"},
+ {file = "pydantic-1.10.12-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:2a7bac939fa326db1ab741c9d7f44c565a1d1e80908b3797f7f81a4f86bc8d33"},
+ {file = "pydantic-1.10.12-cp311-cp311-win_amd64.whl", hash = "sha256:87afda5539d5140cb8ba9e8b8c8865cb5b1463924d38490d73d3ccfd80896b3f"},
+ {file = "pydantic-1.10.12-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:549a8e3d81df0a85226963611950b12d2d334f214436a19537b2efed61b7639a"},
+ {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:598da88dfa127b666852bef6d0d796573a8cf5009ffd62104094a4fe39599565"},
+ {file = "pydantic-1.10.12-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ba5c4a8552bff16c61882db58544116d021d0b31ee7c66958d14cf386a5b5350"},
+ {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c79e6a11a07da7374f46970410b41d5e266f7f38f6a17a9c4823db80dadf4303"},
+ {file = "pydantic-1.10.12-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:ab26038b8375581dc832a63c948f261ae0aa21f1d34c1293469f135fa92972a5"},
+ {file = "pydantic-1.10.12-cp37-cp37m-win_amd64.whl", hash = "sha256:e0a16d274b588767602b7646fa05af2782576a6cf1022f4ba74cbb4db66f6ca8"},
+ {file = "pydantic-1.10.12-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:6a9dfa722316f4acf4460afdf5d41d5246a80e249c7ff475c43a3a1e9d75cf62"},
+ {file = "pydantic-1.10.12-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a73f489aebd0c2121ed974054cb2759af8a9f747de120acd2c3394cf84176ccb"},
+ {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6b30bcb8cbfccfcf02acb8f1a261143fab622831d9c0989707e0e659f77a18e0"},
+ {file = "pydantic-1.10.12-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2fcfb5296d7877af406ba1547dfde9943b1256d8928732267e2653c26938cd9c"},
+ {file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:2f9a6fab5f82ada41d56b0602606a5506aab165ca54e52bc4545028382ef1c5d"},
+ {file = "pydantic-1.10.12-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:dea7adcc33d5d105896401a1f37d56b47d443a2b2605ff8a969a0ed5543f7e33"},
+ {file = "pydantic-1.10.12-cp38-cp38-win_amd64.whl", hash = "sha256:1eb2085c13bce1612da8537b2d90f549c8cbb05c67e8f22854e201bde5d98a47"},
+ {file = "pydantic-1.10.12-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:ef6c96b2baa2100ec91a4b428f80d8f28a3c9e53568219b6c298c1125572ebc6"},
+ {file = "pydantic-1.10.12-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:6c076be61cd0177a8433c0adcb03475baf4ee91edf5a4e550161ad57fc90f523"},
+ {file = "pydantic-1.10.12-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2d5a58feb9a39f481eda4d5ca220aa8b9d4f21a41274760b9bc66bfd72595b86"},
+ {file = "pydantic-1.10.12-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e5f805d2d5d0a41633651a73fa4ecdd0b3d7a49de4ec3fadf062fe16501ddbf1"},
+ {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:1289c180abd4bd4555bb927c42ee42abc3aee02b0fb2d1223fb7c6e5bef87dbe"},
+ {file = "pydantic-1.10.12-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:5d1197e462e0364906cbc19681605cb7c036f2475c899b6f296104ad42b9f5fb"},
+ {file = "pydantic-1.10.12-cp39-cp39-win_amd64.whl", hash = "sha256:fdbdd1d630195689f325c9ef1a12900524dceb503b00a987663ff4f58669b93d"},
+ {file = "pydantic-1.10.12-py3-none-any.whl", hash = "sha256:b749a43aa51e32839c9d71dc67eb1e4221bb04af1033a32e3923d46f9effa942"},
+ {file = "pydantic-1.10.12.tar.gz", hash = "sha256:0fe8a415cea8f340e7a9af9c54fc71a649b43e8ca3cc732986116b3cb135d303"},
+]
+
+[package.dependencies]
+typing-extensions = ">=4.2.0"
+
+[package.extras]
+dotenv = ["python-dotenv (>=0.10.4)"]
+email = ["email-validator (>=1.0.3)"]
+
+[[package]]
+name = "pygithub"
+version = "1.59.1"
+description = "Use the full Github API v3"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "PyGithub-1.59.1-py3-none-any.whl", hash = "sha256:3d87a822e6c868142f0c2c4bf16cce4696b5a7a4d142a7bd160e1bdf75bc54a9"},
+ {file = "PyGithub-1.59.1.tar.gz", hash = "sha256:c44e3a121c15bf9d3a5cc98d94c9a047a5132a9b01d22264627f58ade9ddc217"},
+]
+
+[package.dependencies]
+deprecated = "*"
+pyjwt = {version = ">=2.4.0", extras = ["crypto"]}
+pynacl = ">=1.4.0"
+requests = ">=2.14.0"
+
+[[package]]
+name = "pyjwt"
+version = "2.8.0"
+description = "JSON Web Token implementation in Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "PyJWT-2.8.0-py3-none-any.whl", hash = "sha256:59127c392cc44c2da5bb3192169a91f429924e17aff6534d70fdc02ab3e04320"},
+ {file = "PyJWT-2.8.0.tar.gz", hash = "sha256:57e28d156e3d5c10088e0c68abb90bfac3df82b40a71bd0daa20c65ccd5c23de"},
+]
+
+[package.dependencies]
+cryptography = {version = ">=3.4.0", optional = true, markers = "extra == \"crypto\""}
+
+[package.extras]
+crypto = ["cryptography (>=3.4.0)"]
+dev = ["coverage[toml] (==5.0.4)", "cryptography (>=3.4.0)", "pre-commit", "pytest (>=6.0.0,<7.0.0)", "sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
+docs = ["sphinx (>=4.5.0,<5.0.0)", "sphinx-rtd-theme", "zope.interface"]
+tests = ["coverage[toml] (==5.0.4)", "pytest (>=6.0.0,<7.0.0)"]
+
+[[package]]
+name = "pynacl"
+version = "1.5.0"
+description = "Python binding to the Networking and Cryptography (NaCl) library"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "PyNaCl-1.5.0-cp36-abi3-macosx_10_10_universal2.whl", hash = "sha256:401002a4aaa07c9414132aaed7f6836ff98f59277a234704ff66878c2ee4a0d1"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.manylinux_2_24_aarch64.whl", hash = "sha256:52cb72a79269189d4e0dc537556f4740f7f0a9ec41c1322598799b0bdad4ef92"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a36d4a9dda1f19ce6e03c9a784a2921a4b726b02e1c736600ca9c22029474394"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.manylinux_2_24_x86_64.whl", hash = "sha256:0c84947a22519e013607c9be43706dd42513f9e6ae5d39d3613ca1e142fba44d"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:06b8f6fa7f5de8d5d2f7573fe8c863c051225a27b61e6860fd047b1775807858"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:a422368fc821589c228f4c49438a368831cb5bbc0eab5ebe1d7fac9dded6567b"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:61f642bf2378713e2c2e1de73444a3778e5f0a38be6fee0fe532fe30060282ff"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-win32.whl", hash = "sha256:e46dae94e34b085175f8abb3b0aaa7da40767865ac82c928eeb9e57e1ea8a543"},
+ {file = "PyNaCl-1.5.0-cp36-abi3-win_amd64.whl", hash = "sha256:20f42270d27e1b6a29f54032090b972d97f0a1b0948cc52392041ef7831fee93"},
+ {file = "PyNaCl-1.5.0.tar.gz", hash = "sha256:8ac7448f09ab85811607bdd21ec2464495ac8b7c66d146bf545b0f08fb9220ba"},
+]
+
+[package.dependencies]
+cffi = ">=1.4.1"
+
+[package.extras]
+docs = ["sphinx (>=1.6.5)", "sphinx-rtd-theme"]
+tests = ["hypothesis (>=3.27.0)", "pytest (>=3.2.1,!=3.3.0)"]
+
+[[package]]
+name = "pytest"
+version = "7.4.1"
+description = "pytest: simple powerful testing with Python"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-7.4.1-py3-none-any.whl", hash = "sha256:460c9a59b14e27c602eb5ece2e47bec99dc5fc5f6513cf924a7d03a578991b1f"},
+ {file = "pytest-7.4.1.tar.gz", hash = "sha256:2f2301e797521b23e4d2585a0a3d7b5e50fdddaaf7e7d6773ea26ddb17c213ab"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "sys_platform == \"win32\""}
+exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""}
+iniconfig = "*"
+packaging = "*"
+pluggy = ">=0.12,<2.0"
+tomli = {version = ">=1.0.0", markers = "python_version < \"3.11\""}
+
+[package.extras]
+testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"]
+
+[[package]]
+name = "pytest-asyncio"
+version = "0.21.1"
+description = "Pytest support for asyncio"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "pytest-asyncio-0.21.1.tar.gz", hash = "sha256:40a7eae6dded22c7b604986855ea48400ab15b069ae38116e8c01238e9eeb64d"},
+ {file = "pytest_asyncio-0.21.1-py3-none-any.whl", hash = "sha256:8666c1c8ac02631d7c51ba282e0c69a8a452b211ffedf2599099845da5c5c37b"},
+]
+
+[package.dependencies]
+pytest = ">=7.0.0"
+
+[package.extras]
+docs = ["sphinx (>=5.3)", "sphinx-rtd-theme (>=1.0)"]
+testing = ["coverage (>=6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (>=0.931)", "pytest-trio (>=0.7.0)"]
+
+[[package]]
+name = "python-dateutil"
+version = "2.8.2"
+description = "Extensions to the standard Python datetime module"
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7"
+files = [
+ {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"},
+ {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"},
+]
+
+[package.dependencies]
+six = ">=1.5"
+
+[[package]]
+name = "python-dotenv"
+version = "1.0.0"
+description = "Read key-value pairs from a .env file and set them as environment variables"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "python-dotenv-1.0.0.tar.gz", hash = "sha256:a8df96034aae6d2d50a4ebe8216326c61c3eb64836776504fcca410e5937a3ba"},
+ {file = "python_dotenv-1.0.0-py3-none-any.whl", hash = "sha256:f5971a9226b701070a4bf2c38c89e5a3f0d64de8debda981d1db98583009122a"},
+]
+
+[package.extras]
+cli = ["click (>=5.0)"]
+
+[[package]]
+name = "python-lsp-jsonrpc"
+version = "1.0.0"
+description = "JSON RPC 2.0 server library"
+optional = false
+python-versions = "*"
+files = [
+ {file = "python-lsp-jsonrpc-1.0.0.tar.gz", hash = "sha256:7bec170733db628d3506ea3a5288ff76aa33c70215ed223abdb0d95e957660bd"},
+ {file = "python_lsp_jsonrpc-1.0.0-py3-none-any.whl", hash = "sha256:079b143be64b0a378bdb21dff5e28a8c1393fe7e8a654ef068322d754e545fc7"},
+]
+
+[package.dependencies]
+ujson = ">=3.0.0"
+
+[package.extras]
+test = ["coverage", "pycodestyle", "pyflakes", "pylint", "pytest", "pytest-cov"]
+
+[[package]]
+name = "python-lsp-server"
+version = "1.7.4"
+description = "Python Language Server for the Language Server Protocol"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "python-lsp-server-1.7.4.tar.gz", hash = "sha256:c84254485a4d9431b24ecefd59741d21c00165611bcf6037bd7d54d0ed06a197"},
+ {file = "python_lsp_server-1.7.4-py3-none-any.whl", hash = "sha256:f8053f7aefcb60af4e91f3fab1a093b15ba0c4688ba67e6ab69e7b73e997b2cb"},
+]
+
+[package.dependencies]
+docstring-to-markdown = "*"
+jedi = ">=0.17.2,<0.19.0"
+pluggy = ">=1.0.0"
+python-lsp-jsonrpc = ">=1.0.0"
+setuptools = ">=39.0.0"
+ujson = ">=3.0.0"
+
+[package.extras]
+all = ["autopep8 (>=1.6.0,<2.1.0)", "flake8 (>=5.0.0,<7)", "mccabe (>=0.7.0,<0.8.0)", "pycodestyle (>=2.9.0,<2.11.0)", "pydocstyle (>=6.3.0,<6.4.0)", "pyflakes (>=2.5.0,<3.1.0)", "pylint (>=2.5.0,<3)", "rope (>1.2.0)", "whatthepatch (>=1.0.2,<2.0.0)", "yapf (>=0.33.0)"]
+autopep8 = ["autopep8 (>=1.6.0,<2.1.0)"]
+flake8 = ["flake8 (>=5.0.0,<7)"]
+mccabe = ["mccabe (>=0.7.0,<0.8.0)"]
+pycodestyle = ["pycodestyle (>=2.9.0,<2.11.0)"]
+pydocstyle = ["pydocstyle (>=6.3.0,<6.4.0)"]
+pyflakes = ["pyflakes (>=2.5.0,<3.1.0)"]
+pylint = ["pylint (>=2.5.0,<3)"]
+rope = ["rope (>1.2.0)"]
+test = ["coverage", "flaky", "matplotlib", "numpy", "pandas", "pylint (>=2.5.0,<3)", "pyqt5", "pytest", "pytest-cov"]
+websockets = ["websockets (>=10.3)"]
+yapf = ["whatthepatch (>=1.0.2,<2.0.0)", "yapf (>=0.33.0)"]
+
+[[package]]
+name = "pyyaml"
+version = "6.0.1"
+description = "YAML parser and emitter for Python"
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d858aa552c999bc8a8d57426ed01e40bef403cd8ccdd0fc5f6f04a00414cac2a"},
+ {file = "PyYAML-6.0.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:fd66fc5d0da6d9815ba2cebeb4205f95818ff4b79c3ebe268e75d961704af52f"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69b023b2b4daa7548bcfbd4aa3da05b3a74b772db9e23b982788168117739938"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:81e0b275a9ecc9c0c0c07b4b90ba548307583c125f54d5b6946cfee6360c733d"},
+ {file = "PyYAML-6.0.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba336e390cd8e4d1739f42dfe9bb83a3cc2e80f567d8805e11b46f4a943f5515"},
+ {file = "PyYAML-6.0.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:326c013efe8048858a6d312ddd31d56e468118ad4cdeda36c719bf5bb6192290"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win32.whl", hash = "sha256:bd4af7373a854424dabd882decdc5579653d7868b8fb26dc7d0e99f823aa5924"},
+ {file = "PyYAML-6.0.1-cp310-cp310-win_amd64.whl", hash = "sha256:fd1592b3fdf65fff2ad0004b5e363300ef59ced41c2e6b3a99d4089fa8c5435d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6965a7bc3cf88e5a1c3bd2e0b5c22f8d677dc88a455344035f03399034eb3007"},
+ {file = "PyYAML-6.0.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f003ed9ad21d6a4713f0a9b5a7a0a79e08dd0f221aff4525a2be4c346ee60aab"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:42f8152b8dbc4fe7d96729ec2b99c7097d656dc1213a3229ca5383f973a5ed6d"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:062582fca9fabdd2c8b54a3ef1c978d786e0f6b3a1510e0ac93ef59e0ddae2bc"},
+ {file = "PyYAML-6.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d2b04aac4d386b172d5b9692e2d2da8de7bfb6c387fa4f801fbf6fb2e6ba4673"},
+ {file = "PyYAML-6.0.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e7d73685e87afe9f3b36c799222440d6cf362062f78be1013661b00c5c6f678b"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win32.whl", hash = "sha256:1635fd110e8d85d55237ab316b5b011de701ea0f29d07611174a1b42f1444741"},
+ {file = "PyYAML-6.0.1-cp311-cp311-win_amd64.whl", hash = "sha256:bf07ee2fef7014951eeb99f56f39c9bb4af143d8aa3c21b1677805985307da34"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:855fb52b0dc35af121542a76b9a84f8d1cd886ea97c84703eaa6d88e37a2ad28"},
+ {file = "PyYAML-6.0.1-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:40df9b996c2b73138957fe23a16a4f0ba614f4c0efce1e9406a184b6d07fa3a9"},
+ {file = "PyYAML-6.0.1-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6c22bec3fbe2524cde73d7ada88f6566758a8f7227bfbf93a408a9d86bcc12a0"},
+ {file = "PyYAML-6.0.1-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:8d4e9c88387b0f5c7d5f281e55304de64cf7f9c0021a3525bd3b1c542da3b0e4"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win32.whl", hash = "sha256:d483d2cdf104e7c9fa60c544d92981f12ad66a457afae824d146093b8c294c54"},
+ {file = "PyYAML-6.0.1-cp312-cp312-win_amd64.whl", hash = "sha256:0d3304d8c0adc42be59c5f8a4d9e3d7379e6955ad754aa9d6ab7a398b59dd1df"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50550eb667afee136e9a77d6dc71ae76a44df8b3e51e41b77f6de2932bfe0f47"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1fe35611261b29bd1de0070f0b2f47cb6ff71fa6595c077e42bd0c419fa27b98"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:704219a11b772aea0d8ecd7058d0082713c3562b4e271b849ad7dc4a5c90c13c"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:afd7e57eddb1a54f0f1a974bc4391af8bcce0b444685d936840f125cf046d5bd"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win32.whl", hash = "sha256:fca0e3a251908a499833aa292323f32437106001d436eca0e6e7833256674585"},
+ {file = "PyYAML-6.0.1-cp36-cp36m-win_amd64.whl", hash = "sha256:f22ac1c3cac4dbc50079e965eba2c1058622631e526bd9afd45fedd49ba781fa"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:b1275ad35a5d18c62a7220633c913e1b42d44b46ee12554e5fd39c70a243d6a3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:18aeb1bf9a78867dc38b259769503436b7c72f7a1f1f4c93ff9a17de54319b27"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:596106435fa6ad000c2991a98fa58eeb8656ef2325d7e158344fb33864ed87e3"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:baa90d3f661d43131ca170712d903e6295d1f7a0f595074f151c0aed377c9b9c"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win32.whl", hash = "sha256:9046c58c4395dff28dd494285c82ba00b546adfc7ef001486fbf0324bc174fba"},
+ {file = "PyYAML-6.0.1-cp37-cp37m-win_amd64.whl", hash = "sha256:4fb147e7a67ef577a588a0e2c17b6db51dda102c71de36f8549b6816a96e1867"},
+ {file = "PyYAML-6.0.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:1d4c7e777c441b20e32f52bd377e0c409713e8bb1386e1099c2415f26e479595"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a0cd17c15d3bb3fa06978b4e8958dcdc6e0174ccea823003a106c7d4d7899ac5"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:28c119d996beec18c05208a8bd78cbe4007878c6dd15091efb73a30e90539696"},
+ {file = "PyYAML-6.0.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7e07cbde391ba96ab58e532ff4803f79c4129397514e1413a7dc761ccd755735"},
+ {file = "PyYAML-6.0.1-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:49a183be227561de579b4a36efbb21b3eab9651dd81b1858589f796549873dd6"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win32.whl", hash = "sha256:184c5108a2aca3c5b3d3bf9395d50893a7ab82a38004c8f61c258d4428e80206"},
+ {file = "PyYAML-6.0.1-cp38-cp38-win_amd64.whl", hash = "sha256:1e2722cc9fbb45d9b87631ac70924c11d3a401b2d7f410cc0e3bbf249f2dca62"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:9eb6caa9a297fc2c2fb8862bc5370d0303ddba53ba97e71f08023b6cd73d16a8"},
+ {file = "PyYAML-6.0.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c8098ddcc2a85b61647b2590f825f3db38891662cfc2fc776415143f599bb859"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5773183b6446b2c99bb77e77595dd486303b4faab2b086e7b17bc6bef28865f6"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:b786eecbdf8499b9ca1d697215862083bd6d2a99965554781d0d8d1ad31e13a0"},
+ {file = "PyYAML-6.0.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bc1bf2925a1ecd43da378f4db9e4f799775d6367bdb94671027b73b393a7c42c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:04ac92ad1925b2cff1db0cfebffb6ffc43457495c9b3c39d3fcae417d7125dc5"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win32.whl", hash = "sha256:faca3bdcf85b2fc05d06ff3fbc1f83e1391b3e724afa3feba7d13eeab355484c"},
+ {file = "PyYAML-6.0.1-cp39-cp39-win_amd64.whl", hash = "sha256:510c9deebc5c0225e8c96813043e62b680ba2f9c50a08d3724c7f28a747d1486"},
+ {file = "PyYAML-6.0.1.tar.gz", hash = "sha256:bfdf460b1736c775f2ba9f6a92bca30bc2095067b8a9d77876d1fad6cc3b4a43"},
+]
+
+[[package]]
+name = "redbaron"
+version = "0.9.2"
+description = "Abstraction on top of baron, a FST for python to make writing refactoring code a realistic task"
+optional = false
+python-versions = "*"
+files = [
+ {file = "redbaron-0.9.2-py2.py3-none-any.whl", hash = "sha256:d01032b6a848b5521a8d6ef72486315c2880f420956870cdd742e2b5a09b9bab"},
+ {file = "redbaron-0.9.2.tar.gz", hash = "sha256:472d0739ca6b2240bb2278ae428604a75472c9c12e86c6321e8c016139c0132f"},
+]
+
+[package.dependencies]
+baron = ">=0.7"
+
+[package.extras]
+notebook = ["pygments"]
+
+[[package]]
+name = "referencing"
+version = "0.30.2"
+description = "JSON Referencing + Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "referencing-0.30.2-py3-none-any.whl", hash = "sha256:449b6669b6121a9e96a7f9e410b245d471e8d48964c67113ce9afe50c8dd7bdf"},
+ {file = "referencing-0.30.2.tar.gz", hash = "sha256:794ad8003c65938edcdbc027f1933215e0d0ccc0291e3ce20a4d87432b59efc0"},
+]
+
+[package.dependencies]
+attrs = ">=22.2.0"
+rpds-py = ">=0.7.0"
+
+[[package]]
+name = "regex"
+version = "2023.8.8"
+description = "Alternative regular expression module, to replace re."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "regex-2023.8.8-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:88900f521c645f784260a8d346e12a1590f79e96403971241e64c3a265c8ecdb"},
+ {file = "regex-2023.8.8-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3611576aff55918af2697410ff0293d6071b7e00f4b09e005d614686ac4cd57c"},
+ {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b8a0ccc8f2698f120e9e5742f4b38dc944c38744d4bdfc427616f3a163dd9de5"},
+ {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c662a4cbdd6280ee56f841f14620787215a171c4e2d1744c9528bed8f5816c96"},
+ {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cf0633e4a1b667bfe0bb10b5e53fe0d5f34a6243ea2530eb342491f1adf4f739"},
+ {file = "regex-2023.8.8-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:551ad543fa19e94943c5b2cebc54c73353ffff08228ee5f3376bd27b3d5b9800"},
+ {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54de2619f5ea58474f2ac211ceea6b615af2d7e4306220d4f3fe690c91988a61"},
+ {file = "regex-2023.8.8-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:5ec4b3f0aebbbe2fc0134ee30a791af522a92ad9f164858805a77442d7d18570"},
+ {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:3ae646c35cb9f820491760ac62c25b6d6b496757fda2d51be429e0e7b67ae0ab"},
+ {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:ca339088839582d01654e6f83a637a4b8194d0960477b9769d2ff2cfa0fa36d2"},
+ {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:d9b6627408021452dcd0d2cdf8da0534e19d93d070bfa8b6b4176f99711e7f90"},
+ {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:bd3366aceedf274f765a3a4bc95d6cd97b130d1dda524d8f25225d14123c01db"},
+ {file = "regex-2023.8.8-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7aed90a72fc3654fba9bc4b7f851571dcc368120432ad68b226bd593f3f6c0b7"},
+ {file = "regex-2023.8.8-cp310-cp310-win32.whl", hash = "sha256:80b80b889cb767cc47f31d2b2f3dec2db8126fbcd0cff31b3925b4dc6609dcdb"},
+ {file = "regex-2023.8.8-cp310-cp310-win_amd64.whl", hash = "sha256:b82edc98d107cbc7357da7a5a695901b47d6eb0420e587256ba3ad24b80b7d0b"},
+ {file = "regex-2023.8.8-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1e7d84d64c84ad97bf06f3c8cb5e48941f135ace28f450d86af6b6512f1c9a71"},
+ {file = "regex-2023.8.8-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:ce0f9fbe7d295f9922c0424a3637b88c6c472b75eafeaff6f910494a1fa719ef"},
+ {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:06c57e14ac723b04458df5956cfb7e2d9caa6e9d353c0b4c7d5d54fcb1325c46"},
+ {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e7a9aaa5a1267125eef22cef3b63484c3241aaec6f48949b366d26c7250e0357"},
+ {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9b7408511fca48a82a119d78a77c2f5eb1b22fe88b0d2450ed0756d194fe7a9a"},
+ {file = "regex-2023.8.8-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:14dc6f2d88192a67d708341f3085df6a4f5a0c7b03dec08d763ca2cd86e9f559"},
+ {file = "regex-2023.8.8-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:48c640b99213643d141550326f34f0502fedb1798adb3c9eb79650b1ecb2f177"},
+ {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:0085da0f6c6393428bf0d9c08d8b1874d805bb55e17cb1dfa5ddb7cfb11140bf"},
+ {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:964b16dcc10c79a4a2be9f1273fcc2684a9eedb3906439720598029a797b46e6"},
+ {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:7ce606c14bb195b0e5108544b540e2c5faed6843367e4ab3deb5c6aa5e681208"},
+ {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:40f029d73b10fac448c73d6eb33d57b34607f40116e9f6e9f0d32e9229b147d7"},
+ {file = "regex-2023.8.8-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:3b8e6ea6be6d64104d8e9afc34c151926f8182f84e7ac290a93925c0db004bfd"},
+ {file = "regex-2023.8.8-cp311-cp311-win32.whl", hash = "sha256:942f8b1f3b223638b02df7df79140646c03938d488fbfb771824f3d05fc083a8"},
+ {file = "regex-2023.8.8-cp311-cp311-win_amd64.whl", hash = "sha256:51d8ea2a3a1a8fe4f67de21b8b93757005213e8ac3917567872f2865185fa7fb"},
+ {file = "regex-2023.8.8-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:e951d1a8e9963ea51efd7f150450803e3b95db5939f994ad3d5edac2b6f6e2b4"},
+ {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:704f63b774218207b8ccc6c47fcef5340741e5d839d11d606f70af93ee78e4d4"},
+ {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22283c769a7b01c8ac355d5be0715bf6929b6267619505e289f792b01304d898"},
+ {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:91129ff1bb0619bc1f4ad19485718cc623a2dc433dff95baadbf89405c7f6b57"},
+ {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de35342190deb7b866ad6ba5cbcccb2d22c0487ee0cbb251efef0843d705f0d4"},
+ {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b993b6f524d1e274a5062488a43e3f9f8764ee9745ccd8e8193df743dbe5ee61"},
+ {file = "regex-2023.8.8-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:3026cbcf11d79095a32d9a13bbc572a458727bd5b1ca332df4a79faecd45281c"},
+ {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:293352710172239bf579c90a9864d0df57340b6fd21272345222fb6371bf82b3"},
+ {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:d909b5a3fff619dc7e48b6b1bedc2f30ec43033ba7af32f936c10839e81b9217"},
+ {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_ppc64le.whl", hash = "sha256:3d370ff652323c5307d9c8e4c62efd1956fb08051b0e9210212bc51168b4ff56"},
+ {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_s390x.whl", hash = "sha256:b076da1ed19dc37788f6a934c60adf97bd02c7eea461b73730513921a85d4235"},
+ {file = "regex-2023.8.8-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:e9941a4ada58f6218694f382e43fdd256e97615db9da135e77359da257a7168b"},
+ {file = "regex-2023.8.8-cp36-cp36m-win32.whl", hash = "sha256:a8c65c17aed7e15a0c824cdc63a6b104dfc530f6fa8cb6ac51c437af52b481c7"},
+ {file = "regex-2023.8.8-cp36-cp36m-win_amd64.whl", hash = "sha256:aadf28046e77a72f30dcc1ab185639e8de7f4104b8cb5c6dfa5d8ed860e57236"},
+ {file = "regex-2023.8.8-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:423adfa872b4908843ac3e7a30f957f5d5282944b81ca0a3b8a7ccbbfaa06103"},
+ {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4ae594c66f4a7e1ea67232a0846649a7c94c188d6c071ac0210c3e86a5f92109"},
+ {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e51c80c168074faa793685656c38eb7a06cbad7774c8cbc3ea05552d615393d8"},
+ {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:09b7f4c66aa9d1522b06e31a54f15581c37286237208df1345108fcf4e050c18"},
+ {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e73e5243af12d9cd6a9d6a45a43570dbe2e5b1cdfc862f5ae2b031e44dd95a8"},
+ {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:941460db8fe3bd613db52f05259c9336f5a47ccae7d7def44cc277184030a116"},
+ {file = "regex-2023.8.8-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f0ccf3e01afeb412a1a9993049cb160d0352dba635bbca7762b2dc722aa5742a"},
+ {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:2e9216e0d2cdce7dbc9be48cb3eacb962740a09b011a116fd7af8c832ab116ca"},
+ {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:5cd9cd7170459b9223c5e592ac036e0704bee765706445c353d96f2890e816c8"},
+ {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:4873ef92e03a4309b3ccd8281454801b291b689f6ad45ef8c3658b6fa761d7ac"},
+ {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:239c3c2a339d3b3ddd51c2daef10874410917cd2b998f043c13e2084cb191684"},
+ {file = "regex-2023.8.8-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:1005c60ed7037be0d9dea1f9c53cc42f836188227366370867222bda4c3c6bd7"},
+ {file = "regex-2023.8.8-cp37-cp37m-win32.whl", hash = "sha256:e6bd1e9b95bc5614a7a9c9c44fde9539cba1c823b43a9f7bc11266446dd568e3"},
+ {file = "regex-2023.8.8-cp37-cp37m-win_amd64.whl", hash = "sha256:9a96edd79661e93327cfeac4edec72a4046e14550a1d22aa0dd2e3ca52aec921"},
+ {file = "regex-2023.8.8-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f2181c20ef18747d5f4a7ea513e09ea03bdd50884a11ce46066bb90fe4213675"},
+ {file = "regex-2023.8.8-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:a2ad5add903eb7cdde2b7c64aaca405f3957ab34f16594d2b78d53b8b1a6a7d6"},
+ {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9233ac249b354c54146e392e8a451e465dd2d967fc773690811d3a8c240ac601"},
+ {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:920974009fb37b20d32afcdf0227a2e707eb83fe418713f7a8b7de038b870d0b"},
+ {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cd2b6c5dfe0929b6c23dde9624483380b170b6e34ed79054ad131b20203a1a63"},
+ {file = "regex-2023.8.8-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:96979d753b1dc3b2169003e1854dc67bfc86edf93c01e84757927f810b8c3c93"},
+ {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2ae54a338191e1356253e7883d9d19f8679b6143703086245fb14d1f20196be9"},
+ {file = "regex-2023.8.8-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2162ae2eb8b079622176a81b65d486ba50b888271302190870b8cc488587d280"},
+ {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:c884d1a59e69e03b93cf0dfee8794c63d7de0ee8f7ffb76e5f75be8131b6400a"},
+ {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:cf9273e96f3ee2ac89ffcb17627a78f78e7516b08f94dc435844ae72576a276e"},
+ {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:83215147121e15d5f3a45d99abeed9cf1fe16869d5c233b08c56cdf75f43a504"},
+ {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:3f7454aa427b8ab9101f3787eb178057c5250478e39b99540cfc2b889c7d0586"},
+ {file = "regex-2023.8.8-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:f0640913d2c1044d97e30d7c41728195fc37e54d190c5385eacb52115127b882"},
+ {file = "regex-2023.8.8-cp38-cp38-win32.whl", hash = "sha256:0c59122ceccb905a941fb23b087b8eafc5290bf983ebcb14d2301febcbe199c7"},
+ {file = "regex-2023.8.8-cp38-cp38-win_amd64.whl", hash = "sha256:c12f6f67495ea05c3d542d119d270007090bad5b843f642d418eb601ec0fa7be"},
+ {file = "regex-2023.8.8-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:82cd0a69cd28f6cc3789cc6adeb1027f79526b1ab50b1f6062bbc3a0ccb2dbc3"},
+ {file = "regex-2023.8.8-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:bb34d1605f96a245fc39790a117ac1bac8de84ab7691637b26ab2c5efb8f228c"},
+ {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:987b9ac04d0b38ef4f89fbc035e84a7efad9cdd5f1e29024f9289182c8d99e09"},
+ {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9dd6082f4e2aec9b6a0927202c85bc1b09dcab113f97265127c1dc20e2e32495"},
+ {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:7eb95fe8222932c10d4436e7a6f7c99991e3fdd9f36c949eff16a69246dee2dc"},
+ {file = "regex-2023.8.8-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7098c524ba9f20717a56a8d551d2ed491ea89cbf37e540759ed3b776a4f8d6eb"},
+ {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b694430b3f00eb02c594ff5a16db30e054c1b9589a043fe9174584c6efa8033"},
+ {file = "regex-2023.8.8-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:b2aeab3895d778155054abea5238d0eb9a72e9242bd4b43f42fd911ef9a13470"},
+ {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:988631b9d78b546e284478c2ec15c8a85960e262e247b35ca5eaf7ee22f6050a"},
+ {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:67ecd894e56a0c6108ec5ab1d8fa8418ec0cff45844a855966b875d1039a2e34"},
+ {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:14898830f0a0eb67cae2bbbc787c1a7d6e34ecc06fbd39d3af5fe29a4468e2c9"},
+ {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:f2200e00b62568cfd920127782c61bc1c546062a879cdc741cfcc6976668dfcf"},
+ {file = "regex-2023.8.8-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9691a549c19c22d26a4f3b948071e93517bdf86e41b81d8c6ac8a964bb71e5a6"},
+ {file = "regex-2023.8.8-cp39-cp39-win32.whl", hash = "sha256:6ab2ed84bf0137927846b37e882745a827458689eb969028af8032b1b3dac78e"},
+ {file = "regex-2023.8.8-cp39-cp39-win_amd64.whl", hash = "sha256:5543c055d8ec7801901e1193a51570643d6a6ab8751b1f7dd9af71af467538bb"},
+ {file = "regex-2023.8.8.tar.gz", hash = "sha256:fcbdc5f2b0f1cd0f6a56cdb46fe41d2cce1e644e3b68832f3eeebc5fb0f7712e"},
+]
+
+[[package]]
+name = "replicate"
+version = "0.11.0"
+description = "Python client for Replicate"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "replicate-0.11.0-py3-none-any.whl", hash = "sha256:fbb8815068864dc822cd4fa7b6103d6f4089d6ef122abd6c3441ca0f0f110c46"},
+ {file = "replicate-0.11.0.tar.gz", hash = "sha256:4d54b5838c1552a6f76cc37c3af8d9a7998105382082d672acad31636ad443b5"},
+]
+
+[package.dependencies]
+packaging = "*"
+pydantic = ">1"
+requests = ">2"
+
+[package.extras]
+dev = ["black", "mypy", "pytest", "responses", "ruff"]
+
+[[package]]
+name = "requests"
+version = "2.31.0"
+description = "Python HTTP for Humans."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"},
+ {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"},
+]
+
+[package.dependencies]
+certifi = ">=2017.4.17"
+charset-normalizer = ">=2,<4"
+idna = ">=2.5,<4"
+urllib3 = ">=1.21.1,<3"
+
+[package.extras]
+socks = ["PySocks (>=1.5.6,!=1.5.7)"]
+use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"]
+
+[[package]]
+name = "ripgrepy"
+version = "2.0.0"
+description = ""
+optional = false
+python-versions = "*"
+files = [
+ {file = "ripgrepy-2.0.0.tar.gz", hash = "sha256:6dd871bafe859301097354d1f171540fbc9bd38d3f8f52f8a196dc28522085da"},
+]
+
+[[package]]
+name = "rpds-py"
+version = "0.10.2"
+description = "Python bindings to Rust's persistent data structures (rpds)"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "rpds_py-0.10.2-cp310-cp310-macosx_10_7_x86_64.whl", hash = "sha256:9f00d54b18dd837f1431d66b076737deb7c29ce3ebb8412ceaf44d5e1954ac0c"},
+ {file = "rpds_py-0.10.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:8f4d561f4728f825e3b793a53064b606ca0b6fc264f67d09e54af452aafc5b82"},
+ {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:013d6c784150d10236a74b4094a79d96a256b814457e388fc5a4ba9efe24c402"},
+ {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:bd1142d22fdb183a0fff66d79134bf644401437fed874f81066d314c67ee193c"},
+ {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4a0536ed2b9297c75104e1a3da330828ba1b2639fa53b38d396f98bf7e3c68df"},
+ {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:41bd430b7b63aa802c02964e331ac0b177148fef5f807d2c90d05ce71a52b4d4"},
+ {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4e8474f7233fe1949ce4e03bea698a600c2d5d6b51dab6d6e6336dbe69acf23e"},
+ {file = "rpds_py-0.10.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:d9d7efaad48b859053b90dedd69bc92f2095084251e732e4c57ac9726bcb1e64"},
+ {file = "rpds_py-0.10.2-cp310-cp310-musllinux_1_2_aarch64.whl", hash = "sha256:5612b0b1de8d5114520094bd5fc3d04eb8af6f3e10d48ef05b7c8e77c1fd9545"},
+ {file = "rpds_py-0.10.2-cp310-cp310-musllinux_1_2_i686.whl", hash = "sha256:5d5eaf988951f6ecb6854ca3300b87123599c711183c83da7ce39717a7cbdbce"},
+ {file = "rpds_py-0.10.2-cp310-cp310-musllinux_1_2_x86_64.whl", hash = "sha256:75c8766734ac0053e1d683567e65e85306c4ec62631b0591caeb287ac8f72e08"},
+ {file = "rpds_py-0.10.2-cp310-none-win32.whl", hash = "sha256:8de9b88f0cbac73cfed34220d13c57849e62a7099a714b929142425e926d223a"},
+ {file = "rpds_py-0.10.2-cp310-none-win_amd64.whl", hash = "sha256:2275f1a022e2383da5d2d101fe11ccdcbae799148c4b83260a4b9309fa3e1fc2"},
+ {file = "rpds_py-0.10.2-cp311-cp311-macosx_10_7_x86_64.whl", hash = "sha256:dd91a7d7a9ce7f4983097c91ce211f3e5569cc21caa16f2692298a07e396f82b"},
+ {file = "rpds_py-0.10.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:e82b4a70cc67094f3f3fd77579702f48fcf1de7bdc67d79b8f1e24d089a6162c"},
+ {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e281b71922208e00886e4b7ffbfcf27874486364f177418ab676f102130e7ec9"},
+ {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b3eb1a0d2b6d232d1bcdfc3fcc5f7b004ab3fbd9203011a3172f051d4527c0b6"},
+ {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02945ae38fd78efc40900f509890de84cfd5ffe2cd2939eeb3a8800dc68b87cb"},
+ {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ccfb77f6dc8abffa6f1c7e3975ed9070a41ce5fcc11154d2bead8c1baa940f09"},
+ {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:af52078719209bef33e38131486fd784832dd8d1dc9b85f00a44f6e7437dd021"},
+ {file = "rpds_py-0.10.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:56ba7c1100ed079527f2b995bf5486a2e557e6d5b733c52e8947476338815b69"},
+ {file = "rpds_py-0.10.2-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:899b03a3be785a7e1ff84b237da71f0efa2f021512f147dd34ffdf7aa82cb678"},
+ {file = "rpds_py-0.10.2-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:22e6de18f00583f06928cc8d0993104ecc62f7c6da6478db2255de89a30e45d1"},
+ {file = "rpds_py-0.10.2-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:edd74b760a6bb950397e7a7bd2f38e6700f6525062650b1d77c6d851b82f02c2"},
+ {file = "rpds_py-0.10.2-cp311-none-win32.whl", hash = "sha256:18909093944727e068ebfc92e2e6ed1c4fa44135507c1c0555213ce211c53214"},
+ {file = "rpds_py-0.10.2-cp311-none-win_amd64.whl", hash = "sha256:9568764e72d85cf7855ca78b48e07ed1be47bf230e2cea8dabda3c95f660b0ff"},
+ {file = "rpds_py-0.10.2-cp312-cp312-macosx_10_7_x86_64.whl", hash = "sha256:0fc625059b83695fbb4fc8b7a8b66fa94ff9c7b78c84fb9986cd53ff88a28d80"},
+ {file = "rpds_py-0.10.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c86231c66e4f422e7c13ea6200bb4048b3016c8bfd11b4fd0dabd04d2c8e3501"},
+ {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:56777c57246e048908b550af9b81b0ec9cf804fd47cb7502ccd93238bd6025c2"},
+ {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a4cb372e22e9c879bd9a9cc9b20b7c1fbf30a605ac953da45ecec05d8a6e1c77"},
+ {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:aa3b3a43dabc4cc57a7800f526cbe03f71c69121e21b863fdf497b59b462b163"},
+ {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:59d222086daa55421d599609b32d0ebe544e57654c4a0a1490c54a7ebaa67561"},
+ {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:529aab727f54a937085184e7436e1d0e19975cf10115eda12d37a683e4ee5342"},
+ {file = "rpds_py-0.10.2-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:43e9b1531d6a898bdf086acb75c41265c7ec4331267d7619148d407efc72bd24"},
+ {file = "rpds_py-0.10.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:c2772bb95062e3f9774140205cd65d8997e39620715486cf5f843cf4ad8f744c"},
+ {file = "rpds_py-0.10.2-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:ba1b28e44f611f3f2b436bd8290050a61db4b59a8e24be4465f44897936b3824"},
+ {file = "rpds_py-0.10.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:5aba767e64b494483ad60c4873bec78d16205a21f8247c99749bd990d9c846c2"},
+ {file = "rpds_py-0.10.2-cp38-cp38-macosx_10_7_x86_64.whl", hash = "sha256:e1954f4b239d1a92081647eecfd51cbfd08ea16eb743b8af1cd0113258feea14"},
+ {file = "rpds_py-0.10.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:de4a2fd524993578fe093044f291b4b24aab134390030b3b9b5f87fd41ab7e75"},
+ {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e69737bd56006a86fd5a78b2b85447580a6138c930a75eb9ef39fe03d90782b1"},
+ {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:f40abbcc0a7d9a8a80870af839d317e6932533f98682aabd977add6c53beeb23"},
+ {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:29ec8507664f94cc08457d98cfc41c3cdbddfa8952438e644177a29b04937876"},
+ {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bcde80aefe7054fad6277762fb7e9d35c72ea479a485ae1bb14629c640987b30"},
+ {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a65de5c02884760a14a58304fb6303f9ddfc582e630f385daea871e1bdb18686"},
+ {file = "rpds_py-0.10.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:e92e5817eb6bfed23aa5e45bfe30647b83602bdd6f9e25d63524d4e6258458b0"},
+ {file = "rpds_py-0.10.2-cp38-cp38-musllinux_1_2_aarch64.whl", hash = "sha256:2c8fc6c841ada60a86d29c9ebe2e8757c47eda6553f3596c560e59ca6e9b6fa1"},
+ {file = "rpds_py-0.10.2-cp38-cp38-musllinux_1_2_i686.whl", hash = "sha256:8557c807388e6617161fe51b1a4747ea8d1133f2d2ad8e79583439abebe58fbd"},
+ {file = "rpds_py-0.10.2-cp38-cp38-musllinux_1_2_x86_64.whl", hash = "sha256:00e97d43a36811b78fa9ad9d3329bf34f76a31e891a7031a2ac01450c9b168ab"},
+ {file = "rpds_py-0.10.2-cp38-none-win32.whl", hash = "sha256:1ed3d5385d14be894e12a9033be989e012214a9811e7194849c94032ad69682a"},
+ {file = "rpds_py-0.10.2-cp38-none-win_amd64.whl", hash = "sha256:02b4a2e28eb24dac4ef43dda4f6a6f7766e355179b143f7d0c76a1c5488a307b"},
+ {file = "rpds_py-0.10.2-cp39-cp39-macosx_10_7_x86_64.whl", hash = "sha256:2a55631b93e47956fbc97d69ba2054a8c6a4016f9a3064ec4e031f5f1030cb90"},
+ {file = "rpds_py-0.10.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:2ffbf1b38c88d0466de542e91b08225d51782282512f8e2b11715126c41fda48"},
+ {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213f9ef5c02ec2f883c1075d25a873149daadbaea50d18d622e9db55ec9849c2"},
+ {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b00150a9a3fd0a8efaa90bc2696c105b04039d50763dd1c95a34c88c5966cb57"},
+ {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ab0f7aabdbce4a202e013083eeab71afdb85efa405dc4a06fea98cde81204675"},
+ {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2cd0c9fb5d40887500b4ed818770c68ab4fa6e0395d286f9704be6751b1b7d98"},
+ {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8578fc6c8bdd0201327503720fa581000b4bd3934abbf07e2628d1ad3de157d"},
+ {file = "rpds_py-0.10.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:2d27d08056fcd61ff47a0cd8407eff4d3e816c82cb6b9c6f0ce9a0ad49225f81"},
+ {file = "rpds_py-0.10.2-cp39-cp39-musllinux_1_2_aarch64.whl", hash = "sha256:c8f6526df47953b07c45b95c4d1da6b9a0861c0e5da0271db96bb1d807825412"},
+ {file = "rpds_py-0.10.2-cp39-cp39-musllinux_1_2_i686.whl", hash = "sha256:177c033e467a66a054dd3a9534167234a3d0b2e41445807b13b626e01da25d92"},
+ {file = "rpds_py-0.10.2-cp39-cp39-musllinux_1_2_x86_64.whl", hash = "sha256:9c74cbee9e532dc34371127f7686d6953e5153a1f22beab7f953d95ee4a0fe09"},
+ {file = "rpds_py-0.10.2-cp39-none-win32.whl", hash = "sha256:05a1382905026bdd560f806c8c7c16e0f3e3fb359ba8868203ca6e5799884968"},
+ {file = "rpds_py-0.10.2-cp39-none-win_amd64.whl", hash = "sha256:3fd503c27e7b7034128e30847ecdb4bff4ca5e60f29ad022a9f66ae8940d54ac"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-macosx_10_7_x86_64.whl", hash = "sha256:4a96147791e49e84207dd1530109aa0e9eeaf1c8b7a59f150047fc0fcdf9bb64"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-macosx_11_0_arm64.whl", hash = "sha256:203eb1532d51591d32e8dfafd60b5d31347ea7278c8da02b4b550287f6abe28b"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a2f416cdfe92f5fbb77177f5f3f7830059d1582db05f2c7119bf80069d1ab69b"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:b2660000e1a113869c86eb5cc07f3343467490f3cd9d0299f81da9ddae7137b7"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1adb04e4b4e41bf30aaa77eeb169c1b9ba9e5010e2e6ce8d6c17e1446edc9b68"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2bca97521ee786087f0c5ef318fef3eef0266a9c3deff88205523cf353af7394"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4969592e3cdeefa4cbb15a26cec102cbd4a1d6e5b695fac9fa026e19741138c8"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:df61f818edf7c8626bfa392f825860fb670b5f8336e238eb0ec7e2a5689cdded"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:b589d93a60e78fe55d5bc76ee8c2bf945dbdbb7cd16044c53e0307604e448de1"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-musllinux_1_2_i686.whl", hash = "sha256:73da69e1f612c3e682e34dcb971272d90d6f27b2c99acff444ca455a89978574"},
+ {file = "rpds_py-0.10.2-pp310-pypy310_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:89438e8885a186c69fe31f7ef98bb2bf29688c466c3caf9060f404c0be89ae80"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-macosx_10_7_x86_64.whl", hash = "sha256:c4ecc4e9a5d73a816cae36ee6b5d8b7a0c72013cae1e101406e832887c3dc2d8"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-macosx_11_0_arm64.whl", hash = "sha256:907b214da5d2fcff0b6ddb83de1333890ca92abaf4bbf8d9c61dc1b95c87fd6e"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bb44644371eaa29a3aba7b69b1862d0d56f073bb7585baa32e4271a71a91ee82"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:80c3cf46511653f94dfe07c7c79ab105c4164d6e1dfcb35b7214fb9af53eaef4"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:eaba0613c759ebf95988a84f766ca6b7432d55ce399194f95dde588ad1be0878"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0527c97dcd8bb983822ee31d3760187083fd3ba18ac4dd22cf5347c89d5628f4"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9cdfd649011ce2d90cb0dd304c5aba1190fac0c266d19a9e2b96b81cfd150a09"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:75eea40355a8690459c7291ce6c8ce39c27bd223675c7da6619f510c728feb97"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:4f1b804cfad04f862d6a84af9d1ad941b06f671878f0f7ecad6c92007d423de6"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-musllinux_1_2_i686.whl", hash = "sha256:bf77f9017fcfa1232f98598a637406e6c33982ccba8a5922339575c3e2b90ea5"},
+ {file = "rpds_py-0.10.2-pp38-pypy38_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:46c4c550bf59ce05d6bff2c98053822549aaf9fbaf81103edea325e03350bca1"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-macosx_10_7_x86_64.whl", hash = "sha256:46af4a742b90c7460e94214f923452c2c1d050a9da1d2b8d4c70cbc045e692b7"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-macosx_11_0_arm64.whl", hash = "sha256:2a86d246a160d98d820ee7d02dc18c923c228de095be362e57b9fd8970b2c4a1"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ae141c9017f8f473a6ee07a9425da021816a9f8c0683c2e5442f0ccf56b0fc62"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e1147bc3d0dd1e549d991110d0a09557ec9f925dbc1ca62871fcdab2ec9d716b"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:fce7a8ee8d0f682c953c0188735d823f0fcb62779bf92cd6ba473a8e730e26ad"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4c7f9d70f99e1fbcbf57c75328b80e1c0a7f6cad43e75efa90a97221be5efe15"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0b309908b6ff5ffbf6394818cb73b5a2a74073acee2c57fe8719046389aeff0d"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:3ff1f585a0fdc1415bd733b804f33d386064a308672249b14828130dd43e7c31"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-musllinux_1_2_aarch64.whl", hash = "sha256:0188b580c490bccb031e9b67e9e8c695a3c44ac5e06218b152361eca847317c3"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-musllinux_1_2_i686.whl", hash = "sha256:abe081453166e206e3a8c6d8ace57214c17b6d9477d7601ac14a365344dbc1f4"},
+ {file = "rpds_py-0.10.2-pp39-pypy39_pp73-musllinux_1_2_x86_64.whl", hash = "sha256:9118de88c16947eaf5b92f749e65b0501ea69e7c2be7bd6aefc12551622360e1"},
+ {file = "rpds_py-0.10.2.tar.gz", hash = "sha256:289073f68452b96e70990085324be7223944c7409973d13ddfe0eea1c1b5663b"},
+]
+
+[[package]]
+name = "rply"
+version = "0.7.8"
+description = "A pure Python Lex/Yacc that works with RPython"
+optional = false
+python-versions = "*"
+files = [
+ {file = "rply-0.7.8-py2.py3-none-any.whl", hash = "sha256:28ffd11d656c48aeb8c508eb382acd6a0bd906662624b34388751732a27807e7"},
+ {file = "rply-0.7.8.tar.gz", hash = "sha256:2a808ac25a4580a9991fc304d64434e299a8fc75760574492f242cbb5bb301c9"},
+]
+
+[package.dependencies]
+appdirs = "*"
+
+[[package]]
+name = "setuptools"
+version = "68.1.2"
+description = "Easily download, build, install, upgrade, and uninstall Python packages"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "setuptools-68.1.2-py3-none-any.whl", hash = "sha256:3d8083eed2d13afc9426f227b24fd1659489ec107c0e86cec2ffdde5c92e790b"},
+ {file = "setuptools-68.1.2.tar.gz", hash = "sha256:3d4dfa6d95f1b101d695a6160a7626e15583af71a5f52176efa5d39a054d475d"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5,<=7.1.2)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"]
+testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.develop (>=7.21)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"]
+testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"]
+
+[[package]]
+name = "six"
+version = "1.16.0"
+description = "Python 2 and 3 compatibility utilities"
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*"
+files = [
+ {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"},
+ {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"},
+]
+
+[[package]]
+name = "sniffio"
+version = "1.3.0"
+description = "Sniff out which async library your code is running under"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"},
+ {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"},
+]
+
+[[package]]
+name = "socksio"
+version = "1.0.0"
+description = "Sans-I/O implementation of SOCKS4, SOCKS4A, and SOCKS5."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "socksio-1.0.0-py3-none-any.whl", hash = "sha256:95dc1f15f9b34e8d7b16f06d74b8ccf48f609af32ab33c608d08761c5dcbb1f3"},
+ {file = "socksio-1.0.0.tar.gz", hash = "sha256:f88beb3da5b5c38b9890469de67d0cb0f9d494b78b106ca1845f96c10b91c4ac"},
+]
+
+[[package]]
+name = "soupsieve"
+version = "2.5"
+description = "A modern CSS selector implementation for Beautiful Soup."
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "soupsieve-2.5-py3-none-any.whl", hash = "sha256:eaa337ff55a1579b6549dc679565eac1e3d000563bcb1c8ab0d0fefbc0c2cdc7"},
+ {file = "soupsieve-2.5.tar.gz", hash = "sha256:5663d5a7b3bfaeee0bc4372e7fc48f9cff4940b3eec54a6451cc5299f1097690"},
+]
+
+[[package]]
+name = "starlette"
+version = "0.27.0"
+description = "The little ASGI library that shines."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "starlette-0.27.0-py3-none-any.whl", hash = "sha256:918416370e846586541235ccd38a474c08b80443ed31c578a418e2209b3eef91"},
+ {file = "starlette-0.27.0.tar.gz", hash = "sha256:6a6b0d042acb8d469a01eba54e9cda6cbd24ac602c4cd016723117d6a7e73b75"},
+]
+
+[package.dependencies]
+anyio = ">=3.4.0,<5"
+typing-extensions = {version = ">=3.10.0", markers = "python_version < \"3.10\""}
+
+[package.extras]
+full = ["httpx (>=0.22.0)", "itsdangerous", "jinja2", "python-multipart", "pyyaml"]
+
+[[package]]
+name = "tiktoken"
+version = "0.4.0"
+description = "tiktoken is a fast BPE tokeniser for use with OpenAI's models"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "tiktoken-0.4.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:176cad7f053d2cc82ce7e2a7c883ccc6971840a4b5276740d0b732a2b2011f8a"},
+ {file = "tiktoken-0.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:450d504892b3ac80207700266ee87c932df8efea54e05cefe8613edc963c1285"},
+ {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:00d662de1e7986d129139faf15e6a6ee7665ee103440769b8dedf3e7ba6ac37f"},
+ {file = "tiktoken-0.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5727d852ead18b7927b8adf558a6f913a15c7766725b23dbe21d22e243041b28"},
+ {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:c06cd92b09eb0404cedce3702fa866bf0d00e399439dad3f10288ddc31045422"},
+ {file = "tiktoken-0.4.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:9ec161e40ed44e4210d3b31e2ff426b4a55e8254f1023e5d2595cb60044f8ea6"},
+ {file = "tiktoken-0.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:1e8fa13cf9889d2c928b9e258e9dbbbf88ab02016e4236aae76e3b4f82dd8288"},
+ {file = "tiktoken-0.4.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bb2341836b725c60d0ab3c84970b9b5f68d4b733a7bcb80fb25967e5addb9920"},
+ {file = "tiktoken-0.4.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2ca30367ad750ee7d42fe80079d3092bd35bb266be7882b79c3bd159b39a17b0"},
+ {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3dc3df19ddec79435bb2a94ee46f4b9560d0299c23520803d851008445671197"},
+ {file = "tiktoken-0.4.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4d980fa066e962ef0f4dad0222e63a484c0c993c7a47c7dafda844ca5aded1f3"},
+ {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:329f548a821a2f339adc9fbcfd9fc12602e4b3f8598df5593cfc09839e9ae5e4"},
+ {file = "tiktoken-0.4.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:b1a038cee487931a5caaef0a2e8520e645508cde21717eacc9af3fbda097d8bb"},
+ {file = "tiktoken-0.4.0-cp311-cp311-win_amd64.whl", hash = "sha256:08efa59468dbe23ed038c28893e2a7158d8c211c3dd07f2bbc9a30e012512f1d"},
+ {file = "tiktoken-0.4.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f3020350685e009053829c1168703c346fb32c70c57d828ca3742558e94827a9"},
+ {file = "tiktoken-0.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:ba16698c42aad8190e746cd82f6a06769ac7edd415d62ba027ea1d99d958ed93"},
+ {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9c15d9955cc18d0d7ffcc9c03dc51167aedae98542238b54a2e659bd25fe77ed"},
+ {file = "tiktoken-0.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:64e1091c7103100d5e2c6ea706f0ec9cd6dc313e6fe7775ef777f40d8c20811e"},
+ {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:e87751b54eb7bca580126353a9cf17a8a8eaadd44edaac0e01123e1513a33281"},
+ {file = "tiktoken-0.4.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:e063b988b8ba8b66d6cc2026d937557437e79258095f52eaecfafb18a0a10c03"},
+ {file = "tiktoken-0.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:9c6dd439e878172dc163fced3bc7b19b9ab549c271b257599f55afc3a6a5edef"},
+ {file = "tiktoken-0.4.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8d1d97f83697ff44466c6bef5d35b6bcdb51e0125829a9c0ed1e6e39fb9a08fb"},
+ {file = "tiktoken-0.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:1b6bce7c68aa765f666474c7c11a7aebda3816b58ecafb209afa59c799b0dd2d"},
+ {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5a73286c35899ca51d8d764bc0b4d60838627ce193acb60cc88aea60bddec4fd"},
+ {file = "tiktoken-0.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d0394967d2236a60fd0aacef26646b53636423cc9c70c32f7c5124ebe86f3093"},
+ {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:dae2af6f03ecba5f679449fa66ed96585b2fa6accb7fd57d9649e9e398a94f44"},
+ {file = "tiktoken-0.4.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:55e251b1da3c293432179cf7c452cfa35562da286786be5a8b1ee3405c2b0dd2"},
+ {file = "tiktoken-0.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:c835d0ee1f84a5aa04921717754eadbc0f0a56cf613f78dfc1cf9ad35f6c3fea"},
+ {file = "tiktoken-0.4.0.tar.gz", hash = "sha256:59b20a819969735b48161ced9b92f05dc4519c17be4015cfb73b65270a243620"},
+]
+
+[package.dependencies]
+regex = ">=2022.1.18"
+requests = ">=2.26.0"
+
+[package.extras]
+blobfile = ["blobfile (>=2)"]
+
+[[package]]
+name = "tokenizers"
+version = "0.13.3"
+description = "Fast and Customizable Tokenizers"
+optional = false
+python-versions = "*"
+files = [
+ {file = "tokenizers-0.13.3-cp310-cp310-macosx_10_11_x86_64.whl", hash = "sha256:f3835c5be51de8c0a092058a4d4380cb9244fb34681fd0a295fbf0a52a5fdf33"},
+ {file = "tokenizers-0.13.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:4ef4c3e821730f2692489e926b184321e887f34fb8a6b80b8096b966ba663d07"},
+ {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c5fd1a6a25353e9aa762e2aae5a1e63883cad9f4e997c447ec39d071020459bc"},
+ {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ee0b1b311d65beab83d7a41c56a1e46ab732a9eed4460648e8eb0bd69fc2d059"},
+ {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:5ef4215284df1277dadbcc5e17d4882bda19f770d02348e73523f7e7d8b8d396"},
+ {file = "tokenizers-0.13.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a4d53976079cff8a033f778fb9adca2d9d69d009c02fa2d71a878b5f3963ed30"},
+ {file = "tokenizers-0.13.3-cp310-cp310-win32.whl", hash = "sha256:1f0e3b4c2ea2cd13238ce43548959c118069db7579e5d40ec270ad77da5833ce"},
+ {file = "tokenizers-0.13.3-cp310-cp310-win_amd64.whl", hash = "sha256:89649c00d0d7211e8186f7a75dfa1db6996f65edce4b84821817eadcc2d3c79e"},
+ {file = "tokenizers-0.13.3-cp311-cp311-macosx_10_11_universal2.whl", hash = "sha256:56b726e0d2bbc9243872b0144515ba684af5b8d8cd112fb83ee1365e26ec74c8"},
+ {file = "tokenizers-0.13.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:cc5c022ce692e1f499d745af293ab9ee6f5d92538ed2faf73f9708c89ee59ce6"},
+ {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f55c981ac44ba87c93e847c333e58c12abcbb377a0c2f2ef96e1a266e4184ff2"},
+ {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:f247eae99800ef821a91f47c5280e9e9afaeed9980fc444208d5aa6ba69ff148"},
+ {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4b3e3215d048e94f40f1c95802e45dcc37c5b05eb46280fc2ccc8cd351bff839"},
+ {file = "tokenizers-0.13.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9ba2b0bf01777c9b9bc94b53764d6684554ce98551fec496f71bc5be3a03e98b"},
+ {file = "tokenizers-0.13.3-cp311-cp311-win32.whl", hash = "sha256:cc78d77f597d1c458bf0ea7c2a64b6aa06941c7a99cb135b5969b0278824d808"},
+ {file = "tokenizers-0.13.3-cp311-cp311-win_amd64.whl", hash = "sha256:ecf182bf59bd541a8876deccf0360f5ae60496fd50b58510048020751cf1724c"},
+ {file = "tokenizers-0.13.3-cp37-cp37m-macosx_10_11_x86_64.whl", hash = "sha256:0527dc5436a1f6bf2c0327da3145687d3bcfbeab91fed8458920093de3901b44"},
+ {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:07cbb2c307627dc99b44b22ef05ff4473aa7c7cc1fec8f0a8b37d8a64b1a16d2"},
+ {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4560dbdeaae5b7ee0d4e493027e3de6d53c991b5002d7ff95083c99e11dd5ac0"},
+ {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:64064bd0322405c9374305ab9b4c07152a1474370327499911937fd4a76d004b"},
+ {file = "tokenizers-0.13.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8c6e2ab0f2e3d939ca66aa1d596602105fe33b505cd2854a4c1717f704c51de"},
+ {file = "tokenizers-0.13.3-cp37-cp37m-win32.whl", hash = "sha256:6cc29d410768f960db8677221e497226e545eaaea01aa3613fa0fdf2cc96cff4"},
+ {file = "tokenizers-0.13.3-cp37-cp37m-win_amd64.whl", hash = "sha256:fc2a7fdf864554a0dacf09d32e17c0caa9afe72baf9dd7ddedc61973bae352d8"},
+ {file = "tokenizers-0.13.3-cp38-cp38-macosx_10_11_x86_64.whl", hash = "sha256:8791dedba834c1fc55e5f1521be325ea3dafb381964be20684b92fdac95d79b7"},
+ {file = "tokenizers-0.13.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:d607a6a13718aeb20507bdf2b96162ead5145bbbfa26788d6b833f98b31b26e1"},
+ {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3791338f809cd1bf8e4fee6b540b36822434d0c6c6bc47162448deee3f77d425"},
+ {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:c2f35f30e39e6aab8716f07790f646bdc6e4a853816cc49a95ef2a9016bf9ce6"},
+ {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:310204dfed5aa797128b65d63538a9837cbdd15da2a29a77d67eefa489edda26"},
+ {file = "tokenizers-0.13.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a0f9b92ea052305166559f38498b3b0cae159caea712646648aaa272f7160963"},
+ {file = "tokenizers-0.13.3-cp38-cp38-win32.whl", hash = "sha256:9a3fa134896c3c1f0da6e762d15141fbff30d094067c8f1157b9fdca593b5806"},
+ {file = "tokenizers-0.13.3-cp38-cp38-win_amd64.whl", hash = "sha256:8e7b0cdeace87fa9e760e6a605e0ae8fc14b7d72e9fc19c578116f7287bb873d"},
+ {file = "tokenizers-0.13.3-cp39-cp39-macosx_10_11_x86_64.whl", hash = "sha256:00cee1e0859d55507e693a48fa4aef07060c4bb6bd93d80120e18fea9371c66d"},
+ {file = "tokenizers-0.13.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:a23ff602d0797cea1d0506ce69b27523b07e70f6dda982ab8cf82402de839088"},
+ {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:70ce07445050b537d2696022dafb115307abdffd2a5c106f029490f84501ef97"},
+ {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:280ffe95f50eaaf655b3a1dc7ff1d9cf4777029dbbc3e63a74e65a056594abc3"},
+ {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:97acfcec592f7e9de8cadcdcda50a7134423ac8455c0166b28c9ff04d227b371"},
+ {file = "tokenizers-0.13.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd7730c98a3010cd4f523465867ff95cd9d6430db46676ce79358f65ae39797b"},
+ {file = "tokenizers-0.13.3-cp39-cp39-win32.whl", hash = "sha256:48625a108029cb1ddf42e17a81b5a3230ba6888a70c9dc14e81bc319e812652d"},
+ {file = "tokenizers-0.13.3-cp39-cp39-win_amd64.whl", hash = "sha256:bc0a6f1ba036e482db6453571c9e3e60ecd5489980ffd95d11dc9f960483d783"},
+ {file = "tokenizers-0.13.3.tar.gz", hash = "sha256:2e546dbb68b623008a5442353137fbb0123d311a6d7ba52f2667c8862a75af2e"},
+]
+
+[package.extras]
+dev = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
+docs = ["setuptools-rust", "sphinx", "sphinx-rtd-theme"]
+testing = ["black (==22.3)", "datasets", "numpy", "pytest", "requests"]
+
+[[package]]
+name = "tomli"
+version = "2.0.1"
+description = "A lil' TOML parser"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"},
+ {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"},
+]
+
+[[package]]
+name = "tqdm"
+version = "4.66.1"
+description = "Fast, Extensible Progress Meter"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "tqdm-4.66.1-py3-none-any.whl", hash = "sha256:d302b3c5b53d47bce91fea46679d9c3c6508cf6332229aa1e7d8653723793386"},
+ {file = "tqdm-4.66.1.tar.gz", hash = "sha256:d88e651f9db8d8551a62556d3cff9e3034274ca5d66e93197cf2490e2dcb69c7"},
+]
+
+[package.dependencies]
+colorama = {version = "*", markers = "platform_system == \"Windows\""}
+
+[package.extras]
+dev = ["pytest (>=6)", "pytest-cov", "pytest-timeout", "pytest-xdist"]
+notebook = ["ipywidgets (>=6)"]
+slack = ["slack-sdk"]
+telegram = ["requests"]
+
+[[package]]
+name = "typer"
+version = "0.7.0"
+description = "Typer, build great CLIs. Easy to code. Based on Python type hints."
+optional = false
+python-versions = ">=3.6"
+files = [
+ {file = "typer-0.7.0-py3-none-any.whl", hash = "sha256:b5e704f4e48ec263de1c0b3a2387cd405a13767d2f907f44c1a08cbad96f606d"},
+ {file = "typer-0.7.0.tar.gz", hash = "sha256:ff797846578a9f2a201b53442aedeb543319466870fbe1c701eab66dd7681165"},
+]
+
+[package.dependencies]
+click = ">=7.1.1,<9.0.0"
+
+[package.extras]
+all = ["colorama (>=0.4.3,<0.5.0)", "rich (>=10.11.0,<13.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+dev = ["autoflake (>=1.3.1,<2.0.0)", "flake8 (>=3.8.3,<4.0.0)", "pre-commit (>=2.17.0,<3.0.0)"]
+doc = ["cairosvg (>=2.5.2,<3.0.0)", "mdx-include (>=1.4.1,<2.0.0)", "mkdocs (>=1.1.2,<2.0.0)", "mkdocs-material (>=8.1.4,<9.0.0)", "pillow (>=9.3.0,<10.0.0)"]
+test = ["black (>=22.3.0,<23.0.0)", "coverage (>=6.2,<7.0)", "isort (>=5.0.6,<6.0.0)", "mypy (==0.910)", "pytest (>=4.4.0,<8.0.0)", "pytest-cov (>=2.10.0,<5.0.0)", "pytest-sugar (>=0.9.4,<0.10.0)", "pytest-xdist (>=1.32.0,<4.0.0)", "rich (>=10.11.0,<13.0.0)", "shellingham (>=1.3.0,<2.0.0)"]
+
+[[package]]
+name = "typing-extensions"
+version = "4.7.1"
+description = "Backported and Experimental Type Hints for Python 3.7+"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "typing_extensions-4.7.1-py3-none-any.whl", hash = "sha256:440d5dd3af93b060174bf433bccd69b0babc3b15b1a8dca43789fd7f61514b36"},
+ {file = "typing_extensions-4.7.1.tar.gz", hash = "sha256:b75ddc264f0ba5615db7ba217daeb99701ad295353c45f9e95963337ceeeffb2"},
+]
+
+[[package]]
+name = "ujson"
+version = "5.8.0"
+description = "Ultra fast JSON encoder and decoder for Python"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "ujson-5.8.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:f4511560d75b15ecb367eef561554959b9d49b6ec3b8d5634212f9fed74a6df1"},
+ {file = "ujson-5.8.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9399eaa5d1931a0ead49dce3ffacbea63f3177978588b956036bfe53cdf6af75"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c4e7bb7eba0e1963f8b768f9c458ecb193e5bf6977090182e2b4f4408f35ac76"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:40931d7c08c4ce99adc4b409ddb1bbb01635a950e81239c2382cfe24251b127a"},
+ {file = "ujson-5.8.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d53039d39de65360e924b511c7ca1a67b0975c34c015dd468fca492b11caa8f7"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:bdf04c6af3852161be9613e458a1fb67327910391de8ffedb8332e60800147a2"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:a70f776bda2e5072a086c02792c7863ba5833d565189e09fabbd04c8b4c3abba"},
+ {file = "ujson-5.8.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:f26629ac531d712f93192c233a74888bc8b8212558bd7d04c349125f10199fcf"},
+ {file = "ujson-5.8.0-cp310-cp310-win32.whl", hash = "sha256:7ecc33b107ae88405aebdb8d82c13d6944be2331ebb04399134c03171509371a"},
+ {file = "ujson-5.8.0-cp310-cp310-win_amd64.whl", hash = "sha256:3b27a8da7a080add559a3b73ec9ebd52e82cc4419f7c6fb7266e62439a055ed0"},
+ {file = "ujson-5.8.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:193349a998cd821483a25f5df30b44e8f495423840ee11b3b28df092ddfd0f7f"},
+ {file = "ujson-5.8.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:4ddeabbc78b2aed531f167d1e70387b151900bc856d61e9325fcdfefb2a51ad8"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5ce24909a9c25062e60653073dd6d5e6ec9d6ad7ed6e0069450d5b673c854405"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:27a2a3c7620ebe43641e926a1062bc04e92dbe90d3501687957d71b4bdddaec4"},
+ {file = "ujson-5.8.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2b852bdf920fe9f84e2a2c210cc45f1b64f763b4f7d01468b33f7791698e455e"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:20768961a6a706170497129960762ded9c89fb1c10db2989c56956b162e2a8a3"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:e0147d41e9fb5cd174207c4a2895c5e24813204499fd0839951d4c8784a23bf5"},
+ {file = "ujson-5.8.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:e3673053b036fd161ae7a5a33358ccae6793ee89fd499000204676baafd7b3aa"},
+ {file = "ujson-5.8.0-cp311-cp311-win32.whl", hash = "sha256:a89cf3cd8bf33a37600431b7024a7ccf499db25f9f0b332947fbc79043aad879"},
+ {file = "ujson-5.8.0-cp311-cp311-win_amd64.whl", hash = "sha256:3659deec9ab9eb19e8646932bfe6fe22730757c4addbe9d7d5544e879dc1b721"},
+ {file = "ujson-5.8.0-cp312-cp312-macosx_10_9_x86_64.whl", hash = "sha256:102bf31c56f59538cccdfec45649780ae00657e86247c07edac434cb14d5388c"},
+ {file = "ujson-5.8.0-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:299a312c3e85edee1178cb6453645217ba23b4e3186412677fa48e9a7f986de6"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f2e385a7679b9088d7bc43a64811a7713cc7c33d032d020f757c54e7d41931ae"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ad24ec130855d4430a682c7a60ca0bc158f8253ec81feed4073801f6b6cb681b"},
+ {file = "ujson-5.8.0-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:16fde596d5e45bdf0d7de615346a102510ac8c405098e5595625015b0d4b5296"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_aarch64.whl", hash = "sha256:6d230d870d1ce03df915e694dcfa3f4e8714369cce2346686dbe0bc8e3f135e7"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_i686.whl", hash = "sha256:9571de0c53db5cbc265945e08f093f093af2c5a11e14772c72d8e37fceeedd08"},
+ {file = "ujson-5.8.0-cp312-cp312-musllinux_1_1_x86_64.whl", hash = "sha256:7cba16b26efe774c096a5e822e4f27097b7c81ed6fb5264a2b3f5fd8784bab30"},
+ {file = "ujson-5.8.0-cp312-cp312-win32.whl", hash = "sha256:48c7d373ff22366eecfa36a52b9b55b0ee5bd44c2b50e16084aa88b9de038916"},
+ {file = "ujson-5.8.0-cp312-cp312-win_amd64.whl", hash = "sha256:5ac97b1e182d81cf395ded620528c59f4177eee024b4b39a50cdd7b720fdeec6"},
+ {file = "ujson-5.8.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2a64cc32bb4a436e5813b83f5aab0889927e5ea1788bf99b930fad853c5625cb"},
+ {file = "ujson-5.8.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:e54578fa8838ddc722539a752adfce9372474114f8c127bb316db5392d942f8b"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9721cd112b5e4687cb4ade12a7b8af8b048d4991227ae8066d9c4b3a6642a582"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d9707e5aacf63fb919f6237d6490c4e0244c7f8d3dc2a0f84d7dec5db7cb54c"},
+ {file = "ujson-5.8.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0be81bae295f65a6896b0c9030b55a106fb2dec69ef877253a87bc7c9c5308f7"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:ae7f4725c344bf437e9b881019c558416fe84ad9c6b67426416c131ad577df67"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:9ab282d67ef3097105552bf151438b551cc4bedb3f24d80fada830f2e132aeb9"},
+ {file = "ujson-5.8.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:94c7bd9880fa33fcf7f6d7f4cc032e2371adee3c5dba2922b918987141d1bf07"},
+ {file = "ujson-5.8.0-cp38-cp38-win32.whl", hash = "sha256:bf5737dbcfe0fa0ac8fa599eceafae86b376492c8f1e4b84e3adf765f03fb564"},
+ {file = "ujson-5.8.0-cp38-cp38-win_amd64.whl", hash = "sha256:11da6bed916f9bfacf13f4fc6a9594abd62b2bb115acfb17a77b0f03bee4cfd5"},
+ {file = "ujson-5.8.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:69b3104a2603bab510497ceabc186ba40fef38ec731c0ccaa662e01ff94a985c"},
+ {file = "ujson-5.8.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:9249fdefeb021e00b46025e77feed89cd91ffe9b3a49415239103fc1d5d9c29a"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2873d196725a8193f56dde527b322c4bc79ed97cd60f1d087826ac3290cf9207"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a4dafa9010c366589f55afb0fd67084acd8added1a51251008f9ff2c3e44042"},
+ {file = "ujson-5.8.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a42baa647a50fa8bed53d4e242be61023bd37b93577f27f90ffe521ac9dc7a3"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:f3554eaadffe416c6f543af442066afa6549edbc34fe6a7719818c3e72ebfe95"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:fb87decf38cc82bcdea1d7511e73629e651bdec3a43ab40985167ab8449b769c"},
+ {file = "ujson-5.8.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:407d60eb942c318482bbfb1e66be093308bb11617d41c613e33b4ce5be789adc"},
+ {file = "ujson-5.8.0-cp39-cp39-win32.whl", hash = "sha256:0fe1b7edaf560ca6ab023f81cbeaf9946a240876a993b8c5a21a1c539171d903"},
+ {file = "ujson-5.8.0-cp39-cp39-win_amd64.whl", hash = "sha256:3f9b63530a5392eb687baff3989d0fb5f45194ae5b1ca8276282fb647f8dcdb3"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:efeddf950fb15a832376c0c01d8d7713479fbeceaed1eaecb2665aa62c305aec"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d8283ac5d03e65f488530c43d6610134309085b71db4f675e9cf5dff96a8282"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:eb0142f6f10f57598655340a3b2c70ed4646cbe674191da195eb0985a9813b83"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:07d459aca895eb17eb463b00441986b021b9312c6c8cc1d06880925c7f51009c"},
+ {file = "ujson-5.8.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:d524a8c15cfc863705991d70bbec998456a42c405c291d0f84a74ad7f35c5109"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:d6f84a7a175c75beecde53a624881ff618e9433045a69fcfb5e154b73cdaa377"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b748797131ac7b29826d1524db1cc366d2722ab7afacc2ce1287cdafccddbf1f"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e72ba76313d48a1a3a42e7dc9d1db32ea93fac782ad8dde6f8b13e35c229130"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f504117a39cb98abba4153bf0b46b4954cc5d62f6351a14660201500ba31fe7f"},
+ {file = "ujson-5.8.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:a8c91b6f4bf23f274af9002b128d133b735141e867109487d17e344d38b87d94"},
+ {file = "ujson-5.8.0.tar.gz", hash = "sha256:78e318def4ade898a461b3d92a79f9441e7e0e4d2ad5419abed4336d702c7425"},
+]
+
+[[package]]
+name = "urllib3"
+version = "1.26.15"
+description = "HTTP library with thread-safe connection pooling, file post, and more."
+optional = false
+python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*"
+files = [
+ {file = "urllib3-1.26.15-py2.py3-none-any.whl", hash = "sha256:aa751d169e23c7479ce47a0cb0da579e3ede798f994f5816a74e4f4500dcea42"},
+ {file = "urllib3-1.26.15.tar.gz", hash = "sha256:8a388717b9476f934a21484e8c8e61875ab60644d29b9b39e11e4b9dc1c6b305"},
+]
+
+[package.extras]
+brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)", "brotlipy (>=0.6.0)"]
+secure = ["certifi", "cryptography (>=1.3.4)", "idna (>=2.0.0)", "ipaddress", "pyOpenSSL (>=0.14)", "urllib3-secure-extra"]
+socks = ["PySocks (>=1.5.6,!=1.5.7,<2.0)"]
+
+[[package]]
+name = "uvicorn"
+version = "0.21.1"
+description = "The lightning-fast ASGI server."
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "uvicorn-0.21.1-py3-none-any.whl", hash = "sha256:e47cac98a6da10cd41e6fd036d472c6f58ede6c5dbee3dbee3ef7a100ed97742"},
+ {file = "uvicorn-0.21.1.tar.gz", hash = "sha256:0fac9cb342ba099e0d582966005f3fdba5b0290579fed4a6266dc702ca7bb032"},
+]
+
+[package.dependencies]
+click = ">=7.0"
+h11 = ">=0.8"
+
+[package.extras]
+standard = ["colorama (>=0.4)", "httptools (>=0.5.0)", "python-dotenv (>=0.13)", "pyyaml (>=5.1)", "uvloop (>=0.14.0,!=0.15.0,!=0.15.1)", "watchfiles (>=0.13)", "websockets (>=10.4)"]
+
+[[package]]
+name = "websockets"
+version = "11.0.3"
+description = "An implementation of the WebSocket Protocol (RFC 6455 & 7692)"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "websockets-11.0.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:3ccc8a0c387629aec40f2fc9fdcb4b9d5431954f934da3eaf16cdc94f67dbfac"},
+ {file = "websockets-11.0.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d67ac60a307f760c6e65dad586f556dde58e683fab03323221a4e530ead6f74d"},
+ {file = "websockets-11.0.3-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:84d27a4832cc1a0ee07cdcf2b0629a8a72db73f4cf6de6f0904f6661227f256f"},
+ {file = "websockets-11.0.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ffd7dcaf744f25f82190856bc26ed81721508fc5cbf2a330751e135ff1283564"},
+ {file = "websockets-11.0.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7622a89d696fc87af8e8d280d9b421db5133ef5b29d3f7a1ce9f1a7bf7fcfa11"},
+ {file = "websockets-11.0.3-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bceab846bac555aff6427d060f2fcfff71042dba6f5fca7dc4f75cac815e57ca"},
+ {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:54c6e5b3d3a8936a4ab6870d46bdd6ec500ad62bde9e44462c32d18f1e9a8e54"},
+ {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:41f696ba95cd92dc047e46b41b26dd24518384749ed0d99bea0a941ca87404c4"},
+ {file = "websockets-11.0.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:86d2a77fd490ae3ff6fae1c6ceaecad063d3cc2320b44377efdde79880e11526"},
+ {file = "websockets-11.0.3-cp310-cp310-win32.whl", hash = "sha256:2d903ad4419f5b472de90cd2d40384573b25da71e33519a67797de17ef849b69"},
+ {file = "websockets-11.0.3-cp310-cp310-win_amd64.whl", hash = "sha256:1d2256283fa4b7f4c7d7d3e84dc2ece74d341bce57d5b9bf385df109c2a1a82f"},
+ {file = "websockets-11.0.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e848f46a58b9fcf3d06061d17be388caf70ea5b8cc3466251963c8345e13f7eb"},
+ {file = "websockets-11.0.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aa5003845cdd21ac0dc6c9bf661c5beddd01116f6eb9eb3c8e272353d45b3288"},
+ {file = "websockets-11.0.3-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:b58cbf0697721120866820b89f93659abc31c1e876bf20d0b3d03cef14faf84d"},
+ {file = "websockets-11.0.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:660e2d9068d2bedc0912af508f30bbeb505bbbf9774d98def45f68278cea20d3"},
+ {file = "websockets-11.0.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c1f0524f203e3bd35149f12157438f406eff2e4fb30f71221c8a5eceb3617b6b"},
+ {file = "websockets-11.0.3-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:def07915168ac8f7853812cc593c71185a16216e9e4fa886358a17ed0fd9fcf6"},
+ {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b30c6590146e53149f04e85a6e4fcae068df4289e31e4aee1fdf56a0dead8f97"},
+ {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:619d9f06372b3a42bc29d0cd0354c9bb9fb39c2cbc1a9c5025b4538738dbffaf"},
+ {file = "websockets-11.0.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:01f5567d9cf6f502d655151645d4e8b72b453413d3819d2b6f1185abc23e82dd"},
+ {file = "websockets-11.0.3-cp311-cp311-win32.whl", hash = "sha256:e1459677e5d12be8bbc7584c35b992eea142911a6236a3278b9b5ce3326f282c"},
+ {file = "websockets-11.0.3-cp311-cp311-win_amd64.whl", hash = "sha256:e7837cb169eca3b3ae94cc5787c4fed99eef74c0ab9506756eea335e0d6f3ed8"},
+ {file = "websockets-11.0.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:9f59a3c656fef341a99e3d63189852be7084c0e54b75734cde571182c087b152"},
+ {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2529338a6ff0eb0b50c7be33dc3d0e456381157a31eefc561771ee431134a97f"},
+ {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:34fd59a4ac42dff6d4681d8843217137f6bc85ed29722f2f7222bd619d15e95b"},
+ {file = "websockets-11.0.3-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:332d126167ddddec94597c2365537baf9ff62dfcc9db4266f263d455f2f031cb"},
+ {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:6505c1b31274723ccaf5f515c1824a4ad2f0d191cec942666b3d0f3aa4cb4007"},
+ {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f467ba0050b7de85016b43f5a22b46383ef004c4f672148a8abf32bc999a87f0"},
+ {file = "websockets-11.0.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:9d9acd80072abcc98bd2c86c3c9cd4ac2347b5a5a0cae7ed5c0ee5675f86d9af"},
+ {file = "websockets-11.0.3-cp37-cp37m-win32.whl", hash = "sha256:e590228200fcfc7e9109509e4d9125eace2042fd52b595dd22bbc34bb282307f"},
+ {file = "websockets-11.0.3-cp37-cp37m-win_amd64.whl", hash = "sha256:b16fff62b45eccb9c7abb18e60e7e446998093cdcb50fed33134b9b6878836de"},
+ {file = "websockets-11.0.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:fb06eea71a00a7af0ae6aefbb932fb8a7df3cb390cc217d51a9ad7343de1b8d0"},
+ {file = "websockets-11.0.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:8a34e13a62a59c871064dfd8ffb150867e54291e46d4a7cf11d02c94a5275bae"},
+ {file = "websockets-11.0.3-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4841ed00f1026dfbced6fca7d963c4e7043aa832648671b5138008dc5a8f6d99"},
+ {file = "websockets-11.0.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a073fc9ab1c8aff37c99f11f1641e16da517770e31a37265d2755282a5d28aa"},
+ {file = "websockets-11.0.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:68b977f21ce443d6d378dbd5ca38621755f2063d6fdb3335bda981d552cfff86"},
+ {file = "websockets-11.0.3-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e1a99a7a71631f0efe727c10edfba09ea6bee4166a6f9c19aafb6c0b5917d09c"},
+ {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:bee9fcb41db2a23bed96c6b6ead6489702c12334ea20a297aa095ce6d31370d0"},
+ {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:4b253869ea05a5a073ebfdcb5cb3b0266a57c3764cf6fe114e4cd90f4bfa5f5e"},
+ {file = "websockets-11.0.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:1553cb82942b2a74dd9b15a018dce645d4e68674de2ca31ff13ebc2d9f283788"},
+ {file = "websockets-11.0.3-cp38-cp38-win32.whl", hash = "sha256:f61bdb1df43dc9c131791fbc2355535f9024b9a04398d3bd0684fc16ab07df74"},
+ {file = "websockets-11.0.3-cp38-cp38-win_amd64.whl", hash = "sha256:03aae4edc0b1c68498f41a6772d80ac7c1e33c06c6ffa2ac1c27a07653e79d6f"},
+ {file = "websockets-11.0.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:777354ee16f02f643a4c7f2b3eff8027a33c9861edc691a2003531f5da4f6bc8"},
+ {file = "websockets-11.0.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8c82f11964f010053e13daafdc7154ce7385ecc538989a354ccc7067fd7028fd"},
+ {file = "websockets-11.0.3-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:3580dd9c1ad0701169e4d6fc41e878ffe05e6bdcaf3c412f9d559389d0c9e016"},
+ {file = "websockets-11.0.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6f1a3f10f836fab6ca6efa97bb952300b20ae56b409414ca85bff2ad241d2a61"},
+ {file = "websockets-11.0.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:df41b9bc27c2c25b486bae7cf42fccdc52ff181c8c387bfd026624a491c2671b"},
+ {file = "websockets-11.0.3-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:279e5de4671e79a9ac877427f4ac4ce93751b8823f276b681d04b2156713b9dd"},
+ {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:1fdf26fa8a6a592f8f9235285b8affa72748dc12e964a5518c6c5e8f916716f7"},
+ {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:69269f3a0b472e91125b503d3c0b3566bda26da0a3261c49f0027eb6075086d1"},
+ {file = "websockets-11.0.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:97b52894d948d2f6ea480171a27122d77af14ced35f62e5c892ca2fae9344311"},
+ {file = "websockets-11.0.3-cp39-cp39-win32.whl", hash = "sha256:c7f3cb904cce8e1be667c7e6fef4516b98d1a6a0635a58a57528d577ac18a128"},
+ {file = "websockets-11.0.3-cp39-cp39-win_amd64.whl", hash = "sha256:c792ea4eabc0159535608fc5658a74d1a81020eb35195dd63214dcf07556f67e"},
+ {file = "websockets-11.0.3-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:f2e58f2c36cc52d41f2659e4c0cbf7353e28c8c9e63e30d8c6d3494dc9fdedcf"},
+ {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:de36fe9c02995c7e6ae6efe2e205816f5f00c22fd1fbf343d4d18c3d5ceac2f5"},
+ {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0ac56b661e60edd453585f4bd68eb6a29ae25b5184fd5ba51e97652580458998"},
+ {file = "websockets-11.0.3-pp37-pypy37_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e052b8467dd07d4943936009f46ae5ce7b908ddcac3fda581656b1b19c083d9b"},
+ {file = "websockets-11.0.3-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:42cc5452a54a8e46a032521d7365da775823e21bfba2895fb7b77633cce031bb"},
+ {file = "websockets-11.0.3-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:e6316827e3e79b7b8e7d8e3b08f4e331af91a48e794d5d8b099928b6f0b85f20"},
+ {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8531fdcad636d82c517b26a448dcfe62f720e1922b33c81ce695d0edb91eb931"},
+ {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c114e8da9b475739dde229fd3bc6b05a6537a88a578358bc8eb29b4030fac9c9"},
+ {file = "websockets-11.0.3-pp38-pypy38_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e063b1865974611313a3849d43f2c3f5368093691349cf3c7c8f8f75ad7cb280"},
+ {file = "websockets-11.0.3-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:92b2065d642bf8c0a82d59e59053dd2fdde64d4ed44efe4870fa816c1232647b"},
+ {file = "websockets-11.0.3-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:0ee68fe502f9031f19d495dae2c268830df2760c0524cbac5d759921ba8c8e82"},
+ {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:dcacf2c7a6c3a84e720d1bb2b543c675bf6c40e460300b628bab1b1efc7c034c"},
+ {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b67c6f5e5a401fc56394f191f00f9b3811fe843ee93f4a70df3c389d1adf857d"},
+ {file = "websockets-11.0.3-pp39-pypy39_pp73-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:1d5023a4b6a5b183dc838808087033ec5df77580485fc533e7dab2567851b0a4"},
+ {file = "websockets-11.0.3-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:ed058398f55163a79bb9f06a90ef9ccc063b204bb346c4de78efc5d15abfe602"},
+ {file = "websockets-11.0.3-py3-none-any.whl", hash = "sha256:6681ba9e7f8f3b19440921e99efbb40fc89f26cd71bf539e45d8c8a25c976dc6"},
+ {file = "websockets-11.0.3.tar.gz", hash = "sha256:88fc51d9a26b10fc331be344f1781224a375b78488fc343620184e95a4b27016"},
+]
+
+[[package]]
+name = "wrapt"
+version = "1.15.0"
+description = "Module for decorators, wrappers and monkey patching."
+optional = false
+python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7"
+files = [
+ {file = "wrapt-1.15.0-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:ca1cccf838cd28d5a0883b342474c630ac48cac5df0ee6eacc9c7290f76b11c1"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:e826aadda3cae59295b95343db8f3d965fb31059da7de01ee8d1c40a60398b29"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:5fc8e02f5984a55d2c653f5fea93531e9836abbd84342c1d1e17abc4a15084c2"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:96e25c8603a155559231c19c0349245eeb4ac0096fe3c1d0be5c47e075bd4f46"},
+ {file = "wrapt-1.15.0-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:40737a081d7497efea35ab9304b829b857f21558acfc7b3272f908d33b0d9d4c"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:f87ec75864c37c4c6cb908d282e1969e79763e0d9becdfe9fe5473b7bb1e5f09"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:1286eb30261894e4c70d124d44b7fd07825340869945c79d05bda53a40caa079"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:493d389a2b63c88ad56cdc35d0fa5752daac56ca755805b1b0c530f785767d5e"},
+ {file = "wrapt-1.15.0-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:58d7a75d731e8c63614222bcb21dd992b4ab01a399f1f09dd82af17bbfc2368a"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:21f6d9a0d5b3a207cdf7acf8e58d7d13d463e639f0c7e01d82cdb671e6cb7923"},
+ {file = "wrapt-1.15.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ce42618f67741d4697684e501ef02f29e758a123aa2d669e2d964ff734ee00ee"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41d07d029dd4157ae27beab04d22b8e261eddfc6ecd64ff7000b10dc8b3a5727"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:54accd4b8bc202966bafafd16e69da9d5640ff92389d33d28555c5fd4f25ccb7"},
+ {file = "wrapt-1.15.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2fbfbca668dd15b744418265a9607baa970c347eefd0db6a518aaf0cfbd153c0"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:76e9c727a874b4856d11a32fb0b389afc61ce8aaf281ada613713ddeadd1cfec"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:e20076a211cd6f9b44a6be58f7eeafa7ab5720eb796975d0c03f05b47d89eb90"},
+ {file = "wrapt-1.15.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:a74d56552ddbde46c246b5b89199cb3fd182f9c346c784e1a93e4dc3f5ec9975"},
+ {file = "wrapt-1.15.0-cp310-cp310-win32.whl", hash = "sha256:26458da5653aa5b3d8dc8b24192f574a58984c749401f98fff994d41d3f08da1"},
+ {file = "wrapt-1.15.0-cp310-cp310-win_amd64.whl", hash = "sha256:75760a47c06b5974aa5e01949bf7e66d2af4d08cb8c1d6516af5e39595397f5e"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ba1711cda2d30634a7e452fc79eabcadaffedf241ff206db2ee93dd2c89a60e7"},
+ {file = "wrapt-1.15.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56374914b132c702aa9aa9959c550004b8847148f95e1b824772d453ac204a72"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a89ce3fd220ff144bd9d54da333ec0de0399b52c9ac3d2ce34b569cf1a5748fb"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3bbe623731d03b186b3d6b0d6f51865bf598587c38d6f7b0be2e27414f7f214e"},
+ {file = "wrapt-1.15.0-cp311-cp311-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3abbe948c3cbde2689370a262a8d04e32ec2dd4f27103669a45c6929bcdbfe7c"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:b67b819628e3b748fd3c2192c15fb951f549d0f47c0449af0764d7647302fda3"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:7eebcdbe3677e58dd4c0e03b4f2cfa346ed4049687d839adad68cc38bb559c92"},
+ {file = "wrapt-1.15.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:74934ebd71950e3db69960a7da29204f89624dde411afbfb3b4858c1409b1e98"},
+ {file = "wrapt-1.15.0-cp311-cp311-win32.whl", hash = "sha256:bd84395aab8e4d36263cd1b9308cd504f6cf713b7d6d3ce25ea55670baec5416"},
+ {file = "wrapt-1.15.0-cp311-cp311-win_amd64.whl", hash = "sha256:a487f72a25904e2b4bbc0817ce7a8de94363bd7e79890510174da9d901c38705"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_i686.whl", hash = "sha256:4ff0d20f2e670800d3ed2b220d40984162089a6e2c9646fdb09b85e6f9a8fc29"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux1_x86_64.whl", hash = "sha256:9ed6aa0726b9b60911f4aed8ec5b8dd7bf3491476015819f56473ffaef8959bd"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_i686.whl", hash = "sha256:896689fddba4f23ef7c718279e42f8834041a21342d95e56922e1c10c0cc7afb"},
+ {file = "wrapt-1.15.0-cp35-cp35m-manylinux2010_x86_64.whl", hash = "sha256:75669d77bb2c071333417617a235324a1618dba66f82a750362eccbe5b61d248"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win32.whl", hash = "sha256:fbec11614dba0424ca72f4e8ba3c420dba07b4a7c206c8c8e4e73f2e98f4c559"},
+ {file = "wrapt-1.15.0-cp35-cp35m-win_amd64.whl", hash = "sha256:fd69666217b62fa5d7c6aa88e507493a34dec4fa20c5bd925e4bc12fce586639"},
+ {file = "wrapt-1.15.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:b0724f05c396b0a4c36a3226c31648385deb6a65d8992644c12a4963c70326ba"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bbeccb1aa40ab88cd29e6c7d8585582c99548f55f9b2581dfc5ba68c59a85752"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:38adf7198f8f154502883242f9fe7333ab05a5b02de7d83aa2d88ea621f13364"},
+ {file = "wrapt-1.15.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:578383d740457fa790fdf85e6d346fda1416a40549fe8db08e5e9bd281c6a475"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:a4cbb9ff5795cd66f0066bdf5947f170f5d63a9274f99bdbca02fd973adcf2a8"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:af5bd9ccb188f6a5fdda9f1f09d9f4c86cc8a539bd48a0bfdc97723970348418"},
+ {file = "wrapt-1.15.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:b56d5519e470d3f2fe4aa7585f0632b060d532d0696c5bdfb5e8319e1d0f69a2"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win32.whl", hash = "sha256:77d4c1b881076c3ba173484dfa53d3582c1c8ff1f914c6461ab70c8428b796c1"},
+ {file = "wrapt-1.15.0-cp36-cp36m-win_amd64.whl", hash = "sha256:077ff0d1f9d9e4ce6476c1a924a3332452c1406e59d90a2cf24aeb29eeac9420"},
+ {file = "wrapt-1.15.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:5c5aa28df055697d7c37d2099a7bc09f559d5053c3349b1ad0c39000e611d317"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3a8564f283394634a7a7054b7983e47dbf39c07712d7b177b37e03f2467a024e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:780c82a41dc493b62fc5884fb1d3a3b81106642c5c5c78d6a0d4cbe96d62ba7e"},
+ {file = "wrapt-1.15.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e169e957c33576f47e21864cf3fc9ff47c223a4ebca8960079b8bd36cb014fd0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b02f21c1e2074943312d03d243ac4388319f2456576b2c6023041c4d57cd7019"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:f2e69b3ed24544b0d3dbe2c5c0ba5153ce50dcebb576fdc4696d52aa22db6034"},
+ {file = "wrapt-1.15.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d787272ed958a05b2c86311d3a4135d3c2aeea4fc655705f074130aa57d71653"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win32.whl", hash = "sha256:02fce1852f755f44f95af51f69d22e45080102e9d00258053b79367d07af39c0"},
+ {file = "wrapt-1.15.0-cp37-cp37m-win_amd64.whl", hash = "sha256:abd52a09d03adf9c763d706df707c343293d5d106aea53483e0ec8d9e310ad5e"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:cdb4f085756c96a3af04e6eca7f08b1345e94b53af8921b25c72f096e704e145"},
+ {file = "wrapt-1.15.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:230ae493696a371f1dbffaad3dafbb742a4d27a0afd2b1aecebe52b740167e7f"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:63424c681923b9f3bfbc5e3205aafe790904053d42ddcc08542181a30a7a51bd"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d6bcbfc99f55655c3d93feb7ef3800bd5bbe963a755687cbf1f490a71fb7794b"},
+ {file = "wrapt-1.15.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c99f4309f5145b93eca6e35ac1a988f0dc0a7ccf9ccdcd78d3c0adf57224e62f"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:b130fe77361d6771ecf5a219d8e0817d61b236b7d8b37cc045172e574ed219e6"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:96177eb5645b1c6985f5c11d03fc2dbda9ad24ec0f3a46dcce91445747e15094"},
+ {file = "wrapt-1.15.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d5fe3e099cf07d0fb5a1e23d399e5d4d1ca3e6dfcbe5c8570ccff3e9208274f7"},
+ {file = "wrapt-1.15.0-cp38-cp38-win32.whl", hash = "sha256:abd8f36c99512755b8456047b7be10372fca271bf1467a1caa88db991e7c421b"},
+ {file = "wrapt-1.15.0-cp38-cp38-win_amd64.whl", hash = "sha256:b06fa97478a5f478fb05e1980980a7cdf2712015493b44d0c87606c1513ed5b1"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:2e51de54d4fb8fb50d6ee8327f9828306a959ae394d3e01a1ba8b2f937747d86"},
+ {file = "wrapt-1.15.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:0970ddb69bba00670e58955f8019bec4a42d1785db3faa043c33d81de2bf843c"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76407ab327158c510f44ded207e2f76b657303e17cb7a572ffe2f5a8a48aa04d"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:cd525e0e52a5ff16653a3fc9e3dd827981917d34996600bbc34c05d048ca35cc"},
+ {file = "wrapt-1.15.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9d37ac69edc5614b90516807de32d08cb8e7b12260a285ee330955604ed9dd29"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:078e2a1a86544e644a68422f881c48b84fef6d18f8c7a957ffd3f2e0a74a0d4a"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2cf56d0e237280baed46f0b5316661da892565ff58309d4d2ed7dba763d984b8"},
+ {file = "wrapt-1.15.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:7dc0713bf81287a00516ef43137273b23ee414fe41a3c14be10dd95ed98a2df9"},
+ {file = "wrapt-1.15.0-cp39-cp39-win32.whl", hash = "sha256:46ed616d5fb42f98630ed70c3529541408166c22cdfd4540b88d5f21006b0eff"},
+ {file = "wrapt-1.15.0-cp39-cp39-win_amd64.whl", hash = "sha256:eef4d64c650f33347c1f9266fa5ae001440b232ad9b98f1f43dfe7a79435c0a6"},
+ {file = "wrapt-1.15.0-py3-none-any.whl", hash = "sha256:64b1df0f83706b4ef4cfb4fb0e4c2669100fd7ecacfb59e091fad300d4e04640"},
+ {file = "wrapt-1.15.0.tar.gz", hash = "sha256:d06730c6aed78cee4126234cf2d071e01b44b915e725a6cb439a879ec9754a3a"},
+]
+
+[[package]]
+name = "yarl"
+version = "1.9.2"
+description = "Yet another URL library"
+optional = false
+python-versions = ">=3.7"
+files = [
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:8c2ad583743d16ddbdf6bb14b5cd76bf43b0d0006e918809d5d4ddf7bde8dd82"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:82aa6264b36c50acfb2424ad5ca537a2060ab6de158a5bd2a72a032cc75b9eb8"},
+ {file = "yarl-1.9.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c0c77533b5ed4bcc38e943178ccae29b9bcf48ffd1063f5821192f23a1bd27b9"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ee4afac41415d52d53a9833ebae7e32b344be72835bbb589018c9e938045a560"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:9bf345c3a4f5ba7f766430f97f9cc1320786f19584acc7086491f45524a551ac"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2a96c19c52ff442a808c105901d0bdfd2e28575b3d5f82e2f5fd67e20dc5f4ea"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:891c0e3ec5ec881541f6c5113d8df0315ce5440e244a716b95f2525b7b9f3608"},
+ {file = "yarl-1.9.2-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c3a53ba34a636a256d767c086ceb111358876e1fb6b50dfc4d3f4951d40133d5"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:566185e8ebc0898b11f8026447eacd02e46226716229cea8db37496c8cdd26e0"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:2b0738fb871812722a0ac2154be1f049c6223b9f6f22eec352996b69775b36d4"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:32f1d071b3f362c80f1a7d322bfd7b2d11e33d2adf395cc1dd4df36c9c243095"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:e9fdc7ac0d42bc3ea78818557fab03af6181e076a2944f43c38684b4b6bed8e3"},
+ {file = "yarl-1.9.2-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:56ff08ab5df8429901ebdc5d15941b59f6253393cb5da07b4170beefcf1b2528"},
+ {file = "yarl-1.9.2-cp310-cp310-win32.whl", hash = "sha256:8ea48e0a2f931064469bdabca50c2f578b565fc446f302a79ba6cc0ee7f384d3"},
+ {file = "yarl-1.9.2-cp310-cp310-win_amd64.whl", hash = "sha256:50f33040f3836e912ed16d212f6cc1efb3231a8a60526a407aeb66c1c1956dde"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:646d663eb2232d7909e6601f1a9107e66f9791f290a1b3dc7057818fe44fc2b6"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:aff634b15beff8902d1f918012fc2a42e0dbae6f469fce134c8a0dc51ca423bb"},
+ {file = "yarl-1.9.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a83503934c6273806aed765035716216cc9ab4e0364f7f066227e1aaea90b8d0"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b25322201585c69abc7b0e89e72790469f7dad90d26754717f3310bfe30331c2"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:22a94666751778629f1ec4280b08eb11815783c63f52092a5953faf73be24191"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:8ec53a0ea2a80c5cd1ab397925f94bff59222aa3cf9c6da938ce05c9ec20428d"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:159d81f22d7a43e6eabc36d7194cb53f2f15f498dbbfa8edc8a3239350f59fe7"},
+ {file = "yarl-1.9.2-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:832b7e711027c114d79dffb92576acd1bd2decc467dec60e1cac96912602d0e6"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:95d2ecefbcf4e744ea952d073c6922e72ee650ffc79028eb1e320e732898d7e8"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:d4e2c6d555e77b37288eaf45b8f60f0737c9efa3452c6c44626a5455aeb250b9"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:783185c75c12a017cc345015ea359cc801c3b29a2966c2655cd12b233bf5a2be"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:b8cc1863402472f16c600e3e93d542b7e7542a540f95c30afd472e8e549fc3f7"},
+ {file = "yarl-1.9.2-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:822b30a0f22e588b32d3120f6d41e4ed021806418b4c9f0bc3048b8c8cb3f92a"},
+ {file = "yarl-1.9.2-cp311-cp311-win32.whl", hash = "sha256:a60347f234c2212a9f0361955007fcf4033a75bf600a33c88a0a8e91af77c0e8"},
+ {file = "yarl-1.9.2-cp311-cp311-win_amd64.whl", hash = "sha256:be6b3fdec5c62f2a67cb3f8c6dbf56bbf3f61c0f046f84645cd1ca73532ea051"},
+ {file = "yarl-1.9.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:38a3928ae37558bc1b559f67410df446d1fbfa87318b124bf5032c31e3447b74"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ac9bb4c5ce3975aeac288cfcb5061ce60e0d14d92209e780c93954076c7c4367"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3da8a678ca8b96c8606bbb8bfacd99a12ad5dd288bc6f7979baddd62f71c63ef"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:13414591ff516e04fcdee8dc051c13fd3db13b673c7a4cb1350e6b2ad9639ad3"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf74d08542c3a9ea97bb8f343d4fcbd4d8f91bba5ec9d5d7f792dbe727f88938"},
+ {file = "yarl-1.9.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6e7221580dc1db478464cfeef9b03b95c5852cc22894e418562997df0d074ccc"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:494053246b119b041960ddcd20fd76224149cfea8ed8777b687358727911dd33"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:52a25809fcbecfc63ac9ba0c0fb586f90837f5425edfd1ec9f3372b119585e45"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:e65610c5792870d45d7b68c677681376fcf9cc1c289f23e8e8b39c1485384185"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:1b1bba902cba32cdec51fca038fd53f8beee88b77efc373968d1ed021024cc04"},
+ {file = "yarl-1.9.2-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:662e6016409828ee910f5d9602a2729a8a57d74b163c89a837de3fea050c7582"},
+ {file = "yarl-1.9.2-cp37-cp37m-win32.whl", hash = "sha256:f364d3480bffd3aa566e886587eaca7c8c04d74f6e8933f3f2c996b7f09bee1b"},
+ {file = "yarl-1.9.2-cp37-cp37m-win_amd64.whl", hash = "sha256:6a5883464143ab3ae9ba68daae8e7c5c95b969462bbe42e2464d60e7e2698368"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:5610f80cf43b6202e2c33ba3ec2ee0a2884f8f423c8f4f62906731d876ef4fac"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9a4e67ad7b646cd6f0938c7ebfd60e481b7410f574c560e455e938d2da8e0f4"},
+ {file = "yarl-1.9.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:83fcc480d7549ccebe9415d96d9263e2d4226798c37ebd18c930fce43dfb9574"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fcd436ea16fee7d4207c045b1e340020e58a2597301cfbcfdbe5abd2356c2fb"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:84e0b1599334b1e1478db01b756e55937d4614f8654311eb26012091be109d59"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3458a24e4ea3fd8930e934c129b676c27452e4ebda80fbe47b56d8c6c7a63a9e"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:838162460b3a08987546e881a2bfa573960bb559dfa739e7800ceeec92e64417"},
+ {file = "yarl-1.9.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f4e2d08f07a3d7d3e12549052eb5ad3eab1c349c53ac51c209a0e5991bbada78"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:de119f56f3c5f0e2fb4dee508531a32b069a5f2c6e827b272d1e0ff5ac040333"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:149ddea5abf329752ea5051b61bd6c1d979e13fbf122d3a1f9f0c8be6cb6f63c"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:674ca19cbee4a82c9f54e0d1eee28116e63bc6fd1e96c43031d11cbab8b2afd5"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:9b3152f2f5677b997ae6c804b73da05a39daa6a9e85a512e0e6823d81cdad7cc"},
+ {file = "yarl-1.9.2-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:5415d5a4b080dc9612b1b63cba008db84e908b95848369aa1da3686ae27b6d2b"},
+ {file = "yarl-1.9.2-cp38-cp38-win32.whl", hash = "sha256:f7a3d8146575e08c29ed1cd287068e6d02f1c7bdff8970db96683b9591b86ee7"},
+ {file = "yarl-1.9.2-cp38-cp38-win_amd64.whl", hash = "sha256:63c48f6cef34e6319a74c727376e95626f84ea091f92c0250a98e53e62c77c72"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:75df5ef94c3fdc393c6b19d80e6ef1ecc9ae2f4263c09cacb178d871c02a5ba9"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c027a6e96ef77d401d8d5a5c8d6bc478e8042f1e448272e8d9752cb0aff8b5c8"},
+ {file = "yarl-1.9.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f3b078dbe227f79be488ffcfc7a9edb3409d018e0952cf13f15fd6512847f3f7"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:59723a029760079b7d991a401386390c4be5bfec1e7dd83e25a6a0881859e716"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:b03917871bf859a81ccb180c9a2e6c1e04d2f6a51d953e6a5cdd70c93d4e5a2a"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c1012fa63eb6c032f3ce5d2171c267992ae0c00b9e164efe4d73db818465fac3"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a74dcbfe780e62f4b5a062714576f16c2f3493a0394e555ab141bf0d746bb955"},
+ {file = "yarl-1.9.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8c56986609b057b4839968ba901944af91b8e92f1725d1a2d77cbac6972b9ed1"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:2c315df3293cd521033533d242d15eab26583360b58f7ee5d9565f15fee1bef4"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:b7232f8dfbd225d57340e441d8caf8652a6acd06b389ea2d3222b8bc89cbfca6"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:53338749febd28935d55b41bf0bcc79d634881195a39f6b2f767870b72514caf"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:066c163aec9d3d073dc9ffe5dd3ad05069bcb03fcaab8d221290ba99f9f69ee3"},
+ {file = "yarl-1.9.2-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:8288d7cd28f8119b07dd49b7230d6b4562f9b61ee9a4ab02221060d21136be80"},
+ {file = "yarl-1.9.2-cp39-cp39-win32.whl", hash = "sha256:b124e2a6d223b65ba8768d5706d103280914d61f5cae3afbc50fc3dfcc016623"},
+ {file = "yarl-1.9.2-cp39-cp39-win_amd64.whl", hash = "sha256:61016e7d582bc46a5378ffdd02cd0314fb8ba52f40f9cf4d9a5e7dbef88dee18"},
+ {file = "yarl-1.9.2.tar.gz", hash = "sha256:04ab9d4b9f587c06d801c2abfe9317b77cdf996c65a90d5e84ecc45010823571"},
+]
+
+[package.dependencies]
+idna = ">=2.0"
+multidict = ">=4.0"
+
+[[package]]
+name = "zipp"
+version = "3.16.2"
+description = "Backport of pathlib-compatible object wrapper for zip files"
+optional = false
+python-versions = ">=3.8"
+files = [
+ {file = "zipp-3.16.2-py3-none-any.whl", hash = "sha256:679e51dd4403591b2d6838a48de3d283f3d188412a9782faadf845f298736ba0"},
+ {file = "zipp-3.16.2.tar.gz", hash = "sha256:ebc15946aa78bd63458992fc81ec3b6f7b1e92d51c35e6de1c3804e73b799147"},
+]
+
+[package.extras]
+docs = ["furo", "jaraco.packaging (>=9.3)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"]
+testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=2.2)", "pytest-ignore-flaky", "pytest-mypy (>=0.9.1)", "pytest-ruff"]
+
+[metadata]
+lock-version = "2.0"
+python-versions = "^3.8.1"
+content-hash = "85e02897d3dd2c8b9bce7e15985eb7aecbd52c166478fceca445787fdde2d60b"
diff --git a/server/poetry.toml b/server/poetry.toml
new file mode 100644
index 00000000..ab1033bd
--- /dev/null
+++ b/server/poetry.toml
@@ -0,0 +1,2 @@
+[virtualenvs]
+in-project = true
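
Note on the new poetry.toml: in-project = true tells Poetry to create its virtualenv as a .venv directory inside server/ rather than in Poetry's global cache, so the environment it resolves lives right next to the lockfile above.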
diff --git a/server/pyproject.toml b/server/pyproject.toml
new file mode 100644
index 00000000..4085306d
--- /dev/null
+++ b/server/pyproject.toml
@@ -0,0 +1,47 @@
+[tool.poetry]
+name = "continuedev"
+version = "0.1.42"
+description = ""
+authors = ["Nate Sesti <sestinj@gmail.com>"]
+readme = "README.md"
+
+[tool.poetry.dependencies]
+python = "^3.8.1"
+fastapi = "^0.95.1"
+typer = "^0.7.0"
+openai = "^0.27.5"
+boltons = "^23.0.0"
+pydantic = "^1.10.7"
+uvicorn = "^0.21.1"
+python-dotenv = "^1.0.0"
+nest-asyncio = "^1.5.6"
+websockets = "^11.0.2"
+urllib3 = "1.26.15"
+posthog = "^3.0.1"
+tiktoken = "^0.4.0"
+jsonref = "^1.1.0"
+jsonschema = "^4.17.3"
+directory-tree = "^0.0.3.1"
+anthropic = "^0.3.4"
+chevron = "^0.14.0"
+psutil = "^5.9.5"
+pygithub = "^1.59.0"
+meilisearch-python-async = "^1.4.8"
+socksio = "^1.0.0"
+ripgrepy = "^2.0.0"
+bs4 = "^0.0.1"
+replicate = "^0.11.0"
+redbaron = "^0.9.2"
+python-lsp-server = "^1.7.4"
+huggingface-hub = "^0.16.4"
+
+[tool.poetry.scripts]
+typegen = "continuedev.models.generate_json_schema:main"
+
+[tool.poetry.group.dev.dependencies]
+pytest = "^7.4.1"
+pytest-asyncio = "^0.21.1"
+
+[build-system]
+requires = ["poetry-core"]
+build-backend = "poetry.core.masonry.api"
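
The pyproject.toml above declares the server's runtime dependencies, a dev group with pytest and pytest-asyncio, and a typegen console script that points at continuedev.models.generate_json_schema:main. As a rough sketch, invoking that script entry point amounts to the following (assuming only what the [tool.poetry.scripts] line declares, namely that the module exposes a main callable):

    # Roughly what `poetry run typegen` resolves to, per the scripts entry above.
    from continuedev.models.generate_json_schema import main

    if __name__ == "__main__":
        main()  # assumed to emit JSON schemas for the continuedev models, per the module name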
diff --git a/server/requirements.txt b/server/requirements.txt
new file mode 100644
index 00000000..d430f20d
--- /dev/null
+++ b/server/requirements.txt
@@ -0,0 +1,27 @@
+fastapi==0.95.1
+typer==0.7.0
+openai==0.27.5
+boltons==23.0.0
+pydantic==1.10.7
+uvicorn==0.21.1
+python-dotenv==1.0.0
+nest-asyncio==1.5.6
+websockets==11.0.2
+urllib3==1.26.15
+posthog==3.0.1
+tiktoken==0.4.0
+jsonref==1.1.0
+jsonschema==4.17.3
+directory-tree==0.0.3.1
+anthropic==0.3.4
+chevron==0.14.0
+psutil==5.9.5
+pygithub==1.59.0
+meilisearch-python-async==1.4.8
+socksio==1.0.0
+ripgrepy==2.0.0
+replicate==0.11.0
+bs4==0.0.1
+redbaron==0.9.2
+python-lsp-server[websockets]==1.8.0
+huggingface-hub==0.16.4
\ No newline at end of file
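
requirements.txt duplicates the Poetry dependency list as exact pins for environments that install with plain pip instead of Poetry. The one notable divergence visible here is python-lsp-server, pinned to 1.8.0 with the websockets extra in requirements.txt while pyproject.toml specifies ^1.7.4 without the extra.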
diff --git a/server/tests/__init__.py b/server/tests/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/server/tests/__init__.py
diff --git a/server/tests/llm_test.py b/server/tests/llm_test.py
new file mode 100644
index 00000000..a016b464
--- /dev/null
+++ b/server/tests/llm_test.py
@@ -0,0 +1,179 @@
+import asyncio
+import os
+from functools import wraps
+
+import pytest
+from continuedev.core.main import ChatMessage
+from continuedev.libs.llm.anthropic import AnthropicLLM
+from continuedev.libs.llm.base import LLM, CompletionOptions
+from continuedev.libs.llm.ggml import GGML
+from continuedev.libs.llm.openai import OpenAI
+from continuedev.libs.llm.together import TogetherLLM
+from continuedev.libs.util.count_tokens import DEFAULT_ARGS
+from dotenv import load_dotenv
+from util.prompts import tokyo_test_pair
+
+load_dotenv()
+
+
+SPEND_MONEY = True
+
+
+def start_model(model):
+ def write_log(msg: str):
+ pass
+
+ asyncio.run(model.start(write_log=write_log, unique_id="test_unique_id"))
+
+
+def async_test(func):
+ @wraps(func)
+ def wrapper(*args, **kwargs):
+ return asyncio.run(func(*args, **kwargs))
+
+ return wrapper
+
+
+class TestBaseLLM:
+ model = "gpt-3.5-turbo"
+ context_length = 4096
+ system_message = "test_system_message"
+
+ def setup_class(cls):
+ cls.llm = LLM(
+ model=cls.model,
+ context_length=cls.context_length,
+ system_message=cls.system_message,
+ )
+
+ start_model(cls.llm)
+
+ def test_llm_is_instance(self):
+ assert isinstance(self.llm, LLM)
+
+ def test_llm_collect_args(self):
+ options = CompletionOptions(model=self.model)
+ assert self.llm.collect_args(options) == {
+ **DEFAULT_ARGS,
+ "model": self.model,
+ }
+
+ @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+ @async_test
+ async def test_completion(self):
+ if self.llm.__class__.__name__ == "LLM":
+ pytest.skip("Skipping abstract LLM")
+
+ resp = await self.llm.complete(tokyo_test_pair[0], temperature=0.0)
+ assert isinstance(resp, str)
+ assert resp.strip().lower() == tokyo_test_pair[1]
+
+ @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+ @async_test
+ async def test_stream_chat(self):
+ if self.llm.__class__.__name__ == "LLM":
+ pytest.skip("Skipping abstract LLM")
+
+ completion = ""
+ role = None
+ async for chunk in self.llm.stream_chat(
+ messages=[
+ ChatMessage(
+ role="user", content=tokyo_test_pair[0], summary=tokyo_test_pair[0]
+ )
+ ],
+ temperature=0.0,
+ ):
+ assert isinstance(chunk, dict)
+ if "content" in chunk:
+ completion += chunk["content"]
+ if "role" in chunk:
+ role = chunk["role"]
+
+ assert role == "assistant"
+ assert completion.strip().lower() == tokyo_test_pair[1]
+
+ @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+ @async_test
+ async def test_stream_complete(self):
+ if self.llm.__class__.__name__ == "LLM":
+ pytest.skip("Skipping abstract LLM")
+
+ completion = ""
+ async for chunk in self.llm.stream_complete(
+ tokyo_test_pair[0], temperature=0.0
+ ):
+ assert isinstance(chunk, str)
+ completion += chunk
+
+ assert completion.strip().lower() == tokyo_test_pair[1]
+
+
+class TestOpenAI(TestBaseLLM):
+ def setup_class(cls):
+ super().setup_class(cls)
+ cls.llm = OpenAI(
+ model=cls.model,
+ context_length=cls.context_length,
+ system_message=cls.system_message,
+ api_key=os.environ["OPENAI_API_KEY"],
+ # api_base=f"http://localhost:{port}",
+ )
+ start_model(cls.llm)
+ # cls.server = start_openai(port=port)
+
+ # def teardown_class(cls):
+ # cls.server.terminate()
+
+ @pytest.mark.asyncio
+ @pytest.mark.skipif(SPEND_MONEY is False, reason="Costs money")
+ async def test_completion(self):
+ resp = await self.llm.complete(
+ "Output a single word, that being the capital of Japan:"
+ )
+ assert isinstance(resp, str)
+ assert resp.strip().lower() == tokyo_test_pair[1]
+
+
+class TestGGML(TestBaseLLM):
+ def setup_class(cls):
+ super().setup_class(cls)
+ cls.llm = GGML(
+ model="gpt-3.5-turbo",
+ context_length=cls.context_length,
+ system_message=cls.system_message,
+ server_url="https://api.openai.com",
+ api_key=os.environ["OPENAI_API_KEY"],
+ )
+ start_model(cls.llm)
+
+
+@pytest.mark.skipif(True, reason="Together is not working")
+class TestTogetherLLM(TestBaseLLM):
+ def setup_class(cls):
+ super().setup_class(cls)
+ cls.llm = TogetherLLM(
+ api_key=os.environ["TOGETHER_API_KEY"],
+ )
+ start_model(cls.llm)
+
+
+class TestAnthropicLLM(TestBaseLLM):
+ def setup_class(cls):
+ super().setup_class(cls)
+ cls.llm = AnthropicLLM(api_key=os.environ["ANTHROPIC_API_KEY"])
+ start_model(cls.llm)
+
+ def test_llm_collect_args(self):
+ options = CompletionOptions(model=self.model)
+ assert self.llm.collect_args(options) == {
+ "max_tokens_to_sample": DEFAULT_ARGS["max_tokens"],
+ "temperature": DEFAULT_ARGS["temperature"],
+ "model": self.model,
+ }
+
+
+if __name__ == "__main__":
+ import pytest
+
+ pytest.main()
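
The LLM tests above share one pattern: setup_class builds a provider-specific LLM, start_model runs its async start() via asyncio.run, each test body is a coroutine wrapped by the async_test decorator so plain pytest can execute it, and the SPEND_MONEY flag gates anything that would call a paid API. A hypothetical sketch of covering another provider by reusing that pattern, written as an addition to the same llm_test.py module (so TestBaseLLM, GGML, start_model, and os are already in scope); the class name, server URL, and environment variable are made up for illustration:

    class TestLocalGGML(TestBaseLLM):  # hypothetical extra provider test
        def setup_class(cls):
            super().setup_class(cls)
            cls.llm = GGML(
                model=cls.model,
                context_length=cls.context_length,
                system_message=cls.system_message,
                server_url="http://localhost:8000",           # assumed local server URL
                api_key=os.environ.get("LOCAL_API_KEY", ""),  # hypothetical env variable
            )
            start_model(cls.llm)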
diff --git a/server/tests/step_test.py b/server/tests/step_test.py
new file mode 100644
index 00000000..a9132dd3
--- /dev/null
+++ b/server/tests/step_test.py
@@ -0,0 +1,68 @@
+import pytest
+from continuedev.core.config import ContinueConfig
+from continuedev.core.steps import UserInputStep
+from continuedev.headless import start_headless_session
+from continuedev.models.filesystem import Range, RangeInFileWithContents
+from continuedev.plugins.steps.chat import SimpleChatStep
+from continuedev.plugins.steps.main import EditHighlightedCodeStep
+from continuedev.plugins.steps.on_traceback import DefaultOnTracebackStep
+from util.prompts import dotenv_test_pair, tokyo_test_pair
+
+TEST_CONFIG = ContinueConfig()
+
+
+@pytest.mark.asyncio
+async def test_step():
+ pytest.skip("TODO: too slow")
+ session = await start_headless_session(config=TEST_CONFIG)
+
+ await session.autopilot.run_from_step(UserInputStep(user_input=tokyo_test_pair[0]))
+
+ full_state = await session.autopilot.get_full_state()
+
+ assert isinstance(full_state.history.timeline[-1].step, SimpleChatStep)
+
+ assert not full_state.history.timeline[-1].step.hide
+
+ assert (
+ full_state.history.timeline[-1].step.description.strip().lower()
+ == tokyo_test_pair[1]
+ )
+
+ await session.autopilot.cleanup()
+
+
+@pytest.mark.asyncio
+async def test_traceback_step():
+ pytest.skip("TODO: too slow")
+ session = await start_headless_session(config=TEST_CONFIG)
+
+ await session.autopilot.run_from_step(
+ DefaultOnTracebackStep(output=dotenv_test_pair[0])
+ )
+
+ full_state = await session.autopilot.get_full_state()
+ assert dotenv_test_pair[1] in full_state.history.timeline[-1].step.description
+
+ await session.autopilot.cleanup()
+
+
+@pytest.mark.asyncio
+async def test_edit_step():
+ pytest.skip("TODO: too slow")
+ session = await start_headless_session(config=TEST_CONFIG)
+
+ range_in_file = RangeInFileWithContents(
+ filepath=__file__, range=Range.from_shorthand(0, 0, 0, 0), contents=""
+ )
+
+ await session.autopilot.handle_highlighted_code(range_in_files=[range_in_file])
+
+ await session.autopilot.run_from_step(
+ EditHighlightedCodeStep(user_input="Don't edit this code")
+ )
+
+ full_state = await session.autopilot.get_full_state()
+ assert isinstance(full_state.history.timeline[-1].step.description, str)
+
+ await session.autopilot.cleanup()
diff --git a/server/tests/util/__init__.py b/server/tests/util/__init__.py
new file mode 100644
index 00000000..e69de29b
--- /dev/null
+++ b/server/tests/util/__init__.py
diff --git a/server/tests/util/config.py b/server/tests/util/config.py
new file mode 100644
index 00000000..370933a0
--- /dev/null
+++ b/server/tests/util/config.py
@@ -0,0 +1,19 @@
+from continuedev.core.config import ContinueConfig
+from continuedev.core.models import Models
+from continuedev.libs.llm.openai_free_trial import OpenAIFreeTrial
+
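+# Minimal config for tests: telemetry disabled, free-trial OpenAI models, and no
+# custom commands, slash commands, or context providers.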
+config = ContinueConfig(
+ allow_anonymous_telemetry=False,
+ models=Models(
+ default=OpenAIFreeTrial(api_key="", model="gpt-4"),
+ summarize=OpenAIFreeTrial(
+ api_key="",
+ model="gpt-3.5-turbo",
+ ),
+ ),
+ system_message=None,
+ temperature=0.5,
+ custom_commands=[],
+ slash_commands=[],
+ context_providers=[],
+)
diff --git a/server/tests/util/openai_mock.py b/server/tests/util/openai_mock.py
new file mode 100644
index 00000000..763c5647
--- /dev/null
+++ b/server/tests/util/openai_mock.py
@@ -0,0 +1,139 @@
+import asyncio
+import json
+import os
+import random
+import subprocess
+from typing import Dict, List, Optional
+
+from fastapi import FastAPI
+from fastapi.responses import StreamingResponse
+from pydantic import BaseModel
+
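+# Minimal FastAPI app that mimics the OpenAI completions and chat completions
+# endpoints with canned responses, so tests can run without real API calls.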
+openai = FastAPI()
+
+
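+# Body of a /completions request; extra fields are accepted without validation.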
+class CompletionBody(BaseModel):
+ prompt: str
+ max_tokens: Optional[int] = 60
+ stream: Optional[bool] = False
+
+ class Config:
+ extra = "allow"
+
+
+@openai.post("/completions")
+@openai.post("/v1/completions")
+async def mock_completion(item: CompletionBody):
+ prompt = item.prompt
+
+ text = "This is a fake completion."
+
+ if item.stream:
+
+        async def stream_text():
+            # Echo a random word from the prompt in each streamed chunk
+            for _ in range(len(text)):
+                word = random.choice(prompt.split())
+                chunk = {
+                    "choices": [
+                        {
+                            "delta": {"role": "assistant", "content": word},
+                            "finish_reason": None,
+                            "index": 0,
+                        }
+                    ],
+                    "created": 1677825464,
+                    "id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+                    "model": "gpt-3.5-turbo-0301",
+                    "object": "chat.completion.chunk",
+                }
+                # StreamingResponse can only send str/bytes, so serialize each chunk
+                # (newline-delimited JSON here; adjust the framing if the client expects SSE)
+                yield json.dumps(chunk) + "\n"
+                await asyncio.sleep(0.1)
+
+        return StreamingResponse(stream_text(), media_type="text/plain")
+
+ return {
+ "id": "cmpl-uqkvlQyYK7bGYrRHQ0eXlWi7",
+ "object": "text_completion",
+ "created": 1589478378,
+ "model": "gpt-3.5-turbo",
+ "choices": [
+ {
+ "text": text,
+ "index": 0,
+ "logprobs": None,
+ "finish_reason": "length",
+ }
+ ],
+ "usage": {"prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12},
+ }
+
+
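+# Body of a /chat/completions request; extra fields are accepted without validation.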
+class ChatBody(BaseModel):
+ messages: List[Dict[str, str]]
+ max_tokens: Optional[int] = None
+ stream: Optional[bool] = False
+
+ class Config:
+ extra = "allow"
+
+
+@openai.post("/v1/chat/completions")
+async def mock_chat_completion(item: ChatBody):
+ text = "This is a fake completion."
+
+ if item.stream:
+
+        async def stream_text():
+            # Stream the canned completion back one character at a time
+            for char in text:
+                chunk = {
+                    "choices": [
+                        {
+                            "delta": {"role": "assistant", "content": char},
+                            "finish_reason": None,
+                            "index": 0,
+                        }
+                    ],
+                    "created": 1677825464,
+                    "id": "chatcmpl-6ptKyqKOGXZT6iQnqiXAH8adNLUzD",
+                    "model": "gpt-3.5-turbo-0301",
+                    "object": "chat.completion.chunk",
+                }
+                # StreamingResponse can only send str/bytes, so serialize each chunk
+                # (newline-delimited JSON here; adjust the framing if the client expects SSE)
+                yield json.dumps(chunk) + "\n"
+                await asyncio.sleep(0.1)
+
+        return StreamingResponse(stream_text(), media_type="text/plain")
+
+ return {
+ "id": "chatcmpl-123",
+ "object": "chat.completion",
+ "created": 1677652288,
+ "model": "gpt-3.5-turbo-0613",
+ "choices": [
+ {
+ "index": 0,
+ "message": {
+ "role": "assistant",
+ "content": text,
+ },
+ "finish_reason": "stop",
+ }
+ ],
+ "usage": {"prompt_tokens": 9, "completion_tokens": 12, "total_tokens": 21},
+ }
+
+
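+# Launch the mock server with uvicorn in a subprocess; the caller is responsible
+# for terminating the returned process handle.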
+def start_openai(port: int = 8000):
+ server = subprocess.Popen(
+ [
+ "uvicorn",
+ "openai_mock:openai",
+ "--host",
+ "127.0.0.1",
+ "--port",
+ str(port),
+ ],
+ cwd=os.path.dirname(__file__),
+ )
+ return server
+
+
+if __name__ == "__main__":
+ start_openai()
diff --git a/server/tests/util/prompts.py b/server/tests/util/prompts.py
new file mode 100644
index 00000000..e84ddc82
--- /dev/null
+++ b/server/tests/util/prompts.py
@@ -0,0 +1,2 @@
+tokyo_test_pair = ("Output a single word, that being the capital of Japan:", "tokyo")
+dotenv_test_pair = ("ModuleNotFoundError: No module named 'dotenv'", "python-dotenv") \ No newline at end of file