author    Nate Sesti <sestinj@gmail.com>  2023-07-17 13:33:29 -0700
committer Nate Sesti <sestinj@gmail.com>  2023-07-17 13:33:29 -0700
commit    05d665e65aaef62254a4da9a7a381f9984ff0db5 (patch)
tree      9b5c08baa5c7c1da051e4109ae34fb8a141c2754
parent    868e0b7ef5357b89186119c3c2fa8bd427b8db30 (diff)
parent    6e95cb64cd5b2e2d55200bf979106f18d395bb97 (diff)
Merge branch 'main' of https://github.com/continuedev/continue into anthropic
-rw-r--r--  continuedev/poetry.lock | 12
-rw-r--r--  continuedev/pyproject.toml | 1
-rw-r--r--  continuedev/src/continuedev/core/autopilot.py | 19
-rw-r--r--  continuedev/src/continuedev/core/config.py | 11
-rw-r--r--  continuedev/src/continuedev/core/policy.py | 2
-rw-r--r--  continuedev/src/continuedev/core/sdk.py | 15
-rw-r--r--  continuedev/src/continuedev/libs/llm/ggml.py | 86
-rw-r--r--  continuedev/src/continuedev/libs/llm/hf_inference_api.py | 3
-rw-r--r--  continuedev/src/continuedev/libs/llm/openai.py | 25
-rw-r--r--  continuedev/src/continuedev/libs/llm/proxy_server.py | 6
-rw-r--r--  continuedev/src/continuedev/libs/util/count_tokens.py | 14
-rw-r--r--  continuedev/src/continuedev/libs/util/strings.py (renamed from continuedev/src/continuedev/libs/util/dedent.py) | 24
-rw-r--r--  continuedev/src/continuedev/libs/util/templating.py | 39
-rw-r--r--  continuedev/src/continuedev/server/ide.py | 2
-rw-r--r--  continuedev/src/continuedev/steps/chat.py | 8
-rw-r--r--  continuedev/src/continuedev/steps/core/core.py | 38
-rw-r--r--  extension/package-lock.json | 4
-rw-r--r--  extension/package.json | 7
-rw-r--r--  extension/react-app/src/components/ComboBox.tsx | 65
-rw-r--r--  extension/react-app/src/components/InputAndButton.tsx | 10
-rw-r--r--  extension/react-app/src/components/PillButton.tsx | 47
-rw-r--r--  extension/react-app/src/components/StepContainer.tsx | 19
-rw-r--r--  extension/react-app/src/components/TextDialog.tsx | 14
-rw-r--r--  extension/react-app/src/components/index.ts | 23
-rw-r--r--  extension/react-app/src/index.css | 4
-rw-r--r--  extension/react-app/src/pages/gui.tsx | 11
-rw-r--r--  extension/src/activation/activate.ts | 2
-rw-r--r--  extension/src/activation/environmentSetup.ts | 8
-rw-r--r--  extension/src/bridge.ts | 6
-rw-r--r--  extension/src/commands.ts | 7
-rw-r--r--  extension/src/diffs.ts | 46
-rw-r--r--  extension/src/lang-server/codeActions.ts | 55
-rw-r--r--  extension/src/suggestions.ts | 60
33 files changed, 474 insertions(+), 219 deletions(-)
diff --git a/continuedev/poetry.lock b/continuedev/poetry.lock
index e688e076..e8927fe7 100644
--- a/continuedev/poetry.lock
+++ b/continuedev/poetry.lock
@@ -318,6 +318,18 @@ files = [
]
[[package]]
+name = "chevron"
+version = "0.14.0"
+description = "Mustache templating language renderer"
+category = "main"
+optional = false
+python-versions = "*"
+files = [
+ {file = "chevron-0.14.0-py3-none-any.whl", hash = "sha256:fbf996a709f8da2e745ef763f482ce2d311aa817d287593a5b990d6d6e4f0443"},
+ {file = "chevron-0.14.0.tar.gz", hash = "sha256:87613aafdf6d77b6a90ff073165a61ae5086e21ad49057aa0e53681601800ebf"},
+]
+
+[[package]]
name = "click"
version = "8.1.3"
description = "Composable command line interface toolkit"
diff --git a/continuedev/pyproject.toml b/continuedev/pyproject.toml
index 08c3fd04..6a646cbe 100644
--- a/continuedev/pyproject.toml
+++ b/continuedev/pyproject.toml
@@ -25,6 +25,7 @@ jsonref = "^1.1.0"
jsonschema = "^4.17.3"
directory-tree = "^0.0.3.1"
anthropic = "^0.3.4"
+chevron = "^0.14.0"
[tool.poetry.scripts]
typegen = "src.continuedev.models.generate_json_schema:main"
diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index 0696c360..4e177ac9 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -36,7 +36,7 @@ def get_error_title(e: Exception) -> str:
elif isinstance(e, openai_errors.APIConnectionError):
return "The request failed. Please check your internet connection and try again. If this issue persists, you can use our API key for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to \"\""
elif isinstance(e, openai_errors.InvalidRequestError):
- return 'Your API key does not have access to GPT-4. You can use ours for free by going to VS Code settings and changing the value of continue.OPENAI_API_KEY to ""'
+ return 'Invalid request sent to OpenAI. Please try again.'
elif e.__str__().startswith("Cannot connect to host"):
return "The request failed. Please check your internet connection and try again."
return e.__str__() or e.__repr__()
@@ -166,6 +166,22 @@ class Autopilot(ContinueBaseModel):
if not any(map(lambda x: x.editing, self._highlighted_ranges)):
self._highlighted_ranges[0].editing = True
+ def _disambiguate_highlighted_ranges(self):
+ """If any files have the same name, also display their folder name"""
+ name_counts = {}
+ for rif in self._highlighted_ranges:
+ if rif.display_name in name_counts:
+ name_counts[rif.display_name] += 1
+ else:
+ name_counts[rif.display_name] = 1
+
+ for rif in self._highlighted_ranges:
+ if name_counts[rif.display_name] > 1:
+ rif.display_name = os.path.join(
+ os.path.basename(os.path.dirname(rif.range.filepath)), rif.display_name)
+ else:
+ rif.display_name = os.path.basename(rif.range.filepath)
+
async def handle_highlighted_code(self, range_in_files: List[RangeInFileWithContents]):
# Filter out rifs from ~/.continue/diffs folder
range_in_files = [
@@ -211,6 +227,7 @@ class Autopilot(ContinueBaseModel):
) for rif in range_in_files]
self._make_sure_is_editing_range()
+ self._disambiguate_highlighted_ranges()
await self.update_subscribers()
diff --git a/continuedev/src/continuedev/core/config.py b/continuedev/src/continuedev/core/config.py
index 05ba48c6..6af0878d 100644
--- a/continuedev/src/continuedev/core/config.py
+++ b/continuedev/src/continuedev/core/config.py
@@ -67,16 +67,21 @@ DEFAULT_SLASH_COMMANDS = [
]
+class AzureInfo(BaseModel):
+ endpoint: str
+ engine: str
+ api_version: str
+
+
class ContinueConfig(BaseModel):
"""
A pydantic class for the continue config file.
"""
steps_on_startup: Optional[Dict[str, Dict]] = {}
disallowed_steps: Optional[List[str]] = []
- server_url: Optional[str] = None
allow_anonymous_telemetry: Optional[bool] = True
default_model: Literal["gpt-3.5-turbo", "gpt-3.5-turbo-16k",
- "gpt-4", "claude-2"] = 'gpt-4'
+ "gpt-4", "claude-2", "ggml"] = 'gpt-4'
custom_commands: Optional[List[CustomCommand]] = [CustomCommand(
name="test",
description="This is an example custom command. Use /config to edit it and create more",
@@ -85,6 +90,8 @@ class ContinueConfig(BaseModel):
slash_commands: Optional[List[SlashCommand]] = DEFAULT_SLASH_COMMANDS
on_traceback: Optional[List[OnTracebackSteps]] = [
OnTracebackSteps(step_name="DefaultOnTracebackStep")]
+ system_message: Optional[str] = None
+ azure_openai_info: Optional[AzureInfo] = None
# Want to force these to be the slash commands for now
@validator('slash_commands', pre=True)
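
Not part of the commit: a minimal sketch of a user config exercising the new fields, assuming the import path mirrors the package layout above; the endpoint, engine, and api_version values are placeholders.

```python
# Hypothetical ~/.continue/config.py using the fields added in this commit
from continuedev.core.config import AzureInfo, ContinueConfig

config = ContinueConfig(
    # "ggml" is now an accepted value alongside the OpenAI/Anthropic models
    default_model="gpt-4",
    # Injected as the system prompt and rendered by render_system_message
    # (see the new templating.py further down)
    system_message="Prefer functional style and include type hints.",
    # Only needed when routing requests through an Azure OpenAI deployment
    azure_openai_info=AzureInfo(
        endpoint="https://example-resource.openai.azure.com/",
        engine="example-gpt4-deployment",
        api_version="2023-05-15",
    ),
)
```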
diff --git a/continuedev/src/continuedev/core/policy.py b/continuedev/src/continuedev/core/policy.py
index bc897357..d007c92b 100644
--- a/continuedev/src/continuedev/core/policy.py
+++ b/continuedev/src/continuedev/core/policy.py
@@ -58,7 +58,7 @@ class DemoPolicy(Policy):
if history.get_current() is None:
return (
MessageStep(name="Welcome to Continue", message=dedent("""\
- - Highlight code and ask a question or give instructions
+ - Highlight code section and ask a question or give instructions
- Use `cmd+m` (Mac) / `ctrl+m` (Windows) to open Continue
- Use `/help` to ask questions about how to use Continue""")) >>
WelcomeStep() >>
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 28487600..d3501f08 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -12,6 +12,7 @@ from ..models.filesystem import RangeInFile
from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI
from ..libs.llm.openai import OpenAI
from ..libs.llm.anthropic import Anthropic
+from ..libs.llm.ggml import GGML
from .observation import Observation
from ..server.ide_protocol import AbstractIdeProtocolServer
from .main import Context, ContinueCustomException, History, Step, ChatMessage
@@ -34,10 +35,12 @@ MODEL_PROVIDER_TO_ENV_VAR = {
class Models:
provider_keys: Dict[ModelProvider, str] = {}
model_providers: List[ModelProvider]
+ system_message: str
def __init__(self, sdk: "ContinueSDK", model_providers: List[ModelProvider]):
self.sdk = sdk
self.model_providers = model_providers
+ self.system_message = sdk.config.system_message
@classmethod
async def create(cls, sdk: "ContinueSDK", with_providers: List[ModelProvider] = ["openai"]) -> "Models":
@@ -56,12 +59,12 @@ class Models:
def __load_openai_model(self, model: str) -> OpenAI:
api_key = self.provider_keys["openai"]
if api_key == "":
- return ProxyServer(self.sdk.ide.unique_id, model)
- return OpenAI(api_key=api_key, default_model=model)
+ return ProxyServer(self.sdk.ide.unique_id, model, system_message=self.system_message)
+ return OpenAI(api_key=api_key, default_model=model, system_message=self.system_message, azure_info=self.sdk.config.azure_openai_info)
def __load_hf_inference_api_model(self, model: str) -> HuggingFaceInferenceAPI:
api_key = self.provider_keys["hf_inference_api"]
- return HuggingFaceInferenceAPI(api_key=api_key, model=model)
+ return HuggingFaceInferenceAPI(api_key=api_key, model=model, system_message=self.system_message)
def __load_anthropic_model(self, model: str) -> Anthropic:
api_key = self.provider_keys["anthropic"]
@@ -91,6 +94,10 @@ class Models:
def gpt4(self):
return self.__load_openai_model("gpt-4")
+ @cached_property
+ def ggml(self):
+ return GGML(system_message=self.system_message)
+
def __model_from_name(self, model_name: str):
if model_name == "starcoder":
return self.starcoder
@@ -102,6 +109,8 @@ class Models:
return self.gpt4
elif model_name == "claude-2":
return self.claude2
+ elif model_name == "ggml":
+ return self.ggml
else:
raise Exception(f"Unknown model {model_name}")
diff --git a/continuedev/src/continuedev/libs/llm/ggml.py b/continuedev/src/continuedev/libs/llm/ggml.py
new file mode 100644
index 00000000..6007fdb4
--- /dev/null
+++ b/continuedev/src/continuedev/libs/llm/ggml.py
@@ -0,0 +1,86 @@
+from functools import cached_property
+import json
+from typing import Any, Coroutine, Dict, Generator, List, Union
+
+import aiohttp
+from ...core.main import ChatMessage
+from ..llm import LLM
+from ..util.count_tokens import compile_chat_messages, DEFAULT_ARGS, count_tokens
+
+SERVER_URL = "http://localhost:8000"
+
+
+class GGML(LLM):
+
+ def __init__(self, system_message: str = None):
+ self.system_message = system_message
+
+ @cached_property
+ def name(self):
+ return "ggml"
+
+ @property
+ def default_args(self):
+ return {**DEFAULT_ARGS, "model": self.name, "max_tokens": 1024}
+
+ def count_tokens(self, text: str):
+ return count_tokens(self.name, text)
+
+ async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+        args = {**self.default_args, **kwargs}
+        args["stream"] = True
+
+ messages = compile_chat_messages(
+ self.name, with_history, args["max_tokens"], prompt, functions=args.get("functions", None), system_message=self.system_message)
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(f"{SERVER_URL}/v1/completions", json={
+ "messages": messages,
+ **args
+ }) as resp:
+ async for line in resp.content.iter_any():
+ if line:
+ try:
+ yield line.decode("utf-8")
+ except:
+ raise Exception(str(line))
+
+ async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
+ args = {**self.default_args, **kwargs}
+ messages = compile_chat_messages(
+ self.name, messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message)
+ args["stream"] = True
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(f"{SERVER_URL}/v1/chat/completions", json={
+ "messages": messages,
+ **args
+ }) as resp:
+            # This is streaming application/json instead of text/event-stream
+ async for line in resp.content.iter_chunks():
+ if line[1]:
+ try:
+ json_chunk = line[0].decode("utf-8")
+ if json_chunk.startswith(": ping - ") or json_chunk.startswith("data: [DONE]"):
+ continue
+ chunks = json_chunk.split("\n")
+ for chunk in chunks:
+ if chunk.strip() != "":
+ yield json.loads(chunk[6:])["choices"][0]["delta"]
+ except:
+ raise Exception(str(line[0]))
+
+ async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
+ args = {**self.default_args, **kwargs}
+
+ async with aiohttp.ClientSession() as session:
+ async with session.post(f"{SERVER_URL}/v1/completions", json={
+ "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message),
+ **args
+ }) as resp:
+ try:
+ return await resp.text()
+ except:
+ raise Exception(await resp.text())
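
Not part of the commit: a sketch of driving the new GGML client directly, assuming an OpenAI-compatible server is already listening on the hard-coded SERVER_URL (http://localhost:8000) and that the package is importable as continuedev.*.

```python
import asyncio

from continuedev.libs.llm.ggml import GGML


async def main() -> None:
    llm = GGML(system_message="You are a concise coding assistant.")
    # stream_complete yields decoded text chunks as the server streams them
    async for chunk in llm.stream_complete("def fibonacci(n):"):
        print(chunk, end="", flush=True)


asyncio.run(main())
```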
diff --git a/continuedev/src/continuedev/libs/llm/hf_inference_api.py b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
index 803ba122..7e11fbbe 100644
--- a/continuedev/src/continuedev/libs/llm/hf_inference_api.py
+++ b/continuedev/src/continuedev/libs/llm/hf_inference_api.py
@@ -11,9 +11,10 @@ class HuggingFaceInferenceAPI(LLM):
api_key: str
model: str
- def __init__(self, api_key: str, model: str):
+ def __init__(self, api_key: str, model: str, system_message: str = None):
self.api_key = api_key
self.model = model
+ self.system_message = system_message # TODO: Nothing being done with this
def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs):
"""Return the completion of the text with the given temperature."""
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index f0877d90..33d10985 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -1,30 +1,41 @@
from functools import cached_property
-import time
from typing import Any, Coroutine, Dict, Generator, List, Union
+
from ...core.main import ChatMessage
import openai
from ..llm import LLM
-from ..util.count_tokens import DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, prune_raw_prompt_from_top
+from ..util.count_tokens import compile_chat_messages, CHAT_MODELS, DEFAULT_ARGS, count_tokens, prune_raw_prompt_from_top
+from ...core.config import AzureInfo
class OpenAI(LLM):
api_key: str
default_model: str
- def __init__(self, api_key: str, default_model: str, system_message: str = None):
+ def __init__(self, api_key: str, default_model: str, system_message: str = None, azure_info: AzureInfo = None):
self.api_key = api_key
self.default_model = default_model
self.system_message = system_message
+ self.azure_info = azure_info
openai.api_key = api_key
+ # Using an Azure OpenAI deployment
+ if azure_info is not None:
+ openai.api_type = "azure"
+ openai.api_base = azure_info.endpoint
+ openai.api_version = azure_info.api_version
+
@cached_property
def name(self):
return self.default_model
@property
def default_args(self):
- return {**DEFAULT_ARGS, "model": self.default_model}
+ args = {**DEFAULT_ARGS, "model": self.default_model}
+ if self.azure_info is not None:
+ args["engine"] = self.azure_info.engine
+ return args
def count_tokens(self, text: str):
return count_tokens(self.default_model, text)
@@ -37,7 +48,7 @@ class OpenAI(LLM):
if args["model"] in CHAT_MODELS:
async for chunk in await openai.ChatCompletion.acreate(
messages=compile_chat_messages(
- args["model"], with_history, args["max_tokens"], prompt, functions=None),
+ args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message),
**args,
):
if "content" in chunk.choices[0].delta:
@@ -58,7 +69,7 @@ class OpenAI(LLM):
async for chunk in await openai.ChatCompletion.acreate(
messages=compile_chat_messages(
- args["model"], messages, args["max_tokens"], functions=args.get("functions", None)),
+ args["model"], messages, args["max_tokens"], functions=args.get("functions", None), system_message=self.system_message),
**args,
):
yield chunk.choices[0].delta
@@ -69,7 +80,7 @@ class OpenAI(LLM):
if args["model"] in CHAT_MODELS:
resp = (await openai.ChatCompletion.acreate(
messages=compile_chat_messages(
- args["model"], with_history, args["max_tokens"], prompt, functions=None),
+ args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message),
**args,
)).choices[0].message.content
else:
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index eab6e441..3ec492f3 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -38,7 +38,7 @@ class ProxyServer(LLM):
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
async with session.post(f"{SERVER_URL}/complete", json={
- "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None),
+ "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None, system_message=self.system_message),
"unique_id": self.unique_id,
**args
}) as resp:
@@ -50,7 +50,7 @@ class ProxyServer(LLM):
async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, Generator[Union[Any, List, Dict], None, None]]:
args = {**self.default_args, **kwargs}
messages = compile_chat_messages(
- self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None))
+ self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None), system_message=self.system_message)
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
async with session.post(f"{SERVER_URL}/stream_chat", json={
@@ -74,7 +74,7 @@ class ProxyServer(LLM):
async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
args = {**self.default_args, **kwargs}
messages = compile_chat_messages(
- self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None))
+ self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None), system_message=self.system_message)
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
async with session.post(f"{SERVER_URL}/stream_complete", json={
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 73be0717..1ca98fe6 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -1,15 +1,19 @@
import json
from typing import Dict, List, Union
from ...core.main import ChatMessage
+from .templating import render_system_message
import tiktoken
-aliases = {}
+aliases = {
+ "ggml": "gpt-3.5-turbo",
+}
DEFAULT_MAX_TOKENS = 2048
MAX_TOKENS_FOR_MODEL = {
"gpt-3.5-turbo": 4096,
"gpt-3.5-turbo-0613": 4096,
"gpt-3.5-turbo-16k": 16384,
- "gpt-4": 8192
+ "gpt-4": 8192,
+ "ggml": 2048
}
CHAT_MODELS = {
"gpt-3.5-turbo", "gpt-3.5-turbo-16k", "gpt-4", "gpt-3.5-turbo-0613"
@@ -82,13 +86,15 @@ def compile_chat_messages(model: str, msgs: List[ChatMessage], max_tokens: int,
for function in functions:
prompt_tokens += count_tokens(model, json.dumps(function))
+ rendered_system_message = render_system_message(system_message)
+
msgs = prune_chat_history(model,
- msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + max_tokens + count_tokens(model, system_message))
+ msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + max_tokens + count_tokens(model, rendered_system_message))
history = []
if system_message:
history.append({
"role": "system",
- "content": system_message
+ "content": rendered_system_message
})
history += [msg.to_dict(with_functions=functions is not None)
for msg in msgs]
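
Not part of the commit: assuming count_tokens resolves model names through the aliases table, the new "ggml" entry means local models are tokenized with the gpt-3.5-turbo tiktoken encoding:

```python
from continuedev.libs.util.count_tokens import count_tokens

# Counted with the gpt-3.5-turbo encoding because of the "ggml" alias
print(count_tokens("ggml", "def add(a: int, b: int) -> int:\n    return a + b"))
```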
diff --git a/continuedev/src/continuedev/libs/util/dedent.py b/continuedev/src/continuedev/libs/util/strings.py
index e59c2e97..f1fb8d0b 100644
--- a/continuedev/src/continuedev/libs/util/dedent.py
+++ b/continuedev/src/continuedev/libs/util/strings.py
@@ -23,3 +23,27 @@ def dedent_and_get_common_whitespace(s: str) -> Tuple[str, str]:
break
return "\n".join(map(lambda x: x.lstrip(lcp), lines)), lcp
+
+
+def remove_quotes_and_escapes(output: str) -> str:
+ """
+ Clean up the output of the completion API, removing unnecessary escapes and quotes
+ """
+ output = output.strip()
+
+ # Replace smart quotes
+ output = output.replace("“", '"')
+ output = output.replace("”", '"')
+ output = output.replace("‘", "'")
+ output = output.replace("’", "'")
+
+ # Remove escapes
+ output = output.replace('\\"', '"')
+ output = output.replace("\\'", "'")
+ output = output.replace("\\n", "\n")
+ output = output.replace("\\t", "\t")
+ output = output.replace("\\\\", "\\")
+ if (output.startswith('"') and output.endswith('"')) or (output.startswith("'") and output.endswith("'")):
+ output = output[1:-1]
+
+ return output
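
Not part of the commit: the expected behavior of the relocated helper, per the implementation above.

```python
from continuedev.libs.util.strings import remove_quotes_and_escapes

raw = '"Fix the \\"main\\" loop"'
print(remove_quotes_and_escapes(raw))
# -> Fix the "main" loop   (escapes unescaped, wrapping quotes stripped)
```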
diff --git a/continuedev/src/continuedev/libs/util/templating.py b/continuedev/src/continuedev/libs/util/templating.py
new file mode 100644
index 00000000..ebfc2e31
--- /dev/null
+++ b/continuedev/src/continuedev/libs/util/templating.py
@@ -0,0 +1,39 @@
+import os
+import chevron
+
+
+def get_vars_in_template(template):
+ """
+ Get the variables in a template
+ """
+ return [token[1] for token in chevron.tokenizer.tokenize(template) if token[0] == 'variable']
+
+
+def escape_var(var: str) -> str:
+ """
+ Escape a variable so it can be used in a template
+ """
+ return var.replace(os.path.sep, '').replace('.', '')
+
+
+def render_system_message(system_message: str) -> str:
+ """
+ Render system message with mustache syntax.
+ Right now it only supports rendering absolute file paths as their contents.
+ """
+ vars = get_vars_in_template(system_message)
+
+ args = {}
+ for var in vars:
+ if var.startswith(os.path.sep):
+ # Escape vars which are filenames, because mustache doesn't allow / in variable names
+ escaped_var = escape_var(var)
+ system_message = system_message.replace(
+ var, escaped_var)
+
+ if os.path.exists(var):
+ args[escaped_var] = open(var, 'r').read()
+ else:
+ args[escaped_var] = ''
+
+ return chevron.render(system_message, args)
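
Not part of the commit: a sketch of the rendering behavior with a hypothetical absolute path; if the file exists its contents are inlined, otherwise the tag renders as an empty string.

```python
from continuedev.libs.util.templating import render_system_message

msg = "Follow the conventions described in {{ /home/user/project/STYLE.md }}"
# The path is escaped to a chevron-safe variable name, then read from disk
print(render_system_message(msg))
```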
diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py
index a91708ec..43538407 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/continuedev/src/continuedev/server/ide.py
@@ -442,6 +442,7 @@ async def websocket_endpoint(websocket: WebSocket, session_id: str = None):
if session_id is not None:
session_manager.registered_ides[session_id] = ideProtocolServer
other_msgs = await ideProtocolServer.initialize(session_id)
+ capture_event(ideProtocolServer.unique_id, "session_started", { "session_id": ideProtocolServer.session_id })
for other_msg in other_msgs:
handle_msg(other_msg)
@@ -462,4 +463,5 @@ async def websocket_endpoint(websocket: WebSocket, session_id: str = None):
if websocket.client_state != WebSocketState.DISCONNECTED:
await websocket.close()
+ capture_event(ideProtocolServer.unique_id, "session_ended", { "session_id": ideProtocolServer.session_id })
session_manager.registered_ides.pop(ideProtocolServer.session_id)
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 3751dec2..7c6b42db 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -3,6 +3,7 @@ from typing import Any, Coroutine, List
from pydantic import Field
+from ..libs.util.strings import remove_quotes_and_escapes
from .main import EditHighlightedCodeStep
from .core.core import MessageStep
from ..core.main import FunctionCall, Models
@@ -43,11 +44,8 @@ class SimpleChatStep(Step):
finally:
await generator.aclose()
- self.name = (await sdk.models.gpt35.complete(
- f"Write a short title for the following chat message: {self.description}")).strip()
-
- if self.name.startswith('"') and self.name.endswith('"'):
- self.name = self.name[1:-1]
+ self.name = remove_quotes_and_escapes(await sdk.models.gpt35.complete(
+ f"Write a short title for the following chat message: {self.description}"))
self.chat_context.append(ChatMessage(
role="assistant",
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 90d64287..2b049ecc 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -1,9 +1,11 @@
# These steps are depended upon by ContinueSDK
import os
import subprocess
+import difflib
from textwrap import dedent
from typing import Coroutine, List, Literal, Union
+from ...libs.llm.ggml import GGML
from ...models.main import Range
from ...libs.llm.prompt_utils import MarkdownStyleEncoderDecoder
from ...models.filesystem_edit import EditDiff, FileEdit, FileEditWithFullContents, FileSystemEdit
@@ -11,7 +13,7 @@ from ...models.filesystem import FileSystem, RangeInFile, RangeInFileWithContent
from ...core.observation import Observation, TextObservation, TracebackObservation, UserInputObservation
from ...core.main import ChatMessage, ContinueCustomException, Step, SequentialStep
from ...libs.util.count_tokens import MAX_TOKENS_FOR_MODEL, DEFAULT_MAX_TOKENS
-from ...libs.util.dedent import dedent_and_get_common_whitespace
+from ...libs.util.strings import dedent_and_get_common_whitespace, remove_quotes_and_escapes
import difflib
@@ -156,42 +158,32 @@ class DefaultModelEditCodeStep(Step):
_new_contents: str = ""
_prompt_and_completion: str = ""
- def _cleanup_output(self, output: str) -> str:
- output = output.replace('\\"', '"')
- output = output.replace("\\'", "'")
- output = output.replace("\\n", "\n")
- output = output.replace("\\t", "\t")
- output = output.replace("\\\\", "\\")
- if output.startswith('"') and output.endswith('"'):
- output = output[1:-1]
-
- return output
-
async def describe(self, models: Models) -> Coroutine[str, None, None]:
if self._previous_contents.strip() == self._new_contents.strip():
description = "No edits were made"
else:
+ changes = '\n'.join(difflib.ndiff(
+ self._previous_contents.splitlines(), self._new_contents.splitlines()))
description = await models.gpt3516k.complete(dedent(f"""\
- ```original
- {self._previous_contents}
- ```
+ Diff summary: "{self.user_input}"
- ```new
- {self._new_contents}
+ ```diff
+ {changes}
```
Please give a brief description of the changes made above using markdown bullet points. Be concise:"""))
name = await models.gpt3516k.complete(f"Write a very short title to describe this requested change (no quotes): '{self.user_input}'. This is the title:")
- self.name = self._cleanup_output(name)
+ self.name = remove_quotes_and_escapes(name)
- return f"{self._cleanup_output(description)}"
+ return f"{remove_quotes_and_escapes(description)}"
async def get_prompt_parts(self, rif: RangeInFileWithContents, sdk: ContinueSDK, full_file_contents: str):
# We don't know here all of the functions being passed in.
# We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
# Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
- model_to_use = sdk.models.gpt4
- max_tokens = DEFAULT_MAX_TOKENS
+ model_to_use = sdk.models.default
+ max_tokens = int(MAX_TOKENS_FOR_MODEL.get(
+ model_to_use.name, DEFAULT_MAX_TOKENS) / 2)
TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1200
if model_to_use.count_tokens(rif.contents) > TOKENS_TO_BE_CONSIDERED_LARGE_RANGE:
@@ -495,6 +487,10 @@ Please output the code to be inserted at the cursor in order to fulfill the user
repeating_file_suffix = False
line_below_highlighted_range = file_suffix.lstrip().split("\n")[0]
+ if isinstance(model_to_use, GGML):
+ messages = [ChatMessage(
+ role="user", content=f"```\n{rif.contents}\n```\n\nUser request: \"{self.user_input}\"\n\nThis is the code after changing to perfectly comply with the user request. It does not include any placeholder code, only real implementations:\n\n```\n", summary=self.user_input)]
+
generator = model_to_use.stream_chat(
messages, temperature=0, max_tokens=max_tokens)
diff --git a/extension/package-lock.json b/extension/package-lock.json
index 6f777c72..e67fa950 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "continue",
- "version": "0.0.174",
+ "version": "0.0.178",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "continue",
- "version": "0.0.174",
+ "version": "0.0.178",
"license": "Apache-2.0",
"dependencies": {
"@electron/rebuild": "^3.2.10",
diff --git a/extension/package.json b/extension/package.json
index 9fe38f7f..121423ed 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
"displayName": "Continue",
"pricing": "Free",
"description": "The open-source coding autopilot",
- "version": "0.0.174",
+ "version": "0.0.178",
"publisher": "Continue",
"engines": {
"vscode": "^1.67.0"
@@ -106,6 +106,11 @@
"command": "continue.quickTextEntry",
"category": "Continue",
"title": "Quick Text Entry"
+ },
+ {
+ "command": "continue.quickFix",
+ "category": "Continue",
+ "title": "Quick Fix"
}
],
"keybindings": [
diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx
index f11e07af..f327e3a3 100644
--- a/extension/react-app/src/components/ComboBox.tsx
+++ b/extension/react-app/src/components/ComboBox.tsx
@@ -6,6 +6,7 @@ import {
lightGray,
secondaryDark,
vscBackground,
+ vscForeground,
} from ".";
import CodeBlock from "./CodeBlock";
import PillButton from "./PillButton";
@@ -37,21 +38,6 @@ const EmptyPillDiv = styled.div`
}
`;
-const ContextDropdown = styled.div`
- position: absolute;
- padding: 4px;
- width: calc(100% - 16px - 8px);
- background-color: ${secondaryDark};
- color: white;
- border-bottom-right-radius: ${defaultBorderRadius};
- border-bottom-left-radius: ${defaultBorderRadius};
- /* border: 1px solid white; */
- border-top: none;
- margin: 8px;
- outline: 1px solid orange;
- z-index: 5;
-`;
-
const MainTextInput = styled.textarea`
resize: none;
@@ -63,7 +49,7 @@ const MainTextInput = styled.textarea`
height: auto;
width: 100%;
background-color: ${secondaryDark};
- color: white;
+ color: ${vscForeground};
z-index: 1;
border: 1px solid transparent;
@@ -86,14 +72,15 @@ const Ul = styled.ul<{
position: absolute;
background: ${vscBackground};
background-color: ${secondaryDark};
- color: white;
+ color: ${vscForeground};
max-height: ${UlMaxHeight}px;
+ width: calc(100% - 16px);
overflow-y: scroll;
overflow-x: hidden;
padding: 0;
${({ hidden }) => hidden && "display: none;"}
border-radius: ${defaultBorderRadius};
- border: 0.5px solid gray;
+ outline: 0.5px solid gray;
z-index: 2;
// Get rid of scrollbar and its padding
scrollbar-width: none;
@@ -109,6 +96,7 @@ const Li = styled.li<{
selected: boolean;
isLastItem: boolean;
}>`
+ background-color: ${secondaryDark};
${({ highlighted }) => highlighted && "background: #ff000066;"}
${({ selected }) => selected && "font-weight: bold;"}
padding: 0.5rem 0.75rem;
@@ -138,10 +126,6 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
// The position of the current command you are typing now, so the one that will be appended to history once you press enter
const [positionInHistory, setPositionInHistory] = React.useState<number>(0);
const [items, setItems] = React.useState(props.items);
- const [hoveringButton, setHoveringButton] = React.useState(false);
- const [hoveringContextDropdown, setHoveringContextDropdown] =
- React.useState(false);
- const [pinned, setPinned] = useState(false);
const [highlightedCodeSections, setHighlightedCodeSections] = React.useState(
props.highlightedCodeSections || []
);
@@ -236,6 +220,9 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
? "Editing such a large range may be slow"
: undefined
}
+ onlyShowDelete={
+ highlightedCodeSections.length <= 1 || section.editing
+ }
editing={section.editing}
pinned={section.pinned}
index={idx}
@@ -253,15 +240,6 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
return newSections;
});
}}
- onHover={(val: boolean) => {
- if (val) {
- setHoveringButton(val);
- } else {
- setTimeout(() => {
- setHoveringButton(val);
- }, 100);
- }
- }}
/>
))}
{props.highlightedCodeSections.length > 0 &&
@@ -271,11 +249,11 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
props.onToggleAddContext();
}}
>
- Highlight to Add Context
+ Highlight code section
</EmptyPillDiv>
) : (
<HeaderButtonWithText
- text="Add to Context"
+ text="Add more code to context"
onClick={() => {
props.onToggleAddContext();
}}
@@ -287,7 +265,7 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
<div className="flex px-2" ref={divRef} hidden={!downshiftProps.isOpen}>
<MainTextInput
disabled={props.disabled}
- placeholder={`Ask a question, give instructions, or type '/' to see slash commands. ${getMetaKeyLabel()}⏎ to edit.`}
+ placeholder={`Ask a question, give instructions, or type '/' to see slash commands`}
{...getInputProps({
onChange: (e) => {
const target = e.target as HTMLTextAreaElement;
@@ -361,6 +339,7 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
})}
showAbove={showAbove()}
ulHeightPixels={ulRef.current?.getBoundingClientRect().height || 0}
+ hidden={!downshiftProps.isOpen || items.length === 0}
>
{downshiftProps.isOpen &&
items.map((item, index) => (
@@ -387,24 +366,6 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
Inserting at cursor
</div>
)}
- <ContextDropdown
- onMouseEnter={() => {
- setHoveringContextDropdown(true);
- }}
- onMouseLeave={() => {
- setHoveringContextDropdown(false);
- }}
- hidden={true || (!hoveringContextDropdown && !hoveringButton)}
- >
- {highlightedCodeSections.map((section, idx) => (
- <>
- <p>{section.display_name}</p>
- <CodeBlock showCopy={false} key={idx}>
- {section.range.contents}
- </CodeBlock>
- </>
- ))}
- </ContextDropdown>
</>
);
});
diff --git a/extension/react-app/src/components/InputAndButton.tsx b/extension/react-app/src/components/InputAndButton.tsx
index 0a8592f2..8019d014 100644
--- a/extension/react-app/src/components/InputAndButton.tsx
+++ b/extension/react-app/src/components/InputAndButton.tsx
@@ -1,6 +1,6 @@
import React, { useRef } from "react";
import styled from "styled-components";
-import { vscBackground } from ".";
+import { vscBackground, vscForeground } from ".";
interface InputAndButtonProps {
onUserInput: (input: string) => void;
@@ -16,7 +16,7 @@ const Input = styled.input`
padding: 0.5rem;
border: 1px solid white;
background-color: ${vscBackground};
- color: white;
+ color: ${vscForeground};
border-radius: 4px;
border-top-right-radius: 0;
border-bottom-right-radius: 0;
@@ -27,7 +27,7 @@ const Button = styled.button`
padding: 0.5rem;
border: 1px solid white;
background-color: ${vscBackground};
- color: white;
+ color: ${vscForeground};
border-radius: 4px;
border-top-left-radius: 0;
border-bottom-left-radius: 0;
@@ -35,8 +35,8 @@ const Button = styled.button`
cursor: pointer;
&:hover {
- background-color: white;
- color: black;
+ background-color: ${vscForeground};
+ color: ${vscBackground};
}
`;
diff --git a/extension/react-app/src/components/PillButton.tsx b/extension/react-app/src/components/PillButton.tsx
index d9d779d1..c24dba83 100644
--- a/extension/react-app/src/components/PillButton.tsx
+++ b/extension/react-app/src/components/PillButton.tsx
@@ -1,6 +1,11 @@
import { useContext, useState } from "react";
import styled from "styled-components";
-import { StyledTooltip, defaultBorderRadius, secondaryDark } from ".";
+import {
+ StyledTooltip,
+ defaultBorderRadius,
+ secondaryDark,
+ vscForeground,
+} from ".";
import {
Trash,
PaintBrush,
@@ -10,7 +15,7 @@ import { GUIClientContext } from "../App";
const Button = styled.button`
border: none;
- color: white;
+ color: ${vscForeground};
background-color: ${secondaryDark};
border-radius: ${defaultBorderRadius};
padding: 8px;
@@ -27,7 +32,6 @@ const GridDiv = styled.div`
height: 100%;
display: grid;
grid-gap: 0;
- grid-template-columns: 1fr 1fr;
align-items: center;
border-radius: ${defaultBorderRadius};
@@ -69,6 +73,7 @@ interface PillButtonProps {
editing: boolean;
pinned: boolean;
warning?: string;
+ onlyShowDelete?: boolean;
}
const PillButton = (props: PillButtonProps) => {
@@ -105,19 +110,25 @@ const PillButton = (props: PillButtonProps) => {
}}
>
{isHovered && (
- <GridDiv>
- <ButtonDiv
- data-tooltip-id={`edit-${props.index}`}
- backgroundColor={"#8800aa55"}
- onClick={() => {
- client?.setEditingAtIndices([props.index]);
- }}
- >
- <PaintBrush
- style={{ margin: "auto" }}
- width="1.6em"
- ></PaintBrush>
- </ButtonDiv>
+ <GridDiv
+ style={{
+ gridTemplateColumns: props.onlyShowDelete ? "1fr" : "1fr 1fr",
+ }}
+ >
+ {props.onlyShowDelete || (
+ <ButtonDiv
+ data-tooltip-id={`edit-${props.index}`}
+ backgroundColor={"#8800aa55"}
+ onClick={() => {
+ client?.setEditingAtIndices([props.index]);
+ }}
+ >
+ <PaintBrush
+ style={{ margin: "auto" }}
+ width="1.6em"
+ ></PaintBrush>
+ </ButtonDiv>
+ )}
{/* <ButtonDiv
data-tooltip-id={`pin-${props.index}`}
@@ -148,8 +159,8 @@ const PillButton = (props: PillButtonProps) => {
</Button>
<StyledTooltip id={`edit-${props.index}`}>
{props.editing
- ? "Editing this range (with rest of file as context)"
- : "Edit this range"}
+ ? "Editing this section (with entire file as context)"
+ : "Edit this section"}
</StyledTooltip>
<StyledTooltip id={`delete-${props.index}`}>Delete</StyledTooltip>
{props.warning && (
diff --git a/extension/react-app/src/components/StepContainer.tsx b/extension/react-app/src/components/StepContainer.tsx
index 93bdbc89..9ab7430c 100644
--- a/extension/react-app/src/components/StepContainer.tsx
+++ b/extension/react-app/src/components/StepContainer.tsx
@@ -6,6 +6,7 @@ import {
secondaryDark,
vscBackground,
vscBackgroundTransparent,
+ vscForeground,
} from ".";
import {
ChevronDown,
@@ -52,12 +53,7 @@ const StepContainerDiv = styled.div<{ open: boolean }>`
`;
const HeaderDiv = styled.div<{ error: boolean; loading: boolean }>`
- background-color: ${(props) =>
- props.error
- ? "#522"
- : props.loading
- ? vscBackgroundTransparent
- : vscBackground};
+ background-color: ${(props) => (props.error ? "#522" : vscBackground)};
display: grid;
grid-template-columns: 1fr auto auto;
grid-gap: 8px;
@@ -120,20 +116,22 @@ const StyledMarkdownPreview = styled(MarkdownPreview)`
}
code {
- color: #f69292;
+ color: #f78383;
word-wrap: break-word;
+ border-radius: ${defaultBorderRadius};
+ background-color: ${secondaryDark};
}
pre > code {
background-color: ${secondaryDark};
- color: white;
+ color: ${vscForeground};
}
background-color: ${vscBackground};
font-family: "Lexend", sans-serif;
font-size: 13px;
padding: 8px;
- color: white;
+ color: ${vscForeground};
`;
// #endregion
@@ -267,6 +265,9 @@ function StepContainer(props: StepContainerProps) {
) : (
<StyledMarkdownPreview
source={props.historyNode.step.description || ""}
+ wrapperElement={{
+ "data-color-mode": "dark",
+ }}
/>
)}
</ContentDiv>
diff --git a/extension/react-app/src/components/TextDialog.tsx b/extension/react-app/src/components/TextDialog.tsx
index 646d6846..cba3852d 100644
--- a/extension/react-app/src/components/TextDialog.tsx
+++ b/extension/react-app/src/components/TextDialog.tsx
@@ -1,7 +1,7 @@
// Write a component that displays a dialog box with a text field and a button.
import React, { useEffect, useState } from "react";
import styled from "styled-components";
-import { Button, buttonColor, secondaryDark, vscBackground } from ".";
+import { Button, secondaryDark, vscBackground, vscForeground } from ".";
import { isMetaEquivalentKeyPressed } from "../util";
const ScreenCover = styled.div`
@@ -21,13 +21,13 @@ const DialogContainer = styled.div`
`;
const Dialog = styled.div`
- background-color: white;
+ color: ${vscForeground};
+ background-color: ${vscBackground};
border-radius: 8px;
padding: 8px;
display: flex;
flex-direction: column;
- /* box-shadow: 0 0 10px 0 rgba(255, 255, 255, 0.5); */
- border: 2px solid ${buttonColor};
+ box-shadow: 0 0 10px 0 ${vscForeground};
width: fit-content;
margin: auto;
`;
@@ -38,14 +38,16 @@ const TextArea = styled.textarea`
padding: 8px;
outline: 1px solid black;
resize: none;
+ background-color: ${secondaryDark};
+ color: ${vscForeground};
&:focus {
- outline: 1px solid ${buttonColor};
+ outline: 1px solid ${vscForeground};
}
`;
const P = styled.p`
- color: black;
+ color: ${vscForeground};
margin: 8px auto;
`;
diff --git a/extension/react-app/src/components/index.ts b/extension/react-app/src/components/index.ts
index 9ae0f097..cb5e7915 100644
--- a/extension/react-app/src/components/index.ts
+++ b/extension/react-app/src/components/index.ts
@@ -3,12 +3,16 @@ import styled, { keyframes } from "styled-components";
export const defaultBorderRadius = "5px";
export const lightGray = "rgb(100 100 100)";
-export const secondaryDark = "rgb(45 45 45)";
-export const vscBackground = "rgb(30 30 30)";
+// export const secondaryDark = "rgb(45 45 45)";
+// export const vscBackground = "rgb(30 30 30)";
export const vscBackgroundTransparent = "#1e1e1ede";
export const buttonColor = "rgb(113 28 59)";
export const buttonColorHover = "rgb(113 28 59 0.67)";
+export const secondaryDark = "var(--vscode-textBlockQuote-background)";
+export const vscBackground = "var(--vscode-editor-background)";
+export const vscForeground = "var(--vscode-editor-foreground)";
+
export const Button = styled.button`
padding: 10px 12px;
margin: 8px 0;
@@ -46,8 +50,8 @@ export const TextArea = styled.textarea`
resize: vertical;
padding: 4px;
- caret-color: white;
- color: white;
+ caret-color: ${vscForeground};
+  color: ${vscForeground};
&:focus {
outline: 1px solid ${buttonColor};
@@ -120,7 +124,7 @@ export const MainTextInput = styled.textarea`
border: 1px solid #ccc;
margin: 8px 8px;
background-color: ${vscBackground};
- color: white;
+ color: ${vscForeground};
outline: 1px solid orange;
resize: none;
`;
@@ -137,8 +141,9 @@ export const appear = keyframes`
`;
export const HeaderButton = styled.button<{ inverted: boolean | undefined }>`
- background-color: ${({ inverted }) => (inverted ? "white" : "transparent")};
- color: ${({ inverted }) => (inverted ? "black" : "white")};
+ background-color: ${({ inverted }) =>
+ inverted ? vscForeground : "transparent"};
+ color: ${({ inverted }) => (inverted ? vscBackground : vscForeground)};
border: none;
border-radius: ${defaultBorderRadius};
@@ -146,7 +151,9 @@ export const HeaderButton = styled.button<{ inverted: boolean | undefined }>`
&:hover {
background-color: ${({ inverted }) =>
- typeof inverted === "undefined" || inverted ? lightGray : "transparent"};
+ typeof inverted === "undefined" || inverted
+ ? secondaryDark
+ : "transparent"};
}
display: flex;
align-items: center;
diff --git a/extension/react-app/src/index.css b/extension/react-app/src/index.css
index 6e33c89c..bac7fe97 100644
--- a/extension/react-app/src/index.css
+++ b/extension/react-app/src/index.css
@@ -14,13 +14,13 @@ html,
body,
#root {
height: 100%;
- background-color: var(--vsc-background);
+ background-color: var(--vscode-editor-background);
font-family: "Lexend", sans-serif;
}
body {
padding: 0;
- color: white;
+ color: var(--vscode-editor-foreground);
padding: 0px;
margin: 0px;
height: 100%;
diff --git a/extension/react-app/src/pages/gui.tsx b/extension/react-app/src/pages/gui.tsx
index 64207487..c35cf21b 100644
--- a/extension/react-app/src/pages/gui.tsx
+++ b/extension/react-app/src/pages/gui.tsx
@@ -1,5 +1,9 @@
import styled from "styled-components";
-import { defaultBorderRadius } from "../components";
+import {
+ defaultBorderRadius,
+ vscBackground,
+ vscForeground,
+} from "../components";
import Loader from "../components/Loader";
import ContinueButton from "../components/ContinueButton";
import { FullState, HighlightedRangeContext } from "../../../schema/FullState";
@@ -371,12 +375,13 @@ function GUI(props: GUIProps) {
style={{
position: "fixed",
bottom: "50px",
- backgroundColor: "white",
- color: "black",
+ backgroundColor: vscBackground,
+ color: vscForeground,
borderRadius: defaultBorderRadius,
padding: "16px",
margin: "16px",
zIndex: 100,
+ boxShadow: `0px 0px 10px 0px ${vscForeground}`,
}}
hidden={!showDataSharingInfo}
>
diff --git a/extension/src/activation/activate.ts b/extension/src/activation/activate.ts
index cd885b12..5c6ffa02 100644
--- a/extension/src/activation/activate.ts
+++ b/extension/src/activation/activate.ts
@@ -10,6 +10,7 @@ import {
startContinuePythonServer,
} from "./environmentSetup";
import fetch from "node-fetch";
+import registerQuickFixProvider from "../lang-server/codeActions";
// import { CapturedTerminal } from "../terminal/terminalEmulator";
const PACKAGE_JSON_RAW_GITHUB_URL =
@@ -55,6 +56,7 @@ export async function activateExtension(context: vscode.ExtensionContext) {
sendTelemetryEvent(TelemetryEvent.ExtensionActivated);
registerAllCodeLensProviders(context);
registerAllCommands(context);
+ registerQuickFixProvider();
// Initialize IDE Protocol Client
const serverUrl = getContinueServerUrl();
diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts
index df609a34..69a3b75a 100644
--- a/extension/src/activation/environmentSetup.ts
+++ b/extension/src/activation/environmentSetup.ts
@@ -400,6 +400,14 @@ function serverPath(): string {
return sPath;
}
+export function devDataPath(): string {
+ const sPath = path.join(getContinueGlobalPath(), "dev_data");
+ if (!fs.existsSync(sPath)) {
+ fs.mkdirSync(sPath);
+ }
+ return sPath;
+}
+
function serverVersionPath(): string {
return path.join(serverPath(), "server_version.txt");
}
diff --git a/extension/src/bridge.ts b/extension/src/bridge.ts
index 7e6398be..d614ace4 100644
--- a/extension/src/bridge.ts
+++ b/extension/src/bridge.ts
@@ -1,11 +1,7 @@
import fetch from "node-fetch";
import * as path from "path";
import * as vscode from "vscode";
-import {
- Configuration,
- DebugApi,
- UnittestApi,
-} from "./client";
+import { Configuration, DebugApi, UnittestApi } from "./client";
import { convertSingleToDoubleQuoteJSON } from "./util/util";
import { getExtensionUri } from "./util/vscode";
import { extensionContext } from "./activation/activate";
diff --git a/extension/src/commands.ts b/extension/src/commands.ts
index 888f01ed..2b7f4c0c 100644
--- a/extension/src/commands.ts
+++ b/extension/src/commands.ts
@@ -34,6 +34,13 @@ const commandsMap: { [command: string]: (...args: any) => any } = {
"continue.rejectDiff": rejectDiffCommand,
"continue.acceptAllSuggestions": acceptAllSuggestionsCommand,
"continue.rejectAllSuggestions": rejectAllSuggestionsCommand,
+ "continue.quickFix": async (message: string, code: string, edit: boolean) => {
+ ideProtocolClient.sendMainUserInput(
+ `${
+ edit ? "/edit " : ""
+ }${code}\n\nHow do I fix this problem in the above code?: ${message}`
+ );
+ },
"continue.focusContinueInput": async () => {
if (focusedOnContinueInput) {
vscode.commands.executeCommand("workbench.action.focusActiveEditorGroup");
diff --git a/extension/src/diffs.ts b/extension/src/diffs.ts
index 2860258d..0bab326a 100644
--- a/extension/src/diffs.ts
+++ b/extension/src/diffs.ts
@@ -4,6 +4,7 @@ import * as fs from "fs";
import * as vscode from "vscode";
import { extensionContext, ideProtocolClient } from "./activation/activate";
import { getMetaKeyLabel } from "./util/util";
+import { devDataPath } from "./activation/environmentSetup";
interface DiffInfo {
originalFilepath: string;
@@ -13,7 +14,9 @@ interface DiffInfo {
range: vscode.Range;
}
-export const DIFF_DIRECTORY = path.join(os.homedir(), ".continue", "diffs").replace(/^C:/, "c:");
+export const DIFF_DIRECTORY = path
+ .join(os.homedir(), ".continue", "diffs")
+ .replace(/^C:/, "c:");
class DiffManager {
// Create a temporary file in the global .continue directory which displays the updated version
@@ -222,6 +225,8 @@ class DiffManager {
);
this.cleanUpDiff(diffInfo);
});
+
+ recordAcceptReject(true, diffInfo);
}
rejectDiff(newFilepath?: string) {
@@ -251,11 +256,50 @@ class DiffManager {
.then(() => {
this.cleanUpDiff(diffInfo);
});
+
+ recordAcceptReject(false, diffInfo);
}
}
export const diffManager = new DiffManager();
+function recordAcceptReject(accepted: boolean, diffInfo: DiffInfo) {
+ const collectOn = vscode.workspace
+ .getConfiguration("continue")
+ .get<boolean>("dataSwitch");
+
+ if (collectOn) {
+ const devDataDir = devDataPath();
+ const suggestionsPath = path.join(devDataDir, "suggestions.json");
+
+ // Initialize suggestions list
+ let suggestions = [];
+
+ // Check if suggestions.json exists
+ if (fs.existsSync(suggestionsPath)) {
+ const rawData = fs.readFileSync(suggestionsPath, "utf-8");
+ suggestions = JSON.parse(rawData);
+ }
+
+ // Add the new suggestion to the list
+ suggestions.push({
+ accepted,
+ timestamp: Date.now(),
+ suggestion: diffInfo.originalFilepath,
+ });
+
+ // Send the suggestion to the server
+ ideProtocolClient.sendAcceptRejectSuggestion(accepted);
+
+ // Write the updated suggestions back to the file
+ fs.writeFileSync(
+ suggestionsPath,
+ JSON.stringify(suggestions, null, 4),
+ "utf-8"
+ );
+ }
+}
+
export async function acceptDiffCommand(newFilepath?: string) {
diffManager.acceptDiff(newFilepath);
ideProtocolClient.sendAcceptRejectDiff(true);
diff --git a/extension/src/lang-server/codeActions.ts b/extension/src/lang-server/codeActions.ts
new file mode 100644
index 00000000..f0d61ace
--- /dev/null
+++ b/extension/src/lang-server/codeActions.ts
@@ -0,0 +1,55 @@
+import * as vscode from "vscode";
+
+class ContinueQuickFixProvider implements vscode.CodeActionProvider {
+ public static readonly providedCodeActionKinds = [
+ vscode.CodeActionKind.QuickFix,
+ ];
+
+ provideCodeActions(
+ document: vscode.TextDocument,
+ range: vscode.Range | vscode.Selection,
+ context: vscode.CodeActionContext,
+ token: vscode.CancellationToken
+ ): vscode.ProviderResult<(vscode.Command | vscode.CodeAction)[]> {
+ if (context.diagnostics.length === 0) {
+ return [];
+ }
+
+ const createQuickFix = (edit: boolean) => {
+ const diagnostic = context.diagnostics[0];
+ const quickFix = new vscode.CodeAction(
+ edit ? "Fix with Continue" : "Ask Continue",
+ vscode.CodeActionKind.QuickFix
+ );
+ quickFix.isPreferred = false;
+ const surroundingRange = new vscode.Range(
+ Math.max(0, range.start.line - 3),
+ 0,
+ Math.min(document.lineCount, range.end.line + 3),
+ 0
+ );
+ quickFix.command = {
+ command: "continue.quickFix",
+ title: "Continue Quick Fix",
+ arguments: [
+ diagnostic.message,
+ document.getText(surroundingRange),
+ edit,
+ ],
+ };
+ return quickFix;
+ };
+ return [createQuickFix(true), createQuickFix(false)];
+ }
+}
+
+export default function registerQuickFixProvider() {
+ // In your extension's activate function:
+ vscode.languages.registerCodeActionsProvider(
+ { language: "*" },
+ new ContinueQuickFixProvider(),
+ {
+ providedCodeActionKinds: ContinueQuickFixProvider.providedCodeActionKinds,
+ }
+ );
+}
diff --git a/extension/src/suggestions.ts b/extension/src/suggestions.ts
index 6e5a444f..c2373223 100644
--- a/extension/src/suggestions.ts
+++ b/extension/src/suggestions.ts
@@ -1,9 +1,7 @@
import * as vscode from "vscode";
import { sendTelemetryEvent, TelemetryEvent } from "./telemetry";
import { openEditorAndRevealRange } from "./util/vscode";
-import { translate, readFileAtRange } from "./util/vscode";
-import * as fs from "fs";
-import * as path from "path";
+import { translate } from "./util/vscode";
import { registerAllCodeLensProviders } from "./lang-server/codeLens";
import { extensionContext, ideProtocolClient } from "./activation/activate";
@@ -214,62 +212,6 @@ function selectSuggestion(
: suggestion.newRange;
}
- let workspaceDir = vscode.workspace.workspaceFolders
- ? vscode.workspace.workspaceFolders[0]?.uri.fsPath
- : undefined;
-
- let collectOn = vscode.workspace
- .getConfiguration("continue")
- .get<boolean>("dataSwitch");
-
- if (workspaceDir && collectOn) {
- let continueDir = path.join(workspaceDir, ".continue");
-
- // Check if .continue directory doesn't exists
- if (!fs.existsSync(continueDir)) {
- fs.mkdirSync(continueDir);
- }
-
- let suggestionsPath = path.join(continueDir, "suggestions.json");
-
- // Initialize suggestions list
- let suggestions = [];
-
- // Check if suggestions.json exists
- if (fs.existsSync(suggestionsPath)) {
- let rawData = fs.readFileSync(suggestionsPath, "utf-8");
- suggestions = JSON.parse(rawData);
- }
-
- const accepted =
- accept === "new" || (accept === "selected" && suggestion.newSelected);
- suggestions.push({
- accepted,
- timestamp: Date.now(),
- suggestion: suggestion.newContent,
- });
- ideProtocolClient.sendAcceptRejectSuggestion(accepted);
-
- // Write the updated suggestions back to the file
- fs.writeFileSync(
- suggestionsPath,
- JSON.stringify(suggestions, null, 4),
- "utf-8"
- );
-
- // If it's not already there, add .continue to .gitignore
- const gitignorePath = path.join(workspaceDir, ".gitignore");
- if (fs.existsSync(gitignorePath)) {
- const gitignoreData = fs.readFileSync(gitignorePath, "utf-8");
- const gitIgnoreLines = gitignoreData.split("\n");
- if (!gitIgnoreLines.includes(".continue")) {
- fs.appendFileSync(gitignorePath, "\n.continue\n");
- }
- } else {
- fs.writeFileSync(gitignorePath, ".continue\n");
- }
- }
-
rangeToDelete = new vscode.Range(
rangeToDelete.start,
new vscode.Position(rangeToDelete.end.line, 0)