summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--continuedev/src/continuedev/core/autopilot.py36
-rw-r--r--continuedev/src/continuedev/core/main.py1
-rw-r--r--continuedev/src/continuedev/core/sdk.py7
-rw-r--r--continuedev/src/continuedev/libs/llm/openai.py6
-rw-r--r--continuedev/src/continuedev/libs/llm/proxy_server.py6
-rw-r--r--continuedev/src/continuedev/libs/util/count_tokens.py4
-rw-r--r--continuedev/src/continuedev/steps/core/core.py23
-rw-r--r--continuedev/src/continuedev/steps/main.py49
-rw-r--r--extension/package-lock.json45
-rw-r--r--extension/package.json3
-rw-r--r--extension/react-app/src/components/ComboBox.tsx6
-rw-r--r--extension/schema/FullState.d.ts2
-rw-r--r--extension/src/activation/environmentSetup.ts69
-rw-r--r--extension/src/diffs.ts19
-rw-r--r--schema/json/FullState.json7
15 files changed, 185 insertions, 98 deletions
diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index 02fd61de..5c3baafd 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -151,30 +151,26 @@ class Autopilot(ContinueBaseModel):
self._highlighted_ranges[0].editing = True
async def handle_highlighted_code(self, range_in_files: List[RangeInFileWithContents]):
-
- # If un-highlighting, then remove the range
- if len(self._highlighted_ranges) == 1 and len(range_in_files) <= 1 and (len(range_in_files) == 0 or range_in_files[0].range.start == range_in_files[0].range.end) and not self._adding_highlighted_code:
- self._highlighted_ranges = []
- await self.update_subscribers()
- return
-
- # If not toggled to be adding context, only edit or add the first range
- if not self._adding_highlighted_code and len(self._highlighted_ranges) > 0:
- if len(range_in_files) == 0:
- return
- if range_in_files[0].range.overlaps_with(self._highlighted_ranges[0].range) and range_in_files[0].filepath == self._highlighted_ranges[0].range.filepath:
- self._highlighted_ranges = [HighlightedRangeContext(
- range=range_in_files[0].range, editing=True, pinned=False)]
- await self.update_subscribers()
- return
-
# Filter out rifs from ~/.continue/diffs folder
range_in_files = [
rif for rif in range_in_files if not os.path.dirname(rif.filepath) == os.path.expanduser("~/.continue/diffs")]
+ # Make sure all filepaths are relative to workspace
workspace_path = self.continue_sdk.ide.workspace_directory
- for rif in range_in_files:
- rif.filepath = os.path.basename(rif.filepath)
+
+ # If not adding highlighted code
+ if not self._adding_highlighted_code:
+ if len(self._highlighted_ranges) == 1 and len(range_in_files) <= 1 and (len(range_in_files) == 0 or range_in_files[0].range.start == range_in_files[0].range.end):
+ # If un-highlighting the range to edit, then remove the range
+ self._highlighted_ranges = []
+ await self.update_subscribers()
+ elif len(range_in_files) > 0:
+ # Otherwise, replace the current range with the new one
+ # (or make it the first highlighted range if none existed)
+ self._highlighted_ranges = [HighlightedRangeContext(
+ range=range_in_files[0], editing=True, pinned=False, display_name=os.path.basename(range_in_files[0].filepath))]
+ await self.update_subscribers()
+ return
# If current range overlaps with any others, delete them and only keep the new range
new_ranges = []
@@ -195,7 +191,7 @@ class Autopilot(ContinueBaseModel):
new_ranges.append(rif)
self._highlighted_ranges = new_ranges + [HighlightedRangeContext(
- range=rif, editing=False, pinned=False
+ range=rif, editing=False, pinned=False, display_name=os.path.basename(rif.filepath)
) for rif in range_in_files]
self._make_sure_is_editing_range()
diff --git a/continuedev/src/continuedev/core/main.py b/continuedev/src/continuedev/core/main.py
index 4ea17f20..88690c83 100644
--- a/continuedev/src/continuedev/core/main.py
+++ b/continuedev/src/continuedev/core/main.py
@@ -205,6 +205,7 @@ class HighlightedRangeContext(ContinueBaseModel):
range: RangeInFileWithContents
editing: bool
pinned: bool
+ display_name: str
class FullState(ContinueBaseModel):
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index ed670799..8649cd58 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -13,7 +13,7 @@ from ..libs.llm.hf_inference_api import HuggingFaceInferenceAPI
from ..libs.llm.openai import OpenAI
from .observation import Observation
from ..server.ide_protocol import AbstractIdeProtocolServer
-from .main import Context, ContinueCustomException, History, Step, ChatMessage, ChatMessageRole
+from .main import Context, ContinueCustomException, HighlightedRangeContext, History, Step, ChatMessage, ChatMessageRole
from ..steps.core.core import *
from ..libs.llm.proxy_server import ProxyServer
@@ -178,6 +178,11 @@ class ContinueSDK(AbstractContinueSDK):
else:
return load_global_config()
+ def get_code_context(self, only_editing: bool = False) -> List[RangeInFileWithContents]:
+ context = list(filter(lambda x: x.editing, self.__autopilot._highlighted_ranges)
+ ) if only_editing else self.__autopilot._highlighted_ranges
+ return [c.range for c in context]
+
def update_default_model(self, model: str):
config = self.config
config.default_model = model
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index c4e4139f..f0877d90 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -37,7 +37,7 @@ class OpenAI(LLM):
if args["model"] in CHAT_MODELS:
async for chunk in await openai.ChatCompletion.acreate(
messages=compile_chat_messages(
- args["model"], with_history, prompt, functions=None),
+ args["model"], with_history, args["max_tokens"], prompt, functions=None),
**args,
):
if "content" in chunk.choices[0].delta:
@@ -58,7 +58,7 @@ class OpenAI(LLM):
async for chunk in await openai.ChatCompletion.acreate(
messages=compile_chat_messages(
- args["model"], messages, functions=args.get("functions", None)),
+ args["model"], messages, args["max_tokens"], functions=args.get("functions", None)),
**args,
):
yield chunk.choices[0].delta
@@ -69,7 +69,7 @@ class OpenAI(LLM):
if args["model"] in CHAT_MODELS:
resp = (await openai.ChatCompletion.acreate(
messages=compile_chat_messages(
- args["model"], with_history, prompt, functions=None),
+ args["model"], with_history, args["max_tokens"], prompt, functions=None),
**args,
)).choices[0].message.content
else:
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 05ece394..eab6e441 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -38,7 +38,7 @@ class ProxyServer(LLM):
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
async with session.post(f"{SERVER_URL}/complete", json={
- "messages": compile_chat_messages(args["model"], with_history, prompt, functions=None),
+ "messages": compile_chat_messages(args["model"], with_history, args["max_tokens"], prompt, functions=None),
"unique_id": self.unique_id,
**args
}) as resp:
@@ -50,7 +50,7 @@ class ProxyServer(LLM):
async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, Generator[Union[Any, List, Dict], None, None]]:
args = {**self.default_args, **kwargs}
messages = compile_chat_messages(
- self.default_model, messages, None, functions=args.get("functions", None))
+ self.default_model, messages, args["max_tokens"], None, functions=args.get("functions", None))
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
async with session.post(f"{SERVER_URL}/stream_chat", json={
@@ -74,7 +74,7 @@ class ProxyServer(LLM):
async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
args = {**self.default_args, **kwargs}
messages = compile_chat_messages(
- self.default_model, with_history, prompt, functions=args.get("functions", None))
+ self.default_model, with_history, args["max_tokens"], prompt, functions=args.get("functions", None))
async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
async with session.post(f"{SERVER_URL}/stream_complete", json={
diff --git a/continuedev/src/continuedev/libs/util/count_tokens.py b/continuedev/src/continuedev/libs/util/count_tokens.py
index 8b06fef9..73be0717 100644
--- a/continuedev/src/continuedev/libs/util/count_tokens.py
+++ b/continuedev/src/continuedev/libs/util/count_tokens.py
@@ -76,14 +76,14 @@ def prune_chat_history(model: str, chat_history: List[ChatMessage], max_tokens:
return chat_history
-def compile_chat_messages(model: str, msgs: List[ChatMessage], prompt: Union[str, None] = None, functions: Union[List, None] = None, system_message: Union[str, None] = None) -> List[Dict]:
+def compile_chat_messages(model: str, msgs: List[ChatMessage], max_tokens: int, prompt: Union[str, None] = None, functions: Union[List, None] = None, system_message: Union[str, None] = None) -> List[Dict]:
prompt_tokens = count_tokens(model, prompt)
if functions is not None:
for function in functions:
prompt_tokens += count_tokens(model, json.dumps(function))
msgs = prune_chat_history(model,
- msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + DEFAULT_MAX_TOKENS + count_tokens(model, system_message))
+ msgs, MAX_TOKENS_FOR_MODEL[model], prompt_tokens + max_tokens + count_tokens(model, system_message))
history = []
if system_message:
history.append({
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 10853828..4b35a758 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -181,15 +181,22 @@ class DefaultModelEditCodeStep(Step):
# We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
# Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
model_to_use = sdk.models.gpt4
+ max_tokens = DEFAULT_MAX_TOKENS
- BUFFER_FOR_FUNCTIONS = 400
- total_tokens = model_to_use.count_tokens(
- full_file_contents + self._prompt + self.user_input) + BUFFER_FOR_FUNCTIONS + DEFAULT_MAX_TOKENS
-
- TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1000
+ TOKENS_TO_BE_CONSIDERED_LARGE_RANGE = 1200
if model_to_use.count_tokens(rif.contents) > TOKENS_TO_BE_CONSIDERED_LARGE_RANGE:
self.description += "\n\n**It looks like you've selected a large range to edit, which may take a while to complete. If you'd like to cancel, click the 'X' button above. If you highlight a more specific range, Continue will only edit within it.**"
+ # At this point, we also increase the max_tokens parameter so it doesn't stop in the middle of generation
+ # Increase max_tokens to 2.5x the size of the range,
+ # capping the pre-multiplier size at the default max tokens
+ max_tokens = int(min(model_to_use.count_tokens(
+ rif.contents), DEFAULT_MAX_TOKENS) * 2.5)
+
+ BUFFER_FOR_FUNCTIONS = 400
+ total_tokens = model_to_use.count_tokens(
+ full_file_contents + self._prompt + self.user_input) + BUFFER_FOR_FUNCTIONS + max_tokens
+
# If using 3.5 and overflows, upgrade to 3.5.16k
if model_to_use.name == "gpt-3.5-turbo":
if total_tokens > MAX_TOKENS_FOR_MODEL["gpt-3.5-turbo"]:
@@ -252,7 +259,7 @@ class DefaultModelEditCodeStep(Step):
file_suffix = "\n" + file_suffix
rif.contents = rif.contents[:-1]
- return file_prefix, rif.contents, file_suffix, model_to_use
+ return file_prefix, rif.contents, file_suffix, model_to_use, max_tokens
def compile_prompt(self, file_prefix: str, contents: str, file_suffix: str, sdk: ContinueSDK) -> str:
prompt = self._prompt
@@ -289,7 +296,7 @@ class DefaultModelEditCodeStep(Step):
await sdk.ide.saveFile(rif.filepath)
full_file_contents = await sdk.ide.readFile(rif.filepath)
- file_prefix, contents, file_suffix, model_to_use = await self.get_prompt_parts(
+ file_prefix, contents, file_suffix, model_to_use, max_tokens = await self.get_prompt_parts(
rif, sdk, full_file_contents)
contents, common_whitespace = dedent_and_get_common_whitespace(
contents)
@@ -435,7 +442,7 @@ class DefaultModelEditCodeStep(Step):
completion_lines_covered = 0
repeating_file_suffix = False
line_below_highlighted_range = file_suffix.lstrip().split("\n")[0]
- async for chunk in model_to_use.stream_chat(messages, temperature=0):
+ async for chunk in model_to_use.stream_chat(messages, temperature=0, max_tokens=max_tokens):
# Stop early if it is repeating the file_suffix or the step was deleted
if repeating_file_suffix:
break
diff --git a/continuedev/src/continuedev/steps/main.py b/continuedev/src/continuedev/steps/main.py
index 5ccffbfe..4f543022 100644
--- a/continuedev/src/continuedev/steps/main.py
+++ b/continuedev/src/continuedev/steps/main.py
@@ -97,7 +97,7 @@ class FasterEditHighlightedCodeStep(Step):
return "Editing highlighted code"
async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
- range_in_files = await sdk.ide.getHighlightedCode()
+ range_in_files = await sdk.get_code_context(only_editing=True)
if len(range_in_files) == 0:
# Get the full contents of all open files
files = await sdk.ide.getOpenFiles()
@@ -105,21 +105,16 @@ class FasterEditHighlightedCodeStep(Step):
for file in files:
contents[file] = await sdk.ide.readFile(file)
- range_in_files = [RangeInFile.from_entire_file(
+ range_in_files = [RangeInFileWithContents.from_entire_file(
filepath, content) for filepath, content in contents.items()]
- rif_with_contents = []
- for range_in_file in range_in_files:
- file_contents = await sdk.ide.readRangeInFile(range_in_file)
- rif_with_contents.append(
- RangeInFileWithContents.from_range_in_file(range_in_file, file_contents))
- enc_dec = MarkdownStyleEncoderDecoder(rif_with_contents)
+ enc_dec = MarkdownStyleEncoderDecoder(range_in_files)
code_string = enc_dec.encode()
prompt = self._prompt.format(
code=code_string, user_input=self.user_input)
rif_dict = {}
- for rif in rif_with_contents:
+ for rif in range_in_files:
rif_dict[rif.filepath] = rif.contents
completion = await sdk.models.gpt35.complete(prompt)
@@ -193,7 +188,7 @@ class StarCoderEditHighlightedCodeStep(Step):
return await models.gpt35.complete(f"{self._prompt_and_completion}\n\nPlease give brief a description of the changes made above using markdown bullet points:")
async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
- range_in_files = await sdk.ide.getHighlightedCode()
+ range_in_files = await sdk.get_code_context(only_editing=True)
found_highlighted_code = len(range_in_files) > 0
if not found_highlighted_code:
# Get the full contents of all open files
@@ -202,20 +197,14 @@ class StarCoderEditHighlightedCodeStep(Step):
for file in files:
contents[file] = await sdk.ide.readFile(file)
- range_in_files = [RangeInFile.from_entire_file(
+ range_in_files = [RangeInFileWithContents.from_entire_file(
filepath, content) for filepath, content in contents.items()]
- rif_with_contents = []
- for range_in_file in range_in_files:
- file_contents = await sdk.ide.readRangeInFile(range_in_file)
- rif_with_contents.append(
- RangeInFileWithContents.from_range_in_file(range_in_file, file_contents))
-
rif_dict = {}
- for rif in rif_with_contents:
+ for rif in range_in_files:
rif_dict[rif.filepath] = rif.contents
- for rif in rif_with_contents:
+ for rif in range_in_files:
prompt = self._prompt.format(
code=rif.contents, user_request=self.user_input)
@@ -255,7 +244,18 @@ class EditHighlightedCodeStep(Step):
return "Editing code"
async def run(self, sdk: ContinueSDK) -> Coroutine[Observation, None, None]:
- range_in_files = await sdk.ide.getHighlightedCode()
+ range_in_files = sdk.get_code_context(only_editing=True)
+
+ # If nothing highlighted, insert at the cursor if possible
+ if len(range_in_files) == 0:
+ highlighted_code = await sdk.ide.getHighlightedCode()
+ if highlighted_code is not None:
+ for rif in highlighted_code:
+ if rif.range.start == rif.range.end:
+ range_in_files.append(
+ RangeInFileWithContents.from_range_in_file(rif, ""))
+
+ # If nothing highlighted, edit the first open file
if len(range_in_files) == 0:
# Get the full contents of all open files
files = await sdk.ide.getOpenFiles()
@@ -263,7 +263,7 @@ class EditHighlightedCodeStep(Step):
for file in files:
contents[file] = await sdk.ide.readFile(file)
- range_in_files = [RangeInFile.from_entire_file(
+ range_in_files = [RangeInFileWithContents.from_entire_file(
filepath, content) for filepath, content in contents.items()]
# If still no highlighted code, create a new file and edit there
@@ -271,7 +271,12 @@ class EditHighlightedCodeStep(Step):
# Create a new file
new_file_path = "new_file.txt"
await sdk.add_file(new_file_path, "")
- range_in_files = [RangeInFile.from_entire_file(new_file_path, "")]
+ range_in_files = [
+ RangeInFileWithContents.from_entire_file(new_file_path, "")]
+
+ range_in_files = list(map(lambda x: RangeInFile(
+ filepath=x.filepath, range=x.range
+ ), range_in_files))
await sdk.run_step(DefaultModelEditCodeStep(user_input=self.user_input, range_in_files=range_in_files))
diff --git a/extension/package-lock.json b/extension/package-lock.json
index 043f0892..5733c2dd 100644
--- a/extension/package-lock.json
+++ b/extension/package-lock.json
@@ -1,12 +1,12 @@
{
"name": "continue",
- "version": "0.0.125",
+ "version": "0.0.143",
"lockfileVersion": 2,
"requires": true,
"packages": {
"": {
"name": "continue",
- "version": "0.0.125",
+ "version": "0.0.143",
"license": "Apache-2.0",
"dependencies": {
"@electron/rebuild": "^3.2.10",
@@ -21,6 +21,7 @@
"downshift": "^7.6.0",
"fkill": "^8.1.0",
"highlight.js": "^11.7.0",
+ "highlightable": "^1.3.0-beta.0",
"posthog-js": "^1.68.3",
"react-markdown": "^8.0.7",
"react-redux": "^8.0.5",
@@ -5809,6 +5810,22 @@
"node": ">=12.0.0"
}
},
+ "node_modules/highlightable": {
+ "version": "1.3.0-beta.0",
+ "resolved": "https://registry.npmjs.org/highlightable/-/highlightable-1.3.0-beta.0.tgz",
+ "integrity": "sha512-pDgzsLBus8oscL7KOqZ5rVMXgCnEmoig+G5gWoDczMm1SFDsNLzRwmNjkJoKdiA3q0oW/f+VqZ3W2YzG5lXQYA==",
+ "dependencies": {
+ "@types/react": "^18.0.28",
+ "emoji-regex": "^10.2.1",
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0"
+ }
+ },
+ "node_modules/highlightable/node_modules/emoji-regex": {
+ "version": "10.2.1",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.2.1.tgz",
+ "integrity": "sha512-97g6QgOk8zlDRdgq1WxwgTMgEWGVAQvB5Fdpgc1MkNy56la5SKP9GsMXKDOdqwn90/41a8yPwIGk1Y6WVbeMQA=="
+ },
"node_modules/hoist-non-react-statics": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
@@ -9424,7 +9441,6 @@
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
"integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
- "peer": true,
"dependencies": {
"loose-envify": "^1.1.0"
},
@@ -9436,7 +9452,6 @@
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz",
"integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==",
- "peer": true,
"dependencies": {
"loose-envify": "^1.1.0",
"scheduler": "^0.23.0"
@@ -9889,7 +9904,6 @@
"version": "0.23.0",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz",
"integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==",
- "peer": true,
"dependencies": {
"loose-envify": "^1.1.0"
}
@@ -15741,6 +15755,24 @@
"resolved": "https://registry.npmjs.org/highlight.js/-/highlight.js-11.7.0.tgz",
"integrity": "sha512-1rRqesRFhMO/PRF+G86evnyJkCgaZFOI+Z6kdj15TA18funfoqJXvgPCLSf0SWq3SRfg1j3HlDs8o4s3EGq1oQ=="
},
+ "highlightable": {
+ "version": "1.3.0-beta.0",
+ "resolved": "https://registry.npmjs.org/highlightable/-/highlightable-1.3.0-beta.0.tgz",
+ "integrity": "sha512-pDgzsLBus8oscL7KOqZ5rVMXgCnEmoig+G5gWoDczMm1SFDsNLzRwmNjkJoKdiA3q0oW/f+VqZ3W2YzG5lXQYA==",
+ "requires": {
+ "@types/react": "^18.0.28",
+ "emoji-regex": "^10.2.1",
+ "react": "^18.2.0",
+ "react-dom": "^18.2.0"
+ },
+ "dependencies": {
+ "emoji-regex": {
+ "version": "10.2.1",
+ "resolved": "https://registry.npmjs.org/emoji-regex/-/emoji-regex-10.2.1.tgz",
+ "integrity": "sha512-97g6QgOk8zlDRdgq1WxwgTMgEWGVAQvB5Fdpgc1MkNy56la5SKP9GsMXKDOdqwn90/41a8yPwIGk1Y6WVbeMQA=="
+ }
+ }
+ },
"hoist-non-react-statics": {
"version": "3.3.2",
"resolved": "https://registry.npmjs.org/hoist-non-react-statics/-/hoist-non-react-statics-3.3.2.tgz",
@@ -18297,7 +18329,6 @@
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react/-/react-18.2.0.tgz",
"integrity": "sha512-/3IjMdb2L9QbBdWiW5e3P2/npwMBaU9mHCSCUzNln0ZCYbcfTsGbTJrU/kGemdH2IWmB2ioZ+zkxtmq6g09fGQ==",
- "peer": true,
"requires": {
"loose-envify": "^1.1.0"
}
@@ -18306,7 +18337,6 @@
"version": "18.2.0",
"resolved": "https://registry.npmjs.org/react-dom/-/react-dom-18.2.0.tgz",
"integrity": "sha512-6IMTriUmvsjHUjNtEDudZfuDQUoWXVxKHhlEGSk81n4YFS+r/Kl99wXiwlVXtPBtJenozv2P+hxDsw9eA7Xo6g==",
- "peer": true,
"requires": {
"loose-envify": "^1.1.0",
"scheduler": "^0.23.0"
@@ -18622,7 +18652,6 @@
"version": "0.23.0",
"resolved": "https://registry.npmjs.org/scheduler/-/scheduler-0.23.0.tgz",
"integrity": "sha512-CtuThmgHNg7zIZWAXi3AsyIzA3n4xx7aNyjwC2VJldO2LMVDhFK+63xGqq6CsJH4rTAt6/M+N4GhZiDYPx9eUw==",
- "peer": true,
"requires": {
"loose-envify": "^1.1.0"
}
diff --git a/extension/package.json b/extension/package.json
index 1d4b8055..444372f8 100644
--- a/extension/package.json
+++ b/extension/package.json
@@ -14,7 +14,7 @@
"displayName": "Continue",
"pricing": "Free",
"description": "The open-source coding autopilot",
- "version": "0.0.125",
+ "version": "0.0.143",
"publisher": "Continue",
"engines": {
"vscode": "^1.67.0"
@@ -269,6 +269,7 @@
"downshift": "^7.6.0",
"fkill": "^8.1.0",
"highlight.js": "^11.7.0",
+ "highlightable": "^1.3.0-beta.0",
"posthog-js": "^1.68.3",
"react-markdown": "^8.0.7",
"react-redux": "^8.0.5",
diff --git a/extension/react-app/src/components/ComboBox.tsx b/extension/react-app/src/components/ComboBox.tsx
index e6632360..801c3a03 100644
--- a/extension/react-app/src/components/ComboBox.tsx
+++ b/extension/react-app/src/components/ComboBox.tsx
@@ -224,8 +224,8 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
editing={section.editing}
pinned={section.pinned}
index={idx}
- key={`${section.filepath}${idx}`}
- title={`${section.range.filepath} (${
+ key={`${section.display_name}${idx}`}
+ title={`${section.display_name} (${
section.range.range.start.line + 1
}-${section.range.range.end.line + 1})`}
onDelete={() => {
@@ -372,7 +372,7 @@ const ComboBox = React.forwardRef((props: ComboBoxProps, ref) => {
>
{highlightedCodeSections.map((section, idx) => (
<>
- <p>{section.range.filepath}</p>
+ <p>{section.display_name}</p>
<CodeBlock showCopy={false} key={idx}>
{section.range.contents}
</CodeBlock>
diff --git a/extension/schema/FullState.d.ts b/extension/schema/FullState.d.ts
index 981e772e..abb0832d 100644
--- a/extension/schema/FullState.d.ts
+++ b/extension/schema/FullState.d.ts
@@ -32,6 +32,7 @@ export type Character = number;
export type Contents = string;
export type Editing = boolean;
export type Pinned = boolean;
+export type DisplayName = string;
export type HighlightedRanges = HighlightedRangeContext[];
export type Name3 = string;
export type Description1 = string;
@@ -102,6 +103,7 @@ export interface HighlightedRangeContext {
range: RangeInFileWithContents;
editing: Editing;
pinned: Pinned;
+ display_name: DisplayName;
[k: string]: unknown;
}
/**
diff --git a/extension/src/activation/environmentSetup.ts b/extension/src/activation/environmentSetup.ts
index bbf93f65..714080e3 100644
--- a/extension/src/activation/environmentSetup.ts
+++ b/extension/src/activation/environmentSetup.ts
@@ -189,32 +189,46 @@ async function setupPythonEnv() {
`${pythonCmd} -m venv env`,
].join(" ; ");
- // Repeat until it is successfully created (sometimes it fails to generate the bin, need to try again)
- while (true) {
- const [, stderr] = await runCommand(createEnvCommand);
- if (checkEnvExists()) {
- break;
- } else if (stderr) {
- if (stderr.includes("running scripts is disabled on this system")) {
- vscode.window.showErrorMessage(
- "A Python virtual enviroment cannot be activated because running scripts is disabled for this user. Please enable signed scripts to run with this command in PowerShell: `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`, reload VS Code, and then try again."
- );
- }
+ const [stdout, stderr] = await runCommand(createEnvCommand);
+ if (
+ stderr &&
+ stderr.includes("running scripts is disabled on this system")
+ ) {
+ await vscode.window.showErrorMessage(
+ "A Python virtual environment cannot be activated because running scripts is disabled for this user. Please enable signed scripts to run with this command in PowerShell: `Set-ExecutionPolicy -ExecutionPolicy RemoteSigned -Scope CurrentUser`, reload VS Code, and then try again."
+ );
+ throw new Error(stderr);
+ } else if (
+ stderr?.includes("On Debian/Ubuntu systems") ||
+ stdout?.includes("On Debian/Ubuntu systems")
+ ) {
+ // First, try to run the command to install python3-venv
+ let [stdout, stderr] = await runCommand(`${pythonCmd} --version`);
+ if (stderr) {
throw new Error(stderr);
- } else {
- // Remove the env and try again
- const removeCommand = `rm -rf "${path.join(
- getExtensionUri().fsPath,
- "scripts",
- "env"
- )}"`;
- await runCommand(removeCommand);
}
+ const version = stdout.split(" ")[1].split(".")[1];
+ const installVenvCommand = `apt-get install python3.${version}-venv`;
+ await runCommand("apt-get update");
+ // Ask the user to run the command to install python3-venv (requires sudo, so we can't)
+ // First, get the python version
+ const msg = `[Important] Continue needs to create a Python virtual environment, but python3.${version}-venv is not installed. Please run this command in your terminal: \`${installVenvCommand}\`, reload VS Code, and then try again.`;
+ console.log(msg);
+ await vscode.window.showErrorMessage(msg);
+ } else if (checkEnvExists()) {
+ console.log(
+ "Successfully set up python env at ",
+ getExtensionUri().fsPath + "/scripts/env"
+ );
+ } else {
+ const msg = [
+ "Python environment not successfully created. Trying again. Here was the stdout + stderr: ",
+ `stdout: ${stdout}`,
+ `stderr: ${stderr}`,
+ ].join("\n\n");
+ console.log(msg);
+ throw new Error(msg);
}
- console.log(
- "Successfully set up python env at ",
- getExtensionUri().fsPath + "/scripts/env"
- );
}
await retryThenFail(async () => {
@@ -310,7 +324,14 @@ export async function startContinuePythonServer() {
}
}
console.log("Killing old server...");
- await fkill(":65432");
+ try {
+ await fkill(":65432");
+ } catch (e) {
+ console.log(
+ "Failed to kill old server, likely because it didn't exist:",
+ e
+ );
+ }
}
// Do this after above check so we don't have to waste time setting up the env
diff --git a/extension/src/diffs.ts b/extension/src/diffs.ts
index dbfd8f59..b9ef8384 100644
--- a/extension/src/diffs.ts
+++ b/extension/src/diffs.ts
@@ -39,14 +39,25 @@ class DiffManager {
originalFilepath: string,
newFilepath: string
): vscode.TextEditor | undefined {
- // If the file doesn't yet exist, don't open the diff editor
- if (!fs.existsSync(newFilepath)) {
+ // If the file doesn't yet exist or the basename is a single digit number (git hash object or something), don't open the diff editor
+ if (
+ !fs.existsSync(newFilepath) ||
+ path.basename(originalFilepath).match(/^\d$/)
+ ) {
return undefined;
}
const rightUri = vscode.Uri.parse(newFilepath);
const leftUri = vscode.Uri.file(originalFilepath);
const title = "Continue Diff";
+ console.log(
+ "Opening diff window with ",
+ leftUri,
+ rightUri,
+ title,
+ newFilepath,
+ originalFilepath
+ );
vscode.commands.executeCommand("vscode.diff", leftUri, rightUri, title);
const editor = vscode.window.activeTextEditor;
@@ -112,11 +123,13 @@ class DiffManager {
newFilepath = Array.from(this.diffs.keys())[0];
}
if (!newFilepath) {
+ console.log("No newFilepath provided to accept the diff");
return;
}
// Get the diff info, copy new file to original, then delete from record and close the corresponding editor
const diffInfo = this.diffs.get(newFilepath);
if (!diffInfo) {
+ console.log("No corresponding diffInfo found for newFilepath");
return;
}
fs.writeFileSync(
@@ -132,10 +145,12 @@ class DiffManager {
newFilepath = Array.from(this.diffs.keys())[0];
}
if (!newFilepath) {
+ console.log("No newFilepath provided to reject the diff");
return;
}
const diffInfo = this.diffs.get(newFilepath);
if (!diffInfo) {
+ console.log("No corresponding diffInfo found for newFilepath");
return;
}
diff --git a/schema/json/FullState.json b/schema/json/FullState.json
index af0f25e1..5a7e9d10 100644
--- a/schema/json/FullState.json
+++ b/schema/json/FullState.json
@@ -222,12 +222,17 @@
"pinned": {
"title": "Pinned",
"type": "boolean"
+ },
+ "display_name": {
+ "title": "Display Name",
+ "type": "string"
}
},
"required": [
"range",
"editing",
- "pinned"
+ "pinned",
+ "display_name"
]
},
"SlashCommandDescription": {