Diffstat (limited to 'continuedev/src')
-rw-r--r--  continuedev/src/continuedev/core/sdk.py              |  2
-rw-r--r--  continuedev/src/continuedev/libs/llm/proxy_server.py | 11
-rw-r--r--  continuedev/src/continuedev/libs/util/dedent.py      |  8
-rw-r--r--  continuedev/src/continuedev/server/ide.py            |  9
-rw-r--r--  continuedev/src/continuedev/server/ide_protocol.py   |  2
-rw-r--r--  continuedev/src/continuedev/steps/chat.py            | 26
-rw-r--r--  continuedev/src/continuedev/steps/core/core.py       | 43
-rw-r--r--  continuedev/src/continuedev/steps/main.py            |  7
8 files changed, 83 insertions(+), 25 deletions(-)
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 632f8683..50a14bed 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -198,7 +198,7 @@ class ContinueSDK(AbstractContinueSDK):
         # Don't insert after latest user message or function call
         i = -1
-        if history_context[i].role == "user" or history_context[i].role == "function":
+        if len(history_context) > 0 and (history_context[i].role == "user" or history_context[i].role == "function"):
             i -= 1
         history_context.insert(i, msg)
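
The added length check matters because Python's negative indexing raises an IndexError on an empty list, so an empty chat history would have crashed here. A minimal standalone sketch of the failure mode, with a stand-in Msg class replacing ChatMessage for illustration only:

    from dataclasses import dataclass

    @dataclass
    class Msg:
        role: str

    history = []  # an empty chat history
    # history[-1].role would raise IndexError: list index out of range
    i = -1
    if len(history) > 0 and history[i].role in ("user", "function"):
        i -= 1  # step past the trailing user/function message
    # with the guard, an empty history falls through safely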
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index bd831ad9..69c96ee8 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -5,6 +5,11 @@ import aiohttp
 from ...core.main import ChatMessage
 from ..llm import LLM
 from ..util.count_tokens import DEFAULT_ARGS, DEFAULT_MAX_TOKENS, compile_chat_messages, CHAT_MODELS, count_tokens
+import certifi
+import ssl
+
+ca_bundle_path = certifi.where()
+ssl_context = ssl.create_default_context(cafile=ca_bundle_path)

 # SERVER_URL = "http://127.0.0.1:8080"
 SERVER_URL = "https://proxy-server-l6vsfbzhba-uw.a.run.app"
@@ -31,7 +36,7 @@ class ProxyServer(LLM):
     async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
         args = self.default_args | kwargs

-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
             async with session.post(f"{SERVER_URL}/complete", json={
                 "messages": compile_chat_messages(args["model"], with_history, prompt, functions=None),
                 "unique_id": self.unique_id,
@@ -47,7 +52,7 @@ class ProxyServer(LLM):
         messages = compile_chat_messages(
             self.default_model, messages, None, functions=args.get("functions", None))

-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
             async with session.post(f"{SERVER_URL}/stream_chat", json={
                 "messages": messages,
                 "unique_id": self.unique_id,
@@ -71,7 +76,7 @@ class ProxyServer(LLM):
         messages = compile_chat_messages(
             self.default_model, with_history, prompt, functions=args.get("functions", None))

-        async with aiohttp.ClientSession() as session:
+        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
            async with session.post(f"{SERVER_URL}/stream_complete", json={
                 "messages": messages,
                 "unique_id": self.unique_id,
diff --git a/continuedev/src/continuedev/libs/util/dedent.py b/continuedev/src/continuedev/libs/util/dedent.py
index 74edd173..87876d4b 100644
--- a/continuedev/src/continuedev/libs/util/dedent.py
+++ b/continuedev/src/continuedev/libs/util/dedent.py
@@ -3,11 +3,19 @@ from typing import Tuple

 def dedent_and_get_common_whitespace(s: str) -> Tuple[str, str]:
     lines = s.splitlines()
+    if len(lines) == 0:
+        return "", ""

     # Longest common whitespace prefix
     lcp = lines[0].split(lines[0].strip())[0]
+    # Iterate through the lines
     for i in range(1, len(lines)):
+        # Empty lines are wildcards
+        if lines[i].strip() == "":
+            continue
+        # Iterate through the leading whitespace characters of the current line
         for j in range(0, len(lcp)):
+            # If it doesn't have the same whitespace as lcp, then update lcp
             if j >= len(lines[i]) or lcp[j] != lines[i][j]:
                 lcp = lcp[:j]
                 if lcp == "":
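
Taken together, the function computes the longest common leading whitespace while treating blank lines as wildcards, and, given the Tuple[str, str] signature, presumably returns the dedented text together with that common prefix. A hedged usage sketch under that assumption (import path inferred from this repo's layout):

    from continuedev.src.continuedev.libs.util.dedent import dedent_and_get_common_whitespace

    s = "    def f():\n\n        return 1"
    dedented, prefix = dedent_and_get_common_whitespace(s)
    # The blank middle line is skipped as a wildcard, so the shared
    # four-space indent survives as the common prefix.
    assert prefix == "    "
    assert dedented.splitlines()[0] == "def f():"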
diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py
index f3deecdb..e2685493 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/continuedev/src/continuedev/server/ide.py
@@ -7,6 +7,7 @@ import uuid
 from fastapi import WebSocket, Body, APIRouter
 from uvicorn.main import Server

+from ..libs.util.telemetry import capture_event
 from ..libs.util.queue import AsyncSubscriptionQueue
 from ..models.filesystem import FileSystem, RangeInFile, EditDiff, RangeInFileWithContents, RealFileSystem
 from ..models.filesystem_edit import AddDirectory, AddFile, DeleteDirectory, DeleteFile, FileSystemEdit, FileEdit, FileEditWithFullContents, RenameDirectory, RenameFile, SequentialFileSystemEdit
@@ -145,6 +146,8 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
         elif message_type == "commandOutput":
             output = data["output"]
             self.onCommandOutput(output)
+        elif message_type == "acceptRejectSuggestion":
+            self.onAcceptRejectSuggestion(data["accepted"])
         elif message_type in ["highlightedCode", "openFiles", "readFile", "editFile", "workspaceDirectory", "getUserSecret", "runCommand", "uniqueId"]:
             self.sub_queue.post(message_type, data)
         else:
@@ -205,8 +208,10 @@ class IdeProtocolServer(AbstractIdeProtocolServer):

     # This is where you might have triggers: plugins can subscribe to certain events
     # like file changes, tracebacks, etc...

-    def onAcceptRejectSuggestion(self, suggestionId: str, accepted: bool):
-        pass
+    def onAcceptRejectSuggestion(self, accepted: bool):
+        capture_event(self.unique_id, "accept_reject_suggestion", {
+            "accepted": accepted
+        })

     def onFileSystemUpdate(self, update: FileSystemEdit):
         # Access to Autopilot (so SessionManager)
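
The new branch fires when the editor reports the user's verdict on a shown suggestion. A plausible payload shape, inferred from the dispatch code above; the top-level key names are an assumption, since only data["accepted"] is confirmed by the handler:

    # Hypothetical websocket payload; the handler above forwards
    # data["accepted"] to capture_event for telemetry.
    message = {
        "messageType": "acceptRejectSuggestion",
        "data": {"accepted": True},
    }
    message_type, data = message["messageType"], message["data"]
    if message_type == "acceptRejectSuggestion":
        # mirrors IdeProtocolServer.onAcceptRejectSuggestion(data["accepted"])
        print("suggestion", "accepted" if data["accepted"] else "rejected")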
diff --git a/continuedev/src/continuedev/server/ide_protocol.py b/continuedev/src/continuedev/server/ide_protocol.py
index 17a09c3d..de2eea27 100644
--- a/continuedev/src/continuedev/server/ide_protocol.py
+++ b/continuedev/src/continuedev/server/ide_protocol.py
@@ -36,7 +36,7 @@ class AbstractIdeProtocolServer(ABC):
         """Show suggestions to the user and wait for a response"""

     @abstractmethod
-    def onAcceptRejectSuggestion(self, suggestionId: str, accepted: bool):
+    def onAcceptRejectSuggestion(self, accepted: bool):
         """Called when the user accepts or rejects a suggestion"""

     @abstractmethod
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index b10ec3d7..9d556655 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -21,6 +21,7 @@ openai.api_key = OPENAI_API_KEY
 class SimpleChatStep(Step):
     user_input: str
     name: str = "Chat"
+    manage_own_chat_context: bool = True

     async def run(self, sdk: ContinueSDK):
         self.description = f"`{self.user_input}`\n\n"
@@ -29,16 +30,35 @@ class SimpleChatStep(Step):
             self.description = ""
             await sdk.update_ui()

-        async for chunk in sdk.models.default.stream_complete(self.user_input, with_history=await sdk.get_chat_context()):
+        messages = await sdk.get_chat_context()
+        messages.append(ChatMessage(
+            role="user",
+            content=self.user_input,
+            summary=self.user_input
+        ))
+
+        completion = ""
+        async for chunk in sdk.models.gpt4.stream_chat(messages):
             if sdk.current_step_was_deleted():
                 return
-            self.description += chunk
-            await sdk.update_ui()
+            if "content" in chunk:
+                self.description += chunk["content"]
+                completion += chunk["content"]
+                await sdk.update_ui()

         self.name = (await sdk.models.gpt35.complete(
             f"Write a short title for the following chat message: {self.description}")).strip()

+        if self.name.startswith('"') and self.name.endswith('"'):
+            self.name = self.name[1:-1]
+
+        self.chat_context.append(ChatMessage(
+            role="assistant",
+            content=completion,
+            summary=self.name
+        ))
+

 class AddFileStep(Step):
     name: str = "Add File"
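
The key check in the loop above exists because chat-style streams yield delta dicts that do not all carry a "content" key (role announcements and function-call deltas, for example). A minimal sketch of the same accumulation pattern, with a fake generator standing in for sdk.models.gpt4.stream_chat:

    import asyncio

    async def fake_stream_chat(messages):
        # Stand-in for sdk.models.gpt4.stream_chat: yields OpenAI-style
        # deltas, not all of which carry a "content" key.
        for chunk in [{"role": "assistant"}, {"content": "Hello"}, {"content": " world"}]:
            yield chunk

    async def main():
        completion = ""
        async for chunk in fake_stream_chat([]):
            if "content" in chunk:  # skip role/function-call deltas
                completion += chunk["content"]
        print(completion)           # -> "Hello world"

    asyncio.run(main())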
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 729f5e66..4eb2445c 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -154,25 +154,32 @@ class DefaultModelEditCodeStep(Step):

     _prompt_and_completion: str = ""

-    async def describe(self, models: Models) -> Coroutine[str, None, None]:
-        description = await models.gpt3516k.complete(
-            f"{self._prompt_and_completion}\n\nPlease give a brief description of the changes made above using markdown bullet points. Be concise and only mention changes made to the commit before, not prefix or suffix:")
-        self.name = await models.gpt3516k.complete(f"Write a very short title to describe this requested change (no quotes): '{self.user_input}'. This is the title:")
+    def _cleanup_output(self, output: str) -> str:
+        output = output.replace('\\"', '"')
+        output = output.replace("\\'", "'")
+        output = output.replace("\\n", "\n")
+        output = output.replace("\\t", "\t")
+        output = output.replace("\\\\", "\\")
+        if output.startswith('"') and output.endswith('"'):
+            output = output[1:-1]

-        # Remove quotes from title and description if they are wrapped
-        if description.startswith('"') and description.endswith('"'):
-            description = description[1:-1]
+        return output

-        if self.name.startswith('"') and self.name.endswith('"'):
-            self.name = self.name[1:-1]
+    async def describe(self, models: Models) -> Coroutine[str, None, None]:
+        description = await models.gpt3516k.complete(dedent(f"""\
+            {self._prompt_and_completion}
+
+            Please give a brief description of the changes made above using markdown bullet points. Be concise and only mention changes made to the commit before, not prefix or suffix:"""))
+        name = await models.gpt3516k.complete(f"Write a very short title to describe this requested change (no quotes): '{self.user_input}'. This is the title:")
+        self.name = self._cleanup_output(name)

-        return f"`{self.user_input}`\n\n" + description
+        return f"`{self.user_input}`\n\n{self._cleanup_output(description)}"

     async def get_prompt_parts(self, rif: RangeInFileWithContents, sdk: ContinueSDK, full_file_contents: str):
         # We don't know here all of the functions being passed in.
         # We care because if this prompt itself goes over the limit, then the entire message will have to be cut from the completion.
         # Overflow won't happen, but prune_chat_messages in count_tokens.py will cut out this whole thing, instead of us cutting out only as many lines as we need.
-        model_to_use = sdk.models.default
+        model_to_use = sdk.models.gpt4

         BUFFER_FOR_FUNCTIONS = 400
         total_tokens = model_to_use.count_tokens(
@@ -360,7 +367,7 @@ class DefaultModelEditCodeStep(Step):

                 # Insert the suggestion
                 replacement = "\n".join(current_block_lines)
-                start_line = current_block_start + 1
+                start_line = current_block_start
                 end_line = current_block_start + index_of_last_line_in_block
                 await sdk.ide.showSuggestion(FileEdit(
                     filepath=rif.filepath,
@@ -368,10 +375,9 @@ class DefaultModelEditCodeStep(Step):
                         start_line, 0, end_line, 0),
                     replacement=replacement
                 ))
-                if replacement == "":
-                    current_line_in_file += 1

                 # Reset current block / update variables
+                current_line_in_file += 1
                 offset_from_blocks += len(current_block_lines)
                 original_lines_below_previous_blocks = original_lines_below_previous_blocks[
                     index_of_last_line_in_block + 1:]
@@ -493,7 +499,7 @@ class DefaultModelEditCodeStep(Step):
             await sdk.ide.showSuggestion(FileEdit(
                 filepath=rif.filepath,
                 range=Range.from_shorthand(
-                    current_block_start + 1, 0, current_block_start + len(original_lines_below_previous_blocks), 0),
+                    current_block_start, 0, current_block_start + len(original_lines_below_previous_blocks), 0),
                 replacement="\n".join(current_block_lines)
             ))
@@ -585,10 +591,17 @@ class UserInputStep(Step):
     name: str = "User Input"
     hide: bool = True

+    manage_own_chat_context: bool = True
+
     async def describe(self, models: Models) -> Coroutine[str, None, None]:
         return self.user_input

     async def run(self, sdk: ContinueSDK) -> Coroutine[UserInputObservation, None, None]:
+        self.chat_context.append(ChatMessage(
+            role="user",
+            content=self.user_input,
+            summary=self.user_input
+        ))
         return UserInputObservation(user_input=self.user_input)
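
The _cleanup_output helper added above normalizes model output that comes back as an escaped, quoted string. A quick standalone check of its behavior (the function body is copied verbatim from the diff):

    def _cleanup_output(output: str) -> str:
        # Unescape quotes, newlines, tabs, and backslashes, then strip
        # a surrounding pair of double quotes if present.
        output = output.replace('\\"', '"')
        output = output.replace("\\'", "'")
        output = output.replace("\\n", "\n")
        output = output.replace("\\t", "\t")
        output = output.replace("\\\\", "\\")
        if output.startswith('"') and output.endswith('"'):
            output = output[1:-1]
        return output

    raw = '"a \\"quoted\\" title\\nsecond line"'  # escaped text as a model might return it
    print(_cleanup_output(raw))
    # a "quoted" title
    # second line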
diff --git a/continuedev/src/continuedev/steps/main.py b/continuedev/src/continuedev/steps/main.py
index def1af4e..3cf78c40 100644
--- a/continuedev/src/continuedev/steps/main.py
+++ b/continuedev/src/continuedev/steps/main.py
@@ -266,6 +266,13 @@ class EditHighlightedCodeStep(Step):
             range_in_files = [RangeInFile.from_entire_file(
                 filepath, content) for filepath, content in contents.items()]

+        # If still no highlighted code, create a new file and edit there
+        if len(range_in_files) == 0:
+            # Create a new file
+            new_file_path = "new_file.txt"
+            await sdk.add_file(new_file_path)
+            range_in_files = [RangeInFile.from_entire_file(new_file_path, "")]
+
         await sdk.run_step(DefaultModelEditCodeStep(user_input=self.user_input, range_in_files=range_in_files))
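
With this fallback, an edit request with nothing highlighted and no open files still has somewhere to land. A condensed sketch of the resulting selection order; pick_edit_targets is a hypothetical helper and the surrounding retrieval code is abbreviated:

    # Fallback order in EditHighlightedCodeStep:
    # highlighted ranges -> contents of open files -> a brand-new file.
    from continuedev.src.continuedev.models.filesystem import RangeInFile

    async def pick_edit_targets(sdk, highlighted, open_file_contents):
        if highlighted:
            return highlighted
        if open_file_contents:
            return [RangeInFile.from_entire_file(path, content)
                    for path, content in open_file_contents.items()]
        new_file_path = "new_file.txt"  # same name the step above uses
        await sdk.add_file(new_file_path)
        return [RangeInFile.from_entire_file(new_file_path, "")]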