| | | |
|---|---|---|
| author | Ty Dunn <ty@tydunn.com> | 2023-07-04 21:37:39 -0700 |
| committer | Ty Dunn <ty@tydunn.com> | 2023-07-04 21:37:39 -0700 |
| commit | 26f147d2fdd5412117445c97a937c49b064da082 (patch) | |
| tree | 0dd728f2ce806d0dba2dc15a65a26f2c58562a74 /continuedev/src | |
| parent | d802dae10b26f59893380829feea3104b0650e14 (diff) | |
| parent | 2d8c28965684d03ef711253e5555ef304882828f (diff) | |
| download | sncontinue-26f147d2fdd5412117445c97a937c49b064da082.tar.gz | |
| | sncontinue-26f147d2fdd5412117445c97a937c49b064da082.tar.bz2 | |
| | sncontinue-26f147d2fdd5412117445c97a937c49b064da082.zip | |
Merge branch 'main' into stop
Diffstat (limited to 'continuedev/src')
| mode | file | lines changed |
|---|---|---|
| -rw-r--r-- | continuedev/src/continuedev/core/autopilot.py | 19 |
| -rw-r--r-- | continuedev/src/continuedev/core/main.py | 7 |
| -rw-r--r-- | continuedev/src/continuedev/core/sdk.py | 13 |
| -rw-r--r-- | continuedev/src/continuedev/libs/llm/hugging_face.py | 11 |
| -rw-r--r-- | continuedev/src/continuedev/libs/llm/openai.py | 10 |
| -rw-r--r-- | continuedev/src/continuedev/libs/llm/proxy_server.py | 8 |
| -rw-r--r-- | continuedev/src/continuedev/server/gui.py | 10 |
| -rw-r--r-- | continuedev/src/continuedev/server/ide.py | 6 |
| -rw-r--r-- | continuedev/src/continuedev/server/ide_protocol.py | 4 |
| -rw-r--r-- | continuedev/src/continuedev/steps/chat.py | 1 |
| -rw-r--r-- | continuedev/src/continuedev/steps/core/core.py | 2 |
| -rw-r--r-- | continuedev/src/continuedev/steps/search_directory.py | 4 |
12 files changed, 65 insertions, 30 deletions
```diff
diff --git a/continuedev/src/continuedev/core/autopilot.py b/continuedev/src/continuedev/core/autopilot.py
index 5193a02b..313ceded 100644
--- a/continuedev/src/continuedev/core/autopilot.py
+++ b/continuedev/src/continuedev/core/autopilot.py
@@ -144,9 +144,7 @@ class Autopilot(ContinueBaseModel):
     async def handle_highlighted_code(self, range_in_files: List[RangeInFileWithContents]):
         workspace_path = self.continue_sdk.ide.workspace_directory
         for rif in range_in_files:
-            rif.filepath = os.path.relpath(rif.filepath, workspace_path)
-            if rif.filepath.startswith(".."):
-                rif.filepath = os.path.basename(rif.filepath)
+            rif.filepath = os.path.basename(rif.filepath)

         # If current range overlaps with any others, delete them and only keep the new range
         new_ranges = []
@@ -156,6 +154,13 @@ class Autopilot(ContinueBaseModel):
                 if rif.filepath == new_rif.filepath and rif.range.overlaps_with(new_rif.range):
                     found_overlap = True
                     break
+
+                # Also don't allow multiple ranges in same file with same content. This is useless to the model, and avoids
+                # the bug where cmd+f causes repeated highlights
+                if rif.filepath == new_rif.filepath and rif.contents == new_rif.contents:
+                    found_overlap = True
+                    break
+
             if not found_overlap:
                 new_ranges.append(rif)

@@ -173,8 +178,12 @@ class Autopilot(ContinueBaseModel):
         self.history.timeline[index].deleted = True
         await self.update_subscribers()

-    async def delete_context_item_at_index(self, index: int):
-        self._highlighted_ranges.pop(index)
+    async def delete_context_at_indices(self, indices: List[int]):
+        kept_ranges = []
+        for i, rif in enumerate(self._highlighted_ranges):
+            if i not in indices:
+                kept_ranges.append(rif)
+        self._highlighted_ranges = kept_ranges
         await self.update_subscribers()

     async def _run_singular_step(self, step: "Step", is_future_step: bool = False) -> Coroutine[Observation, None, None]:
```

```diff
diff --git a/continuedev/src/continuedev/core/main.py b/continuedev/src/continuedev/core/main.py
index 9a6617f4..8bad09d1 100644
--- a/continuedev/src/continuedev/core/main.py
+++ b/continuedev/src/continuedev/core/main.py
@@ -107,11 +107,9 @@ class HistoryNode(ContinueBaseModel):
             return self.step.chat_context
         return self.step.chat_context + [
             ChatMessage(
-                role="function",
+                role="assistant",
                 name=self.step.__class__.__name__,
-                content=json.dumps({
-                    "description": self.step.description or "Function complete",
-                }),
+                content=self.step.description or f"Ran function {self.step.name}",
                 summary=f"Called function {self.step.name}"
             )]

@@ -200,6 +198,7 @@ class SlashCommandDescription(ContinueBaseModel):
     name: str
     description: str

+
 class FullState(ContinueBaseModel):
     """A full state of the program, including the history"""
     history: History
```
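The `handle_highlighted_code` change in `autopilot.py` above drops a new highlight when an already-kept range in the same file either overlaps it or has identical contents (the cmd+f case). A minimal sketch of that filtering logic, using plain tuples as a hypothetical stand-in for the project's `RangeInFileWithContents` model:

```python
from typing import List, Tuple

# Hypothetical stand-in for RangeInFileWithContents: (filepath, (start, end), contents)
Rif = Tuple[str, Tuple[int, int], str]


def overlaps(a: Tuple[int, int], b: Tuple[int, int]) -> bool:
    # Closed line ranges overlap when neither one ends before the other starts
    return a[0] <= b[1] and b[0] <= a[1]


def dedup_highlights(highlights: List[Rif]) -> List[Rif]:
    kept: List[Rif] = []
    for rif in highlights:
        found_overlap = any(
            rif[0] == other[0] and (overlaps(rif[1], other[1]) or rif[2] == other[2])
            for other in kept
        )
        if not found_overlap:
            kept.append(rif)
    return kept


# Pressing cmd+f can re-highlight the same text elsewhere in the file;
# identical contents in the same file collapse to a single entry
hl = [("a.py", (1, 3), "x = 1"), ("a.py", (10, 12), "x = 1"), ("b.py", (1, 3), "y = 2")]
assert dedup_highlights(hl) == [("a.py", (1, 3), "x = 1"), ("b.py", (1, 3), "y = 2")]
```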
```diff
diff --git a/continuedev/src/continuedev/core/sdk.py b/continuedev/src/continuedev/core/sdk.py
index 988ac6b0..fe975b99 100644
--- a/continuedev/src/continuedev/core/sdk.py
+++ b/continuedev/src/continuedev/core/sdk.py
@@ -97,7 +97,18 @@ class ContinueSDK(AbstractContinueSDK):
     async def _ensure_absolute_path(self, path: str) -> str:
         if os.path.isabs(path):
             return path
-        return os.path.join(self.ide.workspace_directory, path)
+
+        # Else if in workspace
+        workspace_path = os.path.join(self.ide.workspace_directory, path)
+        if os.path.exists(workspace_path):
+            return workspace_path
+        else:
+            # Check if it matches any of the open files, then use that absolute path
+            open_files = await self.ide.getOpenFiles()
+            for open_file in open_files:
+                if os.path.basename(open_file) == os.path.basename(path):
+                    return open_file
+            raise Exception(f"Path {path} does not exist")

     async def run_step(self, step: Step) -> Coroutine[Observation, None, None]:
         return await self.__autopilot._run_singular_step(step)
```

```diff
diff --git a/continuedev/src/continuedev/libs/llm/hugging_face.py b/continuedev/src/continuedev/libs/llm/hugging_face.py
index 868cb560..b0db585b 100644
--- a/continuedev/src/continuedev/libs/llm/hugging_face.py
+++ b/continuedev/src/continuedev/libs/llm/hugging_face.py
@@ -1,14 +1,17 @@
 from .llm import LLM
 from transformers import AutoTokenizer, AutoModelForCausalLM

+
 class HuggingFace(LLM):
     def __init__(self, model_path: str = "Salesforce/codegen-2B-mono"):
         self.model_path = model_path
         self.tokenizer = AutoTokenizer.from_pretrained(model_path)
         self.model = AutoModelForCausalLM.from_pretrained(model_path)
-    
+
     def complete(self, prompt: str, **kwargs):
-        args = { "max_tokens": 100 } | kwargs
+        args = {"max_tokens": 100}
+        args.update(kwargs)
         input_ids = self.tokenizer(prompt, return_tensors="pt").input_ids
-        generated_ids = self.model.generate(input_ids, max_length=args["max_tokens"])
-        return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
\ No newline at end of file
+        generated_ids = self.model.generate(
+            input_ids, max_length=args["max_tokens"])
+        return self.tokenizer.decode(generated_ids[0], skip_special_tokens=True)
```
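Several hunks in this merge replace the PEP 584 dict-union operator with older idioms: `|` on dicts only exists in Python 3.9+, so `{"max_tokens": 100} | kwargs` raises `TypeError` on earlier interpreters. The two replacement styles used in the patch produce the same merged dict, as this small check (with made-up option values) shows:

```python
defaults = {"max_tokens": 100}
overrides = {"max_tokens": 256, "temperature": 0.5}  # hypothetical caller kwargs

# Style 1: copy then update, as in HuggingFace.complete
args = dict(defaults)
args.update(overrides)

# Style 2: double-splat merge, as in OpenAI.default_args; later keys win
args2 = {**defaults, **overrides}

assert args == args2 == {"max_tokens": 256, "temperature": 0.5}
assert defaults == {"max_tokens": 100}  # the shared defaults are untouched

# For comparison, on Python >= 3.9 only:
#   defaults | overrides  ->  {"max_tokens": 256, "temperature": 0.5}
```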
```diff
diff --git a/continuedev/src/continuedev/libs/llm/openai.py b/continuedev/src/continuedev/libs/llm/openai.py
index a3ca5c80..c4e4139f 100644
--- a/continuedev/src/continuedev/libs/llm/openai.py
+++ b/continuedev/src/continuedev/libs/llm/openai.py
@@ -24,13 +24,14 @@ class OpenAI(LLM):
     @property
     def default_args(self):
-        return DEFAULT_ARGS | {"model": self.default_model}
+        return {**DEFAULT_ARGS, "model": self.default_model}

     def count_tokens(self, text: str):
         return count_tokens(self.default_model, text)

     async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
-        args = self.default_args | kwargs
+        args = self.default_args.copy()
+        args.update(kwargs)
         args["stream"] = True

         if args["model"] in CHAT_MODELS:
@@ -48,7 +49,8 @@ class OpenAI(LLM):
                 yield chunk.choices[0].text

     async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
-        args = self.default_args | kwargs
+        args = self.default_args.copy()
+        args.update(kwargs)
         args["stream"] = True
         args["model"] = self.default_model if self.default_model in CHAT_MODELS else "gpt-3.5-turbo-0613"
         if not args["model"].endswith("0613") and "functions" in args:
@@ -62,7 +64,7 @@ class OpenAI(LLM):
             yield chunk.choices[0].delta

     async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
-        args = self.default_args | kwargs
+        args = {**self.default_args, **kwargs}

         if args["model"] in CHAT_MODELS:
             resp = (await openai.ChatCompletion.acreate(
```

```diff
diff --git a/continuedev/src/continuedev/libs/llm/proxy_server.py b/continuedev/src/continuedev/libs/llm/proxy_server.py
index 69c96ee8..05ece394 100644
--- a/continuedev/src/continuedev/libs/llm/proxy_server.py
+++ b/continuedev/src/continuedev/libs/llm/proxy_server.py
@@ -28,13 +28,13 @@ class ProxyServer(LLM):
     @property
     def default_args(self):
-        return DEFAULT_ARGS | {"model": self.default_model}
+        return {**DEFAULT_ARGS, "model": self.default_model}

     def count_tokens(self, text: str):
         return count_tokens(self.default_model, text)

     async def complete(self, prompt: str, with_history: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, str]:
-        args = self.default_args | kwargs
+        args = {**self.default_args, **kwargs}

         async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(ssl_context=ssl_context)) as session:
             async with session.post(f"{SERVER_URL}/complete", json={
@@ -48,7 +48,7 @@ class ProxyServer(LLM):
                     raise Exception(await resp.text())

     async def stream_chat(self, messages: List[ChatMessage] = [], **kwargs) -> Coroutine[Any, Any, Generator[Union[Any, List, Dict], None, None]]:
-        args = self.default_args | kwargs
+        args = {**self.default_args, **kwargs}
         messages = compile_chat_messages(
             self.default_model, messages, None, functions=args.get("functions", None))
@@ -72,7 +72,7 @@ class ProxyServer(LLM):
                             raise Exception(str(line[0]))

     async def stream_complete(self, prompt, with_history: List[ChatMessage] = [], **kwargs) -> Generator[Union[Any, List, Dict], None, None]:
-        args = self.default_args | kwargs
+        args = {**self.default_args, **kwargs}
         messages = compile_chat_messages(
             self.default_model, with_history, prompt, functions=args.get("functions", None))
```
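One detail worth noting in `stream_complete`/`stream_chat`: the merge goes through `self.default_args.copy()` rather than calling `update` on the defaults directly, because `dict.update` mutates its receiver in place and returns `None`. A sketch of the failure mode this avoids (the values in `DEFAULT_ARGS` here are hypothetical; only the name comes from the patch):

```python
DEFAULT_ARGS = {"model": "gpt-3.5-turbo", "temperature": 0.0}  # hypothetical values

# Wrong: update() returns None, and the shared defaults now carry "stream"
bad = DEFAULT_ARGS.update({"stream": True})
assert bad is None
assert DEFAULT_ARGS["stream"] is True  # leaked into every later call

# Reset, then merge the safe way: copy first, mutate only the copy
DEFAULT_ARGS = {"model": "gpt-3.5-turbo", "temperature": 0.0}
args = DEFAULT_ARGS.copy()
args.update({"stream": True})
assert "stream" not in DEFAULT_ARGS
```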
```diff
diff --git a/continuedev/src/continuedev/server/gui.py b/continuedev/src/continuedev/server/gui.py
index b2f23bac..4e960f7c 100644
--- a/continuedev/src/continuedev/server/gui.py
+++ b/continuedev/src/continuedev/server/gui.py
@@ -1,6 +1,6 @@
 import json
 from fastapi import Depends, Header, WebSocket, APIRouter
-from typing import Any, Type, TypeVar, Union
+from typing import Any, List, Type, TypeVar, Union
 from pydantic import BaseModel
 from uvicorn.main import Server

@@ -83,8 +83,8 @@ class GUIProtocolServer(AbstractGUIProtocolServer):
                 self.on_clear_history()
             elif message_type == "delete_at_index":
                 self.on_delete_at_index(data["index"])
-            elif message_type == "delete_context_item_at_index":
-                self.on_delete_context_item_at_index(data["index"])
+            elif message_type == "delete_context_at_indices":
+                self.on_delete_context_at_indices(data["indices"])
         except Exception as e:
             print(e)

@@ -123,9 +123,9 @@ class GUIProtocolServer(AbstractGUIProtocolServer):
     def on_delete_at_index(self, index: int):
         asyncio.create_task(self.session.autopilot.delete_at_index(index))

-    def on_delete_context_item_at_index(self, index: int):
+    def on_delete_context_at_indices(self, indices: List[int]):
         asyncio.create_task(
-            self.session.autopilot.delete_context_item_at_index(index)
+            self.session.autopilot.delete_context_at_indices(indices)
         )
```

```diff
diff --git a/continuedev/src/continuedev/server/ide.py b/continuedev/src/continuedev/server/ide.py
index e2685493..ea355d3c 100644
--- a/continuedev/src/continuedev/server/ide.py
+++ b/continuedev/src/continuedev/server/ide.py
@@ -160,6 +160,12 @@ class IdeProtocolServer(AbstractIdeProtocolServer):
             "edit": file_edit.dict()
         })

+    async def showDiff(self, filepath: str, replacement: str):
+        await self._send_json("showDiff", {
+            "filepath": filepath,
+            "replacement": replacement
+        })
+
     async def setFileOpen(self, filepath: str, open: bool = True):
         # Autopilot needs access to this.
         await self._send_json("setFileOpen", {
```
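The switch from `delete_context_item_at_index` to `delete_context_at_indices` also sidesteps an index-shifting bug: popping items one index at a time invalidates the remaining indices, whereas the single filtering pass in the new `Autopilot.delete_context_at_indices` does not. A small demonstration of both behaviors:

```python
from typing import List

items = ["a", "b", "c", "d"]

# Naive: popping index 1 then index 2 removes "b" and then "d" -- not "b" and "c"
naive = items.copy()
for i in [1, 2]:
    naive.pop(i)
assert naive == ["a", "c"]


def delete_at_indices(xs: List[str], indices: List[int]) -> List[str]:
    # Same shape as the new delete_context_at_indices: keep what isn't listed
    return [x for i, x in enumerate(xs) if i not in indices]


assert delete_at_indices(items, [1, 2]) == ["a", "d"]
```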
```diff
diff --git a/continuedev/src/continuedev/server/ide_protocol.py b/continuedev/src/continuedev/server/ide_protocol.py
index de2eea27..2e1f78d7 100644
--- a/continuedev/src/continuedev/server/ide_protocol.py
+++ b/continuedev/src/continuedev/server/ide_protocol.py
@@ -95,6 +95,10 @@ class AbstractIdeProtocolServer(ABC):
     def onHighlightedCodeUpdate(self, range_in_files: List[RangeInFileWithContents]):
         """Called when highlighted code is updated"""

+    @abstractmethod
+    async def showDiff(self, filepath: str, replacement: str):
+        """Show a diff"""
+
     @abstractproperty
     def workspace_directory(self) -> str:
         """Get the workspace directory"""
```

```diff
diff --git a/continuedev/src/continuedev/steps/chat.py b/continuedev/src/continuedev/steps/chat.py
index 49dd98e4..db3f9d7f 100644
--- a/continuedev/src/continuedev/steps/chat.py
+++ b/continuedev/src/continuedev/steps/chat.py
@@ -149,6 +149,7 @@ class ChatWithFunctions(Step):
     name: str = "Input"
     manage_own_chat_context: bool = True
     description: str = ""
+    hide: bool = True

     async def run(self, sdk: ContinueSDK):
         await sdk.update_ui()
```

```diff
diff --git a/continuedev/src/continuedev/steps/core/core.py b/continuedev/src/continuedev/steps/core/core.py
index 4ad47689..b9f0da35 100644
--- a/continuedev/src/continuedev/steps/core/core.py
+++ b/continuedev/src/continuedev/steps/core/core.py
@@ -174,7 +174,7 @@ class DefaultModelEditCodeStep(Step):
         name = await models.gpt3516k.complete(f"Write a very short title to describe this requested change (no quotes): '{self.user_input}'. This is the title:")
         self.name = self._cleanup_output(name)

-        return f"`{self.user_input}`\n\n{self._cleanup_output(description)}"
+        return f"{self._cleanup_output(description)}"

     async def get_prompt_parts(self, rif: RangeInFileWithContents, sdk: ContinueSDK, full_file_contents: str):
         # We don't know here all of the functions being passed in.
```

```diff
diff --git a/continuedev/src/continuedev/steps/search_directory.py b/continuedev/src/continuedev/steps/search_directory.py
index d2966f46..2eecc99c 100644
--- a/continuedev/src/continuedev/steps/search_directory.py
+++ b/continuedev/src/continuedev/steps/search_directory.py
@@ -1,6 +1,6 @@
 import asyncio
 from textwrap import dedent
-from typing import List
+from typing import List, Union
 from ..models.filesystem import RangeInFile
 from ..models.main import Range

@@ -54,7 +54,7 @@ class WriteRegexPatternStep(Step):
 class EditAllMatchesStep(Step):
     pattern: str
     user_request: str
-    directory: str | None = None
+    directory: Union[str, None] = None

     async def run(self, sdk: ContinueSDK):
         # Search all files for a given string
```
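Like the dict-union rewrites, the `search_directory.py` change trades newer syntax for a backward-compatible spelling: `str | None` (PEP 604) in a class-body annotation is evaluated at class-definition time and raises `TypeError` before Python 3.10, while `Union[str, None]` (equivalently `Optional[str]`) denotes the same type on every supported version. A minimal sketch, using a pared-down, hypothetical stand-in for the `Step` subclass in the patch:

```python
from typing import Optional, Union

# On Python < 3.10, this class-body annotation would fail at import time with
#   TypeError: unsupported operand type(s) for |: 'type' and 'NoneType'
#     directory: str | None = None


class EditAllMatchesStub:
    # Hypothetical stand-in; the real EditAllMatchesStep is a pydantic Step model
    pattern: str = ""
    user_request: str = ""
    directory: Union[str, None] = None  # identical to Optional[str]


assert Union[str, None] == Optional[str]
assert EditAllMatchesStub.directory is None
```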
